text
stringlengths 11
4.05M
|
|---|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package azurestack
import (
"fmt"
"net/http"
"os"
"github.com/Azure/aks-engine/pkg/armhelpers/azurestack/testserver"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-03-30/compute"
"github.com/Azure/go-autorest/autorest/azure"
)
// Shared fixture identifiers and canned-response file paths used by the
// HTTP mock client below. All values are arbitrary test fixtures.
const (
	// Identity and scoping fixtures.
	subscriptionID = "cc6b141e-6afc-4786-9bf6-e3b9a5601460"
	tenantID       = "19590a3f-b1af-4e6b-8f63-f917cbf40711"
	resourceGroup  = "TestResourceGroup"
	// API versions the mock endpoints accept; requests with any other
	// api-version query value get a 404.
	computeAPIVersion       = "2017-03-30"
	networkAPIVersion       = "2017-10-01"
	deploymentAPIVersion    = "2018-05-01"
	resourceGroupAPIVersion = "2018-05-01"
	logAnalyticsAPIVersion  = "2015-11-01-preview"
	subscriptionsAPIVersion = "2016-06-01"
	// Resource-name fixtures.
	deploymentName                      = "testDeplomentName"
	deploymentStatus                    = "08586474508192185203"
	virtualMachineScaleSetName          = "vmscalesetName"
	virtualMachineAvailabilitySetName   = "vmavailabilitysetName"
	virtualMachineName                  = "testVirtualMachineName"
	logAnalyticsDefaultResourceGroupEUS = "DefaultResourceGroup-EUS"
	logAnalyticsDefaultWorkspaceNameEUS = "DefaultWorkspace-cc6b141e-6afc-4786-9bf6-e3b9a5601460-EUS"
	logAnalyticsDefaultResourceGroupWEU = "DefaultResourceGroup-WEU"
	logAnalyticsDefaultWorkspaceNameWEU = "DefaultWorkspace-cc6b141e-6afc-4786-9bf6-e3b9a5601460-WEU"
	logAnalyticsWorkspaceName           = "testLogAnalyticsWorkspace"
	logAnalyticsSolutionName            = "ContainerInsights(testLogAnalyticsWorkspace)"
	virtualNicName                      = "testVirtualNicName"
	// NOTE(review): both the identifier ("virutal") and the value
	// ("dick") contain typos; left as-is because the identifier is
	// referenced elsewhere in this file.
	virutalDiskName = "testVirtualdickName"
	location        = "local"
	operationID     = "7184adda-13fc-4d49-b941-fbbc3b08ed64"
	publisher       = "DefaultPublisher"
	sku             = "DefaultSku"
	offer           = "DefaultOffer"
	version         = "DefaultVersion"
	// Paths to JSON files holding the canned HTTP response bodies,
	// relative to the package directory.
	filePathTokenResponse                      = "httpMockClientData/tokenResponse.json"
	filePathListLocations                      = "httpMockClientData/listLocations.json"
	filePathListVirtualMachineScaleSets        = "httpMockClientData/listVirtualMachineScaleSets.json"
	filePathListVirtualMachineScaleSetVMs      = "httpMockClientData/listVirtualMachineScaleSetVMs.json"
	filePathListVirtualMachines                = "httpMockClientData/listVirtualMachines.json"
	filePathGetVirtualMachine                  = "httpMockClientData/getVirtualMachine.json"
	fileDeployVirtualMachine                   = "httpMockClientData/deployVMResponse.json"
	fileDeployVirtualMachineError              = "httpMockClientData/deploymentVMError.json"
	filePathGetAvailabilitySet                 = "httpMockClientData/getAvailabilitySet.json"
	filePathGetLogAnalyticsWorkspace           = "httpMockClientData/getLogAnalyticsWorkspace.json"
	filePathGetLogAnalyticsWorkspaceSharedKeys = "httpMockClientData/getLogAnalyticsWorkspaceSharedKeys.json"
	filePathListWorkspacesByResourceGroup      = "httpMockClientData/getListWorkspacesByResourceGroup.json"
	filePathCreateOrUpdateWorkspace            = "httpMockClientData/createOrUpdateWorkspace.json"
	filePathGetVirtualMachineImage             = "httpMockClientData/getVirtualMachineImage.json"
	filePathListVirtualMachineImages           = "httpMockClientData/listVirtualMachineImages.json"
)
// HTTPMockClient is a wrapper of httpmock. It owns an http.ServeMux on
// which the Register* methods install canned Azure REST responses, and a
// test server (started by Activate) that serves them.
type HTTPMockClient struct {
	// Fixture identifiers, initialized from the package constants.
	SubscriptionID                      string
	TenantID                            string
	ResourceGroup                       string
	ResourceGroupAPIVersion             string
	ComputeAPIVersion                   string
	NetworkAPIVersion                   string
	DeploymentAPIVersion                string
	LogAnalyticsAPIVersion              string
	SubscriptionsAPIVersion             string
	DeploymentName                      string
	DeploymentStatus                    string
	VirtualMachineScaleSetName          string
	VirtualMachineName                  string
	LogAnalyticsDefaultResourceGroupEUS string
	LogAnalyticsDefaultWorkspaceNameEUS string
	LogAnalyticsDefaultResourceGroupWEU string
	LogAnalyticsDefaultWorkspaceNameWEU string
	LogAnalyticsWorkspaceName           string
	LogAnalyticsSolutionName            string
	VirtualNicName                      string
	VirutalDiskName                     string
	Location                            string
	OperationID                         string
	// Canned response bodies, loaded from the JSON files at
	// construction time by NewHTTPMockClient.
	TokenResponse                              string
	Publisher                                  string
	Sku                                        string
	Offer                                      string
	Version                                    string
	ResponseListLocations                      string
	ResponseListVirtualMachineScaleSets        string
	ResponseListVirtualMachineScaleSetVMs      string
	ResponseListVirtualMachineScaleSetVMs2     string
	ResponseListVirtualMachines                string
	ResponseGetVirtualMachine                  string
	ResponseDeployVirtualMachine               string
	ResponseDeployVirtualMachineError          string
	ResponseGetAvailabilitySet                 string
	ResponseGetLogAnalyticsWorkspace           string
	ResponseGetLogAnalyticsWorkspaceSharedKeys string
	ResponseListWorkspacesByResourceGroup      string
	ResponseCreateOrUpdateWorkspace            string
	ResponseGetVirtualMachineImage             string
	ResponseListVirtualMachineImages           string
	// mux collects handler registrations; server is nil until
	// Activate is called.
	mux    *http.ServeMux
	server *testserver.TestServer
}
// VirtualMachineScaleSetListValues is a wrapper of the virtual machine
// scale set list response values ("value" array in the ARM list payload).
type VirtualMachineScaleSetListValues struct {
	Value []compute.VirtualMachineScaleSet
}
// VirtualMachineScaleSetVMValues is a wrapper of the virtual machine
// scale set VM response values ("value" array in the ARM list payload).
type VirtualMachineScaleSetVMValues struct {
	Value []compute.VirtualMachineScaleSetVM
}
// VirtualMachineVMValues is a wrapper of the virtual machine VM response
// values ("value" array in the ARM list payload).
type VirtualMachineVMValues struct {
	Value []compute.VirtualMachine
}
// NewHTTPMockClient creates an HTTPMockClient with default values and
// eagerly loads every canned JSON response body from disk. It returns
// the partially-initialized client together with the first read error
// encountered, if any.
func NewHTTPMockClient() (HTTPMockClient, error) {
	client := HTTPMockClient{
		SubscriptionID:                      subscriptionID,
		TenantID:                            tenantID,
		ResourceGroup:                       resourceGroup,
		ResourceGroupAPIVersion:             resourceGroupAPIVersion,
		ComputeAPIVersion:                   computeAPIVersion,
		LogAnalyticsAPIVersion:              logAnalyticsAPIVersion,
		SubscriptionsAPIVersion:             subscriptionsAPIVersion,
		NetworkAPIVersion:                   networkAPIVersion,
		DeploymentAPIVersion:                deploymentAPIVersion,
		DeploymentName:                      deploymentName,
		DeploymentStatus:                    deploymentStatus,
		VirtualMachineScaleSetName:          virtualMachineScaleSetName,
		VirtualMachineName:                  virtualMachineName,
		LogAnalyticsWorkspaceName:           logAnalyticsWorkspaceName,
		LogAnalyticsDefaultResourceGroupEUS: logAnalyticsDefaultResourceGroupEUS,
		LogAnalyticsDefaultWorkspaceNameEUS: logAnalyticsDefaultWorkspaceNameEUS,
		LogAnalyticsDefaultResourceGroupWEU: logAnalyticsDefaultResourceGroupWEU,
		LogAnalyticsDefaultWorkspaceNameWEU: logAnalyticsDefaultWorkspaceNameWEU,
		LogAnalyticsSolutionName:            logAnalyticsSolutionName,
		VirtualNicName:                      virtualNicName,
		VirutalDiskName:                     virutalDiskName,
		Location:                            location,
		OperationID:                         operationID,
		Publisher:                           publisher,
		Offer:                               offer,
		Sku:                                 sku,
		Version:                             version,
		mux:                                 http.NewServeMux(),
	}
	// Table-driven load replaces fifteen copy-pasted
	// read-then-check-error stanzas; each entry pairs the destination
	// field with its fixture path.
	responses := []struct {
		dst  *string
		path string
	}{
		{&client.TokenResponse, filePathTokenResponse},
		{&client.ResponseListVirtualMachineScaleSets, filePathListVirtualMachineScaleSets},
		{&client.ResponseListVirtualMachineScaleSetVMs, filePathListVirtualMachineScaleSetVMs},
		{&client.ResponseListVirtualMachines, filePathListVirtualMachines},
		{&client.ResponseGetVirtualMachine, filePathGetVirtualMachine},
		{&client.ResponseDeployVirtualMachine, fileDeployVirtualMachine},
		{&client.ResponseDeployVirtualMachineError, fileDeployVirtualMachineError},
		{&client.ResponseGetAvailabilitySet, filePathGetAvailabilitySet},
		{&client.ResponseGetLogAnalyticsWorkspace, filePathGetLogAnalyticsWorkspace},
		{&client.ResponseGetLogAnalyticsWorkspaceSharedKeys, filePathGetLogAnalyticsWorkspaceSharedKeys},
		{&client.ResponseListWorkspacesByResourceGroup, filePathListWorkspacesByResourceGroup},
		{&client.ResponseCreateOrUpdateWorkspace, filePathCreateOrUpdateWorkspace},
		{&client.ResponseGetVirtualMachineImage, filePathGetVirtualMachineImage},
		{&client.ResponseListVirtualMachineImages, filePathListVirtualMachineImages},
		{&client.ResponseListLocations, filePathListLocations},
	}
	for _, r := range responses {
		var err error
		if *r.dst, err = readFromFile(r.path); err != nil {
			return client, err
		}
	}
	return client, nil
}
// Activate starts the mock environment and should only be called
// after all required endpoints have been registered.
// Passing port 0 lets the test server pick a free port; the chosen
// port is later read via mc.server.Port when building callback URLs.
func (mc *HTTPMockClient) Activate() error {
	server, err := testserver.CreateAndStart(0, mc.mux)
	if err != nil {
		return err
	}
	mc.server = server
	return nil
}
// DeactivateAndReset shuts down the mock environment and removes any
// registered mocks. A fresh ServeMux is installed so the client can be
// reused with a new set of Register* calls.
func (mc *HTTPMockClient) DeactivateAndReset() {
	if mc.server != nil {
		mc.server.Stop()
	}
	mc.mux = http.NewServeMux()
	mc.server = nil
}
// GetEnvironment returns an azure.Environment for Azure Stack. It clones
// the public-cloud environment and, when the mock server is running,
// points the AD and ARM endpoints at it.
func (mc HTTPMockClient) GetEnvironment() azure.Environment {
	// Error deliberately ignored: "AZUREPUBLICCLOUD" is a known
	// environment name, so lookup cannot fail here.
	env, _ := azure.EnvironmentFromName("AZUREPUBLICCLOUD")
	env.Name = "AzureStackCloud"
	if mc.server != nil {
		mockURI := fmt.Sprintf("http://localhost:%d/", mc.server.Port)
		env.ActiveDirectoryEndpoint = mockURI
		env.ResourceManagerEndpoint = mockURI
	}
	return env
}
// RegisterLogin registers the mock responses for the AAD login
// handshake: the subscription probe that returns the 401 challenge, and
// the tenant token endpoint that serves the canned token response.
func (mc HTTPMockClient) RegisterLogin() {
	mc.mux.HandleFunc(fmt.Sprintf("/subscriptions/%s", mc.SubscriptionID), func(w http.ResponseWriter, req *http.Request) {
		if req.URL.Query().Get("api-version") != "2016-06-01" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		w.Header().Add("Www-Authenticate", fmt.Sprintf(`Bearer authorization_uri="https://login.windows.net/%s", error="invalid_token", error_description="The authentication failed because of missing 'Authorization' header."`, mc.TenantID))
		w.WriteHeader(http.StatusUnauthorized)
		_, _ = fmt.Fprint(w, `{"error":{"code":"AuthenticationFailed","message":"Authentication failed. The 'Authorization' header is missing."}}`)
	})
	mc.mux.HandleFunc(fmt.Sprintf("/%s/oauth2/token", mc.TenantID), func(w http.ResponseWriter, req *http.Request) {
		if req.URL.Query().Get("api-version") != "1.0" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		_, _ = fmt.Fprint(w, mc.TokenResponse)
	})
}
// RegisterListVirtualMachineScaleSets registers the mock response for
// ListVirtualMachineScaleSets. Requests with a wrong api-version get 404.
func (mc HTTPMockClient) RegisterListVirtualMachineScaleSets() {
	route := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets", mc.SubscriptionID, mc.ResourceGroup)
	mc.mux.HandleFunc(route, func(w http.ResponseWriter, req *http.Request) {
		if req.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		_, _ = fmt.Fprint(w, mc.ResponseListVirtualMachineScaleSets)
	})
}
// RegisterListVirtualMachineScaleSetVMs registers the mock response for
// ListVirtualMachineScaleSetVMs. Requests with a wrong api-version get 404.
func (mc HTTPMockClient) RegisterListVirtualMachineScaleSetVMs() {
	route := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines", mc.SubscriptionID, mc.ResourceGroup, mc.VirtualMachineScaleSetName)
	mc.mux.HandleFunc(route, func(w http.ResponseWriter, req *http.Request) {
		if req.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		_, _ = fmt.Fprint(w, mc.ResponseListVirtualMachineScaleSetVMs)
	})
}
// RegisterListVirtualMachines registers the mock response for
// ListVirtualMachines. Requests with a wrong api-version get 404.
func (mc HTTPMockClient) RegisterListVirtualMachines() {
	route := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines", mc.SubscriptionID, mc.ResourceGroup)
	mc.mux.HandleFunc(route, func(w http.ResponseWriter, req *http.Request) {
		if req.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		_, _ = fmt.Fprint(w, mc.ResponseListVirtualMachines)
	})
}
// RegisterListLocations registers the mock response for ListLocations.
// (The previous comment incorrectly said "ListVirtualMachines".)
func (mc HTTPMockClient) RegisterListLocations() {
	pattern := fmt.Sprintf("/subscriptions/%s/locations", mc.SubscriptionID)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.SubscriptionsAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseListLocations)
		}
	})
}
// RegisterGetAvailabilitySet registers the mock response for
// GetAvailabilitySet.
func (mc HTTPMockClient) RegisterGetAvailabilitySet() {
	// Use the shared constant rather than repeating the literal
	// "vmavailabilitysetName", so the route cannot drift from the name
	// used elsewhere in the test suite.
	pattern := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets/%s", mc.SubscriptionID, mc.ResourceGroup, virtualMachineAvailabilitySetName)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseGetAvailabilitySet)
		}
	})
}
// RegisterGetAvailabilitySetFaultDomainCount registers a mock response
// for GetAvailabilitySet against the fixed availability-set ID "id1".
func (mc HTTPMockClient) RegisterGetAvailabilitySetFaultDomainCount() {
	route := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets/id1", mc.SubscriptionID, mc.ResourceGroup)
	mc.mux.HandleFunc(route, func(w http.ResponseWriter, req *http.Request) {
		if req.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		_, _ = fmt.Fprint(w, mc.ResponseGetAvailabilitySet)
	})
}
// RegisterListResourceSkus registers a mock response for ListResourceSkus.
// Intentionally a no-op: the ListResourceSkus API is not implemented on
// Azure Stack, so there is no endpoint to mock.
func (mc HTTPMockClient) RegisterListResourceSkus() {
	// Not implemented on Azure Stack.
}
// RegisterVirtualMachineEndpoint registers mock responses for the
// Microsoft.Compute/virtualMachines endpoint: GET serves the canned VM
// payload, DELETE answers 202 Accepted with the async-operation headers
// the Azure SDK polls on (see RegisterDeleteOperation). Requires the
// mock server to be Active (mc.server.Port is read here), which is why
// this method has a pointer receiver.
func (mc *HTTPMockClient) RegisterVirtualMachineEndpoint() {
	pattern := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s", mc.SubscriptionID, mc.ResourceGroup, mc.VirtualMachineName)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			switch r.Method {
			case http.MethodGet:
				_, _ = fmt.Fprint(w, mc.ResponseGetVirtualMachine)
			case http.MethodDelete:
				// Point the poll URLs back at this mock server.
				w.Header().Add("Azure-Asyncoperation", fmt.Sprintf("http://localhost:%d/subscriptions/%s/providers/Microsoft.Compute/locations/%s/operations/%s?api-version=%s", mc.server.Port, mc.SubscriptionID, mc.Location, mc.OperationID, mc.ComputeAPIVersion))
				w.Header().Add("Location", fmt.Sprintf("http://localhost:%d/subscriptions/%s/providers/Microsoft.Compute/locations/%s/operations/%s?monitor=true&api-version=%s", mc.server.Port, mc.SubscriptionID, mc.Location, mc.OperationID, mc.ComputeAPIVersion))
				w.Header().Add("Azure-Asyncnotification", "Enabled")
				w.Header().Add("Content-Length", "0")
				w.WriteHeader(http.StatusAccepted)
			default:
				w.WriteHeader(http.StatusMethodNotAllowed)
			}
		}
	})
}
// RegisterDeleteOperation registers mock responses for checking the
// status of a delete operation. The "monitor=true" form (the Location
// poll URL) returns a bare 200; the plain form (Azure-Asyncoperation
// URL) returns a Succeeded operation document.
func (mc HTTPMockClient) RegisterDeleteOperation() {
	pattern := fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Compute/locations/%s/operations/%s", mc.SubscriptionID, mc.Location, mc.OperationID)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else if r.URL.Query().Get("monitor") == "true" {
			w.WriteHeader(http.StatusOK)
		} else {
			_, _ = fmt.Fprintf(w, `
{
	"startTime": "2019-03-30T00:23:10.9206154+00:00",
	"endTime": "2019-03-30T00:23:51.8424926+00:00",
	"status": "Succeeded",
	"name": "%s"
}`, mc.OperationID)
		}
	})
}
// RegisterDeployTemplate registers the mock response for DeployTemplate.
// PUT answers 201 Created with an Azure-Asyncoperation header pointing
// at the operationStatuses poll endpoint; GET serves the canned
// deployment payload.
func (mc *HTTPMockClient) RegisterDeployTemplate() {
	pattern := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.Resources/deployments/%s", mc.SubscriptionID, mc.ResourceGroup, mc.DeploymentName)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.DeploymentAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			switch r.Method {
			case http.MethodPut:
				// Consistency fix: build the poll URL from the client's
				// fields like every other handler does, instead of
				// reaching for the package-level constants.
				w.Header().Add("Azure-Asyncoperation", fmt.Sprintf("http://localhost:%d/subscriptions/%s/resourcegroups/%s/providers/Microsoft.Resources/deployments/%s/operationStatuses/%s?api-version=%s", mc.server.Port, mc.SubscriptionID, mc.ResourceGroup, mc.DeploymentName, mc.DeploymentStatus, mc.DeploymentAPIVersion))
				w.WriteHeader(http.StatusCreated)
				_, _ = fmt.Fprint(w, mc.ResponseDeployVirtualMachine)
			case http.MethodGet:
				_, _ = fmt.Fprint(w, mc.ResponseDeployVirtualMachine)
			default:
				w.WriteHeader(http.StatusMethodNotAllowed)
			}
		}
	})
}
// RegisterDeployOperationSuccess registers the mock response for a
// successful deployment: the operationStatuses endpoint reports
// Succeeded.
func (mc HTTPMockClient) RegisterDeployOperationSuccess() {
	route := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.Resources/deployments/%s/operationStatuses/%s", mc.SubscriptionID, mc.ResourceGroup, mc.DeploymentName, mc.DeploymentStatus)
	mc.mux.HandleFunc(route, func(w http.ResponseWriter, req *http.Request) {
		if req.URL.Query().Get("api-version") != mc.DeploymentAPIVersion {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		_, _ = fmt.Fprint(w, `{"status": "Succeeded"}`)
	})
}
// RegisterDeployOperationFailure registers the mock response for a
// failed deployment. (The previous comment incorrectly named this
// RegisterDeployOperationSuccess.)
func (mc HTTPMockClient) RegisterDeployOperationFailure() {
	pattern := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.Resources/deployments/%s/operationStatuses/%s", mc.SubscriptionID, mc.ResourceGroup, mc.DeploymentName, mc.DeploymentStatus)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.DeploymentAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseDeployVirtualMachineError)
		}
	})
}
// RegisterDeleteNetworkInterface registers the mock responses for
// DeleteNetworkInterface: the DELETE itself (202 Accepted with async
// headers), the operations poll endpoint (Succeeded), and the
// operationResults endpoint (bare 200). Pointer receiver because
// mc.server.Port is read when building the callback URLs.
func (mc *HTTPMockClient) RegisterDeleteNetworkInterface() {
	pattern := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkInterfaces/%s", mc.SubscriptionID, mc.ResourceGroup, mc.VirtualNicName)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.NetworkAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			w.Header().Add("Azure-Asyncoperation", fmt.Sprintf("http://localhost:%d/subscriptions/%s/providers/Microsoft.Network/locations/%s/operations/%s?api-version=%s", mc.server.Port, mc.SubscriptionID, mc.Location, mc.OperationID, mc.NetworkAPIVersion))
			w.Header().Add("Location", fmt.Sprintf("http://localhost:%d/subscriptions/%s/providers/Microsoft.Network/locations/%s/operationResults/%s?api-version=%s", mc.server.Port, mc.SubscriptionID, mc.Location, mc.OperationID, mc.NetworkAPIVersion))
			w.Header().Add("Azure-Asyncnotification", "Enabled")
			w.Header().Add("Content-Length", "0")
			w.WriteHeader(http.StatusAccepted)
		}
	})
	// Status poll endpoint referenced by the Azure-Asyncoperation header.
	pattern = fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Network/locations/%s/operations/%s", mc.SubscriptionID, mc.Location, mc.OperationID)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.NetworkAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, `{"status": "Succeeded"}`)
		}
	})
	// Result endpoint referenced by the Location header.
	pattern = fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Network/locations/%s/operationResults/%s", mc.SubscriptionID, mc.Location, mc.OperationID)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.NetworkAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			w.WriteHeader(http.StatusOK)
		}
	})
}
// RegisterDeleteManagedDisk registers the mock responses for
// DeleteManagedDisk: the DELETE (202 Accepted with async headers) and
// the DiskOperations poll endpoint, which returns 200 for the
// monitor=true form and a Succeeded operation document otherwise.
// Pointer receiver because mc.server.Port is read here.
func (mc *HTTPMockClient) RegisterDeleteManagedDisk() {
	pattern := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", mc.SubscriptionID, mc.ResourceGroup, mc.VirutalDiskName)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			w.Header().Add("Azure-Asyncoperation", fmt.Sprintf("http://localhost:%d/subscriptions/%s/providers/Microsoft.Compute/locations/%s/DiskOperations/%s?api-version=%s", mc.server.Port, mc.SubscriptionID, mc.Location, mc.OperationID, mc.ComputeAPIVersion))
			w.Header().Add("Location", fmt.Sprintf("http://localhost:%d/subscriptions/%s/providers/Microsoft.Compute/locations/%s/DiskOperations/%s?monitor=true&api-version=%s", mc.server.Port, mc.SubscriptionID, mc.Location, mc.OperationID, mc.ComputeAPIVersion))
			w.Header().Add("Azure-Asyncnotification", "Enabled")
			w.Header().Add("Content-Length", "0")
			w.WriteHeader(http.StatusAccepted)
		}
	})
	// Poll endpoint for both header URLs above.
	pattern = fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Compute/locations/%s/DiskOperations/%s", mc.SubscriptionID, mc.Location, mc.OperationID)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else if r.URL.Query().Get("monitor") == "true" {
			w.WriteHeader(http.StatusOK)
		} else {
			_, _ = fmt.Fprintf(w, `
{
	"startTime": "2019-03-30T00:23:10.9206154+00:00",
	"endTime": "2019-03-30T00:23:51.8424926+00:00",
	"status": "Succeeded",
	"name": "%s"
}`, mc.OperationID)
		}
	})
}
// RegisterGetLogAnalyticsWorkspaceInfo registers the mock responses for
// GetLogAnalyticsWorkspaceInfo: the workspace GET and its sharedKeys
// endpoint.
func (mc HTTPMockClient) RegisterGetLogAnalyticsWorkspaceInfo() {
	pattern := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.OperationalInsights/workspaces/%s", mc.SubscriptionID, mc.ResourceGroup, mc.LogAnalyticsWorkspaceName)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.LogAnalyticsAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseGetLogAnalyticsWorkspace)
		}
	})
	pattern = fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.OperationalInsights/workspaces/%s/sharedKeys", mc.SubscriptionID, mc.ResourceGroup, mc.LogAnalyticsWorkspaceName)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.LogAnalyticsAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseGetLogAnalyticsWorkspaceSharedKeys)
		}
	})
}
// RegisterEnsureDefaultLogAnalyticsWorkspaceUseExisting registers the
// mock responses for EnsureDefaultLogAnalyticsWorkspace for the
// "workspace already exists" path (EUS fixtures): resource-group probe,
// workspace list, and workspace create-or-update.
func (mc HTTPMockClient) RegisterEnsureDefaultLogAnalyticsWorkspaceUseExisting() {
	pattern := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s", mc.SubscriptionID, mc.LogAnalyticsDefaultResourceGroupEUS)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.ResourceGroupAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			w.WriteHeader(http.StatusNoContent)
		}
	})
	pattern = fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.OperationalInsights/workspaces", mc.SubscriptionID, mc.LogAnalyticsDefaultResourceGroupEUS)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.LogAnalyticsAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseListWorkspacesByResourceGroup)
		}
	})
	pattern = fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.OperationalInsights/workspaces/%s", mc.SubscriptionID, mc.LogAnalyticsDefaultResourceGroupEUS, mc.LogAnalyticsDefaultWorkspaceNameEUS)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.LogAnalyticsAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseCreateOrUpdateWorkspace)
		}
	})
}
// RegisterEnsureDefaultLogAnalyticsWorkspaceCreateNew registers the mock
// responses for EnsureDefaultLogAnalyticsWorkspace for the "create new
// workspace" path (WEU fixtures): resource-group probe, workspace list,
// and workspace create-or-update.
func (mc HTTPMockClient) RegisterEnsureDefaultLogAnalyticsWorkspaceCreateNew() {
	pattern := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s", mc.SubscriptionID, mc.LogAnalyticsDefaultResourceGroupWEU)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.ResourceGroupAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			w.WriteHeader(http.StatusNoContent)
		}
	})
	pattern = fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.OperationalInsights/workspaces", mc.SubscriptionID, mc.LogAnalyticsDefaultResourceGroupWEU)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.LogAnalyticsAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseListWorkspacesByResourceGroup)
		}
	})
	pattern = fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.OperationalInsights/workspaces/%s", mc.SubscriptionID, mc.LogAnalyticsDefaultResourceGroupWEU, mc.LogAnalyticsDefaultWorkspaceNameWEU)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.LogAnalyticsAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseCreateOrUpdateWorkspace)
		}
	})
}
// RegisterVMImageFetcherInterface registers the mock responses for the
// VMImageFetcherInterface methods: get a single image version and list
// all versions for the publisher/offer/sku fixtures.
func (mc *HTTPMockClient) RegisterVMImageFetcherInterface() {
	pattern := fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Compute/locations/%s/publishers/%s/artifacttypes/vmimage/offers/%s/skus/%s/versions/%s", mc.SubscriptionID, mc.Location, mc.Publisher, mc.Offer, mc.Sku, mc.Version)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseGetVirtualMachineImage)
		}
	})
	pattern = fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Compute/locations/%s/publishers/%s/artifacttypes/vmimage/offers/%s/skus/%s/versions", mc.SubscriptionID, mc.Location, mc.Publisher, mc.Offer, mc.Sku)
	mc.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("api-version") != mc.ComputeAPIVersion {
			w.WriteHeader(http.StatusNotFound)
		} else {
			_, _ = fmt.Fprint(w, mc.ResponseListVirtualMachineImages)
		}
	})
}
func readFromFile(filePath string) (string, error) {
bytes, err := os.ReadFile(filePath)
if err != nil {
return "", fmt.Errorf("Fail to read file %q , err - %q", filePath, err)
}
return string(bytes), nil
}
|
package tasks
import (
"crypto/rand"
"math/big"
"time"
)
// PeriodicTask runs Task repeatedly on a goroutine: first after
// InitialDuration, then every IntervalDuration, each delay perturbed by
// a random offset in (-JitterDuration, +JitterDuration).
type PeriodicTask struct {
	Task             func()        // callback invoked on each tick
	InitialDuration  time.Duration // delay before the first run
	IntervalDuration time.Duration // delay between subsequent runs
	JitterDuration   time.Duration // max magnitude of the random offset; 0 disables jitter
	quit             chan bool     // Shutdown -> run loop stop signal
	done             chan bool     // run loop -> Wait completion signal
}
// Start launches the periodic run loop on a new goroutine. The first
// run happens after InitialDuration (+jitter); later runs happen
// IntervalDuration (+jitter) after the previous run completes. The loop
// exits when Shutdown is called, signaling Wait via the done channel.
// NOTE(review): Start is not safe to call concurrently from multiple
// goroutines (unsynchronized lazy channel init) — confirm callers only
// start once.
func (t *PeriodicTask) Start() {
	if t.quit == nil {
		t.quit = make(chan bool, 1)
	}
	if t.done == nil {
		t.done = make(chan bool, 1)
	}
	go func() {
		initial := t.InitialDuration
		interval := t.IntervalDuration
		// First iteration waits initial+jitter; every later one waits
		// interval+jitter, recomputing the jitter each time.
		for delay := (initial + t.jitter()); ; delay = (interval + t.jitter()) {
			select {
			case <-t.quit:
				t.done <- true
				return
			case <-time.After(delay):
				t.Task()
			}
		}
	}()
}
// Shutdown signals the run loop to stop. It does not block; use Wait to
// block until the loop has actually exited. A no-op if Start was never
// called (quit is nil).
func (t *PeriodicTask) Shutdown() {
	if t.quit != nil {
		t.quit <- true
	}
}
// Wait blocks until the run loop acknowledges shutdown. A no-op if
// Start was never called (done is nil).
func (t *PeriodicTask) Wait() {
	if t.done != nil {
		<-t.done
	}
}
// jitter returns a uniformly random duration in the half-open interval
// [-JitterDuration, +JitterDuration), using crypto/rand. It returns
// zero when no jitter is configured or when the random source fails.
func (t *PeriodicTask) jitter() time.Duration {
	span := int64(t.JitterDuration)
	if span <= 0 {
		// No jitter configured; skip the random draw entirely.
		return 0
	}
	n, err := rand.Int(rand.Reader, big.NewInt(span*2))
	if err != nil {
		// Random source failed; fall back to no jitter.
		return 0
	}
	return time.Duration(n.Int64() - span)
}
|
package controllers
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
"github.com/books-list/models"
"github.com/books-list/repository/bookRepository"
"github.com/gorilla/mux"
)
// books is a package-level cache of the last fetched book list.
// NOTE(review): shared mutable state, not goroutine-safe under
// concurrent handlers — confirm acceptable for this app.
var books []models.Book

// Controller groups the HTTP handler constructors for the book API.
type Controller struct{}
// GetBooks returns an HTTP handler that lists all books via the
// repository and writes them as JSON. On repository failure it responds
// 500 instead of silently encoding a stale/empty list (the previous
// version discarded the error and shadowed the package-level slice).
func (c Controller) GetBooks(db *sql.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("Endpoint Hit: Books")
		var book models.Book
		bookRepo := bookRepository.BookRepository{}
		result, err := bookRepo.GetBooks(db, book, []models.Book{})
		if err != nil {
			log.Println(err)
			http.Error(w, "failed to fetch books", http.StatusInternalServerError)
			return
		}
		// Keep the package-level cache in sync with the fetched list.
		books = result
		if err := json.NewEncoder(w).Encode(result); err != nil {
			log.Println(err)
		}
	}
}
// GetBook returns an HTTP handler that fetches a single book by the
// {id} route parameter. Responds 400 for a non-numeric id, 404 when no
// row matches, and 500 on any other scan error (the previous version
// ignored all errors and logged nil on success).
func (c Controller) GetBook(db *sql.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("Endpoint Hit: Book")
		var book models.Book
		params := mux.Vars(r)
		id, err := strconv.Atoi(params["id"])
		if err != nil {
			http.Error(w, "invalid book id", http.StatusBadRequest)
			return
		}
		row := db.QueryRow("SELECT * FROM books where id= ?", id)
		if err := row.Scan(&book.ID, &book.Title, &book.Author, &book.Year); err != nil {
			if err == sql.ErrNoRows {
				http.Error(w, "book not found", http.StatusNotFound)
				return
			}
			log.Println(err)
			http.Error(w, "failed to fetch book", http.StatusInternalServerError)
			return
		}
		if err := json.NewEncoder(w).Encode(book); err != nil {
			log.Println(err)
		}
	}
}
// AddBook returns an HTTP handler that decodes a book from the request
// body, inserts it, and echoes the updated list. Responds 400 on a bad
// body and 500 on insert failure; the book is only appended to the
// package-level cache when the insert succeeded (the previous version
// ignored both errors and appended unconditionally).
func (c Controller) AddBook(db *sql.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("Endpoint Hit:Add Book")
		var book models.Book
		var bookID int
		if err := json.NewDecoder(r.Body).Decode(&book); err != nil {
			http.Error(w, "invalid request body", http.StatusBadRequest)
			return
		}
		err := db.QueryRow("INSERT INTO books (title,author,year) VALUES ($1,$2,$3) RETURNING id;", book.Title, book.Author, book.Year).Scan(&bookID)
		if err != nil {
			log.Println(err)
			http.Error(w, "failed to add book", http.StatusInternalServerError)
			return
		}
		books = append(books, book)
		if err := json.NewEncoder(w).Encode(books); err != nil {
			log.Println(err)
		}
	}
}
// UpdateBook returns an HTTP handler that updates a book from the
// request body and responds with the number of rows affected.
// Fixes over the previous version: uses the handler's db instead of
// opening (and leaking credentials for) a brand-new mysql connection;
// the UPDATE statement gains the required commas between SET clauses;
// the unusable RETURNING clause is dropped (Exec discards rows); and
// errors are handled instead of discarded.
func (c Controller) UpdateBook(db *sql.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("Endpoint Hit:Update Book")
		var book models.Book
		if err := json.NewDecoder(r.Body).Decode(&book); err != nil {
			http.Error(w, "invalid request body", http.StatusBadRequest)
			return
		}
		result, err := db.Exec("UPDATE books SET title=$1, author=$2, year=$3 WHERE id=$4;", book.Title, book.Author, book.Year, book.ID)
		if err != nil {
			log.Println(err)
			http.Error(w, "failed to update book", http.StatusInternalServerError)
			return
		}
		rowsUpdated, err := result.RowsAffected()
		if err != nil {
			log.Println(err)
			http.Error(w, "failed to update book", http.StatusInternalServerError)
			return
		}
		if err := json.NewEncoder(w).Encode(rowsUpdated); err != nil {
			log.Println(err)
		}
	}
}
// RemoveBook returns an HTTP handler that deletes the book whose id is
// given in the {id} route parameter and responds with the number of
// rows deleted. Fixes over the previous version: the DELETE statement
// gains the required FROM keyword, the unusable RETURNING clause is
// dropped, and errors are handled instead of discarded.
func (c Controller) RemoveBook(db *sql.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("Endpoint Hit:DELETE Book")
		params := mux.Vars(r)
		id, err := strconv.Atoi(params["id"])
		if err != nil {
			http.Error(w, "invalid book id", http.StatusBadRequest)
			return
		}
		result, err := db.Exec("DELETE FROM books WHERE id=$1;", id)
		if err != nil {
			log.Println(err)
			http.Error(w, "failed to delete book", http.StatusInternalServerError)
			return
		}
		rowsDeleted, err := result.RowsAffected()
		if err != nil {
			log.Println(err)
			http.Error(w, "failed to delete book", http.StatusInternalServerError)
			return
		}
		if err := json.NewEncoder(w).Encode(rowsDeleted); err != nil {
			log.Println(err)
		}
	}
}
|
package humanize
import (
"fmt"
"go/ast"
)
// Function is a named function declaration (not a func type). Methods
// are represented with a non-nil Receiver and a Name of the form
// "Receiver.Name" (see getFunction).
type Function struct {
	pkg      *Package  // owning package, used by Equal
	file     *File     // file the declaration appears in
	Name     string    // function name, "Type.Name" for methods
	Docs     Docs      // doc comments attached to the declaration
	Type     *FuncType // signature (parameters and results)
	Receiver *Variable // Nil means normal function
}
// String renders the function object back to Go source form, e.g.
// "func (T) Name(sig)" for methods or "func Name(sig)" otherwise.
func (f *Function) String() string {
	res := "func "
	if r := f.Receiver; r != nil {
		res += "(" + r.Type.String() + ")"
	}
	return res + f.Name + f.Type.Sign()
}
// Equal reports whether f and t describe the same function: same
// package, same name, matching receiver presence/value, and identical
// signatures.
func (f *Function) Equal(t *Function) bool {
	switch {
	case !f.pkg.Equal(t.pkg):
		return false
	case f.Name != t.Name:
		return false
	case (f.Receiver == nil) != (t.Receiver == nil):
		// One is a method, the other a plain function.
		return false
	case f.Receiver != nil && !f.Receiver.Equal(t.Receiver):
		return false
	}
	return f.Type.Equal(t.Type)
}
// lateBind resolves deferred type references in the signature and, for
// methods, in the receiver. Returns the first binding error.
func (f *Function) lateBind() error {
	if err := f.Type.lateBind(); err != nil {
		return err
	}
	if f.Receiver != nil {
		return f.Receiver.lateBind()
	}
	return nil
}
// Package returns the package this function was declared in.
func (f *Function) Package() *Package {
	return f.pkg
}
// File returns the file this function was declared in.
func (f *Function) File() *File {
	return f.file
}
// NewFunction return a single function annotation
func getFunction(p *Package, fl *File, f *ast.FuncDecl) *Function {
res := &Function{
pkg: p,
file: fl,
Name: nameFromIdent(f.Name),
Docs: docsFromNodeDoc(f.Doc),
}
if f.Recv != nil {
// Method receiver is only one parameter
for i := range f.Recv.List {
n := ""
if f.Recv.List[i].Names != nil {
n = nameFromIdent(f.Recv.List[i].Names[0])
}
p := variableFromExpr(p, fl, n, f.Recv.List[i].Type)
res.Receiver = p
}
}
// Change the name of the function to receiver.func
if res.Receiver != nil {
tmp := res.Receiver.Type
if _, ok := tmp.(*StarType); ok {
tmp = tmp.(*StarType).Target
}
res.Name = tmp.(*IdentType).Ident + "." + res.Name
}
res.Type = &FuncType{
pkg: p,
Parameters: getVariableList(p, fl, f.Type.Params),
Results: getVariableList(p, fl, f.Type.Results),
}
return res
}
|
package runner
import (
"sync"
discoveryv1 "github.com/syncromatics/kafmesh/internal/protos/kafmesh/discovery/v1"
"github.com/pkg/errors"
)
var (
	// serviceSetup ensures the service-level discovery info is populated
	// only once per process (see registerService).
	serviceSetup sync.Once
)

// ServiceDiscovery provides service information for discovery
type ServiceDiscovery struct {
	Name        string
	Description string
}

// ComponentDiscovery provides component information for discovery
type ComponentDiscovery struct {
	Name        string
	Description string
}

// MessageType is the type of serialization used for the kafka topic
type MessageType int

const (
	// MessageTypeProtobuf uses protobuf serialization
	MessageTypeProtobuf MessageType = iota
)

// TopicDiscovery provides topic information for discovery
type TopicDiscovery struct {
	Message string
	Topic   string
	Type    MessageType
}

// SourceDiscovery provides source information for discovery
type SourceDiscovery struct {
	ServiceDiscovery
	ComponentDiscovery
	TopicDiscovery
}

// SinkDiscovery provides sink information for discovery
type SinkDiscovery struct {
	ServiceDiscovery
	ComponentDiscovery
	TopicDiscovery

	// Name and Description describe the sink itself; by Go selector rules
	// they take precedence over the same-named fields of the embedded structs.
	Name        string
	Description string
}
// ViewDiscovery adds view information for discovery
type ViewDiscovery struct {
	ServiceDiscovery
	ComponentDiscovery
	TopicDiscovery
}

// ViewSourceDiscovery provides view source information for discovery
type ViewSourceDiscovery struct {
	ServiceDiscovery
	ComponentDiscovery
	TopicDiscovery

	// Name and Description describe the view source itself.
	Name        string
	Description string
}

// ViewSinkDiscovery provides view sink information for discovery
type ViewSinkDiscovery struct {
	ServiceDiscovery
	ComponentDiscovery
	TopicDiscovery

	// Name and Description describe the view sink itself.
	Name        string
	Description string
}

// ProcessorDiscovery provides processor information for discovery
type ProcessorDiscovery struct {
	ServiceDiscovery
	ComponentDiscovery

	Name        string
	Description string
	GroupName   string
	Inputs      []InputDiscovery
	Joins       []JoinDiscovery
	Lookups     []LookupDiscovery
	Outputs     []OutputDiscovery
	Persistence *PersistentDiscovery // nil when the processor has no persistence
}
// InputDiscovery provides input information for discovery
type InputDiscovery struct {
	TopicDiscovery
}

// JoinDiscovery provides join information for discovery
type JoinDiscovery struct {
	TopicDiscovery
}

// LookupDiscovery provides lookup information for discovery
type LookupDiscovery struct {
	TopicDiscovery
}

// OutputDiscovery provides output information for discovery
type OutputDiscovery struct {
	TopicDiscovery
}

// PersistentDiscovery provides persistence information for discovery
type PersistentDiscovery struct {
	TopicDiscovery
}
// registerService records the service-level name/description on the
// discovery info. A package-level sync.Once guards the write, so only the
// first registration in the whole process takes effect — NOTE(review): the
// Once is global, not per-Service; confirm that is intended.
func (s *Service) registerService(service ServiceDiscovery) {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	serviceSetup.Do(func() {
		s.DiscoverInfo.Name = service.Name
		s.DiscoverInfo.Description = service.Description
	})
}
// RegisterProcessor registers a processor with the discovery service.
// Each input, join, lookup, output and the optional persistence topic is
// converted to a protobuf topic definition; an invalid message type on any
// of them aborts the registration with a descriptive error.
func (s *Service) RegisterProcessor(processor ProcessorDiscovery) error {
	s.registerService(processor.ServiceDiscovery)

	s.mtx.Lock()
	defer s.mtx.Unlock()

	// topicDef converts a TopicDiscovery into a protobuf topic definition,
	// wrapping conversion failures with the element kind ("input", "join",
	// ...) and topic name so the original error texts are preserved.
	topicDef := func(kind string, t TopicDiscovery) (*discoveryv1.TopicDefinition, error) {
		mt, err := convertMessageType(t.Type)
		if err != nil {
			return nil, errors.Wrapf(err, "processor '%s' %s '%s' has invalid message type", processor.Name, kind, t.Topic)
		}
		return &discoveryv1.TopicDefinition{
			Message: t.Message,
			Topic:   t.Topic,
			Type:    mt,
		}, nil
	}

	component := s.getOrCreateDiscoveryComponent(processor.ComponentDiscovery)

	proc := &discoveryv1.Processor{
		Name:        processor.Name,
		Description: processor.Description,
		GroupName:   processor.GroupName,
	}

	for _, input := range processor.Inputs {
		topic, err := topicDef("input", input.TopicDiscovery)
		if err != nil {
			return err
		}
		proc.Inputs = append(proc.Inputs, &discoveryv1.Input{Topic: topic})
	}

	for _, join := range processor.Joins {
		topic, err := topicDef("join", join.TopicDiscovery)
		if err != nil {
			return err
		}
		proc.Joins = append(proc.Joins, &discoveryv1.Join{Topic: topic})
	}

	for _, lookup := range processor.Lookups {
		topic, err := topicDef("lookup", lookup.TopicDiscovery)
		if err != nil {
			return err
		}
		proc.Lookups = append(proc.Lookups, &discoveryv1.Lookup{Topic: topic})
	}

	for _, output := range processor.Outputs {
		topic, err := topicDef("output", output.TopicDiscovery)
		if err != nil {
			return err
		}
		proc.Outputs = append(proc.Outputs, &discoveryv1.Output{Topic: topic})
	}

	if processor.Persistence != nil {
		topic, err := topicDef("persistence", processor.Persistence.TopicDiscovery)
		if err != nil {
			return err
		}
		proc.Persistence = &discoveryv1.Persistence{Topic: topic}
	}

	component.Processors = append(component.Processors, proc)
	return nil
}
// RegisterSource registers a source with the discovery service
func (s *Service) RegisterSource(source SourceDiscovery) error {
	s.registerService(source.ServiceDiscovery)

	s.mtx.Lock()
	defer s.mtx.Unlock()

	component := s.getOrCreateDiscoveryComponent(source.ComponentDiscovery)

	msgType, err := convertMessageType(source.Type)
	if err != nil {
		return errors.Wrapf(err, "source '%s' has invalid message type", source.Topic)
	}

	component.Sources = append(component.Sources, &discoveryv1.Source{
		Topic: &discoveryv1.TopicDefinition{
			Message: source.Message,
			Topic:   source.Topic,
			Type:    msgType,
		},
	})
	return nil
}
// RegisterSink registers a sink with the discovery service
func (s *Service) RegisterSink(sink SinkDiscovery) error {
	s.registerService(sink.ServiceDiscovery)

	s.mtx.Lock()
	defer s.mtx.Unlock()

	component := s.getOrCreateDiscoveryComponent(sink.ComponentDiscovery)

	msgType, err := convertMessageType(sink.Type)
	if err != nil {
		return errors.Wrapf(err, "sink '%s' has invalid message type", sink.Topic)
	}

	entry := &discoveryv1.Sink{
		Topic: &discoveryv1.TopicDefinition{
			Message: sink.Message,
			Topic:   sink.Topic,
			Type:    msgType,
		},
		Name:        sink.Name,
		Description: sink.Description,
	}
	component.Sinks = append(component.Sinks, entry)
	return nil
}
// RegisterView registers a view with the discovery service
func (s *Service) RegisterView(view ViewDiscovery) error {
	s.registerService(view.ServiceDiscovery)

	s.mtx.Lock()
	defer s.mtx.Unlock()

	component := s.getOrCreateDiscoveryComponent(view.ComponentDiscovery)

	msgType, err := convertMessageType(view.Type)
	if err != nil {
		return errors.Wrapf(err, "view '%s' has invalid message type", view.Topic)
	}

	entry := &discoveryv1.View{
		Topic: &discoveryv1.TopicDefinition{
			Message: view.Message,
			Topic:   view.Topic,
			Type:    msgType,
		},
	}
	component.Views = append(component.Views, entry)
	return nil
}
// RegisterViewSource registers a view source with the discovery service
func (s *Service) RegisterViewSource(viewSource ViewSourceDiscovery) error {
	s.registerService(viewSource.ServiceDiscovery)

	s.mtx.Lock()
	defer s.mtx.Unlock()

	component := s.getOrCreateDiscoveryComponent(viewSource.ComponentDiscovery)

	msgType, err := convertMessageType(viewSource.Type)
	if err != nil {
		return errors.Wrapf(err, "viewSource '%s' has invalid message type", viewSource.Name)
	}

	entry := &discoveryv1.ViewSource{
		Topic: &discoveryv1.TopicDefinition{
			Message: viewSource.Message,
			Topic:   viewSource.Topic,
			Type:    msgType,
		},
		Name:        viewSource.Name,
		Description: viewSource.Description,
	}
	component.ViewSources = append(component.ViewSources, entry)
	return nil
}
// RegisterViewSink registers a view sink with the discovery service
func (s *Service) RegisterViewSink(viewSink ViewSinkDiscovery) error {
	s.registerService(viewSink.ServiceDiscovery)

	s.mtx.Lock()
	defer s.mtx.Unlock()

	component := s.getOrCreateDiscoveryComponent(viewSink.ComponentDiscovery)

	msgType, err := convertMessageType(viewSink.Type)
	if err != nil {
		return errors.Wrapf(err, "viewSink '%s' has invalid message type", viewSink.Name)
	}

	entry := &discoveryv1.ViewSink{
		Topic: &discoveryv1.TopicDefinition{
			Message: viewSink.Message,
			Topic:   viewSink.Topic,
			Type:    msgType,
		},
		Name:        viewSink.Name,
		Description: viewSink.Description,
	}
	component.ViewSinks = append(component.ViewSinks, entry)
	return nil
}
// convertMessageType maps a runner MessageType onto the discovery protobuf
// topic type, returning an error for unrecognized values.
func convertMessageType(messageType MessageType) (discoveryv1.TopicType, error) {
	if messageType == MessageTypeProtobuf {
		return discoveryv1.TopicType_TOPIC_TYPE_PROTOBUF, nil
	}
	return discoveryv1.TopicType_TOPIC_TYPE_INVALID, errors.Errorf("unknown message type '%d'", messageType)
}
// getOrCreateDiscoveryComponent returns the discovery component with the
// given name, creating and registering a fresh one when none exists yet.
// Callers hold s.mtx (all Register* methods lock before calling).
func (s *Service) getOrCreateDiscoveryComponent(component ComponentDiscovery) *discoveryv1.Component {
	for _, existing := range s.DiscoverInfo.Components {
		if existing.Name == component.Name {
			return existing
		}
	}

	created := &discoveryv1.Component{
		Name:        component.Name,
		Description: component.Description,
	}
	s.DiscoverInfo.Components = append(s.DiscoverInfo.Components, created)
	return created
}
|
/*Package eventual is a simple event dispatcher for golang based on channels.
The goal is to pass data around, without depending on any package except for this
one.
Some definitions:
- Mandatory means a publish is not finished until all subscribers receive the message;
in this case, the receivers must always read the channel. TODO: a way to unsubscribe
- Exclusive means only one receiver is allowed for a topic
package main
import "github.com/fzerorubigd/eventual"
func main() {
e := eventual.New()
// Create a non-mandatory, non-exclusive topic
t, _ := e.Register("the.topic.name", false, false)
c1 := t.Sub()
// In any other part of the code, even in another package, you can fetch
// exactly the same topic with exactly the same parameters
t, _ = e.Register("the.topic.name", false, false)
t.Pub(SomeIEventStructure{}) // Now there is data in c1, but if there is no reader the data is lost
}
## TODO
- Support for buffer size (aka pre fetch)
- Support for reply back
*/
package eventual
import (
"fmt"
"sync"
)
// IEvent is an interface to handle one instance of an event in the system;
// it carries the data that was passed when the event was published.
type IEvent interface {
	// Data returns the event payload; the publisher decides its concrete type.
	Data() interface{}
}
// Publisher is an interface for publishing events in the system.
type Publisher interface {
	// Pub publishes an event to all current subscribers.
	Pub(IEvent)
}
// Subscriber is an interface for obtaining channels that receive events.
type Subscriber interface {
	// Sub returns a channel to read events from. If the event is
	// exclusive and already has a subscriber, it panics.
	Sub() <-chan IEvent
}
// Event is the actual event; use Eventual.Register to obtain one.
type Event interface {
	Publisher
	Subscriber
	// GetTopic returns the topic name of this event.
	GetTopic() string
	// IsMandatory reports whether publishing blocks until all subscribers
	// have received the event.
	IsMandatory() bool
	// IsExclusive reports whether only a single subscriber is allowed for
	// this event.
	IsExclusive() bool
	// // GetTimeout return the timeout for this even. if the timeout is equal or less
	// // than zero, there is no wait, if not the event wait this duration for the
	// // subscriber to pick.
	// GetTimeout() time.Duration
}
// Eventual is an event bus: topics are created (or re-fetched) by name
// through Register.
type Eventual interface {
	Register(topic string, mandatory, exclusive bool) (Event, error)
}
// eventInstance is the concrete Event implementation for a single topic.
type eventInstance struct {
	topic     string
	mandatory bool          // publish blocks until every subscriber receives
	exclusive bool          // at most one subscriber allowed
	subs      []chan IEvent // subscriber channels; guarded by lock
	lock      *sync.RWMutex
}

// eventualInstance is the concrete Eventual bus implementation.
type eventualInstance struct {
	list map[string]Event // registered topics by name; guarded by lock
	lock *sync.Mutex
}
// Pub publishes ei: mandatory topics block until every subscriber has
// received the event, non-mandatory topics drop it for slow subscribers.
func (e *eventInstance) Pub(ei IEvent) {
	if !e.mandatory {
		e.pubNormal(ei)
		return
	}
	e.pubMandatory(ei)
}
// pubMandatory delivers ei to every subscriber, one goroutine per channel,
// and blocks until all sends complete.
func (e *eventInstance) pubMandatory(ei IEvent) {
	wg := sync.WaitGroup{}
	wg.Add(len(e.subs))
	for i := range e.subs {
		go func(c chan IEvent) {
			// Hold the read lock for the duration of the send so the
			// subscriber slice is stable while delivering.
			e.lock.RLock()
			defer e.lock.RUnlock()
			defer wg.Done()
			// NOTE(review): this send blocks forever if a subscriber never
			// reads its channel, and it does so while holding RLock — a
			// stuck subscriber would also deadlock Sub(). Confirm callers
			// always drain mandatory topics (see package TODO about unsub).
			c <- ei
		}(e.subs[i])
	}
	// In mandatory mode, publishing blocks until all data is received.
	wg.Wait()
}
// pubNormal offers ei to each subscriber without blocking; subscribers
// that are not ready to receive simply miss the event.
func (e *eventInstance) pubNormal(ei IEvent) {
	e.lock.RLock()
	defer e.lock.RUnlock()

	for _, sub := range e.subs {
		select {
		case sub <- ei:
		default: // no reader ready; drop the event for this subscriber
		}
	}
}
// Sub implementation of the Subscriber interface: registers and returns a
// fresh unbuffered channel. Panics when the topic is exclusive and already
// has a subscriber.
func (e *eventInstance) Sub() <-chan IEvent {
	e.lock.Lock()
	defer e.lock.Unlock()

	if len(e.subs) > 0 && e.IsExclusive() {
		panic(fmt.Errorf("this is a exclusive event, and there is already a subscriber available"))
	}

	ch := make(chan IEvent)
	e.subs = append(e.subs, ch)
	return ch
}
// IsMandatory reports whether publishing blocks until every subscriber has
// received the event.
func (e *eventInstance) IsMandatory() bool {
	return e.mandatory
}

// IsExclusive reports whether at most one subscriber is allowed.
func (e *eventInstance) IsExclusive() bool {
	return e.exclusive
}

// GetTopic returns the topic name of this event.
func (e *eventInstance) GetTopic() string {
	return e.topic
}
// Register registers an event in the system. topic is the event topic;
// mandatory means a publish hangs until the subscribers receive it;
// exclusive means the event allows only a single subscriber.
// The parameters must be exactly the same each time this is called for a
// given topic, otherwise an error is returned.
func (e *eventualInstance) Register(topic string, mandatory, exclusive bool) (Event, error) {
	e.lock.Lock()
	defer e.lock.Unlock()

	event, ok := e.list[topic]
	if ok {
		// Re-registration: the flags must match the original registration.
		if event.IsMandatory() != mandatory || event.IsExclusive() != exclusive {
			err := fmt.Errorf(
				"the topic mandatory is %t exclusive is %t, requested topic is respectively %t and %t",
				event.IsMandatory(),
				event.IsExclusive(),
				mandatory,
				exclusive,
			)
			return nil, err
		}
	} else {
		event = &eventInstance{topic, mandatory, exclusive, nil, &sync.RWMutex{}}
		e.list[topic] = event
	}

	return event, nil
}
// New returns a ready-to-use Eventual event bus.
func New() Eventual {
	return &eventualInstance{
		list: make(map[string]Event),
		lock: &sync.Mutex{},
	}
}
|
package node
import log "code.google.com/p/log4go"
import "github.com/d-d-j/ddj_master/dto"
// NodeManager is the package-wide Manager instance.
var NodeManager = NewManager()

// GetNodeRequest is used to look a node up by id; the Node is returned on
// BackChan.
type GetNodeRequest struct {
	NodeId   int32
	BackChan chan<- *Node
}

// Manager takes care of nodes and their status. It adds a new node when one
// appears on AddChan, removes a node when its id is passed on DelChan, and
// answers lookup requests arriving on GetChan.
type Manager struct {
	nodes    map[int32]*Node
	AddChan  chan *Node
	GetChan  chan GetNodeRequest
	DelChan  chan int32
	QuitChan chan bool
	InfoChan chan []*dto.Info
}
// NewManager constructs a Manager with an empty node table and fresh
// unbuffered channels.
func NewManager() *Manager {
	return &Manager{
		nodes:    make(map[int32]*Node),
		AddChan:  make(chan *Node),
		GetChan:  make(chan GetNodeRequest),
		DelChan:  make(chan int32),
		QuitChan: make(chan bool),
		InfoChan: make(chan []*dto.Info),
	}
}
// GetNodes returns the map of all known nodes.
func (m *Manager) GetNodes() map[int32]*Node {
	return m.nodes
}
// GetNodesLen returns the number of connected nodes.
func (m *Manager) GetNodesLen() int {
	return len(m.nodes)
}
// Manage is the Manager main loop: it serves every request arriving on the
// Manager's channels until QuitChan is signalled.
func (m *Manager) Manage() {
	log.Info("Node manager started managing")
	for {
		select {
		case get := <-m.GetChan:
			// Look the node up and hand it back on the request's channel.
			if node, ok := m.nodes[get.NodeId]; ok {
				get.BackChan <- node
			} else {
				// NOTE(review): an unknown id panics the manager goroutine;
				// confirm callers can never race a lookup against a delete.
				panic("Node not found")
			}
		case newNode := <-m.AddChan:
			m.nodes[newNode.Id] = newNode
		case closedNodeId := <-m.DelChan:
			delete(m.nodes, closedNodeId)
			log.Info("Node manager deleted Node #%d from nodes", closedNodeId)
		case <-m.QuitChan:
			log.Info("Node manager stopped managing")
			return
		}
	}
}
// SendToAllNodes delivers message to the Incoming channel of every
// connected node.
func (m *Manager) SendToAllNodes(message []byte) {
	// Fixed: the previous call passed " nodes" as a stray extra argument
	// after the single %d verb, producing %!(EXTRA ...) in the log output.
	log.Debug("Sending message to all %d nodes", m.GetNodesLen())
	for _, n := range m.nodes {
		log.Finest("Sending message to node #%d", n.Id)
		n.Incoming <- message
	}
}
|
/*
Copyright 2020 Skyscanner Limited.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kmsca
import (
"crypto"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"errors"
"fmt"
"time"
"crypto/sha1" //nolint:gosec // Used for consistent hash
"math/big"
"github.com/Skyscanner/kms-issuer/pkg/signer"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/kms/kmsiface"
)
const (
	// DefaultCertDuration is the default CA certificate validity duration.
	DefaultCertDuration = time.Hour * 24 * 365 * 3 // 3 years
	// DefaultCertRenewalRatio is the default ratio of the validity period
	// after which the certificate is expected to be renewed.
	DefaultCertRenewalRatio = 2.0 / 3
)
// KMSCA KMS Certificate Authority provides the API operation methods for
// implementing a certificate authority on top of AWS KMS.
type KMSCA struct {
	// Client is the KMS API surface used for all key and signing operations.
	Client kmsiface.KMSAPI
}
// NewKMSCA creates a new instance of the KMSCA client with a session.
// If additional configuration is needed for the client instance use the
// optional aws.Config parameter to add your extra config.
func NewKMSCA(p client.ConfigProvider, cfgs ...*aws.Config) *KMSCA {
	ca := &KMSCA{}
	ca.Client = kms.New(p, cfgs...)
	return ca
}
// CreateKey creates an asymmetric KMS key used to sign certificates and a
// KMS alias pointing at the key. The key is only created when the alias
// does not exist yet; otherwise the existing key's id is returned.
// Returns the KeyID string.
func (ca *KMSCA) CreateKey(input *CreateKeyInput) (string, error) {
	// Check if the key already exists.
	response, err := ca.Client.DescribeKey(&kms.DescribeKeyInput{
		KeyId: aws.String(input.AliasName),
	})
	if err == nil {
		// Return the existing key if one already exists.
		return aws.StringValue(response.KeyMetadata.KeyId), nil
	}
	// Only a NotFound AWS error means "go ahead and create". The comma-ok
	// assertion matters: a non-awserr error (e.g. a transport failure) would
	// previously panic on the unchecked err.(awserr.Error) assertion.
	awsErr, ok := err.(awserr.Error)
	if !ok || awsErr.Code() != kms.ErrCodeNotFoundException {
		return "", err
	}
	// Create the KMS key.
	keyInput := &kms.CreateKeyInput{
		KeyUsage:              aws.String(kms.KeyUsageTypeSignVerify),
		CustomerMasterKeySpec: aws.String(kms.CustomerMasterKeySpecRsa2048),
	}
	if len(input.CustomerMasterKeySpec) > 0 {
		keyInput.CustomerMasterKeySpec = aws.String(input.CustomerMasterKeySpec)
	}
	if len(input.Description) > 0 {
		keyInput.Description = aws.String(input.Description)
	}
	if len(input.Policy) > 0 {
		keyInput.Policy = aws.String(input.Policy)
	}
	// Ranging over a nil/empty map is a no-op, so no length guard is needed.
	for k, v := range input.Tags {
		keyInput.Tags = append(keyInput.Tags, &kms.Tag{TagKey: aws.String(k), TagValue: aws.String(v)})
	}
	key, err := ca.Client.CreateKey(keyInput)
	if err != nil {
		return "", err
	}
	// Create the KMS alias pointing at the new key.
	_, err = ca.Client.CreateAlias(&kms.CreateAliasInput{
		TargetKeyId: key.KeyMetadata.KeyId,
		AliasName:   aws.String(input.AliasName),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(key.KeyMetadata.KeyId), nil
}
// DeleteKey deletes a KMS key alias and schedules deletion of the
// underlying target KMS key.
func (ca *KMSCA) DeleteKey(input *DeleteKeyInput) error {
	// Resolve the alias to the underlying key.
	described, err := ca.Client.DescribeKey(&kms.DescribeKeyInput{
		KeyId: aws.String(input.AliasName),
	})
	if err != nil {
		return err
	}

	// Schedule deletion of the key itself, honoring the optional window.
	deletion := &kms.ScheduleKeyDeletionInput{
		KeyId: described.KeyMetadata.KeyId,
	}
	if input.PendingWindowInDays > 0 {
		deletion.PendingWindowInDays = aws.Int64(int64(input.PendingWindowInDays))
	}
	if _, err = ca.Client.ScheduleKeyDeletion(deletion); err != nil {
		return err
	}

	// Finally remove the alias.
	if _, err = ca.Client.DeleteAlias(&kms.DeleteAliasInput{
		AliasName: aws.String(input.AliasName),
	}); err != nil {
		return err
	}
	return nil
}
// GenerateCertificateAuthorityCertificate returns the Certificate Authority Certificate
func (ca *KMSCA) GenerateCertificateAuthorityCertificate(input *GenerateCertificateAuthorityCertificateInput) *x509.Certificate {
	// Compute the start/end validity.
	// The rounding factor is used to ensure all the certificates issued within the same period are identical.
	notBefore := time.Now().Truncate(input.Rounding)
	notAfter := notBefore.Add(input.Duration)
	// Compute CA certificate
	cert := &x509.Certificate{
		Subject:               input.Subject,
		NotBefore:             notBefore,
		NotAfter:              notAfter,
		IsCA:                  true,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	// Derive a deterministic serial number from the input and template, so
	// all certificates generated within the same rounding window share it.
	serialNumberKey := fmt.Sprintf("%s %v", input, cert)
	sha := sha1.Sum([]byte(serialNumberKey)) //nolint:gosec // Used for consistent hash
	cert.SerialNumber = new(big.Int).SetBytes(sha[:])
	return cert
}
// GenerateAndSignCertificateAuthorityCertificate returns the Certificate
// Authority certificate, self-signed via the KMS-backed signer.
func (ca *KMSCA) GenerateAndSignCertificateAuthorityCertificate(input *GenerateCertificateAuthorityCertificateInput) (*x509.Certificate, error) {
	template := ca.GenerateCertificateAuthorityCertificate(input)

	kmsSigner, err := signer.New(ca.Client, input.KeyID)
	if err != nil {
		return nil, err
	}
	publicKey := kmsSigner.Public()
	if publicKey == nil {
		return nil, errors.New("could not retrieve the public key associated with the KMS private key")
	}

	// Self-signed: the template acts as both subject and issuer.
	der, err := x509.CreateCertificate(rand.Reader, template, template, publicKey, kmsSigner)
	if err != nil {
		return nil, err
	}
	return x509.ParseCertificate(der)
}
// SignCertificate signs a certificate request using a KMS-backed signer and
// returns the parsed, signed certificate.
func (ca *KMSCA) SignCertificate(input *IssueCertificateInput) (*x509.Certificate, error) {
	kmsSigner, err := signer.New(ca.Client, input.KeyID)
	if err != nil {
		return nil, err
	}

	der, err := x509.CreateCertificate(rand.Reader, input.Cert, input.Parent, input.PublicKey, kmsSigner)
	if err != nil {
		return nil, err
	}
	return x509.ParseCertificate(der)
}
// CreateKeyInput input for the CreateKey method
type CreateKeyInput struct {
	// AliasName Specifies the alias name for the kms key. This value must begin with alias/ followed by a
	// name, such as alias/ExampleAlias.
	AliasName string
	// Description for the key
	Description string
	// CustomerMasterKeySpec determines the signing algorithms that the CMK supports.
	// Only RSA_2048 is currently supported.
	CustomerMasterKeySpec string
	// Policy is the key policy to attach to the CMK
	Policy string
	// Tags is a list of tags for the key
	Tags map[string]string
}

// DeleteKeyInput input for the DeleteKey method
type DeleteKeyInput struct {
	// AliasName Specifies the alias name for the kms key. This value must begin with alias/ followed by a
	// name, such as alias/ExampleAlias.
	AliasName string
	// PendingWindowInDays. This value is optional. If you include a value, it must be between 7 and
	// 30, inclusive. If you do not include a value, it defaults to 30.
	PendingWindowInDays int
}

// Key identifies a KMS key.
type Key struct {
	// KeyID is the KMS Key Id
	KeyID string
}

// GenerateCertificateAuthorityCertificateInput input for the
// GenerateCertificateAuthorityCertificate method.
type GenerateCertificateAuthorityCertificateInput struct {
	// KeyID is the KMS Key Id
	KeyID string
	// Subject of the CA certificate
	Subject pkix.Name
	// Duration is certificate validity duration
	Duration time.Duration
	// Rounding is used to round down the certificate NotBefore time.
	// For example, by setting the rounding period to 1h, all the certificates generated between the start
	// and in the end of an hour will be identical
	Rounding time.Duration
}

// IssueCertificateInput input for the SignCertificate method.
type IssueCertificateInput struct {
	// KeyID is the KMS Key Id
	KeyID string
	// Cert is the certificate (built from the CSR) to sign
	Cert *x509.Certificate
	// PublicKey of the certificate subject
	PublicKey crypto.PublicKey
	// Parent Signing Certificate
	Parent *x509.Certificate
}
|
package main
import (
"gopkg.in/alecthomas/kingpin.v2"
"mingchuan.me/app"
)
// main parses the command line and starts the application with the
// configured config file path.
func main() {
	configPath := kingpin.Flag("config", "config path").Default("config.yml").Short('c').String()

	// Parse command-line arguments.
	kingpin.Parse()

	// Launch the application; any startup failure is fatal.
	if err := app.Start(*configPath); err != nil {
		panic(err)
	}
}
|
package ondemand
import (
"fmt"
"strings"
)
// Competitors is the response payload of the getCompetitors endpoint:
// https://www.barchart.com/ondemand/api/getCompetitors
type Competitors struct {
	Results []struct {
		Symbol             string  `json:"symbol"`
		Name               string  `json:"name"`
		MarketCap          int64   `json:"marketCap"`
		FiftyTwoWkHigh     float64 `json:"fiftyTwoWkHigh"`
		FiftyTwoWkHighDate string  `json:"fiftyTwoWkHighDate"`
		FiftyTwoWkLow      float64 `json:"fiftyTwoWkLow"`
		FiftyTwoWkLowDate  string  `json:"fiftyTwoWkLowDate"`
	}
}
// Competitors fetches competitor data for symbol, optionally restricted to
// the given result fields: https://www.barchart.com/ondemand/api/getCompetitors
func (od *OnDemand) Competitors(symbol string, fields []string) (Competitors, error) {
	var result Competitors

	upperSymbol := strings.ToUpper(symbol)
	fieldList := ""
	if fields != nil {
		fieldList = strings.Join(fields, ",")
	}

	query := fmt.Sprintf("symbol=%v&fields=%s", upperSymbol, fieldList)
	_, err := od.Request("getCompetitors.json", query, &result)
	return result, err
}
|
package resources_test
import (
"testing"
"themis/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestResources wires Ginkgo into the standard go test runner and executes
// the Resources suite.
func TestResources(t *testing.T) {
	// setup logger
	utils.InitLogger()
	utils.SetLogFile("test.log")

	RegisterFailHandler(Fail)
	RunSpecs(t, "Resources Suite")
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package beta
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations"
)
// validate checks that all required Instance fields are populated and
// recursively validates the nested network config when present.
func (r *Instance) validate() error {
	if err := dcl.RequiredParameter(r.Name, "Name"); err != nil {
		return err
	}
	if err := dcl.Required(r, "type"); err != nil {
		return err
	}
	if err := dcl.RequiredParameter(r.Project, "Project"); err != nil {
		return err
	}
	if err := dcl.RequiredParameter(r.Location, "Location"); err != nil {
		return err
	}
	if dcl.IsEmptyValueIndirect(r.NetworkConfig) {
		return nil
	}
	return r.NetworkConfig.validate()
}
// validate is a no-op: InstanceNetworkConfig has no required fields.
func (r *InstanceNetworkConfig) validate() error {
	return nil
}
// validate is a no-op: InstanceAvailableVersion has no required fields.
func (r *InstanceAvailableVersion) validate() error {
	return nil
}
// basePath returns the root URL of the Data Fusion v1beta1 API.
func (r *Instance) basePath() string {
	return dcl.Nprintf("https://datafusion.googleapis.com/v1beta1/", map[string]interface{}{})
}
// getURL returns the GET endpoint for this instance, built from the
// normalized project, location and name.
func (r *Instance) getURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
		"name":     dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/instances/{{name}}", nr.basePath(), userBasePath, fields), nil
}
// listURL returns the collection endpoint used to list instances in the
// normalized project/location.
func (r *Instance) listURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/instances", nr.basePath(), userBasePath, fields), nil
}
// createURL returns the creation endpoint; the instance name is passed via
// the instanceId query parameter.
func (r *Instance) createURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
		"name":     dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/instances?instanceId={{name}}", nr.basePath(), userBasePath, fields), nil
}
// deleteURL returns the DELETE endpoint for this instance.
func (r *Instance) deleteURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
		"name":     dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/instances/{{name}}", nr.basePath(), userBasePath, fields), nil
}
// instanceApiOperation represents a mutable operation in the underlying REST
// API such as Create, Update, or Delete.
type instanceApiOperation interface {
	do(context.Context, *Instance, *Client) error
}
// newUpdateInstanceUpdateInstanceRequest creates a request for an
// Instance resource's UpdateInstance update type by filling in the update
// fields based on the intended state of the resource. Only fields that are
// set (non-empty through the pointer) are included in the request body.
func newUpdateInstanceUpdateInstanceRequest(ctx context.Context, f *Instance, c *Client) (map[string]interface{}, error) {
	req := map[string]interface{}{}
	res := f
	_ = res

	if v := f.EnableStackdriverLogging; !dcl.IsEmptyValueIndirect(v) {
		req["enableStackdriverLogging"] = v
	}
	if v := f.EnableStackdriverMonitoring; !dcl.IsEmptyValueIndirect(v) {
		req["enableStackdriverMonitoring"] = v
	}
	if v := f.Labels; !dcl.IsEmptyValueIndirect(v) {
		req["labels"] = v
	}
	if v := f.Version; !dcl.IsEmptyValueIndirect(v) {
		req["version"] = v
	}
	if v := f.DataprocServiceAccount; !dcl.IsEmptyValueIndirect(v) {
		req["dataprocServiceAccount"] = v
	}
	return req, nil
}
// marshalUpdateInstanceUpdateInstanceRequest converts the update into
// the final JSON request body. (c is unused but kept for the generated
// call signature.)
func marshalUpdateInstanceUpdateInstanceRequest(c *Client, m map[string]interface{}) ([]byte, error) {
	return json.Marshal(m)
}
// updateInstanceUpdateInstanceOperation is the instanceApiOperation that
// applies an UpdateInstance PATCH.
type updateInstanceUpdateInstanceOperation struct {
	// If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
	// Usually it will be nil - this is to prevent us from accidentally depending on apply
	// options, which should usually be unnecessary.
	ApplyOptions []dcl.ApplyOption
	// FieldDiffs holds the diffs that triggered this update.
	FieldDiffs []*dcl.FieldDiff
}
// do creates a request and sends it to the appropriate URL. In most operations,
// do will transcribe a subset of the resource into a request object and send a
// PUT request to a single URL.
func (op *updateInstanceUpdateInstanceOperation) do(ctx context.Context, r *Instance, c *Client) error {
	// Verify the resource still exists before attempting the update.
	_, err := c.GetInstance(ctx, r)
	if err != nil {
		return err
	}

	u, err := r.updateURL(c.Config.BasePath, "UpdateInstance")
	if err != nil {
		return err
	}

	// Restrict the PATCH to the fields that actually changed.
	mask := op.UpdateMask()
	u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask})
	if err != nil {
		return err
	}

	req, err := newUpdateInstanceUpdateInstanceRequest(ctx, r, c)
	if err != nil {
		return err
	}

	c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req)
	body, err := marshalUpdateInstanceUpdateInstanceRequest(c, req)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider)
	if err != nil {
		return err
	}

	// The API returns a long-running operation; block until it completes.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET")

	if err != nil {
		return err
	}

	return nil
}
// listInstanceRaw performs a single list call and returns the raw response
// body. pageToken/pageSize are forwarded as query parameters; a pageSize
// equal to InstanceMaxPage is treated as "use the server default".
func (c *Client) listInstanceRaw(ctx context.Context, r *Instance, pageToken string, pageSize int32) ([]byte, error) {
	u, err := r.urlNormalized().listURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}

	m := make(map[string]string)
	if pageToken != "" {
		m["pageToken"] = pageToken
	}

	if pageSize != InstanceMaxPage {
		m["pageSize"] = fmt.Sprintf("%v", pageSize)
	}

	u, err = dcl.AddQueryParams(u, m)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}
// listInstanceOperation mirrors the JSON shape of a list response page.
type listInstanceOperation struct {
	Instances []map[string]interface{} `json:"instances"`
	Token     string                   `json:"nextPageToken"`
}
// listInstance fetches one page of instances and returns the decoded
// resources plus the next-page token (empty when there are no more pages).
func (c *Client) listInstance(ctx context.Context, r *Instance, pageToken string, pageSize int32) ([]*Instance, string, error) {
	b, err := c.listInstanceRaw(ctx, r, pageToken, pageSize)
	if err != nil {
		return nil, "", err
	}

	var m listInstanceOperation
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, "", err
	}

	var l []*Instance
	for _, v := range m.Instances {
		res, err := unmarshalMapInstance(v, c, r)
		if err != nil {
			return nil, m.Token, err
		}
		// The list response omits these parent fields; restore them from
		// the request resource.
		res.Project = r.Project
		res.Location = r.Location
		l = append(l, res)
	}

	return l, m.Token, nil
}
// deleteAllInstance deletes every resource in resources for which the filter
// f returns true. Individual deletion failures are collected instead of
// aborting the loop, so one failed delete does not prevent the rest; the
// collected messages are joined into a single error at the end.
//
// Fixes: the local slice was named `errors`, shadowing the standard library
// `errors` package name; also removed the redundant `else` after return.
func (c *Client) deleteAllInstance(ctx context.Context, f func(*Instance) bool, resources []*Instance) error {
	var errs []string
	for _, res := range resources {
		if !f(res) {
			continue
		}
		// We do not want deleteAll to fail on a deletion or else it will stop deleting other resources.
		if err := c.DeleteInstance(ctx, res); err != nil {
			errs = append(errs, err.Error())
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("%v", strings.Join(errs, "\n"))
	}
	return nil
}
type deleteInstanceOperation struct{}
// do deletes the Instance: it first confirms the resource exists (treating
// not-found as success), issues the DELETE, waits on the resulting
// long-running operation, and then polls until a follow-up GET confirms the
// resource is really gone.
//
// Fixes: the error returned by dcl.Do was previously discarded, so a resource
// that was still present after all retries (dcl.NotDeletedError) was silently
// reported as a successful delete. The error is now propagated to the caller.
func (op *deleteInstanceOperation) do(ctx context.Context, r *Instance, c *Client) error {
	r, err := c.GetInstance(ctx, r)
	if err != nil {
		if dcl.IsNotFound(err) {
			c.Config.Logger.InfoWithContextf(ctx, "Instance not found, returning. Original error: %v", err)
			return nil
		}
		c.Config.Logger.WarningWithContextf(ctx, "GetInstance checking for existence. error: %v", err)
		return err
	}
	u, err := r.deleteURL(c.Config.BasePath)
	if err != nil {
		return err
	}
	// Delete should never have a body
	body := &bytes.Buffer{}
	resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider)
	if err != nil {
		return err
	}
	// wait for object to be deleted.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		return err
	}
	// We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
	// This is the reason we are adding retry to handle that case.
	retriesRemaining := 10
	return dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
		_, err := c.GetInstance(ctx, r)
		if dcl.IsNotFound(err) {
			return nil, nil
		}
		if retriesRemaining > 0 {
			retriesRemaining--
			return &dcl.RetryDetails{}, dcl.OperationNotDone{}
		}
		return nil, dcl.NotDeletedError{ExistingResource: r}
	}, c.Config.RetryProvider)
}
// Create operations are similar to Update operations, although they do not have
// specific request objects. The Create request object is the json encoding of
// the resource, which is modified by res.marshal to form the base request body.
type createInstanceOperation struct {
	// response holds the first raw API response received for the create
	// operation; it is exposed through FirstResponse.
	response map[string]interface{}
}
// FirstResponse returns the first response recorded for the create
// operation, along with whether any response has been recorded yet.
func (op *createInstanceOperation) FirstResponse() (map[string]interface{}, bool) {
	ok := len(op.response) > 0
	return op.response, ok
}
// do creates the Instance: it POSTs the marshaled resource to the create URL,
// waits for the resulting long-running operation to finish, records the
// operation's first response on op, and finally verifies the resource is
// readable via GET.
//
// Fixes: the failure log used Logger.Warningf while every other log call in
// this file uses the context-aware ...WithContextf variants; the request
// context was being dropped from that log line.
func (op *createInstanceOperation) do(ctx context.Context, r *Instance, c *Client) error {
	c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
	u, err := r.createURL(c.Config.BasePath)
	if err != nil {
		return err
	}
	req, err := r.marshal(c)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
	if err != nil {
		return err
	}
	// wait for object to be created.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		c.Config.Logger.WarningWithContextf(ctx, "Creation failed after waiting for operation: %v", err)
		return err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation")
	op.response, _ = o.FirstResponse()
	if _, err := c.GetInstance(ctx, r); err != nil {
		c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err)
		return err
	}
	return nil
}
// getInstanceRaw performs a GET against the resource's URL and returns the
// raw response body bytes.
func (c *Client) getInstanceRaw(ctx context.Context, r *Instance) ([]byte, error) {
	u, err := r.getURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}
// instanceDiffsForRawDesired fetches the current (initial) state of the
// Instance, canonicalizes both the initial and desired states, and computes
// the field diffs between them. When the resource does not yet exist, it
// returns a nil initial state, the canonicalized desired state, and no diffs.
func (c *Client) instanceDiffsForRawDesired(ctx context.Context, rawDesired *Instance, opts ...dcl.ApplyOption) (initial, desired *Instance, diffs []*dcl.FieldDiff, err error) {
	c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
	// First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
	var fetchState *Instance
	if sh := dcl.FetchStateHint(opts); sh != nil {
		if r, ok := sh.(*Instance); !ok {
			c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Instance, got %T", sh)
		} else {
			fetchState = r
		}
	}
	// Fall back to the desired state when no usable hint was supplied.
	if fetchState == nil {
		fetchState = rawDesired
	}
	// 1.2: Retrieval of raw initial state from API
	rawInitial, err := c.GetInstance(ctx, fetchState)
	if rawInitial == nil {
		// A nil result with a non-not-found error is a real retrieval failure.
		if !dcl.IsNotFound(err) {
			c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Instance resource already exists: %s", err)
			return nil, nil, nil, fmt.Errorf("failed to retrieve Instance resource: %v", err)
		}
		c.Config.Logger.InfoWithContext(ctx, "Found that Instance resource did not exist.")
		// Perform canonicalization to pick up defaults.
		desired, err = canonicalizeInstanceDesiredState(rawDesired, rawInitial)
		return nil, desired, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Instance: %v", rawInitial)
	c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Instance: %v", rawDesired)
	// The Get call applies postReadExtract and so the result may contain fields that are not part of API version.
	if err := extractInstanceFields(rawInitial); err != nil {
		return nil, nil, nil, err
	}
	// 1.3: Canonicalize raw initial state into initial state.
	initial, err = canonicalizeInstanceInitialState(rawInitial, rawDesired)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Instance: %v", initial)
	// 1.4: Canonicalize raw desired state into desired state.
	desired, err = canonicalizeInstanceDesiredState(rawDesired, rawInitial, opts...)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Instance: %v", desired)
	// 2.1: Comparison of initial and desired state.
	diffs, err = diffInstance(c, desired, initial, opts...)
	return initial, desired, diffs, err
}
// canonicalizeInstanceInitialState converts the raw initial (server) state
// into the canonical initial state. Currently a pass-through.
func canonicalizeInstanceInitialState(rawInitial, rawDesired *Instance) (*Instance, error) {
	// TODO(magic-modules-eng): write canonicalizer once relevant traits are added.
	return rawInitial, nil
}
/*
 * Canonicalizers
 *
 * These are responsible for converting either a user-specified config or a
 * GCP API response to a standard format that can be used for difference checking.
 */
// canonicalizeInstanceDesiredState merges the user's desired state with the
// raw initial (server) state field by field. Where the desired value is
// equivalent to the initial one (self-link vs name, empty vs unset, etc.),
// the initial value is kept so that no spurious diff is produced later.
func canonicalizeInstanceDesiredState(rawDesired, rawInitial *Instance, opts ...dcl.ApplyOption) (*Instance, error) {
	if rawInitial == nil {
		// Since the initial state is empty, the desired state is all we have.
		// We canonicalize the remaining nested objects with nil to pick up defaults.
		rawDesired.NetworkConfig = canonicalizeInstanceNetworkConfig(rawDesired.NetworkConfig, nil, opts...)
		return rawDesired, nil
	}
	canonicalDesired := &Instance{}
	if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) {
		canonicalDesired.Name = rawInitial.Name
	} else {
		canonicalDesired.Name = rawDesired.Name
	}
	if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) {
		canonicalDesired.Description = rawInitial.Description
	} else {
		canonicalDesired.Description = rawDesired.Description
	}
	if dcl.IsZeroValue(rawDesired.Type) || (dcl.IsEmptyValueIndirect(rawDesired.Type) && dcl.IsEmptyValueIndirect(rawInitial.Type)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Type = rawInitial.Type
	} else {
		canonicalDesired.Type = rawDesired.Type
	}
	if dcl.BoolCanonicalize(rawDesired.EnableStackdriverLogging, rawInitial.EnableStackdriverLogging) {
		canonicalDesired.EnableStackdriverLogging = rawInitial.EnableStackdriverLogging
	} else {
		canonicalDesired.EnableStackdriverLogging = rawDesired.EnableStackdriverLogging
	}
	if dcl.BoolCanonicalize(rawDesired.EnableStackdriverMonitoring, rawInitial.EnableStackdriverMonitoring) {
		canonicalDesired.EnableStackdriverMonitoring = rawInitial.EnableStackdriverMonitoring
	} else {
		canonicalDesired.EnableStackdriverMonitoring = rawDesired.EnableStackdriverMonitoring
	}
	if dcl.BoolCanonicalize(rawDesired.PrivateInstance, rawInitial.PrivateInstance) {
		canonicalDesired.PrivateInstance = rawInitial.PrivateInstance
	} else {
		canonicalDesired.PrivateInstance = rawDesired.PrivateInstance
	}
	// Nested object: delegate to its own canonicalizer.
	canonicalDesired.NetworkConfig = canonicalizeInstanceNetworkConfig(rawDesired.NetworkConfig, rawInitial.NetworkConfig, opts...)
	if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Labels = rawInitial.Labels
	} else {
		canonicalDesired.Labels = rawDesired.Labels
	}
	if dcl.IsZeroValue(rawDesired.Options) || (dcl.IsEmptyValueIndirect(rawDesired.Options) && dcl.IsEmptyValueIndirect(rawInitial.Options)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Options = rawInitial.Options
	} else {
		canonicalDesired.Options = rawDesired.Options
	}
	if dcl.StringCanonicalize(rawDesired.Zone, rawInitial.Zone) {
		canonicalDesired.Zone = rawInitial.Zone
	} else {
		canonicalDesired.Zone = rawDesired.Zone
	}
	if dcl.StringCanonicalize(rawDesired.Version, rawInitial.Version) {
		canonicalDesired.Version = rawInitial.Version
	} else {
		canonicalDesired.Version = rawDesired.Version
	}
	if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) {
		canonicalDesired.DisplayName = rawInitial.DisplayName
	} else {
		canonicalDesired.DisplayName = rawDesired.DisplayName
	}
	if dcl.IsZeroValue(rawDesired.DataprocServiceAccount) || (dcl.IsEmptyValueIndirect(rawDesired.DataprocServiceAccount) && dcl.IsEmptyValueIndirect(rawInitial.DataprocServiceAccount)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.DataprocServiceAccount = rawInitial.DataprocServiceAccount
	} else {
		canonicalDesired.DataprocServiceAccount = rawDesired.DataprocServiceAccount
	}
	if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) {
		canonicalDesired.Project = rawInitial.Project
	} else {
		canonicalDesired.Project = rawDesired.Project
	}
	if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) {
		canonicalDesired.Location = rawInitial.Location
	} else {
		canonicalDesired.Location = rawDesired.Location
	}
	return canonicalDesired, nil
}
// canonicalizeInstanceNewState reconciles the state returned by the API
// (rawNew) with the desired state after an apply. For each field: if both
// sides are empty the desired value is kept; otherwise equivalent values are
// normalized to the desired spelling. Empty else branches deliberately leave
// the server-returned value untouched (output-only or map/enum fields with no
// string normalization).
func canonicalizeInstanceNewState(c *Client, rawNew, rawDesired *Instance) (*Instance, error) {
	rawNew.Name = rawDesired.Name
	if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) {
		rawNew.Description = rawDesired.Description
	} else {
		if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) {
			rawNew.Description = rawDesired.Description
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Type) && dcl.IsEmptyValueIndirect(rawDesired.Type) {
		rawNew.Type = rawDesired.Type
	} else {
	}
	if dcl.IsEmptyValueIndirect(rawNew.EnableStackdriverLogging) && dcl.IsEmptyValueIndirect(rawDesired.EnableStackdriverLogging) {
		rawNew.EnableStackdriverLogging = rawDesired.EnableStackdriverLogging
	} else {
		if dcl.BoolCanonicalize(rawDesired.EnableStackdriverLogging, rawNew.EnableStackdriverLogging) {
			rawNew.EnableStackdriverLogging = rawDesired.EnableStackdriverLogging
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.EnableStackdriverMonitoring) && dcl.IsEmptyValueIndirect(rawDesired.EnableStackdriverMonitoring) {
		rawNew.EnableStackdriverMonitoring = rawDesired.EnableStackdriverMonitoring
	} else {
		if dcl.BoolCanonicalize(rawDesired.EnableStackdriverMonitoring, rawNew.EnableStackdriverMonitoring) {
			rawNew.EnableStackdriverMonitoring = rawDesired.EnableStackdriverMonitoring
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.PrivateInstance) && dcl.IsEmptyValueIndirect(rawDesired.PrivateInstance) {
		rawNew.PrivateInstance = rawDesired.PrivateInstance
	} else {
		if dcl.BoolCanonicalize(rawDesired.PrivateInstance, rawNew.PrivateInstance) {
			rawNew.PrivateInstance = rawDesired.PrivateInstance
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.NetworkConfig) && dcl.IsEmptyValueIndirect(rawDesired.NetworkConfig) {
		rawNew.NetworkConfig = rawDesired.NetworkConfig
	} else {
		rawNew.NetworkConfig = canonicalizeNewInstanceNetworkConfig(c, rawDesired.NetworkConfig, rawNew.NetworkConfig)
	}
	if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) {
		rawNew.Labels = rawDesired.Labels
	} else {
	}
	if dcl.IsEmptyValueIndirect(rawNew.Options) && dcl.IsEmptyValueIndirect(rawDesired.Options) {
		rawNew.Options = rawDesired.Options
	} else {
	}
	if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) {
		rawNew.CreateTime = rawDesired.CreateTime
	} else {
	}
	if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) {
		rawNew.UpdateTime = rawDesired.UpdateTime
	} else {
	}
	if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) {
		rawNew.State = rawDesired.State
	} else {
	}
	if dcl.IsEmptyValueIndirect(rawNew.StateMessage) && dcl.IsEmptyValueIndirect(rawDesired.StateMessage) {
		rawNew.StateMessage = rawDesired.StateMessage
	} else {
		if dcl.StringCanonicalize(rawDesired.StateMessage, rawNew.StateMessage) {
			rawNew.StateMessage = rawDesired.StateMessage
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.ServiceEndpoint) && dcl.IsEmptyValueIndirect(rawDesired.ServiceEndpoint) {
		rawNew.ServiceEndpoint = rawDesired.ServiceEndpoint
	} else {
		if dcl.StringCanonicalize(rawDesired.ServiceEndpoint, rawNew.ServiceEndpoint) {
			rawNew.ServiceEndpoint = rawDesired.ServiceEndpoint
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Zone) && dcl.IsEmptyValueIndirect(rawDesired.Zone) {
		rawNew.Zone = rawDesired.Zone
	} else {
		if dcl.StringCanonicalize(rawDesired.Zone, rawNew.Zone) {
			rawNew.Zone = rawDesired.Zone
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Version) && dcl.IsEmptyValueIndirect(rawDesired.Version) {
		rawNew.Version = rawDesired.Version
	} else {
		if dcl.StringCanonicalize(rawDesired.Version, rawNew.Version) {
			rawNew.Version = rawDesired.Version
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) {
		rawNew.DisplayName = rawDesired.DisplayName
	} else {
		if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) {
			rawNew.DisplayName = rawDesired.DisplayName
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.AvailableVersion) && dcl.IsEmptyValueIndirect(rawDesired.AvailableVersion) {
		rawNew.AvailableVersion = rawDesired.AvailableVersion
	} else {
		rawNew.AvailableVersion = canonicalizeNewInstanceAvailableVersionSlice(c, rawDesired.AvailableVersion, rawNew.AvailableVersion)
	}
	if dcl.IsEmptyValueIndirect(rawNew.ApiEndpoint) && dcl.IsEmptyValueIndirect(rawDesired.ApiEndpoint) {
		rawNew.ApiEndpoint = rawDesired.ApiEndpoint
	} else {
		if dcl.StringCanonicalize(rawDesired.ApiEndpoint, rawNew.ApiEndpoint) {
			rawNew.ApiEndpoint = rawDesired.ApiEndpoint
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.GcsBucket) && dcl.IsEmptyValueIndirect(rawDesired.GcsBucket) {
		rawNew.GcsBucket = rawDesired.GcsBucket
	} else {
		if dcl.StringCanonicalize(rawDesired.GcsBucket, rawNew.GcsBucket) {
			rawNew.GcsBucket = rawDesired.GcsBucket
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.P4ServiceAccount) && dcl.IsEmptyValueIndirect(rawDesired.P4ServiceAccount) {
		rawNew.P4ServiceAccount = rawDesired.P4ServiceAccount
	} else {
		if dcl.StringCanonicalize(rawDesired.P4ServiceAccount, rawNew.P4ServiceAccount) {
			rawNew.P4ServiceAccount = rawDesired.P4ServiceAccount
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.TenantProjectId) && dcl.IsEmptyValueIndirect(rawDesired.TenantProjectId) {
		rawNew.TenantProjectId = rawDesired.TenantProjectId
	} else {
	}
	if dcl.IsEmptyValueIndirect(rawNew.DataprocServiceAccount) && dcl.IsEmptyValueIndirect(rawDesired.DataprocServiceAccount) {
		rawNew.DataprocServiceAccount = rawDesired.DataprocServiceAccount
	} else {
	}
	rawNew.Project = rawDesired.Project
	rawNew.Location = rawDesired.Location
	return rawNew, nil
}
// canonicalizeInstanceNetworkConfig merges the desired network config with
// the initial (server) state, preferring the initial value whenever the two
// are equivalent so that no spurious diff is produced.
func canonicalizeInstanceNetworkConfig(des, initial *InstanceNetworkConfig, opts ...dcl.ApplyOption) *InstanceNetworkConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		// An explicitly-empty desired object is returned as-is.
		return des
	}
	if initial == nil {
		return des
	}
	canonical := &InstanceNetworkConfig{}
	if dcl.IsZeroValue(des.Network) || (dcl.IsEmptyValueIndirect(des.Network) && dcl.IsEmptyValueIndirect(initial.Network)) {
		// Desired and initial values are equivalent; keep the initial value.
		canonical.Network = initial.Network
	} else {
		canonical.Network = des.Network
	}
	if dcl.StringCanonicalize(des.IPAllocation, initial.IPAllocation) || dcl.IsZeroValue(des.IPAllocation) {
		canonical.IPAllocation = initial.IPAllocation
	} else {
		canonical.IPAllocation = des.IPAllocation
	}
	return canonical
}
// canonicalizeInstanceNetworkConfigSlice canonicalizes each element of des
// against the matching element of initial. When the lengths differ, elements
// are canonicalized against nil instead.
func canonicalizeInstanceNetworkConfigSlice(des, initial []InstanceNetworkConfig, opts ...dcl.ApplyOption) []InstanceNetworkConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	items := make([]InstanceNetworkConfig, 0, len(des))
	if len(des) != len(initial) {
		for i := range des {
			if cd := canonicalizeInstanceNetworkConfig(&des[i], nil, opts...); cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}
	for i := range des {
		if cd := canonicalizeInstanceNetworkConfig(&des[i], &initial[i], opts...); cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewInstanceNetworkConfig normalizes the API-returned network
// config (nw) against the desired one (des), keeping the desired spelling of
// equivalent string values.
func canonicalizeNewInstanceNetworkConfig(c *Client, des, nw *InstanceNetworkConfig) *InstanceNetworkConfig {
	if des == nil {
		return nw
	}
	if nw == nil {
		if !dcl.IsEmptyValueIndirect(des) {
			return nil
		}
		c.Config.Logger.Info("Found explicitly empty value for InstanceNetworkConfig while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	}
	if dcl.StringCanonicalize(des.IPAllocation, nw.IPAllocation) {
		nw.IPAllocation = des.IPAllocation
	}
	return nw
}
// canonicalizeNewInstanceNetworkConfigSet canonicalizes des and nw as
// unordered sets: each desired element is matched (by diff) against an actual
// element, matched pairs are canonicalized, and unmatched actual elements are
// appended unchanged.
func canonicalizeNewInstanceNetworkConfigSet(c *Client, des, nw []InstanceNetworkConfig) []InstanceNetworkConfig {
	if des == nil {
		return nw
	}
	var items []InstanceNetworkConfig
	for i := range des {
		match := -1
		for j := range nw {
			if diffs, _ := compareInstanceNetworkConfigNewStyle(&des[i], &nw[j], dcl.FieldName{}); len(diffs) == 0 {
				match = j
				break
			}
		}
		if match < 0 {
			continue
		}
		items = append(items, *canonicalizeNewInstanceNetworkConfig(c, &des[i], &nw[match]))
		// Remove the matched element so it cannot be matched twice.
		nw = append(nw[:match], nw[match+1:]...)
	}
	// Anything left in nw had no match in des; keep it as-is.
	return append(items, nw...)
}
// canonicalizeNewInstanceNetworkConfigSlice canonicalizes des and nw
// pairwise. Unequal lengths are returned untouched: a diff will be reported
// later, so canonicalization would be misleading.
func canonicalizeNewInstanceNetworkConfigSlice(c *Client, des, nw []InstanceNetworkConfig) []InstanceNetworkConfig {
	if des == nil {
		return nw
	}
	if len(des) != len(nw) {
		return nw
	}
	var items []InstanceNetworkConfig
	for i := range des {
		d, n := des[i], nw[i]
		items = append(items, *canonicalizeNewInstanceNetworkConfig(c, &d, &n))
	}
	return items
}
// canonicalizeInstanceAvailableVersion merges a desired available-version
// entry with the initial state. The canonical result has no settable fields
// (every field of InstanceAvailableVersion handled here is server-populated),
// so a fresh empty struct is returned once the nil/empty cases are handled.
func canonicalizeInstanceAvailableVersion(des, initial *InstanceAvailableVersion, opts ...dcl.ApplyOption) *InstanceAvailableVersion {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}
	if initial == nil {
		return des
	}
	cDes := &InstanceAvailableVersion{}
	return cDes
}
// canonicalizeInstanceAvailableVersionSlice canonicalizes each element of des
// against the matching element of initial. When the lengths differ, elements
// are canonicalized against nil instead.
func canonicalizeInstanceAvailableVersionSlice(des, initial []InstanceAvailableVersion, opts ...dcl.ApplyOption) []InstanceAvailableVersion {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	items := make([]InstanceAvailableVersion, 0, len(des))
	if len(des) != len(initial) {
		for i := range des {
			if cd := canonicalizeInstanceAvailableVersion(&des[i], nil, opts...); cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}
	for i := range des {
		if cd := canonicalizeInstanceAvailableVersion(&des[i], &initial[i], opts...); cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewInstanceAvailableVersion normalizes the API-returned
// available-version entry (nw) against the desired one (des), keeping the
// desired spelling of equivalent values.
func canonicalizeNewInstanceAvailableVersion(c *Client, des, nw *InstanceAvailableVersion) *InstanceAvailableVersion {
	if des == nil {
		return nw
	}
	if nw == nil {
		if !dcl.IsEmptyValueIndirect(des) {
			return nil
		}
		c.Config.Logger.Info("Found explicitly empty value for InstanceAvailableVersion while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	}
	if dcl.StringCanonicalize(des.VersionNumber, nw.VersionNumber) {
		nw.VersionNumber = des.VersionNumber
	}
	if dcl.BoolCanonicalize(des.DefaultVersion, nw.DefaultVersion) {
		nw.DefaultVersion = des.DefaultVersion
	}
	if dcl.StringArrayCanonicalize(des.AvailableFeatures, nw.AvailableFeatures) {
		nw.AvailableFeatures = des.AvailableFeatures
	}
	return nw
}
// canonicalizeNewInstanceAvailableVersionSet canonicalizes des and nw as
// unordered sets: each desired element is matched (by diff) against an actual
// element, matched pairs are canonicalized, and unmatched actual elements are
// appended unchanged.
func canonicalizeNewInstanceAvailableVersionSet(c *Client, des, nw []InstanceAvailableVersion) []InstanceAvailableVersion {
	if des == nil {
		return nw
	}
	var items []InstanceAvailableVersion
	for i := range des {
		match := -1
		for j := range nw {
			if diffs, _ := compareInstanceAvailableVersionNewStyle(&des[i], &nw[j], dcl.FieldName{}); len(diffs) == 0 {
				match = j
				break
			}
		}
		if match < 0 {
			continue
		}
		items = append(items, *canonicalizeNewInstanceAvailableVersion(c, &des[i], &nw[match]))
		// Remove the matched element so it cannot be matched twice.
		nw = append(nw[:match], nw[match+1:]...)
	}
	// Anything left in nw had no match in des; keep it as-is.
	return append(items, nw...)
}
// canonicalizeNewInstanceAvailableVersionSlice canonicalizes des and nw
// pairwise. Unequal lengths are returned untouched: a diff will be reported
// later, so canonicalization would be misleading.
func canonicalizeNewInstanceAvailableVersionSlice(c *Client, des, nw []InstanceAvailableVersion) []InstanceAvailableVersion {
	if des == nil {
		return nw
	}
	if len(des) != len(nw) {
		return nw
	}
	var items []InstanceAvailableVersion
	for i := range des {
		d, n := des[i], nw[i]
		items = append(items, *canonicalizeNewInstanceAvailableVersion(c, &d, &n))
	}
	return items
}
// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
//
// Each field is compared with dcl.Diff; the DiffInfo's OperationSelector
// records how a difference on that field is remedied (recreate vs. the
// UpdateInstance patch operation), and OutputOnly marks server-controlled
// fields.
func diffInstance(c *Client, desired, actual *Instance, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}
	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Type")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.EnableStackdriverLogging, actual.EnableStackdriverLogging, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateInstanceUpdateInstanceOperation")}, fn.AddNest("EnableStackdriverLogging")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.EnableStackdriverMonitoring, actual.EnableStackdriverMonitoring, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateInstanceUpdateInstanceOperation")}, fn.AddNest("EnableStackdriverMonitoring")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.PrivateInstance, actual.PrivateInstance, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivateInstance")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.NetworkConfig, actual.NetworkConfig, dcl.DiffInfo{ObjectFunction: compareInstanceNetworkConfigNewStyle, EmptyObject: EmptyInstanceNetworkConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateInstanceUpdateInstanceOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Options, actual.Options, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Options")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.StateMessage, actual.StateMessage, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StateMessage")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.ServiceEndpoint, actual.ServiceEndpoint, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceEndpoint")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Zone, actual.Zone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Zone")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateInstanceUpdateInstanceOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.AvailableVersion, actual.AvailableVersion, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareInstanceAvailableVersionNewStyle, EmptyObject: EmptyInstanceAvailableVersion, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AvailableVersion")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.ApiEndpoint, actual.ApiEndpoint, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ApiEndpoint")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.GcsBucket, actual.GcsBucket, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GcsBucket")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.P4ServiceAccount, actual.P4ServiceAccount, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("P4ServiceAccount")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.TenantProjectId, actual.TenantProjectId, dcl.DiffInfo{OutputOnly: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TenantProjectId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.DataprocServiceAccount, actual.DataprocServiceAccount, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateInstanceUpdateInstanceOperation")}, fn.AddNest("DataprocServiceAccount")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}
// compareInstanceNetworkConfigNewStyle compares a desired and an actual
// InstanceNetworkConfig (each given either by value or pointer) and returns
// the per-field diffs.
//
// Fixes: the type-assertion error for the `actual` argument omitted the
// "or *InstanceNetworkConfig" alternative, unlike the `desired` branch,
// making the message misleading about the accepted types.
func compareInstanceNetworkConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff
	desired, ok := d.(*InstanceNetworkConfig)
	if !ok {
		desiredNotPointer, ok := d.(InstanceNetworkConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a InstanceNetworkConfig or *InstanceNetworkConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*InstanceNetworkConfig)
	if !ok {
		actualNotPointer, ok := a.(InstanceNetworkConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a InstanceNetworkConfig or *InstanceNetworkConfig", a)
		}
		actual = &actualNotPointer
	}
	if ds, err := dcl.Diff(desired.Network, actual.Network, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Network")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.IPAllocation, actual.IPAllocation, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IpAllocation")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}
// compareInstanceAvailableVersionNewStyle compares a desired and an actual
// InstanceAvailableVersion (each given either by value or pointer) and
// returns the per-field diffs. All compared fields are output-only.
//
// Fixes: the type-assertion error for the `actual` argument omitted the
// "or *InstanceAvailableVersion" alternative, unlike the `desired` branch,
// making the message misleading about the accepted types.
func compareInstanceAvailableVersionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff
	desired, ok := d.(*InstanceAvailableVersion)
	if !ok {
		desiredNotPointer, ok := d.(InstanceAvailableVersion)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a InstanceAvailableVersion or *InstanceAvailableVersion", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*InstanceAvailableVersion)
	if !ok {
		actualNotPointer, ok := a.(InstanceAvailableVersion)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a InstanceAvailableVersion or *InstanceAvailableVersion", a)
		}
		actual = &actualNotPointer
	}
	if ds, err := dcl.Diff(desired.VersionNumber, actual.VersionNumber, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VersionNumber")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.DefaultVersion, actual.DefaultVersion, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DefaultVersion")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.AvailableFeatures, actual.AvailableFeatures, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AvailableFeatures")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}
// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *Instance) urlNormalized() *Instance {
	normalized := dcl.Copy(*r).(Instance)
	// Reduce every string field that may carry a self-link to its final path
	// component so it can be spliced into a URL template.
	normalized.Name = dcl.SelfLinkToName(r.Name)
	normalized.Description = dcl.SelfLinkToName(r.Description)
	normalized.StateMessage = dcl.SelfLinkToName(r.StateMessage)
	normalized.ServiceEndpoint = dcl.SelfLinkToName(r.ServiceEndpoint)
	normalized.Zone = dcl.SelfLinkToName(r.Zone)
	normalized.Version = dcl.SelfLinkToName(r.Version)
	normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName)
	normalized.ApiEndpoint = dcl.SelfLinkToName(r.ApiEndpoint)
	normalized.GcsBucket = dcl.SelfLinkToName(r.GcsBucket)
	normalized.P4ServiceAccount = dcl.SelfLinkToName(r.P4ServiceAccount)
	normalized.TenantProjectId = dcl.SelfLinkToName(r.TenantProjectId)
	normalized.DataprocServiceAccount = dcl.SelfLinkToName(r.DataprocServiceAccount)
	normalized.Project = dcl.SelfLinkToName(r.Project)
	normalized.Location = dcl.SelfLinkToName(r.Location)
	return &normalized
}
// updateURL returns the request URL for the named update operation on this
// Instance. Only "UpdateInstance" is recognized; any other name is an error.
func (r *Instance) updateURL(userBasePath, updateName string) (string, error) {
	nr := r.urlNormalized()
	if updateName == "UpdateInstance" {
		fields := map[string]interface{}{
			"project":  dcl.ValueOrEmptyString(nr.Project),
			"location": dcl.ValueOrEmptyString(nr.Location),
			"name":     dcl.ValueOrEmptyString(nr.Name),
		}
		return dcl.URL("projects/{{project}}/locations/{{location}}/instances/{{name}}", nr.basePath(), userBasePath, fields), nil
	}
	return "", fmt.Errorf("unknown update name: %s", updateName)
}
// marshal encodes the Instance resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary. Expansion errors are wrapped with context.
func (r *Instance) marshal(c *Client) ([]byte, error) {
	m, err := expandInstance(c, r)
	if err != nil {
		return nil, fmt.Errorf("error marshalling Instance: %w", err)
	}

	return json.Marshal(m)
}
// unmarshalInstance decodes JSON responses into the Instance resource schema.
func unmarshalInstance(b []byte, c *Client, res *Instance) (*Instance, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return unmarshalMapInstance(m, c, res)
}

// unmarshalMapInstance converts an already-decoded JSON object into the
// Instance resource schema. An empty or non-object input is an error.
func unmarshalMapInstance(m map[string]interface{}, c *Client, res *Instance) (*Instance, error) {
	flattened := flattenInstance(c, m, res)
	if flattened == nil {
		return nil, fmt.Errorf("attempted to flatten empty json object")
	}
	return flattened, nil
}
// expandInstance expands Instance into a JSON request object. Output-only
// fields (CreateTime, State, ApiEndpoint, ...) are deliberately not expanded.
func expandInstance(c *Client, f *Instance) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	res := f
	_ = res
	// Name, Project and Location are URL parameters; dcl.EmptyValue() keeps
	// them out of the request body unless it yields a non-empty value
	// (presumably it never does — confirm against the dcl package docs).
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Name into name: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["name"] = v
	}
	if v := f.Description; dcl.ValueShouldBeSent(v) {
		m["description"] = v
	}
	if v := f.Type; dcl.ValueShouldBeSent(v) {
		m["type"] = v
	}
	if v := f.EnableStackdriverLogging; dcl.ValueShouldBeSent(v) {
		m["enableStackdriverLogging"] = v
	}
	if v := f.EnableStackdriverMonitoring; dcl.ValueShouldBeSent(v) {
		m["enableStackdriverMonitoring"] = v
	}
	if v := f.PrivateInstance; dcl.ValueShouldBeSent(v) {
		m["privateInstance"] = v
	}
	// NetworkConfig is a nested object and needs its own expander.
	if v, err := expandInstanceNetworkConfig(c, f.NetworkConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding NetworkConfig into networkConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["networkConfig"] = v
	}
	if v := f.Labels; dcl.ValueShouldBeSent(v) {
		m["labels"] = v
	}
	if v := f.Options; dcl.ValueShouldBeSent(v) {
		m["options"] = v
	}
	if v := f.Zone; dcl.ValueShouldBeSent(v) {
		m["zone"] = v
	}
	if v := f.Version; dcl.ValueShouldBeSent(v) {
		m["version"] = v
	}
	if v := f.DisplayName; dcl.ValueShouldBeSent(v) {
		m["displayName"] = v
	}
	if v := f.DataprocServiceAccount; dcl.ValueShouldBeSent(v) {
		m["dataprocServiceAccount"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Project into project: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["project"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Location into location: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["location"] = v
	}

	return m, nil
}
// flattenInstance flattens Instance from a JSON request object into the
// Instance type. A non-object or empty-object input yields nil; unknown keys
// in m are ignored.
func flattenInstance(c *Client, i interface{}, res *Instance) *Instance {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	if len(m) == 0 {
		return nil
	}

	resultRes := &Instance{}
	resultRes.Name = dcl.FlattenString(m["name"])
	resultRes.Description = dcl.FlattenString(m["description"])
	resultRes.Type = flattenInstanceTypeEnum(m["type"])
	resultRes.EnableStackdriverLogging = dcl.FlattenBool(m["enableStackdriverLogging"])
	resultRes.EnableStackdriverMonitoring = dcl.FlattenBool(m["enableStackdriverMonitoring"])
	resultRes.PrivateInstance = dcl.FlattenBool(m["privateInstance"])
	resultRes.NetworkConfig = flattenInstanceNetworkConfig(c, m["networkConfig"], res)
	resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"])
	resultRes.Options = dcl.FlattenKeyValuePairs(m["options"])
	resultRes.CreateTime = dcl.FlattenString(m["createTime"])
	resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
	resultRes.State = flattenInstanceStateEnum(m["state"])
	resultRes.StateMessage = dcl.FlattenString(m["stateMessage"])
	resultRes.ServiceEndpoint = dcl.FlattenString(m["serviceEndpoint"])
	resultRes.Zone = dcl.FlattenString(m["zone"])
	resultRes.Version = dcl.FlattenString(m["version"])
	resultRes.DisplayName = dcl.FlattenString(m["displayName"])
	resultRes.AvailableVersion = flattenInstanceAvailableVersionSlice(c, m["availableVersion"], res)
	resultRes.ApiEndpoint = dcl.FlattenString(m["apiEndpoint"])
	resultRes.GcsBucket = dcl.FlattenString(m["gcsBucket"])
	resultRes.P4ServiceAccount = dcl.FlattenString(m["p4ServiceAccount"])
	resultRes.TenantProjectId = dcl.FlattenString(m["tenantProjectId"])
	resultRes.DataprocServiceAccount = dcl.FlattenString(m["dataprocServiceAccount"])
	resultRes.Project = dcl.FlattenString(m["project"])
	resultRes.Location = dcl.FlattenString(m["location"])

	return resultRes
}
// expandInstanceNetworkConfigMap expands the contents of InstanceNetworkConfig into a JSON
// request object. A nil input yields nil so an absent map is omitted from the request.
func expandInstanceNetworkConfigMap(c *Client, f map[string]InstanceNetworkConfig, res *Instance) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandInstanceNetworkConfig(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandInstanceNetworkConfigSlice expands the contents of InstanceNetworkConfig into a JSON
// request object. A nil input yields nil so an absent list is omitted from the request.
func expandInstanceNetworkConfigSlice(c *Client, f []InstanceNetworkConfig, res *Instance) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandInstanceNetworkConfig(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenInstanceNetworkConfigMap flattens the contents of InstanceNetworkConfig from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map. Each
// element is type-asserted without an ok check, so a malformed element panics;
// generated code assumes well-formed API responses.
func flattenInstanceNetworkConfigMap(c *Client, i interface{}, res *Instance) map[string]InstanceNetworkConfig {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]InstanceNetworkConfig{}
	}

	if len(a) == 0 {
		return map[string]InstanceNetworkConfig{}
	}

	items := make(map[string]InstanceNetworkConfig)
	for k, item := range a {
		items[k] = *flattenInstanceNetworkConfig(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenInstanceNetworkConfigSlice flattens the contents of InstanceNetworkConfig from a JSON
// response object. Non-list or empty input yields an empty (non-nil) slice.
func flattenInstanceNetworkConfigSlice(c *Client, i interface{}, res *Instance) []InstanceNetworkConfig {
	a, ok := i.([]interface{})
	if !ok {
		return []InstanceNetworkConfig{}
	}

	if len(a) == 0 {
		return []InstanceNetworkConfig{}
	}

	items := make([]InstanceNetworkConfig, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenInstanceNetworkConfig(c, item.(map[string]interface{}), res))
	}

	return items
}
// expandInstanceNetworkConfig expands an instance of InstanceNetworkConfig into a JSON
// request object. An empty input expands to nil so it is omitted from the request.
func expandInstanceNetworkConfig(c *Client, f *InstanceNetworkConfig, res *Instance) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.Network; !dcl.IsEmptyValueIndirect(v) {
		m["network"] = v
	}
	if v := f.IPAllocation; !dcl.IsEmptyValueIndirect(v) {
		m["ipAllocation"] = v
	}

	return m, nil
}

// flattenInstanceNetworkConfig flattens an instance of InstanceNetworkConfig from a JSON
// response object. Non-map input yields nil; an empty value yields the shared
// EmptyInstanceNetworkConfig sentinel.
func flattenInstanceNetworkConfig(c *Client, i interface{}, res *Instance) *InstanceNetworkConfig {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &InstanceNetworkConfig{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyInstanceNetworkConfig
	}
	r.Network = dcl.FlattenString(m["network"])
	r.IPAllocation = dcl.FlattenString(m["ipAllocation"])

	return r
}
// expandInstanceAvailableVersionMap expands the contents of InstanceAvailableVersion into a JSON
// request object. A nil input yields nil so an absent map is omitted from the request.
func expandInstanceAvailableVersionMap(c *Client, f map[string]InstanceAvailableVersion, res *Instance) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandInstanceAvailableVersion(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandInstanceAvailableVersionSlice expands the contents of InstanceAvailableVersion into a JSON
// request object. A nil input yields nil so an absent list is omitted from the request.
func expandInstanceAvailableVersionSlice(c *Client, f []InstanceAvailableVersion, res *Instance) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandInstanceAvailableVersion(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenInstanceAvailableVersionMap flattens the contents of InstanceAvailableVersion from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map. Each
// element is type-asserted without an ok check, so a malformed element panics;
// generated code assumes well-formed API responses.
func flattenInstanceAvailableVersionMap(c *Client, i interface{}, res *Instance) map[string]InstanceAvailableVersion {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]InstanceAvailableVersion{}
	}

	if len(a) == 0 {
		return map[string]InstanceAvailableVersion{}
	}

	items := make(map[string]InstanceAvailableVersion)
	for k, item := range a {
		items[k] = *flattenInstanceAvailableVersion(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenInstanceAvailableVersionSlice flattens the contents of InstanceAvailableVersion from a JSON
// response object. Non-list or empty input yields an empty (non-nil) slice.
func flattenInstanceAvailableVersionSlice(c *Client, i interface{}, res *Instance) []InstanceAvailableVersion {
	a, ok := i.([]interface{})
	if !ok {
		return []InstanceAvailableVersion{}
	}

	if len(a) == 0 {
		return []InstanceAvailableVersion{}
	}

	items := make([]InstanceAvailableVersion, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenInstanceAvailableVersion(c, item.(map[string]interface{}), res))
	}

	return items
}
// expandInstanceAvailableVersion expands an instance of InstanceAvailableVersion into a JSON
// request object. All of this type's fields are output-only (see
// compareInstanceAvailableVersionNewStyle), so a non-empty input expands to an
// empty object.
func expandInstanceAvailableVersion(c *Client, f *InstanceAvailableVersion, res *Instance) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})

	return m, nil
}

// flattenInstanceAvailableVersion flattens an instance of InstanceAvailableVersion from a JSON
// response object. Non-map input yields nil; an empty value yields the shared
// EmptyInstanceAvailableVersion sentinel.
func flattenInstanceAvailableVersion(c *Client, i interface{}, res *Instance) *InstanceAvailableVersion {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &InstanceAvailableVersion{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyInstanceAvailableVersion
	}
	r.VersionNumber = dcl.FlattenString(m["versionNumber"])
	r.DefaultVersion = dcl.FlattenBool(m["defaultVersion"])
	r.AvailableFeatures = dcl.FlattenStringSlice(m["availableFeatures"])

	return r
}
// flattenInstanceTypeEnumMap flattens the contents of InstanceTypeEnum from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map. A
// non-string element makes flattenInstanceTypeEnum return nil, and the
// dereference here would panic; generated code assumes well-formed responses.
func flattenInstanceTypeEnumMap(c *Client, i interface{}, res *Instance) map[string]InstanceTypeEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]InstanceTypeEnum{}
	}

	if len(a) == 0 {
		return map[string]InstanceTypeEnum{}
	}

	items := make(map[string]InstanceTypeEnum)
	for k, item := range a {
		// item.(interface{}) is a no-op assertion emitted by the generator.
		items[k] = *flattenInstanceTypeEnum(item.(interface{}))
	}

	return items
}

// flattenInstanceTypeEnumSlice flattens the contents of InstanceTypeEnum from a JSON
// response object. Non-list or empty input yields an empty (non-nil) slice.
func flattenInstanceTypeEnumSlice(c *Client, i interface{}, res *Instance) []InstanceTypeEnum {
	a, ok := i.([]interface{})
	if !ok {
		return []InstanceTypeEnum{}
	}

	if len(a) == 0 {
		return []InstanceTypeEnum{}
	}

	items := make([]InstanceTypeEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenInstanceTypeEnum(item.(interface{})))
	}

	return items
}

// flattenInstanceTypeEnum asserts that an interface is a string, and returns a
// pointer to a *InstanceTypeEnum with the same value as that string.
// Non-string input yields nil.
func flattenInstanceTypeEnum(i interface{}) *InstanceTypeEnum {
	s, ok := i.(string)
	if !ok {
		return nil
	}

	return InstanceTypeEnumRef(s)
}

// flattenInstanceStateEnumMap flattens the contents of InstanceStateEnum from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenInstanceStateEnumMap(c *Client, i interface{}, res *Instance) map[string]InstanceStateEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]InstanceStateEnum{}
	}

	if len(a) == 0 {
		return map[string]InstanceStateEnum{}
	}

	items := make(map[string]InstanceStateEnum)
	for k, item := range a {
		items[k] = *flattenInstanceStateEnum(item.(interface{}))
	}

	return items
}

// flattenInstanceStateEnumSlice flattens the contents of InstanceStateEnum from a JSON
// response object. Non-list or empty input yields an empty (non-nil) slice.
func flattenInstanceStateEnumSlice(c *Client, i interface{}, res *Instance) []InstanceStateEnum {
	a, ok := i.([]interface{})
	if !ok {
		return []InstanceStateEnum{}
	}

	if len(a) == 0 {
		return []InstanceStateEnum{}
	}

	items := make([]InstanceStateEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenInstanceStateEnum(item.(interface{})))
	}

	return items
}

// flattenInstanceStateEnum asserts that an interface is a string, and returns a
// pointer to a *InstanceStateEnum with the same value as that string.
// Non-string input yields nil.
func flattenInstanceStateEnum(i interface{}) *InstanceStateEnum {
	s, ok := i.(string)
	if !ok {
		return nil
	}

	return InstanceStateEnumRef(s)
}
// matcher returns a function that checks whether a serialized resource matches this resource
// in its identity parameters (Project, Location, Name — the fields of a Get, which
// definitionally define resource identity). This is useful in extracting the
// element from a List call.
func (r *Instance) matcher(c *Client) func([]byte) bool {
	return func(b []byte) bool {
		cr, err := unmarshalInstance(b, c, r)
		if err != nil {
			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
			return false
		}
		nr := r.urlNormalized()
		ncr := cr.urlNormalized()
		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)

		// Each identity field gets three-way nil handling:
		// both nil -> equal; exactly one nil -> unequal; both set -> compare values.
		if nr.Project == nil && ncr.Project == nil {
			c.Config.Logger.Info("Both Project fields null - considering equal.")
		} else if nr.Project == nil || ncr.Project == nil {
			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
			return false
		} else if *nr.Project != *ncr.Project {
			return false
		}
		if nr.Location == nil && ncr.Location == nil {
			c.Config.Logger.Info("Both Location fields null - considering equal.")
		} else if nr.Location == nil || ncr.Location == nil {
			c.Config.Logger.Info("Only one Location field is null - considering unequal.")
			return false
		} else if *nr.Location != *ncr.Location {
			return false
		}
		if nr.Name == nil && ncr.Name == nil {
			c.Config.Logger.Info("Both Name fields null - considering equal.")
		} else if nr.Name == nil || ncr.Name == nil {
			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
			return false
		} else if *nr.Name != *ncr.Name {
			return false
		}
		return true
	}
}
// instanceDiff describes one required change to reconcile a desired Instance
// with its actual state.
type instanceDiff struct {
	// The diff should include one or the other of RequiresRecreate or UpdateOp.
	RequiresRecreate bool
	UpdateOp         instanceApiOperation
	FieldName        string // used for error logging
}
// convertFieldDiffsToInstanceDiffs groups field diffs by the operation that
// resolves them and converts each group into a single instanceDiff — either a
// recreate marker (for the reserved "Recreate" name) or a named update
// operation.
func convertFieldDiffsToInstanceDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]instanceDiff, error) {
	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
	// Map each operation name to the field diffs associated with it.
	for _, fd := range fds {
		for _, ro := range fd.ResultingOperation {
			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
				fieldDiffs = append(fieldDiffs, fd)
				opNamesToFieldDiffs[ro] = fieldDiffs
			} else {
				config.Logger.Infof("%s required due to diff: %v", ro, fd)
				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
			}
		}
	}
	var diffs []instanceDiff
	// For each operation name, create a instanceDiff which contains the operation.
	for opName, fieldDiffs := range opNamesToFieldDiffs {
		// Use the first field diff's field name for logging required recreate error.
		diff := instanceDiff{FieldName: fieldDiffs[0].FieldName}
		if opName == "Recreate" {
			diff.RequiresRecreate = true
		} else {
			apiOp, err := convertOpNameToInstanceApiOperation(opName, fieldDiffs, opts...)
			if err != nil {
				return diffs, err
			}
			diff.UpdateOp = apiOp
		}
		diffs = append(diffs, diff)
	}
	return diffs, nil
}
// convertOpNameToInstanceApiOperation maps a generated operation name to its
// concrete instanceApiOperation carrying the triggering field diffs. Unknown
// names are an error.
func convertOpNameToInstanceApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (instanceApiOperation, error) {
	switch opName {
	case "updateInstanceUpdateInstanceOperation":
		return &updateInstanceUpdateInstanceOperation{FieldDiffs: fieldDiffs}, nil

	default:
		return nil, fmt.Errorf("no such operation with name: %v", opName)
	}
}
// extractInstanceFields runs pre-apply field extraction over the resource's
// nested objects, substituting a fresh empty NetworkConfig when none is set so
// the nested extractor always has a target.
func extractInstanceFields(r *Instance) error {
	vNetworkConfig := r.NetworkConfig
	if vNetworkConfig == nil {
		// note: explicitly not the empty object.
		vNetworkConfig = &InstanceNetworkConfig{}
	}
	if err := extractInstanceNetworkConfigFields(r, vNetworkConfig); err != nil {
		return err
	}
	// Only write the nested object back if extraction populated it.
	if !dcl.IsEmptyValueIndirect(vNetworkConfig) {
		r.NetworkConfig = vNetworkConfig
	}
	return nil
}

// extractInstanceNetworkConfigFields is a generated hook with no extraction to
// perform for this type.
func extractInstanceNetworkConfigFields(r *Instance, o *InstanceNetworkConfig) error {
	return nil
}

// extractInstanceAvailableVersionFields is a generated hook with no extraction
// to perform for this type.
func extractInstanceAvailableVersionFields(r *Instance, o *InstanceAvailableVersion) error {
	return nil
}
// postReadExtractInstanceFields runs post-read field extraction over the
// resource's nested objects, mirroring extractInstanceFields.
func postReadExtractInstanceFields(r *Instance) error {
	vNetworkConfig := r.NetworkConfig
	if vNetworkConfig == nil {
		// note: explicitly not the empty object.
		vNetworkConfig = &InstanceNetworkConfig{}
	}
	if err := postReadExtractInstanceNetworkConfigFields(r, vNetworkConfig); err != nil {
		return err
	}
	// Only write the nested object back if extraction populated it.
	if !dcl.IsEmptyValueIndirect(vNetworkConfig) {
		r.NetworkConfig = vNetworkConfig
	}
	return nil
}

// postReadExtractInstanceNetworkConfigFields is a generated hook with no
// extraction to perform for this type.
func postReadExtractInstanceNetworkConfigFields(r *Instance, o *InstanceNetworkConfig) error {
	return nil
}

// postReadExtractInstanceAvailableVersionFields is a generated hook with no
// extraction to perform for this type.
func postReadExtractInstanceAvailableVersionFields(r *Instance, o *InstanceAvailableVersion) error {
	return nil
}
|
/*
The mode of a group of numbers is the value (or values) that occurs most often; a value must occur more than once to count as a mode.
Given a sorted array of numbers, return an array of all modes in ascending order.
Notes
In this challenge, every group of numbers is guaranteed to have at least one mode.
*/
package main
import (
"fmt"
)
// main runs mode against a batch of sample inputs and prints each result.
func main() {
	samples := [][]int{
		{1, 2, 3, 3, 6, 7, 8, 9},
		{2, 3, 3, 4, 4, 6, 7, 8},
		{1, 6, 6, 7, 7, 8, 9},
		{4, 4, 4, 6, 8, 9, 10, 10},
		{1, 4, 6, 7, 9, 9},
		{2, 2, 2, 3, 7, 8, 9, 9},
		{2, 4, 5, 5, 7, 8, 10, 10},
		{1, 1, 4, 4, 5, 7, 9},
		{2, 3, 3, 3, 4, 7, 9},
		{1, 1, 2, 4, 4, 6, 6, 9},
		{1, 2, 3, 3, 3, 7, 9, 10},
		{1, 2, 5, 6, 6, 6, 7, 10},
		{2, 2, 6, 9, 10, 10, 10},
		{1, 1, 5, 6, 6, 10, 10},
		{2, 2, 3, 3, 4, 8},
		{2, 3, 8, 10, 10, 10, 10},
		{2, 2, 3, 4, 6, 9},
		{1, 2, 5, 8, 9, 9, 10, 10},
		{2, 3, 3, 4, 4, 5},
		{2, 2, 3, 3, 4, 5, 10, 10},
		{1, 3, 3, 5, 5, 9, 10, 10},
	}
	for _, s := range samples {
		fmt.Println(mode(s))
	}
}
// mode returns, in ascending order, every value of the sorted slice a that
// occurs the maximal number of times. Per the problem statement a value only
// counts as a mode if it occurs more than once, so an input with no repeated
// values yields an empty result.
//
// The input must be sorted ascending; equal values are then adjacent, so each
// distinct value forms one contiguous run and can be counted in a single scan.
func mode(a []int) []int {
	// Pass 1: find the length of the longest run of equal values.
	best := 0
	for i := 0; i < len(a); {
		j := i
		for j < len(a) && a[j] == a[i] {
			j++
		}
		if j-i > best {
			best = j - i
		}
		// BUG FIX: always advance past the whole run. The old code failed to
		// advance after a multi-element run, re-visiting its last element with
		// a count of 1.
		i = j
	}
	// A mode must occur more than once; the old code returned every singleton
	// when the input had no duplicates at all.
	if best < 2 {
		return nil
	}
	// Pass 2: collect every value whose run length equals the maximum.
	// Runs appear in ascending value order, so the result is already sorted.
	var modes []int
	for i := 0; i < len(a); {
		j := i
		for j < len(a) && a[j] == a[i] {
			j++
		}
		if j-i == best {
			modes = append(modes, a[i])
		}
		i = j
	}
	return modes
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
|
package chapter8
import (
"fmt"
"testing"
)
// TestSearchPostingsListRecursive builds a four-node postings list (with jump
// pointers, including a backward jump and a self-jump), runs the recursive
// search, and prints the resulting Order of every node.
func TestSearchPostingsListRecursive(t *testing.T) {
	nodes := make([]*PostingsListElement, 4)
	for i := range nodes {
		nodes[i] = &PostingsListElement{Value: i + 1, Order: -1}
	}
	for i := 0; i < len(nodes)-1; i++ {
		nodes[i].Next = nodes[i+1]
	}
	nodes[3].Next = nil
	nodes[0].Jump = nodes[2]
	nodes[1].Jump = nodes[3]
	nodes[2].Jump = nodes[1] // backward jump
	nodes[3].Jump = nodes[3] // self jump
	fmt.Println("TestSearchPostingsListRecursive:")
	SearchPostingsListRecursive(nodes[0])
	for n := nodes[0]; n != nil; n = n.Next {
		fmt.Println(n.Order)
	}
	fmt.Println()
}
// TestSearchPostingsListIterative builds the same four-node postings list as
// the recursive test, runs the iterative search, and prints each node's Order.
func TestSearchPostingsListIterative(t *testing.T) {
	nodes := make([]*PostingsListElement, 4)
	for i := range nodes {
		nodes[i] = &PostingsListElement{Value: i + 1, Order: -1}
	}
	for i := 0; i < len(nodes)-1; i++ {
		nodes[i].Next = nodes[i+1]
	}
	nodes[3].Next = nil
	nodes[0].Jump = nodes[2]
	nodes[1].Jump = nodes[3]
	nodes[2].Jump = nodes[1] // backward jump
	nodes[3].Jump = nodes[3] // self jump
	fmt.Println("TestSearchPostingsListIterative:")
	SearchPostingsListIterative(nodes[0])
	for n := nodes[0]; n != nil; n = n.Next {
		fmt.Println(n.Order)
	}
	fmt.Println()
}
|
// Copyright 2013 The Go Circuit Project
// Use of this source code is governed by the license for
// The Go Circuit Project, found in the LICENSE file.
//
// Authors:
// 2013 Petar Maymounkov <p@gocircuit.org>
package main
import (
"flag"
"github.com/gocircuit/runtime/boot"
"github.com/gocircuit/runtime/circuit"
"github.com/gocircuit/runtime/sys/tcp"
"os"
)
// Command-line configuration: this node's listen address and, optionally, a
// peer address to dial.
var flagAddr = flag.String("addr", "", "Our address")
var flagDial = flag.String("dial", "", "Their address")

// main boots the circuit runtime on the given TCP address and exposes the
// "greet" service. When -dial is supplied it also dials the peer's "greet"
// service, calls Hello, and invokes Welcome on the cross-runtime value that
// Hello returns. It then blocks forever serving incoming calls.
func main() {
	flag.Parse()
	if err := boot.BootTCP(*flagAddr); err != nil {
		println("boot error:", err.Error())
		os.Exit(1)
	}
	// Export helloService under the name "greet".
	circuit.Listen("greet", &helloService{})
	if *flagDial != "" {
		a, err := tcp.ResolveAddr(*flagDial)
		if err != nil {
			println("dial resolve error:", err.Error())
			os.Exit(1)
		}
		x, err := circuit.TryDial(a, "greet")
		if err != nil {
			println("circuit dial error:", err.Error())
			os.Exit(1)
		}
		// Hello returns a cross-interface (circuit.X) wrapping a remote
		// welcomeService; invoke its Welcome method across the wire.
		x.Call("Hello")[0].(circuit.X).Call("Welcome")
	}
	// Block forever; the circuit runtime serves calls on its own goroutines.
	select {}
}
// helloService is the value exported under the "greet" name.
type helloService struct{}

// Hello prints "hello" and returns a cross-runtime reference to a fresh
// welcomeService, letting the caller invoke Welcome remotely.
func (s *helloService) Hello() circuit.X {
	println("hello")
	return circuit.Ref(&welcomeService{})
}

// welcomeService is handed back (by reference) from helloService.Hello.
type welcomeService struct{}

// Welcome prints "welcome".
func (s *welcomeService) Welcome() {
	println("welcome")
}

// Register both service types with the circuit runtime so their values can be
// transported across runtimes.
func init() {
	circuit.RegisterValue(&helloService{})
	circuit.RegisterValue(&welcomeService{})
}
|
package main
// Inc returns 43.
//
// Because the result is named, the deferred closure can still modify it after
// the return statement has assigned it: the closure captures v by reference,
// so the increment runs after 42 is stored in v but before the function
// actually returns to its caller (42 + 1 = 43).
func Inc() (v int) {
	defer func() {
		v++ // bumps the already-assigned result from 42 to 43
	}()
	v = 42
	return
}
|
package problem0501
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// findMode returns the most frequently occurring value(s) in a binary search
// tree that may contain duplicates. An in-order traversal visits values in
// sorted order, so equal values arrive consecutively and can be counted as
// runs — no hash map needed. A nil root yields an empty slice.
func findMode(root *TreeNode) []int {
	result := []int{}
	if root == nil {
		return result
	}
	var (
		prev     *TreeNode // previously visited node in in-order sequence
		maxTimes int       // best run length seen so far
		curTimes int       // length of the current run
	)
	var walk func(*TreeNode)
	walk = func(n *TreeNode) {
		if n == nil {
			return
		}
		walk(n.Left)
		if prev == nil || prev.Val != n.Val {
			curTimes = 1
		} else {
			curTimes++
		}
		switch {
		case curTimes > maxTimes:
			// New best: restart the result with this value only.
			result = []int{n.Val}
			maxTimes = curTimes
		case curTimes == maxTimes:
			result = append(result, n.Val)
		}
		prev = n
		walk(n.Right)
	}
	walk(root)
	return result
}
|
package main
import (
"fmt"
"regexp"
)
// main replaces every "i" in "sift rise" with "o" and prints the result.
func main() {
	pattern := regexp.MustCompile("i")
	replaced := pattern.ReplaceAllString("sift rise", "o")
	fmt.Println(replaced)
}
|
package dictator
import (
"crypto/rand"
"crypto/sha1"
"errors"
"fmt"
"math/big"
"time"
"gopkg.in/mgo.v2/bson"
)
type (
	// DictatorPayload is the BSON-encoded message exchanged between nodes
	// over UDP. Type is the message kind: 1 = heartbeat, 2 = command,
	// 3 = command response (IsDictatorPayload also accepts 4 — confirm that
	// kind's meaning against the protocol definition).
	DictatorPayload struct {
		Type       int    // message kind, see above
		Blob       []byte // kind-specific BSON payload (e.g. a CommandBlob)
		DictatorID string // ID of the sending node
	}

	// NodeContext carries everything one node needs to take part in the
	// dictator election and to run its mission.
	NodeContext struct {
		NodeID          string        // this node's unique ID (see NewNodeID)
		BecomeDictator  *time.Timer   // election timeout; reset by heartbeats
		SuicideChan     chan struct{} // tells this node's dictator role to die
		AppContext      Context       // application-wide context (logging, shutdown)
		UDPIn           chan UDPPacket
		UDPOut          chan UDPPacket
		Mission         MissionSpecs
		IsDictatorAlive bool // true while this node is acting as dictator
	}
)
// NewNodeID generates a fresh node identifier: the hex-encoded SHA-1 digest
// (40 characters) of 1000 cryptographically random bytes.
func NewNodeID() (string, error) {
	entropy := make([]byte, 1000)
	if _, err := rand.Read(entropy); err != nil {
		return "", err
	}
	digest := sha1.Sum(entropy)
	return fmt.Sprintf("%x", digest), nil
}
func NewRandomTimeout(min, max int) (time.Duration, error) {
minR := big.NewInt(int64(min))
maxR := big.NewInt(int64(max - min))
random, err := rand.Int(rand.Reader, maxR)
if err != nil {
return time.Duration(0), err
}
random = random.Add(random, minR)
return time.Duration(random.Int64()) * time.Millisecond, nil
}
// ReadDictatorPayload decodes the BSON body of a UDP packet into a
// DictatorPayload. On decode failure it returns the zero payload and the error.
func ReadDictatorPayload(packet UDPPacket) (DictatorPayload, error) {
	var payload DictatorPayload
	if err := bson.Unmarshal(packet.Payload, &payload); err != nil {
		return DictatorPayload{}, err
	}
	return payload, nil
}
// IsDictatorPayload reports whether the packet decodes to a DictatorPayload
// with a known message kind (Type 1 through 4).
func IsDictatorPayload(packet UDPPacket) bool {
	payload, err := ReadDictatorPayload(packet)
	if err != nil {
		return false
	}
	return payload.Type >= 1 && payload.Type <= 4
}

// IsThatMe reports whether the payload was sent by the node with the given ID.
func IsThatMe(id string, payload DictatorPayload) bool {
	return payload.DictatorID == id
}

// IsCommand reports whether the payload is a command message (Type 2).
func IsCommand(payload DictatorPayload) bool {
	return payload.Type == 2
}

// IsCommandResponse reports whether the payload is a command response (Type 3).
func IsCommandResponse(payload DictatorPayload) bool {
	return payload.Type == 3
}

// IsHeartbeat reports whether the payload is a heartbeat message (Type 1).
func IsHeartbeat(payload DictatorPayload) bool {
	return payload.Type == 1
}
// StatusMsg formats a log line as "<nodeID> - <msg>".
func StatusMsg(nodeID string, msg interface{}) string {
	formatted := fmt.Sprintf("%v - %v", nodeID, msg)
	return formatted
}
// HandlePacket reacts to one incoming UDP packet according to the dictator
// protocol: commands are dispatched through the mission's command router,
// command responses addressed to this node are forwarded to the mission's
// response channel, and heartbeats reset this node's election timer. Packets
// that do not decode to a valid DictatorPayload are silently ignored.
func (nodeCtx NodeContext) HandlePacket(packet UDPPacket) error {
	l := nodeCtx.AppContext.Log
	if IsDictatorPayload(packet) {
		payload, err := ReadDictatorPayload(packet)
		if err != nil {
			return err
		}

		// Ignore my own packets, except command responses (which may answer
		// commands this node issued while acting as dictator).
		if IsThatMe(nodeCtx.NodeID, payload) {
			if !IsCommandResponse(payload) {
				return nil
			}
		}

		// Execute the command of the great dictator.
		if IsCommand(payload) {
			// When another dictator takes over, this node's own dictator
			// role has to die and the node becomes a slave.
			if nodeCtx.IsDictatorAlive {
				nodeCtx.SuicideChan <- struct{}{}
			}
			r := nodeCtx.Mission.CommandRouter
			blob := CommandBlob{}
			err := bson.Unmarshal(payload.Blob, &blob)
			if err != nil {
				return err
			}
			fun, ok := r.FindHandler(blob.Name)
			if !ok {
				errMsg := StatusMsg(nodeCtx.NodeID, "Cannot find CommandHandler")
				return errors.New(errMsg)
			}
			return fun(nodeCtx, payload)
		}

		// Only command responses to commands this node sent are relevant.
		if IsCommandResponse(payload) {
			if IsThatMe(nodeCtx.NodeID, payload) {
				debugMsg := StatusMsg(nodeCtx.NodeID, "Receive command response")
				l.Debug.Println(debugMsg)
				nodeCtx.Mission.ResponseChan <- payload
				return nil
			}
			errMsg := StatusMsg(nodeCtx.NodeID, "Not my Command")
			return errors.New(errMsg)
		}

		// Heartbeat: a dictator is alive somewhere, so push the
		// become-dictator election timeout further into the future.
		if IsHeartbeat(payload) {
			// A foreign heartbeat while this node is dictator means it has
			// been superseded and must step down.
			if nodeCtx.IsDictatorAlive {
				nodeCtx.SuicideChan <- struct{}{}
			}
			timeout, err := NewRandomTimeout(500, 1500)
			if err != nil {
				return err
			}
			nodeCtx.BecomeDictator.Reset(timeout)
			return nil
		}
	}
	return nil
}
// LoopNode is the node's main event loop. It multiplexes application
// shutdown, incoming UDP packets, the become-dictator election timeout, and
// the death of this node's own dictator role. It returns on shutdown or when
// awakening as dictator fails.
func (nodeCtx NodeContext) LoopNode() {
	l := nodeCtx.AppContext.Log
	var err error
	var dictatorIsDead <-chan struct{}
	for {
		select {
		case <-nodeCtx.AppContext.DoneChan:
			debugMsg := StatusMsg(nodeCtx.NodeID, "Goodbye")
			l.Debug.Println(debugMsg)
			nodeCtx.SuicideChan <- struct{}{}
			return
		case packet := <-nodeCtx.UDPIn:
			//l.Debug.Println("Receive UDP packet", nodeCtx.NodeID)
			err = nodeCtx.HandlePacket(packet)
			if err != nil {
				errMsg := StatusMsg(nodeCtx.NodeID, err)
				l.Error.Println(errMsg)
			}
		case <-nodeCtx.BecomeDictator.C:
			// Election timeout fired with no heartbeat seen: stand up as
			// dictator ourselves.
			nodeCtx.BecomeDictator.Stop()
			dictatorIsDead, err = nodeCtx.AwakeDictator()
			// NOTE(review): IsDictatorAlive is set before err is checked, so
			// a failed AwakeDictator still marks the dictator alive just
			// before returning — confirm this ordering is intended.
			nodeCtx.IsDictatorAlive = true
			if err != nil {
				errMsg := StatusMsg(nodeCtx.NodeID, err)
				l.Error.Println(errMsg)
				return
			}
		case <-dictatorIsDead:
			// Our dictator role ended (see AwakeDictator); back to slave mode.
			nodeCtx.IsDictatorAlive = false
		}
	}
}
// AwakeDictator promotes this node to dictator: it starts the mission and
// spawns a goroutine that broadcasts a heartbeat on every ticker tick
// (randomized 100-150ms period). The returned channel receives exactly one
// value when the dictator role is killed via SuicideChan; on application
// shutdown the goroutine exits without sending.
func (nodeCtx NodeContext) AwakeDictator() (<-chan struct{}, error) {
	l := nodeCtx.AppContext.Log
	debugMsg := StatusMsg(nodeCtx.NodeID, "Time to enslave some people")
	l.Debug.Println(debugMsg)
	nodeID := nodeCtx.NodeID
	dictatorIsDead := make(chan struct{})
	timeout, err := NewRandomTimeout(100, 150)
	if err != nil {
		return nil, err
	}
	dictatorHeartbeat := time.NewTicker(timeout)
	// Run mission impossible
	nodeCtx.Mission.Mission(nodeCtx)
	go func() {
		for {
			select {
			case <-nodeCtx.AppContext.DoneChan:
				errMsg := StatusMsg(nodeID, "The world shutdown")
				l.Debug.Println(errMsg)
				dictatorHeartbeat.Stop()
				return
			case <-nodeCtx.SuicideChan:
				errMsg := StatusMsg(nodeID, "Dictator must die")
				l.Debug.Println(errMsg)
				dictatorHeartbeat.Stop()
				dictatorIsDead <- struct{}{}
				return
			case <-dictatorHeartbeat.C:
				// It's time to say hello to the people, so they
				// don't forget us.
				heartbeatPacket := DictatorPayload{
					Type:       1,
					DictatorID: nodeID,
					Blob:       []byte{},
				}
				p, err := bson.Marshal(heartbeatPacket)
				if err != nil {
					// Best effort: log and keep heartbeating.
					l.Error.Println(err.Error())
					continue
				}
				nodeCtx.UDPOut <- UDPPacket{
					Payload: p,
				}
			}
		}
	}()
	return dictatorIsDead, nil
}
// Node spawns a node goroutine that joins the dictator election under a fresh
// random node ID and returns immediately. The initial election timeout is
// randomized (500-1500ms) so that nodes do not all stand for election at the
// same moment; the node loop then runs until the application context is done.
func Node(ctx Context, udpIn, udpOut chan UDPPacket, missionSpecs MissionSpecs) error {
	nodeID, err := NewNodeID()
	if err != nil {
		return err
	}

	go func() {
		// First wait to see whether there is already a dictator.
		timeout, err := NewRandomTimeout(500, 1500)
		if err != nil {
			ctx.Log.Error.Println(err.Error())
			return
		}
		nodeCtx := NodeContext{
			NodeID:          nodeID,
			SuicideChan:     make(chan struct{}),
			BecomeDictator:  time.NewTimer(timeout),
			UDPIn:           udpIn,
			UDPOut:          udpOut,
			AppContext:      ctx,
			Mission:         missionSpecs,
			IsDictatorAlive: false,
		}
		nodeCtx.LoopNode()
	}()

	return nil
}
|
package mapreduce
import (
"encoding/json"
"log"
"os"
"sort"
)
// ByKey implements sort.Interface for []KeyValue, ordering pairs
// lexicographically by Key.
type ByKey []KeyValue

// Len reports the number of key/value pairs.
func (a ByKey) Len() int { return len(a) }

// Swap exchanges the pairs at indexes i and j.
func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less reports whether the pair at i sorts before the pair at j.
func (a ByKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
// doReduce manages one reduce task: it reads the intermediate key/value pairs
// (produced by the map phase) for this task, sorts them by key, calls the
// user-defined reduce function (reduceF) once per distinct key with all of
// that key's values, and writes the output as JSON-encoded KeyValue objects
// to outFile.
func doReduce(
	jobName string, // the name of the whole MapReduce job
	reduceTaskNumber int, // which reduce task this is
	outFile string, // write the output here
	nMap int, // the number of map tasks that were run ("M" in the paper)
	reduceF func(key string, values []string) string,
) {
	// Read one intermediate file per map task and decode its KeyValue stream.
	var keyValues []KeyValue
	for m := 0; m < nMap; m++ {
		fName := reduceName(jobName, m, reduceTaskNumber)
		f, err := os.Open(fName)
		if err != nil {
			log.Fatal(err)
		}
		dec := json.NewDecoder(f)
		for dec.More() {
			var kv KeyValue
			// BUG FIX: report the decode error itself; the old code logged
			// the stale (nil) error left over from os.Open.
			if err := dec.Decode(&kv); err != nil {
				f.Close()
				log.Fatal(err)
			}
			keyValues = append(keyValues, kv)
		}
		// BUG FIX: close each file once it is drained. The old defer-in-loop
		// kept every intermediate file open until the function returned.
		f.Close()
	}

	// Sort by key so all values belonging to one key become adjacent.
	sort.Sort(ByKey(keyValues))

	// BUG FIX: write to outFile as documented; the old code ignored the
	// parameter and wrote to mergeName(jobName, reduceTaskNumber) instead.
	out, err := os.Create(outFile)
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Walk the sorted pairs run by run, calling reduceF once per distinct
	// key and streaming each result to the encoder.
	enc := json.NewEncoder(out)
	for i := 0; i < len(keyValues); {
		j := i
		var values []string
		for j < len(keyValues) && keyValues[j].Key == keyValues[i].Key {
			values = append(values, keyValues[j].Value)
			j++
		}
		// BUG FIX: check the Encode error, not the stale err from os.Create.
		if err := enc.Encode(KeyValue{keyValues[i].Key, reduceF(keyValues[i].Key, values)}); err != nil {
			log.Fatal(err)
		}
		i = j
	}
}
|
package main
import (
"fmt"
"os"
"strings"
. "github.com/colefan/gsgo/tools/packettool"
"github.com/colefan/gsgo/tools/utils"
)
//Useage: packettool.exe xmlpath storepath
//
// main drives the packet-generation tool.
//
// Usage: packettool.exe protocol_desc_file packet_store_path
// When fewer than two arguments are given it falls back to hard-coded
// default paths and says so on stdout.
func main() {
	xmlPath := ""
	storePath := ""
	if len(os.Args) < 3 {
		fmt.Println("Useage:packettool.exe protocol_desc_file packet_store_path")
		xmlPath = "e:\\goproject\\src\\github.com\\colefan\\gsgo\\gameprotocol"
		storePath = "e:\\goproject\\src\\github.com\\colefan\\gsgo\\gameprotocol"
		fmt.Println("Packettool.exe will use default path : xmlpath = ", xmlPath, " ,storepath = ", storePath)
	} else {
		xmlPath = strings.TrimSpace(os.Args[1])
		storePath = strings.TrimSpace(os.Args[2])
	}
	// BUG FIX: the original tested len(...) < 0, which can never be true;
	// the intent is clearly an empty-string check.
	if len(xmlPath) == 0 {
		fmt.Println("protocol_desc_file is empty:(")
		return
	}
	if len(storePath) == 0 {
		fmt.Println("packet_store_path is empty:(")
		return
	}
	files, err := toolsutils.ListDir(xmlPath, ".xml")
	if err != nil {
		fmt.Println("xmlpath read error:", err)
		return
	}
	// Generate a Go packet file for every protocol XML found in the directory.
	for _, filepath := range files {
		fmt.Println("filepath :", filepath)
		if perr := GenPacketForGoLang(storePath, filepath); perr != nil {
			fmt.Println("GenPacket error, filename =", filepath, ",error:", perr)
		}
	}
}
|
package deprecated
// Deprecation records, for one standard-library identifier, the Go 1.x
// minor release in which it was deprecated and the release in which the
// recommended alternative became available.
type Deprecation struct {
	// Minor version of Go 1.x that deprecated the identifier.
	DeprecatedSince int
	// Minor version of Go 1.x that introduced the alternative
	// (0 presumably means "always available" or "no in-tree alternative"
	// — confirm against the checker that consumes this table).
	AlternativeAvailableSince int
}
// Stdlib maps standard-library identifiers — package-level names, or
// methods/fields qualified with a parenthesized receiver type — to
// their Deprecation record. Versions are Go 1.x minor numbers.
var Stdlib = map[string]Deprecation{
	// FIXME(dh): AllowBinary isn't being detected as deprecated
	// because the comment has a newline right after "Deprecated:"
	"go/build.AllowBinary":                      {7, 7},
	"(archive/zip.FileHeader).CompressedSize":   {1, 1},
	"(archive/zip.FileHeader).UncompressedSize": {1, 1},
	"(archive/zip.FileHeader).ModifiedTime":     {10, 10},
	"(archive/zip.FileHeader).ModifiedDate":     {10, 10},
	"(*archive/zip.FileHeader).ModTime":         {10, 10},
	"(*archive/zip.FileHeader).SetModTime":      {10, 10},
	"(go/doc.Package).Bugs":                     {1, 1},
	"os.SEEK_SET":                               {7, 7},
	"os.SEEK_CUR":                               {7, 7},
	"os.SEEK_END":                               {7, 7},
	"(net.Dialer).Cancel":                       {7, 7},
	"runtime.CPUProfile":                        {9, 0},
	"compress/flate.ReadError":                  {6, 6},
	"compress/flate.WriteError":                 {6, 6},
	"path/filepath.HasPrefix":                   {0, 0},
	"(net/http.Transport).Dial":                 {7, 7},
	"(*net/http.Transport).CancelRequest":       {6, 5},
	"net/http.ErrWriteAfterFlush":               {7, 0},
	"net/http.ErrHeaderTooLong":                 {8, 0},
	"net/http.ErrShortBody":                     {8, 0},
	"net/http.ErrMissingContentLength":          {8, 0},
	"net/http/httputil.ErrPersistEOF":           {0, 0},
	"net/http/httputil.ErrClosed":               {0, 0},
	"net/http/httputil.ErrPipeline":             {0, 0},
	"net/http/httputil.ServerConn":              {0, 0},
	"net/http/httputil.NewServerConn":           {0, 0},
	"net/http/httputil.ClientConn":              {0, 0},
	"net/http/httputil.NewClientConn":           {0, 0},
	"net/http/httputil.NewProxyClientConn":      {0, 0},
	"(net/http.Request).Cancel":                 {7, 7},
	"(text/template/parse.PipeNode).Line":       {1, 1},
	"(text/template/parse.ActionNode).Line":     {1, 1},
	"(text/template/parse.BranchNode).Line":     {1, 1},
	"(text/template/parse.TemplateNode).Line":   {1, 1},
	"database/sql/driver.ColumnConverter":       {9, 9},
	"database/sql/driver.Execer":                {8, 8},
	"database/sql/driver.Queryer":               {8, 8},
	"(database/sql/driver.Conn).Begin":          {8, 8},
	"(database/sql/driver.Stmt).Exec":           {8, 8},
	"(database/sql/driver.Stmt).Query":          {8, 8},
	"syscall.StringByteSlice":                   {1, 1},
	"syscall.StringBytePtr":                     {1, 1},
	"syscall.StringSlicePtr":                    {1, 1},
	"syscall.StringToUTF16":                     {1, 1},
	"syscall.StringToUTF16Ptr":                  {1, 1},
	"(*regexp.Regexp).Copy":                     {12, 12},
	"(archive/tar.Header).Xattrs":               {10, 10},
	"archive/tar.TypeRegA":                      {11, 1},
	"go/types.NewInterface":                     {11, 11},
	"(*go/types.Interface).Embedded":            {11, 11},
	"go/importer.For":                           {12, 12},
	"encoding/json.InvalidUTF8Error":            {2, 2},
	"encoding/json.UnmarshalFieldError":         {2, 2},
	"encoding/csv.ErrTrailingComma":             {2, 2},
	"(encoding/csv.Reader).TrailingComma":       {2, 2},
	"(net.Dialer).DualStack":                    {12, 12},
	"net/http.ErrUnexpectedTrailer":             {12, 12},
	"net/http.CloseNotifier":                    {11, 7},
	"net/http.ProtocolError":                    {8, 8},
	"(crypto/x509.CertificateRequest).Attributes": {5, 3},
	// This function has no alternative, but also no purpose.
	"(*crypto/rc4.Cipher).Reset":                     {12, 0},
	"(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7},
	"image.ZP":                           {13, 0},
	"image.ZR":                           {13, 0},
	"(*debug/gosym.LineTable).LineToPC":  {2, 2},
	"(*debug/gosym.LineTable).PCToLine":  {2, 2},
	"crypto/tls.VersionSSL30":            {13, 0},
	"(crypto/tls.Config).NameToCertificate": {14, 14},
	"(*crypto/tls.Config).BuildNameToCertificate": {14, 14},
	"image/jpeg.Reader": {4, 0},
	// All of these have been deprecated in favour of external libraries
	"syscall.AttachLsf":                     {7, 0},
	"syscall.DetachLsf":                     {7, 0},
	"syscall.LsfSocket":                     {7, 0},
	"syscall.SetLsfPromisc":                 {7, 0},
	"syscall.LsfJump":                       {7, 0},
	"syscall.LsfStmt":                       {7, 0},
	"syscall.BpfStmt":                       {7, 0},
	"syscall.BpfJump":                       {7, 0},
	"syscall.BpfBuflen":                     {7, 0},
	"syscall.SetBpfBuflen":                  {7, 0},
	"syscall.BpfDatalink":                   {7, 0},
	"syscall.SetBpfDatalink":                {7, 0},
	"syscall.SetBpfPromisc":                 {7, 0},
	"syscall.FlushBpf":                      {7, 0},
	"syscall.BpfInterface":                  {7, 0},
	"syscall.SetBpfInterface":               {7, 0},
	"syscall.BpfTimeout":                    {7, 0},
	"syscall.SetBpfTimeout":                 {7, 0},
	"syscall.BpfStats":                      {7, 0},
	"syscall.SetBpfImmediate":               {7, 0},
	"syscall.SetBpf":                        {7, 0},
	"syscall.CheckBpfVersion":               {7, 0},
	"syscall.BpfHeadercmpl":                 {7, 0},
	"syscall.SetBpfHeadercmpl":              {7, 0},
	"syscall.RouteRIB":                      {8, 0},
	"syscall.RoutingMessage":                {8, 0},
	"syscall.RouteMessage":                  {8, 0},
	"syscall.InterfaceMessage":              {8, 0},
	"syscall.InterfaceAddrMessage":          {8, 0},
	"syscall.ParseRoutingMessage":           {8, 0},
	"syscall.ParseRoutingSockaddr":          {8, 0},
	"syscall.InterfaceAnnounceMessage":      {7, 0},
	"syscall.InterfaceMulticastAddrMessage": {7, 0},
	"syscall.FormatMessage":                 {5, 0},
}
|
package main
import "fmt"
// main demonstrates calling a package-level function variable.
func main() {
	fmt.Println("This is main function!!")
	fmt.Println(sum(1, 2))
}

// sum adds two integers. Declared as a variable rather than a func so
// the implementation could be swapped at runtime.
var sum = func(x, y int) int {
	return x + y
}
|
// Package document - пакет обеспечивает создание структур документа реализующих контракт Documenter
package document
import (
"fmt"
"regexp"
"strings"
)
// Documenter is the contract for indexable documents: a numeric id, a
// title, the list of words extracted from the document, and a printable
// form (fmt.Stringer).
type Documenter interface {
	Id() int
	Title() string
	Words() []string
	fmt.Stringer
}
// WebPage is a Documenter backed by a URL and a title.
// NOTE(review): the embedded Documenter field is never populated by New
// and WebPage implements every interface method itself, so the
// embedding looks redundant — confirm before removing.
type WebPage struct {
	id int
	title string
	url string
	Documenter
}
// New constructs a WebPage with the given id, url and title and returns
// it as a Documenter.
func New(id int, url string, title string) Documenter {
	return WebPage{id: id, title: title, url: url}
}
// Id returns the document identifier.
func (d WebPage) Id() int {
	return d.id
}

// Title returns the page title.
func (d WebPage) Title() string {
	return d.title
}

// Url returns the page URL.
func (d WebPage) Url() string {
	return d.url
}
// Patterns used by Words, compiled once at package scope. The original
// called regexp.MustCompile on every invocation, recompiling both
// patterns each time Words ran.
var (
	nonWordRe    = regexp.MustCompile(`[^a-zA-Zа-яА-Я0-9]`)
	whitespaceRe = regexp.MustCompile(`\s+`)
)

// Words lowercases the URL and title, strips non-alphanumeric
// characters (Latin and Cyrillic letters and digits are kept), and
// returns the resulting word list.
func (d WebPage) Words() []string {
	str := strings.Trim(strings.ToLower(d.url+" "+d.title), "\r\n ")
	str = nonWordRe.ReplaceAllString(str, " ")
	str = whitespaceRe.ReplaceAllString(str, ",")
	return strings.Split(str, ",")
}
// String implements fmt.Stringer as "<url>: <title>".
func (d WebPage) String() string {
	return fmt.Sprintf("%s: %s", d.url, d.title)
}
|
package main
import (
"fmt"
"log"
"math/big"
"time"
)
// main times how long math/big takes to compute the binomial
// coefficient C(1000, 10), printing the value and logging the duration.
func main() {
	began := time.Now()
	result := new(big.Int).Binomial(1000, 10)
	fmt.Println(result)
	log.Printf("Binomial took %s", time.Since(began))
}
|
package main
import (
"fmt"
"github.com/jackytck/projecteuler/tools"
)
// extend applies the 6/9 rule: since a die face showing 6 can be turned
// upside down to show 9 (and vice versa), a die that has one of the two
// digits effectively also has the other. Returns d itself when no
// extension is needed.
//
// BUG FIX: the original appended directly to the caller's slice; if d
// had spare capacity the append could overwrite data in a backing array
// shared with other slices. The extended result is now built on a fresh
// copy.
func extend(d []int) []int {
	has6 := tools.IncludesInt(d, 6)
	has9 := tools.IncludesInt(d, 9)
	if has6 == has9 {
		// Either both digits present or neither: nothing to add.
		return d
	}
	out := make([]int, len(d), len(d)+1)
	copy(out, d)
	if has6 {
		return append(out, 9)
	}
	return append(out, 6)
}
// isValidPair reports whether the digits a and b can be displayed using
// one digit from each die, in either orientation (a on d1 and b on d2,
// or the reverse).
func isValidPair(d1, d2 []int, a, b int) bool {
	return tools.IncludesInt(d1, a) && tools.IncludesInt(d2, b) ||
		tools.IncludesInt(d1, b) && tools.IncludesInt(d2, a)
}
// isValid reports whether the two dice, after treating 6 and 9 as
// interchangeable, can display every two-digit square
// (01, 04, 09, 16, 25, 36, 49, 64, 81).
func isValid(d1, d2 []int) bool {
	d1, d2 = extend(d1), extend(d2)
	squares := [][2]int{
		{0, 1}, {0, 4}, {0, 9},
		{1, 6}, {2, 5}, {3, 6},
		{4, 9}, {6, 4}, {8, 1},
	}
	for _, sq := range squares {
		if !isValidPair(d1, d2, sq[0], sq[1]) {
			return false
		}
	}
	return true
}
// hash builds an order-independent key for a pair of dice so that
// (d1, d2) and (d2, d1) collapse to the same set entry: the smaller
// digit-string always comes first.
func hash(d1, d2 []int) string {
	a := tools.JoinIntsString(d1...)
	b := tools.JoinIntsString(d2...)
	if a > b {
		a, b = b, a
	}
	return a + b
}
// solve counts the distinct unordered pairs of 6-digit dice that can
// display all the two-digit squares. CombIndex enumerates every
// 6-of-10 digit selection; every pair is tried and de-duplicated via
// its canonical hash.
func solve() int {
	set := make(map[string]bool)
	for d1 := range tools.CombIndex(10, 6) {
		for d2 := range tools.CombIndex(10, 6) {
			if isValid(d1, d2) {
				h := hash(d1, d2)
				set[h] = true
			}
		}
	}
	return len(set)
}
// main prints the number of valid cube arrangements.
func main() {
	fmt.Println(solve())
}
// How many distinct arrangements of the two cubes allow for all of the square
// numbers to be displayed?
// Note:
// Space is small. Just brute force.
|
package queue
import "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
// PeerQueue maintains a set of peers ordered according to a metric.
// Implementations of PeerQueue could order peers based on distances along
// a KeySpace, latency measurements, trustworthiness, reputation, etc.
type PeerQueue interface {
	// Len returns the number of items in PeerQueue.
	Len() int
	// Enqueue adds this node to the queue.
	Enqueue(peer.ID)
	// Dequeue retrieves and removes the highest-priority (smallest
	// metric value) node.
	Dequeue() peer.ID
}
|
package intstr
import "k8s.io/apimachinery/pkg/util/intstr"
// ParsePtr parses val as an IntOrString (a convenience wrapper around
// intstr.Parse) and returns a pointer to the result.
func ParsePtr(val string) *intstr.IntOrString {
	x := intstr.Parse(val)
	return &x
}
|
package main
import "fmt"
// main reads n name/number pairs from standard input to build a phone
// book, then answers lookup queries (one name per line) until EOF,
// printing "name=number" on a hit and "Not found" otherwise.
func main() {
	var entryCount int
	fmt.Scan(&entryCount)
	directory := make(map[string]int, entryCount)
	for i := 0; i < entryCount; i++ {
		var (
			person string
			number int
		)
		fmt.Scan(&person, &number)
		directory[person] = number
	}
	for {
		var query string
		if _, err := fmt.Scan(&query); err != nil {
			// EOF (or any read failure) ends the query loop.
			break
		}
		if number, found := directory[query]; found {
			fmt.Printf("%v=%v\n", query, number)
		} else {
			fmt.Println("Not found")
		}
	}
}
|
package tcp
import (
"encoding/binary"
"chat-system/log"
"chat-system/util"
)
// INetMsg is the framing contract for a wire message: a fixed-size
// header carrying version, check code, packet length and main/sub
// command, followed by an opaque body, with optional whole-message
// encryption/decryption through a Coder.
type INetMsg interface {
	Clone() INetMsg
	Version() uint8
	SetVersion(version uint8)
	CheckCode() uint8
	SetCheckCode(code uint8)
	PackLen() uint16
	SetPackLen(length uint16)
	MainCmd() uint16
	SetMainCmd(cmd uint16)
	SubCmd() uint16
	SetSubCmd(cmd uint16)
	HeadLen() int
	TotalLen() int
	// TotalEncryptedLen() int
	Body() []byte
	BodyLen() int
	SetBody(data []byte)
	PackData() []byte
	// EncryptedPackData() []byte
	Encrypt(coder Coder) ([]byte, error)
	Decrypt(coder Coder) error
	//GetClient() ITcpClient
}
// NetMsg is the default INetMsg implementation. The entire packet —
// header followed by body — lives in the single Buf byte slice; the
// accessor methods read/write fixed offsets within it.
type NetMsg struct {
	//Client ITcpClient
	Buf []byte
	// Encrypted []byte
	// flag int32
}
// Clone returns a copy of the message that owns an independent copy of
// the underlying packet buffer.
func (msg *NetMsg) Clone() INetMsg {
	dup := make([]byte, len(msg.Buf))
	copy(dup, msg.Buf)
	return &NetMsg{Buf: dup}
}
// Version returns the protocol version byte (header offset 0).
func (msg *NetMsg) Version() uint8 {
	return uint8(msg.Buf[0])
}

// SetVersion stores the protocol version byte (header offset 0).
func (msg *NetMsg) SetVersion(version uint8) {
	msg.Buf[0] = byte(version)
}

// CheckCode returns the check-code byte (header offset 1).
func (msg *NetMsg) CheckCode() uint8 {
	return uint8(msg.Buf[1])
}

// SetCheckCode stores the check-code byte (header offset 1).
func (msg *NetMsg) SetCheckCode(code uint8) {
	msg.Buf[1] = byte(code)
}

// PackLen returns the little-endian packet length field (header offset 2).
func (msg *NetMsg) PackLen() uint16 {
	return uint16(binary.LittleEndian.Uint16(msg.Buf[2:]))
}

// SetPackLen stores the little-endian packet length field (header offset 2).
func (msg *NetMsg) SetPackLen(length uint16) {
	binary.LittleEndian.PutUint16(msg.Buf[2:], length)
}

// MainCmd returns the little-endian main command (header offset 4).
func (msg *NetMsg) MainCmd() uint16 {
	return uint16(binary.LittleEndian.Uint16(msg.Buf[4:]))
}

// SetMainCmd stores the little-endian main command (header offset 4).
func (msg *NetMsg) SetMainCmd(cmd uint16) {
	binary.LittleEndian.PutUint16(msg.Buf[4:], cmd)
}

// SubCmd returns the little-endian sub command (header offset 6).
func (msg *NetMsg) SubCmd() uint16 {
	return uint16(binary.LittleEndian.Uint16(msg.Buf[6:]))
}

// SetSubCmd stores the little-endian sub command (header offset 6).
func (msg *NetMsg) SetSubCmd(cmd uint16) {
	binary.LittleEndian.PutUint16(msg.Buf[6:], cmd)
}
// HeadLen returns the fixed header size in bytes.
func (msg *NetMsg) HeadLen() int {
	return DEFAULT_PACK_HEAD_LEN
}

// TotalLen returns the full packet length (header + body).
func (msg *NetMsg) TotalLen() int {
	return len(msg.Buf)
}

// func (msg *NetMsg) TotalEncryptedLen() int {
// 	return len(msg.Encrypted)
// }

// Body returns the payload bytes following the header (a view into
// Buf, not a copy).
func (msg *NetMsg) Body() []byte {
	return msg.Buf[DEFAULT_PACK_HEAD_LEN:]
}

// BodyLen returns the payload length in bytes.
func (msg *NetMsg) BodyLen() int {
	return len(msg.Buf[DEFAULT_PACK_HEAD_LEN:])
}
// SetBody replaces the payload with data, growing or shrinking Buf so
// that header + payload fit exactly, then copies data in.
func (msg *NetMsg) SetBody(data []byte) {
	// Extra bytes Buf needs to hold the new body (negative: must shrink).
	needLen := len(data) - len(msg.Buf) + DEFAULT_PACK_HEAD_LEN
	if needLen > 0 {
		msg.Buf = append(msg.Buf, make([]byte, needLen)...)
	} else if needLen < 0 {
		msg.Buf = msg.Buf[:len(data)+DEFAULT_PACK_HEAD_LEN]
	}
	copy(msg.Buf[DEFAULT_PACK_HEAD_LEN:], data)
}
// PackData returns the raw packet bytes, header and body (not a copy).
func (msg *NetMsg) PackData() []byte {
	return msg.Buf
}
// func (msg *NetMsg) EncryptedPackData() []byte {
// return msg.Encrypted
// }
// Encrypt returns the encrypted packet bytes. A nil coder means "no
// encryption" and the raw buffer is returned unchanged. The buffer is
// copied before being handed to the Coder so Encrypt cannot mutate
// msg.Buf in place.
func (msg *NetMsg) Encrypt(coder Coder) ([]byte, error) {
	if coder == nil {
		return msg.Buf, nil
	}
	// BUG FIX: the original silently returned the UNENCRYPTED buffer with
	// a nil error whenever coder.Encrypt failed, so callers would send
	// plaintext believing it was encrypted. Propagate the failure instead.
	encrypted, err := coder.Encrypt(append([]byte{}, msg.Buf...))
	if err != nil {
		return nil, err
	}
	return encrypted, nil
}
// Decrypt replaces the packet buffer with its decrypted form. A nil
// coder is a no-op. On failure the buffer is left untouched and the
// Coder's error is returned.
func (msg *NetMsg) Decrypt(coder Coder) error {
	if coder == nil {
		return nil
	}
	decrypted, err := coder.Decrypt(msg.Buf)
	if err != nil {
		return err
	}
	msg.Buf = decrypted
	return nil
}
// func (msg *NetMsg) GetClient() ITcpClient {
// return msg.Client
// }
// NewNetMsg builds a message with the given main/sub commands and an
// optional body (data is serialized via util.DataToBuf). A body larger
// than DEFAULT_MAX_BODY_LEN is dropped with a debug log and an
// empty-body message is produced instead.
func NewNetMsg(mainCmd uint16, subCmd uint16, data interface{}) *NetMsg {
	msg := NetMsg{}
	if data != nil {
		buf := util.DataToBuf(data)
		if buf != nil {
			if len(buf) <= DEFAULT_MAX_BODY_LEN {
				msg.Buf = append(make([]byte, DEFAULT_PACK_HEAD_LEN), buf...)
			} else {
				log.Debug("NewNetMsg failed: body len(%d) > DEFAULT_MAX_BODY_LEN(%d)", len(buf), DEFAULT_MAX_BODY_LEN)
			}
		}
	}
	// Ensure a header-only buffer exists even when there is no body.
	if len(msg.Buf) == 0 {
		msg.Buf = make([]byte, DEFAULT_PACK_HEAD_LEN)
	}
	msg.Buf[0] = SOCKET_VER
	// NOTE(review): msg.Buf already includes the header, so this sets the
	// length field to total+header — it looks double-counted; confirm
	// against the peer's framing code before changing.
	msg.SetPackLen(uint16(len(msg.Buf) + DEFAULT_PACK_HEAD_LEN))
	msg.SetMainCmd(mainCmd)
	msg.SetSubCmd(subCmd)
	return &msg
}
|
/*
* @lc app=leetcode.cn id=150 lang=golang
*
* [150] 逆波兰表达式求值
*/
// @lc code=start
// 前提是 s 是有效的表达式
package main
import "fmt"
import "strconv"
// isOperator reports whether s is one of the four supported binary
// arithmetic operators.
func isOperator(s string) bool {
	return s == "+" || s == "-" || s == "*" || s == "/"
}

// doOperate applies the binary operator ope to the operands (a ope b).
// Division is Go integer division, truncating toward zero.
func doOperate(a, b int, ope string) (res int) {
	switch ope {
	case "+":
		res = a + b
	case "-":
		res = a - b
	case "*":
		res = a * b
	case "/":
		res = a / b
	}
	return
}

// evalRPN evaluates a valid Reverse Polish Notation expression and
// returns its integer value. A nil or empty token list yields 0.
func evalRPN(tokens []string) int {
	// BUG FIX: also guard the empty non-nil slice, which previously
	// panicked on stack[0]. The leftover debug fmt.Printf inside the
	// loop has been removed as well.
	if len(tokens) == 0 {
		return 0
	}
	stack := make([]int, 0, len(tokens))
	for _, tok := range tokens {
		if isOperator(tok) {
			// Pop the two operands: second-from-top is the left operand.
			a := stack[len(stack)-2]
			b := stack[len(stack)-1]
			stack = stack[:len(stack)-2]
			stack = append(stack, doOperate(a, b, tok))
		} else {
			n, _ := strconv.Atoi(tok)
			stack = append(stack, n)
		}
	}
	return stack[0]
}
// @lc code=end
// main evaluates a handful of sample RPN expressions and prints each
// result on its own line.
func main() {
	samples := [][]string{
		{"2", "1", "+", "3", "*"},
		{"4", "13", "5", "/", "+"},
		{"10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"},
		{"18"},
	}
	for _, tokens := range samples {
		fmt.Println(evalRPN(tokens))
	}
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core_test
import (
"fmt"
"strings"
"testing"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/testkit"
driver "github.com/pingcap/tidb/types/parser_driver"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
// TestFixControl44823 verifies fix-control #44823, which caps the
// number of in-list values a (non-)prepared statement may have and
// still enter the plan cache. The default limit rejects a 201-value
// list; raising it to 250 allows caching; setting it to 0 disables the
// limit entirely.
func TestFixControl44823(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t (a int)`)
	var va []string
	for i := 0; i < 201; i++ {
		tk.MustExec(fmt.Sprintf(`set @a%v = %v`, i, i))
		va = append(va, fmt.Sprintf("@a%v", i))
	}

	// prepared plan cache: 201 parameters exceeds the default limit.
	tk.MustExec(fmt.Sprintf(`prepare st from 'select * from t where a in (%v?)'`, strings.Repeat("?,", 200)))
	tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1105 skip prepared plan-cache: too many values in in-list`))
	tk.MustExec(fmt.Sprintf(`execute st using %v`, strings.Join(va, ",")))
	tk.MustExec(fmt.Sprintf(`execute st using %v`, strings.Join(va, ",")))
	tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0"))

	// Raising the limit to 250 makes the same statement cacheable.
	tk.MustExec(`set @@tidb_opt_fix_control = "44823:250"`)
	tk.MustExec(fmt.Sprintf(`prepare st from 'select * from t where a in (%v?)'`, strings.Repeat("?,", 200)))
	tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning
	tk.MustExec(fmt.Sprintf(`execute st using %v`, strings.Join(va, ",")))
	tk.MustExec(fmt.Sprintf(`execute st using %v`, strings.Join(va, ",")))
	tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can hit

	// 0 disables the limit check altogether.
	tk.MustExec(`set @@tidb_opt_fix_control = "44823:0"`)
	tk.MustExec(fmt.Sprintf(`prepare st from 'select * from t where a in (%v?)'`, strings.Repeat("?,", 200)))
	tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning
	tk.MustExec(fmt.Sprintf(`execute st using %v`, strings.Join(va, ",")))
	tk.MustExec(fmt.Sprintf(`execute st using %v`, strings.Join(va, ",")))
	tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1"))

	// non prepared plan cache: same limit, literal in-list values.
	values := make([]string, 0, 201)
	for i := 0; i < 201; i++ {
		values = append(values, fmt.Sprintf("%v", i))
	}
	query := fmt.Sprintf("select * from t where a in (%v)", strings.Join(values, ","))
	tk.MustExec(`set tidb_enable_non_prepared_plan_cache=1`)
	tk.MustExec(`set @@tidb_opt_fix_control = ""`)
	tk.MustQuery(query).Check(testkit.Rows())
	tk.MustQuery(query).Check(testkit.Rows())
	tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0"))
	tk.MustExec(`set @@tidb_opt_fix_control = "44823:250"`)
	tk.MustQuery(query).Check(testkit.Rows())
	tk.MustQuery(query).Check(testkit.Rows())
	tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1"))
	tk.MustExec(`set @@tidb_opt_fix_control = "44823:0"`)
	tk.MustQuery(query).Check(testkit.Rows())
	tk.MustQuery(query).Check(testkit.Rows())
	tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1"))
}
// TestCacheable exercises core.Cacheable / core.CacheableWithCtx across
// every statement kind: unsupported statements are rejected outright;
// Insert/Delete/Update/Select are cacheable unless they contain an
// uncacheable function, an ignore-plan-cache hint, a parameterized
// ORDER BY item, a frame bound parameter, or reference a partitioned
// table.
func TestCacheable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	mockCtx := mock.NewContext()
	mockCtx.GetSessionVars().EnablePlanCacheForParamLimit = true
	mockCtx.GetSessionVars().EnablePlanCacheForSubquery = true

	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t1(a int, b int) partition by range(a) ( partition p0 values less than (6), partition p1 values less than (11) )")
	tk.MustExec("create table t2(a int, b int) partition by hash(a) partitions 11")
	tk.MustExec("create table t3(a int, b int)")
	tbl := &ast.TableName{Schema: model.NewCIStr("test"), Name: model.NewCIStr("t3")}
	is := tk.Session().GetInfoSchema().(infoschema.InfoSchema)
	// test non-SelectStmt/-InsertStmt/-DeleteStmt/-UpdateStmt/-SetOprStmt
	var stmt ast.Node = &ast.ShowStmt{}
	require.False(t, core.Cacheable(stmt, is))

	stmt = &ast.LoadDataStmt{}
	require.False(t, core.Cacheable(stmt, is))

	stmt = &ast.ImportIntoStmt{}
	require.False(t, core.Cacheable(stmt, is))

	// test SetOprStmt
	stmt = &ast.SetOprStmt{}
	require.True(t, core.Cacheable(stmt, is))

	tableRefsClause := &ast.TableRefsClause{TableRefs: &ast.Join{Left: &ast.TableSource{Source: tbl}}}
	// test InsertStmt
	stmt = &ast.InsertStmt{Table: tableRefsClause} // insert-values-stmt
	require.True(t, core.Cacheable(stmt, is))
	stmt = &ast.InsertStmt{Table: tableRefsClause, Select: &ast.SelectStmt{}} // insert-select-stmt
	require.True(t, core.Cacheable(stmt, is))

	// test DeleteStmt
	whereExpr := &ast.FuncCallExpr{}
	stmt = &ast.DeleteStmt{
		TableRefs: tableRefsClause,
		Where:     whereExpr,
	}
	require.True(t, core.Cacheable(stmt, is))

	// Every uncacheable function in the WHERE clause must block caching.
	for funcName := range expression.UnCacheableFunctions {
		whereExpr.FnName = model.NewCIStr(funcName)
		require.False(t, core.Cacheable(stmt, is))
	}

	whereExpr.FnName = model.NewCIStr(ast.Rand)
	require.True(t, core.Cacheable(stmt, is))

	stmt = &ast.DeleteStmt{
		TableRefs: tableRefsClause,
		Where:     &ast.ExistsSubqueryExpr{Sel: &ast.SubqueryExpr{Query: &ast.SelectStmt{}}},
	}
	c, _ := core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	// LIMIT with a parameter marker (count, offset, or neither) stays cacheable.
	limitStmt := &ast.Limit{
		Count: &driver.ParamMarkerExpr{},
	}
	stmt = &ast.DeleteStmt{
		TableRefs: tableRefsClause,
		Limit:     limitStmt,
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	limitStmt = &ast.Limit{
		Offset: &driver.ParamMarkerExpr{},
	}
	stmt = &ast.DeleteStmt{
		TableRefs: tableRefsClause,
		Limit:     limitStmt,
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	limitStmt = &ast.Limit{}
	stmt = &ast.DeleteStmt{
		TableRefs: tableRefsClause,
		Limit:     limitStmt,
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	// The ignore-plan-cache hint always blocks caching.
	stmt.(*ast.DeleteStmt).TableHints = append(stmt.(*ast.DeleteStmt).TableHints, &ast.TableOptimizerHint{
		HintName: model.NewCIStr(core.HintIgnorePlanCache),
	})
	require.False(t, core.Cacheable(stmt, is))

	// test UpdateStmt
	whereExpr = &ast.FuncCallExpr{}
	stmt = &ast.UpdateStmt{
		TableRefs: tableRefsClause,
		Where:     whereExpr,
	}
	require.True(t, core.Cacheable(stmt, is))

	for funcName := range expression.UnCacheableFunctions {
		whereExpr.FnName = model.NewCIStr(funcName)
		require.False(t, core.Cacheable(stmt, is))
	}

	whereExpr.FnName = model.NewCIStr(ast.Rand)
	require.True(t, core.Cacheable(stmt, is))

	stmt = &ast.UpdateStmt{
		TableRefs: tableRefsClause,
		Where:     &ast.ExistsSubqueryExpr{Sel: &ast.SubqueryExpr{Query: &ast.SelectStmt{}}},
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	limitStmt = &ast.Limit{
		Count: &driver.ParamMarkerExpr{},
	}
	stmt = &ast.UpdateStmt{
		TableRefs: tableRefsClause,
		Limit:     limitStmt,
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	limitStmt = &ast.Limit{
		Offset: &driver.ParamMarkerExpr{},
	}
	stmt = &ast.UpdateStmt{
		TableRefs: tableRefsClause,
		Limit:     limitStmt,
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	limitStmt = &ast.Limit{}
	stmt = &ast.UpdateStmt{
		TableRefs: tableRefsClause,
		Limit:     limitStmt,
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	stmt.(*ast.UpdateStmt).TableHints = append(stmt.(*ast.UpdateStmt).TableHints, &ast.TableOptimizerHint{
		HintName: model.NewCIStr(core.HintIgnorePlanCache),
	})
	require.False(t, core.Cacheable(stmt, is))

	// test SelectStmt
	whereExpr = &ast.FuncCallExpr{}
	stmt = &ast.SelectStmt{
		Where: whereExpr,
	}
	require.True(t, core.Cacheable(stmt, is))

	for funcName := range expression.UnCacheableFunctions {
		whereExpr.FnName = model.NewCIStr(funcName)
		require.False(t, core.Cacheable(stmt, is))
	}

	whereExpr.FnName = model.NewCIStr(ast.Rand)
	require.True(t, core.Cacheable(stmt, is))

	stmt = &ast.SelectStmt{
		Where: &ast.ExistsSubqueryExpr{Sel: &ast.SubqueryExpr{Query: &ast.SelectStmt{}}},
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	limitStmt = &ast.Limit{
		Count: &driver.ParamMarkerExpr{},
	}
	stmt = &ast.SelectStmt{
		Limit: limitStmt,
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	limitStmt = &ast.Limit{
		Offset: &driver.ParamMarkerExpr{},
	}
	stmt = &ast.SelectStmt{
		Limit: limitStmt,
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	limitStmt = &ast.Limit{}
	stmt = &ast.SelectStmt{
		Limit: limitStmt,
	}
	c, _ = core.CacheableWithCtx(mockCtx, stmt, is)
	require.True(t, c)

	// A parameter marker in ORDER BY blocks caching; a plain value does not.
	paramExpr := &driver.ParamMarkerExpr{}
	orderByClause := &ast.OrderByClause{Items: []*ast.ByItem{{Expr: paramExpr}}}
	stmt = &ast.SelectStmt{
		OrderBy: orderByClause,
	}
	require.False(t, core.Cacheable(stmt, is))

	valExpr := &driver.ValueExpr{}
	orderByClause = &ast.OrderByClause{Items: []*ast.ByItem{{Expr: valExpr}}}
	stmt = &ast.SelectStmt{
		OrderBy: orderByClause,
	}
	require.True(t, core.Cacheable(stmt, is))

	stmt.(*ast.SelectStmt).TableHints = append(stmt.(*ast.SelectStmt).TableHints, &ast.TableOptimizerHint{
		HintName: model.NewCIStr(core.HintIgnorePlanCache),
	})
	require.False(t, core.Cacheable(stmt, is))

	// A window-frame bound holding a parameter marker is not cacheable.
	boundExpr := &ast.FrameBound{Expr: &driver.ParamMarkerExpr{}}
	require.False(t, core.Cacheable(boundExpr, is))

	// Partition table can not be cached.
	join := &ast.Join{
		Left:  &ast.TableName{Schema: model.NewCIStr("test"), Name: model.NewCIStr("t1")},
		Right: &ast.TableName{Schema: model.NewCIStr("test"), Name: model.NewCIStr("t2")},
	}
	stmt = &ast.SelectStmt{
		From: &ast.TableRefsClause{
			TableRefs: join,
		},
	}
	require.False(t, core.Cacheable(stmt, is))

	join = &ast.Join{
		Left: &ast.TableName{Schema: model.NewCIStr("test"), Name: model.NewCIStr("t3")},
	}
	stmt = &ast.SelectStmt{
		From: &ast.TableRefsClause{
			TableRefs: join,
		},
	}
	require.True(t, core.Cacheable(stmt, is))
}
// TestNonPreparedPlanCacheable parses a list of queries known to be
// supported and a list known to be unsupported by the non-prepared plan
// cache, and checks core.NonPreparedPlanCacheableWithCtx agrees on each.
func TestNonPreparedPlanCacheable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t (a int, b int, c int, d int, key(a), key(b))`)
	tk.MustExec("create table t1(a int, b int, index idx_b(b)) partition by range(a) ( partition p0 values less than (6), partition p1 values less than (11) )")
	tk.MustExec("create table t2(a int, b int) partition by hash(a) partitions 11")
	tk.MustExec("create table t3(a int, b int)")
	is := tk.Session().GetInfoSchema().(infoschema.InfoSchema)

	p := parser.New()
	charset := mysql.DefaultCharset
	collation := mysql.DefaultCollationName

	// Simple filters, expressions, limits and 2-way joins are supported.
	supported := []string{
		"select * from test.t where a<10",
		"select * from test.t where a<13 and b<15",
		"select * from test.t where b=13",
		"select * from test.t where c<8",
		"select * from test.t where d>8",
		"select * from test.t where c=8 and d>10",
		"select * from test.t where a<12 and b<13 and c<12 and d>2",
		"select * from test.t where a in (1, 2, 3)",
		"select * from test.t where a<13 or b<15",
		"select * from test.t where a<13 or b<15 and c=13",
		"select * from test.t where a in (1, 2)",
		"select * from test.t where a in (1, 2) and b in (1, 2, 3)",
		"select * from test.t where a in (1, 2) and b < 15",
		"select * from test.t where a between 1 and 10",
		"select * from test.t where a between 1 and 10 and b < 15",
		"select * from test.t where a+b=13", // '+'
		"select * from test.t where mod(a, 3)=1", // mod
		"select * from test.t where d>now()", // now
		"select a+1 from test.t where a<13",
		"select mod(a, 10) from test.t where a<13",
		"select * from test.t limit 1", // limit
		// 2-way joins
		"select * from test.t inner join test.t3 on test.t.a=test.t3.a",
		"select * from test.t inner join test.t3 on test.t.a=test.t3.a where test.t.a<10",
		"select * from test.t, test.t3",
		"select * from test.t, test.t3 where test.t.a=test.t3.a",
		"select * from test.t, test.t3 where test.t.a=test.t3.a and test.t.b=t3.b",
		"select * from test.t, test.t3 where test.t.a=test.t3.a and test.t.a<10",
	}
	// Hints, aggregation, ordering, sub-queries, DML and locking reads
	// are all outside the non-prepared cache's scope.
	unsupported := []string{
		"select /*+ use_index(t1, idx_b) */ * from t1 where a > 1 and b < 2", // hint
		"select distinct a from test.t1 where a > 1 and b < 2", // distinct
		"select count(*) from test.t1 where a > 1 and b < 2 group by a", // group by
		"select a, sum(b) as c from test.t1 where a > 1 and b < 2 group by a having sum(b) > 1", // having
		"select * from test.t1 order by a", // order by
		"select * from (select * from test.t1) t", // sub-query
		"insert into test.t1 values(1, 1)", // insert
		"insert into t1(a, b) select a, b from test.t1", // insert into select
		"update test.t1 set a = 1 where b = 2", // update
		"delete from test.t1 where b = 1", // delete
		"select * from test.t1 for update", // lock
		"select * from test.t1 where a in (select a from t)", // uncorrelated sub-query
		"select * from test.t1 where a in (select a from test.t where a > t1.a)", // correlated sub-query
	}

	sctx := tk.Session()
	for _, q := range unsupported {
		stmt, err := p.ParseOneStmt(q, charset, collation)
		require.NoError(t, err)
		ok, _ := core.NonPreparedPlanCacheableWithCtx(sctx, stmt, is)
		require.False(t, ok)
	}

	for _, q := range supported {
		stmt, err := p.ParseOneStmt(q, charset, collation)
		require.NoError(t, err)
		ok, _ := core.NonPreparedPlanCacheableWithCtx(sctx, stmt, is)
		require.True(t, ok)
	}
}
// BenchmarkNonPreparedPlanCacheableChecker measures how fast the
// cacheability check runs for a simple single-table filter query.
func BenchmarkNonPreparedPlanCacheableChecker(b *testing.B) {
	store := testkit.CreateMockStore(b)
	tk := testkit.NewTestKit(b, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (a int, b int)")

	p := parser.New()
	sql := "select * from test.t where a<10"
	stmt, err := p.ParseOneStmt(sql, "", "")
	if err != nil {
		b.Fatal(err)
	}
	sctx := tk.Session()
	is := sessiontxn.GetTxnManager(sctx).GetTxnInfoSchema()
	// Warm-up call outside the timed region.
	core.NonPreparedPlanCacheableWithCtx(sctx, stmt, is)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ok, _ := core.NonPreparedPlanCacheableWithCtx(sctx, stmt, is)
		if !ok {
			b.Fatal()
		}
	}
}
|
package main
import (
"flag"
"fmt"
"github.com/kjirou/tower-of-go/controller"
"github.com/kjirou/tower-of-go/views"
"github.com/nsf/termbox-go"
"math/rand"
"time"
)
// drawTerminal writes every cell of the screen into termbox's back
// buffer and flushes it to the terminal.
func drawTerminal(screen *views.Screen) {
	screen.ForEachCells(func (y int, x int, symbol rune, fg termbox.Attribute, bg termbox.Attribute) {
		// NOTE: Probably, termbox.SetCell's outputs are asynchronous.
		// Therefore, multiple executions on the same cell at the same time will nest the output buffers.
		// As a result, the display will be corrupted.
		termbox.SetCell(x, y, symbol, fg, bg)
	})
	termbox.Flush()
}
// convertScreenToText flattens the screen into plain text, emitting a
// newline each time the traversal moves to a new row.
func convertScreenToText(screen *views.Screen) string {
	text := ""
	currentRow := 0
	screen.ForEachCells(func(y int, x int, symbol rune, fg termbox.Attribute, bg termbox.Attribute) {
		if y != currentRow {
			text += "\n"
			currentRow = y
		}
		text += string(symbol)
	})
	return text
}
// runMainLoop advances the game state forever at (roughly) the
// controller's requested frame rate, redrawing the terminal after every
// state change. On an unrecoverable error it shuts termbox down and
// panics with the formatted error.
func runMainLoop(controller *controller.Controller) {
	for {
		// TODO: Expecting 60fps. However, it is behind the real time.
		// For example, my computer needs 33-36 seconds of real time for 30 seconds of a game.
		// It becomes more accurate if fps is lowered.
		interval := controller.CalculateIntervalToNextMainLoop(time.Now())
		time.Sleep(interval)
		newState, err := controller.HandleMainLoop(interval)
		if err != nil {
			termbox.Close()
			// BUG FIX: the original used fmt.Printf, whose first return
			// value is the number of bytes written, and panicked with that
			// int. Sprintf makes the panic carry the error text.
			panic(fmt.Sprintf("%+v", err))
		} else if newState != nil {
			controller.Dispatch(newState)
			drawTerminal(controller.GetScreen())
		}
	}
}
// main parses flags, builds the controller, and either dumps the
// initial screen as text (debug mode) or runs the interactive termbox
// session: the game loop runs in a goroutine while this goroutine
// blocks on termbox key events until Esc/Ctrl-C/Ctrl-Q quits.
func main() {
	var debugMode bool
	flag.BoolVar(&debugMode, "debug", false, "Runs with debug mode.")
	flag.Parse()

	rand.Seed(time.Now().UnixNano())

	controller, createControllerErr := controller.CreateController()
	if createControllerErr != nil {
		panic(createControllerErr)
	}

	if debugMode {
		fmt.Println(convertScreenToText(controller.GetScreen()))
	} else {
		termboxErr := termbox.Init()
		if termboxErr != nil {
			panic(termboxErr)
		}
		termbox.SetInputMode(termbox.InputEsc)
		termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
		defer termbox.Close()
		drawTerminal(controller.GetScreen())
		go runMainLoop(controller)

		// Observe termbox events.
		didQuitApplication := false
		for !didQuitApplication {
			event := termbox.PollEvent()
			switch event.Type {
			case termbox.EventKey:
				// Quit the application. Only this operation is resolved with priority.
				// (The break exits the switch; the loop then stops because
				// didQuitApplication is true.)
				if event.Key == termbox.KeyEsc || event.Key == termbox.KeyCtrlC || event.Key == termbox.KeyCtrlQ {
					didQuitApplication = true
					break
				}
				controller.HandleKeyPress(event.Ch, event.Key)
			}
		}
	}
}
|
package main
import (
"log"
"net/http"
"github.com/garyburd/redigo/redis"
"github.com/felipeguilhermefs/restis/commands/global"
"github.com/felipeguilhermefs/restis/commands/hashes"
"github.com/felipeguilhermefs/restis/commands/lists"
"github.com/felipeguilhermefs/restis/commands/sets"
"github.com/felipeguilhermefs/restis/commands/strings"
"github.com/felipeguilhermefs/restis/commands/transactions"
"github.com/felipeguilhermefs/restis/commands/zsets"
"github.com/felipeguilhermefs/restis/router"
)
// main connects to a local Redis server, registers one HTTP route per
// supported Redis command, and serves the REST API on port 3000.
func main() {
	// Without a Redis connection the whole API is useless, so abort at once.
	conn, err := redis.Dial("tcp", ":6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	var routes = router.Routes{
		strings.GetRoute(conn),
		strings.MGetRoute(conn),
		strings.SetRoute(conn),
		strings.StrLenRoute(conn),
		strings.GetRangeRoute(conn),
		strings.AppendRoute(conn),
		strings.IncrRoute(conn),
		strings.IncrByRoute(conn),
		global.DelRoute(conn),
		global.FlushDbRoute(conn),
		global.KeysRoute(conn),
		global.PipelineRoute(conn),
		transactions.MultiRoute(conn),
		transactions.DiscardRoute(conn),
		transactions.ExecRoute(conn),
		transactions.WatchRoute(conn),
		transactions.UnwatchRoute(conn),
		transactions.TransactionRoute(conn),
		hashes.HSetRoute(conn),
		hashes.HGetRoute(conn),
		hashes.HMSetRoute(conn),
		hashes.HMGetRoute(conn),
		hashes.HGetAllRoute(conn),
		hashes.HKeysRoute(conn),
		hashes.HDelRoute(conn),
		lists.LPushRoute(conn),
		lists.LTrimRoute(conn),
		lists.LRangeRoute(conn),
		sets.SAddRoute(conn),
		sets.SIsMemberRoute(conn),
		zsets.ZAddRoute(conn),
		zsets.ZCountRoute(conn),
		zsets.ZRankRoute(conn),
		zsets.ZRevRankRoute(conn),
	}
	// Named mux (not "router") so the router package isn't shadowed.
	mux := router.NewRouter(routes)
	http.Handle("/", mux)
	// The server speaks plain HTTP; the previous message advertised a
	// non-existent "router://" scheme.
	log.Println("Server started: http://localhost:3000")
	log.Fatal(http.ListenAndServe(":3000", nil))
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggfuncs_test
import (
"testing"
"time"
"github.com/pingcap/tidb/executor/aggfuncs"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
// windowTest describes one window-function test case: the input column
// type, how many rows to generate, the function under test with its
// arguments and optional ORDER BY column, and the expected result for
// every row.
type windowTest struct {
	dataType    *types.FieldType       // field type of the single source column
	numRows     int                    // number of rows generated into the source chunk
	funcName    string                 // window function name (an ast.WindowFunc* constant)
	args        []expression.Expression
	orderByCols []*expression.Column   // ORDER BY columns of the window; may be empty
	results     []types.Datum          // expected datum for each of the numRows rows
}
// genSrcChk builds the source chunk for this test case: a single column of
// p.dataType filled with p.numRows generated datums.
func (p *windowTest) genSrcChk() *chunk.Chunk {
	chk := chunk.NewChunkWithCapacity([]*types.FieldType{p.dataType}, p.numRows)
	gen := getDataGenFunc(p.dataType)
	for row := 0; row < p.numRows; row++ {
		d := gen(row)
		chk.AppendDatum(0, &d)
	}
	return chk
}
// windowMemTest pairs a windowTest with the expected memory accounting:
// the delta reported by AllocPartialResult and a generator producing the
// expected delta of each UpdatePartialResult call.
type windowMemTest struct {
	windowTest         windowTest
	allocMemDelta      int64
	updateMemDeltaGens updateMemDeltaGens
}
// testWindowFunc feeds p.numRows generated rows into the window function
// described by p, then checks that each appended final result equals the
// expected datum. Results are compared with Datum.Compare under the result
// type's collation.
func testWindowFunc(t *testing.T, p windowTest) {
	srcChk := p.genSrcChk()
	ctx := mock.NewContext()
	desc, err := aggregation.NewAggFuncDesc(ctx, p.funcName, p.args, false)
	require.NoError(t, err)
	finalFunc := aggfuncs.BuildWindowFunctions(ctx, desc, 0, p.orderByCols)
	finalPr, _ := finalFunc.AllocPartialResult()
	resultChk := chunk.NewChunkWithCapacity([]*types.FieldType{desc.RetTp}, 1)
	// Update the partial result with every source row before reading any
	// final results.
	iter := chunk.NewIterator4Chunk(srcChk)
	for row := iter.Begin(); row != iter.End(); row = iter.Next() {
		_, err = finalFunc.UpdatePartialResult(ctx, []chunk.Row{row}, finalPr)
		require.NoError(t, err)
	}
	require.Len(t, p.results, p.numRows)
	for i := 0; i < p.numRows; i++ {
		err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk)
		require.NoError(t, err)
		dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp)
		result, err := dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[i], collate.GetCollator(desc.RetTp.GetCollate()))
		require.NoError(t, err)
		require.Equal(t, 0, result)
		// Reuse the one-row chunk for the next row's result.
		resultChk.Reset()
	}
	finalFunc.ResetPartialResult(finalPr)
}
// testWindowAggMemFunc verifies the memory accounting of a window function:
// the delta returned by AllocPartialResult must equal p.allocMemDelta, and
// each UpdatePartialResult call must report the delta produced by
// p.updateMemDeltaGens for the corresponding row.
func testWindowAggMemFunc(t *testing.T, p windowMemTest) {
	srcChk := p.windowTest.genSrcChk()
	ctx := mock.NewContext()
	desc, err := aggregation.NewAggFuncDesc(ctx, p.windowTest.funcName, p.windowTest.args, false)
	require.NoError(t, err)
	finalFunc := aggfuncs.BuildWindowFunctions(ctx, desc, 0, p.windowTest.orderByCols)
	finalPr, memDelta := finalFunc.AllocPartialResult()
	require.Equal(t, p.allocMemDelta, memDelta)
	// Expected per-row deltas derived from the actual source chunk.
	updateMemDeltas, err := p.updateMemDeltaGens(srcChk, p.windowTest.dataType)
	require.NoError(t, err)
	i := 0
	iter := chunk.NewIterator4Chunk(srcChk)
	for row := iter.Begin(); row != iter.End(); row = iter.Next() {
		memDelta, err = finalFunc.UpdatePartialResult(ctx, []chunk.Row{row}, finalPr)
		require.NoError(t, err)
		require.Equal(t, updateMemDeltas[i], memDelta)
		i++
	}
}
// buildWindowTesterWithArgs assembles a windowTest with explicit extra
// arguments. The source column is used as the implicit first argument for
// every function except NTILE, and orderByCols > 0 makes the same column
// the window's ORDER BY key.
func buildWindowTesterWithArgs(funcName string, tp byte, args []expression.Expression, orderByCols int, numRows int, results ...interface{}) windowTest {
	test := windowTest{
		dataType: types.NewFieldType(tp),
		numRows:  numRows,
		funcName: funcName,
	}
	// NTILE takes no implicit column argument.
	if funcName != ast.WindowFuncNtile {
		test.args = []expression.Expression{&expression.Column{RetType: test.dataType, Index: 0}}
	}
	test.args = append(test.args, args...)
	if orderByCols > 0 {
		test.orderByCols = []*expression.Column{{RetType: test.dataType, Index: 0}}
	}
	for _, expected := range results {
		test.results = append(test.results, types.NewDatum(expected))
	}
	return test
}
// buildWindowTester assembles a windowTest. The source column is used as
// the implicit first argument for every function except NTILE; constantArg
// (when non-zero) is appended as an extra constant argument, and
// orderByCols > 0 makes the same column the window's ORDER BY key.
func buildWindowTester(funcName string, tp byte, constantArg uint64, orderByCols int, numRows int, results ...interface{}) windowTest {
	test := windowTest{
		dataType: types.NewFieldType(tp),
		numRows:  numRows,
		funcName: funcName,
	}
	// NTILE takes no implicit column argument.
	if funcName != ast.WindowFuncNtile {
		test.args = []expression.Expression{&expression.Column{RetType: test.dataType, Index: 0}}
	}
	if constantArg > 0 {
		test.args = append(test.args, &expression.Constant{Value: types.NewUintDatum(constantArg)})
	}
	if orderByCols > 0 {
		test.orderByCols = []*expression.Column{{RetType: test.dataType, Index: 0}}
	}
	for _, expected := range results {
		test.results = append(test.results, types.NewDatum(expected))
	}
	return test
}
// buildWindowMemTester wraps buildWindowTester (which expects no result
// values here) together with the expected memory-accounting data.
// Note the parameter order: this helper takes numRows before orderByCols,
// while buildWindowTester takes them the other way round.
func buildWindowMemTester(funcName string, tp byte, constantArg uint64, numRows int, orderByCols int, allocMemDelta int64, updateMemDeltaGens updateMemDeltaGens) windowMemTest {
	return windowMemTest{
		windowTest:         buildWindowTester(funcName, tp, constantArg, orderByCols, numRows),
		allocMemDelta:      allocMemDelta,
		updateMemDeltaGens: updateMemDeltaGens,
	}
}
// buildWindowMemTesterWithArgs wraps buildWindowTesterWithArgs (expecting
// no result values) together with the expected memory-accounting data.
func buildWindowMemTesterWithArgs(funcName string, tp byte, args []expression.Expression, orderByCols int, numRows int, allocMemDelta int64, updateMemDeltaGens updateMemDeltaGens) windowMemTest {
	return windowMemTest{
		windowTest:         buildWindowTesterWithArgs(funcName, tp, args, orderByCols, numRows),
		allocMemDelta:      allocMemDelta,
		updateMemDeltaGens: updateMemDeltaGens,
	}
}
// TestWindowFunctions runs each window function against a small generated
// column and checks the per-row results. buildWindowTester arguments are:
// funcName, column type, constant argument (0 = none), number of ORDER BY
// columns, number of rows, then the expected result for each row.
func TestWindowFunctions(t *testing.T) {
	tests := []windowTest{
		buildWindowTester(ast.WindowFuncCumeDist, mysql.TypeLonglong, 0, 1, 1, 1),
		buildWindowTester(ast.WindowFuncCumeDist, mysql.TypeLonglong, 0, 0, 2, 1, 1),
		buildWindowTester(ast.WindowFuncCumeDist, mysql.TypeLonglong, 0, 1, 4, 0.25, 0.5, 0.75, 1),
		buildWindowTester(ast.WindowFuncDenseRank, mysql.TypeLonglong, 0, 0, 2, 1, 1),
		buildWindowTester(ast.WindowFuncDenseRank, mysql.TypeLonglong, 0, 1, 4, 1, 2, 3, 4),
		buildWindowTester(ast.WindowFuncFirstValue, mysql.TypeLonglong, 0, 1, 2, 0, 0),
		buildWindowTester(ast.WindowFuncFirstValue, mysql.TypeFloat, 0, 1, 2, 0, 0),
		buildWindowTester(ast.WindowFuncFirstValue, mysql.TypeDouble, 0, 1, 2, 0, 0),
		buildWindowTester(ast.WindowFuncFirstValue, mysql.TypeNewDecimal, 0, 1, 2, types.NewDecFromInt(0), types.NewDecFromInt(0)),
		buildWindowTester(ast.WindowFuncFirstValue, mysql.TypeString, 0, 1, 2, "0", "0"),
		buildWindowTester(ast.WindowFuncFirstValue, mysql.TypeDate, 0, 1, 2, types.TimeFromDays(365), types.TimeFromDays(365)),
		buildWindowTester(ast.WindowFuncFirstValue, mysql.TypeDuration, 0, 1, 2, types.Duration{Duration: time.Duration(0)}, types.Duration{Duration: time.Duration(0)}),
		buildWindowTester(ast.WindowFuncFirstValue, mysql.TypeJSON, 0, 1, 2, types.CreateBinaryJSON(int64(0)), types.CreateBinaryJSON(int64(0))),
		buildWindowTester(ast.WindowFuncLastValue, mysql.TypeLonglong, 1, 0, 2, 1, 1),
		buildWindowTester(ast.WindowFuncNthValue, mysql.TypeLonglong, 2, 0, 3, 1, 1, 1),
		buildWindowTester(ast.WindowFuncNthValue, mysql.TypeLonglong, 5, 0, 3, nil, nil, nil),
		buildWindowTester(ast.WindowFuncNtile, mysql.TypeLonglong, 3, 0, 4, 1, 1, 2, 3),
		buildWindowTester(ast.WindowFuncNtile, mysql.TypeLonglong, 5, 0, 3, 1, 2, 3),
		buildWindowTester(ast.WindowFuncPercentRank, mysql.TypeLonglong, 0, 1, 1, 0),
		buildWindowTester(ast.WindowFuncPercentRank, mysql.TypeLonglong, 0, 0, 3, 0, 0, 0),
		buildWindowTester(ast.WindowFuncPercentRank, mysql.TypeLonglong, 0, 1, 4, 0, 0.3333333333333333, 0.6666666666666666, 1),
		buildWindowTester(ast.WindowFuncRank, mysql.TypeLonglong, 0, 1, 1, 1),
		buildWindowTester(ast.WindowFuncRank, mysql.TypeLonglong, 0, 0, 3, 1, 1, 1),
		buildWindowTester(ast.WindowFuncRank, mysql.TypeLonglong, 0, 1, 4, 1, 2, 3, 4),
		buildWindowTester(ast.WindowFuncRowNumber, mysql.TypeLonglong, 0, 0, 4, 1, 2, 3, 4),
	}
	for _, test := range tests {
		testWindowFunc(t, test)
	}
}
|
package main
import (
	"log"
	"net/http"
)
// Message is the envelope exchanged with clients: an event name plus an
// arbitrary JSON payload.
type Message struct {
	Name string      `json:"name"`
	Data interface{} `json:"data"`
}
// Channel is a named channel resource identified by its id.
type Channel struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}
// main wires the custom router into the default HTTP mux and serves it on
// port 4000.
func main() {
	router := NewRouter()
	router.Handle("channel add", addChannel)
	http.Handle("/", router)
	// ListenAndServe only returns on failure; surface the error instead of
	// silently exiting with status 0.
	log.Fatal(http.ListenAndServe(":4000", nil))
}
|
package taskflow
import (
"github.com/Huawei/eSDK_K8S_Plugin/src/utils"
"github.com/Huawei/eSDK_K8S_Plugin/src/utils/log"
)
// TaskRunFunc executes one task. It receives the flow's input params and the
// accumulated result map, and returns a (possibly nil) result map to merge.
type TaskRunFunc func(params map[string]interface{}, result map[string]interface{}) (map[string]interface{}, error)

// TaskRevertFunc undoes a finished task, given the accumulated result map.
type TaskRevertFunc func(result map[string]interface{}) error
// Task is a single named step of a TaskFlow.
type Task struct {
	name   string
	finish bool // set once run has completed successfully; guards revert
	run    TaskRunFunc
	revert TaskRevertFunc // optional; nil revert is skipped during Revert
}
// TaskFlow runs an ordered list of tasks, accumulating their results; on
// failure the finished tasks can be reverted in reverse order.
type TaskFlow struct {
	name   string
	tasks  []*Task
	result map[string]interface{} // merged results of all finished tasks
}
// NewTaskFlow creates an empty task flow with the given name.
func NewTaskFlow(name string) *TaskFlow {
	flow := &TaskFlow{name: name}
	flow.result = make(map[string]interface{})
	return flow
}
// AddTask appends a task with its run and (optional) revert callbacks to
// the end of the flow.
func (p *TaskFlow) AddTask(name string, run TaskRunFunc, revert TaskRevertFunc) {
	task := &Task{name: name, finish: false, run: run, revert: revert}
	p.tasks = append(p.tasks, task)
}
// Run executes the tasks in order, merging each non-nil task result into
// the flow's accumulated result map. It stops at the first failing task and
// returns its error; on success it returns the accumulated results.
func (p *TaskFlow) Run(params map[string]interface{}) (map[string]interface{}, error) {
	log.Infof("Start to run taskflow %s", p.name)
	for _, task := range p.tasks {
		out, err := task.run(params, p.result)
		if err != nil {
			log.Errorf("Run task %s of taskflow %s error: %v", task.name, p.name, err)
			return nil, err
		}
		// Mark as finished so Revert knows this task ran.
		task.finish = true
		if out == nil {
			continue
		}
		p.result = utils.MergeMap(p.result, out)
	}
	log.Infof("Taskflow %s is finished", p.name)
	return p.result, nil
}
// GetResult returns the accumulated result map of the tasks run so far.
func (p *TaskFlow) GetResult() map[string]interface{} {
	return p.result
}
// Revert undoes the finished tasks in reverse order. Tasks that never ran,
// or that have no revert callback, are skipped; revert errors are logged
// and do not stop the remaining reverts.
func (p *TaskFlow) Revert() {
	log.Infof("Start to revert taskflow %s", p.name)
	for i := len(p.tasks) - 1; i >= 0; i-- {
		task := p.tasks[i]
		if !task.finish || task.revert == nil {
			continue
		}
		if err := task.revert(p.result); err != nil {
			log.Warningf("Revert task %s of taskflow %s error: %v", task.name, p.name, err)
		}
	}
	log.Infof("Taskflow %s is reverted", p.name)
}
|
package main
import (
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"sort"
)
// programName is the invoked binary's base name, used in usage and error
// messages; it is derived from os.Args[0] at startup.
var programName string

// programVersion is reported by the "version" subcommand.
var programVersion = "3.0.0"

func init() {
	programName = filepath.Base(os.Args[0])
}
// main dispatches to the subcommand named by the first argument. With no
// arguments, or with "help", it prints the usage listing and exits 2;
// unknown subcommands and subcommand failures exit 1.
func main() {
	commands := map[string]command{
		"ruler":   makeRulerCommand(),
		"version": makeVersionCommand(),
	}
	flag.Usage = func() {
		fmt.Printf("%s is a command-line interface to Twitter.\n\n", programName)
		// Collect the names and the padding width in one pass; sorting makes
		// the listing deterministic (map iteration order is random).
		names := make([]string, 0, len(commands))
		maxCommandLen := 0
		for name := range commands {
			names = append(names, name)
			// Don't shadow the builtin len here.
			if l := len(name); l > maxCommandLen {
				maxCommandLen = l
			}
		}
		sort.Strings(names)
		// The format string is loop-invariant, so build it once.
		formatString := fmt.Sprintf("%%%ds: %%s\n", maxCommandLen)
		for _, name := range names {
			fmt.Printf(formatString, name, commands[name].desc)
		}
	}
	flag.Parse()
	args := flag.Args()
	if len(args) <= 0 || args[0] == "help" {
		flag.Usage()
		os.Exit(2)
	}
	cmd, ok := commands[args[0]]
	if !ok {
		printError("unknown subcommand %q\nRun '%s help' for usage.\n", args[0], programName)
		os.Exit(1)
	}
	if err := cmd.fn(args[1:]); err != nil {
		printError("error running %q: %s", args[0], err)
		os.Exit(1)
	}
}
// command is a single CLI subcommand: its flag set, a one-line description
// for the usage listing, and the function run with the remaining arguments.
type command struct {
	fs   *flag.FlagSet
	desc string
	fn   func([]string) error
}
// makeRulerCommand builds the "ruler" subcommand, which prints a
// 140-character ruler, optionally indented by -indent spaces.
func makeRulerCommand() command {
	flagSet := flag.NewFlagSet("ruler", flag.ExitOnError)
	indent := flagSet.Int("indent", 0, "the number of spaces to print before the ruler")
	return command{
		fs:   flagSet,
		desc: "print a 140-character ruler",
		fn: func(args []string) error {
			flagSet.Parse(args)
			_, err := fmt.Println(ruler(*indent))
			return err
		},
	}
}
// ruler returns a 140-character ruler preceded by spacesToIndent spaces.
// Every 5th position is marked with '|', the rest with '-'.
// The result is assembled in a pre-sized byte slice instead of repeated
// string concatenation, which allocated a new string per character.
func ruler(spacesToIndent int) string {
	buf := make([]byte, 0, spacesToIndent+140)
	for i := 0; i < spacesToIndent; i++ {
		buf = append(buf, ' ')
	}
	for i := 1; i <= 140; i++ {
		if i%5 == 0 {
			buf = append(buf, '|')
		} else {
			buf = append(buf, '-')
		}
	}
	return string(buf)
}
// makeVersionCommand builds the "version" subcommand, which prints the
// program version.
func makeVersionCommand() command {
	return command{
		desc: fmt.Sprintf("print %s version", programName),
		fn: func(args []string) error {
			_, err := fmt.Println(programVersion)
			return err
		},
	}
}
// printError writes a printf-style message to stderr, prefixed with the
// program name. It does not exit; callers decide the exit code.
func printError(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, programName+": "+format, args...)
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"math"
"slices"
"sort"
"unicode/utf8"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/stringutil"
)
// Type returns the type of BinaryJSON as a string, using the names that
// MySQL's JSON_TYPE function reports.
func (bj BinaryJSON) Type() string {
	switch bj.TypeCode {
	case JSONTypeCodeObject:
		return "OBJECT"
	case JSONTypeCodeArray:
		return "ARRAY"
	case JSONTypeCodeLiteral:
		// Literals encode null/true/false in the first value byte.
		switch bj.Value[0] {
		case JSONLiteralNil:
			return "NULL"
		default:
			return "BOOLEAN"
		}
	case JSONTypeCodeInt64:
		return "INTEGER"
	case JSONTypeCodeUint64:
		return "UNSIGNED INTEGER"
	case JSONTypeCodeFloat64:
		return "DOUBLE"
	case JSONTypeCodeString:
		return "STRING"
	case JSONTypeCodeOpaque:
		// Opaque values carry the originating MySQL field type: string/blob
		// types report as BLOB, bit as BIT, everything else as OPAQUE.
		typ := bj.GetOpaqueFieldType()
		switch typ {
		case mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeBlob, mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar:
			return "BLOB"
		case mysql.TypeBit:
			return "BIT"
		default:
			return "OPAQUE"
		}
	case JSONTypeCodeDate:
		return "DATE"
	case JSONTypeCodeDatetime:
		return "DATETIME"
	case JSONTypeCodeTimestamp:
		// Timestamps are reported as DATETIME as well.
		return "DATETIME"
	case JSONTypeCodeDuration:
		return "TIME"
	default:
		// An unknown type code means corrupt data or a programming error.
		msg := fmt.Sprintf(unknownTypeCodeErrorMsg, bj.TypeCode)
		panic(msg)
	}
}
// Unquote is for JSON_UNQUOTE: a JSON string value has its quotes and
// escape sequences removed, while any other value is rendered with its
// ordinary string representation.
func (bj BinaryJSON) Unquote() (string, error) {
	if bj.TypeCode != JSONTypeCodeString {
		return bj.String(), nil
	}
	return UnquoteString(string(hack.String(bj.GetString())))
}
// UnquoteString strips the surrounding double quotes of str, if present,
// and decodes the escape sequences between them. A string that is not
// double-quoted is returned unchanged.
func UnquoteString(str string) (string, error) {
	if len(str) < 2 {
		return str, nil
	}
	if str[0] != '"' || str[len(str)-1] != '"' {
		// Not double quoted: nothing to do.
		return str, nil
	}
	// Remove the leading and trailing '"' before unquoting the interior.
	return unquoteJSONString(str[1 : len(str)-1])
}
// unquoteJSONString recognizes the escape sequences shown in:
// https://dev.mysql.com/doc/refman/5.7/en/json-modification-functions.html#json-unquote-character-escape-sequences
// The input is the interior of a double-quoted JSON string (the quotes are
// already stripped by UnquoteString).
func unquoteJSONString(s string) (string, error) {
	ret := new(bytes.Buffer)
	for i := 0; i < len(s); i++ {
		if s[i] == '\\' {
			i++
			if i == len(s) {
				// A trailing lone backslash has nothing to escape.
				return "", errors.New("Missing a closing quotation mark in string")
			}
			switch s[i] {
			case '"':
				ret.WriteByte('"')
			case 'b':
				ret.WriteByte('\b')
			case 'f':
				ret.WriteByte('\f')
			case 'n':
				ret.WriteByte('\n')
			case 'r':
				ret.WriteByte('\r')
			case 't':
				ret.WriteByte('\t')
			case '\\':
				ret.WriteByte('\\')
			case 'u':
				// \uXXXX needs the four hex digits s[i+1:i+5], i.e. i+5 <= len(s).
				// The previous bound (i+4 > len(s)) accepted i+4 == len(s) and
				// the slice below panicked on inputs such as `\u123` at the end
				// of the string.
				if i+5 > len(s) {
					return "", errors.Errorf("Invalid unicode: %s", s[i+1:])
				}
				char, size, err := decodeEscapedUnicode(hack.Slice(s[i+1 : i+5]))
				if err != nil {
					return "", errors.Trace(err)
				}
				ret.Write(char[0:size])
				i += 4
			default:
				// For all other escape sequences, backslash is ignored.
				ret.WriteByte(s[i])
			}
		} else {
			ret.WriteByte(s[i])
		}
	}
	return ret.String(), nil
}
// decodeEscapedUnicode decodes a four-hex-digit escaped code unit (as in
// \uXXXX) into its UTF-8 encoding, specified in RFC 3629.
// According RFC 3629, the max length of utf8 characters is 4 bytes.
// And MySQL use 4 bytes to represent the unicode which must be in [0, 65536).
//
// s must hold exactly the four hex digits; char receives the UTF-8 bytes
// and size says how many of them are valid.
func decodeEscapedUnicode(s []byte) (char [4]byte, size int, err error) {
	size, err = hex.Decode(char[0:2], s)
	if err != nil || size != 2 {
		// The unicode must can be represented in 2 bytes.
		return char, 0, errors.Trace(err)
	}
	unicode := binary.BigEndian.Uint16(char[0:2])
	// Re-encode the code point as UTF-8 in place.
	size = utf8.RuneLen(rune(unicode))
	utf8.EncodeRune(char[0:size], rune(unicode))
	return
}
// quoteJSONString escapes interior quote and other characters for JSON_QUOTE
// https://dev.mysql.com/doc/refman/5.7/en/json-creation-functions.html#function_json-quote
// TODO: add JSON_QUOTE builtin
//
// The result is the double-quoted, escaped form of s — except that a string
// which needed no escaping and is a valid ECMAScript identifier is returned
// without the surrounding quotes.
func quoteJSONString(s string) string {
	// Single-byte characters that must be escaped, with their replacements.
	var escapeByteMap = map[byte]string{
		'\\': "\\\\",
		'"':  "\\\"",
		'\b': "\\b",
		'\f': "\\f",
		'\n': "\\n",
		'\r': "\\r",
		'\t': "\\t",
	}
	ret := new(bytes.Buffer)
	ret.WriteByte('"')
	start := 0 // start of the current run of bytes that need no escaping
	hasEscaped := false
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			escaped, ok := escapeByteMap[b]
			if ok {
				// Flush the unescaped run, then emit the escape sequence.
				if start < i {
					ret.WriteString(s[start:i])
				}
				hasEscaped = true
				ret.WriteString(escaped)
				i++
				start = i
			} else {
				i++
			}
		} else {
			c, size := utf8.DecodeRuneInString(s[i:])
			if c == utf8.RuneError && size == 1 { // refer to codes of `binary.jsonMarshalStringTo`
				// Invalid UTF-8 byte: emit the Unicode replacement character.
				if start < i {
					ret.WriteString(s[start:i])
				}
				hasEscaped = true
				ret.WriteString(`\ufffd`)
				i += size
				start = i
				continue
			}
			i += size
		}
	}
	if start < len(s) {
		ret.WriteString(s[start:])
	}
	if hasEscaped || !isEcmascriptIdentifier(s) {
		ret.WriteByte('"')
		return ret.String()
	}
	// No escapes and a plain identifier: drop the leading quote (no closing
	// quote was written on this path).
	return ret.String()[1:]
}
// Extract receives several path expressions as arguments, matches them in bj, and returns:
//
//	ret: target JSON matched any path expressions. maybe autowrapped as an array.
//	found: true if any path expressions matched.
func (bj BinaryJSON) Extract(pathExprList []JSONPathExpression) (ret BinaryJSON, found bool) {
	buf := make([]BinaryJSON, 0, 1)
	// Collect matches for every path; each path gets its own dedup map
	// (keyed by the address of a node's first value byte).
	for _, pathExpr := range pathExprList {
		buf = bj.extractTo(buf, pathExpr, make(map[*byte]struct{}), false)
	}
	if len(buf) == 0 {
		found = false
	} else if len(pathExprList) == 1 && len(buf) == 1 {
		// If pathExpr contains asterisks, len(elemList) won't be 1
		// even if len(pathExprList) equals to 1.
		found = true
		ret = buf[0]
		// Fix https://github.com/pingcap/tidb/issues/30352
		if pathExprList[0].CouldMatchMultipleValues() {
			ret = buildBinaryJSONArray(buf)
		}
	} else {
		// Several paths or several matches: wrap everything into an array.
		found = true
		ret = buildBinaryJSONArray(buf)
	}
	return
}
// extractOne returns at most one match of pathExpr in bj (no deduplication
// is needed since the walk stops at the first hit).
func (bj BinaryJSON) extractOne(pathExpr JSONPathExpression) []BinaryJSON {
	return bj.extractTo(make([]BinaryJSON, 0, 1), pathExpr, nil, true)
}
// extractTo appends to buf every node of bj matched by pathExpr, walking the
// path one leg at a time. When dup is non-nil, nodes already appended are
// skipped (identified by the address of their first value byte). When one is
// true, the walk stops after the first match.
func (bj BinaryJSON) extractTo(buf []BinaryJSON, pathExpr JSONPathExpression, dup map[*byte]struct{}, one bool) []BinaryJSON {
	if len(pathExpr.legs) == 0 {
		// Path exhausted: bj itself is a match.
		if dup != nil {
			if _, exists := dup[&bj.Value[0]]; exists {
				return buf
			}
			dup[&bj.Value[0]] = struct{}{}
		}
		return append(buf, bj)
	}
	currentLeg, subPathExpr := pathExpr.popOneLeg()
	if currentLeg.typ == jsonPathLegArraySelection {
		if bj.TypeCode != JSONTypeCodeArray {
			// If the current object is not an array, still append them if the selection includes
			// 0. But for asterisk, it still returns NULL.
			//
			// don't call `getIndexRange` or `getIndexFromStart`, they will panic if the argument
			// is not array.
			switch selection := currentLeg.arraySelection.(type) {
			case jsonPathArraySelectionIndex:
				if selection.index == 0 {
					buf = bj.extractTo(buf, subPathExpr, dup, one)
				}
			case jsonPathArraySelectionRange:
				// for [0 to Non-negative Number] and [0 to last], it extracts itself
				if selection.start == 0 && selection.end >= -1 {
					buf = bj.extractTo(buf, subPathExpr, dup, one)
				}
			}
			return buf
		}
		start, end := currentLeg.arraySelection.getIndexRange(bj)
		if start >= 0 && start <= end {
			for i := start; i <= end; i++ {
				buf = bj.ArrayGetElem(i).extractTo(buf, subPathExpr, dup, one)
			}
		}
	} else if currentLeg.typ == jsonPathLegKey && bj.TypeCode == JSONTypeCodeObject {
		elemCount := bj.GetElemCount()
		if currentLeg.dotKey == "*" {
			// Wildcard key: descend into every member.
			for i := 0; i < elemCount && !jsonFinished(buf, one); i++ {
				buf = bj.objectGetVal(i).extractTo(buf, subPathExpr, dup, one)
			}
		} else {
			child, ok := bj.objectSearchKey(hack.Slice(currentLeg.dotKey))
			if ok {
				buf = child.extractTo(buf, subPathExpr, dup, one)
			}
		}
	} else if currentLeg.typ == jsonPathLegDoubleAsterisk {
		// ** matches the current node (tail path) and, recursively, every
		// descendant — note children are walked with the full pathExpr.
		buf = bj.extractTo(buf, subPathExpr, dup, one)
		if bj.TypeCode == JSONTypeCodeArray {
			elemCount := bj.GetElemCount()
			for i := 0; i < elemCount && !jsonFinished(buf, one); i++ {
				buf = bj.ArrayGetElem(i).extractTo(buf, pathExpr, dup, one)
			}
		} else if bj.TypeCode == JSONTypeCodeObject {
			elemCount := bj.GetElemCount()
			for i := 0; i < elemCount && !jsonFinished(buf, one); i++ {
				buf = bj.objectGetVal(i).extractTo(buf, pathExpr, dup, one)
			}
		}
	}
	return buf
}
// jsonFinished reports whether an extraction can stop early: in "one" mode
// a single collected match is enough.
func jsonFinished(buf []BinaryJSON, one bool) bool {
	if !one {
		return false
	}
	return len(buf) > 0
}
// objectSearchKey binary-searches the object's sorted keys for key and
// returns the matching value, or false when the key is absent.
func (bj BinaryJSON) objectSearchKey(key []byte) (BinaryJSON, bool) {
	elemCount := bj.GetElemCount()
	idx := sort.Search(elemCount, func(i int) bool {
		return bytes.Compare(bj.objectGetKey(i), key) >= 0
	})
	if idx < elemCount && bytes.Equal(bj.objectGetKey(idx), key) {
		return bj.objectGetVal(idx), true
	}
	return BinaryJSON{}, false
}
// buildBinaryJSONArray serializes elems into one binary JSON array value:
// a header, a value-entry table, then the non-literal element payloads
// (literals fit entirely inside their value entry).
func buildBinaryJSONArray(elems []BinaryJSON) BinaryJSON {
	totalSize := headerSize + len(elems)*valEntrySize
	for _, elem := range elems {
		if elem.TypeCode != JSONTypeCodeLiteral {
			totalSize += len(elem.Value)
		}
	}
	// Length covers header + entry table; capacity covers the payloads too.
	buf := make([]byte, headerSize+len(elems)*valEntrySize, totalSize)
	jsonEndian.PutUint32(buf, uint32(len(elems)))
	jsonEndian.PutUint32(buf[dataSizeOff:], uint32(totalSize))
	buf = buildBinaryJSONElements(buf, headerSize, elems)
	return BinaryJSON{TypeCode: JSONTypeCodeArray, Value: buf}
}
// buildBinaryJSONElements fills the value-entry table starting at entryStart
// and appends non-literal element payloads to buf. A literal is stored
// inline in its entry; any other entry records the offset where its payload
// was appended.
func buildBinaryJSONElements(buf []byte, entryStart int, elems []BinaryJSON) []byte {
	for i, elem := range elems {
		buf[entryStart+i*valEntrySize] = elem.TypeCode
		if elem.TypeCode == JSONTypeCodeLiteral {
			buf[entryStart+i*valEntrySize+valTypeSize] = elem.Value[0]
		} else {
			// The payload offset equals the current buffer length, since the
			// document starts at buf[0] here.
			jsonEndian.PutUint32(buf[entryStart+i*valEntrySize+valTypeSize:], uint32(len(buf)))
			buf = append(buf, elem.Value...)
		}
	}
	return buf
}
// buildBinaryJSONObject serializes parallel keys/elems slices into one
// binary JSON object value: header, key-entry table, value-entry table,
// key bytes, then non-literal value payloads. Callers are expected to pass
// keys in sorted order; a key longer than 65535 bytes is rejected because
// key lengths are stored as uint16.
func buildBinaryJSONObject(keys [][]byte, elems []BinaryJSON) (BinaryJSON, error) {
	totalSize := headerSize + len(elems)*(keyEntrySize+valEntrySize)
	for i, elem := range elems {
		if elem.TypeCode != JSONTypeCodeLiteral {
			totalSize += len(elem.Value)
		}
		totalSize += len(keys[i])
	}
	buf := make([]byte, headerSize+len(elems)*(keyEntrySize+valEntrySize), totalSize)
	jsonEndian.PutUint32(buf, uint32(len(elems)))
	jsonEndian.PutUint32(buf[dataSizeOff:], uint32(totalSize))
	for i, key := range keys {
		if len(key) > math.MaxUint16 {
			return BinaryJSON{}, ErrJSONObjectKeyTooLong
		}
		// Each key entry stores the key's offset and length.
		jsonEndian.PutUint32(buf[headerSize+i*keyEntrySize:], uint32(len(buf)))
		jsonEndian.PutUint16(buf[headerSize+i*keyEntrySize+keyLenOff:], uint16(len(key)))
		buf = append(buf, key...)
	}
	entryStart := headerSize + len(elems)*keyEntrySize
	buf = buildBinaryJSONElements(buf, entryStart, elems)
	return BinaryJSON{TypeCode: JSONTypeCodeObject, Value: buf}, nil
}
// Modify modifies a JSON object by insert, replace or set.
// All path expressions cannot contain * or ** wildcard.
// If any error occurs, the input won't be changed.
func (bj BinaryJSON) Modify(pathExprList []JSONPathExpression, values []BinaryJSON, mt JSONModifyType) (retj BinaryJSON, err error) {
	if len(pathExprList) != len(values) {
		// TODO: should return 1582(42000)
		return retj, errors.New("Incorrect parameter count")
	}
	for _, pathExpr := range pathExprList {
		if pathExpr.flags.containsAnyAsterisk() || pathExpr.flags.containsAnyRange() {
			// TODO: should return 3149(42000)
			return retj, errors.New("Invalid path expression")
		}
	}
	// Apply the modifications one path at a time; each step rebuilds the
	// whole document through a fresh binaryModifier.
	for i := 0; i < len(pathExprList); i++ {
		pathExpr, value := pathExprList[i], values[i]
		modifier := &binaryModifier{bj: bj}
		switch mt {
		case JSONModifyInsert:
			bj = modifier.insert(pathExpr, value)
		case JSONModifyReplace:
			bj = modifier.replace(pathExpr, value)
		case JSONModifySet:
			bj = modifier.set(pathExpr, value)
		}
		if modifier.err != nil {
			return BinaryJSON{}, modifier.err
		}
	}
	// Reject documents that became too deeply nested.
	if bj.GetElemDepth()-1 > maxJSONDepth {
		return bj, ErrJSONDocumentTooDeep
	}
	return bj, nil
}
// ArrayInsert insert a BinaryJSON into the given array cell.
// All path expressions cannot contain * or ** wildcard.
// If any error occurs, the input won't be changed.
func (bj BinaryJSON) ArrayInsert(pathExpr JSONPathExpression, value BinaryJSON) (res BinaryJSON, err error) {
	// Check the path is a index
	if len(pathExpr.legs) < 1 {
		return bj, ErrInvalidJSONPathArrayCell
	}
	parentPath, lastLeg := pathExpr.popOneLastLeg()
	if lastLeg.typ != jsonPathLegArraySelection {
		return bj, ErrInvalidJSONPathArrayCell
	}
	// Find the target array
	obj, exists := bj.Extract([]JSONPathExpression{parentPath})
	if !exists || obj.TypeCode != JSONTypeCodeArray {
		// Missing or non-array target: return the input unchanged.
		return bj, nil
	}
	idx := 0
	switch selection := lastLeg.arraySelection.(type) {
	case jsonPathArraySelectionIndex:
		idx = selection.index.getIndexFromStart(obj)
	default:
		// Ranges and asterisks do not name a single cell.
		return bj, ErrInvalidJSONPathArrayCell
	}
	count := obj.GetElemCount()
	// Clamp: inserting past the end appends.
	if idx >= count {
		idx = count
	}
	// Insert into the array
	newArray := make([]BinaryJSON, 0, count+1)
	for i := 0; i < idx; i++ {
		elem := obj.ArrayGetElem(i)
		newArray = append(newArray, elem)
	}
	newArray = append(newArray, value)
	for i := idx; i < count; i++ {
		elem := obj.ArrayGetElem(i)
		newArray = append(newArray, elem)
	}
	// Splice the rebuilt array back into the document.
	obj = buildBinaryJSONArray(newArray)
	bj, err = bj.Modify([]JSONPathExpression{parentPath}, []BinaryJSON{obj}, JSONModifySet)
	if err != nil {
		return bj, err
	}
	return bj, nil
}
// Remove removes the elements indicated by pathExprList from JSON.
// Empty paths and paths containing wildcards or ranges are rejected.
func (bj BinaryJSON) Remove(pathExprList []JSONPathExpression) (BinaryJSON, error) {
	for _, pathExpr := range pathExprList {
		if len(pathExpr.legs) == 0 {
			// TODO: should return 3153(42000)
			return bj, errors.New("Invalid path expression")
		}
		if pathExpr.flags.containsAnyAsterisk() || pathExpr.flags.containsAnyRange() {
			// TODO: should return 3149(42000)
			return bj, errors.New("Invalid path expression")
		}
		modifier := &binaryModifier{bj: bj}
		bj = modifier.remove(pathExpr)
		if modifier.err != nil {
			return BinaryJSON{}, modifier.err
		}
	}
	return bj, nil
}
// binaryModifier rewrites one value inside a BinaryJSON document.
// modifyPtr identifies the target node by the address of its first value
// byte (pointer identity within bj.Value), and rebuild() produces a new
// document with modifyValue spliced in at that position.
type binaryModifier struct {
	bj          BinaryJSON
	modifyPtr   *byte      // address of the target node's first value byte
	modifyValue BinaryJSON // replacement value for the target node
	err         error      // first error encountered; checked by callers
}
// set replaces the value at path if it exists, otherwise inserts it into
// its parent (JSON_SET semantics). On error it returns the zero value and
// records the error in bm.err.
func (bm *binaryModifier) set(path JSONPathExpression, newBj BinaryJSON) BinaryJSON {
	result := bm.bj.extractOne(path)
	if len(result) > 0 {
		// The extracted value aliases bm.bj.Value, so the address of its
		// first byte marks the splice position for rebuild.
		bm.modifyPtr = &result[0].Value[0]
		bm.modifyValue = newBj
		return bm.rebuild()
	}
	bm.doInsert(path, newBj)
	if bm.err != nil {
		return BinaryJSON{}
	}
	return bm.rebuild()
}
// replace overwrites the value at path (JSON_REPLACE semantics); when the
// path matches nothing the document is returned unchanged.
func (bm *binaryModifier) replace(path JSONPathExpression, newBj BinaryJSON) BinaryJSON {
	result := bm.bj.extractOne(path)
	if len(result) == 0 {
		return bm.bj
	}
	bm.modifyPtr = &result[0].Value[0]
	bm.modifyValue = newBj
	return bm.rebuild()
}
// insert adds a value at path only if nothing exists there yet
// (JSON_INSERT semantics); an existing value is left untouched.
func (bm *binaryModifier) insert(path JSONPathExpression, newBj BinaryJSON) BinaryJSON {
	result := bm.bj.extractOne(path)
	if len(result) > 0 {
		return bm.bj
	}
	bm.doInsert(path, newBj)
	if bm.err != nil {
		return BinaryJSON{}
	}
	return bm.rebuild()
}
// doInsert inserts the newBj to its parent, and builds the new parent.
// For an array-selection leg the parent is appended to (or auto-wrapped
// into a two-element array when it isn't one); for a key leg the new member
// is spliced in at its sorted key position.
func (bm *binaryModifier) doInsert(path JSONPathExpression, newBj BinaryJSON) {
	parentPath, lastLeg := path.popOneLastLeg()
	result := bm.bj.extractOne(parentPath)
	if len(result) == 0 {
		// No parent matched: nothing to insert into.
		return
	}
	parentBj := result[0]
	if lastLeg.typ == jsonPathLegArraySelection {
		bm.modifyPtr = &parentBj.Value[0]
		if parentBj.TypeCode != JSONTypeCodeArray {
			// Auto-wrap a non-array parent: [parent, newBj].
			bm.modifyValue = buildBinaryJSONArray([]BinaryJSON{parentBj, newBj})
			return
		}
		elemCount := parentBj.GetElemCount()
		elems := make([]BinaryJSON, 0, elemCount+1)
		for i := 0; i < elemCount; i++ {
			elems = append(elems, parentBj.ArrayGetElem(i))
		}
		elems = append(elems, newBj)
		bm.modifyValue = buildBinaryJSONArray(elems)
		return
	}
	if parentBj.TypeCode != JSONTypeCodeObject {
		return
	}
	bm.modifyPtr = &parentBj.Value[0]
	elemCount := parentBj.GetElemCount()
	insertKey := hack.Slice(lastLeg.dotKey)
	// Binary-search the sorted position for the new key.
	insertIdx := sort.Search(elemCount, func(i int) bool {
		return bytes.Compare(parentBj.objectGetKey(i), insertKey) >= 0
	})
	keys := make([][]byte, 0, elemCount+1)
	elems := make([]BinaryJSON, 0, elemCount+1)
	for i := 0; i < elemCount; i++ {
		if i == insertIdx {
			keys = append(keys, insertKey)
			elems = append(elems, newBj)
		}
		keys = append(keys, parentBj.objectGetKey(i))
		elems = append(elems, parentBj.objectGetVal(i))
	}
	// The new key sorts after every existing key.
	if insertIdx == elemCount {
		keys = append(keys, insertKey)
		elems = append(elems, newBj)
	}
	bm.modifyValue, bm.err = buildBinaryJSONObject(keys, elems)
}
// remove deletes the value at path and returns the rebuilt document, or the
// original document unchanged when the path matches nothing.
func (bm *binaryModifier) remove(path JSONPathExpression) BinaryJSON {
	result := bm.bj.extractOne(path)
	if len(result) == 0 {
		return bm.bj
	}
	bm.doRemove(path)
	if bm.err != nil {
		return BinaryJSON{}
	}
	return bm.rebuild()
}
// doRemove locates the parent of the value at path and prepares a rebuilt
// parent without that element (array index) or member (object key).
func (bm *binaryModifier) doRemove(path JSONPathExpression) {
	parentPath, lastLeg := path.popOneLastLeg()
	result := bm.bj.extractOne(parentPath)
	if len(result) == 0 {
		return
	}
	parentBj := result[0]
	if lastLeg.typ == jsonPathLegArraySelection {
		if parentBj.TypeCode != JSONTypeCodeArray {
			return
		}
		// Only a single-index selection can be removed.
		selectionIndex, ok := lastLeg.arraySelection.(jsonPathArraySelectionIndex)
		if !ok {
			return
		}
		idx := selectionIndex.index.getIndexFromStart(parentBj)
		bm.modifyPtr = &parentBj.Value[0]
		elemCount := parentBj.GetElemCount()
		elems := make([]BinaryJSON, 0, elemCount-1)
		for i := 0; i < elemCount; i++ {
			if i != idx {
				elems = append(elems, parentBj.ArrayGetElem(i))
			}
		}
		bm.modifyValue = buildBinaryJSONArray(elems)
		return
	}
	if parentBj.TypeCode != JSONTypeCodeObject {
		return
	}
	bm.modifyPtr = &parentBj.Value[0]
	elemCount := parentBj.GetElemCount()
	removeKey := hack.Slice(lastLeg.dotKey)
	keys := make([][]byte, 0, elemCount+1)
	elems := make([]BinaryJSON, 0, elemCount+1)
	// Keep every member whose key differs from the one being removed.
	for i := 0; i < elemCount; i++ {
		key := parentBj.objectGetKey(i)
		if !bytes.Equal(key, removeKey) {
			keys = append(keys, parentBj.objectGetKey(i))
			elems = append(elems, parentBj.objectGetVal(i))
		}
	}
	bm.modifyValue, bm.err = buildBinaryJSONObject(keys, elems)
}
// rebuild merges the old and the modified JSON into a new BinaryJSON
func (bm *binaryModifier) rebuild() BinaryJSON {
	// Pre-size for the worst case: whole old document plus the new value.
	buf := make([]byte, 0, len(bm.bj.Value)+len(bm.modifyValue.Value))
	value, tpCode := bm.rebuildTo(buf)
	return BinaryJSON{TypeCode: tpCode, Value: value}
}
// rebuildTo appends a rebuilt copy of bm.bj to buf, emitting bm.modifyValue
// instead of the original node once the recursion reaches the node whose
// first value byte's address equals bm.modifyPtr. It returns the extended
// buffer and the type code of the emitted value.
// bm.bj is reused as the recursion cursor, so it is overwritten as children
// are visited.
func (bm *binaryModifier) rebuildTo(buf []byte) ([]byte, JSONTypeCode) {
	if bm.modifyPtr == &bm.bj.Value[0] {
		// This is the node being replaced; clear the pointer so the rest of
		// the document is copied verbatim.
		bm.modifyPtr = nil
		return append(buf, bm.modifyValue.Value...), bm.modifyValue.TypeCode
	} else if bm.modifyPtr == nil {
		// Modification already emitted: copy unchanged.
		return append(buf, bm.bj.Value...), bm.bj.TypeCode
	}
	bj := bm.bj
	if bj.TypeCode != JSONTypeCodeArray && bj.TypeCode != JSONTypeCodeObject {
		// Scalars cannot contain the target node.
		return append(buf, bj.Value...), bj.TypeCode
	}
	docOff := len(buf)
	elemCount := bj.GetElemCount()
	var valEntryStart int
	if bj.TypeCode == JSONTypeCodeArray {
		copySize := headerSize + elemCount*valEntrySize
		valEntryStart = headerSize
		buf = append(buf, bj.Value[:copySize]...)
	} else {
		copySize := headerSize + elemCount*(keyEntrySize+valEntrySize)
		valEntryStart = headerSize + elemCount*keyEntrySize
		buf = append(buf, bj.Value[:copySize]...)
		if elemCount > 0 {
			// Keys are stored back to back, so copy them as one span from the
			// first key's offset to the end of the last key.
			firstKeyOff := int(jsonEndian.Uint32(bj.Value[headerSize:]))
			lastKeyOff := int(jsonEndian.Uint32(bj.Value[headerSize+(elemCount-1)*keyEntrySize:]))
			lastKeyLen := int(jsonEndian.Uint16(bj.Value[headerSize+(elemCount-1)*keyEntrySize+keyLenOff:]))
			buf = append(buf, bj.Value[firstKeyOff:lastKeyOff+lastKeyLen]...)
		}
	}
	for i := 0; i < elemCount; i++ {
		valEntryOff := valEntryStart + i*valEntrySize
		elem := bj.valEntryGet(valEntryOff)
		bm.bj = elem
		var tpCode JSONTypeCode
		valOff := len(buf) - docOff
		buf, tpCode = bm.rebuildTo(buf)
		buf[docOff+valEntryOff] = tpCode
		if tpCode == JSONTypeCodeLiteral {
			// A literal lives inside its value entry: fold the single byte the
			// recursive call appended back into the entry and truncate it off.
			lastIdx := len(buf) - 1
			jsonEndian.PutUint32(buf[docOff+valEntryOff+valTypeSize:], uint32(buf[lastIdx]))
			buf = buf[:lastIdx]
		} else {
			jsonEndian.PutUint32(buf[docOff+valEntryOff+valTypeSize:], uint32(valOff))
		}
	}
	// Patch the document size now that the real length is known.
	jsonEndian.PutUint32(buf[docOff+dataSizeOff:], uint32(len(buf)-docOff))
	return buf, bj.TypeCode
}
// floatEpsilon is the acceptable error quantity when comparing two float numbers.
const floatEpsilon = 1.e-8

// compareFloat64PrecisionLoss returns an integer comparing the float64 x to y,
// allowing precision loss: values within floatEpsilon compare equal.
func compareFloat64PrecisionLoss(x, y float64) int {
	diff := x - y
	switch {
	case diff < floatEpsilon && -diff < floatEpsilon:
		return 0
	case diff < 0:
		return -1
	default:
		return 1
	}
}

// compareInt64 three-way compares two signed integers.
func compareInt64(x int64, y int64) int {
	switch {
	case x < y:
		return -1
	case x > y:
		return 1
	default:
		return 0
	}
}

// compareFloat64 three-way compares two floats exactly (no epsilon).
func compareFloat64(x float64, y float64) int {
	switch {
	case x < y:
		return -1
	case x > y:
		return 1
	default:
		return 0
	}
}

// compareUint64 three-way compares two unsigned integers.
func compareUint64(x uint64, y uint64) int {
	switch {
	case x < y:
		return -1
	case x > y:
		return 1
	default:
		return 0
	}
}

// compareInt64Uint64 compares a signed with an unsigned integer; any
// negative x is smaller than every uint64.
func compareInt64Uint64(x int64, y uint64) int {
	if x < 0 {
		return -1
	}
	return compareUint64(uint64(x), y)
}

// compareFloat64Int64 compares a float with a signed integer, allowing
// precision loss.
func compareFloat64Int64(x float64, y int64) int {
	return compareFloat64PrecisionLoss(x, float64(y))
}

// compareFloat64Uint64 compares a float with an unsigned integer, allowing
// precision loss.
func compareFloat64Uint64(x float64, y uint64) int {
	return compareFloat64PrecisionLoss(x, float64(y))
}
// CompareBinaryJSON compares two binary json objects. Returns -1 if left < right,
// 0 if left == right, else returns 1.
//
// Values in different precedence classes compare by precedence alone; values
// in the same class are compared by the concrete type codes below.
func CompareBinaryJSON(left, right BinaryJSON) int {
	precedence1 := jsonTypePrecedences[left.Type()]
	precedence2 := jsonTypePrecedences[right.Type()]
	var cmp int
	if precedence1 == precedence2 {
		if precedence1 == jsonTypePrecedences["NULL"] {
			// for JSON null.
			cmp = 0
		}
		// NOTE(review): there is no early return above, so two nulls also
		// enter the Literal case below; since both carry the same literal
		// byte that still yields 0.
		switch left.TypeCode {
		case JSONTypeCodeLiteral:
			// false is less than true.
			cmp = int(right.Value[0]) - int(left.Value[0])
		case JSONTypeCodeInt64:
			// Cross-type numeric comparisons delegate to sign-aware helpers;
			// some are negated because the helper's argument order is swapped.
			switch right.TypeCode {
			case JSONTypeCodeInt64:
				cmp = compareInt64(left.GetInt64(), right.GetInt64())
			case JSONTypeCodeUint64:
				cmp = compareInt64Uint64(left.GetInt64(), right.GetUint64())
			case JSONTypeCodeFloat64:
				cmp = -compareFloat64Int64(right.GetFloat64(), left.GetInt64())
			}
		case JSONTypeCodeUint64:
			switch right.TypeCode {
			case JSONTypeCodeInt64:
				cmp = -compareInt64Uint64(right.GetInt64(), left.GetUint64())
			case JSONTypeCodeUint64:
				cmp = compareUint64(left.GetUint64(), right.GetUint64())
			case JSONTypeCodeFloat64:
				cmp = -compareFloat64Uint64(right.GetFloat64(), left.GetUint64())
			}
		case JSONTypeCodeFloat64:
			switch right.TypeCode {
			case JSONTypeCodeInt64:
				cmp = compareFloat64Int64(left.GetFloat64(), right.GetInt64())
			case JSONTypeCodeUint64:
				cmp = compareFloat64Uint64(left.GetFloat64(), right.GetUint64())
			case JSONTypeCodeFloat64:
				cmp = compareFloat64(left.GetFloat64(), right.GetFloat64())
			}
		case JSONTypeCodeString:
			cmp = bytes.Compare(left.GetString(), right.GetString())
		case JSONTypeCodeArray:
			// Element-wise comparison; on a common-prefix tie the shorter
			// array is smaller.
			leftCount := left.GetElemCount()
			rightCount := right.GetElemCount()
			for i := 0; i < leftCount && i < rightCount; i++ {
				elem1 := left.ArrayGetElem(i)
				elem2 := right.ArrayGetElem(i)
				cmp = CompareBinaryJSON(elem1, elem2)
				if cmp != 0 {
					return cmp
				}
			}
			cmp = leftCount - rightCount
		case JSONTypeCodeObject:
			// reference:
			// https://github.com/mysql/mysql-server/blob/ee4455a33b10f1b1886044322e4893f587b319ed/sql/json_dom.cc#L2561
			// Objects compare by element count first, then by key/value pairs
			// in stored (sorted) order.
			leftCount, rightCount := left.GetElemCount(), right.GetElemCount()
			// NOTE: this `cmp` shadows the outer one; every path that sets it
			// to a non-zero value returns directly, and equal objects fall
			// through to return the outer cmp, which is still 0.
			cmp := compareInt64(int64(leftCount), int64(rightCount))
			if cmp != 0 {
				return cmp
			}
			for i := 0; i < leftCount; i++ {
				leftKey, rightKey := left.objectGetKey(i), right.objectGetKey(i)
				cmp = bytes.Compare(leftKey, rightKey)
				if cmp != 0 {
					return cmp
				}
				cmp = CompareBinaryJSON(left.objectGetVal(i), right.objectGetVal(i))
				if cmp != 0 {
					return cmp
				}
			}
		case JSONTypeCodeOpaque:
			cmp = bytes.Compare(left.GetOpaque().Buf, right.GetOpaque().Buf)
		case JSONTypeCodeDate, JSONTypeCodeDatetime, JSONTypeCodeTimestamp:
			// the jsonTypePrecedences guarantees that the DATE is only
			// comparable with the DATE, and the DATETIME and TIMESTAMP will compare with each other
			// as the `Type()` of `JSONTypeCodeTimestamp` is also `DATETIME`.
			leftTime := left.GetTime()
			rightTime := right.GetTime()
			cmp = leftTime.Compare(rightTime)
		case JSONTypeCodeDuration:
			leftDuration := left.GetDuration()
			rightDuration := right.GetDuration()
			cmp = leftDuration.Compare(rightDuration)
		}
	} else {
		// Different precedence classes: normalize the difference to -1/0/1.
		cmp = precedence1 - precedence2
		if cmp > 0 {
			cmp = 1
		} else if cmp < 0 {
			cmp = -1
		}
	}
	return cmp
}
// MergePatchBinaryJSON implements RFC7396
// https://datatracker.ietf.org/doc/html/rfc7396
// It applies each element of bjs in order as a merge patch onto the
// preceding result and returns the final document.
func MergePatchBinaryJSON(bjs []*BinaryJSON) (*BinaryJSON, error) {
	var err error
	length := len(bjs)
	// according to the implements of RFC7396
	// when the last item is not object
	// we can return the last item directly
	// (anything before the last non-object patch can never influence the
	// result, so the slice is truncated to start at that item).
	for i := length - 1; i >= 0; i-- {
		if bjs[i] == nil || bjs[i].TypeCode != JSONTypeCodeObject {
			bjs = bjs[i:]
			break
		}
	}
	// NOTE(review): assumes bjs is non-empty; an empty slice panics here —
	// confirm that all callers guarantee at least one element.
	target := bjs[0]
	for _, patch := range bjs[1:] {
		target, err = mergePatchBinaryJSON(target, patch)
		if err != nil {
			return nil, err
		}
	}
	return target, nil
}
// mergePatchBinaryJSON applies one RFC7396 merge patch to target.
// A nil patch (JSON null) deletes the target entirely; a non-object patch
// replaces the target; an object patch is merged member by member, where a
// null-valued member deletes the corresponding key.
func mergePatchBinaryJSON(target, patch *BinaryJSON) (result *BinaryJSON, err error) {
	if patch == nil {
		return nil, nil
	}
	if patch.TypeCode == JSONTypeCodeObject {
		if target == nil {
			return nil, nil
		}
		// Seed the result with target's members only when target is itself an
		// object; otherwise the patch effectively replaces it.
		keyValMap := make(map[string]BinaryJSON)
		if target.TypeCode == JSONTypeCodeObject {
			elemCount := target.GetElemCount()
			for i := 0; i < elemCount; i++ {
				key := target.objectGetKey(i)
				val := target.objectGetVal(i)
				keyValMap[string(key)] = val
			}
		}
		var tmp *BinaryJSON
		elemCount := patch.GetElemCount()
		for i := 0; i < elemCount; i++ {
			key := patch.objectGetKey(i)
			val := patch.objectGetVal(i)
			k := string(key)
			targetKV, exists := keyValMap[k]
			if val.TypeCode == JSONTypeCodeLiteral && val.Value[0] == JSONLiteralNil {
				// A null patch value removes the member (RFC7396 semantics).
				if exists {
					delete(keyValMap, k)
				}
			} else {
				// Recurse. When the key is absent, targetKV is the zero
				// BinaryJSON (not an object), so the patch value wins.
				tmp, err = mergePatchBinaryJSON(&targetKV, &val)
				if err != nil {
					return result, err
				}
				keyValMap[k] = *tmp
			}
		}
		// Rebuild the object with keys sorted, as the binary format requires.
		length := len(keyValMap)
		keys := make([][]byte, 0, length)
		for key := range keyValMap {
			keys = append(keys, []byte(key))
		}
		slices.SortFunc(keys, bytes.Compare)
		length = len(keys)
		values := make([]BinaryJSON, 0, len(keys))
		for i := 0; i < length; i++ {
			values = append(values, keyValMap[string(keys[i])])
		}
		binaryObject, e := buildBinaryJSONObject(keys, values)
		if e != nil {
			return nil, e
		}
		return &binaryObject, nil
	}
	// Non-object patch: replace the target wholesale.
	return patch, nil
}
// MergeBinaryJSON merges multiple BinaryJSON into one according the following rules:
// 1) adjacent arrays are merged to a single array;
// 2) adjacent object are merged to a single object;
// 3) a scalar value is autowrapped as an array before merge;
// 4) an adjacent array and object are merged by autowrapping the object as an array.
func MergeBinaryJSON(bjs []BinaryJSON) BinaryJSON {
	results := make([]BinaryJSON, 0, len(bjs))
	for remain := bjs; len(remain) > 0; {
		if remain[0].TypeCode == JSONTypeCodeObject {
			// Collapse each run of adjacent objects into one merged object.
			var objects []BinaryJSON
			objects, remain = getAdjacentObjects(remain)
			results = append(results, mergeBinaryObject(objects))
		} else {
			results = append(results, remain[0])
			remain = remain[1:]
		}
	}
	// A single result stands alone; otherwise everything is folded into
	// one array (scalars autowrapped, arrays spliced).
	if len(results) == 1 {
		return results[0]
	}
	return mergeBinaryArray(results)
}
// getAdjacentObjects splits bjs into its leading run of JSON objects and the
// remaining values.
func getAdjacentObjects(bjs []BinaryJSON) (objects, remain []BinaryJSON) {
	for idx, bj := range bjs {
		if bj.TypeCode != JSONTypeCodeObject {
			return bjs[:idx], bjs[idx:]
		}
	}
	// Every element was an object.
	return bjs, nil
}
// mergeBinaryArray flattens elems into a single array: array elements are
// spliced in, any other value is appended as-is (autowrapping).
func mergeBinaryArray(elems []BinaryJSON) BinaryJSON {
	flattened := make([]BinaryJSON, 0, len(elems))
	for _, elem := range elems {
		if elem.TypeCode == JSONTypeCodeArray {
			for j, n := 0, elem.GetElemCount(); j < n; j++ {
				flattened = append(flattened, elem.ArrayGetElem(j))
			}
		} else {
			flattened = append(flattened, elem)
		}
	}
	return buildBinaryJSONArray(flattened)
}
// mergeBinaryObject merges objects key by key: values stored under the same
// key are themselves merged with MergeBinaryJSON, and the resulting keys are
// emitted in sorted order as the binary format requires.
func mergeBinaryObject(objects []BinaryJSON) BinaryJSON {
	keyValMap := make(map[string]BinaryJSON)
	var keys [][]byte
	for _, obj := range objects {
		for i, n := 0, obj.GetElemCount(); i < n; i++ {
			key := obj.objectGetKey(i)
			val := obj.objectGetVal(i)
			if old, ok := keyValMap[string(key)]; ok {
				// Duplicate key: merge the existing value with the new one.
				keyValMap[string(key)] = MergeBinaryJSON([]BinaryJSON{old, val})
			} else {
				keyValMap[string(key)] = val
				keys = append(keys, key)
			}
		}
	}
	slices.SortFunc(keys, bytes.Compare)
	values := make([]BinaryJSON, 0, len(keys))
	for _, key := range keys {
		values = append(values, keyValMap[string(key)])
	}
	binaryObject, err := buildBinaryJSONObject(keys, values)
	if err != nil {
		panic("mergeBinaryObject should never panic, please contact the TiDB team for help")
	}
	return binaryObject
}
// PeekBytesAsJSON trys to peek some bytes from b, until
// we can deserialize a JSON from those bytes.
// It returns the number of leading bytes (type-code byte included) that make
// up exactly one binary JSON value, based on the type code at b[0].
func PeekBytesAsJSON(b []byte) (n int, err error) {
	if len(b) <= 0 {
		err = errors.New("Cant peek from empty bytes")
		return
	}
	switch c := b[0]; c {
	case JSONTypeCodeObject, JSONTypeCodeArray:
		// Containers embed their total byte size in the header. If the
		// buffer is too short to hold a header, fall through to the
		// invalid-bytes error below.
		if len(b) >= valTypeSize+headerSize {
			size := jsonEndian.Uint32(b[valTypeSize+dataSizeOff:])
			n = valTypeSize + int(size)
			return
		}
	case JSONTypeCodeString:
		// Strings: uvarint length prefix followed by that many bytes.
		strLen, lenLen := binary.Uvarint(b[valTypeSize:])
		return valTypeSize + int(strLen) + lenLen, nil
	case JSONTypeCodeInt64, JSONTypeCodeUint64, JSONTypeCodeFloat64, JSONTypeCodeDate, JSONTypeCodeDatetime, JSONTypeCodeTimestamp:
		// Fixed 8-byte payloads.
		n = valTypeSize + 8
		return
	case JSONTypeCodeLiteral:
		// Single literal byte (null/true/false).
		n = valTypeSize + 1
		return
	case JSONTypeCodeOpaque:
		// Opaque: one inner type byte, then uvarint length, then payload.
		bufLen, lenLen := binary.Uvarint(b[valTypeSize+1:])
		return valTypeSize + 1 + int(bufLen) + lenLen, nil
	case JSONTypeCodeDuration:
		n = valTypeSize + 12
		return
	}
	err = errors.New("Invalid JSON bytes")
	return
}
// ContainsBinaryJSON check whether JSON document contains specific target according the following rules:
// 1) object contains a target object if and only if every key is contained in source object and the value associated with the target key is contained in the value associated with the source key;
// 2) array contains a target nonarray if and only if the target is contained in some element of the array;
// 3) array contains a target array if and only if every element is contained in some element of the array;
// 4) scalar contains a target scalar if and only if they are comparable and are equal;
func ContainsBinaryJSON(obj, target BinaryJSON) bool {
	switch obj.TypeCode {
	case JSONTypeCodeObject:
		// Rule 1: only another object can be contained in an object.
		if target.TypeCode != JSONTypeCodeObject {
			return false
		}
		for i, n := 0, target.GetElemCount(); i < n; i++ {
			exp, exists := obj.objectSearchKey(target.objectGetKey(i))
			if !exists || !ContainsBinaryJSON(exp, target.objectGetVal(i)) {
				return false
			}
		}
		return true
	case JSONTypeCodeArray:
		if target.TypeCode == JSONTypeCodeArray {
			// Rule 3: every target element must be contained somewhere.
			for i, n := 0, target.GetElemCount(); i < n; i++ {
				if !ContainsBinaryJSON(obj, target.ArrayGetElem(i)) {
					return false
				}
			}
			return true
		}
		// Rule 2: a non-array target must match some element.
		for i, n := 0, obj.GetElemCount(); i < n; i++ {
			if ContainsBinaryJSON(obj.ArrayGetElem(i), target) {
				return true
			}
		}
		return false
	default:
		// Rule 4: scalars contain each other iff they compare equal.
		return CompareBinaryJSON(obj, target) == 0
	}
}
// OverlapsBinaryJSON is similar with ContainsBinaryJSON, but it checks the `OR` relationship.
// It reports whether obj and target share at least one equal element/member.
func OverlapsBinaryJSON(obj, target BinaryJSON) bool {
	// Normalize: if exactly one side is an array, make it `obj`; overlap is
	// symmetric for the array-vs-nonarray case.
	if obj.TypeCode != JSONTypeCodeArray && target.TypeCode == JSONTypeCodeArray {
		obj, target = target, obj
	}
	switch obj.TypeCode {
	case JSONTypeCodeObject:
		// Object vs object: overlap if any key exists in both with an
		// equal value. Object vs non-object never overlaps.
		if target.TypeCode == JSONTypeCodeObject {
			elemCount := target.GetElemCount()
			for i := 0; i < elemCount; i++ {
				key := target.objectGetKey(i)
				val := target.objectGetVal(i)
				if exp, exists := obj.objectSearchKey(key); exists && CompareBinaryJSON(exp, val) == 0 {
					return true
				}
			}
		}
		return false
	case JSONTypeCodeArray:
		if target.TypeCode == JSONTypeCodeArray {
			// Array vs array: overlap if any pair of elements is equal.
			for i := 0; i < obj.GetElemCount(); i++ {
				o := obj.ArrayGetElem(i)
				for j := 0; j < target.GetElemCount(); j++ {
					if CompareBinaryJSON(o, target.ArrayGetElem(j)) == 0 {
						return true
					}
				}
			}
			return false
		}
		// Array vs non-array: overlap if the target equals some element.
		elemCount := obj.GetElemCount()
		for i := 0; i < elemCount; i++ {
			if CompareBinaryJSON(obj.ArrayGetElem(i), target) == 0 {
				return true
			}
		}
		return false
	default:
		// Scalar vs scalar: overlap iff equal.
		return CompareBinaryJSON(obj, target) == 0
	}
}
// GetElemDepth for JSON_DEPTH
// Returns the maximum depth of a JSON document
// rules referenced by MySQL JSON_DEPTH function
// [https://dev.mysql.com/doc/refman/5.7/en/json-attribute-functions.html#function_json-depth]
// 1) An empty array, empty object, or scalar value has depth 1.
// 2) A nonempty array containing only elements of depth 1 or nonempty object containing only member values of depth 1 has depth 2.
// 3) Otherwise, a JSON document has depth greater than 2.
// e.g. depth of '{}', '[]', 'true': 1
// e.g. depth of '[10, 20]', '[[], {}]': 2
// e.g. depth of '[10, {"a": 20}]': 3
func (bj BinaryJSON) GetElemDepth() int {
	switch bj.TypeCode {
	case JSONTypeCodeObject:
		// Depth of an object is one more than its deepest member value.
		deepest := 0
		for i, n := 0, bj.GetElemCount(); i < n; i++ {
			if d := bj.objectGetVal(i).GetElemDepth(); d > deepest {
				deepest = d
			}
		}
		return deepest + 1
	case JSONTypeCodeArray:
		// Depth of an array is one more than its deepest element.
		deepest := 0
		for i, n := 0, bj.GetElemCount(); i < n; i++ {
			if d := bj.ArrayGetElem(i).GetElemDepth(); d > deepest {
				deepest = d
			}
		}
		return deepest + 1
	default:
		// Scalars have depth 1.
		return 1
	}
}
// Search for JSON_Search
// rules referenced by MySQL JSON_SEARCH function
// [https://dev.mysql.com/doc/refman/5.7/en/json-search-functions.html#function_json-search]
//
// search is a SQL LIKE pattern compiled with the given escape byte; matching
// string values are reported by their path. containType selects whether the
// walk stops at the first match ("one") or collects all matches ("all").
// isNull is true when nothing matched.
func (bj BinaryJSON) Search(containType string, search string, escape byte, pathExpres []JSONPathExpression) (res BinaryJSON, isNull bool, err error) {
	if containType != JSONContainsPathOne && containType != JSONContainsPathAll {
		return res, true, ErrInvalidJSONPath
	}
	patChars, patTypes := stringutil.CompilePattern(search, escape)
	// Collected full paths of matching string values.
	result := make([]interface{}, 0)
	walkFn := func(fullpath JSONPathExpression, bj BinaryJSON) (stop bool, err error) {
		// Only string values participate in the LIKE match.
		if bj.TypeCode == JSONTypeCodeString && stringutil.DoMatch(string(bj.GetString()), patChars, patTypes) {
			result = append(result, fullpath.String())
			if containType == JSONContainsPathOne {
				// "one" mode: stop the walk after the first hit.
				return true, nil
			}
		}
		return false, nil
	}
	// Restrict the walk to the supplied paths, or traverse the whole document.
	if len(pathExpres) != 0 {
		err := bj.Walk(walkFn, pathExpres...)
		if err != nil {
			return res, true, err
		}
	} else {
		err := bj.Walk(walkFn)
		if err != nil {
			return res, true, err
		}
	}
	switch len(result) {
	case 0:
		// No match: SQL NULL.
		return res, true, nil
	case 1:
		// Single match: return the path itself rather than an array.
		return CreateBinaryJSON(result[0]), false, nil
	default:
		return CreateBinaryJSON(result), false, nil
	}
}
// extractCallbackFn the type of CALLBACK function for extractToCallback
type extractCallbackFn func(fullpath JSONPathExpression, bj BinaryJSON) (stop bool, err error)

// extractToCallback callback alternative of extractTo
//
// would be more effective when walk through the whole JSON is unnecessary
//
// NOTICE: path [0] & [*] for JSON object other than array is INVALID, which is different from extractTo.
//
// pathExpr is consumed one leg at a time; fullpath accumulates the concrete
// path (with wildcards resolved to indexes/keys) that is handed to callbackFn
// for every matched value. Returning stop=true from the callback aborts the
// traversal.
func (bj BinaryJSON) extractToCallback(pathExpr JSONPathExpression, callbackFn extractCallbackFn, fullpath JSONPathExpression) (stop bool, err error) {
	if len(pathExpr.legs) == 0 {
		// Path exhausted: the current value is a match.
		return callbackFn(fullpath, bj)
	}

	currentLeg, subPathExpr := pathExpr.popOneLeg()
	if currentLeg.typ == jsonPathLegArraySelection && bj.TypeCode == JSONTypeCodeArray {
		elemCount := bj.GetElemCount()
		switch selection := currentLeg.arraySelection.(type) {
		case jsonPathArraySelectionAsterisk:
			// [*]: recurse into every element, materializing its index in
			// the accumulated path.
			for i := 0; i < elemCount; i++ {
				// buf = bj.ArrayGetElem(i).extractTo(buf, subPathExpr)
				path := fullpath.pushBackOneArraySelectionLeg(jsonPathArraySelectionIndex{jsonPathArrayIndexFromStart(i)})
				stop, err = bj.ArrayGetElem(i).extractToCallback(subPathExpr, callbackFn, path)
				if stop || err != nil {
					return
				}
			}
		case jsonPathArraySelectionIndex:
			// [n] (possibly counted from the end): recurse only when the
			// resolved index is in range.
			idx := selection.index.getIndexFromStart(bj)
			if idx < elemCount && idx >= 0 {
				// buf = bj.ArrayGetElem(currentLeg.arraySelection).extractTo(buf, subPathExpr)
				path := fullpath.pushBackOneArraySelectionLeg(currentLeg.arraySelection)
				stop, err = bj.ArrayGetElem(idx).extractToCallback(subPathExpr, callbackFn, path)
				if stop || err != nil {
					return
				}
			}
		case jsonPathArraySelectionRange:
			// [m to n]: clamp the end to the last element, then recurse over
			// the inclusive range.
			start := selection.start.getIndexFromStart(bj)
			end := selection.end.getIndexFromStart(bj)
			if end >= elemCount {
				end = elemCount - 1
			}
			if start <= end && start >= 0 {
				for i := start; i <= end; i++ {
					path := fullpath.pushBackOneArraySelectionLeg(jsonPathArraySelectionIndex{jsonPathArrayIndexFromStart(i)})
					stop, err = bj.ArrayGetElem(i).extractToCallback(subPathExpr, callbackFn, path)
					if stop || err != nil {
						return
					}
				}
			}
		}
	} else if currentLeg.typ == jsonPathLegKey && bj.TypeCode == JSONTypeCodeObject {
		elemCount := bj.GetElemCount()
		if currentLeg.dotKey == "*" {
			// .*: recurse into every member value.
			for i := 0; i < elemCount; i++ {
				// buf = bj.objectGetVal(i).extractTo(buf, subPathExpr)
				path := fullpath.pushBackOneKeyLeg(string(bj.objectGetKey(i)))
				stop, err = bj.objectGetVal(i).extractToCallback(subPathExpr, callbackFn, path)
				if stop || err != nil {
					return
				}
			}
		} else {
			// .key: binary-search the member and recurse when present.
			child, ok := bj.objectSearchKey(hack.Slice(currentLeg.dotKey))
			if ok {
				// buf = child.extractTo(buf, subPathExpr)
				path := fullpath.pushBackOneKeyLeg(currentLeg.dotKey)
				stop, err = child.extractToCallback(subPathExpr, callbackFn, path)
				if stop || err != nil {
					return
				}
			}
		}
	} else if currentLeg.typ == jsonPathLegDoubleAsterisk {
		// **: try matching the rest of the path at the current node, then
		// keep the full pathExpr (including **) while descending so deeper
		// nodes are also considered.
		// buf = bj.extractTo(buf, subPathExpr)
		stop, err = bj.extractToCallback(subPathExpr, callbackFn, fullpath)
		if stop || err != nil {
			return
		}

		if bj.TypeCode == JSONTypeCodeArray {
			elemCount := bj.GetElemCount()
			for i := 0; i < elemCount; i++ {
				// buf = bj.ArrayGetElem(i).extractTo(buf, pathExpr)
				path := fullpath.pushBackOneArraySelectionLeg(jsonPathArraySelectionIndex{jsonPathArrayIndexFromStart(i)})
				stop, err = bj.ArrayGetElem(i).extractToCallback(pathExpr, callbackFn, path)
				if stop || err != nil {
					return
				}
			}
		} else if bj.TypeCode == JSONTypeCodeObject {
			elemCount := bj.GetElemCount()
			for i := 0; i < elemCount; i++ {
				// buf = bj.objectGetVal(i).extractTo(buf, pathExpr)
				path := fullpath.pushBackOneKeyLeg(string(bj.objectGetKey(i)))
				stop, err = bj.objectGetVal(i).extractToCallback(pathExpr, callbackFn, path)
				if stop || err != nil {
					return
				}
			}
		}
	}
	return false, nil
}
// BinaryJSONWalkFunc is used as callback function for BinaryJSON.Walk
type BinaryJSONWalkFunc func(fullpath JSONPathExpression, bj BinaryJSON) (stop bool, err error)

// Walk traverse BinaryJSON objects
// walkFn is invoked for every value reachable from bj, depth-first, with its
// full path. When pathExprList is given, only values matched by those path
// expressions (and their descendants) are visited. Each distinct path is
// visited at most once, even when several path expressions overlap.
func (bj BinaryJSON) Walk(walkFn BinaryJSONWalkFunc, pathExprList ...JSONPathExpression) (err error) {
	// Deduplicates visits when multiple path expressions reach the same node.
	pathSet := make(map[string]bool)

	var doWalk extractCallbackFn
	doWalk = func(fullpath JSONPathExpression, bj BinaryJSON) (stop bool, err error) {
		pathStr := fullpath.String()
		if _, ok := pathSet[pathStr]; ok {
			// Already visited via another path expression; skip silently.
			return false, nil
		}

		stop, err = walkFn(fullpath, bj)
		pathSet[pathStr] = true
		if stop || err != nil {
			return
		}

		// Recurse into containers, extending the path with the child's
		// index or key.
		if bj.TypeCode == JSONTypeCodeArray {
			elemCount := bj.GetElemCount()
			for i := 0; i < elemCount; i++ {
				path := fullpath.pushBackOneArraySelectionLeg(jsonPathArraySelectionIndex{jsonPathArrayIndexFromStart(i)})
				stop, err = doWalk(path, bj.ArrayGetElem(i))
				if stop || err != nil {
					return
				}
			}
		} else if bj.TypeCode == JSONTypeCodeObject {
			elemCount := bj.GetElemCount()
			for i := 0; i < elemCount; i++ {
				path := fullpath.pushBackOneKeyLeg(string(bj.objectGetKey(i)))
				stop, err = doWalk(path, bj.objectGetVal(i))
				if stop || err != nil {
					return
				}
			}
		}
		return false, nil
	}

	fullpath := JSONPathExpression{legs: make([]jsonPathLeg, 0, 32), flags: jsonPathExpressionFlag(0)}
	if len(pathExprList) > 0 {
		// Restricted walk: start from each matched subtree.
		for _, pathExpr := range pathExprList {
			var stop bool
			stop, err = bj.extractToCallback(pathExpr, doWalk, fullpath)
			if stop || err != nil {
				return err
			}
		}
	} else {
		// Unrestricted walk over the whole document.
		_, err = doWalk(fullpath, bj)
		if err != nil {
			return
		}
	}
	return nil
}
|
package ratecounter
import (
"strconv"
"time"
)
// An AvgRateCounter is a thread-safe counter which returns
// the ratio between the number of calls 'Incr' and the counter value in the last interval
type AvgRateCounter struct {
	hits     *RateCounter // counts how many times Incr was called
	counter  *RateCounter // accumulates the values passed to Incr
	interval time.Duration // window over which both counters expire
}
// NewAvgRateCounter constructs a new AvgRateCounter, for the interval provided
func NewAvgRateCounter(intrvl time.Duration) *AvgRateCounter {
	avg := &AvgRateCounter{interval: intrvl}
	avg.hits = NewRateCounter(intrvl)
	avg.counter = NewRateCounter(intrvl)
	return avg
}
// WithResolution determines the minimum resolution of this counter
func (a *AvgRateCounter) WithResolution(resolution int) *AvgRateCounter {
	if resolution < 1 {
		panic("AvgRateCounter resolution cannot be less than 1")
	}
	// Rebuild both underlying counters at the requested resolution.
	hits, counter := a.hits.WithResolution(resolution), a.counter.WithResolution(resolution)
	a.hits, a.counter = hits, counter
	return a
}
// Incr Adds an event into the AvgRateCounter
// One hit is recorded regardless of val, so Rate averages val per call.
func (a *AvgRateCounter) Incr(val int64) {
	a.hits.Incr(1)
	a.counter.Incr(val)
}
// Rate Returns the current ratio between the events count and its values during the last interval
func (a *AvgRateCounter) Rate() float64 {
	hits := a.hits.Rate()
	if hits == 0 {
		// Avoid division by zero when no events were recorded.
		return 0
	}
	return float64(a.counter.Rate()) / float64(hits)
}
// Hits returns the number of calling method Incr during specified interval
func (a *AvgRateCounter) Hits() int64 {
	return a.hits.Rate()
}
// String returns counter's rate formatted to string
// (scientific notation with 5 digits after the decimal point).
func (a *AvgRateCounter) String() string {
	return strconv.FormatFloat(a.Rate(), 'e', 5, 64)
}
|
package main
import (
"encoding/json"
"log"
"strings"
"github.com/kataras/iris"
"github.com/kataras/iris/websocket"
)
// main serves the chat page at "/" and the websocket endpoints registered by
// setupWebsocket, listening on :8080.
func main() {
	app := iris.New()
	app.Get("/", func(ctx iris.Context) {
		ctx.ServeFile("websockets.html", false) // second parameter: enable gzip?
	})
	setupWebsocket(app)
	app.Run(iris.Addr(":8080"))
}
// setupWebsocket registers the chat websocket server and its client script
// endpoint on the application.
func setupWebsocket(app *iris.Application) {
	// create our echo websocket server
	ws := websocket.New(websocket.Config{
		MaxMessageSize: 4096, // clients exceeding this size are forcibly disconnected
		ReadBufferSize: 4096,
		WriteBufferSize: 4096,
	})
	ws.OnConnection(handleConnection)
	app.Get("/chat", ws.Handler())
	app.Any("/ws.js", websocket.ClientHandler())
}
// SocketMessage is the JSON payload exchanged over the "chat" event.
// Field names must stay compatible with the client's JSON keys
// (json.Unmarshal matches them case-insensitively).
type SocketMessage struct {
	// UserID identifies the sender; messages without it are dropped.
	UserID string
	// Nickname is the display name prefixed to the broadcast message.
	Nickname string
	// Message is the chat text; it is profanity-filtered before broadcast.
	Message string
	// Time is a client-supplied timestamp; unit not visible here — TODO confirm.
	Time int
}
// handleConnection wires up the per-connection "chat" event handler:
// it decodes the payload, filters profanity, and broadcasts the message.
func handleConnection(c websocket.Connection) {
	// Read events from browser
	c.On("chat", func(request string) {
		// If the client IP is needed, it can be retrieved like this:
		// context := c.Context()
		// ip := context.RemoteAddr()

		// Decode the JSON request into a SocketMessage.
		var socketMessage SocketMessage
		if err := json.Unmarshal([]byte(request), &socketMessage); err != nil {
			// Fix: the error was previously ignored, silently proceeding
			// with zero values on malformed payloads.
			log.Println("invalid chat payload:", err)
			return
		}
		if socketMessage.UserID == "" {
			log.Println("UserId가 없음")
			return
		}
		msg, includeBadWords := filterBadWords(socketMessage.Message)
		// Broadcast to every connected user, including the sender
		// (profanity is handled server-side, not on the client).
		c.To(websocket.All).Emit("chat", socketMessage.Nickname+": "+msg)
		if includeBadWords {
			// Warn the offending sender privately.
			c.Emit("chat", "관리자: 욕설을 사용하는 경우 영구적으로 접속이 제한될 수 있습니다.")
		}
		// Write message back to the client message owner with: (sender only)
		// c.Emit("chat", msg)
		// Write message to all except this client with:
		// c.To(websocket.Broadcast).Emit("chat", ip+":"+msg)
	})
}
var (
	// badWords is the profanity dictionary; badWordsReplacer maps each entry
	// to its sanitized replacement. The two must be kept in sync.
	badWords         = [...]string{"새끼", "소새끼", "말새끼"}
	badWordsReplacer = strings.NewReplacer("새끼", "응애~", "소새끼", "음메~", "말새끼", "히힝~")
)

// filterBadWords replaces every dictionary profanity in message and reports
// whether any was found.
func filterBadWords(message string) (string, bool) {
	for _, word := range badWords {
		// Fix: use strings.Contains instead of Index > -1, and stop after the
		// first hit — the Replacer already substitutes every dictionary entry
		// in a single pass, so the old repeated Replace calls were redundant.
		if strings.Contains(message, word) {
			return badWordsReplacer.Replace(message), true
		}
	}
	return message, false
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importintotest
import (
_ "embed"
"fmt"
"os"
"path"
"time"
"github.com/fsouza/fake-gcs-server/fakestorage"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
//go:embed test.parquet
var parquetContent []byte
// TestDetachedLoadParquet verifies IMPORT INTO ... FORMAT 'parquet' both from
// a local file (synchronous) and from a fake GCS bucket with the DETACHED
// option (asynchronous), asserting identical resulting rows in each case.
func (s *mockGCSSuite) TestDetachedLoadParquet() {
	// Fresh schema with a table covering a spread of DECIMAL precisions.
	s.tk.MustExec("DROP DATABASE IF EXISTS load_csv;")
	s.tk.MustExec("CREATE DATABASE load_csv;")
	s.tk.MustExec("USE load_csv;")
	s.tk.MustExec("CREATE TABLE t (" +
		"id INT, val1 INT, val2 VARCHAR(20), " +
		"d1 DECIMAL(10, 0), d2 DECIMAL(10, 2), d3 DECIMAL(8, 8)," +
		"d4 DECIMAL(20, 0), d5 DECIMAL(36, 0), d6 DECIMAL(28, 8));")
	// Publish the embedded parquet fixture to the fake GCS server for the
	// detached half of the test.
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{
			BucketName: "test-load-parquet",
			Name:       "p",
		},
		Content: parquetContent,
	})

	// First: synchronous import from a local copy of the same fixture.
	tempDir := s.T().TempDir()
	s.NoError(os.WriteFile(path.Join(tempDir, "test.parquet"), parquetContent, 0o644))

	s.tk.MustQuery(fmt.Sprintf("IMPORT INTO t FROM '%s' FORMAT 'parquet';", path.Join(tempDir, "test.parquet")))
	s.tk.MustQuery("SELECT * FROM t;").Check(testkit.Rows(
		"1 1 0 123 1.23 0.00000001 1234567890 123 1.23000000",
		"2 123456 0 123456 9999.99 0.12345678 99999999999999999999 999999999999999999999999999999999999 99999999999999999999.99999999",
		"3 123456 0 -123456 -9999.99 -0.12340000 -99999999999999999999 -999999999999999999999999999999999999 -99999999999999999999.99999999",
		"4 1 0 123 1.23 0.00000001 1234567890 123 1.23000000",
		"5 123456 0 123456 9999.99 0.12345678 12345678901234567890 123456789012345678901234567890123456 99999999999999999999.99999999",
		"6 123456 0 -123456 -9999.99 -0.12340000 -12345678901234567890 -123456789012345678901234567890123456 -99999999999999999999.99999999",
	))
	s.tk.MustExec("TRUNCATE TABLE t;")

	// Second: detached import from GCS. The failpoint flips a flag when the
	// background task finishes so we can wait for completion.
	s.T().Cleanup(func() { executor.TestDetachedTaskFinished.Store(false) })
	s.enableFailpoint("github.com/pingcap/tidb/executor/testDetachedTaskFinished", "return(true)")
	sql := fmt.Sprintf(`IMPORT INTO t FROM 'gs://test-load-parquet/p?endpoint=%s'
		FORMAT 'parquet' WITH detached;`, gcsEndpoint)
	// A detached import returns immediately with a single job-info row.
	rows := s.tk.MustQuery(sql).Rows()
	require.Len(s.T(), rows, 1)
	// Wait for the background task to report completion via the failpoint.
	require.Eventually(s.T(), func() bool {
		return executor.TestDetachedTaskFinished.Load()
	}, 10*time.Second, time.Second)
	s.tk.MustQuery("SELECT * FROM t;").Check(testkit.Rows(
		"1 1 0 123 1.23 0.00000001 1234567890 123 1.23000000",
		"2 123456 0 123456 9999.99 0.12345678 99999999999999999999 999999999999999999999999999999999999 99999999999999999999.99999999",
		"3 123456 0 -123456 -9999.99 -0.12340000 -99999999999999999999 -999999999999999999999999999999999999 -99999999999999999999.99999999",
		"4 1 0 123 1.23 0.00000001 1234567890 123 1.23000000",
		"5 123456 0 123456 9999.99 0.12345678 12345678901234567890 123456789012345678901234567890123456 99999999999999999999.99999999",
		"6 123456 0 -123456 -9999.99 -0.12340000 -12345678901234567890 -123456789012345678901234567890123456 -99999999999999999999.99999999",
	))
	s.tk.MustExec("TRUNCATE TABLE t;")
}
|
/*
Create a function that takes two parameters and, if both parameters are strings, add them as if they were integers or if the two parameters are integers, concatenate them.
Examples
stupid_addition(1, 2) ➞ "12"
stupid_addition("1", "2") ➞ 3
stupid_addition("1", 2) ➞ None
Notes
If the two parameters are different data types, return None.
All parameters will either be strings or integers.
*/
package main
import (
"fmt"
"strconv"
)
func main() {
assert(add(1, 2) == "12")
assert(add("1", "2") == 3)
assert(add(1, "2") == nil)
assert(add("1", 2) == nil)
}
// add concatenates two ints into a string, sums two numeric strings into an
// int, and returns nil when the operand types differ ("stupid addition").
func add(x, y interface{}) interface{} {
	switch a := x.(type) {
	case int:
		if b, ok := y.(int); ok {
			// Two ints: concatenate their decimal representations.
			return strconv.Itoa(a) + strconv.Itoa(b)
		}
	case string:
		if b, ok := y.(string); ok {
			// Two strings: parse and sum (inputs are assumed numeric).
			ai, _ := strconv.Atoi(a)
			bi, _ := strconv.Atoi(b)
			return ai + bi
		}
	}
	// Mixed types.
	return nil
}
// assert panics when x is false; minimal test harness used by main.
func assert(x bool) {
	if !x {
		panic("assertion failed")
	}
}
|
// Package rest allows for quick and easy access any REST or REST-like API.
package rest
import (
	"errors"
	"net/http"
	"net/url"
	"strings"
)
// Method contains the supported HTTP verbs.
type Method string

// Supported HTTP verbs.
const (
	Get    Method = "GET"
	Post   Method = "POST"
	Put    Method = "PUT"
	Patch  Method = "PATCH"
	Delete Method = "DELETE"
)
// DefaultClient is used if no custom HTTP client is defined
var DefaultClient = &Client{HTTPClient: http.DefaultClient}

// Client allows modification of client headers, redirect policy
// and other settings
// See https://golang.org/pkg/net/http
type Client struct {
	// HTTPClient performs the actual requests; swap it to customize
	// timeouts, transport, or redirect policy.
	HTTPClient *http.Client
}
// makeRequest executes req using the package-level DefaultClient.
// NOTE(review): duplicates the exported MakeRequest; not referenced in this
// view — candidate for removal if nothing else in the package uses it.
func makeRequest(req *http.Request) (*http.Response, error) {
	return DefaultClient.HTTPClient.Do(req)
}
// API is the main interface to the API.
// It delegates to DefaultClient.API, which applies default headers.
func API(request *http.Request) (*http.Response, error) {
	return DefaultClient.API(request)
}
// AddQueryParameters adds query parameters to the URL.
// The parameters are URL-encoded. If baseURL already carries a query string,
// the new parameters are appended with '&' instead of starting a second '?'
// (the previous version always appended '?', producing malformed URLs such
// as "http://x?a=1?b=2").
func AddQueryParameters(baseURL string, queryParams map[string]string) string {
	params := url.Values{}
	for key, value := range queryParams {
		params.Add(key, value)
	}
	sep := "?"
	if strings.Contains(baseURL, "?") {
		sep = "&"
	}
	return baseURL + sep + params.Encode()
}
// MakeRequest makes the API call.
// Unlike API, it applies no default headers and performs no nil check.
func MakeRequest(req *http.Request) (*http.Response, error) {
	return DefaultClient.HTTPClient.Do(req)
}
// The following functions enable the ability to define a
// custom HTTP Client

// makeRequest makes the API call using this client's HTTPClient.
func (c *Client) makeRequest(req *http.Request) (*http.Response, error) {
	return c.HTTPClient.Do(req)
}
// API is the main interface to the API.
// It rejects nil requests, defaults the Content-Type to JSON when a body is
// present without one, and executes the request with this client.
func (c *Client) API(request *http.Request) (*http.Response, error) {
	if request == nil {
		return nil, errors.New("invalid request: request cannot be nil")
	}
	hasBody := request.Body != nil
	if hasBody && request.Header.Get("Content-Type") == "" {
		request.Header.Set("Content-Type", "application/json")
	}
	return c.makeRequest(request)
}
|
package problems
// removeElement compacts nums in place, keeping every value that differs from
// val, and returns the new logical length. Relative order is preserved.
func removeElement(nums []int, val int) int {
	write := 0
	for _, v := range nums {
		if v != val {
			nums[write] = v
			write++
		}
	}
	return write
}
// removeElement1 removes val from nums with fewer writes: a matching element
// is overwritten by the current last element instead of shifting the rest.
// Relative order is NOT preserved. Returns the new logical length.
func removeElement1(nums []int, val int) int {
	n := len(nums)
	for i := 0; i < n; {
		if nums[i] == val {
			// Swap in the tail element and shrink; re-check index i.
			n--
			nums[i] = nums[n]
		} else {
			i++
		}
	}
	return n
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errno
import (
"sync"
"time"
)
// The error summary is protected by a mutex for simplicity.
// It is not expected to be hot unless there are concurrent workloads
// that are generating high error/warning counts, in which case
// the system probably has other issues already.
// ErrorSummary summarizes errors and warnings
// for one error code, with first/last observation timestamps.
type ErrorSummary struct {
	ErrorCount   int       // number of errors observed
	WarningCount int       // number of warnings observed
	FirstSeen    time.Time // when this code was first recorded
	LastSeen     time.Time // when this code was most recently recorded
}
// instanceStatistics provide statistics for a tidb-server instance.
// The embedded mutex guards all three maps; see the comment above about
// the expected (low) contention.
type instanceStatistics struct {
	sync.Mutex
	global map[uint16]*ErrorSummary            // per error code, instance-wide
	users  map[string]map[uint16]*ErrorSummary // per user, then error code
	hosts  map[string]map[uint16]*ErrorSummary // per remote host, then error code
}
// stats is the process-wide error/warning accumulator.
var stats instanceStatistics

// init seeds the maps so the package is usable immediately.
func init() {
	FlushStats()
}
// FlushStats resets errors and warnings across global/users/hosts
func FlushStats() {
	stats.Lock()
	defer stats.Unlock()
	// Drop every accumulated summary by swapping in fresh maps.
	stats.global = map[uint16]*ErrorSummary{}
	stats.users = map[string]map[uint16]*ErrorSummary{}
	stats.hosts = map[string]map[uint16]*ErrorSummary{}
}
// copyMap deep-copies a summary map so callers can read it without holding
// the stats lock; each ErrorSummary is cloned, not aliased.
func copyMap(oldMap map[uint16]*ErrorSummary) map[uint16]*ErrorSummary {
	cloned := make(map[uint16]*ErrorSummary, len(oldMap))
	for code, summary := range oldMap {
		dup := *summary
		cloned[code] = &dup
	}
	return cloned
}
// GlobalStats summarizes errors and warnings across all users/hosts
// A deep copy is returned so callers can read it lock-free.
func GlobalStats() map[uint16]*ErrorSummary {
	stats.Lock()
	defer stats.Unlock()
	return copyMap(stats.global)
}
// UserStats summarizes per-user
// errors and warnings; the returned maps are deep copies.
func UserStats() map[string]map[uint16]*ErrorSummary {
	stats.Lock()
	defer stats.Unlock()
	result := make(map[string]map[uint16]*ErrorSummary, len(stats.users))
	for user, codes := range stats.users {
		result[user] = copyMap(codes)
	}
	return result
}
// HostStats summarizes per remote-host
func HostStats() map[string]map[uint16]*ErrorSummary {
	stats.Lock()
	defer stats.Unlock()
	snapshot := make(map[string]map[uint16]*ErrorSummary, len(stats.hosts))
	for host, byCode := range stats.hosts {
		snapshot[host] = copyMap(byCode)
	}
	return snapshot
}
// initCounters ensures summary entries exist for errCode in the global,
// per-user and per-host maps, creating missing ones with FirstSeen set to
// the current time.
//
// NOTE(review): callers re-acquire the lock afterwards to increment; a
// FlushStats between the two critical sections would discard the entries
// created here and the subsequent increment would nil-deref — confirm
// whether that window matters for this workload.
func initCounters(errCode uint16, user, host string) {
	seen := time.Now()
	stats.Lock()
	defer stats.Unlock()
	if _, ok := stats.global[errCode]; !ok {
		stats.global[errCode] = &ErrorSummary{FirstSeen: seen}
	}
	if _, ok := stats.users[user]; !ok {
		stats.users[user] = make(map[uint16]*ErrorSummary)
	}
	if _, ok := stats.users[user][errCode]; !ok {
		stats.users[user][errCode] = &ErrorSummary{FirstSeen: seen}
	}
	if _, ok := stats.hosts[host]; !ok {
		stats.hosts[host] = make(map[uint16]*ErrorSummary)
	}
	if _, ok := stats.hosts[host][errCode]; !ok {
		stats.hosts[host][errCode] = &ErrorSummary{FirstSeen: seen}
	}
}
// IncrementError increments the global/user/host statistics for an errCode
func IncrementError(errCode uint16, user, host string) {
seen := time.Now()
initCounters(errCode, user, host)
stats.Lock()
defer stats.Unlock()
// Increment counter + update last seen
stats.global[errCode].ErrorCount++
stats.global[errCode].LastSeen = seen
// Increment counter + update last seen
stats.users[user][errCode].ErrorCount++
stats.users[user][errCode].LastSeen = seen
// Increment counter + update last seen
stats.hosts[host][errCode].ErrorCount++
stats.hosts[host][errCode].LastSeen = seen
}
// IncrementWarning increments the global/user/host statistics for an errCode
func IncrementWarning(errCode uint16, user, host string) {
seen := time.Now()
initCounters(errCode, user, host)
stats.Lock()
defer stats.Unlock()
// Increment counter + update last seen
stats.global[errCode].WarningCount++
stats.global[errCode].LastSeen = seen
// Increment counter + update last seen
stats.users[user][errCode].WarningCount++
stats.users[user][errCode].LastSeen = seen
// Increment counter + update last seen
stats.hosts[host][errCode].WarningCount++
stats.hosts[host][errCode].LastSeen = seen
}
|
// exibe os argumentos da linha de comando
// arataca89@gmail.com
// 20210413
package main
import (
"fmt"
"os"
)
// main prints every command-line argument (index 0 is the program path)
// prefixed with its index.
func main() {
	// os.Args[:] was a redundant full reslice; range over os.Args directly.
	for indice, valor := range os.Args {
		fmt.Println(indice, " - ", valor)
	}
}
/////////////////////////////////////////////////////////////////////
//
// Exemplo do uso:
//
// go run args1.go one II three IV cinco
// 0 - C:\Users\nerd\golearning\args1.exe
// 1 - one
// 2 - II
// 3 - three
// 4 - IV
// 5 - cinco
//
|
package main
import (
"testing"
"time"
"github.com/go-redis/redis"
)
// TestRedisSetGet exercises SetNX/Get against a locally running Redis and
// logs the results.
//
// Fix: the function was named TestMain with a *testing.T parameter. The
// `tests` vet check (run automatically by `go test`) rejects that as a
// wrong TestMain signature (it must be func TestMain(m *testing.M)), so the
// test is renamed to an ordinary test function.
func TestRedisSetGet(t *testing.T) {
	client := redis.NewClient(&redis.Options{})
	t.Log(client.SetNX("key", "value", 10*time.Second).Result())
	t.Log(client.Get("key").Result())
}
|
package vsock
// GetContextID returns the local Context Identifier for the VSOCK socket
// address family. This may be a privileged operation.
//
// The platform-specific work is done by getContextID, defined in a sibling
// file of this package; this exported wrapper only forwards to it.
func GetContextID() (uint32, error) {
	return getContextID()
}
|
package detector
import (
"fmt"
"strings"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/wata727/tflint/issue"
)
// TerraformModulePinnedSourceDetector checks whether a module's "source"
// attribute pins a specific revision.
type TerraformModulePinnedSourceDetector struct {
	source string // evaluated value of the module's "source" attribute
	line   int    // line of the source token, for issue reporting
	file   string // file containing the module block
}
// NewTerraformModulePinnedSourceDetector builds a detector for the module
// item's "source" attribute. It returns nil (after logging) when the
// attribute cannot be read or evaluated.
func NewTerraformModulePinnedSourceDetector(detector *Detector, file string, item *ast.ObjectItem) *TerraformModulePinnedSourceDetector {
	token, err := hclLiteralToken(item, "source")
	if err != nil {
		detector.Logger.Error(err)
		return nil
	}
	evaluated, err := detector.evalToString(token.Text)
	if err != nil {
		detector.Logger.Error(err)
		return nil
	}
	result := &TerraformModulePinnedSourceDetector{}
	result.source = evaluated
	result.file = file
	result.line = token.Pos.Line
	return result
}
// DetectPinnedModuleSource appends a warning to issues when the module
// source is a git/bitbucket or mercurial URL that is unpinned or pinned to
// the default ref/rev.
//
// Fix: the local variable named `issue` shadowed the imported `issue`
// package inside the if bodies; renamed, and the tmp/append dance collapsed
// to a direct append.
func (d *TerraformModulePinnedSourceDetector) DetectPinnedModuleSource(issues *[]*issue.Issue) {
	lower := strings.ToLower(d.source)
	switch {
	case strings.Contains(lower, "git"), strings.Contains(lower, "bitbucket"):
		if found := d.detectGitSource(d.source); found != nil {
			*issues = append(*issues, found)
		}
	case strings.HasPrefix(lower, "hg:"):
		if found := d.detectMercurialSource(d.source); found != nil {
			*issues = append(*issues, found)
		}
	}
}
// detectGitSource warns when a git-style source either has no "ref="
// pin at all, or is pinned to the default "master" ref. A source pinned to
// any other ref yields nil.
func (d *TerraformModulePinnedSourceDetector) detectGitSource(source string) *issue.Issue {
	switch {
	case !strings.Contains(source, "ref="):
		return &issue.Issue{
			Type:    issue.WARNING,
			Message: fmt.Sprintf("Module source \"%s\" is not pinned", source),
			Line:    d.line,
			File:    d.file,
		}
	case strings.Contains(source, "ref=master"):
		return &issue.Issue{
			Type:    issue.WARNING,
			Message: fmt.Sprintf("Module source \"%s\" uses default ref \"master\"", source),
			Line:    d.line,
			File:    d.file,
		}
	default:
		return nil
	}
}
// detectMercurialSource warns when a mercurial source either has no "rev="
// pin, or is pinned to the default "default" rev. Any other rev yields nil.
func (d *TerraformModulePinnedSourceDetector) detectMercurialSource(source string) *issue.Issue {
	switch {
	case !strings.Contains(source, "rev="):
		return &issue.Issue{
			Type:    issue.WARNING,
			Message: fmt.Sprintf("Module source \"%s\" is not pinned", source),
			Line:    d.line,
			File:    d.file,
		}
	case strings.Contains(source, "rev=default"):
		return &issue.Issue{
			Type:    issue.WARNING,
			Message: fmt.Sprintf("Module source \"%s\" uses default rev \"default\"", source),
			Line:    d.line,
			File:    d.file,
		}
	default:
		return nil
	}
}
|
package main
import (
"flag"
"fmt"
"github.com/dominikh/arp"
"os"
)
var (
	dash  string // MAC address of the Dash button to watch (flag -d)
	count int    // clicks to handle before exiting; 0 = unlimited (flag -c)
	itf   string // network interface to listen on (flag -i)
	num   = 0    // clicks observed so far
)
// handler counts ARP probes whose sender MAC matches the configured Dash
// button and whose sender IP is 0.0.0.0, and exits the process once the
// requested click count has been reached.
func handler(w arp.ResponseSender, r *arp.Request) {
	fromDash := r.SenderHardwareAddr.String() == dash && r.SenderIP.String() == "0.0.0.0"
	if !fromDash {
		return
	}
	num++
	fmt.Printf("Dash %d!\n", num)
	if count > 0 && num == count {
		os.Exit(0)
	}
}
// main parses flags, announces what it will handle and serves ARP requests
// until the click budget is exhausted or listening fails.
//
// Fixes: flag.String was called with a pointer (it takes a name and returns
// a pointer — this did not compile); the undefined *countFlag is replaced by
// the bound count variable; and e.Error() is no longer dereferenced when
// ListenAndServe returns a nil error.
func main() {
	flag.StringVar(&dash, "d", "74:c2:46:fc:84:19", "MAC address of DashButton")
	flag.IntVar(&count, "c", 0, "number of clicks to handle, 0 for unlimited")
	flag.StringVar(&itf, "i", "wlan0", "name of interface to listen on")
	flag.Parse()
	if count == 0 {
		fmt.Printf("Handling clicks on %s\n", dash)
	} else {
		fmt.Printf("Handling %d clicks on %s\n", count, dash)
	}
	e := arp.ListenAndServe(itf, arp.HandlerFunc(handler))
	if e == nil {
		return
	}
	if e.Error() == "operation not permitted" {
		fmt.Printf("Either first run 'sudo setcap cap_net_raw+pe goDash' or run as root\n")
	} else {
		fmt.Printf("Got error %s, aborting\n", e)
	}
}
|
package arima
import (
"math"
"github.com/DoOR-Team/goutils/log"
"github.com/DoOR-Team/timeseries_forecasting/arima/matrix"
)
// Fit estimates AR(p) coefficients for data via the Yule-Walker equations:
// it computes the sample autocovariances r[0..p], builds the Toeplitz
// system from r[0..p-1] and solves it against r[1..p].
func Fit(data []float64, p int) []float64 {
	length := len(data)
	if length == 0 || p < 1 {
		// Fix: the original format string had one verb for two arguments,
		// producing a malformed fatal message.
		log.Fatalf(
			"fitYuleWalker - Invalid Parameters length= %d, p= %d", length, p)
	}
	r := make([]float64, p+1)
	// r[0]: mean of squares (zero-lag autocovariance, uncentered).
	for _, aData := range data {
		r[0] += math.Pow(aData, 2)
	}
	r[0] /= float64(length)
	// r[j], j=1..p: lag-j autocovariances.
	for j := 1; j < p+1; j++ {
		for i := 0; i < length-j; i++ {
			r[j] += data[i] * data[i+j]
		}
		r[j] /= float64(length)
	}
	toeplitz := initToeplitz(r[0:p])
	rVector := matrix.NewInsightVectorWithData(r[1:p+1], false)
	return toeplitz.SolveSPDIntoVector(rVector, maxConditionNumber).DeepCopy()
}
|
package main
import (
. "awesomeProject/internal/app/db/servicesDAO"
. "awesomeProject/internal/app/model"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"gopkg.in/mgo.v2/bson"
"log"
"net/http"
)
// servicesDAO is the shared data-access object; configured in init().
var servicesDAO = ServicesDAO{}

// AllServicesEndPoint handles GET /services: returns every service as JSON,
// or a 500 with the DAO error message.
func AllServicesEndPoint(w http.ResponseWriter, r *http.Request) {
	services, err := servicesDAO.FindAll()
	if err != nil {
		respondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondWithJson(w, http.StatusOK, services)
}
// FindServiceEndpoint handles GET /services/id/{id}: looks a service up by
// its path id.
// NOTE(review): any lookup failure (including "not found") is reported as
// 500 — confirm whether a 404 would be more appropriate for missing ids.
func FindServiceEndpoint(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	service, err := servicesDAO.FindById(params["id"])
	if err != nil {
		respondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondWithJson(w, http.StatusOK, service)
}
// CreateServiceEndPoint handles POST /services: decodes a Service from the
// request body, assigns a fresh ObjectId and inserts it. Responds 400 on a
// malformed payload, 500 on insert failure, 201 with the stored document on
// success.
func CreateServiceEndPoint(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	var service Service
	if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
		respondWithError(w, http.StatusBadRequest, "Invalid request payload")
		return
	}
	// Server-side id generation: any client-supplied ID is overwritten.
	service.ID = bson.NewObjectId()
	if err := servicesDAO.Insert(service); err != nil {
		respondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondWithJson(w, http.StatusCreated, service)
}
func UpdateServiceEndPoint(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "not implemented yet !")
}
// DeleteServiceEndPoint handles DELETE /services: decodes the Service to
// remove from the request body and deletes it via the DAO. Responds 400 on
// a malformed payload, 500 on delete failure, 200 with {"result":"success"}
// otherwise.
func DeleteServiceEndPoint(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	var service Service
	if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
		respondWithError(w, http.StatusBadRequest, "Invalid request payload")
		return
	}
	if err := servicesDAO.Delete(service); err != nil {
		respondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondWithJson(w, http.StatusOK, map[string]string{"result": "success"})
}
// FindServiceNameEndpoint handles GET /services/name/{name}: looks a
// service up by the name path parameter. Lookup failures are reported as
// 500 with the DAO error message.
func FindServiceNameEndpoint(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	service, err := servicesDAO.FindByName(params["name"])
	if err != nil {
		respondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondWithJson(w, http.StatusOK, service)
}
// init configures and opens the MongoDB connection before main runs.
// NOTE(review): connecting to a database from init() makes the package
// untestable without a live MongoDB and hides startup failures — consider
// moving this into main.
func init() {
	fmt.Println("starting server")
	servicesDAO.Server = "localhost"
	servicesDAO.Database = "services_db"
	servicesDAO.Connect()
	fmt.Println("server started")
}
// main wires the REST routes onto a gorilla/mux router and serves on :3000.
func main() {
	r := mux.NewRouter()
	r.HandleFunc("/services", AllServicesEndPoint).Methods("GET")
	r.HandleFunc("/services", CreateServiceEndPoint).Methods("POST")
	r.HandleFunc("/services", UpdateServiceEndPoint).Methods("PUT")
	r.HandleFunc("/services", DeleteServiceEndPoint).Methods("DELETE")
	r.HandleFunc("/services/id/{id}", FindServiceEndpoint).Methods("GET")
	r.HandleFunc("/services/name/{name}", FindServiceNameEndpoint).Methods("GET")
	if err := http.ListenAndServe(":3000", r); err != nil {
		log.Fatal(err)
	}
}
// respondWithError writes an error JSON body of the form {"error": msg}
// with the given status code.
func respondWithError(w http.ResponseWriter, code int, msg string) {
	payload := map[string]string{"error": msg}
	respondWithJson(w, code, payload)
}
func respondWithJson(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(code)
w.Write(response)
}
|
package main
import "github.com/01-edu/z01"
// main prints the lowercase alphabet in reverse order ('z' down to 'a'),
// one rune at a time, followed by a newline.
func main() {
	for c := 'z'; c >= 'a'; c-- {
		z01.PrintRune(c)
	}
	z01.PrintRune('\n')
}
|
package secrets
import (
"errors"
"testing"
"github.com/10gen/realm-cli/internal/cli"
"github.com/10gen/realm-cli/internal/cloud/realm"
"github.com/10gen/realm-cli/internal/utils/test/assert"
"github.com/10gen/realm-cli/internal/utils/test/mock"
)
// TestSecretsCreateHandler exercises CommandCreate.Handler against a mocked
// Realm client: the happy path verifies the UI output and that every input
// is passed through to the client; the error table verifies that failures
// from app resolution and secret creation are propagated unchanged.
func TestSecretsCreateHandler(t *testing.T) {
	projectID := "projectID"
	appID := "appID"
	secretID := "secretID"
	secretName := "secretname"
	secretValue := "secretvalue"
	app := realm.App{
		ID:          appID,
		GroupID:     projectID,
		ClientAppID: "eggcorn-abcde",
		Name:        "eggcorn",
	}
	t.Run("should create app secrets", func(t *testing.T) {
		out, ui := mock.NewUI()
		realmClient := mock.RealmClient{}
		// Captured by the mock functions so the inputs can be asserted below.
		var capturedFilter realm.AppFilter
		var capturedGroupID, capturedAppID, capturedName, capturedValue string
		realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
			capturedFilter = filter
			return []realm.App{app}, nil
		}
		realmClient.CreateSecretFn = func(groupID, appID, name, value string) (realm.Secret, error) {
			capturedGroupID = groupID
			capturedAppID = appID
			capturedName = name
			capturedValue = value
			// NOTE(review): unkeyed composite literal — `go vet` may flag
			// realm.Secret{secretID, secretName}; confirm field order.
			return realm.Secret{secretID, secretName}, nil
		}
		cmd := &CommandCreate{createInputs{
			ProjectInputs: cli.ProjectInputs{
				Project: projectID,
				App:     appID,
			},
			Name:  secretName,
			Value: secretValue,
		}}
		assert.Nil(t, cmd.Handler(nil, ui, cli.Clients{Realm: realmClient}))
		assert.Equal(t, "Successfully created secret, id: secretID\n", out.String())
		t.Log("and should properly pass through the expected inputs")
		assert.Equal(t, realm.AppFilter{projectID, appID, nil}, capturedFilter)
		assert.Equal(t, projectID, capturedGroupID)
		assert.Equal(t, appID, capturedAppID)
		assert.Equal(t, secretName, capturedName)
		assert.Equal(t, secretValue, capturedValue)
	})
	t.Run("should return an error", func(t *testing.T) {
		// Table of failure scenarios; each setupClient wires the mock to
		// fail at a different stage.
		for _, tc := range []struct {
			description string
			setupClient func() realm.Client
			expectedErr error
		}{
			{
				description: "when resolving the app fails",
				setupClient: func() realm.Client {
					realmClient := mock.RealmClient{}
					realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
						return nil, errors.New("something bad happened")
					}
					return realmClient
				},
				expectedErr: errors.New("something bad happened"),
			},
			{
				description: "when creating a secret fails",
				setupClient: func() realm.Client {
					realmClient := mock.RealmClient{}
					realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
						return []realm.App{app}, nil
					}
					realmClient.CreateSecretFn = func(groupID, appID, name, value string) (realm.Secret, error) {
						return realm.Secret{}, errors.New("something bad happened")
					}
					return realmClient
				},
				expectedErr: errors.New("something bad happened"),
			},
		} {
			t.Run(tc.description, func(t *testing.T) {
				realmClient := tc.setupClient()
				cmd := &CommandCreate{}
				err := cmd.Handler(nil, nil, cli.Clients{Realm: realmClient})
				assert.Equal(t, tc.expectedErr, err)
			})
		}
	})
}
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"os/signal"
"strconv"
"time"
"github.com/gorilla/websocket"
"github.com/wuxc/gowebwx/webwx"
)
// upgrader uses the zero-value Upgrader defaults.
// NOTE(review): the default CheckOrigin rejects cross-origin browser
// requests — confirm whether other origins need to connect.
var upgrader = websocket.Upgrader{}

// serveWs upgrades the connection to a websocket and echoes every message
// back to the client until a read or write fails.
//
// Fix: log typo "recieved" -> "received".
func serveWs(w http.ResponseWriter, r *http.Request) {
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println("connect:", err)
		return
	}
	log.Println("websocket connected.")
	defer ws.Close()
	for {
		mt, msg, err := ws.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			break
		}
		log.Println("received:", mt, msg)
		err = ws.WriteMessage(mt, msg)
		if err != nil {
			log.Println("write:", err)
			break
		}
	}
}
// serveStatic serves files from the local "res" directory, mapping "/" to
// res/index.html. Path traversal via the URL is blocked by http.ServeFile
// itself, which rejects request paths containing "..".
func serveStatic(w http.ResponseWriter, r *http.Request) {
	path := "res/index.html"
	if r.URL.Path != "/" {
		path = "res" + r.URL.Path
	}
	log.Println("serveStatic", path)
	http.ServeFile(w, r, path)
}
// main prints some startup timestamps, registers the websocket and static
// handlers, starts the webwx client, installs a Ctrl+C handler that stops
// it cleanly, and serves HTTP on :8080.
//
// Fixes: non-idiomatic `var it int = int(...)` and `for _ = range c`
// replaced with their gofmt/vet-clean forms; behavior is unchanged.
func main() {
	t := time.Now()
	it := int(t.UnixNano() / 1000)
	fmt.Println(^it + 1)
	fmt.Println(t, t.Unix())
	fmt.Println(strconv.FormatInt(time.Now().UnixNano(), 10))
	http.HandleFunc("/ws", serveWs)
	http.HandleFunc("/", serveStatic)
	w := webwx.New()
	w.Start()
	// handle Ctrl+C
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		for range c {
			fmt.Println("Ctrl+C received.")
			w.Stop()
			os.Exit(0)
		}
	}()
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package healthcheck
import (
"context"
"google.golang.org/grpc/health/grpc_health_v1"
)
// HealthChecker implements the gRPC health-checking service and always
// reports SERVING.
type HealthChecker struct{}

// Check answers a unary health probe with a static SERVING status.
func (s *HealthChecker) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
	resp := &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}
	return resp, nil
}

// Watch sends a single SERVING status on the streaming health endpoint.
func (s *HealthChecker) Watch(_ *grpc_health_v1.HealthCheckRequest, server grpc_health_v1.Health_WatchServer) error {
	resp := &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}
	return server.Send(resp)
}

// NewHealthChecker returns a ready-to-register HealthChecker.
func NewHealthChecker() *HealthChecker {
	return new(HealthChecker)
}
|
package user
import (
"context"
"sync"
"time"
google_protobuf2 "github.com/gogo/protobuf/types"
proto "github.com/weisd/web-kit/api/protobuf/user"
"github.com/weisd/web-kit/internal/pkg/ierrors"
"github.com/weisd/web-kit/internal/pkg/istring"
)
// var _ proto.RPCServiceServer = &MemoryServer{}

// Compile-time assertion that MemoryServer satisfies RPCServer.
var _ RPCServer = &MemoryServer{}

// init registers the in-memory backend under the "memory" driver name.
func init() {
	RPCRegister("memory", &MemoryServer{})
}

// MemoryServer MemoryServer — an in-memory RPCServer implementation.
// All fields must be accessed under lock; Init must be called before use.
type MemoryServer struct {
	data  map[int64]*proto.User // users keyed by ID
	lock  *sync.RWMutex         // guards data and idGen
	idGen int64                 // last assigned user ID
}
// Init prepares the in-memory store. The dsn argument is ignored by this
// backend.
func (p *MemoryServer) Init(ctx context.Context, dsn string) error {
	p.data = map[int64]*proto.User{}
	p.lock = &sync.RWMutex{}
	return nil
}
// Create stores a new user: assigns the next sequential ID, sets the status
// to Normal and stamps both creation and update times with now.
func (p *MemoryServer) Create(ctx context.Context, in *proto.User) (out *google_protobuf2.Empty, err error) {
	out = &google_protobuf2.Empty{}
	p.lock.Lock()
	defer p.lock.Unlock()
	p.idGen++
	in.ID = p.idGen
	in.Status = proto.UserStatus_Normal
	now := time.Now()
	in.CreatedAt = now
	in.UpdatedAt = now
	p.data[in.ID] = in
	return
}
// UpdatePassword replaces the password and salt of the user with ID in.ID.
// Unknown IDs are silently ignored.
func (p *MemoryServer) UpdatePassword(ctx context.Context, in *proto.IDPassword) (out *google_protobuf2.Empty, err error) {
	out = &google_protobuf2.Empty{}
	p.lock.Lock()
	defer p.lock.Unlock()
	if record, ok := p.data[in.ID]; ok {
		record.Password = in.Password
		record.Salt = in.Salt
	}
	return
}
// UpdatePhone replaces the phone number of the user with ID in.ID.
// Unknown IDs are silently ignored.
func (p *MemoryServer) UpdatePhone(ctx context.Context, in *proto.IDPhone) (out *google_protobuf2.Empty, err error) {
	out = &google_protobuf2.Empty{}
	p.lock.Lock()
	defer p.lock.Unlock()
	if record, ok := p.data[in.ID]; ok {
		record.Phone = in.Phone
	}
	return
}
// UpdateEmail replaces the email address of the user with ID in.ID.
// Unknown IDs are silently ignored.
func (p *MemoryServer) UpdateEmail(ctx context.Context, in *proto.IDEmail) (out *google_protobuf2.Empty, err error) {
	out = &google_protobuf2.Empty{}
	p.lock.Lock()
	defer p.lock.Unlock()
	if record, ok := p.data[in.ID]; ok {
		record.Email = in.Email
	}
	return
}
// UpdateNickname replaces the nickname of the user with ID in.ID.
// Unknown IDs are silently ignored.
func (p *MemoryServer) UpdateNickname(ctx context.Context, in *proto.IDNickname) (out *google_protobuf2.Empty, err error) {
	out = &google_protobuf2.Empty{}
	p.lock.Lock()
	defer p.lock.Unlock()
	if record, ok := p.data[in.ID]; ok {
		record.NickName = in.NickName
	}
	return
}
// UpdateAvatar replaces the avatar of the user with ID in.ID.
// Unknown IDs are silently ignored.
func (p *MemoryServer) UpdateAvatar(ctx context.Context, in *proto.IDAvatar) (out *google_protobuf2.Empty, err error) {
	out = &google_protobuf2.Empty{}
	p.lock.Lock()
	defer p.lock.Unlock()
	if record, ok := p.data[in.ID]; ok {
		record.Avatar = in.Avatar
	}
	return
}
// UpdateStatus replaces the status of the user with ID in.ID.
// Unknown IDs are silently ignored.
func (p *MemoryServer) UpdateStatus(ctx context.Context, in *proto.IDStatus) (out *google_protobuf2.Empty, err error) {
	out = &google_protobuf2.Empty{}
	p.lock.Lock()
	defer p.lock.Unlock()
	if record, ok := p.data[in.ID]; ok {
		record.Status = in.Status
	}
	return
}
// InfoByID looks a user up by ID and returns a copy; a zero-valued user is
// returned when the ID is unknown.
func (p *MemoryServer) InfoByID(ctx context.Context, in *proto.ID) (out *proto.User, err error) {
	out = &proto.User{}
	p.lock.RLock()
	defer p.lock.RUnlock()
	if record, ok := p.data[in.ID]; ok {
		*out = *record
	}
	return
}
// InfoByPhone scans for a user with the given phone number and returns a
// copy; a zero-valued user is returned when none matches.
func (p *MemoryServer) InfoByPhone(ctx context.Context, in *proto.Phone) (out *proto.User, err error) {
	out = &proto.User{}
	p.lock.RLock()
	defer p.lock.RUnlock()
	for _, candidate := range p.data {
		if candidate.Phone == in.Phone {
			*out = *candidate
			return
		}
	}
	return
}
// InfoByEmail scans for a user with the given email and returns a copy; a
// zero-valued user is returned when none matches.
func (p *MemoryServer) InfoByEmail(ctx context.Context, in *proto.Email) (out *proto.User, err error) {
	out = &proto.User{}
	p.lock.RLock()
	defer p.lock.RUnlock()
	for _, candidate := range p.data {
		if candidate.Email == in.Email {
			*out = *candidate
			return
		}
	}
	return
}
// InfoByNickname scans for a user with the given nickname and returns a
// copy; a zero-valued user is returned when none matches.
func (p *MemoryServer) InfoByNickname(ctx context.Context, in *proto.Nickname) (out *proto.User, err error) {
	out = &proto.User{}
	p.lock.RLock()
	defer p.lock.RUnlock()
	for _, candidate := range p.data {
		if candidate.NickName == in.Nickname {
			*out = *candidate
			return
		}
	}
	return
}
// InfoByAccount classifies the account string as a phone number, email or
// nickname and dispatches to the matching lookup; anything else yields
// ErrInValidNickname.
//
// Fix: removed the unreachable trailing return — every switch branch
// (including default) already returns.
func (p *MemoryServer) InfoByAccount(ctx context.Context, in *proto.Account) (out *proto.User, err error) {
	switch {
	case istring.IsPhone(in.Account):
		return p.InfoByPhone(ctx, &proto.Phone{Phone: in.Account})
	case istring.IsEmail(in.Account):
		return p.InfoByEmail(ctx, &proto.Email{Email: in.Account})
	case istring.IsValidNickname(in.Account):
		return p.InfoByNickname(ctx, &proto.Nickname{Nickname: in.Account})
	default:
		err = ierrors.ErrInValidNickname
		return
	}
}
|
package main
import (
	"bytes"
	"encoding/json"
	"html/template"
	"log"
	"net/http"
	"net/url"
)
// Client contains infomation of a client
type Client struct {
	ClientID     string   // public client identifier
	ClientSecret string   // shared secret used to authenticate the client
	RedirectURIs []string // allowed redirect URIs for this client
	Scope        string   // space-separated scopes the client may request
}

// Service contains AuthServer and its Clients
type Service struct {
	Clients []Client
}
// getClient returns a copy of the registered client with the given ID, or
// an invalid_client error response when no client matches.
func (s Service) getClient(clientID string) (*Client, *errorResponse) {
	for i := range s.Clients {
		if s.Clients[i].ClientID != clientID {
			continue
		}
		found := s.Clients[i]
		return &found, nil
	}
	return nil, &errorResponse{
		Error:            "invalid_client",
		ErrorDescription: "Unknown client: client_id = " + clientID,
		ErrorURI:         "",
	}
}
// tokenResponse is the JSON body returned from the token endpoint.
type tokenResponse struct {
	TokenType    string `json:"token_type"`
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int64  `json:"expires_in"`
	Scope        string `json:"scope"`
}

// errorResponse is the standard OAuth JSON error body.
type errorResponse struct {
	Error            string `json:"error"`
	ErrorDescription string `json:"error_description"`
	ErrorURI         string `json:"error_uri"`
}

// clientRegistrationResponse is the JSON body returned from the dynamic
// client registration endpoint.
// NOTE(review): RFC 7591 names these fields "grant_types"/"response_types"
// (plural); the singular tags below may be intentional for a specific
// client — confirm before relying on them.
type clientRegistrationResponse struct {
	ClientID                string   `json:"client_id"`
	ClientSecret            string   `json:"client_secret"`
	IssuedAt                int64    `json:"iat"`
	ExpiresAt               int64    `json:"exp"`
	TokenEndpointAuthMethod string   `json:"token_endpoint_auth_method"`
	ClientName              string   `json:"client_name"`
	RedirectURIs            []string `json:"redirect_uris"`
	ClientURI               string   `json:"client_uri"`
	GrantTypes              []string `json:"grant_type"`
	ResponseTypes           []string `json:"response_type"`
	Scope                   string   `json:"scope"`
}

// header models a JOSE/JWT header.
type header struct {
	Type      string `json:"typ"`
	Algorithm string `json:"alg"`
	KeyID     string `json:"kid"`
}

// payload models the registered JWT claims used by this server.
type payload struct {
	Issuer           string `json:"iss"`
	Subject          string `json:"sub"`
	Audience         string `json:"aud"`
	ExpiresAt        int64  `json:"exp"`
	IssuedAt         int64  `json:"iat"`
	UniqueIdentifier string `json:"jti"`
}
// responseContent is any body that can serialize itself to JSON for the
// HTTP layer.
type responseContent interface {
	jsonify() ([]byte, error)
}

// jsonify serializes the token response.
func (tr *tokenResponse) jsonify() ([]byte, error) {
	return json.Marshal(tr)
}

// jsonify serializes the error response.
func (er *errorResponse) jsonify() ([]byte, error) {
	return json.Marshal(er)
}

// jsonify serializes the client registration response.
func (cr *clientRegistrationResponse) jsonify() ([]byte, error) {
	return json.Marshal(cr)
}
type response struct {
content responseContent
code int
}
// env contains global parameters for a web application
type env struct {
template *template.Template
service Service
requests map[string]url.Values
scopes map[string][]string
}
// handlerFunc is a function that can be registed to a router
type appHandlerFunc func(http.ResponseWriter, *http.Request, *env) *response
// appHandler is a struct that contains appContext and an original appHandler
type appHandler struct {
env *env
handlerFunc appHandlerFunc
}
// newEnv is a function to initiate this framework: it parses the templates
// and seeds the service with the single default OAuth client.
func newEnv() *env {
	defaultClient := Client{
		ClientID:     "oauth-client-1",
		ClientSecret: "oauth-client-secret-1",
		RedirectURIs: []string{"http://localhost:9000/callback"},
		Scope:        "foo bar",
	}
	return &env{
		template: template.Must(template.ParseGlob("templates/*")),
		service:  Service{Clients: []Client{defaultClient}},
		requests: make(map[string]url.Values),
		scopes:   make(map[string][]string),
	}
}
// executeTemplate renders the named template with data.
//
// Fix: rendering directly into the ResponseWriter meant a template error
// halfway through execution had already sent a partial 200 body, and the
// subsequent http.Error was appended to it. The template is now rendered
// into a buffer first and only written out on success.
func (e *env) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
	var buf bytes.Buffer
	if err := e.template.ExecuteTemplate(&buf, name, data); err != nil {
		http.Error(w, "Internal Error", http.StatusInternalServerError)
		log.Println(err)
		return
	}
	buf.WriteTo(w)
}
// add is a shortcut for http.Handle: it registers h at path p, wrapped with
// the shared environment.
func (e *env) add(p string, h appHandlerFunc) {
	wrapped := &appHandler{env: e, handlerFunc: h}
	http.Handle(p, wrapped)
}
// ServeHTTP makes the handler implement the http.Handler interface. When
// the wrapped handler returns a response value, it is serialized to JSON
// and written with no-cache headers.
//
// Fix: the original called w.WriteHeader(res.code) BEFORE setting
// Content-Type/Cache-Control/Pragma; net/http ignores headers set after
// WriteHeader, so none of them ever reached the client. Headers are now
// set first.
func (h *appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Respond with a JSON body if the handler produced one
	if res := h.handlerFunc(w, r, h.env); res != nil {
		// Jsonify the response. Return a error if failed to create a JSON.
		result, err := res.content.jsonify()
		if err != nil {
			http.Error(w, "Internal Error", http.StatusInternalServerError)
			log.Println(err)
			return
		}
		// Send back JSON
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		w.Header().Set("Pragma", "no-cache")
		w.WriteHeader(res.code)
		w.Write(result)
	}
}
|
package daemon
import (
"context"
"fmt"
"sync"
"time"
"github.com/hashicorp/serf/serf"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
"github.com/xmwilldo/edge-health/cmd/app-health/app/options"
"github.com/xmwilldo/edge-health/pkg/app-health-daemon/server"
)
// AppDaemon wraps a serf agent used for app-level health gossip.
type AppDaemon struct {
	serf      *serf.Serf // gossip agent
	joinIp    string     // optional address of an existing member to join
	namespace string     // kubernetes namespace advertised in serf tags
	svcName   string     // service name advertised in serf tags
}
// NewAppDaemon creates a serf agent with the default configuration and
// wraps it in an AppDaemon. It returns nil when serf creation fails.
//
// Fixes: the serf.Create error was silently swallowed (a nil return was
// undiagnosable), and the local variable shadowed the serf package name.
func NewAppDaemon(options *options.AppHealthOptions) *AppDaemon {
	config := serf.DefaultConfig()
	agent, err := serf.Create(config)
	if err != nil {
		klog.Errorf("create serf err: %v", err)
		return nil
	}
	return &AppDaemon{
		serf:      agent,
		joinIp:    options.JoinIp,
		namespace: options.Namespace,
		svcName:   options.SvcName,
	}
}
// Run tags the serf agent, optionally joins an existing cluster, starts the
// periodic member printer and the health server, then blocks until ctx is
// cancelled and the server goroutine has finished.
//
// Fix: `for range ctx.Done() { ...; return }` was an obscure way to wait
// for cancellation; replaced with a plain channel receive.
func (d *AppDaemon) Run(ctx context.Context) {
	wg := sync.WaitGroup{}
	err := d.serf.SetTags(map[string]string{"namespace": d.namespace, "svc_name": d.svcName})
	if err != nil {
		klog.Errorf("set tags err: %v", err)
		return
	}
	if d.joinIp != "" {
		if _, err := d.serf.Join([]string{d.joinIp}, true); err != nil {
			klog.Errorf("join serf err: %v", err)
			return
		}
	}
	go wait.Until(d.PrintMembers, time.Second*3, ctx.Done())
	wg.Add(1)
	go server.Server(ctx, &wg, d.serf)
	// Block until the context is cancelled, then wait for the server.
	<-ctx.Done()
	wg.Wait()
}
// PrintMembers dumps the current serf membership list to stdout.
func (d *AppDaemon) PrintMembers() {
	fmt.Printf("Members: %+v\n", d.serf.Members())
}
|
package g2util
import (
cRand "crypto/rand"
"math/big"
"math/rand"
)
// CryptoRandInt returns a cryptographically secure random integer in the
// half-open interval [0, n).
//
// Fixes: the rand.Int error was discarded (on RNG failure the caller
// received a nil *big.Int), and n <= 0 made crypto/rand panic; both cases
// now return 0.
func CryptoRandInt(n int) *big.Int {
	if n <= 0 {
		return big.NewInt(0)
	}
	b, err := cRand.Int(cRand.Reader, big.NewInt(int64(n)))
	if err != nil {
		// The platform RNG failing is unrecoverable here; return a usable
		// zero rather than a nil pointer.
		return big.NewInt(0)
	}
	return b
}
// MathRandInt returns a pseudo-random int in [0, n) from math/rand's global
// source. Not cryptographically secure; rand.Intn panics when n <= 0.
func MathRandInt(n int) int { return rand.Intn(n) }
|
package middlewares
import (
"errors"
"fmt"
"github.com/Highway-Project/highway/config"
"github.com/Highway-Project/highway/logging"
"github.com/Highway-Project/highway/pkg/middlewares"
"github.com/Highway-Project/highway/pkg/middlewares/cors"
"github.com/Highway-Project/highway/pkg/middlewares/nothing"
"github.com/Highway-Project/highway/pkg/middlewares/prometheus"
"github.com/Highway-Project/highway/pkg/middlewares/ratelimit"
"plugin"
)
// middlewareConstructors maps a built-in middleware name to its factory.
var middlewareConstructors map[string]func(middlewares.MiddlewareParams) (middlewares.Middleware, error)

// middlewareMap holds instantiated middlewares keyed by reference name.
var middlewareMap map[string]middlewares.Middleware

// init registers the built-in middlewares. The registration errors are
// deliberately ignored: the names are hard-coded and cannot collide here.
func init() {
	middlewareConstructors = make(map[string]func(params middlewares.MiddlewareParams) (middlewares.Middleware, error))
	middlewareMap = make(map[string]middlewares.Middleware)
	_ = RegisterMiddleware("nothing", nothing.New)
	_ = RegisterMiddleware("cors", cors.New)
	_ = RegisterMiddleware("ratelimit", ratelimit.New)
	_ = RegisterMiddleware("prometheus", prometheus.New)
}
// RegisterMiddleware makes a constructor available under name, failing when
// the name is already taken.
func RegisterMiddleware(name string, constructor func(params middlewares.MiddlewareParams) (middlewares.Middleware, error)) error {
	_, taken := middlewareConstructors[name]
	if taken {
		return errors.New("Middleware with this name exists: " + name)
	}
	middlewareConstructors[name] = constructor
	return nil
}
// loadCustomMiddleware opens the middleware's plugin file, resolves its New
// constructor, instantiates it and stores it under spec.RefName (falling
// back to spec.MiddlewareName).
//
// Fixes: msg was passed as the Errorf FORMAT string (a '%' in a middleware
// name would garble the log); WithError(err) was used where err is nil;
// "middlware"/"midlewares" typos in the error messages.
func loadCustomMiddleware(spec config.MiddlewareSpec) error {
	plug, err := plugin.Open(spec.MiddlewarePath)
	if err != nil {
		msg := fmt.Sprintf("could not open custom middleware %s's file", spec.MiddlewareName)
		logging.Logger.WithError(err).Errorf("%s", msg)
		return errors.New(msg)
	}
	constructorSym, err := plug.Lookup("New")
	if err != nil {
		msg := fmt.Sprintf("could not load middleware %s's constructor. New function is not found", spec.MiddlewareName)
		logging.Logger.WithError(err).Errorf("%s", msg)
		return errors.New(msg)
	}
	constructor, ok := constructorSym.(func(map[string]interface{}) (interface{}, error))
	if !ok {
		msg := fmt.Sprintf("New function for middleware %s is not valid", spec.MiddlewareName)
		// err is nil at this point, so no WithError here.
		logging.Logger.Errorf("%s", msg)
		return errors.New(msg)
	}
	refName := spec.MiddlewareName
	if spec.RefName != "" {
		refName = spec.RefName
	}
	if _, exists := middlewareMap[refName]; exists {
		msg := fmt.Sprintf("middleware with name %s already exists", refName)
		logging.Logger.Errorf("%s", msg)
		return errors.New(msg)
	}
	mwInterface, err := constructor(spec.Params)
	if err != nil {
		msg := fmt.Sprintf("could not create middleware %s", refName)
		logging.Logger.WithError(err).Errorf("%s", msg)
		return errors.New(msg)
	}
	mw, ok := mwInterface.(middlewares.Middleware)
	if !ok {
		msg := fmt.Sprintf("output of New function middleware %s is not implementing middlewares.Middleware Interface", refName)
		logging.Logger.Errorf("%s", msg)
		return errors.New(msg)
	}
	middlewareMap[refName] = mw
	return nil
}
// loadBuiltinMiddleware instantiates a registered built-in middleware and
// stores it under spec.RefName (falling back to spec.MiddlewareName).
//
// Fixes: msg was passed as the Errorf FORMAT string (a '%' in a name would
// garble the log) and the "middlware" typo in the creation error message.
func loadBuiltinMiddleware(spec config.MiddlewareSpec) error {
	constructor, exists := middlewareConstructors[spec.MiddlewareName]
	if !exists {
		msg := fmt.Sprintf("could not load middleware %s", spec.MiddlewareName)
		logging.Logger.Errorf("%s", msg)
		return errors.New(msg)
	}
	refName := spec.MiddlewareName
	if spec.RefName != "" {
		refName = spec.RefName
	}
	if _, exists := middlewareMap[refName]; exists {
		msg := fmt.Sprintf("middleware %s already exists", refName)
		logging.Logger.Errorf("%s", msg)
		return errors.New(msg)
	}
	mw, err := constructor(middlewares.MiddlewareParams{Params: spec.Params})
	if err != nil {
		msg := fmt.Sprintf("could not create middleware %s", refName)
		logging.Logger.Errorf("%s", msg)
		return errors.New(msg)
	}
	middlewareMap[refName] = mw
	return nil
}
// LoadMiddlewares instantiates every middleware described in specs,
// dispatching custom specs to the plugin loader and the rest to the
// built-in loader. The first failure aborts loading.
func LoadMiddlewares(specs []config.MiddlewareSpec) error {
	for _, spec := range specs {
		load := loadBuiltinMiddleware
		if spec.CustomMiddleware {
			load = loadCustomMiddleware
		}
		if err := load(spec); err != nil {
			return err
		}
	}
	return nil
}
// GetMiddlewareByName returns the instantiated middleware registered under
// refName, or an error when no such middleware was loaded.
//
// Fix: msg was passed as the Errorf FORMAT string; use an explicit %s so a
// '%' in refName cannot garble the log (consistent with the loaders).
func GetMiddlewareByName(refName string) (middlewares.Middleware, error) {
	mw, exists := middlewareMap[refName]
	if !exists {
		msg := fmt.Sprintf("middleware %s does not exist", refName)
		logging.Logger.Errorf("%s", msg)
		return nil, errors.New(msg)
	}
	return mw, nil
}
|
/*
* Copyright 2020 American Express Travel Related Services Company, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package simplemli
import (
"encoding/hex"
"testing"
)
type MLICase struct {
Name string
Size int
Encoded string
Invalid string
Value int
EmbeddedHeader int
}
// TestMLIs runs decode, encode, round-trip, zero-length and invalid-value
// subtests for every supported MLI format.
func TestMLIs(t *testing.T) {
	mc := []MLICase{
		MLICase{
			Name:    "2I",
			Size:    Size2I,
			Encoded: "002d",
			Invalid: "0001",
			Value:   43,
		},
		MLICase{
			Name:    "2E",
			Size:    Size2E,
			Encoded: "002b",
			Value:   43,
		},
		MLICase{
			Name:    "4I",
			Size:    Size4I,
			Encoded: "00000035",
			Invalid: "00000001",
			Value:   49,
		},
		MLICase{
			Name:    "4E",
			Size:    Size4E,
			Encoded: "00000022",
			Value:   34,
		},
		MLICase{
			Name:           "2EE",
			Size:           Size2EE,
			Encoded:        "0036",
			Value:          56,
			EmbeddedHeader: 2,
		},
		MLICase{
			Name:    "2BCD2",
			Size:    Size2BCD2,
			Encoded: "00000288",
			Invalid: "00000001",
			Value:   284,
		},
		MLICase{
			Name:    "A4E",
			Size:    SizeA4E,
			Encoded: "30303433",
			Value:   43,
		},
	}
	// Execute Various Test Cases
	for _, c := range mc {
		// Decoding the known-good hex sample must yield c.Value.
		t.Run("Decode "+c.Name, func(t *testing.T) {
			b, err := hex.DecodeString(c.Encoded)
			if err != nil {
				t.Errorf("Unable to decode test case sample payload hex - %s", err)
				t.FailNow()
			}
			n, err := Decode(c.Name, &b)
			if err != nil {
				t.Errorf("Unexpected error decoding sample MLI - %s", err)
			}
			if n != c.Value {
				t.Errorf("Unexpected value returned from MLI %s, got %d expected %d", c.Encoded, n, c.Value)
			}
		})
		// Encoding c.Value must reproduce the sample hex exactly.
		t.Run("Encode "+c.Name, func(t *testing.T) {
			b, err := Encode(c.Name, c.Value)
			if err != nil {
				t.Errorf("Unable to encode test case length - %s", err)
				t.FailNow()
			}
			if hex.EncodeToString(b) != c.Encoded {
				t.Errorf("Encoded value does not match expectations, got %s, expected %s", hex.EncodeToString(b), c.Encoded)
			}
		})
		// Encode followed by Decode must round-trip the value.
		t.Run("Encode & Decode "+c.Name, func(t *testing.T) {
			b, err := Encode(c.Name, c.Value)
			if err != nil {
				t.Errorf("Unable to encode test case length - %s", err)
				t.FailNow()
			}
			n, err := Decode(c.Name, &b)
			if err != nil {
				t.Errorf("Unexpected error decoding sample MLI - %s", err)
			}
			if n != c.Value {
				t.Errorf("Unexpected value returned from MLI %s, got %d expected %d", c.Encoded, n, c.Value)
			}
		})
		// Zero-length payloads (plus any embedded header bytes) must also
		// round-trip.
		t.Run("Zero Byte Encode & Decode "+c.Name, func(t *testing.T) {
			b, err := Encode(c.Name, 0+c.EmbeddedHeader)
			if err != nil {
				t.Errorf("Unable to encode test case length - %s", err)
				t.FailNow()
			}
			n, err := Decode(c.Name, &b)
			if err != nil {
				t.Errorf("Unexpected error decoding sample MLI - %s", err)
			}
			if n != 0+c.EmbeddedHeader {
				t.Errorf("Unexpected value returned from MLI %s, got %d expected %d", hex.EncodeToString(b), n, 0)
			}
		})
		// Formats with an Invalid sample must reject it with ErrLength.
		if c.Invalid != "" {
			t.Run("Invalid MLI value "+c.Name, func(t *testing.T) {
				b, err := hex.DecodeString(c.Invalid)
				if err != nil {
					t.Errorf("Unable to decode test case sample payload hex - %s", err)
					t.FailNow()
				}
				_, err = Decode(c.Name, &b)
				if err == nil || err != ErrLength {
					t.Errorf("Expected error decoding invalid MLI got %s", err)
				}
			})
		}
	}
}
// TestInvalid verifies that Encode and Decode reject unknown MLI types,
// negative lengths, and payloads that cannot be decoded.
func TestInvalid(t *testing.T) {
	t.Run("Encode", func(t *testing.T) {
		if _, err := Encode("Invalid", 0); err == nil {
			t.Errorf("Expected error when calling Encode with bad mli type - got nil")
		}
	})
	t.Run("Encode with negative number", func(t *testing.T) {
		if _, err := Encode("2I", -1); err == nil {
			t.Errorf("Expected error when calling Encode with a negative number - got nil")
		}
	})
	t.Run("Decode", func(t *testing.T) {
		if _, err := Decode("Invalid", &empty); err == nil {
			t.Errorf("Expected error when calling Decode with bad mli type - got nil")
		}
	})
	t.Run("A4E Random String", func(t *testing.T) {
		payload := []byte("helo")
		if _, err := Decode("A4E", &payload); err == nil {
			t.Errorf("Expected error when feeding decode a random string - got nil")
		}
	})
}
// TestBadSizedBytes checks that Decode rejects byte slices whose length does
// not match the fixed size of each MLI type, in both directions.
func TestBadSizedBytes(t *testing.T) {
	sizes := map[string]int{
		"2I":    Size2I,
		"2E":    Size2E,
		"4I":    Size4I,
		"4E":    Size4E,
		"2EE":   Size2EE,
		"2BCD2": Size2BCD2,
	}
	for name, size := range sizes {
		t.Run(name+" Bigger than expected test", func(t *testing.T) {
			oversized := make([]byte, size+10000)
			if _, err := Decode(name, &oversized); err == nil {
				t.Errorf("Expected error when sending too big byte slice to decode sent %d for mli type %s", len(oversized), name)
			}
		})
		t.Run(name+" Smaller than expected test", func(t *testing.T) {
			undersized := make([]byte, size-1)
			if _, err := Decode(name, &undersized); err == nil {
				t.Errorf("Expected error when sending too small byte slice to decode sent %d for mli type %s", len(undersized), name)
			}
		})
	}
}
|
--- vendor/github.com/modern-go/reflect2/go_below_118.go.orig 2022-04-16 21:56:08 UTC
+++ vendor/github.com/modern-go/reflect2/go_below_118.go
@@ -0,0 +1,21 @@
+//+build !go1.18
+
+package reflect2
+
+import (
+ "unsafe"
+)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+//go:linkname mapiterinit reflect.mapiterinit
+func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter)
+
+func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+ return &UnsafeMapIterator{
+ hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
+ pKeyRType: type2.pKeyRType,
+ pElemRType: type2.pElemRType,
+ }
+}
|
package main
import (
	"encoding/hex"
	"io/ioutil"
)
// crcBytes decodes the first 8 ASCII hex characters of ascii (upper- or
// lowercase) and returns the resulting 4 bytes in reversed (little-endian)
// order: chars 6-7 become byte 0, ..., chars 0-1 become byte 3.
func crcBytes(ascii []byte) ([]byte, error) {
	decoded, err := hex.DecodeString(string(ascii[:8]))
	if err != nil {
		return nil, err
	}
	return []byte{decoded[3], decoded[2], decoded[1], decoded[0]}, nil
}

// main reads an 8-character ASCII hex CRC from "crc.ascii" and writes it as
// 4 raw little-endian bytes to "crc".
func main() {
	a, err := ioutil.ReadFile("crc.ascii")
	if err != nil {
		panic(err)
	}
	b, err := crcBytes(a)
	if err != nil {
		// previously invalid hex characters silently produced garbage bytes;
		// fail loudly instead, consistent with the other error paths
		panic(err)
	}
	err = ioutil.WriteFile("crc", b, 0644)
	if err != nil {
		panic(err)
	}
}
|
package cmd
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/google/uuid"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/lint/support"
"sigs.k8s.io/kustomize/kyaml/yaml"
"github.com/dollarshaveclub/acyl/pkg/persistence"
"github.com/rivo/tview"
"github.com/dollarshaveclub/acyl/pkg/eventlogger"
"github.com/alecthomas/chroma/quick"
"github.com/dollarshaveclub/acyl/pkg/ghclient"
"github.com/dollarshaveclub/acyl/pkg/models"
"github.com/dollarshaveclub/acyl/pkg/nitro/meta"
"github.com/dollarshaveclub/acyl/pkg/nitro/metahelm"
"github.com/dollarshaveclub/acyl/pkg/nitro/metrics"
"github.com/dollarshaveclub/metahelm/pkg/dag"
metahelmlib "github.com/dollarshaveclub/metahelm/pkg/metahelm"
"github.com/gdamore/tcell"
"github.com/spf13/afero"
"gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-billy.v4/osfs"
"github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
)
// configCmd represents the config command: the parent of the info/check/test
// subcommands that provide local validation and development tooling for
// acyl.yml files.
var configCmd = &cobra.Command{
	Use:   "config",
	Short: "local testing and development tools for acyl.yml",
	Long: `config and subcommands are used to do local validation and testing
of acyl.yml configurations`,
}
// configInfoCmd implements "config info": parse the acyl.yml in the current
// directory and present an interactive terminal summary (see configInfo).
var configInfoCmd = &cobra.Command{
	Use:   "info",
	Short: "get summary information from an acyl.yml",
	Long: `Parses, validates and displays summary information about the acyl.yml in the current directory
(which must be a valid git repo with GitHub remotes). Branch matching will use the currently checked-out branch and the value
passed in for the base-branch flag.
Paths provided by --search-paths will be recursively searched for valid git repositories containing GitHub remotes,
and if found, they will be used as repo dependencies if those GitHub repository names are referenced by acyl.yml. Any branches present in the
local repositories will be used for branch-matching purposes (they do not need to exist in the remote GitHub repo).
Any repo or chart_repo_path dependencies that are referenced in acyl.yml (or transitively included acyl.yml files) but not found in the local filesystem will be accessed via the GitHub API.
Ensure that you have a valid GitHub token in the environment variable GITHUB_TOKEN and that it has at least read permissions
for the repositories referenced that are not present locally.`,
	Run: configInfo,
}
// configCheckCmd implements "config check": validate acyl.yml and exit with
// status 0 (valid) or 1 (invalid), for use in scripts and CI (see configCheck).
var configCheckCmd = &cobra.Command{
	Use: "check",
	// typo fix: "quicly" -> "quickly"
	Short: "quickly validate an acyl.yml",
	Long: `Parses and validates the acyl.yml in the current directory and exits with code 0 if successful, or 1 if an error was detected.
This is intended for use in scripts or CI as a check that acyl.yml is valid.
Branch matching will use the currently checked-out branch and the value passed in for the base-branch flag.
Paths provided by --search-paths will be recursively searched for valid git repositories containing GitHub remotes,
and if found, they will be used as repo dependencies if those GitHub repository names are referenced by acyl.yml. Any branches present in the
local repositories will be used for branch-matching purposes (they do not need to exist in the remote GitHub repo).
Any repo or chart_repo_path dependencies that are referenced in acyl.yml (or transitively included acyl.yml files) but not found in the local filesystem will be accessed via the GitHub API.
Ensure that you have a valid GitHub token in the environment variable GITHUB_TOKEN and that it has at least read permissions
for the repositories referenced that are not present locally.`,
	Run: configCheck,
}
// Package-level state shared by the config subcommands; populated from flags in init
// (localRepos is filled later by generateLocalMetaGetter).
var repoSearchPaths []string  // --search-paths: directories scanned for local git repos
var workingTreeRepos []string // --working-tree-repos: repos whose working tree (not commits) is used
var localRepos map[string]string // repo name -> local filesystem path
var githubHostname, baseBranch string
var shell, dotPath, openPath string
var verbose, triggeringRepoUsesWorkingTree bool
// init wires up the config command tree and all of its flags.
func init() {
	// info
	configInfoCmd.Flags().StringVar(&shell, "shell", "/bin/bash -c", "Path to command shell plus command prefix")
	configInfoCmd.Flags().StringVar(&dotPath, "dot-path", "dot", "Path to Graphviz dot")
	// pick a platform-appropriate default for the "open a file" command
	// (used to display the rendered DAG image)
	switch runtime.GOOS {
	case "darwin":
		openPath = "open"
	case "windows":
		openPath = "start"
	default:
		openPath = "xdg-open"
	}
	configInfoCmd.Flags().StringVar(&openPath, "open-path", openPath, "Path to OS-specific open command")
	// check
	// test (create/update/delete)
	// (see test.go)
	// shared flags
	hd, err := homedir.Dir()
	if err != nil {
		// non-fatal: fall back to an empty home dir for the default search path
		log.Printf("error getting home directory: %v", err)
		hd = ""
	}
	configCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Verbose output")
	configCmd.PersistentFlags().StringSliceVar(&repoSearchPaths, "search-paths", []string{filepath.Join(hd, "code")}, "comma-separated list of paths to search for git repositories")
	configCmd.PersistentFlags().StringSliceVar(&workingTreeRepos, "working-tree-repos", []string{}, "comma-separated list of repo names to use the working tree instead of commits, if present locally")
	configCmd.PersistentFlags().BoolVar(&triggeringRepoUsesWorkingTree, "triggering-repo-working-tree", true, "Triggering repo always uses working tree instead of commits")
	configCmd.PersistentFlags().StringVar(&githubHostname, "github-hostname", "github.com", "GitHub hostname in git repo SSH remotes")
	configCmd.PersistentFlags().StringVar(&baseBranch, "base-branch", "master", "Base branch to use for branch-matching logic")
	configCmd.PersistentFlags().StringVar(&testEnvCfg.kubeCfgPath, "kubecfg", "", "Path to kubeconfig (overrides KUBECONFIG)")
	configCmd.AddCommand(configTestCmd)
	configCmd.AddCommand(configInfoCmd)
	configCmd.AddCommand(configCheckCmd)
	RootCmd.AddCommand(configCmd)
}
// generateLocalMetaGetter constructs a meta.DataGetter that resolves acyl.yml
// data from local git clones where available, falling back to the GitHub API.
// It returns the getter, repo info for the current working directory, the
// working directory path, and a context carrying an event logger.
//
// Side effects: sets the global log output and the package-level logger based
// on --verbose, populates the package-level localRepos map, and terminates the
// process (log.Fatalf) on any unrecoverable setup error. Requires the
// GITHUB_TOKEN environment variable to be non-empty.
func generateLocalMetaGetter(dl persistence.DataLayer, scb ghclient.StatusCallback) (*meta.DataGetter, ghclient.LocalRepoInfo, string, context.Context) {
	var lf func(string, ...interface{})
	var elsink io.Writer
	// default to discarding all output unless --verbose was given
	logw, stdlogw := ioutil.Discard, ioutil.Discard
	if verbose {
		lf = log.Printf
		elsink = os.Stdout
		logw = os.Stdout
		stdlogw = os.Stderr
	}
	log.SetOutput(stdlogw)
	if os.Getenv("GITHUB_TOKEN") == "" {
		log.Fatalf("GITHUB_TOKEN is empty: make sure you have that environment variable set with a valid token")
	}
	logger = log.New(logw, "", log.LstdFlags)
	// scan --search-paths for git repositories with GitHub remotes
	f := ghclient.RepoFinder{
		GitHubHostname: githubHostname,
		FSFunc:         func(path string) afero.Fs { return afero.NewOsFs() },
		LF:             lf,
	}
	logger.Printf("scanning for GitHub repos in paths: %v", repoSearchPaths)
	repos, err := f.Find(repoSearchPaths)
	if err != nil {
		log.Fatalf("error searching for repos: %v", err)
	}
	localRepos = repos
	wd, err := os.Getwd()
	if err != nil {
		log.Fatalf("error getting working directory: %v", err)
	}
	logger.Printf("processing current working directory (must be a git repo w/ GitHub remotes): %v", wd)
	ri, err := ghclient.RepoInfo(afero.NewOsFs(), wd, githubHostname)
	if err != nil {
		log.Fatalf("error getting repo info for current directory: %v", err)
	}
	// event logger for the fake environment run; sink is discarded unless verbose
	el := &eventlogger.Logger{
		ExcludeID: true,
		ID:        uuid.Must(uuid.NewRandom()),
		Sink:      elsink,
		DL:        dl,
	}
	if err := el.Init([]byte{}, ri.GitHubRepoName, testEnvCfg.pullRequest); err != nil {
		log.Fatalf("error initializing event: %v", err)
	}
	ctx := eventlogger.NewEventLoggerContext(context.Background(), el)
	if triggeringRepoUsesWorkingTree {
		workingTreeRepos = append(workingTreeRepos, ri.GitHubRepoName)
	}
	// wrapper that serves working-tree/local repos from disk and everything else via the GitHub API
	lw := &ghclient.LocalWrapper{
		WorkingTreeRepos:  workingTreeRepos,
		Backend:           ghclient.NewGitHubClient(os.Getenv("GITHUB_TOKEN")),
		FSFunc:            func(path string) billy.Filesystem { return osfs.New(path) },
		RepoPathMap:       repos,
		SetStatusCallback: scb,
	}
	// override refs (and therefore docker image tags) for local repos so we don't use the HEAD commit SHA
	// for working tree changes
	// https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-go
	randString := func(n int) string {
		rand.Seed(time.Now().UTC().UnixNano())
		chars := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890")
		out := make([]rune, n)
		for i := range out {
			out[i] = chars[rand.Intn(len(chars))]
		}
		return string(out)
	}
	repoRefOverrides := make(map[string]string, len(workingTreeRepos))
	for _, wtr := range workingTreeRepos {
		// we don't know the image repo name yet, so we can't reliably make sure that the image tag
		// stays <= 128 characters. Image build/push will fail if the tag is too long.
		repoRefOverrides[wtr] = "local-" + randString(12)
	}
	if triggeringRepoUsesWorkingTree {
		ri.HeadSHA = repoRefOverrides[ri.GitHubRepoName]
	}
	return &meta.DataGetter{RepoRefOverrides: repoRefOverrides, RC: lw, FS: osfs.New("")}, ri, wd, ctx
}
// configCheck implements "config check": it parses and validates the acyl.yml
// in the current working directory, fetches the referenced charts, and renders
// them, exiting with status 1 if any step fails. Intended for scripts and CI.
func configCheck(cmd *cobra.Command, args []string) {
	perr := func(msg string, args ...interface{}) {
		fmt.Fprintf(os.Stderr, msg+"\n", args...)
	}
	var err error
	// exit nonzero if any step below recorded an error; registered first so it
	// runs last, after the temp-dir cleanup defer below
	defer func() {
		if err != nil {
			os.Exit(1)
		}
	}()
	mg, ri, wd, ctx := generateLocalMetaGetter(persistence.NewFakeDataLayer(), nil)
	// synthetic revision data: branch matching uses the checked-out branch vs --base-branch
	rrd := models.RepoRevisionData{
		PullRequest:  999,
		Repo:         ri.GitHubRepoName,
		BaseBranch:   baseBranch,
		BaseSHA:      "",
		SourceBranch: ri.HeadBranch,
		SourceSHA:    ri.HeadSHA,
		User:         "john.doe",
	}
	logger.Printf("processing %v", filepath.Join(wd, "acyl.yml"))
	rc, err := mg.Get(ctx, rrd)
	if err != nil {
		perr("error processing config: %v", err)
		return
	}
	tempd, err := ioutil.TempDir("", "acyl-config-check")
	if err != nil {
		perr("error creating temp dir: %v", err)
		return
	}
	// BUG FIX: the fetched-charts temp dir was previously leaked on every run
	defer os.RemoveAll(tempd)
	cl, err := mg.FetchCharts(ctx, rc, tempd)
	if err != nil {
		perr("error fetching charts: %v", err)
		return
	}
	ci, err := metahelm.NewChartInstallerWithClientsetFromContext(nil, persistence.NewFakeDataLayer(), osfs.New(""), &metrics.FakeCollector{}, k8sConfig.GroupBindings, k8sConfig.PrivilegedRepoWhitelist, k8sConfig.SecretInjections, testEnvCfg.kubeCfgPath, helmClientConfig)
	if err != nil {
		perr("error creating chart installer: %v", err)
		return
	}
	mcloc := metahelm.ChartLocations{}
	for k, v := range cl {
		mcloc[k] = metahelm.ChartLocation{
			ChartPath:   v.ChartPath,
			VarFilePath: v.VarFilePath,
		}
	}
	// rendering the charts is the final validation step; any template error surfaces here
	_, err = ci.GenerateCharts(ctx, "nitro-12345-some-name", &metahelm.EnvInfo{RC: rc, Env: &models.QAEnvironment{Name: "some-name", Repo: rc.Application.Repo}}, mcloc)
	if err != nil {
		perr("error generating charts: %v", err)
		return
	}
}
// configInfo implements "config info": parse the acyl.yml in the current
// working directory and present the result in an interactive terminal UI,
// exiting with the UI's status code.
func configInfo(cmd *cobra.Command, args []string) {
	mg, ri, wd, ctx := generateLocalMetaGetter(persistence.NewFakeDataLayer(), nil)
	logger.Printf("processing %v", filepath.Join(wd, "acyl.yml"))
	// synthetic revision data: branch matching uses the checked-out branch vs --base-branch
	revision := models.RepoRevisionData{
		PullRequest:  999,
		Repo:         ri.GitHubRepoName,
		BaseBranch:   baseBranch,
		BaseSHA:      "",
		SourceBranch: ri.HeadBranch,
		SourceSHA:    ri.HeadSHA,
		User:         "john.doe",
	}
	rc, err := mg.Get(ctx, revision)
	os.Exit(displayInfoTerminal(rc, err, mg))
}
// detailsType enumerates the content type currently occupying the details grid pane
type detailsType int

const (
	emptyDetailsType          detailsType = iota // nothing selected yet
	triggeringRepoDetailsType                    // pane shows the triggering repo
	dependencyDetailsType                        // pane shows a single dependency
	dagDetailsType                               // pane shows the metahelm DAG phases
)
// displayInfoTerminal renders an interactive terminal UI (tview) for the
// processed repo config: a dependency tree on the left, a details table on the
// right, plus auxiliary pages for rendered chart values, chart linting, and the
// metahelm DAG. Charts are fetched/rendered in a background goroutine behind a
// "Loading charts..." modal. Returns a process exit code (0 success, 1 error).
// If err is non-nil, only an error modal is shown and 1 is returned.
func displayInfoTerminal(rc *models.RepoConfig, err error, mg meta.Getter) int {
	// discard event-log output from the background chart fetch
	ctx := eventlogger.NewEventLoggerContext(context.Background(), &eventlogger.Logger{Sink: ioutil.Discard})
	app := tview.NewApplication()
	errorModalText := func(msg, help string, err error) string {
		return "[red::b]" + msg + "\n\n[white::-]" + tview.Escape(err.Error()) + "\n\n[yellow::b]" + help
	}
	// config processing failed: show the error and exit nonzero
	if err != nil {
		modal := tview.NewModal().
			SetText(errorModalText("Error Processing Config", "Fix your acyl.yml (or dependencies) and retry.", err)).
			AddButtons([]string{"Quit"}).
			SetDoneFunc(func(buttonIndex int, buttonLabel string) { app.Stop() })
		app.SetRoot(modal, false).SetFocus(modal).Run()
		log.Printf("error: %v", err) // always print the error even if not in verbose mode
		return 1
	}
	// mcharts maps chart title -> chart; populated asynchronously by the goroutine below
	mcharts := map[string]metahelmlib.Chart{}
	var currentDetailsType detailsType
	var currentTreeNode *tview.TreeNode
	details := tview.NewTable()
	tree := tview.NewTreeView()
	grid := tview.NewGrid()
	pages := tview.NewPages()
	chartmodal := tview.NewModal().SetText("Loading charts...")
	chartmodalgrid := tview.NewGrid()
	chartmodalgrid.SetColumns(0, 20, 0).
		SetRows(0, 5, 0)
	// addToChartModalGrid swaps whichever modal is currently centered in the modal grid
	addToChartModalGrid := func(m *tview.Modal) {
		chartmodalgrid.RemoveItem(chartmodal)
		chartmodalgrid.AddItem(m, 1, 1, 1, 1, 0, 0, true)
	}
	addToChartModalGrid(chartmodal)
	chartvaltxt := tview.NewTextView().SetDynamicColors(true).SetTextAlign(tview.AlignLeft)
	chartvaltxt.SetBorder(true).SetTitle("Rendered Chart Values (esc to go back)")
	pages.AddPage("chart_vals", chartvaltxt, true, false)
	chartlinttxt := tview.NewTextView().SetDynamicColors(true).SetTextAlign(tview.AlignLeft)
	chartlinttxt.SetBorder(true).SetTitle("Chart Linter (esc to go back)")
	pages.AddPage("chart_lint", chartlinttxt, true, false)
	// errorModal shows an error dialog over the main page
	errorModal := func(msg, help string, err error) {
		errmodal := tview.NewModal().SetText(errorModalText(msg, help, err)).
			AddButtons([]string{"OK"}).SetDoneFunc(func(int, string) {
			pages.SwitchToPage("main")
		})
		addToChartModalGrid(errmodal)
		pages.ShowPage("modal")
		app.SetFocus(errmodal)
		app.Draw()
	}
	// infoModal shows an informational dialog; fn (if non-nil) runs on dismissal
	infoModal := func(msg string, fn func()) {
		infomodal := tview.NewModal().SetText(msg).
			AddButtons([]string{"OK"}).SetDoneFunc(func(int, string) {
			pages.SwitchToPage("main")
			if fn != nil {
				fn()
			}
		})
		addToChartModalGrid(infomodal)
		pages.ShowPage("modal")
		app.SetFocus(infomodal)
		app.Draw()
	}
	// getChartVals resolves the metahelm chart for the currently selected tree node
	getChartVals := func() (metahelmlib.Chart, error) {
		ref := currentTreeNode.GetReference()
		if ref == nil {
			return metahelmlib.Chart{}, errors.New("ref is nil!")
		}
		var mc metahelmlib.Chart
		var ok bool
		switch v := ref.(type) {
		case *models.RepoConfig:
			mc, ok = mcharts[models.GetName(v.Application.Repo)]
			if !ok {
				return metahelmlib.Chart{}, errors.New("chart location not found")
			}
		case models.RepoConfigDependency:
			mc, ok = mcharts[v.Name]
			if !ok {
				return metahelmlib.Chart{}, errors.New("chart location not found")
			}
		default:
			return metahelmlib.Chart{}, errors.New("bad type for ref: " + fmt.Sprintf("%T", ref))
		}
		return mc, nil
	}
	// valBtn switches to the chart-values page with syntax-highlighted YAML
	valBtn := tview.NewButton("View Rendered Chart Values").SetSelectedFunc(func() {
		chartvaltxt.Clear()
		defer func() {
			pages.SwitchToPage("chart_vals")
			app.SetFocus(chartvaltxt)
			app.Draw()
		}()
		mc, err := getChartVals()
		if err != nil {
			chartvaltxt.SetText("[red::b]" + err.Error() + "[-::-]")
			return
		}
		err = quick.Highlight(tview.ANSIWriter(chartvaltxt), string(mc.ValueOverrides), "YAML", "terminal256", "monokai")
		if err != nil {
			chartvaltxt.SetText("[red::b]Error syntax highlighting: [white:-:-]" + err.Error())
		}
	})
	// lintBtn runs the helm linter over the selected chart and shows the messages
	lintBtn := tview.NewButton("Chart Linter").SetSelectedFunc(func() {
		chartlinttxt.Clear()
		defer func() {
			pages.SwitchToPage("chart_lint")
			app.SetFocus(chartlinttxt)
			app.Draw()
		}()
		mc, err := getChartVals()
		if err != nil {
			chartlinttxt.SetText("[red::b]" + err.Error() + "[-::-]")
			return
		}
		// severityString maps a helm lint severity to a label and a tview color
		severityString := func(severity int) (string, string) {
			switch severity {
			case support.ErrorSev:
				return "ERROR", "red"
			case support.InfoSev:
				return "INFO", "green"
			case support.WarningSev:
				return "WARNING", "yellow"
			case support.UnknownSev:
				return "UNKNOWN", "cyan"
			}
			return "UNKNOWN", "cyan"
		}
		vos := make(map[string]interface{})
		yaml.Unmarshal(mc.ValueOverrides, &vos) // if error, empty value overrides
		// silence helm's logging while linting (restored below if verbose)
		log.SetOutput(ioutil.Discard)
		l := action.NewLint()
		l.Strict = true
		l.Namespace = "nitro-12345-some-name"
		lr := l.Run([]string{mc.Location}, vos)
		if verbose {
			log.SetOutput(os.Stderr)
		}
		fmt.Fprintf(chartlinttxt, "[::b]Lint Messages:[::-] %v\n", len(lr.Messages))
		fmt.Fprintf(chartlinttxt, "[::u]Messages:[::-]\n\n")
		for _, m := range lr.Messages {
			s, c := severityString(m.Severity)
			fmt.Fprintf(chartlinttxt, "Severity: [%v::b]%v[-::-]\n", c, s)
			fmt.Fprintf(chartlinttxt, "Path: %v\n", m.Path)
			fmt.Fprintf(chartlinttxt, "Message: [::b]%v[::-]\n\n", m.Err)
		}
	})
	// getDAGOG builds the metahelm object graph from the fetched charts
	getDAGOG := func() (dag.ObjectGraph, error) {
		objs := []dag.GraphObject{}
		for k := range mcharts {
			v := mcharts[k]
			objs = append(objs, &v)
		}
		og := dag.ObjectGraph{}
		return og, og.Build(objs)
	}
	// dagBtn renders the DAG to a temp PNG via Graphviz dot and opens it
	dagBtn := tview.NewButton("Display Graph").SetSelectedFunc(func() {
		og, err := getDAGOG()
		if err != nil {
			errorModal("Object Graph Error", "Check dependency configuration.", err)
			return
		}
		b, err := og.Dot(`"` + rc.Application.Repo + `"`)
		if err != nil {
			errorModal("Error Generating Graph", "Check dependency configuration.", err)
			return
		}
		f, err := ioutil.TempFile("", "acyl-metahelm-dag")
		if err != nil {
			errorModal("Error Creating Temp File", "Check your disk.", err)
			return
		}
		f.Write(b)
		f.Close()
		fn := f.Name()
		opencmd := fmt.Sprintf("%v %v -Tpng -o %v.png && %v %v.png", dotPath, fn, fn, openPath, fn)
		shellsl := strings.Split(shell, " ")
		cmdsl := append(shellsl, opencmd)
		c := exec.Command(cmdsl[0], cmdsl[1:]...)
		if out, err := c.CombinedOutput(); err != nil {
			err = fmt.Errorf("%v: %v", err, string(out))
			errorModal("Error Running Command", strings.Join(cmdsl, " "), err)
			return
		}
		// temp files are removed once the user dismisses the modal
		infoModal("Displayed Graph: "+fn+".png", func() { os.Remove(fn); os.Remove(fn + ".png") })
	})
	// renderTriggeringRepo fills the details table for the triggering repo
	renderTriggeringRepo := func() {
		var row int
		addRow := func(name, value string) {
			details.SetCellSimple(row, 0, "[white::b]"+name+"[white::-]")
			details.SetCellSimple(row, 1, "[white::-]"+value)
			row++
		}
		details.Clear()
		details.SetTitle("[white::b]Triggering Repo[white::-]")
		addRow("Repo:", rc.Application.Repo)
		var sfx string
		if triggeringRepoUsesWorkingTree {
			sfx = " [green::b](WORKING TREE)[white::-]"
		}
		wd, _ := os.Getwd()
		addRow("Path:", wd+sfx)
		addRow("Branch:", rc.Application.Branch)
		addRow("Commit:", rc.Application.Ref)
		addRow("Image Repo:", rc.Application.Image)
		addRow("Chart Image Tag Value:", rc.Application.ChartTagValue)
		addRow("Chart Namespace Value:", rc.Application.NamespaceValue)
		addRow("Chart Environment Name Value:", rc.Application.EnvNameValue)
		// exactly one of ChartPath / ChartRepoPath is expected to be set
		var cp string
		if rc.Application.ChartPath != "" {
			cp = rc.Application.ChartPath
		} else {
			cp = rc.Application.ChartRepoPath
		}
		addRow("Chart:", cp)
		var vp string
		if rc.Application.ChartVarsPath != "" {
			vp = rc.Application.ChartVarsPath
		} else {
			vp = rc.Application.ChartVarsRepoPath
		}
		addRow("Chart Vars:", vp)
		if len(rc.Application.ValueOverrides) > 0 {
			details.SetCellSimple(row, 0, "[white::b]Chart Value Overrides:[white::-]")
			for i, vor := range rc.Application.ValueOverrides {
				details.SetCellSimple(row+i, 1, "[white::-]"+tview.Escape(vor))
			}
		}
		// triggering-repo view offers values + lint, not the DAG button
		grid.RemoveItem(dagBtn)
		grid.AddItem(valBtn, 2, 1, 1, 1, 0, 0, true)
		grid.AddItem(lintBtn, 3, 1, 1, 1, 0, 0, true)
	}
	// renderDependency fills the details table for a single dependency
	renderDependency := func(d models.RepoConfigDependency) {
		var row int
		addRow := func(name, value string) {
			details.SetCellSimple(row, 0, "[white::b]"+name+"[white::-]")
			details.SetCellSimple(row, 1, "[white::-]"+value)
			row++
		}
		details.Clear()
		details.SetTitle("[white::b]Dependency[white::-]")
		addRow("Name:", d.Name)
		switch {
		case d.Repo != "":
			if p, ok := localRepos[d.Repo]; ok {
				var sfx string
				for _, wtr := range workingTreeRepos {
					if d.Repo == wtr {
						sfx = " [green::b](WORKING TREE)[white::-]"
					}
				}
				addRow("Path:", p+sfx)
			} else {
				addRow("URL:", "https://github.com/"+d.Repo)
			}
			addRow("Repo:", d.Repo)
			addRow("Type:", "repo")
		case d.AppMetadata.ChartPath != "":
			addRow("Type:", "chart_path")
		case d.AppMetadata.ChartRepoPath != "":
			addRow("Type:", "chart_repo_path")
		default:
			addRow("Type:", "[red::]unknown[-::]")
		}
		if d.Parent != "" {
			addRow("Parent:", d.Parent)
		}
		if d.BranchMatchable() {
			addRow("Branch Matching:", fmt.Sprintf("%v", !d.DisableBranchMatch))
			if d.DefaultBranch != "" {
				addRow("Default Branch:", d.DefaultBranch)
			}
		}
		if d.AppMetadata.Branch != "" {
			addRow("Branch:", d.AppMetadata.Branch)
		}
		addRow("Commit:", d.AppMetadata.Ref)
		if d.AppMetadata.Image != "" {
			addRow("Image Repo:", d.AppMetadata.Image)
			addRow("Chart Image Tag Value:", d.AppMetadata.ChartTagValue)
		}
		addRow("Chart Namespace Value:", d.AppMetadata.NamespaceValue)
		addRow("Chart Environment Name Value:", d.AppMetadata.EnvNameValue)
		var cp string
		if d.AppMetadata.ChartPath != "" {
			cp = d.AppMetadata.ChartPath
		} else {
			cp = d.AppMetadata.ChartRepoPath
		}
		addRow("Chart:", cp)
		var vp string
		if d.AppMetadata.ChartVarsPath != "" {
			vp = d.AppMetadata.ChartVarsPath
		} else {
			vp = d.AppMetadata.ChartVarsRepoPath
		}
		addRow("Chart Vars:", vp)
		if len(d.Requires) > 0 {
			details.SetCellSimple(row, 0, "[white::b]Requires:[white::-]")
			// NOTE(review): row is not advanced inside this loop, so multiple
			// Requires entries overwrite the same cell and only the last one
			// shows (compare renderTriggeringRepo, which uses row+i) — confirm
			// whether this is intended.
			for _, r := range d.Requires {
				details.SetCellSimple(row, 1, "[white::-]"+tview.Escape(r))
			}
			row++
		}
		if len(d.AppMetadata.ValueOverrides) > 0 {
			details.SetCellSimple(row, 0, "[white::b]Chart Value Overrides:[white::-]")
			// NOTE(review): same non-advancing row pattern as the Requires loop above
			for _, vor := range d.AppMetadata.ValueOverrides {
				details.SetCellSimple(row, 1, "[white::-]"+tview.Escape(vor))
			}
			row++
		}
		grid.RemoveItem(dagBtn)
		grid.AddItem(valBtn, 2, 1, 1, 1, 0, 0, true)
		grid.AddItem(lintBtn, 3, 1, 1, 1, 0, 0, true)
	}
	// displayMetahelmDAG fills the details table with install/upgrade phases
	// derived from the chart object graph (deepest level installs first)
	displayMetahelmDAG := func() {
		og, err := getDAGOG()
		if err != nil {
			errorModal("Object Graph Error", "Check dependency configuration.", err)
			return
		}
		r, lvls, err := og.Info()
		if err != nil {
			errorModal("Object Graph Info Error", "Check your dependency requirement configuration.", err)
			return
		}
		var row int
		addRow := func(name, value string) {
			details.SetCellSimple(row, 0, "[white::b]"+name+"[white::-]")
			details.SetCellSimple(row, 1, "[white::-]"+value)
			row++
		}
		details.Clear()
		addRow("Graph Root:", r.Name())
		j := 1
		for i := len(lvls) - 1; i >= 0; i-- {
			details.SetCellSimple(row, 0, "[white::b]Install/Upgrade Phase "+fmt.Sprintf("%v", j)+":[white::-]")
			for k, obj := range lvls[i] {
				details.SetCellSimple(row+k, 1, "[white::-]"+obj.Name())
			}
			j++
			row += len(lvls[i])
		}
		// DAG view offers only the graph button
		grid.RemoveItem(valBtn)
		grid.RemoveItem(lintBtn)
		grid.AddItem(dagBtn, 2, 1, 2, 1, 0, 0, true)
	}
	app.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
		// Switch focus on tab
		if event.Key() == tcell.KeyTAB {
			focus := app.GetFocus()
			if focus == nil {
				return event
			}
			// cycle: buttons -> tree -> details -> buttons, depending on what's shown
			switch v := focus.(type) {
			case *tview.Button:
				switch v.GetLabel() {
				case valBtn.GetLabel():
					app.SetFocus(lintBtn)
				case lintBtn.GetLabel():
					app.SetFocus(tree)
				case dagBtn.GetLabel():
					app.SetFocus(tree)
				}
			case *tview.Table:
				switch currentDetailsType {
				case dagDetailsType:
					app.SetFocus(dagBtn)
				case triggeringRepoDetailsType:
					app.SetFocus(valBtn)
				case dependencyDetailsType:
					app.SetFocus(valBtn)
				case emptyDetailsType:
					return event
				default:
					return event
				}
			case *tview.TreeView:
				if currentDetailsType == emptyDetailsType {
					return nil
				}
				app.SetFocus(details)
			default:
				break
			}
			return nil
		}
		// esc returns from the chart values/lint pages to the main page
		if event.Key() == tcell.KeyESC {
			pages.SwitchToPage("main")
			app.SetFocus(tree)
			app.Draw()
		}
		return event
	})
	triggeringiswt := " [white::](LOCAL)"
	if triggeringRepoUsesWorkingTree {
		triggeringiswt = " [green::b](WORKING TREE)"
	}
	root := tview.NewTreeNode(rc.Application.Repo + " [yellow](triggering repo)" + triggeringiswt + "[white::-]").
		SetSelectable(true).
		SetReference(rc)
	// selecting a node dispatches on its reference type to pick the details renderer
	tree.SetRoot(root).SetSelectedFunc(func(node *tview.TreeNode) {
		ref := node.GetReference()
		if ref == nil {
			return
		}
		currentTreeNode = node
		switch v := ref.(type) {
		case models.RepoConfigDependency:
			currentDetailsType = dependencyDetailsType
			renderDependency(v)
		case *models.RepoConfig:
			currentDetailsType = triggeringRepoDetailsType
			renderTriggeringRepo()
		case string:
			// the "Metahelm DAG" node carries a plain string reference
			currentDetailsType = dagDetailsType
			displayMetahelmDAG()
		default:
			errorModal("Tree Node Reference Error", "Bug!", fmt.Errorf("ref is unexpected type: %T", ref))
		}
	}).SetCurrentNode(root)
	renderTree(root, rc)
	details.SetCellSimple(0, 1, "[::d](select tree item)[::-]")
	// layout: title bar, tree (left) + details (right), button rows, help footer
	grid.SetRows(1, 0, 1, 1, 1).SetColumns(0, 0).SetBorders(true).
		AddItem(tview.NewTextView().
			SetTextAlign(tview.AlignCenter).
			SetDynamicColors(true).
			SetText("[yellow::b]Acyl Environment Config: [white::-]"+rc.Application.Repo), 0, 0, 1, 2, 0, 0, false).
		AddItem(tree, 1, 0, 3, 1, 0, 0, true).
		AddItem(details, 1, 1, 1, 1, 0, 0, false).
		AddItem(tview.NewTextView().
			SetTextAlign(tview.AlignCenter).
			SetText("enter to select item - tab to switch focus - ctrl+c to exit"), 4, 0, 1, 2, 0, 0, false)
	pages.AddPage("main", grid, true, true)
	pages.AddPage("modal", chartmodalgrid, false, true) // must add after main so main is visible behind it
	tempd, err := ioutil.TempDir("", "acyl-config")
	if err != nil {
		log.Printf("error creating temp dir: %v", err)
		return 1
	}
	defer os.RemoveAll(tempd)
	// fetch and render charts in the background behind the loading modal
	go func() {
		pages.ShowPage("modal")
		cl, err := mg.FetchCharts(ctx, rc, tempd)
		if err != nil {
			errorModal("Error Processing Charts", "Check your chart configuration.", err)
			return
		}
		ci, err := metahelm.NewChartInstallerWithClientsetFromContext(nil, persistence.NewFakeDataLayer(), osfs.New(""), &metrics.FakeCollector{}, k8sConfig.GroupBindings, k8sConfig.PrivilegedRepoWhitelist, k8sConfig.SecretInjections, testEnvCfg.kubeCfgPath, helmClientConfig)
		if err != nil {
			errorModal("Error Instantiating Chart Installer", "Bug!", err)
			return
		}
		mcloc := metahelm.ChartLocations{}
		for k, v := range cl {
			mcloc[k] = metahelm.ChartLocation{
				ChartPath:   v.ChartPath,
				VarFilePath: v.VarFilePath,
			}
		}
		charts, err := ci.GenerateCharts(ctx, "nitro-12345-some-name", &metahelm.EnvInfo{RC: rc, Env: &models.QAEnvironment{Name: "some-name", Repo: rc.Application.Repo}}, mcloc)
		if err != nil {
			errorModal("Error Generating Metahelm Charts", "Check your chart configuration.", err)
			return
		}
		for _, c := range charts {
			mcharts[c.Title] = c
		}
		pages.HidePage("modal")
		app.SetFocus(tree)
		app.Draw()
	}()
	if err := app.SetRoot(pages, true).SetFocus(pages).Run(); err != nil {
		log.Printf("error starting terminal UI: %v", err)
		return 1
	}
	return 0
}
// renderTree populates the tree view: a Metahelm DAG node, then direct and
// environment dependency groups (parentless deps first), then the remaining
// dependencies attached beneath their parents, one level per pass.
//
// A dependency's effective parent is its Parent field, unless another
// dependency lists it in Requires, in which case that other dependency is
// treated as the parent for tree placement.
func renderTree(root *tview.TreeNode, rc *models.RepoConfig) {
	root.AddChild(tview.NewTreeNode("[white::b]Metahelm DAG").SetSelectable(true).SetReference("DAG"))
	depmap := make(map[string]*tview.TreeNode, rc.Dependencies.Count())
	wtrm := make(map[string]struct{}, len(workingTreeRepos))
	for _, r := range workingTreeRepos {
		wtrm[r] = struct{}{}
	}
	// nodename labels a dependency with where its contents come from
	// (working tree, local clone, or remote GitHub API)
	nodename := func(d models.RepoConfigDependency) string {
		if d.Repo != "" {
			if _, ok := wtrm[d.Repo]; ok {
				return d.Name + " [green::b](WORKING TREE)[white::-]"
			}
			if _, ok := localRepos[d.Repo]; ok {
				return d.Name + " [white::b](LOCAL)[white::-]"
			}
			return d.Name + " [white::b](REMOTE)[white::-]"
		}
		return d.Name
	}
	// reqtargetmap maps a required dependency name -> the dependency requiring it
	reqtargetmap := map[string]string{}
	for _, d := range rc.Dependencies.All() {
		for _, r := range d.Requires {
			reqtargetmap[r] = d.Name
		}
	}
	// effectiveParent resolves the parent used for tree placement (Requires wins)
	effectiveParent := func(d models.RepoConfigDependency) string {
		if r, ok := reqtargetmap[d.Name]; ok {
			return r
		}
		return d.Parent
	}
	// addRoots adds a group node containing the parentless deps of a category
	addRoots := func(title string, deps []models.RepoConfigDependency) {
		if len(deps) == 0 {
			return
		}
		group := tview.NewTreeNode(title).SetSelectable(false)
		for _, d := range deps {
			if effectiveParent(d) == "" {
				dn := tview.NewTreeNode(nodename(d)).SetSelectable(true).SetReference(d)
				group.AddChild(dn)
				depmap[d.Name] = dn
			}
		}
		root.AddChild(group)
	}
	addRoots("[white::b]Direct Dependencies", rc.Dependencies.Direct)
	addRoots("[white::b]Environment Dependencies", rc.Dependencies.Environment)
	// Attach the remaining (parented) dependencies, one tree level per pass.
	// BUG FIX: the original loop ran forever if any dependency's parent could
	// never be resolved, and re-added already-placed dependencies (creating
	// duplicate tree nodes) on subsequent passes. We now skip placed deps and
	// stop as soon as a full pass makes no progress.
	for len(depmap) < rc.Dependencies.Count() {
		progress := false
		for _, d := range rc.Dependencies.All() {
			if _, placed := depmap[d.Name]; placed {
				continue
			}
			parent := effectiveParent(d)
			if parent == "" {
				continue
			}
			if n, ok := depmap[parent]; ok {
				dn := tview.NewTreeNode(nodename(d)).SetSelectable(true).SetReference(d)
				n.AddChild(dn)
				depmap[d.Name] = dn
				progress = true
			}
		}
		if !progress {
			// unresolvable parents remain; leave them out rather than spin forever
			return
		}
	}
}
|
package instruction
// CountSteps returns the number of steps required to escape the instruction
// slice when every executed offset is incremented by 1 after the jump.
func CountSteps(instructions []int) int {
	bump := func(offset int) int { return offset + 1 }
	return count(instructions, bump)
}
// CountStrangeSteps returns the number of steps required to escape the
// instruction slice with the stranger rule: after each jump, an offset of
// three or more is decreased by 1; otherwise it is increased by 1 as before.
func CountStrangeSteps(instructions []int) int {
	adjust := func(offset int) int {
		if offset < 3 {
			return offset + 1
		}
		return offset - 1
	}
	return count(instructions, adjust)
}
// count runs the jump machine over instructions, starting at index 0: each step
// reads the offset at the current position, rewrites that cell via increment,
// and jumps by the original offset. It returns the number of steps executed
// before the position leaves the slice. The slice is mutated in place.
//
// BUG FIX: the loop previously only checked i < len(instructions), so a
// negative jump escaping off the front of the slice panicked with an index
// out of range; escaping in either direction now terminates cleanly.
func count(instructions []int, increment func(int) int) int {
	steps := 0
	for i := 0; i >= 0 && i < len(instructions); {
		offset := instructions[i]
		instructions[i] = increment(offset)
		steps++
		i += offset
	}
	return steps
}
|
package html
import (
"github.com/elliotchance/gedcom"
"regexp"
"strings"
)
// alnumOrDashRegexp matches any run of characters that is not a lowercase
// letter, digit, underscore, or dash (compiled once at package scope).
var alnumOrDashRegexp = regexp.MustCompile("[^a-z_0-9-]+")

// GetIndividuals indexes every individual in the document under a unique,
// URL-friendly key derived from the individual's name.
func GetIndividuals(document *gedcom.Document, placesMap map[string]*place) map[string]*gedcom.IndividualNode {
	individuals := map[string]*gedcom.IndividualNode{}
	for _, individual := range document.Individuals() {
		slug := strings.ToLower(individual.Name().String())
		slug = alnumOrDashRegexp.ReplaceAllString(slug, "-")
		individuals[getUniqueKey(individuals, slug, placesMap)] = individual
	}
	return individuals
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package set
import (
"math"
"github.com/pingcap/tidb/util/hack"
)
// MemAwareMap is a map which is aware of its memory usage. It's adapted from SetWithMemoryUsage.
// It doesn't support delete.
// The estimate usage of memory is usually smaller than the real usage.
// According to experiments with SetWithMemoryUsage, 2/3 * estimated usage <= real usage <= estimated usage.
type MemAwareMap[K comparable, V any] struct {
	M map[K]V // it's public, when callers want to directly access it, e.g. use in a for-range-loop
	// bInMap tracks the bucket-count exponent: Set mirrors the runtime's map
	// growth (len > load_factor * 2^bInMap) to report memory deltas.
	bInMap int64
	// bucketMemoryUsage is the estimated size in bytes of one bucket for (K, V).
	bucketMemoryUsage uint64
}
// EstimateMapSize returns the estimated bucket memory of a map holding `length`
// elements. It doesn't include the dynamic part, e.g. objects pointed to by
// pointers stored in the map. The runtime keeps
// len(map) <= load_factor * 2^b, so b = ceil(log2(len(map)/load_factor)) and
// the bucket memory is bucketSize * 2^b.
func EstimateMapSize(length int, bucketSize uint64) uint64 {
	if length == 0 {
		return 0
	}
	b := uint64(math.Ceil(math.Log2(float64(length) * hack.LoadFactorDen / hack.LoadFactorNum)))
	return bucketSize * (1 << b)
}
// NewMemAwareMap creates an empty MemAwareMap ready for use.
func NewMemAwareMap[K comparable, V any]() MemAwareMap[K, V] {
	var m MemAwareMap[K, V]
	m.M = make(map[K]V)
	m.bucketMemoryUsage = hack.EstimateBucketMemoryUsage[K, V]()
	// bInMap starts at its zero value (0 => one bucket).
	return m
}
// Get returns the value stored under k and whether the key was present.
func (m *MemAwareMap[K, V]) Get(k K) (V, bool) {
	value, found := m.M[k]
	return value, found
}
// Set stores v under k and returns the estimated growth in bucket memory
// (0 when the insertion does not push the map past its load factor).
func (m *MemAwareMap[K, V]) Set(k K, v V) int64 {
	m.M[k] = v
	// Threshold mirrors the runtime's growth rule: grow once the element
	// count exceeds load_factor * 2^bInMap.
	threshold := (1 << m.bInMap) * hack.LoadFactorNum / hack.LoadFactorDen
	if len(m.M) <= threshold {
		return 0
	}
	delta := int64(m.bucketMemoryUsage * (1 << m.bInMap))
	m.bInMap++
	return delta
}
// Len returns the number of elements in the map.
func (m *MemAwareMap[K, V]) Len() int {
	return len(m.M)
}
|
package main
import (
"context"
"errors"
"flag"
"os"
"github.com/cybozu-go/log"
"github.com/cybozu-go/well"
"github.com/fsnotify/fsnotify"
)
// main watches the directory given as the first CLI argument with fsnotify
// and logs every filesystem event until the process is signaled to stop.
func main() {
	flag.Parse()
	err := well.LogConfig{}.Apply()
	if err != nil {
		log.ErrorExit(err)
	}
	if len(flag.Args()) == 0 {
		log.ErrorExit(errors.New("please specify a path to a monitored dir"))
	}
	dir := flag.Args()[0]
	// Stat only validates that the path exists before we try to watch it.
	if _, err := os.Stat(dir); err != nil {
		log.ErrorExit(err)
	}
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.ErrorExit(err)
	}
	defer w.Close()
	// BUG FIX: the original called w.Add(fi.Name()), which is only the BASE
	// name of the path (e.g. "bar" for "foo/bar") and would be resolved
	// relative to the current working directory. Watch the given path itself.
	err = w.Add(dir)
	if err != nil {
		log.ErrorExit(err)
	}
	well.Go(func(ctx context.Context) error {
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case e := <-w.Events:
				log.Info("received event", map[string]interface{}{
					"path": e.Name,
					"op":   e.Op,
				})
			case err := <-w.Errors:
				return err
			}
		}
	})
	well.Stop()
	err = well.Wait()
	if err != nil && !well.IsSignaled(err) {
		log.ErrorExit(err)
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//29. Divide Two Integers
//Given two integers dividend and divisor, divide two integers without using multiplication, division and mod operator.
//Return the quotient after dividing dividend by divisor.
//The integer division should truncate toward zero.
//Example 1:
//Input: dividend = 10, divisor = 3
//Output: 3
//Example 2:
//Input: dividend = 7, divisor = -3
//Output: -2
//Note:
//Both dividend and divisor will be 32-bit signed integers.
//The divisor will never be 0.
//Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−2^31, 2^31 − 1]. For the purpose of this problem, assume that your function returns 2^31 − 1 when the division result overflows.
//func divide(dividend int, divisor int) int {
//}
// Time Is Money
|
package coremidi
import (
"errors"
"fmt"
"syscall"
"unsafe"
)
/*
#cgo LDFLAGS: -framework CoreMIDI -framework CoreFoundation
#include <CoreMIDI/CoreMIDI.h>
#include <stdio.h>
#include <unistd.h>
static void MIDIInputProc(const MIDIPacketList *pktlist, void *readProcRefCon, void *srcConnRefCon)
{
MIDIPacket *packet = (MIDIPacket *)&(pktlist->packet[0]);
UInt32 packetCount = pktlist->numPackets;
int i, j, n;
Byte *data;
int lengthBytes = 2;
int timeStampBytes = 8;
for (i = 0; i < packetCount; i++) {
data = calloc(sizeof(Byte), packet->length + lengthBytes + timeStampBytes);
memcpy(data, &(packet->length), lengthBytes);
memcpy(data + lengthBytes, &(packet->timeStamp), timeStampBytes);
memcpy(data + lengthBytes + timeStampBytes, packet->data, packet->length);
// http://man7.org/linux/man-pages/man7/pipe.7.html
//
// POSIX.1-2001 says that write(2)s of less than PIPE_BUF bytes must be
// atomic: the output data is written to the pipe as a contiguous sequence.
//
// POSIX.1-2001 requires PIPE_BUF to be at least 512 bytes.
n = write(*(int *)srcConnRefCon, data, packet->length + lengthBytes + timeStampBytes);
packet = MIDIPacketNext(packet);
free(data);
}
}
typedef void (*midi_input_proc)(const MIDIPacketList *pktlist, void *readProcRefCon, void *srcConnRefCon);
static midi_input_proc getProc()
{
return *MIDIInputProc;
}
*/
import "C"
// OutputPort wraps a CoreMIDI output port handle.
type OutputPort struct {
	port C.MIDIPortRef
}
// NewOutputPort creates a CoreMIDI output port with the given display name
// on the given client. The error carries the CoreMIDI OSStatus code.
func NewOutputPort(client Client, name string) (outputPort OutputPort, err error) {
	var port C.MIDIPortRef
	stringToCFString(name, func(cfName C.CFStringRef) {
		osStatus := C.MIDIOutputPortCreate(client.client, cfName, &port)
		if osStatus != C.noErr {
			// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) — same
			// message, idiomatic form (staticcheck S1028).
			err = fmt.Errorf("%d: failed to create a port", int(osStatus))
		} else {
			outputPort = OutputPort{port}
		}
	})
	return
}
// ReadProc is the callback invoked for every MIDI packet received on an
// input port's connected sources.
type ReadProc func(source Source, packet Packet)

// InputPort wraps a CoreMIDI input port together with its Go-side read
// callback and the write ends of the pipes used to ferry packets out of
// the C read proc.
type InputPort struct {
	port     C.MIDIPortRef
	readProc ReadProc
	// writeFds keeps the pipe write descriptors alive.
	// NOTE(review): Connect has a value receiver, so appends to this slice
	// are lost when Connect returns — confirm whether this field is needed.
	writeFds []*C.int
}
// NewInputPort creates a CoreMIDI input port whose packets are delivered to
// readProc. The C-side MIDIInputProc writes raw packets into a pipe (see
// Connect); the error carries the CoreMIDI OSStatus code.
func NewInputPort(client Client, name string, readProc ReadProc) (inputPort InputPort, err error) {
	var port C.MIDIPortRef
	stringToCFString(name, func(cfName C.CFStringRef) {
		osStatus := C.MIDIInputPortCreate(client.client,
			cfName,
			(C.MIDIReadProc)(C.getProc()),
			unsafe.Pointer(uintptr(0)),
			&port)
		if osStatus != C.noErr {
			// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) — same
			// message, idiomatic form (staticcheck S1028).
			err = fmt.Errorf("%d: failed to create a port", int(osStatus))
		} else {
			inputPort = InputPort{port, readProc, make([]*C.int, 0)}
		}
	})
	return
}
// Connect connects source to this input port. Packets are written by the C
// read proc into a pipe and decoded by a goroutine that invokes the port's
// ReadProc. The returned portConnection can Disconnect the source.
func (port InputPort) Connect(source Source) (portConnection, error) {
	fd := make([]int, 2)
	// BUG FIX: the pipe error was silently ignored; on failure fd stayed
	// zeroed and the reader goroutine would have consumed fd 0 (stdin).
	if err := syscall.Pipe(fd); err != nil {
		return portConnection{}, err
	}
	readFd := fd[0]
	writeFd := C.int(fd[1])
	// NOTE(review): port is a value receiver, so this append mutates a copy
	// and is discarded when Connect returns — confirm whether writeFds is
	// meant to track connections (a pointer receiver would be needed).
	port.writeFds = append(port.writeFds, &writeFd)
	C.MIDIPortConnectSource(port.port, source.endpoint, unsafe.Pointer(&writeFd))
	go processIncomingPacket(
		readFd,
		func(data []byte, timeStamp uint64) {
			port.readProc(source, NewPacket(data, timeStamp))
		},
	)
	return portConnection{port, source, &writeFd}, nil
}
// portConnection ties an input port to one connected source and the write
// end of the pipe carrying that source's packets.
type portConnection struct {
	port    InputPort
	source  Source
	writeFd *C.int
}
// Disconnect closes the packet pipe (which terminates the reader goroutine)
// and detaches the source from the CoreMIDI port.
func (connection portConnection) Disconnect() {
	syscall.Close(int(*connection.writeFd))
	C.MIDIPortDisconnectSource(connection.port.port, connection.source.endpoint)
}
|
package service
import (
"encoding/json"
"fmt"
"github.com/bar41234/bar_book_service/datastore"
"github.com/bar41234/bar_book_service/models"
"github.com/gin-gonic/gin"
"net/http"
)
const (
	// errorMsgInvalidPutRequest is returned when a PUT body cannot be parsed.
	errorMsgInvalidPutRequest = "Error: Invalid PUT request"
	// errorMsgInvalidPostRequest is returned when a POST body cannot be parsed.
	errorMsgInvalidPostRequest = "Error: Invalid POST request"
)
// Ping is a liveness probe: it always answers 200 with a fixed message.
func Ping(c *gin.Context) {
	reply := map[string]string{"message": "Ping Pong!"}
	c.JSON(http.StatusOK, reply)
}
// GetBook handles GET /book/:id and returns the book or 404 with the
// lookup error's message.
func GetBook(c *gin.Context) {
	store, _ := datastore.BooksStoreFactory()
	book, err := store.GetBook(c.Param("id"))
	if err != nil {
		c.JSON(http.StatusNotFound, err.Error())
		return
	}
	c.JSON(http.StatusOK, *book)
}
// AddBook handles POST /book: binds the request body into a Book, stores it,
// and returns the new book's id.
func AddBook(c *gin.Context) {
	bookStore, _ := datastore.BooksStoreFactory()
	book := models.Book{}
	err := c.ShouldBind(&book)
	if err != nil {
		// BUG FIX: this is the POST (create) handler, but the original
		// returned errorMsgInvalidPutRequest ("Invalid PUT request").
		c.JSON(http.StatusBadRequest, errorMsgInvalidPostRequest)
		return
	}
	id, err := bookStore.AddBook(book)
	if err != nil {
		// NOTE(review): gin JSON-marshals the error interface, which for most
		// error types serializes to "{}". Consider err.Error(); left as-is to
		// preserve the current response shape.
		c.JSON(http.StatusBadRequest, err)
		return
	}
	c.JSON(http.StatusOK, *id)
}
// UpdateBook handles PUT /book/:id: parses the raw JSON body and updates the
// book's title, answering with a confirmation string.
func UpdateBook(c *gin.Context) {
	bookStore, _ := datastore.BooksStoreFactory()
	id := c.Param("id")
	jsonData, err := c.GetRawData()
	if err != nil {
		// BUG FIX: this is the PUT (update) handler, but the original
		// returned errorMsgInvalidPostRequest ("Invalid POST request").
		c.JSON(http.StatusBadRequest, errorMsgInvalidPutRequest)
		return
	}
	var book models.Book
	err = json.Unmarshal(jsonData, &book)
	if err != nil {
		c.JSON(http.StatusBadRequest, errorMsgInvalidPutRequest)
		return
	}
	bookId, err := bookStore.UpdateBook(id, book.Title)
	if err != nil {
		c.JSON(http.StatusBadRequest, err)
		return
	}
	c.JSON(http.StatusOK, fmt.Sprintf("Book %s was successfully updated", *bookId))
}
// DeleteBook handles DELETE /book/:id and confirms the deletion.
func DeleteBook(c *gin.Context) {
	store, _ := datastore.BooksStoreFactory()
	if err := store.DeleteBook(c.Param("id")); err != nil {
		c.JSON(http.StatusBadRequest, err)
		return
	}
	c.JSON(http.StatusOK, "Book was successfully deleted!")
}
// SearchBook handles GET /search, filtering books by the optional
// title / author_name / price_range query parameters.
func SearchBook(c *gin.Context) {
	store, _ := datastore.BooksStoreFactory()
	books, err := store.Search(
		c.Query("title"),
		c.Query("author_name"),
		c.Query("price_range"),
	)
	if err != nil {
		c.JSON(http.StatusBadRequest, err)
		return
	}
	c.JSON(http.StatusOK, books)
}
// GetStoreInfo handles GET /store and returns aggregate store information.
func GetStoreInfo(c *gin.Context) {
	store, _ := datastore.BooksStoreFactory()
	info, err := store.GetStoreInfo()
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err})
		return
	}
	c.JSON(http.StatusOK, info)
}
// GetActivities handles GET /activity?username=... and returns the recorded
// actions for that user; the username query parameter is mandatory.
func GetActivities(c *gin.Context) {
	activityStore, _ := datastore.UserActivityFactory()
	username := c.Query("username")
	if len(username) == 0 {
		c.JSON(http.StatusBadRequest, "Error: username field is missing")
		return
	}
	actions, err := activityStore.GetActivities(username)
	if err != nil {
		c.JSON(http.StatusBadRequest, err)
		return
	}
	c.JSON(http.StatusOK, actions)
}
// Middleware records every request carrying a username query parameter into
// the user-activity store, then passes control to the next handler.
// Requests without a username are passed through untouched.
func Middleware(c *gin.Context) {
	activityStore, _ := datastore.UserActivityFactory()
	username := c.Query("username")
	if username == "" {
		return
	}
	// NOTE(review): AddActivity's return value is discarded — presumably
	// best-effort audit logging; confirm.
	activityStore.AddActivity(username, c.Request.Method, c.Request.RequestURI)
	c.Next()
}
|
package exercise
import (
"fmt"
)
// TestCfb prints the classic triangular 9x9 multiplication table
// (Chinese "chengfabiao"), one row per line.
func TestCfb() {
	for row := 1; row < 10; row++ {
		for col := 1; col <= row; col++ {
			fmt.Printf("%d*%d = %d ", col, row, col*row)
		}
		fmt.Println()
	}
}
|
package util
import (
"github.com/go-playground/locales/id"
ut "github.com/go-playground/universal-translator"
"github.com/labstack/echo/v4"
"gopkg.in/go-playground/validator.v9"
idTrans "gopkg.in/go-playground/validator.v9/translations/id"
)
// CustomValidator adapts go-playground/validator to echo's Validator
// interface so handlers can call c.Validate(...).
type CustomValidator struct {
	// Validator is the underlying validator instance used by Validate.
	Validator *validator.Validate
}
// translator holds the Indonesian ("id") translator used to render
// validation error messages; set by setValidator.
var translator ut.Translator
// setValidator installs a go-playground validator on the echo instance with
// Indonesian default error-message translations.
func setValidator(e *echo.Echo) {
	// FIX: the original locals were named "id" and "validator", shadowing
	// the imported packages of the same names — renamed for clarity.
	locale := id.New()
	uni := ut.New(locale, locale)
	translator, _ = uni.GetTranslator("id")
	v := validator.New()
	// Best-effort: translation registration errors are deliberately ignored.
	_ = idTrans.RegisterDefaultTranslations(v, translator)
	e.Validator = &CustomValidator{Validator: v}
}
// Validate implements echo.Validator by running struct-tag validation on i.
func (cv *CustomValidator) Validate(i interface{}) error {
	return cv.Validator.Struct(i)
}
|
////////////////////////////////////////////////////////////////////////////////
// //
// Copyright 2019 Broadcom. The term Broadcom refers to Broadcom Inc. and/or //
// its subsidiaries. //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// //
////////////////////////////////////////////////////////////////////////////////
/*
Package translib implements APIs like Create, Get, Subscribe etc.
to be consumed by the north bound management server implementations
This package takes care of translating the incoming requests to
Redis ABNF format and persisting them in the Redis DB.
It can also translate the ABNF format to YANG specific JSON IETF format
This package can also talk to non-DB clients.
*/
package translib
import (
"sync"
"github.com/Azure/sonic-mgmt-common/translib/db"
"github.com/Azure/sonic-mgmt-common/translib/tlerr"
"github.com/Workiva/go-datastructures/queue"
log "github.com/golang/glog"
)
//Write lock for all write operations to be synchronized
var writeMutex = &sync.Mutex{}

//Interval value for interval based subscription needs to be within the min and max
//minimum global interval for interval based subscribe in secs
var minSubsInterval = 20

//maximum global interval for interval based subscribe in secs
var maxSubsInterval = 600
// ErrSource identifies which layer produced an error: the translib
// infrastructure (ProtoErr) or an app module (AppErr).
type ErrSource int

const (
	ProtoErr ErrSource = iota
	AppErr
)

// UserRoles carries the requesting user's identity and role list for the
// isAuthorizedFor* checks.
type UserRoles struct {
	Name  string
	Roles []string
}
// SetRequest describes a single write (Create/Update/Replace/Delete)
// against a YANG path.
type SetRequest struct {
	Path             string
	Payload          []byte
	User             UserRoles
	AuthEnabled      bool
	ClientVersion    Version
	// DeleteEmptyEntry, when true, removes a DB entry that becomes empty
	// after a Delete instead of leaving a stub behind.
	DeleteEmptyEntry bool
}

// SetResponse reports the outcome of a write; ErrSrc says which layer
// produced Err.
type SetResponse struct {
	ErrSrc ErrSource
	Err    error
}

// GetRequest describes a read of a YANG path.
type GetRequest struct {
	Path          string
	User          UserRoles
	AuthEnabled   bool
	ClientVersion Version

	// Depth limits the depth of data subtree in the response
	// payload. Default value 0 indicates there is no limit.
	Depth uint
}

// GetResponse carries the encoded payload of a read.
type GetResponse struct {
	Payload []byte
	ErrSrc  ErrSource
}

// ActionRequest describes an RPC/action invocation on a YANG path.
type ActionRequest struct {
	Path          string
	Payload       []byte
	User          UserRoles
	AuthEnabled   bool
	ClientVersion Version
}

// ActionResponse carries the encoded result of an action.
type ActionResponse struct {
	Payload []byte
	ErrSrc  ErrSource
}

// BulkRequest batches several writes into one DB transaction; Bulk applies
// deletes, then replaces, then updates, then creates.
type BulkRequest struct {
	DeleteRequest  []SetRequest
	ReplaceRequest []SetRequest
	UpdateRequest  []SetRequest
	CreateRequest  []SetRequest
	User           UserRoles
	AuthEnabled    bool
	ClientVersion  Version
}

// BulkResponse mirrors BulkRequest with one SetResponse per sub-request.
type BulkResponse struct {
	DeleteResponse  []SetResponse
	ReplaceResponse []SetResponse
	UpdateResponse  []SetResponse
	CreateResponse  []SetResponse
}
// SubscribeRequest asks for notifications on a set of paths; events are
// pushed to Q until Stop is closed.
type SubscribeRequest struct {
	Paths         []string
	Q             *queue.PriorityQueue
	Stop          chan struct{}
	User          UserRoles
	AuthEnabled   bool
	ClientVersion Version
}

// SubscribeResponse is one notification item placed on the subscriber queue.
type SubscribeResponse struct {
	Path         string
	Payload      []byte
	Timestamp    int64
	// SyncComplete marks the end of the initial sync snapshot.
	SyncComplete bool
	// IsTerminated tells the subscriber the stream has ended.
	IsTerminated bool
}

// NotificationType selects between periodic sampling and on-change events.
type NotificationType int

const (
	Sample NotificationType = iota
	OnChange
)

// IsSubscribeRequest queries subscription capabilities for a set of paths.
type IsSubscribeRequest struct {
	Paths         []string
	User          UserRoles
	AuthEnabled   bool
	ClientVersion Version
}

// IsSubscribeResponse reports per-path subscription capabilities.
type IsSubscribeResponse struct {
	Path                string
	IsOnChangeSupported bool
	// MinInterval is clamped to [minSubsInterval, maxSubsInterval].
	MinInterval         int
	Err                 error
	PreferredType       NotificationType
}

// ModelData identifies one YANG model supported by translib.
type ModelData struct {
	Name string
	Org  string
	Ver  string
}

// notificationOpts is the app module's answer about how a path may be
// subscribed to.
type notificationOpts struct {
	isOnChangeSupported bool
	mInterval           int
	pType               NotificationType // for TARGET_DEFINED
}
//initializes logging and app modules
func init() {
	// NOTE(review): only flushes glog buffers; app-module registration
	// presumably happens in each module's own init — confirm.
	log.Flush()
}
//Create - Creates entries in the redis DB pertaining to the path and payload
//Flow: authorize -> resolve app module -> bind payload -> serialize writers
//via writeMutex -> translate path/payload to DB keys -> transactional write
//to ConfigDB (abort on failure, commit otherwise).
func Create(req SetRequest) (SetResponse, error) {
	var keys []db.WatchKeys
	var resp SetResponse
	path := req.Path
	payload := req.Payload
	if !isAuthorizedForSet(req) {
		return resp, tlerr.AuthorizationError{
			Format: "User is unauthorized for Create Operation",
			Path:   path,
		}
	}
	log.Info("Create request received with path =", path)
	log.Info("Create request received with payload =", string(payload))
	app, appInfo, err := getAppModule(path, req.ClientVersion)
	if err != nil {
		resp.ErrSrc = ProtoErr
		return resp, err
	}
	err = appInitialize(app, appInfo, path, &payload, nil, CREATE)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	// All write operations are serialized process-wide.
	writeMutex.Lock()
	defer writeMutex.Unlock()
	isWriteDisabled := false
	d, err := db.NewDB(getDBOptions(db.ConfigDB, isWriteDisabled))
	if err != nil {
		resp.ErrSrc = ProtoErr
		return resp, err
	}
	defer d.DeleteDB()
	keys, err = (*app).translateCreate(d)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	// Watch the translated keys so concurrent modifications abort the tx.
	err = d.StartTx(keys, appInfo.tablesToWatch)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	resp, err = (*app).processCreate(d)
	if err != nil {
		// NOTE(review): AbortTx's error is discarded — confirm intentional.
		d.AbortTx()
		resp.ErrSrc = AppErr
		return resp, err
	}
	err = d.CommitTx()
	if err != nil {
		resp.ErrSrc = AppErr
	}
	return resp, err
}
//Update - Updates entries in the redis DB pertaining to the path and payload
//Same flow as Create: authorize -> app lookup -> bind -> locked ConfigDB tx.
func Update(req SetRequest) (SetResponse, error) {
	var keys []db.WatchKeys
	var resp SetResponse
	path := req.Path
	payload := req.Payload
	if !isAuthorizedForSet(req) {
		return resp, tlerr.AuthorizationError{
			Format: "User is unauthorized for Update Operation",
			Path:   path,
		}
	}
	log.Info("Update request received with path =", path)
	log.Info("Update request received with payload =", string(payload))
	app, appInfo, err := getAppModule(path, req.ClientVersion)
	if err != nil {
		resp.ErrSrc = ProtoErr
		return resp, err
	}
	err = appInitialize(app, appInfo, path, &payload, nil, UPDATE)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	// All write operations are serialized process-wide.
	writeMutex.Lock()
	defer writeMutex.Unlock()
	isWriteDisabled := false
	d, err := db.NewDB(getDBOptions(db.ConfigDB, isWriteDisabled))
	if err != nil {
		resp.ErrSrc = ProtoErr
		return resp, err
	}
	defer d.DeleteDB()
	keys, err = (*app).translateUpdate(d)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	err = d.StartTx(keys, appInfo.tablesToWatch)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	resp, err = (*app).processUpdate(d)
	if err != nil {
		d.AbortTx()
		resp.ErrSrc = AppErr
		return resp, err
	}
	err = d.CommitTx()
	if err != nil {
		resp.ErrSrc = AppErr
	}
	return resp, err
}
//Replace - Replaces entries in the redis DB pertaining to the path and payload
//Same flow as Create: authorize -> app lookup -> bind -> locked ConfigDB tx.
func Replace(req SetRequest) (SetResponse, error) {
	var err error
	var keys []db.WatchKeys
	var resp SetResponse
	path := req.Path
	payload := req.Payload
	if !isAuthorizedForSet(req) {
		return resp, tlerr.AuthorizationError{
			Format: "User is unauthorized for Replace Operation",
			Path:   path,
		}
	}
	log.Info("Replace request received with path =", path)
	log.Info("Replace request received with payload =", string(payload))
	app, appInfo, err := getAppModule(path, req.ClientVersion)
	if err != nil {
		resp.ErrSrc = ProtoErr
		return resp, err
	}
	err = appInitialize(app, appInfo, path, &payload, nil, REPLACE)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	// All write operations are serialized process-wide.
	writeMutex.Lock()
	defer writeMutex.Unlock()
	isWriteDisabled := false
	d, err := db.NewDB(getDBOptions(db.ConfigDB, isWriteDisabled))
	if err != nil {
		resp.ErrSrc = ProtoErr
		return resp, err
	}
	defer d.DeleteDB()
	keys, err = (*app).translateReplace(d)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	err = d.StartTx(keys, appInfo.tablesToWatch)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	resp, err = (*app).processReplace(d)
	if err != nil {
		d.AbortTx()
		resp.ErrSrc = AppErr
		return resp, err
	}
	err = d.CommitTx()
	if err != nil {
		resp.ErrSrc = AppErr
	}
	return resp, err
}
//Delete - Deletes entries in the redis DB pertaining to the path
//Same flow as Create, without a payload; honors req.DeleteEmptyEntry.
func Delete(req SetRequest) (SetResponse, error) {
	var err error
	var keys []db.WatchKeys
	var resp SetResponse
	path := req.Path
	if !isAuthorizedForSet(req) {
		return resp, tlerr.AuthorizationError{
			Format: "User is unauthorized for Delete Operation",
			Path:   path,
		}
	}
	log.Info("Delete request received with path =", path)
	app, appInfo, err := getAppModule(path, req.ClientVersion)
	if err != nil {
		resp.ErrSrc = ProtoErr
		return resp, err
	}
	opts := appOptions{deleteEmptyEntry: req.DeleteEmptyEntry}
	err = appInitialize(app, appInfo, path, nil, &opts, DELETE)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	// All write operations are serialized process-wide.
	writeMutex.Lock()
	defer writeMutex.Unlock()
	isWriteDisabled := false
	d, err := db.NewDB(getDBOptions(db.ConfigDB, isWriteDisabled))
	if err != nil {
		resp.ErrSrc = ProtoErr
		return resp, err
	}
	defer d.DeleteDB()
	keys, err = (*app).translateDelete(d)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	err = d.StartTx(keys, appInfo.tablesToWatch)
	if err != nil {
		resp.ErrSrc = AppErr
		return resp, err
	}
	resp, err = (*app).processDelete(d)
	if err != nil {
		d.AbortTx()
		resp.ErrSrc = AppErr
		return resp, err
	}
	err = d.CommitTx()
	if err != nil {
		resp.ErrSrc = AppErr
	}
	return resp, err
}
//Get - Gets data from the redis DB and converts it to northbound format
//Read path: authorize -> app lookup -> bind (no payload) -> open read-only
//connections to every DB -> translate -> process. No write lock is taken.
func Get(req GetRequest) (GetResponse, error) {
	var payload []byte
	var resp GetResponse
	path := req.Path
	if !isAuthorizedForGet(req) {
		return resp, tlerr.AuthorizationError{
			Format: "User is unauthorized for Get Operation",
			Path:   path,
		}
	}
	log.Info("Received Get request for path = ", path)
	app, appInfo, err := getAppModule(path, req.ClientVersion)
	if err != nil {
		resp = GetResponse{Payload: payload, ErrSrc: ProtoErr}
		return resp, err
	}
	// Depth limits the returned subtree; 0 means unlimited.
	opts := appOptions{ depth: req.Depth }
	err = appInitialize(app, appInfo, path, nil, &opts, GET)
	if err != nil {
		resp = GetResponse{Payload: payload, ErrSrc: AppErr}
		return resp, err
	}
	isGetCase := true
	dbs, err := getAllDbs(isGetCase)
	if err != nil {
		resp = GetResponse{Payload: payload, ErrSrc: ProtoErr}
		return resp, err
	}
	defer closeAllDbs(dbs[:])
	err = (*app).translateGet(dbs)
	if err != nil {
		resp = GetResponse{Payload: payload, ErrSrc: AppErr}
		return resp, err
	}
	resp, err = (*app).processGet(dbs)
	return resp, err
}
// Action executes a YANG RPC/action at the given path. Actions may write, so
// the global write lock is held and DB connections are opened writable.
func Action(req ActionRequest) (ActionResponse, error) {
	var payload []byte
	var resp ActionResponse
	path := req.Path
	if !isAuthorizedForAction(req) {
		return resp, tlerr.AuthorizationError{
			Format: "User is unauthorized for Action Operation",
			Path:   path,
		}
	}
	log.Info("Received Action request for path = ", path)
	app, appInfo, err := getAppModule(path, req.ClientVersion)
	if err != nil {
		resp = ActionResponse{Payload: payload, ErrSrc: ProtoErr}
		return resp, err
	}
	// Copy appInfo so forcing native mode does not mutate the shared record.
	aInfo := *appInfo
	aInfo.isNative = true
	// NOTE(review): the opcode passed here is GET, not a dedicated ACTION
	// opcode — presumably intentional for request binding; confirm.
	err = appInitialize(app, &aInfo, path, &req.Payload, nil, GET)
	if err != nil {
		resp = ActionResponse{Payload: payload, ErrSrc: AppErr}
		return resp, err
	}
	writeMutex.Lock()
	defer writeMutex.Unlock()
	isGetCase := false
	dbs, err := getAllDbs(isGetCase)
	if err != nil {
		resp = ActionResponse{Payload: payload, ErrSrc: ProtoErr}
		return resp, err
	}
	defer closeAllDbs(dbs[:])
	err = (*app).translateAction(dbs)
	if err != nil {
		resp = ActionResponse{Payload: payload, ErrSrc: AppErr}
		return resp, err
	}
	resp, err = (*app).processAction(dbs)
	return resp, err
}
// Bulk applies all sub-requests of a BulkRequest inside ONE ConfigDB
// transaction, in the fixed order delete -> replace -> update -> create.
// The first failing sub-request aborts the whole transaction and returns;
// its error is recorded in the corresponding response slot. The goto/label
// pattern gives each loop a shared error epilogue.
func Bulk(req BulkRequest) (BulkResponse, error) {
	var err error
	var keys []db.WatchKeys
	var errSrc ErrSource
	delResp := make([]SetResponse, len(req.DeleteRequest))
	replaceResp := make([]SetResponse, len(req.ReplaceRequest))
	updateResp := make([]SetResponse, len(req.UpdateRequest))
	createResp := make([]SetResponse, len(req.CreateRequest))
	resp := BulkResponse{DeleteResponse: delResp,
		ReplaceResponse: replaceResp,
		UpdateResponse:  updateResp,
		CreateResponse:  createResp}
	if (!isAuthorizedForBulk(req)) {
		return resp, tlerr.AuthorizationError{
			Format: "User is unauthorized for Action Operation",
		}
	}
	// All write operations are serialized process-wide.
	writeMutex.Lock()
	defer writeMutex.Unlock()
	isWriteDisabled := false
	d, err := db.NewDB(getDBOptions(db.ConfigDB, isWriteDisabled))
	if err != nil {
		return resp, err
	}
	defer d.DeleteDB()
	//Start the transaction without any keys or tables to watch will be added later using AppendWatchTx
	err = d.StartTx(nil, nil)
	if err != nil {
		return resp, err
	}
	for i := range req.DeleteRequest {
		path := req.DeleteRequest[i].Path
		opts := appOptions{deleteEmptyEntry: req.DeleteRequest[i].DeleteEmptyEntry}
		log.Info("Delete request received with path =", path)
		app, appInfo, err := getAppModule(path, req.DeleteRequest[i].ClientVersion)
		if err != nil {
			errSrc = ProtoErr
			goto BulkDeleteError
		}
		err = appInitialize(app, appInfo, path, nil, &opts, DELETE)
		if err != nil {
			errSrc = AppErr
			goto BulkDeleteError
		}
		keys, err = (*app).translateDelete(d)
		if err != nil {
			errSrc = AppErr
			goto BulkDeleteError
		}
		// Accumulate watch keys into the already-open transaction.
		err = d.AppendWatchTx(keys, appInfo.tablesToWatch)
		if err != nil {
			errSrc = AppErr
			goto BulkDeleteError
		}
		resp.DeleteResponse[i], err = (*app).processDelete(d)
		if err != nil {
			errSrc = AppErr
		}
	BulkDeleteError:
		if err != nil {
			d.AbortTx()
			resp.DeleteResponse[i].ErrSrc = errSrc
			resp.DeleteResponse[i].Err = err
			return resp, err
		}
	}
	for i := range req.ReplaceRequest {
		path := req.ReplaceRequest[i].Path
		payload := req.ReplaceRequest[i].Payload
		log.Info("Replace request received with path =", path)
		app, appInfo, err := getAppModule(path, req.ReplaceRequest[i].ClientVersion)
		if err != nil {
			errSrc = ProtoErr
			goto BulkReplaceError
		}
		log.Info("Bulk replace request received with path =", path)
		log.Info("Bulk replace request received with payload =", string(payload))
		err = appInitialize(app, appInfo, path, &payload, nil, REPLACE)
		if err != nil {
			errSrc = AppErr
			goto BulkReplaceError
		}
		keys, err = (*app).translateReplace(d)
		if err != nil {
			errSrc = AppErr
			goto BulkReplaceError
		}
		err = d.AppendWatchTx(keys, appInfo.tablesToWatch)
		if err != nil {
			errSrc = AppErr
			goto BulkReplaceError
		}
		resp.ReplaceResponse[i], err = (*app).processReplace(d)
		if err != nil {
			errSrc = AppErr
		}
	BulkReplaceError:
		if err != nil {
			d.AbortTx()
			resp.ReplaceResponse[i].ErrSrc = errSrc
			resp.ReplaceResponse[i].Err = err
			return resp, err
		}
	}
	for i := range req.UpdateRequest {
		path := req.UpdateRequest[i].Path
		payload := req.UpdateRequest[i].Payload
		log.Info("Update request received with path =", path)
		app, appInfo, err := getAppModule(path, req.UpdateRequest[i].ClientVersion)
		if err != nil {
			errSrc = ProtoErr
			goto BulkUpdateError
		}
		err = appInitialize(app, appInfo, path, &payload, nil, UPDATE)
		if err != nil {
			errSrc = AppErr
			goto BulkUpdateError
		}
		keys, err = (*app).translateUpdate(d)
		if err != nil {
			errSrc = AppErr
			goto BulkUpdateError
		}
		err = d.AppendWatchTx(keys, appInfo.tablesToWatch)
		if err != nil {
			errSrc = AppErr
			goto BulkUpdateError
		}
		resp.UpdateResponse[i], err = (*app).processUpdate(d)
		if err != nil {
			errSrc = AppErr
		}
	BulkUpdateError:
		if err != nil {
			d.AbortTx()
			resp.UpdateResponse[i].ErrSrc = errSrc
			resp.UpdateResponse[i].Err = err
			return resp, err
		}
	}
	for i := range req.CreateRequest {
		path := req.CreateRequest[i].Path
		payload := req.CreateRequest[i].Payload
		log.Info("Create request received with path =", path)
		app, appInfo, err := getAppModule(path, req.CreateRequest[i].ClientVersion)
		if err != nil {
			errSrc = ProtoErr
			goto BulkCreateError
		}
		err = appInitialize(app, appInfo, path, &payload, nil, CREATE)
		if err != nil {
			errSrc = AppErr
			goto BulkCreateError
		}
		keys, err = (*app).translateCreate(d)
		if err != nil {
			errSrc = AppErr
			goto BulkCreateError
		}
		err = d.AppendWatchTx(keys, appInfo.tablesToWatch)
		if err != nil {
			errSrc = AppErr
			goto BulkCreateError
		}
		resp.CreateResponse[i], err = (*app).processCreate(d)
		if err != nil {
			errSrc = AppErr
		}
	BulkCreateError:
		if err != nil {
			d.AbortTx()
			resp.CreateResponse[i].ErrSrc = errSrc
			resp.CreateResponse[i].Err = err
			return resp, err
		}
	}
	// Commit everything only after every sub-request succeeded.
	err = d.CommitTx()
	return resp, err
}
//Subscribe - Subscribes to the paths requested and sends notifications when the data changes in DB
//Per path: resolve the app module, ask it (via translateSubscribeBridge)
//whether on-change is supported and at what interval (clamped to the global
//min/max), then register the notification info per DB. The first error is
//remembered in sErr and returned after all paths are examined.
func Subscribe(req SubscribeRequest) ([]*IsSubscribeResponse, error) {
	var err error
	var sErr error
	paths := req.Paths
	q := req.Q
	stop := req.Stop
	dbNotificationMap := make(map[db.DBNum][]*notificationInfo)
	resp := make([]*IsSubscribeResponse, len(paths))
	for i := range resp {
		resp[i] = &IsSubscribeResponse{Path: paths[i],
			IsOnChangeSupported: false,
			MinInterval:         minSubsInterval,
			PreferredType:       Sample,
			Err:                 nil}
	}
	if (!isAuthorizedForSubscribe(req)) {
		return resp, tlerr.AuthorizationError{
			Format: "User is unauthorized for Action Operation",
		}
	}
	isGetCase := true
	dbs, err := getAllDbs(isGetCase)
	if err != nil {
		return resp, err
	}
	//Do NOT close the DBs here as we need to use them during subscribe notification
	for i, path := range paths {
		app, appInfo, err := getAppModule(path, req.ClientVersion)
		if err != nil {
			if sErr == nil {
				sErr = err
			}
			resp[i].Err = err
			continue
		}
		nOpts, nInfo, errApp := translateSubscribeBridge(path, *app, dbs)
		if nOpts != nil {
			// Clamp the app-reported interval into [minSubsInterval, maxSubsInterval].
			if nOpts.mInterval != 0 {
				if ((nOpts.mInterval >= minSubsInterval) && (nOpts.mInterval <= maxSubsInterval)) {
					resp[i].MinInterval = nOpts.mInterval
				} else if (nOpts.mInterval < minSubsInterval) {
					resp[i].MinInterval = minSubsInterval
				} else {
					resp[i].MinInterval = maxSubsInterval
				}
			}
			resp[i].IsOnChangeSupported = nOpts.isOnChangeSupported
			resp[i].PreferredType = nOpts.pType
		}
		if errApp != nil {
			resp[i].Err = errApp
			if sErr == nil {
				sErr = errApp
			}
			continue
		} else {
			if nInfo == nil || !resp[i].IsOnChangeSupported {
				sErr = tlerr.NotSupportedError{
					Format: "Subscribe not supported", Path: path}
				resp[i].Err = sErr
				continue
			}
			nInfo.path = path
			nInfo.app = app
			nInfo.appInfo = appInfo
			nInfo.dbs = dbs
			dbNotificationMap[nInfo.dbno] = append(dbNotificationMap[nInfo.dbno], nInfo)
		}
	}
	log.Info("map=", dbNotificationMap)
	if sErr != nil {
		return resp, sErr
	}
	sInfo := &subscribeInfo{syncDone: false,
		q:    q,
		stop: stop}
	// startSubscribe takes ownership of the DB handles for notifications.
	sErr = startSubscribe(sInfo, dbNotificationMap)
	return resp, sErr
}
//IsSubscribeSupported - Check if subscribe is supported on the given paths
//Dry-run version of Subscribe: reports per-path capabilities without
//registering any notifications; DB handles are closed before returning.
func IsSubscribeSupported(req IsSubscribeRequest) ([]*IsSubscribeResponse, error) {
	paths := req.Paths
	resp := make([]*IsSubscribeResponse, len(paths))
	for i := range resp {
		resp[i] = &IsSubscribeResponse{Path: paths[i],
			IsOnChangeSupported: false,
			MinInterval:         minSubsInterval,
			PreferredType:       Sample,
			Err:                 nil}
	}
	if (!isAuthorizedForIsSubscribe(req)) {
		return resp, tlerr.AuthorizationError{
			Format: "User is unauthorized for Action Operation",
		}
	}
	isGetCase := true
	dbs, err := getAllDbs(isGetCase)
	if err != nil {
		return resp, err
	}
	defer closeAllDbs(dbs[:])
	for i, path := range paths {
		app, _, err := getAppModule(path, req.ClientVersion)
		if err != nil {
			resp[i].Err = err
			continue
		}
		nOpts, _, errApp := translateSubscribeBridge(path, *app, dbs)
		if nOpts != nil {
			// Clamp the app-reported interval into [minSubsInterval, maxSubsInterval].
			if nOpts.mInterval != 0 {
				if ((nOpts.mInterval >= minSubsInterval) && (nOpts.mInterval <= maxSubsInterval)) {
					resp[i].MinInterval = nOpts.mInterval
				} else if (nOpts.mInterval < minSubsInterval) {
					resp[i].MinInterval = minSubsInterval
				} else {
					resp[i].MinInterval = maxSubsInterval
				}
			}
			resp[i].IsOnChangeSupported = nOpts.isOnChangeSupported
			resp[i].PreferredType = nOpts.pType
		}
		if errApp != nil {
			resp[i].Err = errApp
			err = errApp
			continue
		}
	}
	return resp, err
}
//GetModels - Gets all the models supported by Translib
func GetModels() ([]ModelData, error) {
	// The previous version declared an err variable that was never set;
	// return nil explicitly.
	return getModels(), nil
}
//Creates connection will all the redis DBs. To be used for get request
//getAllDbs opens one connection per redis DB, in a fixed order. For get
//requests every connection is read-only; otherwise only the ConfigDB handle
//is read-only (config writes go through the dedicated handle in the write
//paths). On any failure all already-opened connections are closed.
func getAllDbs(isGetCase bool) ([db.MaxDB]*db.DB, error) {
	var dbs [db.MaxDB]*db.DB
	var err error
	// Get requests never write, so disable writes on every connection.
	isWriteDisabled := isGetCase
	// Table-driven replacement for the previous six copy-pasted per-DB
	// blocks; order matches the original implementation.
	for _, dbNo := range []db.DBNum{db.ApplDB, db.AsicDB, db.CountersDB,
		db.ConfigDB, db.FlexCounterDB, db.StateDB} {
		// The ConfigDB handle opened here is ALWAYS read-only.
		wd := isWriteDisabled || dbNo == db.ConfigDB
		dbs[dbNo], err = db.NewDB(getDBOptions(dbNo, wd))
		if err != nil {
			closeAllDbs(dbs[:])
			return dbs, err
		}
	}
	return dbs, err
}
//Closes the dbs, and nils out the arr.
func closeAllDbs(dbs []*db.DB) {
	for i := range dbs {
		if dbs[i] == nil {
			continue
		}
		dbs[i].DeleteDB()
		dbs[i] = nil
	}
}
// Compare - Implement Compare method for priority queue for SubscribeResponse struct
// Orders notifications by timestamp: 1 if val is newer than other, -1 if
// older, 0 if equal.
func (val SubscribeResponse) Compare(other queue.Item) int {
	rhs := other.(*SubscribeResponse)
	switch {
	case val.Timestamp > rhs.Timestamp:
		return 1
	case val.Timestamp < rhs.Timestamp:
		return -1
	default:
		return 0
	}
}
// getDBOptions returns the connection options for dbNo. App/Counters/Asic
// DBs use ":" as table/key separator, Flex/Config/State use "|"; any other
// DB number yields the zero Options, as before.
func getDBOptions(dbNo db.DBNum, isWriteDisabled bool) db.Options {
	var sep string
	switch dbNo {
	case db.ApplDB, db.CountersDB, db.AsicDB:
		sep = ":"
	case db.FlexCounterDB, db.ConfigDB, db.StateDB:
		sep = "|"
	default:
		return db.Options{}
	}
	return getDBOptionsWithSeparator(dbNo, "", sep, sep, isWriteDisabled)
}
// getDBOptionsWithSeparator assembles a db.Options value from its parts.
func getDBOptionsWithSeparator(dbNo db.DBNum, initIndicator string, tableSeparator string, keySeparator string, isWriteDisabled bool) db.Options {
	opts := db.Options{
		DBNo:               dbNo,
		InitIndicator:      initIndicator,
		TableNameSeparator: tableSeparator,
		KeySeparator:       keySeparator,
		IsWriteDisabled:    isWriteDisabled,
	}
	return opts
}
// getAppModule resolves the app module serving path, after checking that
// the client's version is acceptable for that module. On error the partially
// resolved appInfo is still returned for diagnostics.
func getAppModule(path string, clientVer Version) (*appInterface, *appInfo, error) {
	aInfo, err := getAppModuleInfo(path)
	if err != nil {
		return nil, aInfo, err
	}
	if verErr := validateClientVersion(clientVer, path, aInfo); verErr != nil {
		return nil, aInfo, verErr
	}
	app, err := getAppInterface(aInfo.appType)
	if err != nil {
		return nil, aInfo, err
	}
	return &app, aInfo, nil
}
// appInitialize prepares the app module for one request: for native (MSFT)
// apps the raw payload is handed over directly; otherwise the payload is
// first unmarshalled into ygot structures via the request binder.
// Returns the binder error, if any.
func appInitialize(app *appInterface, appInfo *appInfo, path string, payload *[]byte, opts *appOptions, opCode int) error {
	var input []byte
	if payload != nil {
		input = *payload
	}
	if appInfo.isNative {
		log.Info("Native MSFT format")
		data := appData{path: path, payload: input}
		data.setOptions(opts)
		(*app).initialize(data)
		return nil
	}
	// FIX: the original declared an outer `err` that was shadowed by the
	// binder's `:=` and therefore always nil at the final `return err`;
	// early returns make the flow explicit.
	ygotStruct, ygotTarget, err := getRequestBinder(&path, payload, opCode, &(appInfo.ygotRootType)).unMarshall()
	if err != nil {
		log.Info("Error in request binding: ", err)
		return err
	}
	data := appData{path: path, payload: input, ygotRoot: ygotStruct, ygotTarget: ygotTarget}
	data.setOptions(opts)
	(*app).initialize(data)
	return nil
}
// setOptions copies the caller-supplied options into the request data,
// leaving the existing options untouched when opts is nil.
func (data *appData) setOptions(opts *appOptions) {
	if opts == nil {
		return
	}
	data.appOptions = *opts
}
|
package main
import (
"bufio"
"fmt"
"math"
"os"
"strconv"
)
func readInput() []int64 {
f, _ := os.Open("input.txt")
defer f.Close()
input := make([]int64, 0)
scanner := bufio.NewScanner(f)
for scanner.Scan() {
num, _ := strconv.ParseInt(scanner.Text(), 10, 64)
input = append(input, num)
}
return input
}
// doPartOne returns the first number (after the 25-number preamble) that
// is not the sum of two distinct entries among the preceding 25 numbers
// (AoC 2020 day 9 part one), or -1 if every number is valid.
// mp holds the multiset of the current 25-number window.
func doPartOne() int64 {
	nums := readInput()
	mp := make(map[int64]int, 0)
	// Add first 25 numbers to map
	for i := 0; i < 25; i++ {
		mp[nums[i]]++
	}
	// Iterate through entire input
	for i := 25; i < len(nums); i++ {
		curr := nums[i]
		found := false
		// Iterate through last 25 numbers
		for j := i - 25; j < i; j++ {
			// If curr - a number in the previous 25 is in the map
			if mp[curr-nums[j]] > 0 {
				// Result of curr-nums[j] == nums[j] is only valid IF there are multiple of that number
				if curr-nums[j] == nums[j] && mp[nums[j]] < 2 {
					continue
				}
				found = true
				break
			}
		}
		if !found {
			return curr
		}
		// Slide the window one index over. Remove i-25th index and add curr
		mp[nums[i-25]]--
		mp[nums[i]]++
	}
	return -1
}
// doPartTwo finds a contiguous run of at least two numbers summing to
// invalidNumber and returns the sum of the run's smallest and largest
// values (the XMAS encryption weakness), or -1 if no run exists.
//
// Fixes: the original tracked min/max by round-tripping int64 values
// through float64 via math.Min/Max, which silently loses precision for
// values above 2^53; it also seeded max with 0, which is wrong for
// all-negative runs. Min/max are now pure integer comparisons.
func doPartTwo(invalidNumber int64) int64 {
	nums := readInput()
	for i := 0; i < len(nums)-1; i++ {
		var sum int64
		mn := int64(math.MaxInt64)
		mx := int64(math.MinInt64)
		for j := i; j < len(nums); j++ {
			if nums[j] < mn {
				mn = nums[j]
			}
			if nums[j] > mx {
				mx = nums[j]
			}
			sum += nums[j]
			// Require at least two elements in the run (j > i).
			if sum == invalidNumber && j > i {
				return mn + mx
			}
		}
	}
	return -1
}
// main solves both parts: part one finds the first invalid number in the
// XMAS stream, part two uses it to locate the encryption weakness.
func main() {
	invalid := doPartOne()
	fmt.Println(invalid)
	fmt.Println(doPartTwo(invalid))
}
|
package main
import (
"flag"
)
// main boots the application: parse flags, build the app through the
// generated initializer, then serve HTTP until a stop signal arrives.
func main() {
	flag.Parse()
	// initApp is defined elsewhere (presumably generated, e.g. by wire);
	// cleanup releases the resources it acquired.
	app, cleanup, err := initApp()
	if err != nil {
		panic(err)
	}
	defer cleanup()
	// start and wait for stop signal
	if err := app.Run(app.GetConfig().GetHttp().LocalAddr); err != nil {
		panic(err)
	}
}
|
package memory
import (
"io"
"sync"
"github.com/delgus/def-parser/internal/app"
)
// Queue is a FIFO task queue safe for concurrent use.
type Queue struct {
	tasks []app.HostTask
	mu sync.Mutex
}
// NewQueue returns a new, empty queue.
func NewQueue() *Queue {
	return new(Queue)
}
// Add appends a task to the back of the queue.
func (q *Queue) Add(task app.HostTask) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.tasks = append(q.tasks, task)
}
// Get pops and returns the task at the front of the queue.
// io.EOF is returned when the queue is empty.
func (q *Queue) Get() (app.HostTask, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.tasks) == 0 {
		return app.HostTask{}, io.EOF
	}
	head := q.tasks[0]
	q.tasks = q.tasks[1:]
	return head, nil
}
|
/*
* winnow: weighted point selection
*
* input:
* matrix: an integer matrix, whose values are used as masses
* mask: a boolean matrix showing which points are eligible for
* consideration
* nrows, ncols: the number of rows and columns
* nelts: the number of points to select
*
* output:
* points: a vector of (x, y) points
*
*/
package main
import (
"flag"
"fmt"
"runtime"
"sort"
"bufio"
"os"
)
// #include <time.h>
// #include <stdio.h>
import "C"
// ByteMatrix is a dense Rows x Cols byte matrix stored in row-major order.
type ByteMatrix struct {
	Rows, Cols int
	array      []byte
}

// WrapBytes wraps an existing byte slice as an r x c matrix without copying.
func WrapBytes(r, c int, bytes []byte) *ByteMatrix {
	return &ByteMatrix{Rows: r, Cols: c, array: bytes}
}

// NewByteMatrix allocates a zeroed r x c matrix.
func NewByteMatrix(r, c int) *ByteMatrix {
	return &ByteMatrix{Rows: r, Cols: c, array: make([]byte, r*c)}
}

// Row returns a view (not a copy) of row i.
func (m *ByteMatrix) Row(i int) []byte {
	start := i * m.Cols
	return m.array[start : start+m.Cols]
}

// Bytes returns the matrix contents as a flat row-major slice.
func (m *ByteMatrix) Bytes() []byte {
	return m.array[:m.Rows*m.Cols]
}
// Command-line switch: synthesize benchmark input instead of reading it.
var is_bench = flag.Bool("is_bench", false, "")

// Shared problem state, filled by main and consumed by the workers.
var matrix []byte
var mask [][]bool
var points []int
// WinnowPoints is a sort.Interface over a set of flat indexes into a
// ByteMatrix, ordered by cell value with ties broken by index.
type WinnowPoints struct {
	m *ByteMatrix
	e []int // indexes into the ByteMatrix 'm'
}

// Len reports the number of selected indexes.
func (p *WinnowPoints) Len() int { return len(p.e) }

// Swap exchanges two entries.
func (p *WinnowPoints) Swap(i, j int) { p.e[i], p.e[j] = p.e[j], p.e[i] }

// Less orders entries by matrix value, then by index.
func (p *WinnowPoints) Less(i, j int) bool {
	return ArrayLess(p.m.array, p.e[i], p.e[j])
}
// ArrayLess orders positions x and y of array by their stored values,
// falling back to the positions themselves to get a strict total order.
func ArrayLess(array []byte, x, y int) bool {
	if array[x] == array[y] {
		return x < y
	}
	return array[x] < array[y]
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// WinnowMerge pops two sorted index sets from the channel, merges them
// into one sorted set (ordered by ArrayLess), and pushes the result
// back, so repeated invocations reduce N sets down to one.
func WinnowMerge(points chan WinnowPoints) {
	var merged WinnowPoints
	x := <-points
	y := <-points
	new_size := len(x.e) + len(y.e)
	merged.m = x.m
	merged.e = make([]int, new_size)
	j := 0 // cursor into x.e
	k := 0 // cursor into y.e
	for i := 0; i < new_size; i++ {
		if j < len(x.e) && k < len(y.e) {
			// Both inputs still have elements: take the smaller head.
			if ArrayLess(merged.m.array, x.e[j], y.e[k]) {
				merged.e[i] = x.e[j]
				j++
			} else {
				merged.e[i] = y.e[k]
				k++
			}
		} else if j < len(x.e) {
			// y exhausted: drain x.
			merged.e[i] = x.e[j]
			j++
		} else if k < len(y.e) {
			// x exhausted: drain y.
			merged.e[i] = y.e[k]
			k++
		}
	}
	points <- merged
}
// Winnow selects winnow_nelts evenly spaced points from the masked cells
// of m, ordered by cell value, storing the selected flat indexes in the
// package-level 'points' slice. 'mask' must already be populated.
//
// The work fans out over GOMAXPROCS goroutines: each worker collects and
// sorts the masked indexes for a share of the rows, while a matching set
// of merger goroutines folds the sorted sets into one.
func Winnow(m *ByteMatrix, nrows, ncols, winnow_nelts int) {
	NP := runtime.GOMAXPROCS(0)
	var values WinnowPoints
	values.m = m
	values_work := make(chan int, 1024)
	values_done := make(chan WinnowPoints, NP)
	// Seed the merge pipeline with an empty set so NP merges reduce the
	// NP produced sets (plus this seed) to exactly one.
	values_done <- WinnowPoints{m, make([]int, 0)}
	go func() {
		for i := 0; i < nrows; i++ {
			values_work <- i
		}
		close(values_work)
	}()
	merged := make(chan bool, NP)
	for i := 0; i < NP; i++ {
		go func() {
			WinnowMerge(values_done)
			merged <- true
		}()
	}
	for i := 0; i < NP; i++ {
		go func() {
			var local_indexes []int
			// Collect the masked flat indexes for each row this worker pulls.
			for i := range values_work {
				for j := 0; j < ncols; j++ {
					idx := i*ncols + j
					// if *is_bench {
					// mask[i][j] = ((i * j) % (ncols + 1)) == 1
					// }
					if mask[i][j] {
						local_indexes = append(local_indexes, idx)
					}
				}
			}
			var local_values WinnowPoints
			local_values.m = m
			local_values.e = local_indexes
			sort.Sort(&local_values)
			values_done <- local_values
		}()
	}
	// Wait for all mergers; the single combined set remains in values_done.
	for i := 0; i < NP; i++ {
		<-merged
	}
	values = <-values_done
	// fmt.Printf("len:%d\n", values.Len())
	// Pick every chunk-th element for winnow_nelts evenly spaced picks.
	// NOTE(review): if fewer than winnow_nelts cells are masked, chunk is
	// 0 and every pick is values.e[0] — confirm inputs always have enough
	// eligible cells.
	chunk := values.Len() / winnow_nelts
	points = make([]int, winnow_nelts)
	point_work := make(chan int, 1024)
	point_done := make(chan bool)
	go func() {
		for i := 0; i < winnow_nelts; i++ {
			point_work <- i
		}
		close(point_work)
	}()
	for i := 0; i < NP; i++ {
		go func() {
			for i := range point_work {
				points[i] = values.e[i*chunk]
			}
			point_done <- true
		}()
	}
	for i := 0; i < NP; i++ {
		<-point_done
	}
}
// read_integer blocks until a single integer can be parsed from stdin
// and returns it, skipping over tokens that fail to scan.
// NOTE(review): like the original, this loops forever on EOF — confirm
// input is always well formed.
func read_integer() int {
	var value int
	for {
		if read, _ := fmt.Scanf("%d", &value); read == 1 {
			return value
		}
	}
}
// read_matrix fills the package-level matrix (row-major) with nrows*ncols
// integers read from stdin.
func read_matrix(nrows, ncols int) {
	for i := 0; i < nrows; i++ {
		for j := 0; j < ncols; j++ {
			matrix[i*ncols+j] = byte(read_integer())
		}
	}
}
// read_mask fills the package-level mask from stdin; a cell is eligible
// exactly when the corresponding input value is 1.
func read_mask(nrows, ncols int) {
	for i := 0; i < nrows; i++ {
		for j := 0; j < ncols; j++ {
			mask[i][j] = (read_integer() == 1)
		}
	}
}
// FillOnTheFly synthesizes benchmark input: every cell (i,j) of the
// global matrix gets value (i*ncols+j) % 100, and roughly thresh percent
// of mask cells are set. Rows are filled in parallel across NP chunks.
//
// Fix: threshInverse was computed as 100/thresh with no guard, so any
// thresh > 100 made it 0 and the modulo below panicked with a division
// by zero (and thresh == 0 panicked immediately). Non-positive or
// over-100 thresh values now clamp to selecting every cell.
func FillOnTheFly(nrows, ncols, thresh int) {
	NP := runtime.GOMAXPROCS(0)
	sem := make(chan bool, NP)
	chunkSize := (nrows + NP - 1) / NP
	// One cell in every threshInverse is selected into the mask.
	threshInverse := 1
	if thresh > 0 {
		threshInverse = 100 / thresh
		if threshInverse < 1 {
			threshInverse = 1
		}
	}
	for n := 0; n < NP; n++ {
		go func(n int) {
			start := n * chunkSize
			end := min(start+chunkSize, nrows)
			for i := start; i < end; i++ {
				for j := 0; j < ncols; j++ {
					ix := i*ncols + j
					mask[i][j] = ix%threshInverse == 0
					matrix[ix] = byte(ix % 100)
				}
			}
			sem <- true
		}(n)
	}
	// Wait for every worker — behaves like a barrier.
	for n := 0; n < NP; n++ {
		<-sem
	}
}
// main reads (or, with -is_bench, synthesizes) the matrix and mask,
// times the Winnow computation with CLOCK_MONOTONIC_RAW via cgo, appends
// the measurement to measurements.txt and, outside benchmark mode,
// prints the selected points as (row, col) pairs.
func main() {
	var nrows, ncols, nelts, thresh int
	thresh=0
	flag.Parse()
	nrows = int(read_integer())
	ncols = int(read_integer())
	m := NewByteMatrix(nrows, ncols)
	// matrix aliases the matrix storage so the readers can fill it.
	matrix = m.array
	mask = make([][]bool, nrows)
	for i := range mask {
		mask[i] = make([]bool, ncols)
	}
	if !*is_bench {
		read_matrix(nrows, ncols)
		read_mask(nrows, ncols)
	}
	nelts = int(read_integer())
	if *is_bench {
		thresh = int(read_integer())
		FillOnTheFly( nrows, ncols, thresh )
	}
	points = make([]int, nelts)
	var start, stop C.struct_timespec
	var accum C.double
	if( C.clock_gettime( C.CLOCK_MONOTONIC_RAW, &start) == -1 ) {
		C.perror( C.CString("clock gettime error 1") );
		return
	}
	Winnow(m, nrows, ncols, nelts)
	if( C.clock_gettime( C.CLOCK_MONOTONIC_RAW, &stop) == -1 ) {
		C.perror( C.CString("clock gettime error 1") );
		return
	}
	// Elapsed wall time in seconds.
	accum = C.double( C.long(stop.tv_sec) - C.long(start.tv_sec) ) + C.double(( C.long(stop.tv_nsec) - C.long(start.tv_nsec))) / C.double(1e9);
	file, err := os.OpenFile("./measurements.txt", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("File does not exists or cannot be created")
		os.Exit(1)
	}
	w := bufio.NewWriter(file)
	// Lang, Problem, rows, cols, thresh, winnow_nelts, jobs, time
	fmt.Fprintf(w, "Go ,Winnow ,%5d,%5d,%3d,%5d,%2d,%.9f,isBench:%t\n", nrows, ncols, thresh, nelts, runtime.GOMAXPROCS(0), accum, *is_bench )
	w.Flush()
	file.Close()
	if !*is_bench {
		fmt.Printf("%d\n", nelts)
		for i := 0; i < nelts; i++ {
			fmt.Printf("%d %d\n", points[i]/ncols, points[i]%ncols)
		}
		fmt.Printf("\n")
	}
}
|
package websocket_route
import (
"encoding/binary"
"github.com/changx123/websocket-sync"
"bytes"
)
// Hook name constants.
const (
	// New connection notification.
	HOOK_NEW_CONN = "new conn"
	// Connection closed notification.
	HOOK_CLOSED = "closed"
	// Error notification.
	HOOK_ERROR = "error"
	// Route lookup failed (no module registered for the key).
	HOOK_NOT_MODULE = "not module"
	// Outgoing message notification.
	HOOK_WRITE_MESSAGE = "write message"
	// Incoming message unpacking.
	HOOK_UN_PACKING = "read message un packing"
	// Outgoing message packing.
	HOOK_PACKET = "send message packing"
)
// HandlerFunc is a middleware/route/hook handler. route points at the
// decoded route key so handlers can inspect or rewrite it.
type HandlerFunc func(conn *websocket.Conn, msg *Message, route *interface{}) error

// HandlerGroup collects handlers for batch (grouped) route registration.
type HandlerGroup struct {
	// Number of registered handlers, hooks excluded.
	count int
	// Flat handler table addressed by index.
	HandlersRoute []HandlerFunc
	// Route key -> indexes into HandlersRoute.
	ModuleRoutes map[interface{}][]int
}
// Storage is the registration interface implemented by StorageGroup.
type Storage interface {
	Use(...HandlerFunc)
	Route(interface{}, ...HandlerFunc)
	Group(interface{}, func() *HandlerGroup)
	Hook(string, HandlerFunc)
}

// Message is one websocket frame plus the read error that produced it.
type Message struct {
	MessageType int
	P []byte
	Err error
}

// Endian is the byte order used when packing/unpacking frame headers.
// Defaults to big-endian.
var Endian = binary.BigEndian

// StorageGroup is the main router, holding routes, middleware and hooks.
type StorageGroup struct {
	HandlerGroup
	// Middleware indexes into HandlersRoute.
	HandlersUse []int
	// Registered hooks, keyed by the HOOK_* constants.
	HandlerHook map[string]HandlerFunc
}

// Compile-time check that StorageGroup implements Storage.
var _ Storage = &StorageGroup{}
// NewRouter returns a router with the default packing and unpacking
// hooks installed.
func NewRouter() *StorageGroup {
	r := &StorageGroup{}
	r.ModuleRoutes = make(map[interface{}][]int)
	r.HandlerHook = make(map[string]HandlerFunc)
	r.count = 0
	r.Hook(HOOK_UN_PACKING, hookUnPacking)
	r.Hook(HOOK_PACKET, hookPacket)
	return r
}
// Use registers global middleware, run for every message before routing.
func (r *StorageGroup) Use(funcs ...HandlerFunc) {
	for _, f := range funcs {
		// Register into the flat handler table and remember the index.
		r.HandlersRoute = append(r.HandlersRoute, f)
		r.HandlersUse = append(r.HandlersUse, r.count)
		r.count++
	}
}
// Route registers one or more handlers under the route key m.
func (r *StorageGroup) Route(m interface{}, funcs ...HandlerFunc) {
	for _, f := range funcs {
		r.HandlersRoute = append(r.HandlersRoute, f)
		r.ModuleRoutes[m] = append(r.ModuleRoutes[m], r.count)
		r.count++
	}
}
// Group merges a batch of routes registered on a HandlerGroup into the
// router, namespacing each group key under m.
//
// Fix: the combined key is now a [2]interface{} array rather than a
// []interface{} slice. Slices are not comparable in Go, so using one as
// a map key panicked at runtime ("hash of unhashable type"); arrays of
// comparable elements are valid map keys.
func (r *StorageGroup) Group(m interface{}, f func() *HandlerGroup) {
	group := f()
	for k, v := range group.ModuleRoutes {
		kname := [2]interface{}{m, k}
		for _, idx := range v {
			r.HandlersRoute = append(r.HandlersRoute, group.HandlersRoute[idx])
			r.ModuleRoutes[kname] = append(r.ModuleRoutes[kname], r.count)
			r.count++
		}
	}
}
// Hook registers (or replaces) the hook handler for name s.
func (r *StorageGroup) Hook(s string, f HandlerFunc) {
	r.HandlerHook[s] = f
}
// NewHandlerGroup returns an empty group for batch route registration.
func NewHandlerGroup() *HandlerGroup {
	return &HandlerGroup{
		ModuleRoutes: make(map[interface{}][]int),
	}
}
// Add registers one or more handlers under key a within the group.
func (h *HandlerGroup) Add(a interface{}, funcs ...HandlerFunc) {
	for _, f := range funcs {
		h.HandlersRoute = append(h.HandlersRoute, f)
		h.ModuleRoutes[a] = append(h.ModuleRoutes[a], h.count)
		h.count++
	}
}
// hookRun invokes the named hook if one is registered; a missing hook is
// not an error.
func (r *StorageGroup) hookRun(n string, conn *websocket.Conn, msg *Message, route *interface{}) error {
	f, ok := r.HandlerHook[n]
	if !ok {
		return nil
	}
	return f(conn, msg, route)
}
// Listen reads frames from conn until a read error, dispatching every
// message through the middleware chain and then the matched route
// handlers. Hooks fire for new connections, errors, close, unpacking
// and unknown routes.
//
// Fixes over the previous version:
//   - message was declared as a nil *Message and immediately
//     dereferenced, panicking on the first frame; it is now allocated.
//   - route was a nil *interface{} whose dereference in the route lookup
//     panicked; it now points at a real value the unpacking hook fills.
func (r *StorageGroup) Listen(conn *websocket.Conn) error {
	// HOOK_NEW_CONN: new connection notification.
	if err := r.hookRun(HOOK_NEW_CONN, conn, nil, nil); err != nil {
		return err
	}
ERROR_BREAK:
	for {
		messageType, p, err := conn.ReadMessage()
		message := &Message{MessageType: messageType, P: p, Err: err}
		if err != nil {
			// HOOK_ERROR: error notification.
			if err := r.hookRun(HOOK_ERROR, conn, message, nil); err != nil {
				return err
			}
			// HOOK_CLOSED: connection closed notification.
			if err := r.hookRun(HOOK_CLOSED, conn, message, nil); err != nil {
				return err
			}
			break
		}
		// routeKey receives the decoded route key from the unpacking hook.
		var routeKey interface{}
		route := &routeKey
		if err := r.hookRun(HOOK_UN_PACKING, conn, message, route); err != nil {
			return err
		}
		// Run the middleware chain.
	ERROR_CONTINUE:
		for _, v := range r.HandlersUse {
			if err := r.HandlersRoute[v](conn, message, route); err != nil {
				switch err {
				case ERROR_STOP:
					// Drop this message entirely.
					continue ERROR_BREAK
				case ERROR_CONTINUE:
					// Skip to the next middleware.
					continue ERROR_CONTINUE
				case ERROR_BREAK:
					// Stop the middleware chain, continue with routing.
					break ERROR_CONTINUE
				default:
					// Unknown error: skip to the next middleware.
					continue ERROR_CONTINUE
				}
			}
		}
		routeFuncs, ok := r.ModuleRoutes[*route]
		if !ok {
			// HOOK_NOT_MODULE: no handlers registered for this route key.
			if err := r.hookRun(HOOK_NOT_MODULE, conn, message, route); err != nil {
				return err
			}
			continue
		}
		// Run the matched route handlers.
	ERROR_CONTINUE_TOW:
		for _, v := range routeFuncs {
			if err := r.HandlersRoute[v](conn, message, route); err != nil {
				switch err {
				case ERROR_STOP:
					// Drop this message entirely.
					continue ERROR_BREAK
				case ERROR_CONTINUE:
					// Continue with the next route handler.
					continue ERROR_CONTINUE_TOW
				case ERROR_BREAK:
					// Stop running route handlers.
					break ERROR_CONTINUE_TOW
				default:
					// Unknown error: stop running route handlers.
					break ERROR_CONTINUE_TOW
				}
			}
		}
	}
	return nil
}
// hookUnPacking is the default HOOK_UN_PACKING hook: it strips a uint16
// module id and a uint16 action id (byte order: Endian) from the front
// of the payload and publishes [2]uint16{module, action} as the route key.
//
// Fixes over the previous version:
//   - the key was a []uint16 slice, which is not comparable and panics
//     when used as a map key; [2]uint16 arrays are comparable.
//   - the result was assigned to the local pointer variable
//     (route = &kname), which the caller never saw; it is now written
//     through the pointer, guarded against a nil route.
func hookUnPacking(conn *websocket.Conn, msg *Message, route *interface{}) error {
	buf := bytes.NewBuffer(msg.P)
	var module, action uint16
	// NOTE(review): read errors on short payloads are still ignored, as
	// before — module/action stay zero in that case.
	binary.Read(buf, Endian, &module)
	binary.Read(buf, Endian, &action)
	msg.P = buf.Bytes()
	if route != nil {
		*route = [2]uint16{module, action}
	}
	return nil
}
// hookPacket is the default HOOK_PACKET hook: it prefixes the payload
// with the uint16 module and action ids from the route key, using the
// configured byte order.
//
// It accepts the key either as [2]uint16 (produced by hookUnPacking) or
// as the legacy []uint16 form, and guards against a nil or mistyped
// route instead of panicking; in that case the payload is left as-is.
func hookPacket(conn *websocket.Conn, msg *Message, route *interface{}) error {
	if route == nil {
		return nil
	}
	var module, action uint16
	switch k := (*route).(type) {
	case [2]uint16:
		module, action = k[0], k[1]
	case []uint16:
		if len(k) < 2 {
			return nil
		}
		module, action = k[0], k[1]
	default:
		return nil
	}
	buf := bytes.NewBuffer([]byte{})
	// uint16 module, uint16 action, then the raw payload bytes.
	binary.Write(buf, Endian, module)
	binary.Write(buf, Endian, action)
	binary.Write(buf, Endian, msg.P)
	msg.P = buf.Bytes()
	return nil
}
|
package main
import "fmt"
// main demonstrates Go variable declaration forms and zero values.
func main() {
	//Explicit declaration
	var sentence string = "A string"
	//Implicit declaration
	var number = 256.98
	//Implicit variable declaration may cause the compiler to assign the wrong type
	//This only happens in very, very rare occasions
	//Expression assignment operator
	//Basically assigns the value without the 'var' operator
	speed := 500.45
	fmt.Printf("Number type %T", number)
	fmt.Printf("Number type %T", speed)
	fmt.Printf("Number type %T", sentence)
	//Before assigning values, Go actually gives the variable a default
	//(zero) value, case in point:
	var defaultValueChecker bool
	fmt.Println(defaultValueChecker)
}
|
package main
import (
"container/ring"
"fmt"
"jblee.net/adventofcode2018/utils"
)
// main plays the marble game (AoC 2018 day 9): marbles 1..maxPointValue
// are inserted into a circular list; every 23rd marble instead scores
// and removes the marble 7 positions counter-clockwise. Prints the
// winning player's score.
func main() {
	line := utils.ReadLinesOrDie("input.txt")[0]
	var numPlayers, maxPointValue int
	fmt.Sscanf(line, "%d players; last marble is worth %d points",
		&numPlayers, &maxPointValue)
	// The circle starts as a single ring node holding marble 0.
	currentMarble := ring.New(1)
	currentMarble.Value = 0
	player := 0
	scores := make([]int, numPlayers)
	for pointVal := 1; pointVal <= maxPointValue; pointVal++ {
		if pointVal%23 != 0 {
			// Normal move: insert the new marble between the marbles one
			// and two positions clockwise of the current one.
			newMarble := ring.New(1)
			newMarble.Value = pointVal
			currentMarble.Next().Link(newMarble)
			currentMarble = newMarble
		} else {
			// Scoring move: keep this marble, plus the marble 7 positions
			// counter-clockwise, which is removed from the circle.
			for i := 0; i < 7; i++ {
				currentMarble = currentMarble.Prev()
			}
			scores[player] += pointVal + currentMarble.Value.(int)
			currentMarble = currentMarble.Prev()
			currentMarble.Unlink(1)
			currentMarble = currentMarble.Next()
		}
		player = (player + 1) % numPlayers
	}
	maxScore := 0
	for _, score := range scores {
		if score > maxScore {
			maxScore = score
		}
	}
	fmt.Printf("max score: %d\n", maxScore)
}
|
package format_test
import (
"testing"
"github.com/g-harel/gothrough/internal/types"
"github.com/g-harel/gothrough/internal/types/format"
)
// TestString checks format.String output for interfaces, functions and
// values, covering docs, embedded interfaces, arguments, return values
// and const/literal value forms.
func TestString(t *testing.T) {
	tt := map[string]struct {
		Input types.Type
		Expected string
	}{
		"simple interface": {
			Input: &types.Interface{
				Name: "Test",
			},
			Expected: "type Test interface {}\n",
		},
		"interface with function and embedded": {
			Input: &types.Interface{
				Name: "Extender",
				Embedded: []types.Reference{{Package: "io", Name: "Reader"}},
				Methods: []types.Function{{Name: "Print"}},
			},
			Expected: `type Extender interface {
	io.Reader
	Print()
}
`,
		},
		"interface with docs": {
			Input: &types.Interface{
				Name: "Potato",
				Docs: types.Docs{Text: "Potato does the thing."},
				Embedded: []types.Reference{
					{
						Name: "Tester",
					},
					{
						Name: "Water",
					},
					{
						Package: "earth",
						Name: "Grower",
						Docs: types.Docs{
							Text: "Grows the thing with the thing.",
						},
					},
				},
				Methods: []types.Function{
					{
						Name: "Pick",
						Docs: types.Docs{
							Text: "Takes the thing from the thing.",
						},
					},
					{
						Name: "Squish",
						ReturnValues: []types.Field{
							{Type: "int"},
						},
					},
					{
						Name: "Eat",
						ReturnValues: []types.Field{
							{Type: "string"},
							{Type: "error"},
						},
					},
				},
			},
			Expected: `// Potato does the thing.
type Potato interface {
	Tester
	Water
	// Grows the thing with the thing.
	earth.Grower
	// Takes the thing from the thing.
	Pick()
	Squish() int
	Eat() (string, error)
}
`,
		},
		"simple function": {
			Input: &types.Function{
				Name: "Test",
			},
			Expected: "func Test()\n",
		},
		"function with single return value": {
			Input: &types.Function{
				Name: "GetCount",
				ReturnValues: []types.Field{{Type: "int"}},
			},
			Expected: "func GetCount() int\n",
		},
		"function with multiple return values": {
			Input: &types.Function{
				Name: "Calc",
				ReturnValues: []types.Field{{Type: "int"}, {Type: "error"}},
			},
			Expected: "func Calc() (int, error)\n",
		},
		"function with named return values": {
			Input: &types.Function{
				Name: "Test",
				ReturnValues: []types.Field{{Name: "expected", Type: "string"}},
			},
			Expected: "func Test() (expected string)\n",
		},
		"function with argument": {
			Input: &types.Function{
				Name: "Square",
				Arguments: []types.Field{{Name: "num", Type: "int"}},
				ReturnValues: []types.Field{{Type: "int"}},
			},
			Expected: "func Square(num int) int\n",
		},
		"simple value": {
			Input: &types.Value{
				Name: "Test",
				Type: "string",
			},
			Expected: "var Test string\n",
		},
		"const value": {
			Input: &types.Value{
				Name: "Count",
				Type: "int",
				Const: true,
			},
			Expected: "const Count int\n",
		},
		"value with literal": {
			Input: &types.Value{
				Name: "TEST_NAME",
				Value: "\"abc\"",
			},
			Expected: "var TEST_NAME = \"abc\"\n",
		},
	}
	// Run each table entry as its own subtest.
	for name, tc := range tt {
		t.Run(name, func(t *testing.T) {
			actual, err := format.String(tc.Input)
			if err != nil {
				t.Fatalf("format error: %v", err)
				return
			}
			if actual != tc.Expected {
				t.Fatalf("expected/actual do not match\n'%v'\n'%v'", tc.Expected, actual)
			}
		})
	}
}
|
package public
import (
"context"
"gorm.io/gorm"
"strings"
"tpay_backend/merchantapi/internal/common"
"tpay_backend/model"
"tpay_backend/utils"
"tpay_backend/merchantapi/internal/svc"
"tpay_backend/merchantapi/internal/types"
"github.com/tal-tech/go-zero/core/logx"
)
// WithdrawLogic handles merchant withdrawal requests for the merchant API.
type WithdrawLogic struct {
	logx.Logger
	// Request-scoped context.
	ctx context.Context
	// Shared service dependencies (DB engine, configuration, ...).
	svcCtx *svc.ServiceContext
	// ID of the merchant making the request.
	merchantId int64
}
// NewWithdrawLogic builds a WithdrawLogic bound to the given request
// context, service context and merchant.
func NewWithdrawLogic(ctx context.Context, svcCtx *svc.ServiceContext, merchantId int64) WithdrawLogic {
	l := WithdrawLogic{
		ctx:        ctx,
		svcCtx:     svcCtx,
		merchantId: merchantId,
	}
	l.Logger = logx.WithContext(ctx)
	return l
}
// Withdraw validates a merchant withdrawal request and, on success,
// creates a pending withdrawal order and freezes the corresponding
// balance amount.
//
// Validation: merchant exists, TOTP code (unless globally disabled),
// pay password, positive balance, withdrawal config exists, and the
// amount lies within the configured single-transaction limits.
func (l *WithdrawLogic) Withdraw(req types.WithdrawRequest) error {
	// NOTE(review): this uses the package-level logger rather than the
	// request-scoped l.Infof used elsewhere — confirm this is intended.
	logx.Infof("req:%+v", req)
	req.CardNumber = strings.ReplaceAll(req.CardNumber, " ", "")
	// 1. Load the merchant record.
	merchant, err := model.NewMerchantModel(l.svcCtx.DbEngine).FindOneById(l.merchantId)
	if err != nil {
		if err == model.ErrRecordNotFound {
			l.Errorf("商户[%v]不存在", l.merchantId)
			return common.NewCodeError(common.NotWithdrawConfig)
		} else {
			l.Errorf("查询商户[%v]失败, err=%v", l.merchantId, err)
			return common.NewCodeError(common.ApplyFail)
		}
	}
	// Check whether Google TOTP verification is globally disabled.
	totpIsClose, err := model.NewGlobalConfigModel(l.svcCtx.DbEngine).TotpIsClose()
	if err != nil {
		l.Errorf("查询totp配置失败:%v", err)
		return common.NewCodeError(common.SysDBErr)
	}
	if !totpIsClose { // TOTP verification still enabled
		// Verify the TOTP passcode.
		if !utils.VerifyTOTPPasscode(req.TotpCode, merchant.TotpSecret) {
			return common.NewCodeError(common.LoginCaptchaNotMatch)
		}
	}
	plainPassword, err := common.DecryptPassword(req.PayPassword)
	if err != nil {
		l.Errorf("密码解密发生错误,err:%v, password:%v", err, req.PayPassword)
		return common.NewCodeError(common.SysDBErr)
	}
	if merchant.PayPassword != common.CreateMerchantPayPassword(plainPassword) {
		// NOTE(review): logs the whole merchant struct rather than the
		// merchant id — confirm that is intended (may leak fields).
		l.Errorf("商户[%v]支付密码错误", merchant)
		return common.NewCodeError(common.PayPasswordErr)
	}
	if merchant.Balance <= 0 {
		l.Errorf("商户[%v]余额[%v]不足", l.merchantId, merchant.Balance)
		return common.NewCodeError(common.InsufficientBalance)
	}
	// 2. Load the merchant's withdrawal configuration.
	config, err := model.NewMerchantWithdrawConfigModel(l.svcCtx.DbEngine).FindOneByMerchantId(l.merchantId)
	if err != nil {
		if err == model.ErrRecordNotFound {
			l.Errorf("商户[%v]没有提现配置", l.merchantId)
			return common.NewCodeError(common.NotWithdrawConfig)
		} else {
			l.Errorf("查询商户[%v]提现配置失败, err=%v", l.merchantId, err)
			return common.NewCodeError(common.ApplyFail)
		}
	}
	if config.SingleMinAmount > req.Amount || config.SingleMaxAmount < req.Amount {
		l.Errorf("提现金额[%v]超出最大[%v]和最小[%v]限制", req.Amount, config.SingleMaxAmount, config.SingleMinAmount)
		return common.NewCodeError(common.AmountOutOfLimit)
	}
	// Create the withdrawal order.
	order := new(model.MerchantWithdrawOrder)
	order.OrderNo = utils.GetDailyId()
	order.MerchantId = merchant.Id
	order.OrderAmount = req.Amount
	order.Remark = req.Remark
	order.BankName = req.BankName
	order.BranchName = req.BranchName
	order.PayeeName = req.PayeeName
	order.CardNumber = req.CardNumber
	order.OrderStatus = model.WithdrawOrderStatusPending
	order.DeductionMethod = config.DeductionMethod
	order.Currency = merchant.Currency
	order.BankCode = req.BankCode
	order.AreaId = merchant.AreaId
	// Merchant fee for this order.
	order.MerchantFee = utils.CalculatePayOrderFeeMerchant(req.Amount, config.SingleFee, config.Rate)
	switch config.DeductionMethod {
	case model.MerchantWithdrawDeductionInner:
		// Fee taken out of the requested amount: merchant receives
		// amount minus fee, balance decreases by the requested amount.
		order.RealAmount = order.OrderAmount - order.MerchantFee
		order.DecreaseAmount = req.Amount
		if merchant.Balance < order.OrderAmount {
			l.Errorf("商户[%v]余额[%v]不足", l.merchantId, merchant.Balance)
			return common.NewCodeError(common.InsufficientBalance)
		}
	case model.MerchantWithdrawDeductionOut:
		// Fee charged on top: merchant receives the full amount, balance
		// decreases by amount plus fee.
		order.RealAmount = order.OrderAmount
		order.DecreaseAmount = order.OrderAmount + order.MerchantFee
		if merchant.Balance < order.DecreaseAmount {
			l.Errorf("商户[%v]余额[%v]不足", l.merchantId, merchant.Balance)
			return common.NewCodeError(common.InsufficientBalance)
		}
	default:
		l.Errorf("提现配置有问题, config: %+v", config)
		return common.NewCodeError(common.ApplyFail)
	}
	l.Infof("订单:%+v", order)
	if err := l.InsertOrder(order); err != nil {
		l.Errorf("提现失败, err=%v", err)
		return common.NewCodeError(common.ApplyFail)
	}
	return nil
}
// InsertOrder persists the withdrawal order and moves the deducted
// amount from the merchant's balance into frozen funds, all within a
// single DB transaction.
func (l *WithdrawLogic) InsertOrder(order *model.MerchantWithdrawOrder) error {
	txErr := l.svcCtx.DbEngine.Transaction(func(tx *gorm.DB) error {
		// 1. Insert the order row.
		if err := model.NewMerchantWithdrawOrderModel(tx).Insert(order); err != nil {
			l.Errorf("添加提现订单失败, err=%v", err)
			return err
		}
		// 2. Deduct the merchant balance and add it to frozen funds.
		log := model.WalletLogExt{
			BusinessNo: order.OrderNo,
			Source: model.AmountSourceWithdraw,
			Remark: "",
		}
		if err := model.NewMerchantModel(tx).MinusBalanceFreezeTx(l.merchantId, order.DecreaseAmount, log); err != nil {
			// Distinguish "insufficient balance" from other failures in
			// the logs; either way the transaction rolls back.
			if strings.Contains(err.Error(), model.BalanceErr.Error()) {
				l.Errorf("商户余额不足")
				return err
			} else {
				l.Errorf("减商户余额增加冻结金额失败, err=%v", err)
				return err
			}
		}
		return nil
	})
	return txErr
}
|
package compress
import (
"bufio"
"bytes"
"compress/flate"
"errors"
"io"
"io/ioutil"
"github.com/gobwas/pool/pbufio"
)
// Bounds of the flate compression levels this package accepts:
// -2 (HuffmanOnly) through 9 (BestCompression).
const (
	maxCompressionLevel = flate.BestCompression
	minCompressionLevel = -2
)
var (
	// ErrWriteClose is returned for operations on a closed writer.
	ErrWriteClose = errors.New("write to closed writer")
	// ErrUnexpectedEndOfStream reports a flate stream that did not end on
	// the expected empty-block tail.
	ErrUnexpectedEndOfStream = errors.New("websocket: internal error, unexpected bytes at end of flate stream")
	// Tail as described here: https://tools.ietf.org/html/rfc7692#section-7.2.2
	deflateFinal = [4]byte{0, 0, 0xff, 0xff}
	// Tail to prevent reader error
	tail = [5]byte{0x01, 0, 0, 0xff, 0xff}
	// Pools of flate readers and writers; one writer pool per level from
	// minCompressionLevel through maxCompressionLevel.
	flateReaderPool = NewFlateReaderPool()
	flateWritersPools = [maxCompressionLevel - minCompressionLevel + 1]*FlateWriterPool{
		NewFlateWriterPool(-2), NewFlateWriterPool(-1), NewFlateWriterPool(0),
		NewFlateWriterPool(1), NewFlateWriterPool(2), NewFlateWriterPool(3),
		NewFlateWriterPool(4), NewFlateWriterPool(5), NewFlateWriterPool(6),
		NewFlateWriterPool(7), NewFlateWriterPool(8), NewFlateWriterPool(9),
	}
)
// Reader is an io.ReadCloser that can be re-armed with a new source and
// a flate dictionary.
type Reader interface {
	io.ReadCloser
	Reset(src io.Reader, dict []byte)
}

// reader decompresses permessage-deflate data from src, with the RFC
// 7692 tail appended so the flate stream terminates cleanly.
type reader struct {
	src io.Reader
	buf *bufio.Reader
	flateReader io.ReadCloser
}
// NewReader returns a Reader decompressing src. bufferSize controls the
// size of the internal buffered reader (the appended tail bytes are
// accounted for on top of it).
func NewReader(src io.Reader, bufferSize int) Reader {
	// Create buffered reader to buffer size control.
	// Otherwise it will be default buffer size from flate package. Flate
	// package also smart enough to do not wrap already buffered reader again.
	buf := pbufio.GetReader(
		io.MultiReader(
			src,
			bytes.NewReader(deflateFinal[:]),
			bytes.NewReader(tail[:]),
		),
		bufferSize+len(deflateFinal)+len(tail),
	)
	return &reader{
		src: src,
		buf: buf,
		flateReader: flateReaderPool.Get(buf),
	}
}
// Reset re-arms the reader to decompress from src, priming the flate
// dictionary with dict.
func (cr *reader) Reset(src io.Reader, dict []byte) {
	cr.src = src
	cr.reset(dict)
}
// reset rebuilds the buffered source (with the deflate tails re-appended)
// and resets the pooled flate reader with the given dictionary.
func (cr *reader) reset(dict []byte) {
	cr.buf.Reset(io.MultiReader(
		cr.src,
		bytes.NewReader(deflateFinal[:]),
		bytes.NewReader(tail[:]),
	))
	// NOTE(review): the Resetter error is ignored here — flate's Reset
	// error path should be unreachable for this reader; confirm.
	cr.flateReader.(flate.Resetter).Reset(cr.buf, dict)
}
// Read decompresses into p. io.EOF is only surfaced once the internal
// buffer is fully drained; on EOF or error the reader resets itself for
// the next message.
func (cr *reader) Read(p []byte) (n int, err error) {
	if cr.flateReader == nil || cr.src == nil || cr.buf == nil {
		return 0, io.ErrClosedPipe
	}
	n, err = cr.flateReader.Read(p)
	// When multiple DEFLATE block in one message was used — there is can be
	// io.EOF that actually means only end of the flate block, not the message.
	// To workaround that case we check internal buffer for content and if there
	// is anything in it — just ignore io.EOF to prevent partial message read.
	// Multiple DEFLATE block is supported in permessage-deflate.
	// See: https://tools.ietf.org/html/rfc7692#section-7.2.3.5
	if err == io.EOF {
		_, err2 := cr.buf.ReadByte()
		if err2 == io.EOF {
			cr.reset(nil)
			return n, io.EOF
		}
		cr.buf.UnreadByte()
	}
	if err != nil {
		cr.reset(nil)
	}
	return
}
// Close returns the pooled flate reader and buffer to their pools.
// Subsequent calls (and reads) report io.ErrClosedPipe.
func (cr *reader) Close() error {
	if cr.flateReader == nil || cr.src == nil || cr.buf == nil {
		return io.ErrClosedPipe
	}
	flateReaderPool.Put(cr.flateReader)
	pbufio.PutReader(cr.buf)
	cr.src = nil
	cr.flateReader = nil
	cr.buf = nil
	return nil
}
// truncWriter is an io.Writer that writes all but the last four bytes of the
// stream to another io.Writer.
// Things related to: https://tools.ietf.org/html/rfc7692#section-7.2.1
type truncWriter struct {
	origin io.Writer
	// Number of tail bytes currently held back in endBuffer.
	filledEndBytes int
	endBuffer [4]byte
}
// Reset points the truncWriter at w and clears the held-back tail bytes.
func (tw *truncWriter) Reset(w io.Writer) {
	tw.origin = w
	tw.filledEndBytes = 0
	tw.endBuffer = [4]byte{}
}
// Write forwards block to the origin writer while always holding back
// the last four bytes seen so far (FlushTail emits them). Returns the
// number of bytes consumed from block.
func (tw *truncWriter) Write(block []byte) (int, error) {
	filledBytes := 0
	// there we try to write «pruned» bytes from the end to special buffer.
	// that buffer appends to next block and will be replaced by end of that
	// block.
	if tw.filledEndBytes < len(tw.endBuffer) {
		filledBytes = copy(tw.endBuffer[tw.filledEndBytes:], block)
		block = block[filledBytes:]
		tw.filledEndBytes += filledBytes
		if len(block) == 0 {
			return filledBytes, nil
		}
	}
	m := len(block)
	if m > len(tw.endBuffer) {
		m = len(tw.endBuffer)
	}
	// Write buffer to the wire — we have replacement for it in that block.
	// If error — stops and return numbers of bytes writen.
	if nn, err := tw.origin.Write(tw.endBuffer[:m]); err != nil {
		return filledBytes + nn, err
	}
	// renew buffer and trim new buffer from the block end
	copy(tw.endBuffer[:], tw.endBuffer[m:])
	copy(tw.endBuffer[len(tw.endBuffer)-m:], block[len(block)-m:])
	// write block without last bytes.
	nn, err := tw.origin.Write(block[:len(block)-m])
	return filledBytes + nn, err
}
// FlushTail writes the four held-back tail bytes to the origin writer
// and clears the buffer.
func (tw *truncWriter) FlushTail() error {
	_, err := tw.origin.Write(tw.endBuffer[:])
	tw.filledEndBytes = 0
	tw.endBuffer = [4]byte{}
	return err
}
// Writer compresses websocket message payloads.
// Only client_no_context_takeover supported now. For implement sliding window
// there is no API in flate package.
//
// See: https://tools.ietf.org/html/rfc7692#section-7.1.1
// See: https://github.com/golang/go/issues/3155
type Writer interface {
	io.WriteCloser
	Flush() error
	FlushFragment() error
	Reset(io.Writer)
}
// writer chains a pooled flate.Writer through a truncWriter so the RFC
// 7692 tail can be stripped from each compressed message.
type writer struct {
	flateWriter *flate.Writer
	truncWriter *truncWriter
	dst io.Writer
	// Compression level; indexes the writer pool on Close.
	level int
	// Whether payload bytes were written since the last Flush/Reset.
	writeStarted bool
}
// NewWriter returns a Writer compressing to w at the given flate level
// (minCompressionLevel..maxCompressionLevel), drawing its flate writer
// from the matching per-level pool.
func NewWriter(w io.Writer, level int) Writer {
	tw := &truncWriter{origin: w}
	return &writer{
		truncWriter: tw,
		flateWriter: flateWritersPools[level-minCompressionLevel].Get(tw),
		dst: w,
		level: level,
	}
}
// ReadFrom buffers all of src in memory and compresses it via Write.
func (cw *writer) ReadFrom(src io.Reader) (n int64, err error) {
	var bts []byte
	if bts, err = ioutil.ReadAll(src); err != nil {
		return 0, err
	}
	m, err := cw.Write(bts)
	return int64(m), err
}
// Write compresses p and records whether any payload bytes have been
// written since the last Flush/Reset; Flush uses that flag to
// special-case empty messages.
//
// Fix: the previous version re-assigned writeStarted on every call via a
// deferred `cw.writeStarted = err == nil && len(p) > 0`, so a non-empty
// Write followed by an empty Write cleared the flag and made the next
// Flush silently skip the buffered data. The flag is now only ever
// raised here; Flush/Reset/Close remain responsible for clearing it.
func (cw *writer) Write(p []byte) (n int, err error) {
	if cw.flateWriter == nil {
		return 0, ErrWriteClose
	}
	n, err = cw.flateWriter.Write(p)
	if err == nil && len(p) > 0 {
		cw.writeStarted = true
	}
	return n, err
}
// FlushFragment emits a sync flush and releases the held-back tail so a
// message fragment can be sent without ending the stream.
func (cw *writer) FlushFragment() error {
	if err := cw.flateWriter.Flush(); err != nil {
		return err
	}
	return cw.truncWriter.FlushTail()
}
// Flush terminates the current message and resets the writer so no
// compressor state leaks into the next message.
func (cw *writer) Flush() error {
	defer func() {
		// Do not share state between flushes.
		cw.Reset(cw.dst)
		cw.writeStarted = false
	}()
	// An empty message is skipped entirely: flate would still emit at
	// least a Z_SYNC_FLUSH marker for it, which the peer does not expect.
	// TODO: May be better solution should include buffer for small messages
	// that should be excluded from compression and sends as is.
	if !cw.writeStarted {
		return nil
	}
	return cw.flateWriter.Flush()
}
// Close flushes pending data, returns the flate writer to its pool and
// validates that the stream ended on the expected deflate tail.
func (cw *writer) Close() error {
	if cw.flateWriter == nil {
		return ErrWriteClose
	}
	flushErr := cw.Flush()
	flateWritersPools[cw.level-minCompressionLevel].Put(cw.flateWriter)
	cw.flateWriter = nil
	cw.writeStarted = false
	if cw.truncWriter.endBuffer != deflateFinal &&
		cw.truncWriter.endBuffer != [4]byte{0, 0, 0, 0} {
		return ErrUnexpectedEndOfStream
	}
	cw.truncWriter.Reset(nil)
	return flushErr
}
// Reset re-targets the writer chain at w and clears per-message state.
func (cw *writer) Reset(w io.Writer) {
	cw.dst = w
	cw.writeStarted = false
	cw.truncWriter.Reset(w)
	cw.flateWriter.Reset(cw.truncWriter)
}
|
package txsizes
import (
"bytes"
"encoding/hex"
"testing"
"github.com/btcsuite/btcd/wire"
)
// Script sizes used by the size-estimation tests.
const (
	// Size of a pay-to-pubkey-hash output script.
	p2pkhScriptSize = P2PKHPkScriptSize
	// Size of a pay-to-script-hash output script.
	p2shScriptSize = 23
)
// makeInts returns a slice holding n copies of value.
func makeInts(value int, n int) []int {
	out := make([]int, n)
	for i := 0; i < n; i++ {
		out[i] = value
	}
	return out
}
// TestEstimateSerializeSize checks EstimateSerializeSize against
// hand-computed sizes for various input/output counts, including the
// compact-size-int bumps once counts reach 0xfd.
func TestEstimateSerializeSize(t *testing.T) {
	t.Parallel()
	tests := []struct {
		InputCount int
		OutputScriptLengths []int
		AddChangeOutput bool
		ExpectedSizeEstimate int
	}{
		0: {1, []int{}, false, 159},
		1: {1, []int{p2pkhScriptSize}, false, 193},
		2: {1, []int{}, true, 193},
		3: {1, []int{p2pkhScriptSize}, true, 227},
		4: {1, []int{p2shScriptSize}, false, 191},
		5: {1, []int{p2shScriptSize}, true, 225},
		6: {2, []int{}, false, 308},
		7: {2, []int{p2pkhScriptSize}, false, 342},
		8: {2, []int{}, true, 342},
		9: {2, []int{p2pkhScriptSize}, true, 376},
		10: {2, []int{p2shScriptSize}, false, 340},
		11: {2, []int{p2shScriptSize}, true, 374},
		// 0xfd is discriminant for 16-bit compact ints, compact int
		// total size increases from 1 byte to 3.
		12: {1, makeInts(p2pkhScriptSize, 0xfc), false, 8727},
		13: {1, makeInts(p2pkhScriptSize, 0xfd), false, 8727 + P2PKHOutputSize + 2},
		14: {1, makeInts(p2pkhScriptSize, 0xfc), true, 8727 + P2PKHOutputSize + 2},
		15: {0xfc, []int{}, false, 37558},
		16: {0xfd, []int{}, false, 37558 + RedeemP2PKHInputSize + 2},
	}
	for i, test := range tests {
		// Build outputs with zeroed scripts of the requested lengths.
		outputs := make([]*wire.TxOut, 0, len(test.OutputScriptLengths))
		for _, l := range test.OutputScriptLengths {
			outputs = append(outputs, &wire.TxOut{PkScript: make([]byte, l)})
		}
		actualEstimate := EstimateSerializeSize(test.InputCount, outputs, test.AddChangeOutput)
		if actualEstimate != test.ExpectedSizeEstimate {
			t.Errorf("Test %d: Got %v: Expected %v", i, actualEstimate, test.ExpectedSizeEstimate)
		}
	}
}
// TestEstimateVirtualSize checks EstimateVirtualSize against real serialized
// transactions (deserialized from hex) whose vsize is known.
func TestEstimateVirtualSize(t *testing.T) {
	t.Parallel()
	type estimateVSizeTest struct {
		tx              func() (*wire.MsgTx, error) // builds the reference transaction
		p2wpkhIns       int
		nestedp2wpkhIns int
		p2pkhIns        int
		change          bool // whether a P2WPKH change output is assumed
		result          int  // expected virtual size
	}
	// TODO(halseth): add tests for more combination out inputs/outputs.
	tests := []estimateVSizeTest{
		// Spending P2WPKH to two outputs. Example adapted from example in BIP-143.
		{
			tx: func() (*wire.MsgTx, error) {
				txHex := "01000000000101ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac0247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
				b, err := hex.DecodeString(txHex)
				if err != nil {
					return nil, err
				}
				tx := &wire.MsgTx{}
				err = tx.Deserialize(bytes.NewReader(b))
				if err != nil {
					return nil, err
				}
				return tx, nil
			},
			p2wpkhIns: 1,
			result:    147,
		},
		{
			// Spending P2SH-P2WPKH to two outputs. Example adapted from example in BIP-143.
			tx: func() (*wire.MsgTx, error) {
				txHex := "01000000000101db6b1b20aa0fd7b23880be2ecbd4a98130974cf4748fb66092ac4d3ceb1a5477010000001716001479091972186c449eb1ded22b78e40d009bdf0089feffffff02b8b4eb0b000000001976a914a457b684d7f0d539a46a45bbc043f35b59d0d96388ac0008af2f000000001976a914fd270b1ee6abcaea97fea7ad0402e8bd8ad6d77c88ac02473044022047ac8e878352d3ebbde1c94ce3a10d057c24175747116f8288e5d794d12d482f0220217f36a485cae903c713331d877c1f64677e3622ad4010726870540656fe9dcb012103ad1d8e89212f0b92c74d23bb710c00662ad1470198ac48c43f7d6f93a2a2687392040000"
				b, err := hex.DecodeString(txHex)
				if err != nil {
					return nil, err
				}
				tx := &wire.MsgTx{}
				err = tx.Deserialize(bytes.NewReader(b))
				if err != nil {
					return nil, err
				}
				return tx, nil
			},
			nestedp2wpkhIns: 1,
			result:          170,
		},
		{
			// Spending P2WPKH to one output, adding one change output. We reuse
			// the transaction spending to two outputs, removing one of them.
			tx: func() (*wire.MsgTx, error) {
				txHex := "01000000000101ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac0247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
				b, err := hex.DecodeString(txHex)
				if err != nil {
					return nil, err
				}
				tx := &wire.MsgTx{}
				err = tx.Deserialize(bytes.NewReader(b))
				if err != nil {
					return nil, err
				}
				// Only keep the first output.
				tx.TxOut = []*wire.TxOut{tx.TxOut[0]}
				return tx, nil
			},
			p2wpkhIns: 1,
			change:    true,
			result:    144,
		},
		{
			// Spending one P2PKH to two P2PKH outputs (no witness data).
			tx: func() (*wire.MsgTx, error) {
				txHex := "0100000001a4c91c9720157a5ee582a7966471d9c70d0a860fa7757b4c42a535a12054a4c9000000006c493046022100d49c452a00e5b1213ac84d92269510a05a584a4d0949bd7d0ad4e3408ac8e80a022100bf98707ffaf1eb9dff146f7da54e68651c0a27e3653ec3882b7a95202328579c01210332d98672a4246fe917b9c724c339e757d46b1ffde3fb27fdc680b4bb29b6ad59ffffffff02a0860100000000001976a9144fb55ee0524076acd4c14e7773561e4c298c8e2788ac20688a0b000000001976a914cb7f6bb8e95a2cd06423932cfbbce73d16a18df088ac00000000"
				b, err := hex.DecodeString(txHex)
				if err != nil {
					return nil, err
				}
				tx := &wire.MsgTx{}
				err = tx.Deserialize(bytes.NewReader(b))
				if err != nil {
					return nil, err
				}
				return tx, nil
			},
			p2pkhIns: 1,
			result:   227,
		},
	}
	for _, test := range tests {
		tx, err := test.tx()
		if err != nil {
			t.Fatalf("unable to get test tx: %v", err)
		}
		// When a change output is requested, estimate with a P2WPKH-sized
		// change script; otherwise pass zero (no change).
		changeScriptSize := 0
		if test.change {
			changeScriptSize = P2WPKHPkScriptSize
		}
		est := EstimateVirtualSize(
			test.p2pkhIns, 0, test.p2wpkhIns, test.nestedp2wpkhIns,
			tx.TxOut, changeScriptSize,
		)
		if est != test.result {
			t.Fatalf("expected estimated vsize to be %d, "+
				"instead got %d", test.result, est)
		}
	}
}
|
package test
import (
"testing"
"reflect"
"strings"
"encoding/json"
"github.com/trevershick/analytics2-cli/a2m/rest"
)
/* Test Helpers */
// AssertEquals fails the test when expected and actual differ under ==,
// reporting both values and their dynamic types.
func AssertEquals(t *testing.T, expected interface{}, actual interface{}) {
	if expected == actual {
		return
	}
	t.Errorf("Expected %v (type %v) - Got %v (type %v)", expected, reflect.TypeOf(expected), actual, reflect.TypeOf(actual))
}
// AssertContains fails the test when output does not contain expected as a
// substring.
func AssertContains(t *testing.T, expected string, output string) {
	if strings.Contains(output, expected) {
		return
	}
	t.Errorf("Expected '%s' to contain '%s'", output, expected)
}
// FakeRestLoader returns a rest.Loader stub that records the Url and Params
// it was invoked with (readable through the returned *rest.RestArgs) and
// unmarshals responseContent into args.ResponseData. It panics if
// responseContent is not valid JSON for the target.
func FakeRestLoader(responseContent string) (rest.Loader, *rest.RestArgs) {
	captured := rest.RestArgs{}
	loader := func(args *rest.RestArgs) error {
		// Record what the caller passed so tests can inspect it later.
		captured.Url = args.Url
		captured.Params = args.Params
		if err := json.Unmarshal([]byte(responseContent), args.ResponseData); err != nil {
			panic(err)
		}
		return nil
	}
	return loader, &captured
}
|
package stemmer
import (
"strings"
)
// PorterStemmer is the interface for a stemmer that reduces a word to its
// Porter stem.
type PorterStemmer interface {
	Stem(string) (string, error)
}
//stem a word
func Stem(origWord string) string {
if len(origWord) > 2 {
return step5b(step5a(step4(step3(step2(step1(strings.TrimSpace(origWord)))))))
}
return origWord
}
//Step 1 deals with plurals and past participles.
func step1(iw string) (sw string) {
sw = step1a(iw)
sw = step1b(sw)
sw = step1c(sw)
return
}
// step1a strips plural suffixes: SSES -> SS, IES -> I, SS stays, S -> "".
func step1a(iw string) string {
	n := len(iw)
	switch {
	case strings.HasSuffix(iw, "sses"), strings.HasSuffix(iw, "ies"):
		// Both map by dropping the final two characters.
		return iw[:n-2]
	case strings.HasSuffix(iw, "ss"):
		return iw
	case strings.HasSuffix(iw, "s"):
		return iw[:n-1]
	}
	return iw
}
// step1b handles -eed, -ed and -ing endings: (m>0) EED -> EE; (*v*) ED and
// (*v*) ING are removed and the remaining stem post-processed.
func step1b(iw string) string {
	n := len(iw)
	switch {
	case strings.HasSuffix(iw, "eed"):
		if measure(iw[:n-3]) > 0 {
			return iw[:n-1]
		}
	case strings.HasSuffix(iw, "ed"):
		if stem := iw[:n-2]; hasVowel(stem) {
			return step1bInter(stem)
		}
	case strings.HasSuffix(iw, "ing"):
		if stem := iw[:n-3]; hasVowel(stem) {
			return step1bInter(stem)
		}
	}
	return iw
}
// step1bInter post-processes a stem after -ed/-ing removal: AT/BL/IZ gain an
// 'e'; a double consonant (other than l, s, z) is single-lettered; a short
// cvc stem with measure 1 gains an 'e'.
func step1bInter(iw string) string {
	n := len(iw)
	if strings.HasSuffix(iw, "at") || strings.HasSuffix(iw, "bl") || strings.HasSuffix(iw, "iz") {
		return iw + "e"
	}
	if astrD(iw) {
		if last := iw[n-1]; last != 'l' && last != 's' && last != 'z' {
			return iw[:n-1]
		}
		return iw
	}
	if astrO(iw) && measure(iw) == 1 {
		return iw + "e"
	}
	return iw
}
// step1c turns a terminal 'y' into 'i' when the preceding stem contains a
// vowel.
func step1c(iw string) string {
	n := len(iw)
	if !strings.HasSuffix(iw, "y") {
		return iw
	}
	if stem := iw[:n-1]; hasVowel(stem) {
		return stem + "i"
	}
	return iw
}
// step2 maps double suffixes to single ones (e.g. -ational -> -ate,
// -ization -> -ize) when the remaining stem has a positive measure. The
// branches are ordered so longer suffixes are tried before their shorter
// tails (e.g. "ational" before "tional", "ization" before "ation").
func step2(iw string) string {
	wLen := len(iw)
	if strings.HasSuffix(iw, "ational") {
		if measure(iw[:wLen-7]) > 0 {
			return iw[:wLen-7] + "ate"
		}
	} else if strings.HasSuffix(iw, "tional") {
		if measure(iw[:wLen-6]) > 0 {
			// tional -> tion: drop the trailing "al".
			return iw[:wLen-2]
		}
	} else if strings.HasSuffix(iw, "enci") || strings.HasSuffix(iw, "anci") {
		if measure(iw[:wLen-4]) > 0 {
			// enci -> ence, anci -> ance: replace the final 'i' with 'e'.
			return iw[:wLen-1] + "e"
		}
	} else if strings.HasSuffix(iw, "izer") {
		if measure(iw[:wLen-4]) > 0 {
			return iw[:wLen-4] + "ize"
		}
	} else if strings.HasSuffix(iw, "abli") {
		if measure(iw[:wLen-4]) > 0 {
			return iw[:wLen-4] + "able"
		}
	} else if strings.HasSuffix(iw, "alli") {
		if measure(iw[:wLen-4]) > 0 {
			return iw[:wLen-4] + "al"
		}
	} else if strings.HasSuffix(iw, "entli") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-5] + "ent"
		}
	} else if strings.HasSuffix(iw, "eli") {
		if measure(iw[:wLen-3]) > 0 {
			return iw[:wLen-3] + "e"
		}
	} else if strings.HasSuffix(iw, "ousli") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-5] + "ous"
		}
	} else if strings.HasSuffix(iw, "ization") {
		if measure(iw[:wLen-7]) > 0 {
			return iw[:wLen-7] + "ize"
		}
	} else if strings.HasSuffix(iw, "ation") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-5] + "ate"
		}
	} else if strings.HasSuffix(iw, "ator") {
		if measure(iw[:wLen-4]) > 0 {
			return iw[:wLen-4] + "ate"
		}
	} else if strings.HasSuffix(iw, "alism") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-5] + "al"
		}
	} else if strings.HasSuffix(iw, "iveness") {
		if measure(iw[:wLen-7]) > 0 {
			return iw[:wLen-7] + "ive"
		}
	} else if strings.HasSuffix(iw, "fulness") {
		if measure(iw[:wLen-7]) > 0 {
			return iw[:wLen-7] + "ful"
		}
	} else if strings.HasSuffix(iw, "ousness") {
		if measure(iw[:wLen-7]) > 0 {
			return iw[:wLen-7] + "ous"
		}
	} else if strings.HasSuffix(iw, "aliti") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-5] + "al"
		}
	} else if strings.HasSuffix(iw, "iviti") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-5] + "ive"
		}
	} else if strings.HasSuffix(iw, "biliti") {
		if measure(iw[:wLen-6]) > 0 {
			return iw[:wLen-6] + "ble"
		}
	}
	// No rule matched (or the measure condition failed): leave unchanged.
	return iw
}
// step3 strips or shortens derivational suffixes (-icate -> -ic,
// -ative -> "", -alize -> -al, -iciti -> -ic, -ical -> -ic, -ful -> "",
// -ness -> "") when the remaining stem has a positive measure.
func step3(iw string) string {
	wLen := len(iw)
	if strings.HasSuffix(iw, "icate") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "ative") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-5]
		}
	} else if strings.HasSuffix(iw, "alize") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "iciti") {
		if measure(iw[:wLen-5]) > 0 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "ical") {
		if measure(iw[:wLen-4]) > 0 {
			return iw[:wLen-2]
		}
	} else if strings.HasSuffix(iw, "ful") {
		if measure(iw[:wLen-3]) > 0 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "ness") {
		if measure(iw[:wLen-4]) > 0 {
			return iw[:wLen-4]
		}
	}
	return iw
}
// step4 removes residual suffixes (-al, -ance, -ence, -er, -ic, -able,
// -ible, -ant, -ement, -ment, -ent, -ion, -ou, -ism, -ate, -iti, -ous,
// -ive, -ize) when the remaining stem's measure exceeds one. "-ion" is
// removed only when preceded by 's' or 't'. Longer suffixes are checked
// before their tails ("ement" before "ment" before "ent").
func step4(iw string) string {
	wLen := len(iw)
	if strings.HasSuffix(iw, "al") {
		if measure(iw[:wLen-2]) > 1 {
			return iw[:wLen-2]
		}
	} else if strings.HasSuffix(iw, "ance") {
		if measure(iw[:wLen-4]) > 1 {
			return iw[:wLen-4]
		}
	} else if strings.HasSuffix(iw, "ence") {
		if measure(iw[:wLen-4]) > 1 {
			return iw[:wLen-4]
		}
	} else if strings.HasSuffix(iw, "er") {
		if measure(iw[:wLen-2]) > 1 {
			return iw[:wLen-2]
		}
	} else if strings.HasSuffix(iw, "ic") {
		if measure(iw[:wLen-2]) > 1 {
			return iw[:wLen-2]
		}
	} else if strings.HasSuffix(iw, "able") {
		if measure(iw[:wLen-4]) > 1 {
			return iw[:wLen-4]
		}
	} else if strings.HasSuffix(iw, "ible") {
		if measure(iw[:wLen-4]) > 1 {
			return iw[:wLen-4]
		}
	} else if strings.HasSuffix(iw, "ant") {
		if measure(iw[:wLen-3]) > 1 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "ement") {
		if measure(iw[:wLen-5]) > 1 {
			return iw[:wLen-5]
		}
	} else if strings.HasSuffix(iw, "ment") {
		if measure(iw[:wLen-4]) > 1 {
			return iw[:wLen-4]
		}
	} else if strings.HasSuffix(iw, "ent") {
		if measure(iw[:wLen-3]) > 1 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "ion") {
		if measure(iw[:wLen-3]) > 1 {
			// Only strip "ion" when the stem ends in 's' or 't'
			// (e.g. "adoption" -> "adopt").
			if wLen > 4 && (iw[wLen-4] == 's' || iw[wLen-4] == 't') {
				return iw[:wLen-3]
			}
		}
	} else if strings.HasSuffix(iw, "ou") {
		if measure(iw[:wLen-2]) > 1 {
			return iw[:wLen-2]
		}
	} else if strings.HasSuffix(iw, "ism") {
		if measure(iw[:wLen-3]) > 1 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "ate") {
		if measure(iw[:wLen-3]) > 1 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "iti") {
		if measure(iw[:wLen-3]) > 1 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "ous") {
		if measure(iw[:wLen-3]) > 1 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "ive") {
		if measure(iw[:wLen-3]) > 1 {
			return iw[:wLen-3]
		}
	} else if strings.HasSuffix(iw, "ize") {
		if measure(iw[:wLen-3]) > 1 {
			return iw[:wLen-3]
		}
	}
	return iw
}
// step5a drops a terminal 'e' when the stem's measure is greater than one,
// or equals one and the stem does not end in cvc (astrO).
func step5a(iw string) string {
	n := len(iw)
	if !strings.HasSuffix(iw, "e") {
		return iw
	}
	stem := iw[:n-1]
	m := measure(stem)
	if m > 1 || (m == 1 && !astrO(stem)) {
		return stem
	}
	return iw
}
// step5b removes one 'l' from a trailing double 'l' when the measure is
// greater than one ("controll" -> "control").
func step5b(iw string) string {
	n := len(iw)
	if measure(iw) <= 1 {
		return iw
	}
	if iw[n-1] == 'l' && isConsonant(iw, n-1) && isConsonant(iw, n-2) {
		return iw[:n-1]
	}
	return iw
}
// astrO reports whether the stem ends consonant-vowel-consonant (*o) where
// the final consonant is not 'w', 'x' or 'y'. Stems shorter than three
// characters never match.
func astrO(iw string) bool {
	last := len(iw) - 1
	if last < 2 {
		return false
	}
	if !isConsonant(iw, last-2) || isConsonant(iw, last-1) || !isConsonant(iw, last) {
		return false
	}
	c := iw[last]
	return c != 'w' && c != 'x' && c != 'y'
}
// astrD reports whether the word ends in a double consonant (*d), e.g.
// "-tt" or "-ss".
func astrD(iw string) bool {
	wLen := len(iw)
	// Bug fix: the original indexed iw[wLen-2] unconditionally, which
	// panics for words shorter than two bytes. A word that short cannot
	// end in a double consonant, so report false instead.
	if wLen < 2 {
		return false
	}
	return iw[wLen-1] == iw[wLen-2] && isConsonant(iw, wLen-1)
}
// isConsonant reports whether the byte at index i of w is a consonant under
// Porter's definition: a, e, i, o, u are vowels, and 'y' counts as a vowel
// when it follows a consonant. Out-of-range indices are treated as
// non-consonants.
func isConsonant(w string, i int) bool {
	// Bug fix: the previous bound check used `len(w) < i`, which let
	// i == len(w) through and panicked on the w[i] access below.
	if i < 0 || i >= len(w) {
		return false
	}
	switch w[i] {
	case 'a', 'e', 'i', 'o', 'u':
		return false
	case 'y':
		// 'y' at the start of a word is a consonant; elsewhere it is a
		// consonant exactly when the preceding character is a vowel.
		return i == 0 || !isConsonant(w, i-1)
	default:
		return true
	}
}
// measure returns m, the number of vowel-consonant sequences in w when the
// word is viewed as [C](VC)^m[V]: an optional leading consonant run is
// skipped, then each vowel-run followed by a consonant-run counts once.
func measure(w string) (val int64) {
	wLen := len(w)
	if wLen <= 0 {
		return
	}
	ptr := 0
	//ignore consonant at start
	for isConsonant(w, ptr) {
		ptr++
		if ptr >= wLen {
			// Word is all consonants: m is zero.
			return val
		}
	}
	incVal := false
	//count Vowel-Consonant pair
	for i := ptr; i < wLen; i++ {
		// Skip the vowel run...
		for i < wLen && !isConsonant(w, i) {
			i++
		}
		// ...then consume the consonant run that follows it; seeing at
		// least one consonant completes a VC pair.
		for i < wLen && isConsonant(w, i) {
			i++
			incVal = true
		}
		if incVal {
			val++
			incVal = false
		}
	}
	return
}
// hasVowel reports whether the stem contains at least one vowel (any
// position where isConsonant is false).
func hasVowel(str string) bool {
	for i := range str {
		if !isConsonant(str, i) {
			return true
		}
	}
	return false
}
|
package main
import (
"fmt"
"sync"
"time"
)
// main runs the WaitGroup demo; the mutex demos are left disabled.
func main() {
	// lock()
	// rwlock()
	wg()
}
// lock demonstrates that a second Lock on the same sync.Mutex blocks until
// another goroutine releases it.
func lock() {
	var m sync.Mutex
	m.Lock()
	go func() {
		// Release the lock after 3 seconds so the blocked Lock below
		// can proceed.
		time.Sleep(3 * time.Second)
		m.Unlock()
		fmt.Println("unlock1")
	}()
	// var m2 sync.Mutex // of course this would be pointless (a different mutex would not block)
	// m2.Lock()
	m.Lock() // blocks here until the goroutine above calls Unlock
	fmt.Println("この手前でブロック")
	m.Unlock()
	fmt.Println("unlock2")
}
// rwlock demonstrates sync.RWMutex read locks.
func rwlock() {
	// RLocks do not block each other; only Lock would be blocked.
	var m sync.RWMutex
	m.RLock()
	go func() {
		time.Sleep(3 * time.Second)
		m.RUnlock()
		fmt.Println("unlock1")
	}()
	m.RLock() // does not block: a second read lock is allowed
	m.RUnlock()
	fmt.Println("unlock2")
}
// wg demonstrates sync.WaitGroup together with sync.Once: each goroutine
// prints its name, and exactly one of them prints the "first man" line.
func wg() {
	var once sync.Once // Do runs the supplied function at most once across goroutines
	var wg sync.WaitGroup
	ps := []string{"tom", "jhon", "yam"}
	for _, p := range ps {
		// Bug fix: Add must be called before the goroutine is started.
		// Calling it inside the goroutine races with wg.Wait(), which
		// could observe a zero counter and return before any goroutine
		// has registered itself.
		wg.Add(1)
		go func(p string) {
			defer wg.Done()
			time.Sleep(2 * time.Second)
			fmt.Printf("my name is %s\n", p)
			once.Do(func() {
				fmt.Printf("i'm first man! (%s)\n", p)
			})
		}(p)
		time.Sleep(2 * time.Second)
	}
	wg.Wait()
	fmt.Println("done!")
}
|
package src
// Pass3 is a compiler pass that currently returns the AST unchanged; it is
// kept so the pass pipeline's shape stays stable.
func Pass3(compiler *Compiler, RootAST *RootAST) *RootAST {
	return RootAST
}
|
package gojsonq
import (
"encoding/json"
"fmt"
"reflect"
"strings"
"testing"
)
// TestNew verifies that New returns a value of type *gojsonq.JSONQ.
func TestNew(t *testing.T) {
	jq := New()
	if reflect.ValueOf(jq).Type().String() != "*gojsonq.JSONQ" {
		t.Error("failed to match JSONQ type")
	}
}
// TestJSONQ_String verifies the String representation of a fresh JSONQ.
// NOTE(review): "Quries" mirrors the typo in JSONQ.String's format string;
// keep the two in sync if either is fixed.
func TestJSONQ_String(t *testing.T) {
	jq := New()
	expected := fmt.Sprintf("\nContent: %s\nQuries:%v\n", string(jq.raw), jq.queries)
	if out := jq.String(); out != expected {
		t.Errorf("Expected: %v\n Got: %v", expected, out)
	}
}
// TestJSONQ_decode verifies that decode records an error only for invalid
// JSON assigned to jq.raw.
func TestJSONQ_decode(t *testing.T) {
	testCases := []struct {
		tag       string
		jsonStr   string
		errExpect bool
	}{
		{
			tag:       "valid json",
			jsonStr:   `{"name": "John Doe", "age": 30}`,
			errExpect: false,
		},
		{
			tag:       "invalid json should return error",
			jsonStr:   `{"name": "John Doe", "age": 30, "only_key"}`,
			errExpect: true,
		},
	}
	for _, tc := range testCases {
		jq := New()
		jq.raw = json.RawMessage(tc.jsonStr)
		jq.decode()
		if err := jq.Error(); err != nil && !tc.errExpect {
			t.Errorf("failed %s", tc.tag)
		}
	}
}
// TestJSONQ_File verifies loading JSON from a file: a readable file must
// produce no error and a missing file must surface one.
func TestJSONQ_File(t *testing.T) {
	filename := "./data.json"
	fc := createTestFile(t, filename)
	defer fc()
	t.Run("valid_file", func(t *testing.T) {
		if err := New().File(filename).Error(); err != nil {
			t.Error(err)
		}
	})
	t.Run("file_not_exist", func(t *testing.T) {
		// Bug fix: on this failure path err is nil, so the original
		// t.Error(err) logged "nil" with no explanation.
		if err := New().File("./invalid_file_name").Error(); err == nil {
			t.Error("expecting an error for a nonexistent file")
		}
	})
}
// TestJSONQ_JSONString verifies that JSONString accepts valid JSON without
// error and records an error for invalid JSON.
func TestJSONQ_JSONString(t *testing.T) {
	testCases := []struct {
		tag       string
		jsonStr   string
		errExpect bool
	}{
		{
			tag:       "valid json",
			jsonStr:   `{"name": "John Doe", "age": 30}`,
			errExpect: false,
		},
		{
			tag:       "invalid json should return error",
			jsonStr:   `{"name": "John Doe", "age": 30, "only_key"}`,
			errExpect: true,
		},
	}
	for _, tc := range testCases {
		if err := New().JSONString(tc.jsonStr).Error(); err != nil && !tc.errExpect {
			t.Errorf("failed %s", tc.tag)
		}
	}
}
// TestJSONQ_Reader verifies that Reader behaves like JSONString when fed
// from an io.Reader.
func TestJSONQ_Reader(t *testing.T) {
	testCases := []struct {
		tag       string
		jsonStr   string
		errExpect bool
	}{
		{
			tag:       "valid json",
			jsonStr:   `{"name": "John Doe", "age": 30}`,
			errExpect: false,
		},
		{
			tag:       "invalid json should return error",
			jsonStr:   `{"name": "John Doe", "age": 30, "only_key"}`,
			errExpect: true,
		},
	}
	for _, tc := range testCases {
		rdr := strings.NewReader(tc.jsonStr)
		if err := New().Reader(rdr).Error(); err != nil && !tc.errExpect {
			t.Errorf("failed %s", tc.tag)
		}
	}
}
// TestJSONQ_Errors verifies that Errors reports a non-empty error list for
// invalid JSON input.
func TestJSONQ_Errors(t *testing.T) {
	testCases := []struct {
		tag     string
		jsonStr string
	}{
		{
			tag:     "invalid json 1",
			jsonStr: `{"name": "John Doe", "age": 30, :""}`,
		},
		{
			tag:     "invalid json 2",
			jsonStr: `{"name": "John Doe", "age": 30, "only_key"}`,
		},
	}
	for _, tc := range testCases {
		if errs := New().JSONString(tc.jsonStr).Errors(); len(errs) == 0 {
			t.Errorf("failed %s", tc.tag)
		}
	}
}
// TestJSONQ_Macro verifies that a custom macro is registered in queryMap and
// that registering the same name twice records an error.
func TestJSONQ_Macro(t *testing.T) {
	jq := New()
	t.Run("custom_macro", func(t *testing.T) {
		jq.Macro("mac1", func(x, y interface{}) bool {
			return true
		})
		if _, ok := jq.queryMap["mac1"]; !ok {
			t.Error("failed to register macro")
		}
	})
	t.Run("already registered macro", func(t *testing.T) {
		// Same name again: the second registration must fail.
		jq.Macro("mac1", func(x, y interface{}) bool {
			return true
		})
		if jq.Error() == nil {
			t.Error("failed to throw error for already registered macro")
		}
	})
}
// TestJSONQ_From verifies that From stores the node selector string.
func TestJSONQ_From(t *testing.T) {
	node := "root.items.[0].name"
	jq := New().From(node)
	if jq.node != node {
		t.Error("failed to set node name")
	}
}
// TestJSONQ_findNode exercises node traversal against the shared jsonStr
// fixture: dotted paths, [n] index access, invalid indices, and re-querying
// a GroupBy result.
func TestJSONQ_findNode(t *testing.T) {
	t.Run("accessing node", func(t *testing.T) {
		jq := New().JSONString(jsonStr)
		expected := "Star Trek"
		if out := jq.From("vendor.name").Get(); out != expected {
			t.Errorf("Expected: %v\n Got: %v", expected, out)
		}
	})
	t.Run("accessing index", func(t *testing.T) {
		jq := New().JSONString(jsonStr)
		expJSON := `{"id":1,"name":"MacBook Pro 13 inch retina","price":1350}`
		out := jq.From("vendor.items.[0]").Get()
		assertJSON(t, out, expJSON)
	})
	t.Run("accessing not existed index", func(t *testing.T) {
		jq := New().JSONString(jsonStr)
		expJSON := `null`
		out := jq.From("vendor.items.[10]").Get()
		assertJSON(t, out, expJSON)
	})
	t.Run("accessing invalid index error", func(t *testing.T) {
		jq := New().JSONString(jsonStr)
		jq.From("vendor.items.[x]").Get()
		if jq.Error() == nil {
			t.Error("expecting an error")
		}
	})
	t.Run("accessing group by data", func(t *testing.T) {
		jq := New().JSONString(jsonStr)
		expJSON := `[{"id":3,"name":"Sony VAIO","price":1200}]`
		out := jq.From("vendor.items").GroupBy("price").From("1200").Get()
		assertJSON(t, out, expJSON)
	})
}
// TestJSONQ_Where exercises Where filters: single, chained (AND semantics),
// type-sensitive comparisons, and an invalid operator error.
func TestJSONQ_Where(t *testing.T) {
	t.Run("single Where", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			Where("price", "=", 1700)
		expected := `[{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("multiple Where expecting data", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			Where("price", "=", 1700).
			Where("id", "=", 2)
		expected := `[{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("multiple Where expecting empty result", func(t *testing.T) {
		// "1700" (string) never equals the numeric id values.
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			Where("price", "=", 1700).
			Where("id", "=", "1700")
		expected := `[]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("Where with invalid operator expecting error", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			Where("price", "invalid_op", 1700)
		jq.Get()
		if jq.Error() == nil {
			t.Error("expecting: invalid operator invalid_op")
		}
	})
}
// TestJSONQ_WhereEqual exercises the WhereEqual shorthand, alone and
// chained.
func TestJSONQ_WhereEqual(t *testing.T) {
	t.Run("single WhereEqual", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereEqual("price", 1700)
		expected := `[{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("multiple WhereEqual expecting data", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereEqual("price", 1700).
			WhereEqual("id", 2)
		expected := `[{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("multiple WhereEqual expecting empty result", func(t *testing.T) {
		// String "1700" does not match any numeric id.
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereEqual("price", 1700).
			WhereEqual("id", "1700")
		expected := `[]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_WhereNotEqual exercises WhereNotEqual, alone and chained.
func TestJSONQ_WhereNotEqual(t *testing.T) {
	t.Run("single WhereNotEqual", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereNotEqual("price", 850)
		expected := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700},{"id":3,"name":"Sony VAIO","price":1200},{"id":6,"name":"HP core i7","price":950}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("multiple WhereNotEqual expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereNotEqual("price", 850).
			WhereNotEqual("id", 2)
		expected := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":3,"name":"Sony VAIO","price":1200},{"id":6,"name":"HP core i7","price":950}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_WhereNil verifies that WhereNil matches items whose property is
// JSON null.
func TestJSONQ_WhereNil(t *testing.T) {
	jq := New().JSONString(jsonStr).
		From("vendor.items").
		WhereNil("id")
	expected := `[{"id":null,"name":"HP core i3 SSD","price":850}]`
	out := jq.Get()
	assertJSON(t, out, expected)
}
// TestJSONQ_WhereNotNil verifies that WhereNotNil excludes items whose
// property is JSON null.
func TestJSONQ_WhereNotNil(t *testing.T) {
	jq := New().JSONString(jsonStr).
		From("vendor.items").
		WhereNotNil("id")
	expected := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700},{"id":3,"name":"Sony VAIO","price":1200},{"id":4,"name":"Fujitsu","price":850},{"id":5,"key":2300,"name":"HP core i5","price":850},{"id":6,"name":"HP core i7","price":950}]`
	out := jq.Get()
	assertJSON(t, out, expected)
}
// TestJSONQ_WhereIn verifies set-membership filtering for matching and
// non-matching value sets.
func TestJSONQ_WhereIn(t *testing.T) {
	t.Run("WhereIn expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereIn("id", []int{1, 3, 5})
		expected := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":3,"name":"Sony VAIO","price":1200},{"id":5,"key":2300,"name":"HP core i5","price":850}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("WhereIn expecting empty result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereIn("id", []int{18, 39, 85})
		expected := `[]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_WhereNotIn verifies negative set-membership filtering.
func TestJSONQ_WhereNotIn(t *testing.T) {
	t.Run("WhereIn expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereNotIn("id", []int{1, 3, 5, 6})
		expected := `[{"id":2,"name":"MacBook Pro 15 inch retina","price":1700},{"id":4,"name":"Fujitsu","price":850},{"id":null,"name":"HP core i3 SSD","price":850}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("WhereIn expecting empty result", func(t *testing.T) {
		// Every price in the fixture is excluded, so nothing remains.
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereNotIn("price", []float64{850, 950, 1200, 1700, 1350})
		expected := `[]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_OrWhere verifies a single OrWhere condition against the
// fixture.
func TestJSONQ_OrWhere(t *testing.T) {
	t.Run("OrWhere expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			OrWhere("price", ">", 1200)
		expected := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_WhereStartsWith verifies prefix matching on a string property.
func TestJSONQ_WhereStartsWith(t *testing.T) {
	t.Run("WhereStartsWith expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereStartsWith("name", "Mac")
		expected := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("WhereStartsWith expecting empty result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereStartsWith("name", "xyz")
		expected := `[]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_WhereEndsWith verifies suffix matching on a string property.
func TestJSONQ_WhereEndsWith(t *testing.T) {
	t.Run("WhereStartsWith expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereEndsWith("name", "retina")
		expected := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("WhereStartsWith expecting empty result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereEndsWith("name", "xyz")
		expected := `[]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_WhereContains verifies substring matching; the mixed-case
// needle "RetinA" still matching shows the comparison is case-insensitive.
func TestJSONQ_WhereContains(t *testing.T) {
	t.Run("WhereContains expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereContains("name", "RetinA")
		expected := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("WhereContains expecting empty result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereContains("name", "xyz")
		expected := `[]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_WhereStrictContains verifies case-sensitive substring matching:
// "retina" matches while "RetinA" does not.
func TestJSONQ_WhereStrictContains(t *testing.T) {
	t.Run("WhereContains expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereStrictContains("name", "retina")
		expected := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("WhereContains expecting empty result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			WhereStrictContains("name", "RetinA")
		expected := `[]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_GroupBy verifies grouping items into a map keyed by the
// stringified property value.
func TestJSONQ_GroupBy(t *testing.T) {
	t.Run("WhereContains expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			GroupBy("price")
		expected := `{"1200":[{"id":3,"name":"Sony VAIO","price":1200}],"1350":[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350}],"1700":[{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}],"850":[{"id":4,"name":"Fujitsu","price":850},{"id":5,"key":2300,"name":"HP core i5","price":850},{"id":null,"name":"HP core i3 SSD","price":850}],"950":[{"id":6,"name":"HP core i7","price":950}]}`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_Sort verifies Sort on scalar arrays: default ascending,
// explicit "desc", and the error for more than one argument.
func TestJSONQ_Sort(t *testing.T) {
	t.Run("sorring array of string in ascending desc", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.names").
			Sort()
		expected := `["Abby","Jane Doe","Jerry","John Doe","Nicolas","Tom"]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("sorring array of float in descending order", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.prices").
			Sort("desc")
		expected := `[2400,2100,1200,400.87,150.1,89.9]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("passing two args in Sort expecting an error", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.prices").
			Sort("asc", "desc")
		jq.Get()
		if jq.Error() == nil {
			t.Error("expecting an error")
		}
	})
}
// TestJSONQ_SortBy verifies SortBy on arrays of objects: numeric and string
// keys, both directions, and argument-count errors (zero or more than two).
func TestJSONQ_SortBy(t *testing.T) {
	t.Run("sorring array of object by its key (price-float64) in ascending desc", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			SortBy("price")
		expected := `[{"id":null,"name":"HP core i3 SSD","price":850},{"id":4,"name":"Fujitsu","price":850},{"id":5,"key":2300,"name":"HP core i5","price":850},{"id":6,"name":"HP core i7","price":950},{"id":3,"name":"Sony VAIO","price":1200},{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("sorring array of object by its key (price-float64) in descending desc", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			SortBy("price", "desc")
		expected := `[{"id":2,"name":"MacBook Pro 15 inch retina","price":1700},{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":3,"name":"Sony VAIO","price":1200},{"id":6,"name":"HP core i7","price":950},{"id":4,"name":"Fujitsu","price":850},{"id":5,"key":2300,"name":"HP core i5","price":850},{"id":null,"name":"HP core i3 SSD","price":850}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("sorring array of object by its key (name-string) in ascending desc", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			SortBy("name")
		expected := `[{"id":4,"name":"Fujitsu","price":850},{"id":null,"name":"HP core i3 SSD","price":850},{"id":5,"key":2300,"name":"HP core i5","price":850},{"id":6,"name":"HP core i7","price":950},{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700},{"id":3,"name":"Sony VAIO","price":1200}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("sorring array of object by its key (name-string) in descending desc", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			SortBy("name", "desc")
		expected := `[{"id":3,"name":"Sony VAIO","price":1200},{"id":2,"name":"MacBook Pro 15 inch retina","price":1700},{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":6,"name":"HP core i7","price":950},{"id":5,"key":2300,"name":"HP core i5","price":850},{"id":null,"name":"HP core i3 SSD","price":850},{"id":4,"name":"Fujitsu","price":850}]`
		out := jq.Get()
		assertJSON(t, out, expected)
	})
	t.Run("passing no argument in SortBy expecting an error", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			SortBy()
		jq.Get()
		if jq.Error() == nil {
			t.Error("expecting an error")
		}
	})
	t.Run("passing more than 2 arguments in SortBy expecting an error", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			SortBy("name", "desc", "asc")
		jq.Get()
		if jq.Error() == nil {
			t.Error("expecting an error")
		}
	})
}
// TestJSONQ_Only verifies projecting each item down to the selected
// properties.
func TestJSONQ_Only(t *testing.T) {
	jq := New().JSONString(jsonStr).
		From("vendor.items").
		Only("id", "price")
	expected := `[{"id":1,"price":1350},{"id":2,"price":1700},{"id":3,"price":1200},{"id":4,"price":850},{"id":5,"price":850},{"id":6,"price":950},{"id":null,"price":850}]`
	out := jq.Get()
	assertJSON(t, out, expected)
}
// TestJSONQ_First verifies First returns the first matching item, or null
// when the filtered result is empty.
func TestJSONQ_First(t *testing.T) {
	t.Run("First expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items")
		expected := `{"id":1,"name":"MacBook Pro 13 inch retina","price":1350}`
		out := jq.First()
		assertJSON(t, out, expected)
	})
	t.Run("First expecting empty result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			Where("price", ">", 1800)
		expected := `null`
		out := jq.First()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_Last verifies Last returns the final matching item, or null
// when the filtered result is empty.
func TestJSONQ_Last(t *testing.T) {
	t.Run("Last expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items")
		expected := `{"id":null,"name":"HP core i3 SSD","price":850}`
		out := jq.Last()
		assertJSON(t, out, expected)
	})
	t.Run("Last expecting empty result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			Where("price", ">", 1800)
		expected := `null`
		out := jq.Last()
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_Nth verifies 1-based Nth access: positive and negative indices,
// out-of-range and zero errors, and null for non-array nodes.
func TestJSONQ_Nth(t *testing.T) {
	t.Run("Nth expecting result", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items")
		expected := `{"id":1,"name":"MacBook Pro 13 inch retina","price":1350}`
		out := jq.Nth(1)
		assertJSON(t, out, expected)
	})
	t.Run("Nth expecting empty result with an error", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			Where("price", ">", 1800)
		expected := `null`
		out := jq.Nth(1)
		assertJSON(t, out, expected)
		if jq.Error() == nil {
			t.Error("expecting an error for empty result nth value")
		}
	})
	t.Run("Nth expecting empty result with an error of index out of range", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items")
		expected := `null`
		out := jq.Nth(100)
		assertJSON(t, out, expected)
		if jq.Error() == nil {
			t.Error("expecting an error for empty result nth value")
		}
	})
	t.Run("Nth expecting result form last when providing -1", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items")
		expected := `{"id":null,"name":"HP core i3 SSD","price":850}`
		out := jq.Nth(-1)
		assertJSON(t, out, expected)
	})
	t.Run("Nth expecting error is provide 0", func(t *testing.T) {
		// Nth is 1-based; 0 is invalid.
		jq := New().JSONString(jsonStr).
			From("vendor.items").
			Where("price", ">", 1800)
		jq.Nth(0)
		if jq.Error() == nil {
			t.Error("expecting error")
		}
	})
	t.Run("Nth expecting empty result if the node is a map", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items.[0]")
		out := jq.Nth(0)
		expected := `null`
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_Find verifies direct path lookup of both top-level
// properties and dotted/indexed nested paths.
func TestJSONQ_Find(t *testing.T) {
	t.Run("Find expecting name computers", func(t *testing.T) {
		q := New().JSONString(jsonStr)
		want := `"computers"`
		got := q.Find("name")
		assertJSON(t, got, want)
	})
	t.Run("Find expecting a nested object", func(t *testing.T) {
		q := New().JSONString(jsonStr)
		want := `{"id":1,"name":"MacBook Pro 13 inch retina","price":1350}`
		got := q.Find("vendor.items.[0]")
		assertJSON(t, got, want)
	})
}
// TestJSONQ_Pluck verifies extracting a single property from each
// object in a list; unknown property names yield an empty list.
func TestJSONQ_Pluck(t *testing.T) {
	t.Run("Pluck expecting prices from list of objects", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items")
		want := `[1350,1700,1200,850,850,950,850]`
		got := q.Pluck("price")
		assertJSON(t, got, want)
	})
	t.Run("Pluck expecting empty list from list of objects, because of invalid property name", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items")
		want := `[]`
		got := q.Pluck("invalid_prop")
		assertJSON(t, got, want)
	})
}
// TestJSONQ_Count verifies Count over an array (element count), an
// object (key count), and a grouped result (group count).
func TestJSONQ_Count(t *testing.T) {
	t.Run("Count expecting a int number of total item of an arry", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items")
		want := `7`
		got := q.Count()
		assertJSON(t, got, want)
	})
	t.Run("Count expecting a int number of total item of an arry of objects", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items.[0]")
		want := `3`
		got := q.Count()
		assertJSON(t, got, want)
	})
	t.Run("Count expecting a int number of total item of an arry of groupped objects", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items").
			GroupBy("price")
		want := `5`
		got := q.Count()
		assertJSON(t, got, want)
	})
}
// TestJSONQ_Sum verifies Sum over plain arrays (no property argument
// allowed), arrays of objects (property required), and single objects.
//
// Fix: the final subtest previously reused the name of the preceding
// error-case subtest ("Sum expecting an error for not providing
// property for object") even though it asserts a successful result,
// which made `go test -run` selection ambiguous and failure output
// misleading. It is renamed to describe what it actually checks.
func TestJSONQ_Sum(t *testing.T) {
	t.Run("Sum expecting sum an arry", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.prices")
		out := jq.Sum()
		expected := `6340.87`
		assertJSON(t, out, expected)
	})
	t.Run("Sum expecting sum an arry of objects property", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items")
		out := jq.Sum("price")
		expected := `7750`
		assertJSON(t, out, expected)
	})
	t.Run("Sum expecting an error for providing property for arry", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.prices")
		jq.Sum("key")
		if jq.Error() == nil {
			t.Error("expecting: unnecessary property name for array")
		}
	})
	t.Run("Sum expecting an error for not providing property for arry of objects", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items")
		jq.Sum()
		if jq.Error() == nil {
			t.Error("expecting: property name can not be empty for object")
		}
	})
	t.Run("Sum expecting an error for not providing property for object", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items.[0]")
		jq.Sum()
		if jq.Error() == nil {
			t.Error("expecting: property name can not be empty for object")
		}
	})
	// Renamed from a duplicate of the previous subtest: this case
	// provides the property and expects a successful sum.
	t.Run("Sum expecting result providing property for object", func(t *testing.T) {
		jq := New().JSONString(jsonStr).
			From("vendor.items.[0]")
		out := jq.Sum("price")
		expected := `1350`
		assertJSON(t, out, expected)
	})
}
// TestJSONQ_Avg verifies Avg over a plain array and over a property of
// an array of objects.
func TestJSONQ_Avg(t *testing.T) {
	t.Run("Avg expecting average an arry", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.prices")
		want := `1056.8116666666667`
		got := q.Avg()
		assertJSON(t, got, want)
	})
	t.Run("Avg expecting average an arry of objects property", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items")
		want := `1107.142857142857`
		got := q.Avg("price")
		assertJSON(t, got, want)
	})
}
// TestJSONQ_Min verifies Min over a plain array and over a property of
// an array of objects.
func TestJSONQ_Min(t *testing.T) {
	t.Run("Min expecting min an arry", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.prices")
		want := `89.9`
		got := q.Min()
		assertJSON(t, got, want)
	})
	t.Run("Min expecting min an arry of objects property", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items")
		want := `850`
		got := q.Min("price")
		assertJSON(t, got, want)
	})
}
// TestJSONQ_Max verifies Max over a plain array and over a property of
// an array of objects.
func TestJSONQ_Max(t *testing.T) {
	t.Run("Max expecting max an arry", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.prices")
		want := `2400`
		got := q.Max()
		assertJSON(t, got, want)
	})
	t.Run("Max expecting max an arry of objects property", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items")
		want := `1700`
		got := q.Max("price")
		assertJSON(t, got, want)
	})
}
// TODO: Need to write some more combined query test
// TestJSONQ_CombinedWhereOrWhere verifies how Where and OrWhere
// compose: OrWhere widens the match set, and an OrWhere on an unknown
// key contributes no matches.
func TestJSONQ_CombinedWhereOrWhere(t *testing.T) {
	t.Run("combined Where with orWhere", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items").
			Where("id", "=", 1).
			OrWhere("name", "=", "Sony VAIO").
			Where("price", "=", 1200)
		want := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350},{"id":3,"name":"Sony VAIO","price":1200}]`
		got := q.Get()
		assertJSON(t, got, want)
	})
	t.Run("combined Where with orWhere containing invalid key", func(t *testing.T) {
		q := New().JSONString(jsonStr).
			From("vendor.items").
			Where("id", "=", 1).
			OrWhere("invalid_key", "=", "Sony VAIO")
		want := `[{"id":1,"name":"MacBook Pro 13 inch retina","price":1350}]`
		got := q.Get()
		assertJSON(t, got, want)
	})
}
|
package main
import (
"fmt"
"plugin"
)
// main loads the compiled arithmetic plugin and invokes its exported
// Add and Subtract functions, printing each result.
//
// Fix: the original used unchecked type assertions on the looked-up
// symbols, so a signature mismatch panicked with an opaque runtime
// message. Checked (comma-ok) assertions now produce a clear error.
// (Comments translated from Chinese.)
func main() {
	// Open the shared object from the plugin directory.
	p, err := plugin.Open("./plugin/plugin.so")
	if err != nil {
		panic(err)
	}

	// Look up the exported symbols. The names must exactly match the
	// plugin's exported function names (case-sensitive).
	addSym, err := p.Lookup("Add")
	if err != nil {
		panic(err)
	}
	subSym, err := p.Lookup("Subtract")
	if err != nil {
		panic(err)
	}

	// Assert the expected signatures with comma-ok so mismatches fail
	// with a descriptive message instead of a bare type-assertion panic.
	add, ok := addSym.(func(int, int) int)
	if !ok {
		panic("plugin symbol Add does not have signature func(int, int) int")
	}
	sub, ok := subSym.(func(int, int) int)
	if !ok {
		panic("plugin symbol Subtract does not have signature func(int, int) int")
	}

	// Run the plugin functions.
	sum := add(11, 2)
	fmt.Println(sum)
	subt := sub(11, 2)
	fmt.Println(subt)
}
|
package gcppubsub
import (
"fmt"
"time"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/pkg/errors"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/printer"
)
// DisplayMessage will parse a Read record and print (pretty) output to STDOUT.
// It renders the GCP Pub/Sub metadata (ID, publish time, delivery
// attempt, ordering key, plus any message attributes) as a table.
func (g *GCPPubSub) DisplayMessage(cliOpts *opts.CLIOptions, msg *records.ReadRecord) error {
	if err := validateReadRecord(msg); err != nil {
		return errors.Wrap(err, "unable to validate read record")
	}

	rec := msg.GetGcpPubsub()
	if rec == nil {
		return errors.New("BUG: record in message is nil")
	}

	// Fixed metadata rows first, then one row per message attribute.
	properties := [][]string{
		{"ID", rec.Id},
		{"Publish Time", time.Unix(rec.PublishTime, 0).Format(time.RFC3339)},
		{"Delivery Attempt", fmt.Sprintf("%d", rec.DeliveryAttempt)},
		{"Ordering Key", rec.OrderingKey},
	}
	for key, value := range rec.Attributes {
		properties = append(properties, []string{key, value})
	}

	printer.PrintTable(cliOpts, msg.Num, time.Unix(msg.ReceivedAtUnixTsUtc, 0), msg.Payload, properties)

	return nil
}
// DisplayError will parse an Error record and print (pretty) output to STDOUT.
// It delegates entirely to the shared default error printer.
func (g *GCPPubSub) DisplayError(msg *records.ErrorRecord) error {
	printer.DefaultDisplayError(msg)
	return nil
}
// validateReadRecord rejects a nil read record; any non-nil record is
// currently accepted.
func validateReadRecord(msg *records.ReadRecord) error {
	if msg != nil {
		return nil
	}
	return errors.New("msg cannot be nil")
}
|
package plantuml
import (
"io"
)
// Package is a renderable PlantUML `package "name" { ... }` grouping
// that nests other renderable elements.
type Package struct {
	name     string       // display name; escaped by escapeP at render time
	children []Renderable // elements rendered inside the package braces
}
// NewPackage creates an empty package with the given display name.
func NewPackage(name string) *Package {
	p := &Package{name: name}
	return p
}
// Add appends one or more child elements to the package and returns
// the package for chaining.
func (p *Package) Add(items ...Renderable) *Package {
	for _, item := range items {
		p.children = append(p.children, item)
	}
	return p
}
// Render writes the PlantUML package block — header, every child, and
// the closing brace — to wr. Child render errors abort immediately;
// errors from the wrapper's own writes are reported via w.Err.
func (p *Package) Render(wr io.Writer) error {
	w := strWriter{Writer: wr}
	w.Print("package \"" + escapeP(p.name) + "\" {\n")
	for _, c := range p.children {
		if err := c.Render(wr); err != nil {
			return err
		}
	}
	w.Print("}\n")
	return w.Err
}
|
package main
import (
"encoding/json"
"log"
"net/http"
"os"
"time"
"github.com/julienschmidt/httprouter"
"github.com/peter-mueller/sit-o-mat/httperror"
"github.com/peter-mueller/sit-o-mat/sitomat"
"github.com/peter-mueller/sit-o-mat/user"
"github.com/peter-mueller/sit-o-mat/workplace"
"fmt"
_ "gocloud.dev/docstore/gcpfirestore"
_ "gocloud.dev/docstore/memdocstore"
"errors"
)
// main wires the user, workplace, and sit-o-mat services together,
// registers all HTTP routes, and serves on SITOMAT_ADDR (default
// 127.0.0.1:8080) until ListenAndServe fails.
func main() {
	// Service/controller wiring: sitomat depends on both the user and
	// workplace services, so those are constructed first.
	userService := user.Service{}
	userController := user.Controller{Service: &userService}
	workplaceService := workplace.Service{}
	workplaceController := workplace.Controller{Service: &workplaceService}
	sitomatService := sitomat.Service{
		UserService:      &userService,
		WorkplaceService: &workplaceService,
	}
	sitomatController := sitomat.Controller{Service: &sitomatService}

	r := httprouter.New()
	r.HandleOPTIONS = true
	r.HandleMethodNotAllowed = true
	// Preflight OPTIONS requests get CORS headers and a 204.
	r.GlobalOPTIONS = http.HandlerFunc(corsHandler)
	r.POST("/user", userController.RegisterUser)
	r.GET("/user/:name", userController.GetUser)
	r.DELETE("/user/:name", userController.DeleteUser)
	r.PATCH("/user/:name", userController.PatchUser)
	r.POST("/workplace", workplaceController.CreateWorkplace)
	r.DELETE("/workplace/:name", workplaceController.DeleteWorkplace)
	r.GET("/workplace", workplaceController.GetAllWorkplaces)
	r.GET("/sitomat", sitomatController.ManualAssign)
	r.GET("/health", healthHandler)
	// Panics from handlers are translated to HTTP responses.
	r.PanicHandler = panicHandler

	fmt.Println("Starting Server")
	srv := &http.Server{
		// corsDecorator mirrors the request Origin on every response.
		Handler: corsDecorator{r},
		Addr:    lookupEnv("SITOMAT_ADDR", "127.0.0.1:8080"),
		// Good practice: enforce timeouts for servers you create!
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
	}
	fmt.Println(" Addr: ", srv.Addr)
	log.Fatal(srv.ListenAndServe())
}
// panicHandler translates values recovered from handler panics into
// HTTP responses: an httperror.Error becomes a JSON body with its own
// status code, anything else becomes a bare 500.
func panicHandler(w http.ResponseWriter, r *http.Request, data interface{}) {
	err, ok := data.(error)
	if !ok {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	var httpErr httperror.Error
	if !errors.As(err, &httpErr) {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(httpErr.Status)
	json.NewEncoder(w).Encode(httpErr)
}
// corsHandler answers CORS preflight (OPTIONS) requests: it sets the
// allow-methods/headers/origin headers and responds 204 No Content.
func corsHandler(w http.ResponseWriter, r *http.Request) {
	h := w.Header()
	h.Set("Access-Control-Allow-Methods", "GET, POST, DELETE, PATCH, PUT")
	h.Set("Access-Control-Allow-Headers", "authorization")
	h.Set("Access-Control-Allow-Origin", "*")
	w.WriteHeader(http.StatusNoContent)
}
// corsDecorator wraps the router so every response mirrors the
// request's Origin header as Access-Control-Allow-Origin.
type corsDecorator struct {
	router *httprouter.Router
}
// healthHandler answers the /health probe with a 200 and the JSON
// string "UP".
func healthHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode("UP")
}
// ServeHTTP echoes the request's Origin (when present) as
// Access-Control-Allow-Origin, then delegates to the wrapped router.
func (c corsDecorator) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	origin := r.Header.Get("Origin")
	if origin != "" {
		w.Header().Set("Access-Control-Allow-Origin", origin)
	}
	c.router.ServeHTTP(w, r)
}
// lookupEnv returns the value of the environment variable env, or
// alternative when it is unset (logging which default was used).
func lookupEnv(env string, alternative string) string {
	if value, ok := os.LookupEnv(env); ok {
		return value
	}
	log.Printf("Using default for %v: %v", env, alternative)
	return alternative
}
|
package x
// GENERATED BY XO. DO NOT EDIT.
import (
"errors"
"strings"
//"time"
"strconv"
"github.com/jmoiron/sqlx"
)
// (shortname .TableNameGo "err" "res" "sqlstr" "db" "XOLog") -}}//(schema .Schema .Table.TableName) -}}// .TableNameGo}}// PostKeys represents a row from 'sun.post_keys'.
// Manualy copy this to project
// PostKeys__ mirrors a row from 'sun.post_keys'. Per the note above,
// this copy exists to be pasted into the project manually; the live
// type used by the methods below is PostKeys (declared elsewhere).
type PostKeys__ struct {
	Id         int    `json:"Id"`         // Id - autoincrement primary key
	PostKeyStr string `json:"PostKeyStr"` // PostKeyStr -
	Used       int    `json:"Used"`       // Used -
	RandShard  int    `json:"RandShard"`  // RandShard -

	// xo fields: in-memory bookkeeping, never persisted
	_exists, _deleted bool
}
// Exists determines if the PostKeys exists in the database.
// It reports the in-memory flag maintained by Insert/Replace/Delete;
// it does not query the database.
func (pk *PostKeys) Exists() bool {
	return pk._exists
}
// Deleted provides information if the PostKeys has been deleted from the database.
// Like Exists, it reports the in-memory flag set by Delete, not a live query.
func (pk *PostKeys) Deleted() bool {
	return pk._deleted
}
// Insert inserts the PostKeys to the database.
// On success it stores the autoincrement id in pk.Id, marks the row as
// existing, and fires the OnPostKeys_AfterInsert hook. Inserting a row
// already marked as existing is an error.
func (pk *PostKeys) Insert(db XODB) error {
	var err error

	// if already exist, bail
	if pk._exists {
		return errors.New("insert failed: already exists")
	}

	// sql insert query, primary key provided by autoincrement
	const sqlstr = `INSERT INTO sun.post_keys (` +
		`PostKeyStr, Used, RandShard` +
		`) VALUES (` +
		`?, ?, ?` +
		`)`

	// run query (logged only when enabled for this table)
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, pk.PostKeyStr, pk.Used, pk.RandShard)
	}
	res, err := db.Exec(sqlstr, pk.PostKeyStr, pk.Used, pk.RandShard)
	if err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return err
	}

	// retrieve id
	id, err := res.LastInsertId()
	if err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return err
	}

	// set primary key and existence
	pk.Id = int(id)
	pk._exists = true

	OnPostKeys_AfterInsert(pk)

	return nil
}
// Replace upserts the PostKeys via MySQL REPLACE INTO: an existing row
// with the same key is deleted and re-inserted. Unlike Insert, it does
// not require pk._exists to be false. On success it stores the new
// autoincrement id, marks the row as existing, and fires the
// OnPostKeys_AfterInsert hook (the insert hook, since REPLACE
// re-inserts the row).
// NOTE(review): the comment previously said "Insert inserts ..." — a
// copy-paste of Insert's doc; corrected to describe Replace.
func (pk *PostKeys) Replace(db XODB) error {
	var err error

	// sql query
	const sqlstr = `REPLACE INTO sun.post_keys (` +
		`PostKeyStr, Used, RandShard` +
		`) VALUES (` +
		`?, ?, ?` +
		`)`

	// run query (logged only when enabled for this table)
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, pk.PostKeyStr, pk.Used, pk.RandShard)
	}
	res, err := db.Exec(sqlstr, pk.PostKeyStr, pk.Used, pk.RandShard)
	if err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return err
	}

	// retrieve id
	id, err := res.LastInsertId()
	if err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return err
	}

	// set primary key and existence
	pk.Id = int(id)
	pk._exists = true

	OnPostKeys_AfterInsert(pk)

	return nil
}
// Update updates the PostKeys in the database, matching on Id.
// On success it fires the OnPostKeys_AfterUpdate hook.
//
// Fixes relative to the generated version: XOLogErr was called
// unconditionally (logging a nil error after every successful update,
// unlike Insert/Replace/Delete which guard with err != nil), and the
// after-update hook fired even when Exec failed. Errors are now logged
// only when present, and the hook runs only on success.
func (pk *PostKeys) Update(db XODB) error {
	var err error

	// if doesn't exist, bail
	if !pk._exists {
		return errors.New("update failed: does not exist")
	}

	// if deleted, bail
	if pk._deleted {
		return errors.New("update failed: marked for deletion")
	}

	// sql query
	const sqlstr = `UPDATE sun.post_keys SET ` +
		`PostKeyStr = ?, Used = ?, RandShard = ?` +
		` WHERE Id = ?`

	// run query (logged only when enabled for this table)
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, pk.PostKeyStr, pk.Used, pk.RandShard, pk.Id)
	}
	_, err = db.Exec(sqlstr, pk.PostKeyStr, pk.Used, pk.RandShard, pk.Id)
	if err != nil {
		// log only real failures, matching Insert/Replace/Delete
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return err
	}

	// fire the hook only on success, consistent with the other writers
	OnPostKeys_AfterUpdate(pk)

	return nil
}
// Save saves the PostKeys to the database.
// Existing rows are updated; new rows go through Replace (REPLACE
// INTO), not Insert — note this upserts rather than failing on a
// conflicting key.
func (pk *PostKeys) Save(db XODB) error {
	if pk.Exists() {
		return pk.Update(db)
	}

	return pk.Replace(db)
}
// Delete deletes the PostKeys from the database by Id.
// Deleting a row that is not marked existing, or already marked
// deleted, is a silent no-op. On success the row is flagged deleted
// and the OnPostKeys_AfterDelete hook fires.
func (pk *PostKeys) Delete(db XODB) error {
	var err error

	// if doesn't exist, bail
	if !pk._exists {
		return nil
	}

	// if deleted, bail
	if pk._deleted {
		return nil
	}

	// sql query
	const sqlstr = `DELETE FROM sun.post_keys WHERE Id = ?`

	// run query (logged only when enabled for this table)
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, pk.Id)
	}
	_, err = db.Exec(sqlstr, pk.Id)
	if err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return err
	}

	// set deleted
	pk._deleted = true

	OnPostKeys_AfterDelete(pk)

	return nil
}
////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////// Querify gen - ME /////////////////////////////////////////
//.TableNameGo= table name
// _Deleter, _Updater
// orma types
// __PostKeys_Deleter accumulates WHERE clauses for a fluent DELETE
// against sun.post_keys.
type __PostKeys_Deleter struct {
	wheres      []whereClause // accumulated conditions + bound args
	whereSep    string        // joiner between conditions (" AND " / " OR ")
	dollarIndex int           // next $N index when not using MySQL '?'
	isMysql     bool          // placeholder dialect selector
}
// __PostKeys_Updater accumulates SET columns and WHERE clauses for a
// fluent UPDATE against sun.post_keys.
type __PostKeys_Updater struct {
	wheres []whereClause // accumulated conditions + bound args
	// updates map[string]interface{}
	updates     []updateCol // accumulated SET column/value pairs
	whereSep    string      // joiner between conditions (" AND " / " OR ")
	dollarIndex int         // next $N index when not using MySQL '?'
	isMysql     bool        // placeholder dialect selector
}
// __PostKeys_Selector accumulates the pieces of a fluent SELECT
// against sun.post_keys.
type __PostKeys_Selector struct {
	wheres      []whereClause // accumulated conditions + bound args
	selectCol   string        // column list, defaults to "*"
	whereSep    string        // joiner between conditions (" AND " / " OR ")
	orderBy     string        //" order by id desc //for ints
	limit       int           // LIMIT value; zero means unset
	offset      int           // OFFSET value; zero means unset
	dollarIndex int           // next $N index when not using MySQL '?'
	isMysql     bool          // placeholder dialect selector
}
// NewPostKeys_Deleter returns a deleter that joins conditions with
// AND and uses MySQL '?' placeholders.
func NewPostKeys_Deleter() *__PostKeys_Deleter {
	d := __PostKeys_Deleter{whereSep: " AND ", isMysql: true}
	return &d
}
// NewPostKeys_Updater returns an updater that joins conditions with
// AND and uses MySQL '?' placeholders.
func NewPostKeys_Updater() *__PostKeys_Updater {
	u := __PostKeys_Updater{whereSep: " AND ", isMysql: true}
	//u.updates = make(map[string]interface{},10)
	return &u
}
// NewPostKeys_Selector returns a selector for all columns ("*") that
// joins conditions with AND and uses MySQL '?' placeholders.
func NewPostKeys_Selector() *__PostKeys_Selector {
	u := __PostKeys_Selector{whereSep: " AND ", selectCol: "*", isMysql: true}
	return &u
}
/*/// mysql or cockroach ? or $1 handlers
func (m *__PostKeys_Selector)nextDollars(size int) string {
r := DollarsForSqlIn(size,m.dollarIndex,m.isMysql)
m.dollarIndex += size
return r
}
func (m *__PostKeys_Selector)nextDollar() string {
r := DollarsForSqlIn(1,m.dollarIndex,m.isMysql)
m.dollarIndex += 1
return r
}
*/
/////////////////////////////// Where for all /////////////////////////////
//// for ints all selector updater, deleter
/// mysql or cockroach ? or $1 handlers
// nextDollars returns `size` placeholders for an IN(...) list — '?'s
// for MySQL or $N sequences otherwise — advancing the dollar index.
func (m *__PostKeys_Deleter) nextDollars(size int) string {
	r := DollarsForSqlIn(size, m.dollarIndex, m.isMysql)
	m.dollarIndex += size
	return r
}

// nextDollar returns a single placeholder, advancing the dollar index.
func (m *__PostKeys_Deleter) nextDollar() string {
	r := DollarsForSqlIn(1, m.dollarIndex, m.isMysql)
	m.dollarIndex += 1
	return r
}
////////ints
// Generated fluent WHERE builders for the deleter. Each method appends
// one whereClause (SQL condition text plus its bound arguments) and
// returns the receiver so calls chain. Per int column there are
// In/Ins/NotIn list filters and Eq/NotEq/LT/LE/GT/GE comparisons.

// Or switches the joiner for ALL conditions from " AND " to " OR ".
func (u *__PostKeys_Deleter) Or() *__PostKeys_Deleter {
	u.whereSep = " OR "
	return u
}

// Id filters.

func (u *__PostKeys_Deleter) Id_In(ins []int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

// Id_Ins is the variadic form of Id_In.
func (u *__PostKeys_Deleter) Id_Ins(ins ...int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__PostKeys_Deleter) Id_NotIn(ins []int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__PostKeys_Deleter) Id_Eq(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Id_NotEq(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Id_LT(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Id_LE(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Id_GT(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Id_GE(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// Used filters.

func (u *__PostKeys_Deleter) Used_In(ins []int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Used IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

// Used_Ins is the variadic form of Used_In.
func (u *__PostKeys_Deleter) Used_Ins(ins ...int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Used IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__PostKeys_Deleter) Used_NotIn(ins []int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Used NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__PostKeys_Deleter) Used_Eq(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Used_NotEq(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Used_LT(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Used_LE(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Used_GT(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) Used_GE(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// RandShard filters.

func (u *__PostKeys_Deleter) RandShard_In(ins []int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " RandShard IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

// RandShard_Ins is the variadic form of RandShard_In.
func (u *__PostKeys_Deleter) RandShard_Ins(ins ...int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " RandShard IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__PostKeys_Deleter) RandShard_NotIn(ins []int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " RandShard NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__PostKeys_Deleter) RandShard_Eq(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) RandShard_NotEq(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) RandShard_LT(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) RandShard_LE(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) RandShard_GT(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Deleter) RandShard_GE(val int) *__PostKeys_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
/// mysql or cockroach ? or $1 handlers
// nextDollars returns `size` placeholders for an IN(...) list — '?'s
// for MySQL or $N sequences otherwise — advancing the dollar index.
func (m *__PostKeys_Updater) nextDollars(size int) string {
	r := DollarsForSqlIn(size, m.dollarIndex, m.isMysql)
	m.dollarIndex += size
	return r
}

// nextDollar returns a single placeholder, advancing the dollar index.
func (m *__PostKeys_Updater) nextDollar() string {
	r := DollarsForSqlIn(1, m.dollarIndex, m.isMysql)
	m.dollarIndex += 1
	return r
}
////////ints
// Generated fluent WHERE builders for the updater — identical shape to
// the deleter's: each method appends one whereClause (condition text
// plus bound args) and returns the receiver for chaining.

// Or switches the joiner for ALL conditions from " AND " to " OR ".
func (u *__PostKeys_Updater) Or() *__PostKeys_Updater {
	u.whereSep = " OR "
	return u
}

// Id filters.

func (u *__PostKeys_Updater) Id_In(ins []int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

// Id_Ins is the variadic form of Id_In.
func (u *__PostKeys_Updater) Id_Ins(ins ...int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__PostKeys_Updater) Id_NotIn(ins []int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__PostKeys_Updater) Id_Eq(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Id_NotEq(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Id_LT(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Id_LE(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Id_GT(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Id_GE(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// Used filters.

func (u *__PostKeys_Updater) Used_In(ins []int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Used IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

// Used_Ins is the variadic form of Used_In.
func (u *__PostKeys_Updater) Used_Ins(ins ...int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Used IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__PostKeys_Updater) Used_NotIn(ins []int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Used NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__PostKeys_Updater) Used_Eq(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Used_NotEq(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Used_LT(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Used_LE(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Used_GT(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) Used_GE(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Used >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// RandShard filters.

func (u *__PostKeys_Updater) RandShard_In(ins []int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " RandShard IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

// RandShard_Ins is the variadic form of RandShard_In.
func (u *__PostKeys_Updater) RandShard_Ins(ins ...int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " RandShard IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__PostKeys_Updater) RandShard_NotIn(ins []int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " RandShard NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__PostKeys_Updater) RandShard_Eq(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) RandShard_NotEq(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) RandShard_LT(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) RandShard_LE(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) RandShard_GT(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Updater) RandShard_GE(val int) *__PostKeys_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " RandShard >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
/// mysql or cockroach ? or $1 handlers
// nextDollars returns `size` placeholders for an IN(...) list — '?'s
// for MySQL or $N sequences otherwise — advancing the dollar index.
func (m *__PostKeys_Selector) nextDollars(size int) string {
	r := DollarsForSqlIn(size, m.dollarIndex, m.isMysql)
	m.dollarIndex += size
	return r
}

// nextDollar returns a single placeholder, advancing the dollar index.
func (m *__PostKeys_Selector) nextDollar() string {
	r := DollarsForSqlIn(1, m.dollarIndex, m.isMysql)
	m.dollarIndex += 1
	return r
}
////////ints
// Generated fluent WHERE builders for the selector — identical shape
// to the deleter's/updater's: each method appends one whereClause
// (condition text plus bound args) and returns the receiver.

// Or switches the joiner for ALL conditions from " AND " to " OR ".
func (u *__PostKeys_Selector) Or() *__PostKeys_Selector {
	u.whereSep = " OR "
	return u
}

// Id filters.

func (u *__PostKeys_Selector) Id_In(ins []int) *__PostKeys_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

// Id_Ins is the variadic form of Id_In.
func (u *__PostKeys_Selector) Id_Ins(ins ...int) *__PostKeys_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__PostKeys_Selector) Id_NotIn(ins []int) *__PostKeys_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__PostKeys_Selector) Id_Eq(val int) *__PostKeys_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Selector) Id_NotEq(val int) *__PostKeys_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Selector) Id_LT(val int) *__PostKeys_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Selector) Id_LE(val int) *__PostKeys_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Selector) Id_GT(val int) *__PostKeys_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__PostKeys_Selector) Id_GE(val int) *__PostKeys_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
// WHERE-clause builders on __PostKeys_Selector for the Used column.

// Used_In filters on Used being one of the given values.
func (s *__PostKeys_Selector) Used_In(ins []int) *__PostKeys_Selector {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	s.wheres = append(s.wheres, whereClause{
		condition: " Used IN(" + s.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return s
}

// Used_Ins is the variadic convenience form of Used_In.
func (s *__PostKeys_Selector) Used_Ins(ins ...int) *__PostKeys_Selector {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	s.wheres = append(s.wheres, whereClause{
		condition: " Used IN(" + s.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return s
}

// Used_NotIn filters on Used being none of the given values.
func (s *__PostKeys_Selector) Used_NotIn(ins []int) *__PostKeys_Selector {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	s.wheres = append(s.wheres, whereClause{
		condition: " Used NOT IN(" + s.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return s
}

// Used_Eq filters on Used = val.
func (s *__PostKeys_Selector) Used_Eq(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " Used = " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// Used_NotEq filters on Used != val.
func (s *__PostKeys_Selector) Used_NotEq(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " Used != " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// Used_LT filters on Used < val.
func (s *__PostKeys_Selector) Used_LT(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " Used < " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// Used_LE filters on Used <= val.
func (s *__PostKeys_Selector) Used_LE(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " Used <= " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// Used_GT filters on Used > val.
func (s *__PostKeys_Selector) Used_GT(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " Used > " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// Used_GE filters on Used >= val.
func (s *__PostKeys_Selector) Used_GE(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " Used >= " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}
// WHERE-clause builders on __PostKeys_Selector for the RandShard column.

// RandShard_In filters on RandShard being one of the given values.
func (s *__PostKeys_Selector) RandShard_In(ins []int) *__PostKeys_Selector {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	s.wheres = append(s.wheres, whereClause{
		condition: " RandShard IN(" + s.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return s
}

// RandShard_Ins is the variadic convenience form of RandShard_In.
func (s *__PostKeys_Selector) RandShard_Ins(ins ...int) *__PostKeys_Selector {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	s.wheres = append(s.wheres, whereClause{
		condition: " RandShard IN(" + s.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return s
}

// RandShard_NotIn filters on RandShard being none of the given values.
func (s *__PostKeys_Selector) RandShard_NotIn(ins []int) *__PostKeys_Selector {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	s.wheres = append(s.wheres, whereClause{
		condition: " RandShard NOT IN(" + s.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return s
}

// RandShard_Eq filters on RandShard = val.
func (s *__PostKeys_Selector) RandShard_Eq(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " RandShard = " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// RandShard_NotEq filters on RandShard != val.
func (s *__PostKeys_Selector) RandShard_NotEq(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " RandShard != " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// RandShard_LT filters on RandShard < val.
func (s *__PostKeys_Selector) RandShard_LT(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " RandShard < " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// RandShard_LE filters on RandShard <= val.
func (s *__PostKeys_Selector) RandShard_LE(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " RandShard <= " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// RandShard_GT filters on RandShard > val.
func (s *__PostKeys_Selector) RandShard_GT(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " RandShard > " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// RandShard_GE filters on RandShard >= val.
func (s *__PostKeys_Selector) RandShard_GE(val int) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " RandShard >= " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}
// WHERE-clause builders on __PostKeys_Deleter for the string column
// PostKeyStr (string columns get IN/NOT IN/LIKE/=/!= only).

// PostKeyStr_In filters on PostKeyStr being one of the given values.
func (d *__PostKeys_Deleter) PostKeyStr_In(ins []string) *__PostKeys_Deleter {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	d.wheres = append(d.wheres, whereClause{
		condition: " PostKeyStr IN(" + d.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return d
}

// PostKeyStr_NotIn filters on PostKeyStr being none of the given values.
func (d *__PostKeys_Deleter) PostKeyStr_NotIn(ins []string) *__PostKeys_Deleter {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	d.wheres = append(d.wheres, whereClause{
		condition: " PostKeyStr NOT IN(" + d.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return d
}

// PostKeyStr_Like filters with a SQL LIKE pattern; the caller supplies
// wildcards, e.g. PostKeyStr_Like("hamid%").
func (d *__PostKeys_Deleter) PostKeyStr_Like(val string) *__PostKeys_Deleter {
	d.wheres = append(d.wheres, whereClause{
		condition: " PostKeyStr LIKE " + d.nextDollar(),
		args:      []interface{}{val},
	})
	return d
}

// PostKeyStr_Eq filters on PostKeyStr = val.
func (d *__PostKeys_Deleter) PostKeyStr_Eq(val string) *__PostKeys_Deleter {
	d.wheres = append(d.wheres, whereClause{
		condition: " PostKeyStr = " + d.nextDollar(),
		args:      []interface{}{val},
	})
	return d
}

// PostKeyStr_NotEq filters on PostKeyStr != val.
func (d *__PostKeys_Deleter) PostKeyStr_NotEq(val string) *__PostKeys_Deleter {
	d.wheres = append(d.wheres, whereClause{
		condition: " PostKeyStr != " + d.nextDollar(),
		args:      []interface{}{val},
	})
	return d
}
// WHERE-clause builders on __PostKeys_Updater for the string column
// PostKeyStr.

// PostKeyStr_In filters on PostKeyStr being one of the given values.
func (u *__PostKeys_Updater) PostKeyStr_In(ins []string) *__PostKeys_Updater {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	u.wheres = append(u.wheres, whereClause{
		condition: " PostKeyStr IN(" + u.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return u
}

// PostKeyStr_NotIn filters on PostKeyStr being none of the given values.
func (u *__PostKeys_Updater) PostKeyStr_NotIn(ins []string) *__PostKeys_Updater {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	u.wheres = append(u.wheres, whereClause{
		condition: " PostKeyStr NOT IN(" + u.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return u
}

// PostKeyStr_Like filters with a SQL LIKE pattern; the caller supplies
// wildcards, e.g. PostKeyStr_Like("hamid%").
func (u *__PostKeys_Updater) PostKeyStr_Like(val string) *__PostKeys_Updater {
	u.wheres = append(u.wheres, whereClause{
		condition: " PostKeyStr LIKE " + u.nextDollar(),
		args:      []interface{}{val},
	})
	return u
}

// PostKeyStr_Eq filters on PostKeyStr = val.
func (u *__PostKeys_Updater) PostKeyStr_Eq(val string) *__PostKeys_Updater {
	u.wheres = append(u.wheres, whereClause{
		condition: " PostKeyStr = " + u.nextDollar(),
		args:      []interface{}{val},
	})
	return u
}

// PostKeyStr_NotEq filters on PostKeyStr != val.
func (u *__PostKeys_Updater) PostKeyStr_NotEq(val string) *__PostKeys_Updater {
	u.wheres = append(u.wheres, whereClause{
		condition: " PostKeyStr != " + u.nextDollar(),
		args:      []interface{}{val},
	})
	return u
}
// WHERE-clause builders on __PostKeys_Selector for the string column
// PostKeyStr.

// PostKeyStr_In filters on PostKeyStr being one of the given values.
func (s *__PostKeys_Selector) PostKeyStr_In(ins []string) *__PostKeys_Selector {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	s.wheres = append(s.wheres, whereClause{
		condition: " PostKeyStr IN(" + s.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return s
}

// PostKeyStr_NotIn filters on PostKeyStr being none of the given values.
func (s *__PostKeys_Selector) PostKeyStr_NotIn(ins []string) *__PostKeys_Selector {
	var args []interface{}
	for _, v := range ins {
		args = append(args, v)
	}
	s.wheres = append(s.wheres, whereClause{
		condition: " PostKeyStr NOT IN(" + s.nextDollars(len(ins)) + ") ",
		args:      args,
	})
	return s
}

// PostKeyStr_Like filters with a SQL LIKE pattern; the caller supplies
// wildcards, e.g. PostKeyStr_Like("hamid%").
func (s *__PostKeys_Selector) PostKeyStr_Like(val string) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " PostKeyStr LIKE " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// PostKeyStr_Eq filters on PostKeyStr = val.
func (s *__PostKeys_Selector) PostKeyStr_Eq(val string) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " PostKeyStr = " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}

// PostKeyStr_NotEq filters on PostKeyStr != val.
func (s *__PostKeys_Selector) PostKeyStr_NotEq(val string) *__PostKeys_Selector {
	s.wheres = append(s.wheres, whereClause{
		condition: " PostKeyStr != " + s.nextDollar(),
		args:      []interface{}{val},
	})
	return s
}
// End of the WHERE builders for selector / updater / deleter.
/////////////////////////////// Updater /////////////////////////////

// Id queues an assignment of the Id column for the next Update call.
func (u *__PostKeys_Updater) Id(newVal int) *__PostKeys_Updater {
	u.updates = append(u.updates, updateCol{" Id = " + u.nextDollar(), newVal})
	return u
}
// Id_Increment queues a relative change of the Id column: a positive
// count emits "Id = Id + ?" bound with count, a negative count emits
// "Id = Id - ?" bound with the positive magnitude. A count of 0 queues
// nothing.
func (u *__PostKeys_Updater) Id_Increment(count int) *__PostKeys_Updater {
	if count > 0 {
		up := updateCol{" Id = Id+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		// Bind the positive magnitude: the SQL already subtracts, so
		// binding the raw negative count would compute Id - (-n) = Id + n
		// and increment instead of decrement.
		up := updateCol{" Id = Id- " + u.nextDollar(), -count}
		u.updates = append(u.updates, up)
	}
	return u
}
// PostKeyStr queues an assignment of the PostKeyStr column for the next
// Update call.
func (u *__PostKeys_Updater) PostKeyStr(newVal string) *__PostKeys_Updater {
	u.updates = append(u.updates, updateCol{"PostKeyStr = " + u.nextDollar(), newVal})
	return u
}
// Used queues an assignment of the Used column for the next Update call.
func (u *__PostKeys_Updater) Used(newVal int) *__PostKeys_Updater {
	u.updates = append(u.updates, updateCol{" Used = " + u.nextDollar(), newVal})
	return u
}
// Used_Increment queues a relative change of the Used column: a positive
// count emits "Used = Used + ?" bound with count, a negative count emits
// "Used = Used - ?" bound with the positive magnitude. A count of 0
// queues nothing.
func (u *__PostKeys_Updater) Used_Increment(count int) *__PostKeys_Updater {
	if count > 0 {
		up := updateCol{" Used = Used+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		// Bind the positive magnitude: the SQL already subtracts, so
		// binding the raw negative count would compute Used - (-n) and
		// increment instead of decrement.
		up := updateCol{" Used = Used- " + u.nextDollar(), -count}
		u.updates = append(u.updates, up)
	}
	return u
}
// RandShard queues an assignment of the RandShard column for the next
// Update call.
func (u *__PostKeys_Updater) RandShard(newVal int) *__PostKeys_Updater {
	u.updates = append(u.updates, updateCol{" RandShard = " + u.nextDollar(), newVal})
	return u
}
// RandShard_Increment queues a relative change of the RandShard column:
// a positive count emits "RandShard = RandShard + ?" bound with count,
// a negative count emits "RandShard = RandShard - ?" bound with the
// positive magnitude. A count of 0 queues nothing.
func (u *__PostKeys_Updater) RandShard_Increment(count int) *__PostKeys_Updater {
	if count > 0 {
		up := updateCol{" RandShard = RandShard+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		// Bind the positive magnitude: the SQL already subtracts, so
		// binding the raw negative count would compute RandShard - (-n)
		// and increment instead of decrement.
		up := updateCol{" RandShard = RandShard- " + u.nextDollar(), -count}
		u.updates = append(u.updates, up)
	}
	return u
}
/////////////////////////////////////////////////////////////////////
/////////////////////// Selector ///////////////////////////////////
// Select_* pick a single projected column and pair with GetString(),
// GetStringSlice(), GetInt() or GetIntSlice(). Each OrderBy_* call
// overwrites u.orderBy, so only the last call takes effect.

// OrderBy_Id_Desc sorts results by Id, descending.
func (u *__PostKeys_Selector) OrderBy_Id_Desc() *__PostKeys_Selector {
	u.orderBy = " ORDER BY Id DESC "
	return u
}

// OrderBy_Id_Asc sorts results by Id, ascending.
func (u *__PostKeys_Selector) OrderBy_Id_Asc() *__PostKeys_Selector {
	u.orderBy = " ORDER BY Id ASC "
	return u
}

// Select_Id projects only the Id column.
func (u *__PostKeys_Selector) Select_Id() *__PostKeys_Selector {
	u.selectCol = "Id"
	return u
}

// OrderBy_PostKeyStr_Desc sorts results by PostKeyStr, descending.
func (u *__PostKeys_Selector) OrderBy_PostKeyStr_Desc() *__PostKeys_Selector {
	u.orderBy = " ORDER BY PostKeyStr DESC "
	return u
}

// OrderBy_PostKeyStr_Asc sorts results by PostKeyStr, ascending.
func (u *__PostKeys_Selector) OrderBy_PostKeyStr_Asc() *__PostKeys_Selector {
	u.orderBy = " ORDER BY PostKeyStr ASC "
	return u
}

// Select_PostKeyStr projects only the PostKeyStr column.
func (u *__PostKeys_Selector) Select_PostKeyStr() *__PostKeys_Selector {
	u.selectCol = "PostKeyStr"
	return u
}

// OrderBy_Used_Desc sorts results by Used, descending.
func (u *__PostKeys_Selector) OrderBy_Used_Desc() *__PostKeys_Selector {
	u.orderBy = " ORDER BY Used DESC "
	return u
}

// OrderBy_Used_Asc sorts results by Used, ascending.
func (u *__PostKeys_Selector) OrderBy_Used_Asc() *__PostKeys_Selector {
	u.orderBy = " ORDER BY Used ASC "
	return u
}

// Select_Used projects only the Used column.
func (u *__PostKeys_Selector) Select_Used() *__PostKeys_Selector {
	u.selectCol = "Used"
	return u
}

// OrderBy_RandShard_Desc sorts results by RandShard, descending.
func (u *__PostKeys_Selector) OrderBy_RandShard_Desc() *__PostKeys_Selector {
	u.orderBy = " ORDER BY RandShard DESC "
	return u
}

// OrderBy_RandShard_Asc sorts results by RandShard, ascending.
func (u *__PostKeys_Selector) OrderBy_RandShard_Asc() *__PostKeys_Selector {
	u.orderBy = " ORDER BY RandShard ASC "
	return u
}

// Select_RandShard projects only the RandShard column.
func (u *__PostKeys_Selector) Select_RandShard() *__PostKeys_Selector {
	u.selectCol = "RandShard"
	return u
}

// Limit caps the number of returned rows; 0 (the default) means no LIMIT
// clause is emitted.
func (u *__PostKeys_Selector) Limit(num int) *__PostKeys_Selector {
	u.limit = num
	return u
}

// Offset skips the first num rows; 0 (the default) means no OFFSET
// clause is emitted.
func (u *__PostKeys_Selector) Offset(num int) *__PostKeys_Selector {
	u.offset = num
	return u
}

// Order_Rand returns rows in random order. NOTE(review): RAND() is
// MySQL syntax — confirm before using against Postgres/CockroachDB,
// which use random().
func (u *__PostKeys_Selector) Order_Rand() *__PostKeys_Selector {
	u.orderBy = " ORDER BY RAND() "
	return u
}
///////////////////////// Queryer Selector //////////////////////////////////

// _stoSql renders the accumulated selection state (projection, WHERE
// clauses, ordering, limit/offset) into a SQL string plus its bound
// arguments.
func (s *__PostKeys_Selector) _stoSql() (string, []interface{}) {
	whereSQL, whereArgs := whereClusesToSql(s.wheres, s.whereSep)
	q := "SELECT " + s.selectCol + " FROM sun.post_keys"
	if len(strings.Trim(whereSQL, " ")) > 0 {
		q += " WHERE " + whereSQL
	}
	// orderBy is either empty or already formatted with spaces.
	q += s.orderBy
	if s.limit != 0 {
		q += " LIMIT " + strconv.Itoa(s.limit)
	}
	if s.offset != 0 {
		q += " OFFSET " + strconv.Itoa(s.offset)
	}
	return q, whereArgs
}
// GetRow runs the built query and scans the first matching row into a
// new PostKeys, marking it as persisted and firing the load-one hook.
// Database errors are returned unchanged.
func (s *__PostKeys_Selector) GetRow(db *sqlx.DB) (*PostKeys, error) {
	sqlstr, args := s._stoSql()
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, args)
	}
	row := &PostKeys{}
	if err := db.Get(row, sqlstr, args...); err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return nil, err
	}
	row._exists = true
	OnPostKeys_LoadOne(row)
	return row, nil
}
// GetRows runs the built query and returns all matching rows, each
// marked as persisted, after firing the load-many hook.
func (s *__PostKeys_Selector) GetRows(db *sqlx.DB) ([]*PostKeys, error) {
	sqlstr, args := s._stoSql()
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, args)
	}
	var rows []*PostKeys
	if err := db.Unsafe().Select(&rows, sqlstr, args...); err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return nil, err
	}
	for _, r := range rows {
		r._exists = true
	}
	OnPostKeys_LoadMany(rows)
	return rows, nil
}
// GetRows2 is like GetRows but returns rows by value.
//
// Deprecated: use GetRows instead.
func (s *__PostKeys_Selector) GetRows2(db *sqlx.DB) ([]PostKeys, error) {
	sqlstr, args := s._stoSql()
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, args)
	}
	var rows []*PostKeys
	if err := db.Unsafe().Select(&rows, sqlstr, args...); err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return nil, err
	}
	for _, r := range rows {
		r._exists = true
	}
	OnPostKeys_LoadMany(rows)
	// Dereference into a value slice for the legacy return type.
	out := make([]PostKeys, len(rows))
	for i, r := range rows {
		out[i] = *r
	}
	return out, nil
}
// GetString runs the built query and scans the single selected column of
// the first row into a string (pair with Select_PostKeyStr).
func (s *__PostKeys_Selector) GetString(db *sqlx.DB) (string, error) {
	sqlstr, args := s._stoSql()
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, args)
	}
	var res string
	if err := db.Get(&res, sqlstr, args...); err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return "", err
	}
	return res, nil
}
// GetStringSlice runs the built query and scans the single selected
// column of every row into a []string.
func (s *__PostKeys_Selector) GetStringSlice(db *sqlx.DB) ([]string, error) {
	sqlstr, args := s._stoSql()
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, args)
	}
	var rows []string
	if err := db.Select(&rows, sqlstr, args...); err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return nil, err
	}
	return rows, nil
}
// GetIntSlice runs the built query and scans the single selected column
// of every row into a []int.
func (s *__PostKeys_Selector) GetIntSlice(db *sqlx.DB) ([]int, error) {
	sqlstr, args := s._stoSql()
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, args)
	}
	var rows []int
	if err := db.Select(&rows, sqlstr, args...); err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return nil, err
	}
	return rows, nil
}
// GetInt runs the built query and scans the single selected column of
// the first row into an int.
func (s *__PostKeys_Selector) GetInt(db *sqlx.DB) (int, error) {
	sqlstr, args := s._stoSql()
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, args)
	}
	var res int
	if err := db.Get(&res, sqlstr, args...); err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return 0, err
	}
	return res, nil
}
///////////////////////// Queryer Update Delete //////////////////////////////////

// Update executes the UPDATE built from the queued column assignments
// and WHERE clauses, returning the number of affected rows. Bound
// arguments are ordered SET values first, then WHERE values, matching
// placeholder order in the statement.
func (u *__PostKeys_Updater) Update(db XODB) (int, error) {
	cols := make([]string, 0, len(u.updates))
	args := make([]interface{}, 0, len(u.updates))
	for _, up := range u.updates {
		cols = append(cols, up.col)
		args = append(args, up.val)
	}
	whereSQL, whereArgs := whereClusesToSql(u.wheres, u.whereSep)
	args = append(args, whereArgs...)
	sqlstr := `UPDATE sun.post_keys SET ` + strings.Join(cols, ",")
	if len(strings.Trim(whereSQL, " ")) > 0 {
		sqlstr += " WHERE " + whereSQL
	}
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, args)
	}
	res, err := db.Exec(sqlstr, args...)
	if err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return 0, err
	}
	num, err := res.RowsAffected()
	if err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return 0, err
	}
	return int(num), nil
}
// Delete executes the DELETE built from the accumulated WHERE clauses
// and returns the number of rows removed.
//
// NOTE(review): with zero WHERE clauses the SQL ends in a bare "WHERE"
// and the database rejects it — presumably a deliberate guard against
// accidental full-table deletes; confirm before relying on it.
func (d *__PostKeys_Deleter) Delete(db XODB) (int, error) {
	conds := make([]string, 0, len(d.wheres))
	var args []interface{}
	for _, w := range d.wheres {
		conds = append(conds, w.condition)
		args = append(args, w.args...)
	}
	sqlstr := "DELETE FROM sun.post_keys WHERE " + strings.Join(conds, d.whereSep)
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, args)
	}
	res, err := db.Exec(sqlstr, args...)
	if err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return 0, err
	}
	num, err := res.RowsAffected()
	if err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return 0, err
	}
	return int(num), nil
}
///////////////////////// Mass insert - replace for PostKeys ////////////////

// MassInsert_PostKeys inserts all rows with a single multi-row INSERT
// statement. An empty slice is rejected because a zero-row VALUES list
// is invalid SQL.
func MassInsert_PostKeys(rows []PostKeys, db XODB) error {
	if len(rows) == 0 {
		return errors.New("rows slice should not be empty - inserted nothing")
	}
	ln := len(rows)
	// One "(?,?,?)" placeholder group per row; the trailing comma is
	// trimmed below.
	insVals_ := strings.Repeat("(?,?,?),", ln)
	insVals := insVals_[0 : len(insVals_)-1]
	sqlstr := "INSERT INTO sun.post_keys (" +
		"PostKeyStr, Used, RandShard" +
		") VALUES " + insVals
	// 3 bound values per row (the previous capacity of ln*5 claimed
	// "5 fields" and over-allocated).
	vals := make([]interface{}, 0, ln*3)
	for _, row := range rows {
		vals = append(vals, row.PostKeyStr)
		vals = append(vals, row.Used)
		vals = append(vals, row.RandShard)
	}
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, " MassInsert len = ", ln, vals)
	}
	if _, err := db.Exec(sqlstr, vals...); err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return err
	}
	return nil
}
// MassReplace_PostKeys replaces all rows with a single multi-row
// REPLACE statement. An empty slice is rejected explicitly: previously
// strings.Repeat produced an empty placeholder string and the
// subsequent insVals_[0 : len-1] slice expression panicked with a
// slice-bounds error (MassInsert_PostKeys already had this guard;
// MassReplace did not).
func MassReplace_PostKeys(rows []PostKeys, db XODB) error {
	if len(rows) == 0 {
		return errors.New("rows slice should not be empty - replaced nothing")
	}
	ln := len(rows)
	// One "(?,?,?)" placeholder group per row; the trailing comma is
	// trimmed below.
	insVals_ := strings.Repeat("(?,?,?),", ln)
	insVals := insVals_[0 : len(insVals_)-1]
	sqlstr := "REPLACE INTO sun.post_keys (" +
		"PostKeyStr, Used, RandShard" +
		") VALUES " + insVals
	// 3 bound values per row (the previous capacity of ln*5 claimed
	// "5 fields" and over-allocated).
	vals := make([]interface{}, 0, ln*3)
	for _, row := range rows {
		vals = append(vals, row.PostKeyStr)
		vals = append(vals, row.Used)
		vals = append(vals, row.RandShard)
	}
	if LogTableSqlReq.PostKeys {
		XOLog(sqlstr, " MassReplace len = ", ln, vals)
	}
	if _, err := db.Exec(sqlstr, vals...); err != nil {
		if LogTableSqlReq.PostKeys {
			XOLogErr(err)
		}
		return err
	}
	return nil
}
//////////////////// Play ///////////////////////////////
//
//
//
//
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tbtree
import (
"encoding/binary"
"errors"
"io"
"sync"
)
// ErrNoMoreEntries signals that iteration has been exhausted.
var ErrNoMoreEntries = errors.New("no more entries")

// ErrReadersNotClosed is returned by Snapshot.Close while readers opened
// from the snapshot are still registered.
var ErrReadersNotClosed = errors.New("readers not closed")

// Node type tags written as the first byte of every serialized node.
const (
	InnerNodeType = iota
	LeafNodeType
)
// Snapshot is a point-in-time view of a TBtree. All reads go through the
// captured root node; every method is serialized by mutex. Readers opened
// from the snapshot must be closed before the snapshot itself can close.
type Snapshot struct {
	t *TBtree
	id uint64
	root node
	// readers tracks open readers by id so Close can refuse while any
	// remain.
	readers map[int]io.Closer
	// maxReaderID is the next reader id to assign.
	maxReaderID int
	closed bool
	mutex sync.Mutex
}

// Get returns the value stored under key via the snapshot's root.
// ts and hc are the extra values produced by node.get — presumably the
// entry timestamp and history count; confirm against the node interface.
func (s *Snapshot) Get(key []byte) (value []byte, ts uint64, hc uint64, err error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.closed {
		return nil, 0, 0, ErrAlreadyClosed
	}
	if key == nil {
		return nil, 0, 0, ErrIllegalArguments
	}
	return s.root.get(key)
}

// History returns timestamps recorded for key, starting at offset, in
// ascending or descending order, capped at limit (which must be >= 1).
func (s *Snapshot) History(key []byte, offset uint64, descOrder bool, limit int) (tss []uint64, err error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.closed {
		return nil, ErrAlreadyClosed
	}
	if key == nil {
		return nil, ErrIllegalArguments
	}
	if limit < 1 {
		return nil, ErrIllegalArguments
	}
	return s.root.history(key, offset, descOrder, limit)
}

// Ts returns the timestamp of the snapshot's root node.
func (s *Snapshot) Ts() uint64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return s.root.ts()
}

// NewHistoryReader opens a history reader over the snapshot and
// registers it so Close can detect it while it is still open.
func (s *Snapshot) NewHistoryReader(spec *HistoryReaderSpec) (*HistoryReader, error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.closed {
		return nil, ErrAlreadyClosed
	}
	reader, err := newHistoryReader(s.maxReaderID, s, spec)
	if err != nil {
		return nil, err
	}
	s.readers[reader.id] = reader
	s.maxReaderID++
	return reader, nil
}

// NewReader opens a key reader over the snapshot. When no seek key is
// given but a prefix is, the seek key is derived from the prefix:
// padded with 0xFF for descending scans (so every key sharing the
// prefix sorts before it) or the prefix itself for ascending scans.
func (s *Snapshot) NewReader(spec *ReaderSpec) (r *Reader, err error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.closed {
		return nil, ErrAlreadyClosed
	}
	if spec == nil || len(spec.SeekKey) > s.t.maxKeyLen || len(spec.Prefix) > s.t.maxKeyLen {
		return nil, ErrIllegalArguments
	}
	seekKey := spec.SeekKey
	inclusiveSeek := spec.InclusiveSeek
	// Automatically set seekKey based on prefixKey (when seekKey is not specified)
	if len(seekKey) == 0 && len(spec.Prefix) > 0 {
		inclusiveSeek = true
		if spec.DescOrder {
			// Initial key is padded so to cover all keys with provided prefix
			seekKey = make([]byte, s.t.maxKeyLen)
			copy(seekKey, spec.Prefix)
			for i := len(spec.Prefix); i < s.t.maxKeyLen; i++ {
				seekKey[i] = 0xFF
			}
		} else {
			seekKey = make([]byte, len(spec.Prefix))
			copy(seekKey, spec.Prefix)
		}
	}
	r = &Reader{
		snapshot: s,
		id: s.maxReaderID,
		seekKey: seekKey,
		prefix: spec.Prefix,
		inclusiveSeek: inclusiveSeek,
		descOrder: spec.DescOrder,
		closed: false,
	}
	s.readers[r.id] = r
	s.maxReaderID++
	return r, nil
}

// closedReader unregisters a reader; called by readers when they close.
func (s *Snapshot) closedReader(id int) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	delete(s.readers, id)
	return nil
}

// Close releases the snapshot back to its TBtree. It fails if the
// snapshot is already closed or any readers remain open.
func (s *Snapshot) Close() error {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.closed {
		return ErrAlreadyClosed
	}
	if len(s.readers) > 0 {
		return ErrReadersNotClosed
	}
	err := s.t.snapshotClosed(s)
	if err != nil {
		return err
	}
	s.closed = true
	return nil
}

// WriteTo serializes the snapshot's tree: node data goes to nw, history
// data to hw, according to writeOpts. Note it does not check s.closed.
func (s *Snapshot) WriteTo(nw, hw io.Writer, writeOpts *WriteOpts) (nOff int64, wN, wH int64, err error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return s.root.writeTo(nw, hw, writeOpts)
}
// writeTo serializes this inner node to nw, recursively writing its
// children first so their node-log offsets are known when the parent's
// child references are encoded. History data produced by descendants
// goes to hw. It returns the node's offset in the node log plus the
// number of bytes appended to the node log (wN) and history log (wH).
func (n *innerNode) writeTo(nw, hw io.Writer, writeOpts *WriteOpts) (nOff int64, wN, wH int64, err error) {
	if writeOpts.OnlyMutated && !n.mutated() {
		// Unchanged node: it is already persisted at n.off.
		return n.off, 0, 0, nil
	}
	var cnw, chw int64
	offsets := make([]int64, len(n.nodes))
	for i, c := range n.nodes {
		wopts := &WriteOpts{
			OnlyMutated:    writeOpts.OnlyMutated,
			BaseNLogOffset: writeOpts.BaseNLogOffset + cnw,
			BaseHLogOffset: writeOpts.BaseHLogOffset + chw,
			commitLog:      writeOpts.commitLog,
		}
		no, wn, wh, err := c.writeTo(nw, hw, wopts)
		if err != nil {
			return 0, wn, wh, err
		}
		offsets[i] = no
		cnw += wn
		chw += wh
	}
	size := n.size()
	buf := make([]byte, size)
	bi := 0
	buf[bi] = InnerNodeType
	bi++
	binary.BigEndian.PutUint32(buf[bi:], uint32(size)) // Size
	bi += 4
	binary.BigEndian.PutUint32(buf[bi:], uint32(len(n.nodes)))
	bi += 4
	for i, c := range n.nodes {
		// Renamed from "n" — the original shadowed the method receiver.
		written := writeNodeRefToWithOffset(c, offsets[i], buf[bi:])
		bi += written
	}
	wn, err := nw.Write(buf[:bi])
	if err != nil {
		return 0, int64(wn), chw, err
	}
	wN = cnw + int64(size)
	nOff = writeOpts.BaseNLogOffset + cnw
	if writeOpts.commitLog {
		n.off = writeOpts.BaseNLogOffset + cnw
		n.mut = false
		// Replace in-memory children with lightweight refs now that
		// their offsets are durable, then cache this node.
		nodes := make([]node, len(n.nodes))
		for i, c := range n.nodes {
			nodes[i] = &nodeRef{
				t:       n.t,
				_minKey: c.minKey(),
				_maxKey: c.maxKey(),
				_ts:     c.ts(),
				_size:   c.size(),
				off:     c.offset(),
			}
		}
		n.nodes = nodes
		n.t.cachePut(n)
	}
	return nOff, wN, chw, nil
}
// writeTo serializes this leaf to nw and its per-key timestamp history
// to hw. Layout per leaf: type tag, size, entry count, then for each
// entry: key len/bytes, value len/bytes, ts, history offset, history
// count. Statement order defines the on-disk format — do not reorder.
func (l *leafNode) writeTo(nw, hw io.Writer, writeOpts *WriteOpts) (nOff int64, wN, wH int64, err error) {
	if writeOpts.OnlyMutated && !l.mutated() {
		// Unchanged leaf: already persisted at l.off.
		return l.off, 0, 0, nil
	}
	size := l.size()
	buf := make([]byte, size)
	bi := 0
	buf[bi] = LeafNodeType
	bi++
	binary.BigEndian.PutUint32(buf[bi:], uint32(size)) // Size
	bi += 4
	binary.BigEndian.PutUint32(buf[bi:], uint32(len(l.values)))
	bi += 4
	// accH counts bytes appended to the history log for this leaf.
	accH := int64(0)
	for _, v := range l.values {
		binary.BigEndian.PutUint32(buf[bi:], uint32(len(v.key)))
		bi += 4
		copy(buf[bi:], v.key)
		bi += len(v.key)
		binary.BigEndian.PutUint32(buf[bi:], uint32(len(v.value)))
		bi += 4
		copy(buf[bi:], v.value)
		bi += len(v.value)
		binary.BigEndian.PutUint64(buf[bi:], v.ts)
		bi += 8
		hOff := v.hOff
		hCount := v.hCount + uint64(len(v.tss))
		if len(v.tss) > 0 {
			// Flush in-memory timestamps: count, each ts, then a back
			// pointer to the previously persisted history chunk.
			hbuf := make([]byte, 4+len(v.tss)*8+8)
			hi := 0
			binary.BigEndian.PutUint32(hbuf[hi:], uint32(len(v.tss)))
			hi += 4
			for _, ts := range v.tss {
				binary.BigEndian.PutUint64(hbuf[hi:], uint64(ts))
				hi += 8
			}
			binary.BigEndian.PutUint64(hbuf[hi:], uint64(v.hOff))
			hi += 8
			n, err := hw.Write(hbuf)
			if err != nil {
				return 0, 0, int64(n), err
			}
			hOff = writeOpts.BaseHLogOffset + accH
			accH += int64(n)
		}
		binary.BigEndian.PutUint64(buf[bi:], uint64(hOff))
		bi += 8
		binary.BigEndian.PutUint64(buf[bi:], hCount)
		bi += 8
		if writeOpts.commitLog {
			// Committed: drop in-memory history now reachable on disk.
			v.tss = nil
			v.hOff = hOff
			v.hCount = hCount
		}
	}
	n, err := nw.Write(buf[:bi])
	if err != nil {
		return 0, int64(n), accH, err
	}
	wN = int64(size)
	nOff = writeOpts.BaseNLogOffset
	if writeOpts.commitLog {
		l.off = writeOpts.BaseNLogOffset
		l.mut = false
		l.t.cachePut(l)
	}
	return nOff, wN, accH, nil
}

// writeTo on a nodeRef resolves the referenced node (unless only
// mutated nodes are requested, in which case the existing offset is
// returned untouched) and delegates serialization to it.
func (n *nodeRef) writeTo(nw, hw io.Writer, writeOpts *WriteOpts) (nOff int64, wN, wH int64, err error) {
	if writeOpts.OnlyMutated {
		// A nodeRef is by construction already persisted.
		return n.offset(), 0, 0, nil
	}
	node, err := n.t.nodeAt(n.off)
	if err != nil {
		return 0, 0, 0, err
	}
	off, wn, wh, err := node.writeTo(nw, hw, writeOpts)
	if err != nil {
		return 0, wn, wh, err
	}
	if writeOpts.commitLog {
		n.off = off
	}
	return off, wn, wh, nil
}

// writeNodeRefToWithOffset encodes a child reference into buf:
// minKey len/bytes, maxKey len/bytes, ts, size, node-log offset.
// It returns the number of bytes written. Field order is the on-disk
// format — do not reorder.
func writeNodeRefToWithOffset(n node, offset int64, buf []byte) int {
	i := 0
	minKey := n.minKey()
	binary.BigEndian.PutUint32(buf[i:], uint32(len(minKey)))
	i += 4
	copy(buf[i:], minKey)
	i += len(minKey)
	maxKey := n.maxKey()
	binary.BigEndian.PutUint32(buf[i:], uint32(len(maxKey)))
	i += 4
	copy(buf[i:], maxKey)
	i += len(maxKey)
	binary.BigEndian.PutUint64(buf[i:], n.ts())
	i += 8
	binary.BigEndian.PutUint32(buf[i:], uint32(n.size()))
	i += 4
	binary.BigEndian.PutUint64(buf[i:], uint64(offset))
	i += 8
	return i
}
|
package noop
import "go.mercari.io/datastore"
// Compile-time check that noop satisfies datastore.Middleware.
var _ datastore.Middleware = &noop{}

// New creates and returns a no-op middleware that forwards every
// operation to the next middleware in the chain unchanged.
func New() datastore.Middleware {
	return &noop{}
}

// noop is a pass-through implementation of datastore.Middleware.
type noop struct {
}
// Every method below delegates directly to the next middleware in the
// chain (info.Next) without modifying arguments or results.

func (*noop) AllocateIDs(info *datastore.MiddlewareInfo, keys []datastore.Key) ([]datastore.Key, error) {
	return info.Next.AllocateIDs(info, keys)
}

func (*noop) PutMultiWithoutTx(info *datastore.MiddlewareInfo, keys []datastore.Key, psList []datastore.PropertyList) ([]datastore.Key, error) {
	return info.Next.PutMultiWithoutTx(info, keys, psList)
}

func (*noop) PutMultiWithTx(info *datastore.MiddlewareInfo, keys []datastore.Key, psList []datastore.PropertyList) ([]datastore.PendingKey, error) {
	return info.Next.PutMultiWithTx(info, keys, psList)
}

func (*noop) GetMultiWithoutTx(info *datastore.MiddlewareInfo, keys []datastore.Key, psList []datastore.PropertyList) error {
	return info.Next.GetMultiWithoutTx(info, keys, psList)
}

func (*noop) GetMultiWithTx(info *datastore.MiddlewareInfo, keys []datastore.Key, psList []datastore.PropertyList) error {
	return info.Next.GetMultiWithTx(info, keys, psList)
}

func (*noop) DeleteMultiWithoutTx(info *datastore.MiddlewareInfo, keys []datastore.Key) error {
	return info.Next.DeleteMultiWithoutTx(info, keys)
}

func (*noop) DeleteMultiWithTx(info *datastore.MiddlewareInfo, keys []datastore.Key) error {
	return info.Next.DeleteMultiWithTx(info, keys)
}

func (*noop) PostCommit(info *datastore.MiddlewareInfo, tx datastore.Transaction, commit datastore.Commit) error {
	return info.Next.PostCommit(info, tx, commit)
}

func (*noop) PostRollback(info *datastore.MiddlewareInfo, tx datastore.Transaction) error {
	return info.Next.PostRollback(info, tx)
}

func (*noop) Run(info *datastore.MiddlewareInfo, q datastore.Query, qDump *datastore.QueryDump) datastore.Iterator {
	return info.Next.Run(info, q, qDump)
}

func (*noop) GetAll(info *datastore.MiddlewareInfo, q datastore.Query, qDump *datastore.QueryDump, psList *[]datastore.PropertyList) ([]datastore.Key, error) {
	return info.Next.GetAll(info, q, qDump, psList)
}

func (*noop) Next(info *datastore.MiddlewareInfo, q datastore.Query, qDump *datastore.QueryDump, iter datastore.Iterator, ps *datastore.PropertyList) (datastore.Key, error) {
	return info.Next.Next(info, q, qDump, iter, ps)
}

func (*noop) Count(info *datastore.MiddlewareInfo, q datastore.Query, qDump *datastore.QueryDump) (int, error) {
	return info.Next.Count(info, q, qDump)
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"fmt"
"math/rand"
"slices"
"testing"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/types"
"github.com/stretchr/testify/require"
)
// TestBasicCTE checks basic recursive CTE evaluation: a single seed part,
// multiple seed parts, multiple recursive parts, a recursive CTE inside an
// EXISTS subquery, and two CTEs where one references the other.
func TestBasicCTE(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	rows := tk.MustQuery("with recursive cte1 as (" +
		"select 1 c1 " +
		"union all " +
		"select c1 + 1 c1 from cte1 where c1 < 5) " +
		"select * from cte1")
	rows.Check(testkit.Rows("1", "2", "3", "4", "5"))
	// Two seed parts.
	rows = tk.MustQuery("with recursive cte1 as (" +
		"select 1 c1 " +
		"union all " +
		"select 2 c1 " +
		"union all " +
		"select c1 + 1 c1 from cte1 where c1 < 10) " +
		"select * from cte1 order by c1")
	rows.Check(testkit.Rows("1", "2", "2", "3", "3", "4", "4", "5", "5", "6", "6", "7", "7", "8", "8", "9", "9", "10", "10"))
	// Two recursive parts.
	rows = tk.MustQuery("with recursive cte1 as (" +
		"select 1 c1 " +
		"union all " +
		"select 2 c1 " +
		"union all " +
		"select c1 + 1 c1 from cte1 where c1 < 3 " +
		"union all " +
		"select c1 + 2 c1 from cte1 where c1 < 5) " +
		"select * from cte1 order by c1")
	rows.Check(testkit.Rows("1", "2", "2", "3", "3", "3", "4", "4", "5", "5", "5", "6", "6"))
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(a int);")
	tk.MustExec("insert into t1 values(1);")
	tk.MustExec("insert into t1 values(2);")
	// Recursive CTE inside EXISTS, correlated with the outer query column `a`.
	rows = tk.MustQuery("SELECT * FROM t1 dt WHERE EXISTS(WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0) SELECT * FROM qn WHERE b=a);")
	rows.Check(testkit.Rows("1"))
	rows = tk.MustQuery("SELECT * FROM t1 dt WHERE EXISTS( WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0 or b = 1) SELECT * FROM qn WHERE b=a );")
	rows.Check(testkit.Rows("1", "2"))
	// One CTE (cte) referencing another (c) declared in the same WITH list.
	rows = tk.MustQuery("with recursive c(p) as (select 1), cte(a, b) as (select 1, 1 union select a+1, 1 from cte, c where a < 5) select * from cte order by 1, 2;")
	rows.Check(testkit.Rows("1 1", "2 1", "3 1", "4 1", "5 1"))
}
// TestUnionDistinct checks deduplication semantics when UNION (distinct) and
// UNION ALL are mixed inside a recursive CTE, for both constant seeds and
// seeds read from a table containing duplicate rows.
func TestUnionDistinct(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test;")
	// Basic test. UNION/UNION ALL intersects.
	rows := tk.MustQuery("with recursive cte1(c1) as (select 1 union select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2", "3"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union all select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2", "3"))
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(c1 int, c2 int);")
	tk.MustExec("insert into t1 values(1, 1), (1, 2), (2, 2);")
	// Seed rows from a table; UNION must drop duplicates across both parts.
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from t1) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2", "3"))
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(c1 int);")
	tk.MustExec("insert into t1 values(1), (1), (1), (2), (2), (2);")
	// Duplicate seed rows plus a recursive part; the result is still distinct.
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 4) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2", "3", "4"))
}
// TestCTEMaxRecursionDepth checks that @@cte_max_recursion_depth limits the
// number of recursive iterations (error 3636 once exceeded) and that purely
// non-recursive CTEs are unaffected by the limit, for depths -1, 0 and 1.
func TestCTEMaxRecursionDepth(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test;")
	tk.MustExec("set @@cte_max_recursion_depth = -1;")
	err := tk.QueryToErr("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 100) select * from cte1;")
	require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value")
	// If there is no recursive part, query runs ok.
	rows := tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2"))
	rows = tk.MustQuery("with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2"))
	tk.MustExec("set @@cte_max_recursion_depth = 0;")
	// Depth 0: any recursive part at all aborts on the first iteration,
	// even when the recursion would produce no rows (c1 < 0).
	err = tk.QueryToErr("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1;")
	require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value")
	err = tk.QueryToErr("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1;")
	require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value")
	// If there is no recursive part, query runs ok.
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2"))
	rows = tk.MustQuery("with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2"))
	tk.MustExec("set @@cte_max_recursion_depth = 1;")
	// Depth 1: one empty recursive iteration is allowed; producing a second
	// iteration's worth of rows aborts.
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1;")
	rows.Check(testkit.Rows("1"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1;")
	rows.Check(testkit.Rows("1"))
	err = tk.QueryToErr("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 2) select * from cte1;")
	require.EqualError(t, err, "[executor:3636]Recursive query aborted after 2 iterations. Try increasing @@cte_max_recursion_depth to a larger value")
	// If there is no recursive part, query runs ok.
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2"))
	rows = tk.MustQuery("with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;")
	rows.Check(testkit.Rows("1", "2"))
}
// TestCTEWithLimit checks LIMIT/OFFSET semantics applied to a recursive CTE:
// early termination once the limit is reached, interaction with
// @@cte_max_recursion_depth, joins and subqueries over a limited CTE, and the
// UNION vs UNION ALL variants with table-backed seeds.
//
// Fix: the duplicated `drop table if exists t1;` before building insertStr was
// removed; the table is dropped and recreated immediately before the insert.
func TestCTEWithLimit(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test;")
	// Basic recursive tests.
	rows := tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 0) select * from cte1")
	rows.Check(testkit.Rows("1", "2", "3", "4", "5"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 1) select * from cte1")
	rows.Check(testkit.Rows("2", "3", "4", "5", "6"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 10) select * from cte1")
	rows.Check(testkit.Rows("11", "12", "13", "14", "15"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 995) select * from cte1")
	rows.Check(testkit.Rows("996", "997", "998", "999", "1000"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 6) select * from cte1;")
	rows.Check(testkit.Rows("7", "8", "9", "10", "11"))
	// Test with cte_max_recursion_depth: LIMIT lets recursion stop early, but
	// the offset still has to be reached within the depth budget.
	tk.MustExec("set cte_max_recursion_depth=2;")
	rows = tk.MustQuery("with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1;")
	rows.Check(testkit.Rows("2"))
	err := tk.QueryToErr("with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1;")
	require.EqualError(t, err, "[executor:3636]Recursive query aborted after 3 iterations. Try increasing @@cte_max_recursion_depth to a larger value")
	tk.MustExec("set cte_max_recursion_depth=1000;")
	rows = tk.MustQuery("with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 996) select * from cte1;")
	rows.Check(testkit.Rows("996", "997", "998", "999", "1000"))
	err = tk.QueryToErr("with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 997) select * from cte1;")
	require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1001 iterations. Try increasing @@cte_max_recursion_depth to a larger value")
	// LIMIT 0 always yields an empty result regardless of offset.
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1")
	rows.Check(testkit.Rows())
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 10) select * from cte1")
	rows.Check(testkit.Rows())
	// Test join.
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 order by dt1.c1, dt2.c1;")
	rows.Check(testkit.Rows("2 2", "2 3", "3 2", "3 3"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1 order by dt1.c1, dt1.c1;")
	rows.Check(testkit.Rows("2 2", "3 3"))
	// Test subquery.
	// Different with mysql, maybe it's mysql bug?(https://bugs.mysql.com/bug.php?id=103890&thanks=4)
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where c1 in (select 2);")
	rows.Check(testkit.Rows("2"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 dt where c1 in (select c1 from cte1 where 1 = dt.c1 - 1);")
	rows.Check(testkit.Rows("2"))
	// Test Apply.
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where cte1.c1 = (select dt1.c1 from cte1 dt1 where dt1.c1 = cte1.c1);")
	rows.Check(testkit.Rows("2", "3"))
	// Recursive tests with table.
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(c1 int);")
	tk.MustExec("insert into t1 values(1), (2), (3);")
	// Error: ERROR 1221 (HY000): Incorrect usage of UNION and LIMIT.
	// Limit can only be at the end of SQL stmt.
	err = tk.ExecToErr("with recursive cte1(c1) as (select c1 from t1 limit 1 offset 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1")
	require.EqualError(t, err, "[planner:1221]Incorrect usage of UNION and LIMIT")
	// Basic non-recursive tests.
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 1 offset 1) select * from cte1")
	rows.Check(testkit.Rows("2"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 0 offset 1) select * from cte1")
	rows.Check(testkit.Rows())
	rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 2 offset 0) select * from cte1")
	rows.Check(testkit.Rows("1", "2"))
	// Test with table: t1 holds the 300 consecutive values 0..299.
	insertStr := "insert into t1 values(0)"
	for i := 1; i < 300; i++ {
		insertStr += fmt.Sprintf(", (%d)", i)
	}
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(c1 int);")
	tk.MustExec(insertStr)
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1) select * from cte1")
	rows.Check(testkit.Rows("0"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1 offset 100) select * from cte1")
	rows.Check(testkit.Rows("100"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 5 offset 100) select * from cte1")
	rows.Check(testkit.Rows("100", "101", "102", "103", "104"))
	// Basic non-recursive tests.
	rows = tk.MustQuery("with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1")
	rows.Check(testkit.Rows("1", "2"))
	rows = tk.MustQuery("with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1")
	rows.Check(testkit.Rows("1 1", "2 2"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1")
	rows.Check(testkit.Rows())
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1")
	rows.Check(testkit.Rows())
	// rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select 2 limit 5 offset 100) select * from cte1")
	// rows.Check(testkit.Rows("100", "101", "102", "103", "104"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1")
	rows.Check(testkit.Rows("100", "101", "102"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1")
	rows.Check(testkit.Rows("100 100", "101 101", "102 102"))
	// Test limit 0.
	tk.MustExec("set cte_max_recursion_depth = 0;")
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(c1 int);")
	tk.MustExec("insert into t1 values(0);")
	rows = tk.MustQuery("with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 0) select * from cte1;")
	rows.Check(testkit.Rows())
	// MySQL err: ERROR 1365 (22012): Division by 0. Because it gives error when computing 1/c1.
	err = tk.QueryToErr("with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 1) select * from cte1;")
	require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value")
	tk.MustExec("set cte_max_recursion_depth = 1000;")
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(c1 int);")
	tk.MustExec("insert into t1 values(1), (2), (3);")
	// Exhaustive limit/offset grid over a UNION (distinct) recursive CTE.
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 2) select * from cte1;")
	rows.Check(testkit.Rows())
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1;")
	rows.Check(testkit.Rows("3"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 2) select * from cte1;")
	rows.Check(testkit.Rows("3", "4"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 2) select * from cte1;")
	rows.Check(testkit.Rows("3", "4", "5"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 2) select * from cte1;")
	rows.Check(testkit.Rows("3", "4", "5", "6"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 3) select * from cte1;")
	rows.Check(testkit.Rows())
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1;")
	rows.Check(testkit.Rows("4"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 3) select * from cte1;")
	rows.Check(testkit.Rows("4", "5"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 3) select * from cte1;")
	rows.Check(testkit.Rows("4", "5", "6"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 3) select * from cte1;")
	rows.Check(testkit.Rows("4", "5", "6", "7"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 4) select * from cte1;")
	rows.Check(testkit.Rows())
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 4) select * from cte1;")
	rows.Check(testkit.Rows("5"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 4) select * from cte1;")
	rows.Check(testkit.Rows("5", "6"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 4) select * from cte1;")
	rows.Check(testkit.Rows("5", "6", "7"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 4) select * from cte1;")
	rows.Check(testkit.Rows("5", "6", "7", "8"))
	// Same grid with UNION ALL: duplicates are kept, so the row order/content
	// reflects iteration order rather than distinct values.
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 2) select * from cte1;")
	rows.Check(testkit.Rows())
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 2) select * from cte1;")
	rows.Check(testkit.Rows("3"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 2) select * from cte1;")
	rows.Check(testkit.Rows("3", "2"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 2) select * from cte1;")
	rows.Check(testkit.Rows("3", "2", "3"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 2) select * from cte1;")
	rows.Check(testkit.Rows("3", "2", "3", "4"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 3) select * from cte1;")
	rows.Check(testkit.Rows())
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 3) select * from cte1;")
	rows.Check(testkit.Rows("2"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 3) select * from cte1;")
	rows.Check(testkit.Rows("2", "3"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 3) select * from cte1;")
	rows.Check(testkit.Rows("2", "3", "4"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 3) select * from cte1;")
	rows.Check(testkit.Rows("2", "3", "4", "3"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 4) select * from cte1;")
	rows.Check(testkit.Rows())
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 4) select * from cte1;")
	rows.Check(testkit.Rows("3"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 4) select * from cte1;")
	rows.Check(testkit.Rows("3", "4"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 4) select * from cte1;")
	rows.Check(testkit.Rows("3", "4", "3"))
	rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 4) select * from cte1;")
	rows.Check(testkit.Rows("3", "4", "3", "4"))
}
// TestSpillToDisk verifies that a recursive CTE's intermediate storage spills
// to disk under a tight memory quota: after the query, both the statement's
// memory tracker and disk tracker must report non-zero max consumption, and
// the result must still be correct.
//
// Fix: use slices.Min to obtain the smallest inserted value instead of fully
// sorting vals (O(n) vs O(n log n)), and size resRows exactly
// (rowNum-minV+1 entries; the old cap of rowNum was one short when minV == 0).
func TestSpillToDisk(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("SET GLOBAL tidb_enable_tmp_storage_on_oom = 1")
	defer tk.MustExec("SET GLOBAL tidb_enable_tmp_storage_on_oom = 0")
	tk.MustExec("use test;")
	// Force the CTE storage and the sorted row container to take the spill
	// path deterministically.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testCTEStorageSpill", "return(true)"))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testCTEStorageSpill"))
		tk.MustExec("set tidb_mem_quota_query = 1073741824;")
	}()
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill", "return(true)"))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill"))
	}()
	// Use duplicated rows to test UNION DISTINCT.
	tk.MustExec("set tidb_mem_quota_query = 1073741824;")
	insertStr := "insert into t1 values(0)"
	rowNum := 1000
	vals := make([]int, rowNum)
	vals[0] = 0
	for i := 1; i < rowNum; i++ {
		v := rand.Intn(100)
		vals[i] = v
		insertStr += fmt.Sprintf(", (%d)", v)
	}
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(c1 int);")
	tk.MustExec(insertStr)
	// Shrink the quota so the CTE storage exceeds it and must spill.
	tk.MustExec("set tidb_mem_quota_query = 40000;")
	tk.MustExec("set cte_max_recursion_depth = 500000;")
	sql := fmt.Sprintf("with recursive cte1 as ( "+
		"select c1 from t1 "+
		"union "+
		"select c1 + 1 c1 from cte1 where c1 < %d) "+
		"select c1 from cte1 order by c1;", rowNum)
	rows := tk.MustQuery(sql)
	memTracker := tk.Session().GetSessionVars().StmtCtx.MemTracker
	diskTracker := tk.Session().GetSessionVars().StmtCtx.DiskTracker
	require.Greater(t, memTracker.MaxConsumed(), int64(0))
	require.Greater(t, diskTracker.MaxConsumed(), int64(0))
	// The distinct result is every integer from the smallest seed value up to
	// rowNum inclusive (each seed recurses by +1 until it reaches rowNum).
	minV := slices.Min(vals)
	resRows := make([]string, 0, rowNum-minV+1)
	for i := minV; i <= rowNum; i++ {
		resRows = append(resRows, fmt.Sprintf("%d", i))
	}
	rows.Check(testkit.Rows(resRows...))
}
// TestCTEExecError checks that an arithmetic overflow raised inside the
// recursive part of a CTE is surfaced as types.ErrOverflow, repeating the
// query with high projection concurrency and a small chunk size to make the
// failure path more likely to be exercised.
func TestCTEExecError(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test;")
	tk.MustExec("drop table if exists src;")
	tk.MustExec("create table src(first int, second int);")
	// Build one multi-row INSERT with 1001 random (first, second) pairs.
	stmt := fmt.Sprintf("insert into src values (%d, %d)", rand.Intn(1000), rand.Intn(1000))
	for n := 0; n < 1000; n++ {
		stmt += fmt.Sprintf(",(%d, %d)", rand.Intn(1000), rand.Intn(1000))
	}
	tk.MustExec(stmt + ";")
	// Increase projection concurrency and decrease chunk size
	// to increase the probability of reproducing the problem.
	tk.MustExec("set tidb_max_chunk_size = 32")
	tk.MustExec("set tidb_projection_concurrency = 20")
	for attempt := 0; attempt < 10; attempt++ {
		err := tk.QueryToErr("with recursive cte(iter, first, second, result) as " +
			"(select 1, first, second, first+second from src " +
			" union all " +
			"select iter+1, second, result, second+result from cte where iter < 80 )" +
			"select * from cte")
		require.True(t, terror.ErrorEqual(err, types.ErrOverflow))
	}
}
// TestCTEsInView checks that a CTE referenced inside a view resolves its
// tables against the view's defining schema, not the session's current
// database: selecting from test.v must return test.t's row even when the
// current database is test1 (which has its own table t).
// https://github.com/pingcap/tidb/issues/33965.
func TestCTEsInView(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test;")
	tk.MustExec("create database if not exists test1;")
	tk.MustExec("create table test.t (a int);")
	tk.MustExec("create table test1.t (a int);")
	tk.MustExec("insert into test.t values (1);")
	tk.MustExec("insert into test1.t values (2);")
	tk.MustExec("use test;")
	tk.MustExec("create definer='root'@'localhost' view test.v as with tt as (select * from t) select * from tt;")
	tk.MustQuery("select * from test.v;").Check(testkit.Rows("1"))
	// Switch to test1: the view must still read test.t (value 1), not test1.t.
	tk.MustExec("use test1;")
	tk.MustQuery("select * from test.v;").Check(testkit.Rows("1"))
}
// TestCTEPanic checks that a panic injected in either the seed part or the
// recursive part of a CTE execution is recovered and converted into an error
// whose message contains the injected panic text.
func TestCTEPanic(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test;")
	tk.MustExec("create table t1(c1 int)")
	tk.MustExec("insert into t1 values(1), (2), (3)")
	const prefix = "github.com/pingcap/tidb/executor/"
	const query = "with recursive cte1 as (select c1 from t1 union all select c1 + 1 from cte1 where c1 < 5) select t_alias_1.c1 from cte1 as t_alias_1 inner join cte1 as t_alias_2 on t_alias_1.c1 = t_alias_2.c1 order by c1"
	// Exercise both injection points with the same query.
	for _, name := range []string{"testCTESeedPanic", "testCTERecursivePanic"} {
		require.NoError(t, failpoint.Enable(prefix+name, fmt.Sprintf(`panic("%s")`, name)))
		err := tk.QueryToErr(query)
		require.Contains(t, err.Error(), name)
		require.NoError(t, failpoint.Disable(prefix+name))
	}
}
// TestCTEDelSpillFile runs an INSERT ... SELECT over a large recursive CTE
// under a tiny memory quota (with oom action 'log', so the statement is
// allowed to spill rather than be killed) and then checks that the session's
// CTEStorageMap is nil — i.e. the CTE storage (and any spill files it owned)
// was released when the statement finished.
func TestCTEDelSpillFile(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test;")
	tk.MustExec("drop table if exists t1, t2;")
	tk.MustExec("create table t1(c1 int, c2 int);")
	tk.MustExec("create table t2(c1 int);")
	tk.MustExec("set @@cte_max_recursion_depth = 1000000;")
	tk.MustExec("set global tidb_mem_oom_action = 'log';")
	// 100-byte quota forces the CTE storage to spill during execution.
	tk.MustExec("set @@tidb_mem_quota_query = 100;")
	tk.MustExec("insert into t2 values(1);")
	tk.MustExec("insert into t1 (c1, c2) with recursive cte1 as (select c1 from t2 union select cte1.c1 + 1 from cte1 where cte1.c1 < 100000) select cte1.c1, cte1.c1+1 from cte1;")
	require.Nil(t, tk.Session().GetSessionVars().StmtCtx.CTEStorageMap)
}
// TestCTEShareCorColumn checks CTEs whose definition contains a correlated
// scalar subquery, consumed twice via a self-join (so both readers share the
// correlated column). The first case is repeated many times because the
// failure it guards against is intermittent — a single run could pass by
// chance.
func TestCTEShareCorColumn(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test;")
	tk.MustExec("drop table if exists t1, t2;")
	tk.MustExec("create table t1(c1 int, c2 varchar(100));")
	tk.MustExec("insert into t1 values(1, '2020-10-10');")
	tk.MustExec("create table t2(c1 int, c2 date);")
	tk.MustExec("insert into t2 values(1, '2020-10-10');")
	// Same query with the hash-join build side forced to either alias.
	for i := 0; i < 100; i++ {
		tk.MustQuery("with cte1 as (select t1.c1, (select t2.c2 from t2 where t2.c2 = str_to_date(t1.c2, '%Y-%m-%d')) from t1 inner join t2 on t1.c1 = t2.c1) select /*+ hash_join_build(alias1) */ * from cte1 alias1 inner join cte1 alias2 on alias1.c1 = alias2.c1;").Check(testkit.Rows("1 2020-10-10 1 2020-10-10"))
		tk.MustQuery("with cte1 as (select t1.c1, (select t2.c2 from t2 where t2.c2 = str_to_date(t1.c2, '%Y-%m-%d')) from t1 inner join t2 on t1.c1 = t2.c1) select /*+ hash_join_build(alias2) */ * from cte1 alias1 inner join cte1 alias2 on alias1.c1 = alias2.c1;").Check(testkit.Rows("1 2020-10-10 1 2020-10-10"))
	}
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(a int);")
	tk.MustExec("insert into t1 values(1), (2);")
	// Recursive CTE inside EXISTS with a NO_DECORRELATE hint on the inner read.
	tk.MustQuery("SELECT * FROM t1 dt WHERE EXISTS( WITH RECURSIVE qn AS (SELECT a AS b UNION ALL SELECT b+1 FROM qn WHERE b=0 or b = 1) SELECT * FROM qn dtqn1 where exists (select /*+ NO_DECORRELATE() */ b from qn where dtqn1.b+1));").Check(testkit.Rows("1", "2"))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.