text stringlengths 11 4.05M |
|---|
package ca
import (
"bytes"
"testing"
)
// openTestingDB opens a throwaway in-memory database for use in tests.
func openTestingDB() (*database, error) {
	db, err := openDB(":memory:")
	return db, err
}
// TestMetadata verifies that a metadata value written with SetMetadata can be
// read back unchanged with GetMetadata.
func TestMetadata(t *testing.T) {
	var (
		key   = []byte{42}
		value = []byte("hello, world")
	)
	db, err := openTestingDB()
	if err != nil {
		t.Fatal(err)
	}
	// Fatalf (not Errorf): if the write fails there is no point reading back,
	// and continuing would produce misleading cascading failures.
	if err := db.SetMetadata(key, value); err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	result, err := db.GetMetadata(key)
	if err != nil {
		// Fatalf: result is meaningless on error; don't compare it below.
		t.Fatalf("unexpected error: %s", err)
	}
	if !bytes.Equal(result, value) {
		t.Fatalf("unexpected value: got %v, want %s", result, value)
	}
}
|
package informer
import (
"testing"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
)
// Test_objectToWorkflowTemplate covers the three conversion outcomes: a
// non-unstructured input, a malformed template spec, and a well-formed one.
func Test_objectToWorkflowTemplate(t *testing.T) {
	t.Run("NotUnstructured", func(t *testing.T) {
		wfTmpl, err := objectToWorkflowTemplate(&corev1.Status{})
		assert.EqualError(t, err, "malformed workflow template: expected \"*unstructured.Unstructured\", got \"*v1.Status\"")
		assert.NotNil(t, wfTmpl)
	})
	t.Run("MalformedWorkflowTemplate", func(t *testing.T) {
		obj := &unstructured.Unstructured{Object: map[string]interface{}{
			"metadata": map[string]interface{}{"namespace": "my-ns", "name": "my-name"},
			"spec":     "ops",
		}}
		wfTmpl, err := objectToWorkflowTemplate(obj)
		assert.EqualError(t, err, "malformed workflow template \"my-ns/my-name\": cannot restore struct from: string")
		// Even on error the metadata should have been decoded.
		if assert.NotNil(t, wfTmpl) {
			assert.Equal(t, "my-ns", wfTmpl.Namespace)
			assert.Equal(t, "my-name", wfTmpl.Name)
		}
	})
	t.Run("WorkflowTemplate", func(t *testing.T) {
		wfTmpl, err := objectToWorkflowTemplate(&unstructured.Unstructured{})
		assert.NoError(t, err)
		assert.Equal(t, &wfv1.WorkflowTemplate{}, wfTmpl)
	})
}
func Test_objectsToWorkflowTemplates(t *testing.T) {
assert.Len(t, objectsToWorkflowTemplates([]runtime.Object{&corev1.Status{}, &unstructured.Unstructured{}}), 2)
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package engine
import (
"bytes"
"compress/gzip"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"regexp"
"sort"
"strconv"
"strings"
"text/template"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/api/common"
"github.com/Azure/aks-engine/pkg/helpers"
"github.com/pkg/errors"
_ "k8s.io/client-go/plugin/pkg/client/auth/azure" // register azure (AD) authentication plugin
)
// commonTemplateFiles are the ARM template snippet names shared by all deployments.
var commonTemplateFiles = []string{agentOutputs, agentParams, masterOutputs, iaasOutputs, masterParams, windowsParams}
// kubernetesParamFiles are the parameter template snippet names assembled for Kubernetes deployments.
var kubernetesParamFiles = []string{armParameters, kubernetesParams, masterParams, agentParams, windowsParams}
// keyvaultSecretPathRe matches an Azure Key Vault secret reference of the form
// /subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.KeyVault/vaults/<vault>/secrets/<name>[/<version>].
// Submatches: 1 = vault resource ID, 2 = secret name, 4 = optional secret version.
// Compiled once at package initialization; MustCompile is the idiomatic form for
// a constant package-level pattern (replaces the previous var + init() pair).
var keyvaultSecretPathRe = regexp.MustCompile(`^(/subscriptions/\S+/resourceGroups/\S+/providers/Microsoft.KeyVault/vaults/\S+)/secrets/([^/\s]+)(/(\S+))?$`)
// GenerateKubeConfig returns a JSON string representing the KubeConfig
// GenerateKubeConfig returns a JSON string representing the KubeConfig for the
// cluster described by properties, deployed in the given location. It returns
// an error when properties or its CertificateProfile is nil, or when the
// master IP is malformed for a multi-master private cluster.
func GenerateKubeConfig(properties *api.Properties, location string) (string, error) {
	if properties == nil {
		return "", errors.New("Properties nil in GenerateKubeConfig")
	}
	if properties.CertificateProfile == nil {
		return "", errors.New("CertificateProfile property may not be nil in GenerateKubeConfig")
	}
	b, err := Asset(kubeConfigJSON)
	if err != nil {
		return "", errors.Wrapf(err, "error reading kube config template file %s", kubeConfigJSON)
	}
	kubeconfig := string(b)
	// variable replacement
	kubeconfig = strings.Replace(kubeconfig, "{{WrapAsVerbatim \"parameters('caCertificate')\"}}", base64.StdEncoding.EncodeToString([]byte(properties.CertificateProfile.CaCertificate)), -1)
	// The same API-server placeholder gets a different substitution depending
	// on whether the cluster is private; hoisted to a const so the three
	// branches cannot drift apart (it was previously repeated verbatim).
	const fqdnPlaceholder = "{{WrapAsVerbatim \"reference(concat('Microsoft.Network/publicIPAddresses/', variables('masterPublicIPAddressName'))).dnsSettings.fqdn\"}}"
	if properties.OrchestratorProfile != nil &&
		properties.OrchestratorProfile.KubernetesConfig != nil &&
		properties.OrchestratorProfile.KubernetesConfig.PrivateCluster != nil &&
		to.Bool(properties.OrchestratorProfile.KubernetesConfig.PrivateCluster.Enabled) {
		if properties.MasterProfile.HasMultipleNodes() {
			// more than 1 master, use the internal lb IP
			firstMasterIP := net.ParseIP(properties.MasterProfile.FirstConsecutiveStaticIP).To4()
			if firstMasterIP == nil {
				return "", errors.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", properties.MasterProfile.FirstConsecutiveStaticIP)
			}
			// The internal LB address sits at a fixed offset from the first master IP.
			lbIP := net.IP{firstMasterIP[0], firstMasterIP[1], firstMasterIP[2], firstMasterIP[3] + byte(DefaultInternalLbStaticIPOffset)}
			kubeconfig = strings.Replace(kubeconfig, fqdnPlaceholder, lbIP.String(), -1)
		} else {
			// Master count is 1, use the master IP
			kubeconfig = strings.Replace(kubeconfig, fqdnPlaceholder, properties.MasterProfile.FirstConsecutiveStaticIP, -1)
		}
	} else {
		kubeconfig = strings.Replace(kubeconfig, fqdnPlaceholder, api.FormatProdFQDNByLocation(properties.MasterProfile.DNSPrefix, location, properties.GetCustomCloudName()), -1)
	}
	kubeconfig = strings.Replace(kubeconfig, "{{WrapAsVariable \"resourceGroup\"}}", properties.MasterProfile.DNSPrefix, -1)
	var authInfo string
	if properties.AADProfile == nil {
		// No AAD: embed client certificate credentials directly.
		authInfo = fmt.Sprintf("{\"client-certificate-data\":\"%v\",\"client-key-data\":\"%v\"}",
			base64.StdEncoding.EncodeToString([]byte(properties.CertificateProfile.KubeConfigCertificate)),
			base64.StdEncoding.EncodeToString([]byte(properties.CertificateProfile.KubeConfigPrivateKey)))
	} else {
		// AAD: use the azure auth-provider; an empty tenant falls back to "common".
		tenantID := properties.AADProfile.TenantID
		if len(tenantID) == 0 {
			tenantID = "common"
		}
		authInfo = fmt.Sprintf("{\"auth-provider\":{\"name\":\"azure\",\"config\":{\"environment\":\"%v\",\"tenant-id\":\"%v\",\"apiserver-id\":\"%v\",\"client-id\":\"%v\"}}}",
			helpers.GetTargetEnv(location, properties.GetCustomCloudName()),
			tenantID,
			properties.AADProfile.ServerAppID,
			properties.AADProfile.ClientAppID)
	}
	kubeconfig = strings.Replace(kubeconfig, "{{authInfo}}", authInfo, -1)
	return kubeconfig, nil
}
// generateConsecutiveIPsList takes a starting IP address and returns a string slice of length "count" of subsequent, consecutive IP addresses
// generateConsecutiveIPsList takes a starting IP address and returns a string
// slice of length "count" of subsequent, consecutive IPv4 addresses.
func generateConsecutiveIPsList(count int, firstAddr string) ([]string, error) {
	base := net.ParseIP(firstAddr).To4()
	if base == nil {
		return nil, errors.Errorf("IPAddr '%s' is an invalid IP address", firstAddr)
	}
	// Refuse ranges that would run past the fourth octet.
	if int(base[3])+count >= 255 {
		return nil, errors.Errorf("IPAddr '%s' + %d will overflow the fourth octet", firstAddr, count)
	}
	addrs := make([]string, count)
	for i := range addrs {
		next := fmt.Sprintf("%d.%d.%d.%d", base[0], base[1], base[2], base[3]+byte(i))
		// Sanity-check each generated address before recording it.
		if net.ParseIP(next).To4() == nil {
			return nil, errors.Errorf("IPAddr '%s' is an invalid IP address", next)
		}
		addrs[i] = next
	}
	return addrs, nil
}
// addValue stores v under key k in ARM parameter form: {"value": v}.
func addValue(m paramsMap, k string, v interface{}) {
	m[k] = paramsMap{"value": v}
}
// addKeyvaultReference stores a Key Vault secret reference under key k so the
// ARM deployment resolves the parameter from the vault at deploy time.
func addKeyvaultReference(m paramsMap, k string, vaultID, secretName, secretVersion string) {
	ref := &KeyVaultRef{
		KeyVault:      KeyVaultID{ID: vaultID},
		SecretName:    secretName,
		SecretVersion: secretVersion,
	}
	m[k] = paramsMap{"reference": ref}
}
// addSecret stores v under key k. A string matching the Key Vault secret path
// pattern becomes a vault reference; any other value is stored verbatim
// (base64 encoded first when encode is true and v is a string).
func addSecret(m paramsMap, k string, v interface{}, encode bool) {
	str, ok := v.(string)
	if !ok {
		addValue(m, k, v)
		return
	}
	if parts := keyvaultSecretPathRe.FindStringSubmatch(str); parts != nil && len(parts) == 5 {
		// parts: 1 = vault ID, 2 = secret name, 4 = optional version.
		addKeyvaultReference(m, k, parts[1], parts[2], parts[4])
		return
	}
	if encode {
		str = base64.StdEncoding.EncodeToString([]byte(str))
	}
	addValue(m, k, str)
}
// makeMasterExtensionScriptCommands builds the preprovision extension download
// and run commands for master nodes.
func makeMasterExtensionScriptCommands(cs *api.ContainerService) string {
	var curlCaCertOpt string
	if cs.Properties.IsAzureStackCloud() {
		// Azure Stack endpoints use a custom CA that curl must be told to trust.
		curlCaCertOpt = fmt.Sprintf("--cacert %s", common.AzureStackCaCertLocation)
	}
	return makeExtensionScriptCommands(cs.Properties.MasterProfile.PreprovisionExtension, curlCaCertOpt, cs.Properties.ExtensionProfiles)
}
// makeAgentExtensionScriptCommands builds the preprovision extension commands
// for an agent pool, dispatching to the Windows variant for Windows pools.
func makeAgentExtensionScriptCommands(cs *api.ContainerService, profile *api.AgentPoolProfile) string {
	if profile.OSType == api.Windows {
		return makeWindowsExtensionScriptCommands(profile.PreprovisionExtension, cs.Properties.ExtensionProfiles)
	}
	var curlCaCertOpt string
	if cs.Properties.IsAzureStackCloud() {
		// Azure Stack endpoints use a custom CA that curl must be told to trust.
		curlCaCertOpt = fmt.Sprintf("--cacert %s", common.AzureStackCaCertLocation)
	}
	return makeExtensionScriptCommands(profile.PreprovisionExtension, curlCaCertOpt, cs.Properties.ExtensionProfiles)
}
// makeExtensionScriptCommands renders the cloud-init commands that download a
// Linux extension script, mark it executable, and run it with its parameters.
// It panics when the referenced extension has no matching profile entry, which
// indicates an invalid API model.
func makeExtensionScriptCommands(extension *api.Extension, curlCaCertOpt string, extensionProfiles []*api.ExtensionProfile) string {
	var profile *api.ExtensionProfile
	for _, candidate := range extensionProfiles {
		if strings.EqualFold(candidate.Name, extension.Name) {
			profile = candidate
			break
		}
	}
	if profile == nil {
		panic(fmt.Sprintf("%s extension referenced was not found in the extension profile", extension.Name))
	}
	paramsRef := fmt.Sprintf("parameters('%sParameters')", profile.Name)
	scriptURL := getExtensionURL(profile.RootURL, profile.Name, profile.Version, profile.Script, profile.URLQuery)
	scriptFilePath := fmt.Sprintf("/opt/azure/containers/extensions/%s/%s", profile.Name, profile.Script)
	return fmt.Sprintf("- sudo /usr/bin/curl --retry 5 --retry-delay 10 --retry-max-time 30 -o %s --create-dirs %s \"%s\" \n- sudo /bin/chmod 744 %s \n- sudo %s ',%s,' > /var/log/%s-output.log",
		scriptFilePath, curlCaCertOpt, scriptURL, scriptFilePath, scriptFilePath, paramsRef, profile.Name)
}
// makeWindowsExtensionScriptCommands renders the PowerShell commands that
// download and execute a Windows extension script with its parameters.
// It panics when the referenced extension has no matching profile entry.
func makeWindowsExtensionScriptCommands(extension *api.Extension, extensionProfiles []*api.ExtensionProfile) string {
	var profile *api.ExtensionProfile
	for _, candidate := range extensionProfiles {
		if strings.EqualFold(candidate.Name, extension.Name) {
			profile = candidate
			break
		}
	}
	if profile == nil {
		panic(fmt.Sprintf("%s extension referenced was not found in the extension profile", extension.Name))
	}
	scriptURL := getExtensionURL(profile.RootURL, profile.Name, profile.Version, profile.Script, profile.URLQuery)
	scriptFileDir := fmt.Sprintf("$env:SystemDrive:/AzureData/extensions/%s", profile.Name)
	scriptFilePath := fmt.Sprintf("%s/%s", scriptFileDir, profile.Script)
	return fmt.Sprintf("New-Item -ItemType Directory -Force -Path \"%s\" ; Invoke-WebRequest -Uri \"%s\" -OutFile \"%s\" ; powershell \"%s `\"',parameters('%sParameters'),'`\"\"\n", scriptFileDir, scriptURL, scriptFilePath, scriptFilePath, profile.Name)
}
// getVNETAddressPrefixes returns the JSON fragment listing the vnet address
// prefixes: the master subnet followed by each distinct agent pool subnet.
func getVNETAddressPrefixes(properties *api.Properties) string {
	visitedSubnets := make(map[string]bool)
	var buf bytes.Buffer
	buf.WriteString(`"[variables('masterSubnet')]"`)
	visitedSubnets[properties.MasterProfile.Subnet] = true
	for _, profile := range properties.AgentPoolProfiles {
		if _, ok := visitedSubnets[profile.Subnet]; !ok {
			// Record the subnet so pools sharing one are listed only once;
			// previously agent subnets were never recorded, so shared subnets
			// were emitted once per pool despite the dedupe map.
			visitedSubnets[profile.Subnet] = true
			buf.WriteString(fmt.Sprintf(",\n            \"[variables('%sSubnet')]\"", profile.Name))
		}
	}
	return buf.String()
}
// getVNETSubnetDependencies returns the comma-separated dependsOn entries for
// each agent pool's network security group.
func getVNETSubnetDependencies(properties *api.Properties) string {
	const nsgDependency = `        "[concat('Microsoft.Network/networkSecurityGroups/', variables('%sNSGName'))]"`
	deps := make([]string, 0, len(properties.AgentPoolProfiles))
	for _, profile := range properties.AgentPoolProfiles {
		deps = append(deps, fmt.Sprintf(nsgDependency, profile.Name))
	}
	return strings.Join(deps, ",\n")
}
// getVNETSubnets renders the JSON array elements describing the vnet subnets:
// the master subnet first, then one entry per agent pool. When addNSG is true
// each agent subnet entry also attaches the pool's network security group.
func getVNETSubnets(properties *api.Properties, addNSG bool) string {
masterString := `{
"name": "[variables('masterSubnetName')]",
"properties": {
"addressPrefix": "[variables('masterSubnet')]"
}
}`
agentString := ` {
"name": "[variables('%sSubnetName')]",
"properties": {
"addressPrefix": "[variables('%sSubnet')]"
}
}`
agentStringNSG := ` {
"name": "[variables('%sSubnetName')]",
"properties": {
"addressPrefix": "[variables('%sSubnet')]",
"networkSecurityGroup": {
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', variables('%sNSGName'))]"
}
}
}`
var buf bytes.Buffer
buf.WriteString(masterString)
// Each agent subnet entry is appended after a "," so the fragment extends the
// master entry into a valid JSON array body.
for _, agentProfile := range properties.AgentPoolProfiles {
buf.WriteString(",\n")
if addNSG {
buf.WriteString(fmt.Sprintf(agentStringNSG, agentProfile.Name, agentProfile.Name, agentProfile.Name))
} else {
buf.WriteString(fmt.Sprintf(agentString, agentProfile.Name, agentProfile.Name))
}
}
return buf.String()
}
// getLBRule renders one ARM load-balancer rule JSON fragment named
// "LBRule<port>" that forwards the given frontend port to the same backend
// port of the named pool, referencing a matching "tcp<port>Probe" probe.
func getLBRule(name string, port int) string {
return fmt.Sprintf(`    {
"name": "LBRule%d",
"properties": {
"backendAddressPool": {
"id": "[concat(variables('%sLbID'), '/backendAddressPools/', variables('%sLbBackendPoolName'))]"
},
"backendPort": %d,
"enableFloatingIP": false,
"frontendIPConfiguration": {
"id": "[variables('%sLbIPConfigID')]"
},
"frontendPort": %d,
"idleTimeoutInMinutes": 5,
"loadDistribution": "Default",
"probe": {
"id": "[concat(variables('%sLbID'),'/probes/tcp%dProbe')]"
},
"protocol": "Tcp"
}
}`, port, name, name, port, name, port, name, port)
}
func getLBRules(name string, ports []int) string {
var buf bytes.Buffer
for index, port := range ports {
if index > 0 {
buf.WriteString(",\n")
}
buf.WriteString(getLBRule(name, port))
}
return buf.String()
}
// getProbe renders one ARM TCP health-probe JSON fragment named "tcp<port>Probe"
// checking the given port every 5 seconds with a 2-probe threshold.
func getProbe(port int) string {
return fmt.Sprintf(`    {
"name": "tcp%dProbe",
"properties": {
"intervalInSeconds": 5,
"numberOfProbes": 2,
"port": %d,
"protocol": "Tcp"
}
}`, port, port)
}
func getProbes(ports []int) string {
var buf bytes.Buffer
for index, port := range ports {
if index > 0 {
buf.WriteString(",\n")
}
buf.WriteString(getProbe(port))
}
return buf.String()
}
// getSecurityRule renders one inbound NSG rule JSON fragment allowing Internet
// traffic to the given port; the rule priority is BaseLBPriority plus the
// port's position in the caller's list, keeping priorities unique.
func getSecurityRule(port int, portIndex int) string {
// BaseLBPriority specifies the base lb priority.
BaseLBPriority := 200
return fmt.Sprintf(`    {
"name": "Allow_%d",
"properties": {
"access": "Allow",
"description": "Allow traffic from the Internet to port %d",
"destinationAddressPrefix": "*",
"destinationPortRange": "%d",
"direction": "Inbound",
"priority": %d,
"protocol": "*",
"sourceAddressPrefix": "Internet",
"sourcePortRange": "*"
}
}`, port, port, port, BaseLBPriority+portIndex)
}
// getDataDisks renders the "dataDisks" JSON property for an agent pool, one
// entry per configured disk size. Storage-account pools get full vhd entries
// whose URIs spread disks across the pool's storage accounts; managed-disk
// pools get simple empty managed-disk entries. Returns "" when the pool has
// no disks configured.
func getDataDisks(a *api.AgentPoolProfile) string {
if !a.HasDisks() {
return ""
}
var buf bytes.Buffer
buf.WriteString("\"dataDisks\": [\n")
dataDisks := `    {
"createOption": "Empty",
"diskSizeGB": "%d",
"lun": %d,
"caching": "ReadOnly",
"name": "[concat(variables('%sVMNamePrefix'), copyIndex(),'-datadisk%d')]",
"vhd": {
"uri": "[concat('http://',variables('storageAccountPrefixes')[mod(add(add(div(copyIndex(),variables('maxVMsPerStorageAccount')),variables('%sStorageAccountOffset')),variables('dataStorageAccountPrefixSeed')),variables('storageAccountPrefixesCount'))],variables('storageAccountPrefixes')[div(add(add(div(copyIndex(),variables('maxVMsPerStorageAccount')),variables('%sStorageAccountOffset')),variables('dataStorageAccountPrefixSeed')),variables('storageAccountPrefixesCount'))],variables('%sDataAccountName'),'.blob.core.windows.net/vhds/',variables('%sVMNamePrefix'),copyIndex(), '--datadisk%d.vhd')]"
}
}`
managedDataDisks := `    {
"diskSizeGB": "%d",
"lun": %d,
"caching": "ReadOnly",
"createOption": "Empty"
}`
// The disk index doubles as the LUN for both storage profiles.
// NOTE(review): a StorageProfile other than StorageAccount/ManagedDisks emits
// nothing for that disk while still emitting the separator — confirm upstream
// validation rules that out.
for i, diskSize := range a.DiskSizesGB {
if i > 0 {
buf.WriteString(",\n")
}
if a.StorageProfile == api.StorageAccount {
buf.WriteString(fmt.Sprintf(dataDisks, diskSize, i, a.Name, i, a.Name, a.Name, a.Name, a.Name, i))
} else if a.StorageProfile == api.ManagedDisks {
buf.WriteString(fmt.Sprintf(managedDataDisks, diskSize, i))
}
}
buf.WriteString("\n    ],")
return buf.String()
}
func getSecurityRules(ports []int) string {
var buf bytes.Buffer
for index, port := range ports {
if index > 0 {
buf.WriteString(",\n")
}
buf.WriteString(getSecurityRule(port, index))
}
return buf.String()
}
// getSingleLine returns the file as a single line
func (t *TemplateGenerator) getSingleLine(textFilename string, cs *api.ContainerService, profile interface{}) (string, error) {
b, err := Asset(textFilename)
if err != nil {
return "", t.Translator.Errorf("yaml file %s does not exist", textFilename)
}
// use go templates to process the text filename
templ := template.New("customdata template").Funcs(t.getTemplateFuncMap(cs))
if _, err = templ.New(textFilename).Parse(string(b)); err != nil {
return "", t.Translator.Errorf("error parsing file %s: %v", textFilename, err)
}
var buffer bytes.Buffer
if err = templ.ExecuteTemplate(&buffer, textFilename, profile); err != nil {
return "", t.Translator.Errorf("error executing template for file %s: %v", textFilename, err)
}
expandedTemplate := buffer.String()
return expandedTemplate, nil
}
// getSingleLineForTemplate returns the file as a single line for embedding in an arm template
func (t *TemplateGenerator) getSingleLineForTemplate(textFilename string, cs *api.ContainerService, profile interface{}) (string, error) {
expandedTemplate, err := t.getSingleLine(textFilename, cs, profile)
if err != nil {
return "", err
}
textStr := escapeSingleLine(expandedTemplate)
return textStr, nil
}
// escapeSingleLine escapes backslashes, newlines (including CRLF), and double
// quotes so the text can sit on a single line inside a JSON string.
// template.JSEscapeString leaves undesirable chars that don't work with pretty print.
func escapeSingleLine(escapedStr string) string {
	// A single-pass Replacer never rescans replacement text, which matches the
	// previous sequential Replace calls (backslashes escaped before newlines).
	replacer := strings.NewReplacer(
		"\\", "\\\\",
		"\r\n", "\\n",
		"\n", "\\n",
		"\"", "\\\"",
	)
	return replacer.Replace(escapedStr)
}
// getBase64EncodedGzippedCustomScript will return a base64 of the CSE
func getBase64EncodedGzippedCustomScript(csFilename string, cs *api.ContainerService) string {
b, err := Asset(csFilename)
if err != nil {
// this should never happen and this is a bug
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
// translate the parameters
templ := template.New("ContainerService template").Funcs(getContainerServiceFuncMap(cs))
_, err = templ.Parse(string(b))
if err != nil {
// this should never happen and this is a bug
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
var buffer bytes.Buffer
_ = templ.Execute(&buffer, cs)
csStr := buffer.String()
csStr = strings.Replace(csStr, "\r\n", "\n", -1)
return getBase64EncodedGzippedCustomScriptFromStr(csStr)
}
// getStringFromBase64 decodes a standard base64 string; on error the returned
// string holds whatever bytes were decoded before the failure.
func getStringFromBase64(str string) (string, error) {
	decoded, err := base64.StdEncoding.DecodeString(str)
	return string(decoded), err
}
// getBase64EncodedGzippedCustomScriptFromStr will return a base64-encoded string of the gzip'd source data
func getBase64EncodedGzippedCustomScriptFromStr(str string) string {
var gzipB bytes.Buffer
w := gzip.NewWriter(&gzipB)
_, _ = w.Write([]byte(str))
w.Close()
return base64.StdEncoding.EncodeToString(gzipB.Bytes())
}
// getComponentFuncMap builds the template FuncMap used when rendering a
// Kubernetes component manifest: container image/resource lookups (empty
// string when the container is absent), config values, cloud-profile
// predicates, and a Kubernetes version comparison helper. The matching
// control-plane component additionally gets a Get<Name>Args helper rendering
// its ordered, escaped command-line arguments.
func getComponentFuncMap(component api.KubernetesComponent, cs *api.ContainerService) template.FuncMap {
ret := template.FuncMap{
"ContainerImage": func(name string) string {
if i := component.GetContainersIndexByName(name); i > -1 {
return component.Containers[i].Image
}
return ""
},
"ContainerCPUReqs": func(name string) string {
if i := component.GetContainersIndexByName(name); i > -1 {
return component.Containers[i].CPURequests
}
return ""
},
"ContainerCPULimits": func(name string) string {
if i := component.GetContainersIndexByName(name); i > -1 {
return component.Containers[i].CPULimits
}
return ""
},
"ContainerMemReqs": func(name string) string {
if i := component.GetContainersIndexByName(name); i > -1 {
return component.Containers[i].MemoryRequests
}
return ""
},
"ContainerMemLimits": func(name string) string {
if i := component.GetContainersIndexByName(name); i > -1 {
return component.Containers[i].MemoryLimits
}
return ""
},
"ContainerConfig": func(name string) string {
return component.Config[name]
},
"IsCustomCloudProfile": func() bool {
return cs.Properties.IsCustomCloudProfile()
},
"IsAzureStackCloud": func() bool {
return cs.Properties.IsAzureStackCloud()
},
"IsKubernetesVersionGe": func(version string) bool {
return common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, version)
},
}
// Only the component being rendered gets its args helper installed; each one
// reads the corresponding config map from the orchestrator's KubernetesConfig.
if component.Name == common.APIServerComponentName {
ret["GetAPIServerArgs"] = func() string {
return common.GetOrderedEscapedKeyValsString(cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig)
}
}
if component.Name == common.ControllerManagerComponentName {
ret["GetControllerManagerArgs"] = func() string {
return common.GetOrderedEscapedKeyValsString(cs.Properties.OrchestratorProfile.KubernetesConfig.ControllerManagerConfig)
}
}
if component.Name == common.SchedulerComponentName {
ret["GetSchedulerArgs"] = func() string {
return common.GetOrderedEscapedKeyValsString(cs.Properties.OrchestratorProfile.KubernetesConfig.SchedulerConfig)
}
}
if component.Name == common.CloudControllerManagerComponentName {
ret["GetCloudControllerManagerArgs"] = func() string {
return common.GetOrderedEscapedKeyValsString(cs.Properties.OrchestratorProfile.KubernetesConfig.CloudControllerManagerConfig)
}
}
return ret
}
// getAddonFuncMap builds the template FuncMap used when rendering a Kubernetes
// addon manifest: container image/resource lookups, config values, cluster
// topology predicates, storage-class and replica helpers, and API-version
// selectors keyed off the orchestrator version.
// NOTE(review): unlike getComponentFuncMap, the container lookups here index
// with the result of GetAddonContainersIndexByName without a -1 guard —
// confirm the index is always valid for rendered addons.
func getAddonFuncMap(addon api.KubernetesAddon, cs *api.ContainerService) template.FuncMap {
return template.FuncMap{
"ContainerImage": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].Image
},
"ContainerCPUReqs": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].CPURequests
},
"ContainerCPULimits": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].CPULimits
},
"ContainerMemReqs": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].MemoryRequests
},
"ContainerMemLimits": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].MemoryLimits
},
"ContainerConfig": func(name string) string {
return addon.Config[name]
},
"ContainerConfigBase64": func(name string) string {
return base64.StdEncoding.EncodeToString([]byte(addon.Config[name]))
},
"HasWindows": func() bool {
return cs.Properties.HasWindows()
},
"IsCustomCloudProfile": func() bool {
return cs.Properties.IsCustomCloudProfile()
},
"HasLinux": func() bool {
return cs.Properties.AnyAgentIsLinux()
},
"IsAzureStackCloud": func() bool {
return cs.Properties.IsAzureStackCloud()
},
// Storage-class selection looks only at the first agent pool's profile.
"NeedsStorageAccountStorageClasses": func() bool {
return len(cs.Properties.AgentPoolProfiles) > 0 && cs.Properties.AgentPoolProfiles[0].StorageProfile == api.StorageAccount
},
"NeedsManagedDiskStorageClasses": func() bool {
return len(cs.Properties.AgentPoolProfiles) > 0 && cs.Properties.AgentPoolProfiles[0].StorageProfile == api.ManagedDisks
},
"UsesCloudControllerManager": func() bool {
return to.Bool(cs.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager)
},
"HasAvailabilityZones": func() bool {
return cs.Properties.HasAvailabilityZones()
},
"HasAgentPoolAvailabilityZones": func() bool {
return cs.Properties.HasAgentPoolAvailabilityZones()
},
// NOTE(review): this returns as soon as any pool has contributed zones, so
// zones from later pools are ignored — confirm this is the intended behavior.
"GetAgentPoolZones": func() string {
if len(cs.Properties.AgentPoolProfiles) == 0 {
return ""
}
var zones string
for _, pool := range cs.Properties.AgentPoolProfiles {
if pool.AvailabilityZones != nil {
for _, zone := range pool.AvailabilityZones {
zones += fmt.Sprintf("\n    - %s-%s", cs.Location, zone)
}
}
if zones != "" {
return zones
}
}
return zones
},
// Windows-only clusters run a single CSI controller replica.
"CSIControllerReplicas": func() string {
replicas := "2"
if cs.Properties.HasWindows() && !cs.Properties.AnyAgentIsLinux() {
replicas = "1"
}
return replicas
},
"ShouldEnableCSISnapshotFeature": func(csiDriverName string) bool {
// Snapshot is not available for Windows clusters
if cs.Properties.HasWindows() && !cs.Properties.AnyAgentIsLinux() {
return false
}
switch csiDriverName {
case common.AzureDiskCSIDriverAddonName:
// Snapshot feature for Azure Disk CSI Driver is in beta, requiring K8s 1.17+
return common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, "1.17.0")
case common.AzureFileCSIDriverAddonName:
// Snapshot feature for Azure File CSI Driver is in alpha, requiring K8s 1.13-1.16
return common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, "1.13.0") &&
!common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, "1.17.0")
}
return false
},
"IsKubernetesVersionGe": func(version string) bool {
return common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, version)
},
"GetAADPodIdentityTaintKey": func() string {
return common.AADPodIdentityTaintKey
},
"GetMode": func() string {
return addon.Mode
},
"GetClusterSubnet": func() string {
return cs.Properties.OrchestratorProfile.KubernetesConfig.ClusterSubnet
},
"IsAzureCNI": func() bool {
return cs.Properties.OrchestratorProfile.IsAzureCNI()
},
// The API-version selectors below all switch on K8s 1.22, where the v1beta1
// variants of these APIs were removed.
"GetCRDAPIVersion": func() string {
if common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, "1.22.0") {
return "apiextensions.k8s.io/v1"
}
return "apiextensions.k8s.io/v1beta1"
},
"GetRBACAPIVersion": func() string {
if common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, "1.22.0") {
return "rbac.authorization.k8s.io/v1"
}
return "rbac.authorization.k8s.io/v1beta1"
},
"GetStorageAPIVersion": func() string {
if common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, "1.22.0") {
return "storage.k8s.io/v1"
}
return "storage.k8s.io/v1beta1"
},
"GetWebhookAPIVersion": func() string {
if common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, "1.22.0") {
return "admissionregistration.k8s.io/v1"
}
return "admissionregistration.k8s.io/v1beta1"
},
}
}
// getClusterAutoscalerAddonFuncMap builds the template FuncMap for the
// cluster-autoscaler addon: container lookups plus autoscaler-specific helpers
// (node group config, VM type, and the waagent volume/hostNetwork fragments
// that are only emitted when managed identity is in use).
// NOTE(review): like getAddonFuncMap, container lookups index without a -1
// guard — confirm GetAddonContainersIndexByName is valid for rendered addons.
func getClusterAutoscalerAddonFuncMap(addon api.KubernetesAddon, cs *api.ContainerService) template.FuncMap {
return template.FuncMap{
"ContainerImage": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].Image
},
"ContainerCPUReqs": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].CPURequests
},
"ContainerCPULimits": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].CPULimits
},
"ContainerMemReqs": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].MemoryRequests
},
"ContainerMemLimits": func(name string) string {
i := addon.GetAddonContainersIndexByName(name)
return addon.Containers[i].MemoryLimits
},
"ContainerConfig": func(name string) string {
return addon.Config[name]
},
"GetMode": func() string {
return addon.Mode
},
"GetClusterAutoscalerNodesConfig": func() string {
return api.GetClusterAutoscalerNodesConfig(addon, cs)
},
"GetBase64EncodedVMType": func() string {
return base64.StdEncoding.EncodeToString([]byte(cs.Properties.GetVMType()))
},
// With managed identity the autoscaler needs the waagent directory mounted
// read-only and host networking to reach the instance metadata endpoint.
"GetVolumeMounts": func() string {
if to.Bool(cs.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity) {
return "\n        - mountPath: /var/lib/waagent/\n          name: waagent\n          readOnly: true"
}
return ""
},
"GetVolumes": func() string {
if to.Bool(cs.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity) {
return "\n      - hostPath:\n          path: /var/lib/waagent/\n        name: waagent"
}
return ""
},
"GetHostNetwork": func() string {
if to.Bool(cs.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity) {
return "\n      hostNetwork: true"
}
return ""
},
"GetCloud": func() string {
cloudSpecConfig := cs.GetCloudSpecConfig()
return cloudSpecConfig.CloudName
},
"UseManagedIdentity": func() string {
if to.Bool(cs.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity) {
return "true"
}
return "false"
},
"IsKubernetesVersionGe": func(version string) bool {
return common.IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, version)
},
}
}
// getComponentsString renders every enabled Kubernetes component manifest into
// a concatenated cloud-init file block. Components are processed in sorted
// name order for deterministic output; cluster-init is written under
// /opt/azure/containers, everything else under /etc/kubernetes/manifests.
// NOTE(review): any asset/parse/decode error aborts and returns "", discarding
// components already rendered — confirm callers treat "" as failure.
func getComponentsString(cs *api.ContainerService, sourcePath string) string {
properties := cs.Properties
var result string
settingsMap := kubernetesComponentSettingsInit(properties)
var componentNames []string
for componentName := range settingsMap {
componentNames = append(componentNames, componentName)
}
// Map iteration order is random; sort for deterministic template output.
sort.Strings(componentNames)
for _, componentName := range componentNames {
setting := settingsMap[componentName]
if component, isEnabled := cs.Properties.OrchestratorProfile.KubernetesConfig.IsComponentEnabled(componentName); isEnabled {
var input string
// A setting either carries inline base64 data or names a source template file.
if setting.base64Data != "" {
var err error
input, err = getStringFromBase64(setting.base64Data)
if err != nil {
return ""
}
} else if setting.sourceFile != "" {
orchProfile := properties.OrchestratorProfile
// Template files can be specialized per K8s major.minor version.
versions := strings.Split(orchProfile.OrchestratorVersion, ".")
templ := template.New("component resolver template").Funcs(getComponentFuncMap(component, cs))
componentFile := getCustomDataFilePath(setting.sourceFile, sourcePath, versions[0]+"."+versions[1])
componentFileBytes, err := Asset(componentFile)
if err != nil {
return ""
}
_, err = templ.Parse(string(componentFileBytes))
if err != nil {
return ""
}
var buffer bytes.Buffer
_ = templ.Execute(&buffer, component)
input = buffer.String()
}
if componentName == common.ClusterInitComponentName {
result += getComponentString(input, "/opt/azure/containers", setting.destinationFile)
} else {
result += getComponentString(input, "/etc/kubernetes/manifests", setting.destinationFile)
}
}
}
return result
}
// getAddonsString renders every enabled Kubernetes addon manifest into a
// concatenated cloud-init file block destined for /etc/kubernetes/addons.
// Addons are processed in sorted name order for deterministic output; the
// cluster-autoscaler addon uses its specialized FuncMap.
// NOTE(review): any asset/parse/decode error aborts and returns "", discarding
// addons already rendered — confirm callers treat "" as failure.
func getAddonsString(cs *api.ContainerService, sourcePath string) string {
properties := cs.Properties
var result string
settingsMap := kubernetesAddonSettingsInit(properties)
var addonNames []string
for addonName := range settingsMap {
addonNames = append(addonNames, addonName)
}
// Map iteration order is random; sort for deterministic template output.
sort.Strings(addonNames)
for _, addonName := range addonNames {
setting := settingsMap[addonName]
if cs.Properties.OrchestratorProfile.KubernetesConfig.IsAddonEnabled(addonName) {
var input string
// A setting either carries inline base64 data or names a source template file.
if setting.base64Data != "" {
var err error
input, err = getStringFromBase64(setting.base64Data)
if err != nil {
return ""
}
} else {
orchProfile := properties.OrchestratorProfile
// Template files can be specialized per K8s major.minor version.
versions := strings.Split(orchProfile.OrchestratorVersion, ".")
addon := orchProfile.KubernetesConfig.GetAddonByName(addonName)
var templ *template.Template
switch addonName {
case "cluster-autoscaler":
templ = template.New("addon resolver template").Funcs(getClusterAutoscalerAddonFuncMap(addon, cs))
default:
templ = template.New("addon resolver template").Funcs(getAddonFuncMap(addon, cs))
}
addonFile := getCustomDataFilePath(setting.sourceFile, sourcePath, versions[0]+"."+versions[1])
addonFileBytes, err := Asset(addonFile)
if err != nil {
return ""
}
_, err = templ.Parse(string(addonFileBytes))
if err != nil {
return ""
}
var buffer bytes.Buffer
_ = templ.Execute(&buffer, addon)
input = buffer.String()
}
result += getComponentString(input, "/etc/kubernetes/addons", setting.destinationFile)
}
}
return result
}
// getKubernetesSubnets renders extra pod CIDR subnet entries (10.244.<n>.0/24)
// for every Windows agent node, starting after the CIDR indices consumed by
// the masters and Linux agents. Each entry is preceded by ",\n" so the
// fragment extends an existing JSON list.
func getKubernetesSubnets(properties *api.Properties) string {
subnetString := `{
"name": "podCIDR%d",
"properties": {
"addressPrefix": "10.244.%d.0/24",
"networkSecurityGroup": {
"id": "[variables('nsgID')]"
},
"routeTable": {
"id": "[variables('routeTableID')]"
}
}
}`
var buf bytes.Buffer
cidrIndex := getKubernetesPodStartIndex(properties)
// One subnet per Windows node; Linux nodes are covered by earlier indices.
for _, agentProfile := range properties.AgentPoolProfiles {
if agentProfile.OSType == api.Windows {
for i := 0; i < agentProfile.Count; i++ {
buf.WriteString(",\n")
buf.WriteString(fmt.Sprintf(subnetString, cidrIndex, cidrIndex))
cidrIndex++
}
}
}
return buf.String()
}
// getKubernetesPodStartIndex returns the first pod CIDR index available after
// the indices consumed by the master nodes and all non-Windows agent nodes.
func getKubernetesPodStartIndex(properties *api.Properties) int {
	nodeCount := properties.MasterProfile.Count
	for _, profile := range properties.AgentPoolProfiles {
		if profile.OSType != api.Windows {
			nodeCount += profile.Count
		}
	}
	return nodeCount + 1
}
// getMasterLinkedTemplateText returns the linked-template JSON for running an
// extension on master nodes; singleOrAll == "single" limits it to one VM.
func getMasterLinkedTemplateText(orchestratorType string, extensionProfile *api.ExtensionProfile, singleOrAll string) (string, error) {
	extTargetVMNamePrefix := "variables('masterVMNamePrefix')"
	// Due to upgrade k8s sometimes needs to install just some of the nodes.
	loopOffset := "variables('masterOffset')"
	loopCount := "[sub(variables('masterCount'), variables('masterOffset'))]"
	if strings.EqualFold(singleOrAll, "single") {
		loopCount = "1"
	}
	return internalGetPoolLinkedTemplateText(extTargetVMNamePrefix, orchestratorType, loopCount, loopOffset, extensionProfile)
}
// getAgentPoolLinkedTemplateText builds the linked-template text for running
// an extension against the given agent pool. singleOrAll=="single" limits the
// run to one VM; availability-set pools subtract the pool offset so already
// deployed VMs are not re-targeted on scale-up.
func getAgentPoolLinkedTemplateText(agentPoolProfile *api.AgentPoolProfile, orchestratorType string, extensionProfile *api.ExtensionProfile, singleOrAll string) (string, error) {
	extTargetVMNamePrefix := fmt.Sprintf("variables('%sVMNamePrefix')", agentPoolProfile.Name)
	// BUGFIX: the expression previously had an unbalanced extra ")" —
	// "[variables('%sCount'))]" — which is not a valid ARM template expression.
	loopCount := fmt.Sprintf("[variables('%sCount')]", agentPoolProfile.Name)
	loopOffset := ""
	// Availability sets can have an offset since we don't redeploy vms.
	// So we don't want to rerun these extensions in scale up scenarios.
	if agentPoolProfile.IsAvailabilitySets() {
		loopCount = fmt.Sprintf("[sub(variables('%sCount'), variables('%sOffset'))]",
			agentPoolProfile.Name, agentPoolProfile.Name)
		loopOffset = fmt.Sprintf("variables('%sOffset')", agentPoolProfile.Name)
	}

	if strings.EqualFold(singleOrAll, "single") {
		loopCount = "1"
	}
	return internalGetPoolLinkedTemplateText(extTargetVMNamePrefix, orchestratorType, loopCount,
		loopOffset, extensionProfile)
}
// internalGetPoolLinkedTemplateText downloads the extension's template-link
// text and substitutes the EXTENSION_* placeholders for the target pool.
func internalGetPoolLinkedTemplateText(extTargetVMNamePrefix, orchestratorType, loopCount, loopOffset string, extensionProfile *api.ExtensionProfile) (string, error) {
	text, err := getLinkedTemplateTextForURL(extensionProfile.RootURL, orchestratorType, extensionProfile.Name, extensionProfile.Version, extensionProfile.URLQuery)
	if err != nil {
		return "", err
	}

	// The target VM type is inferred from the prefix naming convention.
	vmType := "agent"
	if strings.Contains(extTargetVMNamePrefix, "master") {
		vmType = "master"
	}
	text = strings.Replace(text, "EXTENSION_TARGET_VM_TYPE", vmType, -1)

	paramsRef := fmt.Sprintf("[parameters('%sParameters')]", extensionProfile.Name)
	text = strings.Replace(text, "EXTENSION_PARAMETERS_REPLACE", paramsRef, -1)
	text = strings.Replace(text, "EXTENSION_URL_REPLACE", extensionProfile.RootURL, -1)
	text = strings.Replace(text, "EXTENSION_TARGET_VM_NAME_PREFIX", extTargetVMNamePrefix, -1)

	// A purely numeric loop count replaces the quoted placeholder so the JSON
	// ends up with a number; an ARM expression keeps the surrounding quotes.
	if _, convErr := strconv.Atoi(loopCount); convErr == nil {
		text = strings.Replace(text, "\"EXTENSION_LOOP_COUNT\"", loopCount, -1)
	} else {
		text = strings.Replace(text, "EXTENSION_LOOP_COUNT", loopCount, -1)
	}
	text = strings.Replace(text, "EXTENSION_LOOP_OFFSET", loopOffset, -1)
	return text, nil
}
func validateProfileOptedForExtension(extensionName string, profileExtensions []api.Extension) (bool, string) {
for _, extension := range profileExtensions {
if extensionName == extension.Name {
return true, extension.SingleOrAll
}
}
return false, ""
}
// getLinkedTemplateTextForURL returns the contents of template-link.json
// located under rootURL/extensions/extensionName/version. It errors when the
// extension does not support the orchestrator or cannot be downloaded. The
// rootURL parameter exists so tests can point at an alternate extension root.
func getLinkedTemplateTextForURL(rootURL, orchestrator, extensionName, version, query string) (string, error) {
	supported, err := orchestratorSupportsExtension(rootURL, orchestrator, extensionName, version, query)
	if !supported {
		return "", errors.Wrap(err, "Extension not supported for orchestrator")
	}

	linkBytes, err := getExtensionResource(rootURL, extensionName, version, "template-link.json", query)
	if err != nil {
		return "", err
	}
	return string(linkBytes), nil
}
// orchestratorSupportsExtension downloads the extension's
// supported-orchestrators.json and reports whether orchestrator is listed.
// A false return is always accompanied by a non-nil error explaining why.
func orchestratorSupportsExtension(rootURL, orchestrator, extensionName, version, query string) (bool, error) {
	orchestratorBytes, err := getExtensionResource(rootURL, extensionName, version, "supported-orchestrators.json", query)
	if err != nil {
		return false, err
	}

	var supportedOrchestrators []string
	// BUGFIX: wrap the unmarshal error instead of discarding it so the
	// underlying JSON problem remains visible to the caller.
	if err = json.Unmarshal(orchestratorBytes, &supportedOrchestrators); err != nil {
		return false, errors.Wrapf(err, "Unable to parse supported-orchestrators.json for Extension %s Version %s", extensionName, version)
	}

	if !stringInSlice(orchestrator, supportedOrchestrators) {
		return false, errors.Errorf("Orchestrator: %s not in list of supported orchestrators for Extension: %s Version %s", orchestrator, extensionName, version)
	}
	return true, nil
}
// getExtensionResource GETs a single extension file (e.g. template-link.json)
// from the extension root and returns its raw bytes. Any transport error,
// non-200 status, or body-read failure is wrapped with the request context.
//
// NOTE(review): http.Get uses the default client with no timeout — consider a
// client with a deadline if this can be called on untrusted/slow endpoints.
func getExtensionResource(rootURL, extensionName, version, fileName, query string) ([]byte, error) {
	requestURL := getExtensionURL(rootURL, extensionName, version, fileName, query)
	res, err := http.Get(requestURL)
	if err != nil {
		return nil, errors.Wrapf(err, "Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s", extensionName, version, fileName, requestURL)
	}
	defer res.Body.Close()

	// Use the named constant and %d directly; output text is unchanged
	// (strconv.Itoa + %s rendered the same digits).
	if res.StatusCode != http.StatusOK {
		return nil, errors.Errorf("Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s StatusCode: %d: Status: %s", extensionName, version, fileName, requestURL, res.StatusCode, res.Status)
	}

	body, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, errors.Wrapf(err, "Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s", extensionName, version, fileName, requestURL)
	}
	return body, nil
}
// getExtensionURL assembles the download URL for one extension resource:
// rootURL + "extensions/<name>/<version>/<fileName>", appending "?query"
// when a query string is supplied. rootURL is used verbatim (it is expected
// to already end with a separator).
func getExtensionURL(rootURL, extensionName, version, fileName, query string) string {
	url := rootURL + strings.Join([]string{"extensions", extensionName, version, fileName}, "/")
	if query != "" {
		return url + "?" + query
	}
	return url
}
// stringInSlice reports whether a appears in list.
func stringInSlice(a string, list []string) bool {
	for i := range list {
		if list[i] == a {
			return true
		}
	}
	return false
}
// wrapAsVariableObject emits an ARM-template fragment that dereferences
// property v on variable object o, quoted for embedding inside a larger
// single-quoted expression string.
func wrapAsVariableObject(o, v string) string {
	return "',variables('" + o + "')." + v + ",'"
}
// getSSHPublicKeysPowerShell renders the profile's SSH public keys as a
// comma-separated list of double-quoted strings for interpolation into a
// PowerShell script. Returns "" for a nil profile or an empty key list.
func getSSHPublicKeysPowerShell(linuxProfile *api.LinuxProfile) string {
	if linuxProfile == nil {
		return ""
	}
	// Idiom: collect the quoted keys and join once instead of growing a
	// string with += in a loop.
	keys := make([]string, len(linuxProfile.SSH.PublicKeys))
	for i, publicKey := range linuxProfile.SSH.PublicKeys {
		keys[i] = `"` + strings.TrimSpace(publicKey.KeyData) + `"`
	}
	return strings.Join(keys, ", ")
}
// getWindowsMasterSubnetARMParam picks the ARM parameter reference for the
// Windows master subnet: custom VNETs use the full vnetCidr parameter, the
// default topology uses masterSubnet.
func getWindowsMasterSubnetARMParam(masterProfile *api.MasterProfile) string {
	if masterProfile == nil || !masterProfile.IsCustomVNET() {
		return "',parameters('masterSubnet'),'"
	}
	return "',parameters('vnetCidr'),'"
}
|
package main
// main is an intentionally empty entry point; this file exists to host the
// lowestCommonAncestor exercise below.
func main() {
}
/**
二叉树的最近公共祖先
给定一个二叉树, 找到该树中两个指定节点的最近公共祖先。
百度百科中最近公共祖先的定义为:“对于有根树 T 的两个结点 p、q,最近公共祖先表示为一个结点 x,满足 x 是 p、q 的祖先且 x 的深度尽可能大(一个节点也可以是它自己的祖先)。”
例如,给定如下二叉树: root = [3,5,1,6,2,0,8,null,null,7,4]

示例 1:
```
输入: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
输出: 3
解释: 节点 5 和节点 1 的最近公共祖先是节点 3。
```
示例 2:
```
输入: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
输出: 5
解释: 节点 5 和节点 4 的最近公共祖先是节点 5。因为根据定义最近公共祖先节点可以为节点本身。
```
说明:
所有节点的值都是唯一的。
p、q 为不同节点且均存在于给定的二叉树中。
*/
/**
 * Definition for TreeNode.
 * type TreeNode struct {
 *     Val   int
 *     Left  *TreeNode
 *     Right *TreeNode
 * }
 */
// lowestCommonAncestor returns the deepest node that is an ancestor of both
// p and q (a node counts as its own ancestor). Node values are assumed
// unique, so comparing Val identifies the target nodes.
func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {
	if root == nil {
		return nil
	}
	// Reaching either target means the answer on this path is the target
	// itself (the other node, if present below, is its descendant).
	if root.Val == p.Val || root.Val == q.Val {
		return root
	}

	left := lowestCommonAncestor(root.Left, p, q)
	right := lowestCommonAncestor(root.Right, p, q)
	switch {
	case left != nil && right != nil:
		// Targets split across both subtrees: this node is the LCA.
		return root
	case left != nil:
		return left
	default:
		return right
	}
}
|
package ads
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"strconv"
"time"
)
//func (dt *ADSSymbol) parse(offset uint32, data []byte) { /*{{{*/
// parse decodes the raw ADS payload window for this symbol starting at
// offset, recursing into children for composite symbols. For leaf symbols it
// renders the bytes as a string according to DataType, and — when the value
// differs from the stored one and the per-symbol rate limit has elapsed —
// records the new value and propagates the Changed flag up the parent chain.
func (dt *ADSSymbol) parse(data []byte, offset int) { /*{{{*/
	start := offset
	stop := start + int(dt.Length)

	// Composite symbol: each child parses its slice of this symbol's window
	// using the child's own relative offset.
	if dt.Childs != nil {
		for _, child := range dt.Childs {
			child.parse(data[offset:stop], int(child.Offset))
		}
	}

	if len(dt.Childs) == 0 {
		var newValue = "nil"

		if len(data) < int(dt.Length) {
			// BUGFIX: the original built this message with fmt.Errorf and
			// discarded the result (go vet: unused result); report it.
			fmt.Printf("Incoming data is too small, !0<%d<%d<%d\n", start, stop, len(data))
			return
		}

		switch dt.DataType {
		case "BOOL":
			if stop-start != 1 {
				return
			}
			if data[start:stop][0] > 0 {
				newValue = "True"
			} else {
				newValue = "False"
			}
		case "BYTE", "USINT": // Unsigned Short INT 0 to 255
			if stop-start != 1 {
				return
			}
			newValue = strconv.FormatUint(uint64(data[start]), 10)
		case "SINT": // Short INT -128 to 127
			if stop-start != 1 {
				return
			}
			newValue = strconv.FormatInt(int64(int8(data[start])), 10)
		case "UINT", "WORD":
			if stop-start != 2 {
				return
			}
			i := binary.LittleEndian.Uint16(data[start:stop])
			newValue = strconv.FormatUint(uint64(i), 10)
		case "UDINT", "DWORD":
			if stop-start != 4 {
				return
			}
			i := binary.LittleEndian.Uint32(data[start:stop])
			newValue = strconv.FormatUint(uint64(i), 10)
		case "INT":
			if stop-start != 2 {
				return
			}
			// BUGFIX: the original read via binary.Read into i and then
			// immediately overwrote i with this decode — dead code removed.
			i := int16(binary.LittleEndian.Uint16(data[start:stop]))
			newValue = strconv.FormatInt(int64(i), 10)
		case "DINT":
			if stop-start != 4 {
				return
			}
			i := int32(binary.LittleEndian.Uint32(data[start:stop]))
			newValue = strconv.FormatInt(int64(i), 10)
		case "REAL":
			if stop-start != 4 {
				return
			}
			i := binary.LittleEndian.Uint32(data[start:stop])
			f := math.Float32frombits(i)
			newValue = strconv.FormatFloat(float64(f), 'f', -1, 32)
		case "LREAL":
			if stop-start != 8 {
				return
			}
			i := binary.LittleEndian.Uint64(data[start:stop])
			f := math.Float64frombits(i)
			newValue = strconv.FormatFloat(f, 'f', -1, 64)
		case "STRING":
			trimmedBytes := bytes.TrimSpace(data[start:stop])
			// BUGFIX: guard against a missing NUL terminator; IndexByte
			// returns -1 in that case and the slice below would panic.
			end := bytes.IndexByte(trimmedBytes, byte(0))
			if end < 0 {
				end = len(trimmedBytes)
			}
			newValue = string(trimmedBytes[:end])
		case "TIME":
			if stop-start != 4 {
				return
			}
			// Milliseconds since midnight; the -1h shift appears to be a
			// timezone compensation — TODO confirm against the PLC config.
			i := binary.LittleEndian.Uint32(data[start:stop])
			t := time.Unix(0, int64(uint64(i)*uint64(time.Millisecond))-int64(time.Hour))
			newValue = t.Truncate(time.Millisecond).Format("15:04:05.999999999")
		case "TOD":
			if stop-start != 4 {
				return
			}
			i := binary.LittleEndian.Uint32(data[start:stop])
			t := time.Unix(0, int64(uint64(i)*uint64(time.Millisecond))-int64(time.Hour))
			newValue = t.Truncate(time.Millisecond).Format("15:04")
		case "DATE":
			if stop-start != 4 {
				return
			}
			i := binary.LittleEndian.Uint32(data[start:stop])
			t := time.Unix(0, int64(uint64(i)*uint64(time.Second)))
			newValue = t.Truncate(time.Millisecond).Format("2006-01-02")
		case "DT":
			if stop-start != 4 {
				return
			}
			i := binary.LittleEndian.Uint32(data[start:stop])
			t := time.Unix(0, int64(uint64(i)*uint64(time.Second))-int64(time.Hour))
			newValue = t.Truncate(time.Millisecond).Format("2006-01-02 15:04:05")
		default:
			newValue = "nil"
		}

		// Record the new value only when it changed AND the per-symbol
		// minimum update interval has passed (simple rate limiting).
		if strcmp(dt.Value, newValue) != 0 &&
			time.Now().UnixNano()-dt.LastUpdateTime > dt.MinUpdateInterval {
			dt.LastUpdateTime = time.Now().UnixNano()
			dt.Value = newValue
			dt.Valid = true
			dt.Changed = true
			dt.updateChanged(true)
			//fmt.Println(dt.FullName, dt.Value)
		}
	}
}
// updateChanged sets the Changed flag on this symbol and every ancestor, so
// container symbols reflect updates made to their leaves.
func (dt *ADSSymbol) updateChanged(value bool) {
	for node := dt; node != nil; node = node.Parent {
		node.Changed = value
	}
}
// writeToNode serializes value according to the symbol's DataType and passes
// the encoded bytes to writeBuffArray for transmission. Only leaf symbols can
// be written; composite symbols (with children) are rejected.
//
// NOTE(review): the offset parameter is currently unused — confirm whether it
// was meant to select a write position within the symbol.
func (symbol *ADSSymbol) writeToNode(value string, offset int) (err error) { /*{{{*/
	// log.Warn("Write (", symbol.Area, ":", symbol.Offset, "): ", symbol.Name)
	if len(symbol.Childs) != 0 {
		err = fmt.Errorf("Cannot write to a whole struct at once!")
		return
	}

	buf := bytes.NewBuffer([]byte{})

	// Each case parses the textual value and little-endian encodes it at the
	// width the PLC type expects. binary.Write into a bytes.Buffer cannot
	// fail for fixed-size values, so its error is intentionally ignored.
	switch symbol.DataType {
	case "BOOL":
		v, e := strconv.ParseBool(value)
		if e != nil {
			return e
		}
		if v {
			buf.Write([]byte{1})
		} else {
			buf.Write([]byte{0})
		}
	case "BYTE", "USINT": // Unsigned Short INT 0 to 255
		v, e := strconv.ParseUint(value, 10, 8)
		if e != nil {
			return e
		}
		v8 := uint8(v)
		binary.Write(buf, binary.LittleEndian, &v8)
	case "UINT", "WORD":
		v, e := strconv.ParseUint(value, 10, 16)
		if e != nil {
			return e
		}
		v16 := uint16(v)
		binary.Write(buf, binary.LittleEndian, &v16)
	case "UDINT", "DWORD":
		v, e := strconv.ParseUint(value, 10, 32)
		if e != nil {
			return e
		}
		v32 := uint32(v)
		binary.Write(buf, binary.LittleEndian, &v32)
	case "SINT": // Short INT -128 to 127
		v, e := strconv.ParseInt(value, 10, 8)
		if e != nil {
			return e
		}
		v8 := int8(v)
		binary.Write(buf, binary.LittleEndian, &v8)
	case "INT":
		v, e := strconv.ParseInt(value, 10, 16)
		if e != nil {
			return e
		}
		v16 := int16(v)
		binary.Write(buf, binary.LittleEndian, &v16)
	case "DINT":
		v, e := strconv.ParseInt(value, 10, 32)
		if e != nil {
			return e
		}
		v32 := int32(v)
		binary.Write(buf, binary.LittleEndian, &v32)
	case "REAL":
		v, e := strconv.ParseFloat(value, 32)
		if e != nil {
			return e
		}
		v32 := math.Float32bits(float32(v))
		binary.Write(buf, binary.LittleEndian, &v32)
	case "LREAL":
		v, e := strconv.ParseFloat(value, 64)
		if e != nil {
			return e
		}
		v64 := math.Float64bits(v)
		binary.Write(buf, binary.LittleEndian, &v64)
	case "STRING":
		// Fixed-width field: the value is copied into a zero-padded buffer
		// of the symbol's declared length (implicit NUL termination).
		newBuf := make([]byte, symbol.Length)
		copy(newBuf, []byte(value))
		buf.Write(newBuf)
	/*case "TIME":
	if stop-start != 4 {return}
	i := binary.LittleEndian.Uint32(data[start:stop])
	t := time.Unix(0, int64(uint64(i)*uint64(time.Millisecond))-int64(time.Hour) )
	newValue = t.Truncate(time.Millisecond).Format("15:04:05.999999999")
	case "TOD":
	if stop-start != 4 {return}
	i := binary.LittleEndian.Uint32(data[start:stop])
	t := time.Unix(0, int64(uint64(i)*uint64(time.Millisecond))-int64(time.Hour) )
	newValue = t.Truncate(time.Millisecond).Format("15:04")
	case "DATE":
	if stop-start != 4 {return}
	i := binary.LittleEndian.Uint32(data[start:stop])
	t := time.Unix(0, int64(uint64(i)*uint64(time.Second)) )
	newValue = t.Truncate(time.Millisecond).Format("2006-01-02")
	case "DT":
	if stop-start != 4 {return}
	i := binary.LittleEndian.Uint32(data[start:stop])
	t := time.Unix(0, int64(uint64(i)*uint64(time.Second))-int64(time.Hour) )
	newValue = t.Truncate(time.Millisecond).Format("2006-01-02 15:04:05")*/
	default:
		err = fmt.Errorf("Datatype '%s' write is not implemented yet!", symbol.DataType)
		return
	}

	// NOTE(review): writeBuffArray's return (if any) is discarded here —
	// confirm whether a write failure should surface to the caller.
	symbol.writeBuffArray(buf.Bytes())
	// set
	//symbol.Self.conn.Write(symbol.Area, symbol.Offset, buf.Bytes())
	return nil
}
// strcmp compares two strings C-style: it returns the byte difference at the
// first position where they differ, the length difference when one is a
// prefix of the other, and 0 when they are equal.
func strcmp(a, b string) int {
	limit := len(a)
	if len(b) < limit {
		limit = len(b)
	}
	for i := 0; i < limit; i++ {
		if d := int(a[i]) - int(b[i]); d != 0 {
			return d
		}
	}
	return len(a) - len(b)
}
|
package ccconvert
import (
"fmt"
"image"
"image/color"
"image/draw"
"image/jpeg"
"net/http"
"os"
)
// Conversion modes supported by this package.
const (
	UnknownConvertMode = 0 // unrecognized source format
	Png2Jpg            = 1 // PNG source re-encoded as JPEG
	Jpg2Jpg            = 2 // JPEG source re-encoded as JPEG
)
func readRaw(src string, decode func(file *os.File, ext string) (image.Image, error)) (image.Image, error) {
f, err := os.Open(src)
if err != nil {
fmt.Println(err)
return nil, err
}
defer f.Close()
buff := make([]byte, 512)
_, err = f.Read(buff)
if err != nil {
return nil, err
}
// seek to begin
// Cool.Cat
f.Seek(0, 0)
var img image.Image
ext := http.DetectContentType(buff)
img, err = decode(f, ext)
if err != nil {
fmt.Println(err)
return nil, err
}
return img, nil
}
// Convert reads the image at src (via decode), flattens it onto an RGBA
// canvas — optionally over a solid bgColor background — and writes the result
// to dst through encode at JPEG quality 80. A nil bgColor composites the
// source directly with draw.Src.
func Convert(src, dst string, bgColor color.Color, decode func(file *os.File, ext string) (image.Image, error), encode func(file *os.File, rgba *image.RGBA, options *jpeg.Options) error) error {
	img, err := readRaw(src, decode)
	// BUGFIX: check the error instead of only the image — a decoder that
	// returned (nil, nil) would previously be reported as success (nil err).
	if err != nil {
		return err
	}
	if img == nil {
		return fmt.Errorf("decode returned no image for %s", src)
	}

	out, err := os.Create(dst)
	if err != nil {
		fmt.Println(err)
		return err
	}
	defer out.Close()

	jpg := image.NewRGBA(image.Rect(0, 0, img.Bounds().Max.X, img.Bounds().Max.Y))
	if bgColor == nil {
		// Draw image to background
		draw.Draw(jpg, jpg.Bounds(), img, img.Bounds().Min, draw.Src)
	} else {
		// Draw background using custom colors
		draw.Draw(jpg, jpg.Bounds(), &image.Uniform{C: bgColor}, image.Point{}, draw.Src)
		// Draw image to new background
		draw.Draw(jpg, jpg.Bounds(), img, img.Bounds().Min, draw.Over)
	}

	// Encode to dest image format
	return encode(out, jpg, &jpeg.Options{Quality: 80})
}
|
package memory
import (
"encoding/hex"
"fmt"
"github.com/Secured-Finance/dione/blockchain/database"
types2 "github.com/Secured-Finance/dione/blockchain/types"
"github.com/patrickmn/go-cache"
)
const (
	// LatestBlockHeightKey is the cache key under which the most recent
	// block height is stored.
	LatestBlockHeightKey = "latest_block_height"
)

// Database is an in-memory block store backed by go-cache; blocks are
// indexed both by hex-encoded hash and by "height/<n>" keys.
type Database struct {
	db *cache.Cache
}
// NewDatabase constructs an in-memory block database whose entries never
// expire and are never purged.
func NewDatabase() *Database {
	store := cache.New(cache.NoExpiration, 0)
	return &Database{db: store}
}
// StoreBlock indexes the block under both its hex hash and its height key.
// It never fails; the error return satisfies the database interface.
func (d *Database) StoreBlock(block *types2.Block) error {
	hashKey := hex.EncodeToString(block.Header.Hash)
	heightKey := fmt.Sprintf("height/%d", block.Header.Height)
	d.db.SetDefault(hashKey, block)
	d.db.SetDefault(heightKey, block)
	return nil
}
// HasBlock reports whether a block with the given hash is stored.
func (d *Database) HasBlock(blockHash []byte) (bool, error) {
	key := hex.EncodeToString(blockHash)
	if _, found := d.db.Get(key); found {
		return true, nil
	}
	return false, nil
}
// FetchBlockData returns the transactions of the block with the given hash,
// or database.ErrBlockNotFound (via FetchBlock) when absent.
func (d *Database) FetchBlockData(blockHash []byte) ([]*types2.Transaction, error) {
	block, err := d.FetchBlock(blockHash)
	if err != nil {
		return nil, err
	}
	return block.Data, nil
}
// FetchBlockHeader returns the header of the block with the given hash, or
// database.ErrBlockNotFound (via FetchBlock) when absent.
func (d *Database) FetchBlockHeader(blockHash []byte) (*types2.BlockHeader, error) {
	block, err := d.FetchBlock(blockHash)
	if err != nil {
		return nil, err
	}
	return block.Header, nil
}
// FetchBlock looks a block up by its hash and returns
// database.ErrBlockNotFound when no entry exists.
func (d *Database) FetchBlock(blockHash []byte) (*types2.Block, error) {
	cached, found := d.db.Get(hex.EncodeToString(blockHash))
	if found {
		return cached.(*types2.Block), nil
	}
	return nil, database.ErrBlockNotFound
}
// FetchBlockByHeight looks a block up by its height index and returns
// database.ErrBlockNotFound when no entry exists.
func (d *Database) FetchBlockByHeight(height uint64) (*types2.Block, error) {
	cached, found := d.db.Get(fmt.Sprintf("height/%d", height))
	if found {
		return cached.(*types2.Block), nil
	}
	return nil, database.ErrBlockNotFound
}
// FetchBlockHeaderByHeight returns the header of the block stored at the
// given height, or database.ErrBlockNotFound when no entry exists.
func (d *Database) FetchBlockHeaderByHeight(height uint64) (*types2.BlockHeader, error) {
	cached, found := d.db.Get(fmt.Sprintf("height/%d", height))
	if found {
		return cached.(*types2.Block).Header, nil
	}
	return nil, database.ErrBlockNotFound
}
// GetLatestBlockHeight returns the most recently stored block height, or
// database.ErrLatestHeightNil when none has been set yet.
func (d *Database) GetLatestBlockHeight() (uint64, error) {
	cached, found := d.db.Get(LatestBlockHeightKey)
	if found {
		return cached.(uint64), nil
	}
	return 0, database.ErrLatestHeightNil
}
// SetLatestBlockHeight records height as the latest block height. It never
// fails; the error return satisfies the database interface.
func (d *Database) SetLatestBlockHeight(height uint64) error {
	d.db.SetDefault(LatestBlockHeightKey, height)
	return nil
}
|
package clock
import (
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
// suit is a test-suite body that exercises a Clock implementation.
type suit func(c Clock)

// realAndMockClock runs the same suite against both the real clock and a
// mock clock pinned to the current time, so both implementations are held to
// identical expectations.
func realAndMockClock(t *testing.T, test suit) {
	Convey("测试 real clock", t, func() {
		c := NewRealClock()
		test(c)
	})
	Convey("测试 mock clock", t, func() {
		now := time.Now()
		c := NewMockClock(now)
		test(c)
	})
}
|
package api
import (
"encoding/hex"
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"github.com/noah-blockchain/autodeleg/internal/env"
"github.com/noah-blockchain/autodeleg/internal/gate"
"github.com/noah-blockchain/autodeleg/internal/helpers"
"github.com/noah-blockchain/noah-go-node/core/transaction"
"github.com/sirupsen/logrus"
"net/http"
"strconv"
"time"
)
// Delegations is the request body for Delegate: a list of hex-encoded,
// pre-signed delegate transactions to push in order.
type Delegations struct {
	Txs []string `json:"transactions"`
}

// inner carries the transaction hash inside a successful gate response.
type inner struct {
	Hash string `json:"hash"`
}

// TxDelegateResponse models the gate's success payload for a pushed tx.
type TxDelegateResponse struct {
	Data inner `json:"data"`
}
// Delegate accepts a JSON list of pre-signed delegate transactions and pushes
// them to the Noah gate one at a time, in order, from a background goroutine.
// Each transaction must carry the sender's next nonce and fit within the
// sender's balance; any failure stops the run. The HTTP handler itself
// returns immediately with "Delegation started!".
func Delegate(c *gin.Context) {
	var err error
	var url = fmt.Sprintf("%s/api/v1/transaction/push", env.GetEnv(env.NoahGateApi, ""))

	// NOTE: the local `gate` shadows the imported gate package below.
	gate, ok := c.MustGet("gate").(*gate.NoahGate)
	if !ok {
		c.JSON(http.StatusInternalServerError, gin.H{
			"error": gin.H{
				"code": 1,
				"log":  "Type cast error",
			},
		})
		return
	}

	var dlg Delegations
	if err = c.ShouldBindJSON(&dlg); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	// Start delegation in the background; the HTTP response does not wait.
	go func() {
		txCount := 0
		txLen := len(dlg.Txs)
		for txCount < txLen {
			tx := dlg.Txs[txCount]

			decodeString, err := hex.DecodeString(tx)
			if err != nil {
				// BUGFIX: the original Errorf calls had no format verb for
				// err ("... error: ", err), which go vet rejects and which
				// printed the message without the error.
				gate.Logger.WithFields(logrus.Fields{
					"transaction": tx,
				}).Errorf("Transaction decode error: %v", err)
				return
			}

			decodedTx, err := transaction.TxDecoder.DecodeFromBytes(decodeString)
			if err != nil {
				gate.Logger.WithFields(logrus.Fields{
					"transaction": tx,
				}).Errorf("Transaction decode error: %v", err)
				return
			}

			sender, err := decodedTx.Sender()
			if err != nil {
				gate.Logger.WithFields(logrus.Fields{
					"transaction": tx,
				}).Errorf("Transaction decode error: %v", err)
				return
			}

			address := sender.String()
			nonce := decodedTx.Nonce

			qNoahBalance, err := gate.GetBalance(address)
			if err != nil {
				// BUGFIX: these failures were previously swallowed silently.
				gate.Logger.WithFields(logrus.Fields{"address": address}).Error(err)
				return
			}

			resultNonce, err := gate.GetNonce(address)
			if err != nil {
				gate.Logger.WithFields(logrus.Fields{"address": address}).Error(err)
				return
			}

			addrNonce, err := strconv.ParseUint(*resultNonce, 10, 64)
			if err != nil {
				gate.Logger.WithFields(logrus.Fields{
					"address": address,
				}).Warn(err)
				return
			}

			// The transaction must be the very next one for this sender.
			if nonce-1 != addrNonce {
				gate.Logger.WithFields(logrus.Fields{
					"expected": nonce - 1,
					"got":      addrNonce,
				}).Info("nonce differ stop delegation")
				return
			}

			decodedTxData, ok := decodedTx.GetDecodedData().(*transaction.DelegateData)
			if !ok {
				// BUGFIX: err is always nil in this branch; report the real
				// problem instead of formatting a nil error.
				gate.Logger.WithFields(logrus.Fields{
					"transaction": tx,
				}).Error("Transaction decode error: not a DelegateData transaction")
				return
			}

			amount := decodedTxData.Value
			if amount.Cmp(qNoahBalance) > 0 {
				// BUGFIX: this case previously neither advanced txCount nor
				// returned, so the loop spun forever on the same tx.
				gate.Logger.WithFields(logrus.Fields{
					"address": address,
				}).Warn("insufficient balance, stop delegation")
				return
			}

			var payload = make(map[string]string)
			payload["transaction"] = tx

			delResp, err := helpers.HttpPost(url, payload)
			if err != nil {
				gate.Logger.Errorf("Transaction delegate error: %v", err)
				return
			}

			var body map[string]interface{}
			if err = json.Unmarshal(delResp, &body); err != nil {
				gate.Logger.Error(err)
				return
			}

			if _, exists := body["data"]; exists {
				var resp TxDelegateResponse
				if err = json.Unmarshal(delResp, &resp); err != nil {
					gate.Logger.Error(err)
					return
				}
				gate.Logger.WithFields(logrus.Fields{
					"hash": resp.Data.Hash,
				}).Info("Tx success")
				txCount++
				// Wait for the pushed tx to be included so the sender's
				// nonce advances before the next transaction is sent.
				time.Sleep(time.Second * 10)
			} else {
				gate.Logger.WithFields(logrus.Fields{
					"error": body,
				}).Warn("GATE ERROR")
				// BUGFIX: previously this branch looped immediately and
				// re-posted the same transaction forever; stop instead.
				return
			}
		}
	}()

	c.JSON(http.StatusOK, gin.H{"message": "Delegation started!"})
}
// Index reports the API's name and version.
func Index(c *gin.Context) {
	info := gin.H{
		"name":    "Noah Auto-delegator API",
		"version": "0.0.1",
	}
	c.JSON(200, info)
}
|
package xtractr
/* This file contains methods that support the extract queuing system. */
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
// Xtract defines the queue input data: data needed to extract files in a path.
// Fill this out to create a queued extraction and pass it into Xtractr.Extract().
// If a CBFunction is provided it runs when the queued extract begins w/ Response.Done=false.
// The CBFunction is called again when the extraction finishes w/ Response.Done=true.
// The CBChannel works the exact same way, except it's a channel instead of a blocking function.
type Xtract struct {
	Name       string           // Unused in this app; exposed for calling library.
	SearchPath string           // Folder path where extractable items are located.
	TempFolder bool             // Leave files in temporary folder? false=move files back to Searchpath
	DeleteOrig bool             // Delete Archives after successful extraction? Be careful.
	CBFunction func(*Response)  // Callback Function, runs twice per queued item.
	CBChannel  chan *Response   // Callback Channel, msg sent twice per queued item.
}
// Response is sent to the call-back function. The first CBFunction call is just
// a notification that the extraction has started. You can determine it's the first
// call by checking Response.Done: false = started, true = finished. When done=false
// the only other meaningful data provided is re.Archives, re.Output and re.Queued.
type Response struct {
	Done     bool          // Extract Started (false) or Finished (true).
	Size     int64         // Size of data written.
	Output   string        // Temporary output folder.
	Queued   int           // Items still in queue.
	Started  time.Time     // When this extract began.
	Elapsed  time.Duration // Elapsed extraction duration. ie. How long it took.
	Extras   []string      // Extra archives extracted from within an archive.
	Archives []string      // Initial archives found and extracted.
	NewFiles []string      // Files written to final path.
	Error    error         // Error encountered, only when done=true.
	X        *Xtract       // Copied from input data.
}
// Extract is how external code begins an extraction process against a path.
// To add an item to the extraction queue, create an Xtract struct with the
// search path set and pass it to this method. The current queue size is returned.
// Returns ErrQueueStopped (and -1) when the queue has been shut down.
func (x *Xtractr) Extract(ex *Xtract) (int, error) {
	if x.queue == nil {
		return -1, ErrQueueStopped
	}
	x.queue <- ex // goes to processQueue()
	return len(x.queue), nil
}
// processQueue is a worker loop: it consumes queued extraction jobs until the
// queue channel is closed. Several instances run concurrently as goroutines.
func (x *Xtractr) processQueue() {
	for {
		ex, ok := <-x.queue // jobs arrive via Extract()
		if !ok {
			return
		}
		x.extract(ex)
	}
}
// extract performs one queued extraction: it locates the archives under the
// job's search path, fires the "started" callbacks, then decompresses and
// reports completion. Invoked from processQueue() goroutines.
func (x *Xtractr) extract(ex *Xtract) {
	resp := &Response{
		X:        ex,
		Started:  time.Now(),
		Output:   strings.TrimRight(ex.SearchPath, `/\`) + x.Suffix, // tmp folder.
		Archives: FindCompressedFiles(ex.SearchPath),
		Queued:   len(x.queue),
	}

	// Nothing to do when the path holds no recognizable archives.
	if len(resp.Archives) < 1 {
		x.finishExtract(resp, ErrNoCompressedFiles)
		return
	}

	// Notify the caller (function and/or channel) that work has begun.
	if ex.CBFunction != nil {
		ex.CBFunction(resp)
	}
	if ex.CBChannel != nil {
		ex.CBChannel <- resp
	}

	// Work on a fresh Response so the callbacks above cannot race with the
	// fields mutated during decompression.
	resp = &Response{X: ex, Started: resp.Started, Output: resp.Output, Archives: resp.Archives}
	x.finishExtract(resp, x.decompressFiles(resp))
}
// finishExtract stamps the final status onto the response and delivers it via
// the job's callbacks. When the job registered no callback at all, a summary
// line is logged instead so the result is never silently lost.
func (x *Xtractr) finishExtract(re *Response, err error) {
	re.Error = err
	re.Elapsed = time.Since(re.Started)
	re.Done = true
	re.Queued = len(x.queue)

	notified := false
	if re.X.CBFunction != nil {
		re.X.CBFunction(re) // This lets the calling function know we've finished.
		notified = true
	}
	if re.X.CBChannel != nil {
		re.X.CBChannel <- re // This lets the calling function know we've finished.
		notified = true
	}
	if notified {
		return
	}

	// Only print a message if there is no callback function. Allows apps to print their own messages.
	if err != nil {
		x.Printf("Error Extracting: %s (%v elapsed): %v", re.X.SearchPath, re.Elapsed, err)
		return
	}
	x.Printf("Finished Extracting: %s (%v elapsed, queue size: %d)", re.X.SearchPath, re.Elapsed, re.Queued)
}
// decompressFiles extracts every archive recorded on re, accumulating sizes,
// extra nested archives, and written files onto re as it goes. It stops at
// the first error; files written before the failure are still appended so the
// caller can clean them up.
func (x *Xtractr) decompressFiles(re *Response) error {
	for _, archive := range re.Archives {
		// 'o' is the response for _this_ archive file, 're' is the whole batch.
		o, err := x.processArchive(archive, re.Output)
		if len(o.Extras) > 0 {
			re.Extras = append(re.Extras, o.Extras...)
		}
		re.Size += o.Size

		if err != nil {
			// Make sure these get added in case there is an error.
			// If there is no error, we add a different set later.
			if len(o.NewFiles) > 0 {
				re.NewFiles = append(re.NewFiles, o.NewFiles...)
			}
			return err
		}

		// cleanupProcessedArchive rewrites o.NewFiles (tracking file, and the
		// possibly-relocated final paths), so it needs Output/X populated.
		o.Output, o.X = re.Output, re.X
		err = x.cleanupProcessedArchive(o, archive)
		if len(o.NewFiles) > 0 {
			// Append any new files, even if there was an error.
			re.NewFiles = append(re.NewFiles, o.NewFiles...)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
// cleanupProcessedArchive runs after one archive extracted successfully: it
// drops a tracking file describing the extraction, optionally deletes the
// original archive, and — unless the job asked to keep the temp folder —
// moves the extracted files back next to the archive. re.NewFiles is updated
// to the final file list.
func (x *Xtractr) cleanupProcessedArchive(re *Response, archivePath string) error {
	tmpFile := filepath.Join(re.Output, x.Suffix+"."+filepath.Base(archivePath)+".txt")
	// The tracking file itself is part of the extracted payload, so it is
	// included in NewFiles and travels with (and is removed with) the data.
	re.NewFiles = append(x.GetFileList(re.Output), tmpFile)

	msg := []byte(fmt.Sprintf("# %s - this file is removed with the extracted data\n---\n"+
		"archive:%s\nextras:%v\nfrom_path:%s\ntemp_path:%s\nrelocated:%v\ntime:%v\nfiles:\n - %v\n",
		x.Suffix, archivePath, re.Extras, re.X.SearchPath, re.Output, !re.X.TempFolder, time.Now(),
		strings.Join(re.NewFiles, "\n - ")))
	// A failed tracking-file write is logged but deliberately non-fatal.
	if err := ioutil.WriteFile(tmpFile, msg, x.FileMode); err != nil {
		x.Printf("Error: Creating Temporary Tracking File: %v", err) // continue anyway.
	}

	if re.X.DeleteOrig {
		x.DeleteFiles(archivePath) // as requested
	}

	var err error
	// Only move back the files if the archive wasn't extracted from the temp path.
	if archiveDir := filepath.Dir(archivePath); !re.X.TempFolder && re.Output != archiveDir {
		// Move the extracted files back into the same folder as the archive.
		re.NewFiles, err = x.MoveFiles(re.Output, archiveDir, false)
	}
	return err
}
// processArchive extracts one archive into tmpPath, then recurses into any
// .rar/.zip archives that the extraction itself produced. It returns the
// accumulated list of extra (nested) archives, total bytes written, and all
// files created. On error the partial results are still returned so the
// caller can clean up.
func (x *Xtractr) processArchive(filename string, tmpPath string) (*Response, error) {
	output := &Response{NewFiles: []string{}, Extras: []string{}}

	if err := os.MkdirAll(tmpPath, x.DirMode); err != nil {
		return output, fmt.Errorf("os.MkdirAll: %w", err)
	}

	x.Debugf("Extracting File: %v to %v", filename, tmpPath)
	beforeFiles := x.GetFileList(tmpPath) // get the "before this extraction" file list
	bytes, files, err := ExtractFile(&XFile{ // extract the file.
		FilePath:  filename,
		OutputDir: tmpPath,
		FileMode:  x.FileMode,
		DirMode:   x.DirMode,
	})
	output.NewFiles = append(output.NewFiles, files...) // keep track of the files extracted.
	output.Size += bytes                                // total the size of data written.

	if err != nil {
		x.DeleteFiles(tmpPath) // clean up the mess after an error and bail.
		return output, err
	}

	// Check if we just extracted more archives.
	newFiles := Difference(beforeFiles, x.GetFileList(tmpPath))
	for _, filename := range newFiles {
		if strings.HasSuffix(filename, ".rar") || strings.HasSuffix(filename, ".zip") {
			// recurse and append data to tracking vars.
			o, err := x.processArchive(filename, tmpPath)
			output.Extras = append(append(output.Extras, o.Extras...), filename) // MORE archives!
			output.NewFiles = append(output.NewFiles, o.NewFiles...)             // keep track of the files extracted.
			output.Size += o.Size                                                // total the size of data written.
			if err != nil {
				return output, err
			}
		}
	}
	return output, nil
}
|
package files
import (
"crypto/md5"
"hash"
"io"
"os"
"path/filepath"
"github.com/javiercbk/filetype"
"github.com/javiercbk/filetype/types"
)
// ReadWriteSeekCloser combines io.Reader, io.Writer, io.Seeker and io.Closer;
// it is the interface satisfied by the sound files this package hands out.
type ReadWriteSeekCloser interface {
	io.Reader
	io.Writer
	io.Seeker
	io.Closer
}
// FileMetadata is a bit mask selecting which metadata to extract while
// writing a stream (see WriteWithMetadata). Combine flags with |.
type FileMetadata uint32

const (
	// Checksum extracts the file checksum (MD5) from the reader.
	Checksum FileMetadata = 1 << iota
	// MIME extracts the file MIME type from the reader.
	MIME
)
// Repository is a file management structure; all sound files live under its
// base path, organized as <basePath>/<userID>/<instrumentID>/<uuid><ext>.
type Repository struct {
	basePath string
}
// WriteInfo models all the information that can be extracted while copying a
// reader into a writer: bytes written, and the optionally requested MD5
// checksum and MIME type.
type WriteInfo struct {
	Written  int64
	Checksum []byte
	MimeType string
}
// NewRepository creates a new file repository rooted at basePath.
func NewRepository(basePath string) Repository {
	repo := Repository{basePath: basePath}
	return repo
}
// BasePath returns the base path of the file repository.
func (r Repository) BasePath() string {
	return r.basePath
}
// soundFolderPath builds the directory that holds one user's sounds for one
// instrument: <basePath>/<userID>/<instrumentID>.
func (r Repository) soundFolderPath(userID, instrumentID string) string {
	return filepath.Join(r.basePath, userID, instrumentID)
}
// soundFilePath builds the full path of one sound file: the instrument folder
// plus "<soundUUID><extension>" (extension must include its leading dot).
func (r Repository) soundFilePath(userID, instrumentID, soundUUID, extension string) string {
	return filepath.Join(r.soundFolderPath(userID, instrumentID), soundUUID+extension)
}
// SoundFile opens (per the given os.OpenFile flag) the sound file for the
// user/instrument/uuid triple, creating the containing directories first.
func (r Repository) SoundFile(userID, instrumentID, soundUUID, extension string, flag int) (ReadWriteSeekCloser, error) {
	dir := r.soundFolderPath(userID, instrumentID)
	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, err
	}
	path := r.soundFilePath(userID, instrumentID, soundUUID, extension)
	return os.OpenFile(path, flag, 0644)
}
// RemoveSound deletes the sound file for the user/instrument/uuid triple.
func (r Repository) RemoveSound(userID, instrumentID, soundUUID, extension string) error {
	return os.Remove(r.soundFilePath(userID, instrumentID, soundUUID, extension))
}
// WriteWithMetadata streams reader into writer while optionally computing the
// MD5 checksum and/or sniffing the MIME type (selected via the meta bit
// mask). The bytes-written count is always populated, even on a copy error.
func WriteWithMetadata(writer io.Writer, reader io.Reader, meta FileMetadata) (WriteInfo, error) {
	info := WriteInfo{}
	src := reader

	// Tee the stream through each requested metadata collector.
	var digest hash.Hash
	if meta&Checksum != 0 {
		digest = md5.New()
		src = io.TeeReader(src, digest)
	}
	var matcher *filetype.MatcherWriter
	if meta&MIME != 0 {
		matcher = filetype.NewMatcherWriter()
		src = io.TeeReader(src, matcher)
	}

	n, err := io.Copy(writer, src)
	info.Written = n
	if err != nil {
		return info, err
	}

	if digest != nil {
		info.Checksum = digest.Sum(nil)
	}
	if matcher != nil {
		// if error it will be returned on the last return
		var t types.Type
		t, err = matcher.Match()
		info.MimeType = t.MIME.Value
	}
	return info, err
}
|
package logs
import (
"time"
)
// LogLevel selects which of the logger's output streams a message goes to.
type LogLevel int

const (
	Info LogLevel = iota // informational messages
	Warning              // recoverable problems
	Error                // failures
)
// LogMsg is one buffered log entry: its severity and text.
type LogMsg struct {
	Level LogLevel
	Msg   string
}
// LogStack buffers log messages and flushes them through Logger in batches,
// subject to the logger's rate limit (see PrintStack).
type LogStack struct {
	Logger *Logger
	Stack  []*LogMsg
}
// Add buffers one message at the given level without flushing.
func (ls *LogStack) Add(lvl LogLevel, msg string) {
	entry := &LogMsg{Level: lvl, Msg: msg}
	ls.Stack = append(ls.Stack, entry)
}
// PrintStack flushes every buffered entry to the Logger's per-level
// outputs, but only if more than Logger.Timeout has elapsed since the
// last flush; the buffer is cleared either way, so entries arriving
// inside the timeout window are dropped.
func (ls *LogStack) PrintStack() {
	ls.Logger.mu.Lock()
	if time.Since(ls.Logger.Last) > ls.Logger.Timeout {
		for _, entry := range ls.Stack {
			switch entry.Level {
			case Error:
				ls.Logger.Error.Println(entry.Msg)
			case Warning:
				ls.Logger.Warning.Println(entry.Msg)
			case Info:
				ls.Logger.Info.Println(entry.Msg)
			}
		}
		ls.Logger.Last = time.Now()
	}
	ls.Logger.mu.Unlock()
	ls.Stack = make([]*LogMsg, 0)
}
// AddAndPrint buffers the message and immediately attempts a flush
// (subject to PrintStack's rate limiting).
func (ls *LogStack) AddAndPrint(lvl LogLevel, msg string) {
	ls.Add(lvl, msg)
	ls.PrintStack()
}
// NewLogStack returns an empty LogStack bound to l.
func NewLogStack(l *Logger) *LogStack {
	ls := &LogStack{Logger: l}
	ls.Stack = make([]*LogMsg, 0)
	return ls
}
|
package main
import (
"fmt"
"runtime"
)
// ifPractice prints which of 1, 2 or 3 val equals (or a fallback message
// when it is none of them), then prints the current OS name.
func ifPractice(val int) {
	switch val {
	case 1:
		fmt.Println("a is 1")
	case 2:
		fmt.Println("a is 2")
	case 3:
		fmt.Println("a is 3")
	default:
		fmt.Println("a is not 1,2,3")
	}
	fmt.Println(runtime.GOOS)
}
// ifInit demonstrates the if statement's init clause. Note that `val = 1`
// is a plain assignment (not `:=`), so it modifies the outer val rather
// than declaring a block-scoped variable; the final print therefore
// shows 1.
func ifInit() {
	val := 0
	// The init clause would introduce its own block scope only if it used
	// the := short declaration form.
	if val = 1; val > 0 {
		fmt.Println("a ")
	}
	fmt.Println(val)
}
// isError reports whether val is positive, returning val itself and true
// on success, or 0 and false otherwise.
func isError(val int) (int, bool) {
	if val <= 0 {
		return 0, false
	}
	return val, true
}
// main runs the if-with-init-statement demonstration.
func main() {
	ifInit()
}
package audit
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
api "nighthawkapi/api/core"
"nighthawkapi/api/handlers/auth"
"nighthawkapi/api/handlers/config"
"time"
"github.com/gorilla/mux"
elastic "gopkg.in/olivere/elastic.v5"
)
const EsTagType = "tags"
// init loads the package configuration and builds the shared Elasticsearch
// client. conf, client and err are package-level variables declared
// elsewhere in this package. On failure only a debug message is logged and
// the corresponding variable is left nil, so later handlers will fail.
func init() {
	conf, err = config.ReadConfFile()
	if err != nil {
		api.LogDebug(api.DEBUG, "Failed to initialize config read")
		return
	}
	client, err = elastic.NewClient(elastic.SetURL(fmt.Sprintf("%s://%s:%d", conf.ServerHttpScheme(), conf.ServerHost(), conf.ServerPort())))
	if err != nil {
		api.LogDebug(api.DEBUG, "Failed to initialize elastic client")
		return
	}
}
type Tag struct {
Timestamp string `json:"timestamp"`
CreatedBy string `json:"created_by"`
CaseName string `json:"casename,omitempty"`
ComputerName string `json:"computername,omitempty"`
Audit string `json:"audit,omitempty"`
DocId string `json:"doc_id,,omitempty"`
TagCategory string `json:"tag_category"`
TagName string `json:"tag_name"`
}
// loadTagVars overwrites tag fields from the request-URI variables.
// URI values take priority over whatever the request body supplied;
// empty URI values leave the existing field untouched.
func loadTagVars(tag *Tag, vars map[string]string) {
	if len(vars) == 0 {
		return
	}
	targets := map[string]*string{
		"casename":     &tag.CaseName,
		"endpoint":     &tag.ComputerName,
		"audit":        &tag.Audit,
		"doc_id":       &tag.DocId,
		"tag_category": &tag.TagCategory,
		"tag_name":     &tag.TagName,
	}
	for key, field := range targets {
		if v := vars[key]; v != "" {
			*field = v
		}
	}
}
// AddTag creates a new tag document in Elasticsearch.
//
// The tag is first decoded from the JSON request body, then overridden by
// request-URI parameters (loadTagVars gives URI values priority). All of
// casename, computername, audit, doc_id, tag_category, tag_name and
// created_by are mandatory; any missing field yields a "failed" response.
func AddTag(w http.ResponseWriter, r *http.Request) {
	r.Header.Set("Content-Type", "application/json; charset=UTF-8")
	// For UnitTest comment auth
	isauth, message := auth.IsAuthenticatedSession(w, r)
	if !isauth {
		api.HttpResponseReturn(w, r, "failed", message, nil)
		return
	}
	var tag Tag
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		api.HttpResponseReturn(w, r, "failed", err.Error(), nil)
		return
	}
	// NOTE(review): the Unmarshal error is deliberately ignored — a
	// malformed or empty body is treated as no body, and the tag may still
	// be populated entirely from URI parameters below. Confirm intended.
	json.Unmarshal(body, &tag)
	loadTagVars(&tag, mux.Vars(r))
	// Check for mandatory data
	if tag.CaseName == "" {
		api.HttpResponseReturn(w, r, "failed", "Casename is required", nil)
		return
	}
	if tag.ComputerName == "" {
		api.HttpResponseReturn(w, r, "failed", "Computer name is required", nil)
		return
	}
	if tag.Audit == "" {
		api.HttpResponseReturn(w, r, "failed", "Audit is required", nil)
		return
	}
	if tag.DocId == "" {
		api.HttpResponseReturn(w, r, "failed", "DocumentID is required", nil)
		return
	}
	if tag.TagCategory == "" {
		api.HttpResponseReturn(w, r, "failed", "Tag category is required", nil)
		return
	}
	if tag.TagName == "" {
		api.HttpResponseReturn(w, r, "failed", "Tag name is required", nil)
		return
	}
	if tag.CreatedBy == "" {
		api.HttpResponseReturn(w, r, "failed", "Creator is required", nil)
		return
	}
	// Set the tag creation time (Layout is the time format defined
	// elsewhere in this package).
	tag.Timestamp = time.Now().UTC().Format(Layout)
	jsonTag, _ := json.Marshal(tag)
	res, err := client.Index().
		Index(conf.ServerIndex()).
		Type(EsTagType).
		BodyJson(string(jsonTag)).
		Do(context.Background())
	if err != nil {
		api.HttpResponseReturn(w, r, "failed", err.Error(), nil)
		return
	}
	api.HttpResponseReturn(w, r, "success", "Tag added successfully", res.Id)
}
// GetTagData returns tags matching the search condition.
//
// Search criteria come from the JSON body (POST only) overridden by URI
// parameters. Unset case/endpoint/audit/doc_id criteria match everything
// via wildcard queries; created_by and tag_name are only filtered when
// supplied (tag_name as a contains-style wildcard).
//
// api_uri: GET /api/v1/tag/show/{case}
// api_uri: GET /api/v1/tag/show/{case}/{endpoint}
// api_uri: GET /api/v1/tag/show/{case}/{endpoint}/{audit}
// api_uri: GET /api/v1/tag/show/{case}/{endpoint}/{audit}/{doc_id}
func GetTagData(w http.ResponseWriter, r *http.Request) {
	r.Header.Set("Content-Type", "application/json; charset=UTF-8")
	// For UnitTest comment auth
	isauth, message := auth.IsAuthenticatedSession(w, r)
	if !isauth {
		api.HttpResponseReturn(w, r, "failed", message, nil)
		return
	}
	var tag Tag
	if r.Method == "POST" {
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			api.HttpResponseReturn(w, r, "failed", err.Error(), nil)
			return
		}
		// NOTE(review): Unmarshal error intentionally ignored — a bad body
		// degrades to an unfiltered search. Confirm intended.
		json.Unmarshal(body, &tag)
	}
	loadTagVars(&tag, mux.Vars(r))
	var queries []elastic.Query
	if tag.CaseName == "" {
		queries = append(queries, elastic.NewWildcardQuery("casename.keyword", "*"))
	} else {
		queries = append(queries, elastic.NewTermQuery("casename.keyword", tag.CaseName))
	}
	if tag.ComputerName == "" {
		queries = append(queries, elastic.NewWildcardQuery("computername.keyword", "*"))
	} else {
		queries = append(queries, elastic.NewTermQuery("computername.keyword", tag.ComputerName))
	}
	if tag.Audit == "" {
		queries = append(queries, elastic.NewWildcardQuery("audit.keyword", "*"))
	} else {
		queries = append(queries, elastic.NewTermQuery("audit.keyword", tag.Audit))
	}
	if tag.DocId == "" {
		queries = append(queries, elastic.NewWildcardQuery("doc_id.keyword", "*"))
	} else {
		queries = append(queries, elastic.NewTermQuery("doc_id.keyword", tag.DocId))
	}
	if tag.CreatedBy != "" {
		queries = append(queries, elastic.NewTermQuery("created_by.keyword", tag.CreatedBy))
	}
	if tag.TagName != "" {
		queries = append(queries, elastic.NewWildcardQuery("tag_name.keyword", fmt.Sprintf("*%s*", tag.TagName)))
	}
	boolquery := elastic.NewBoolQuery().Must(queries...)
	// Print JsonQuery
	// Uncomment the code below to JsonQuery to Elasticsearch
	/*
		boolQueryMap, _ := boolquery.Source()
		jsonBoolQuery, _ := json.Marshal(boolQueryMap)
		fmt.Println(string(jsonBoolQuery))
	*/
	sr, err := client.Search().
		Index(conf.ServerIndex()).
		Type(EsTagType).
		Query(boolquery).
		Do(context.Background())
	if err != nil {
		api.HttpResponseReturn(w, r, "failed", err.Error(), nil)
		return
	}
	if sr.TotalHits() < 1 {
		api.HttpResponseReturn(w, r, "failed", "No result found", nil)
		return
	}
	// Populating array of tags
	var tags []Tag
	for _, hit := range sr.Hits.Hits {
		var tag Tag
		json.Unmarshal(*hit.Source, &tag)
		tags = append(tags, tag)
	}
	api.HttpResponseReturn(w, r, "success", "Tag search completed", tags)
}
|
package main
import (
"flag"
"fmt"
"io"
"log"
"math"
"math/rand"
"os"
"strconv"
"sync"
"time"
)
var (
	n int // Number of files
	m int // Number of entries per file
)

// max is the maximum number of decimal digits a uint64 can occupy,
// computed by rendering math.MaxUint64; used to size scratch buffers.
var max = len(strconv.AppendUint(nil, math.MaxUint64, 10))
// main generates n files concurrently, each holding m random uint64
// entries, then collects exactly one result per file from the buffered
// channel.
func main() {
	flag.IntVar(&n, "n", 1, "number of files to generate")
	flag.IntVar(&m, "m", 5, "number of entries to generate")
	flag.Parse()
	rand.Seed(time.Now().UnixNano())
	res := make(chan error, n)
	for i := 0; i < n; i++ {
		go fileWriter(res, i, m)
	}
	for i := 0; i < n; i++ {
		if err := <-res; err != nil {
			// NOTE(review): i is the receive index, not the index of the
			// file that failed — results arrive in completion order, so
			// the reported file number can be wrong.
			log.Printf("file [%d], err -> %s", i, err)
		}
	}
}
// fileWriter creates the file "<i>.gen", writes m random uint64 lines to
// it, and reports exactly one result on res: nil on success, otherwise
// the first error encountered.
func fileWriter(res chan<- error, i, m int) {
	var err error
	// Exactly one send per invocation. The original sent a write error both
	// explicitly inside the loop AND from the deferred conditional send,
	// producing two sends for one goroutine — with the channel buffered to
	// exactly n results, the duplicate send could block this goroutine
	// forever. Centralizing the send in a single defer fixes that.
	defer func() {
		res <- err
	}()

	fName := fmt.Sprintf("%d.gen", i)
	var file *os.File
	file, err = os.Create(fName)
	if err != nil {
		return
	}
	// Registered after the result-send defer, so the file is closed before
	// the result is delivered (defers run LIFO).
	defer file.Close()

	for j := 0; j < m; j++ {
		if _, err = writeRandNumbersToFile(file); err != nil {
			return
		}
	}
}
// writeRandNumbersToFile writes one random uint64 in decimal followed by
// '\n' to w, reusing a pooled scratch buffer sized to the worst case so
// no per-call allocation occurs. It returns the result of the underlying
// Write.
func writeRandNumbersToFile(w io.Writer) (int, error) {
	buf := pool.Get().(*[]byte)
	defer pool.Put(buf)
	line := strconv.AppendUint((*buf)[:0], rand.Uint64(), 10)
	line = append(line, '\n')
	return w.Write(line)
}
var pool=sync.Pool {
New: func()interface{}{
p := make([]byte,0,max+1)
return &p
},
}
|
package messages
const MsgFormFieldRequired = "%[1]s required"
|
package main
import (
	"fmt"
	"reflect"
)
// Vertex
type Vertex struct {
edges []*Vertex
}
func (v Vertex) addEdge(vToAdd Vertex) Vertex {
edges := v.edges
edges = append(edges, &vToAdd)
return Vertex{edges}
}
func (v Vertex) delEdge(vToDel Vertex) Vertex {
edges := v.edges
nEdges := len(edges)
toDel := -1
for i := 0; i < nEdges; i++ {
toDel = i
}
if toDel > -1 {
edges = append(edges[:toDel], edges[toDel+1:]...)
}
return Vertex{edges}
}
// Muter is satisfied by graph nodes whose edge set can be mutated; both
// methods return a modified copy rather than changing the receiver.
type Muter interface {
	addEdge(vToAdd Vertex) Vertex
	delEdge(vToDel Vertex) Vertex
}
// Graph holds the complete set of vertices.
type Graph struct {
	vertices []*Vertex
}
// main builds a two-vertex graph with a single edge v1 -> v2 and prints
// both the graph and v1.
func main() {
	var v1, v2 Vertex
	v1 = v1.addEdge(v2)
	g := Graph{vertices: []*Vertex{&v1, &v2}}
	fmt.Println(g)
	fmt.Println(v1)
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"github.com/ripexz/logpasta/clipboard"
)
var version = "v0.3.3"
// main is the logpasta CLI entry point: it reads the paste content either
// from stdin (when piped) or from the remaining command-line arguments,
// uploads it via saveLog, optionally copies the resulting URL to the
// clipboard, and prints the outcome.
func main() {
	initLogger()
	conf := loadConfig()
	checkForCommands()
	var content string
	fi, _ := os.Stdin.Stat()
	if (fi.Mode() & os.ModeCharDevice) == 0 {
		// piped input: read everything from stdin
		bytes, _ := ioutil.ReadAll(os.Stdin)
		content = string(bytes)
	} else {
		// interactive: join the positional arguments
		content = strings.Join(flag.Args(), " ")
	}
	if content == "" {
		log.Printf("No input detected, see 'logpasta help' for usage")
		os.Exit(0)
	}
	// make request
	var output string
	paste, err := saveLog(conf, content)
	if err != nil {
		// Force verbose output so the failure is always reported.
		conf.Silent = false
		output = fmt.Sprintf("Failed to save log: %s", err.Error())
	} else {
		pasteURL := fmt.Sprintf("%s/paste/%s", conf.BaseURL, paste.UUID)
		output = fmt.Sprintf("Log saved successfully:\n%s", pasteURL)
		if conf.Copy {
			err = clipboard.Copy(pasteURL)
			if err != nil {
				output += "\n(failed to copy to clipboard)"
				if !conf.Silent {
					output += fmt.Sprintf("\nError: %s", err.Error())
				}
			} else {
				output += " (copied to clipboard)"
			}
		}
		if paste.DeleteKey != nil {
			output += fmt.Sprintf(
				"\nYou can delete it early by visiting:\n%s/delete/%s/%s",
				conf.BaseURL, paste.UUID, *paste.DeleteKey,
			)
		}
	}
	// NOTE(review): content is echoed only when not silent, but output is
	// logged unconditionally — confirm whether Silent should also suppress
	// output (possibly initLogger discards it; not visible here).
	if !conf.Silent {
		log.Println(content)
	}
	log.Println(output)
}
// checkForCommands handles the "version" and "help" subcommands by
// printing the relevant text and exiting; any other (or missing) first
// argument falls through to normal processing.
func checkForCommands() {
	if len(os.Args) < 2 {
		return
	}
	switch os.Args[1] {
	case "version":
		printVersion()
	case "help":
		printUsage()
	default:
		return
	}
	os.Exit(0)
}
|
package main
import (
"os"
"github.com/therecipe/qt/widgets"
)
// main boots the Qt application: it creates the QApplication, constructs
// the 1500x1000 main window via newApp (defined elsewhere), applies the
// "fusion" widget style, shows the window, and enters the Qt event loop.
func main() {
	widgets.NewQApplication(len(os.Args), os.Args)
	a := newApp(1500, 1000)
	widgets.QApplication_SetStyle2("fusion")
	a.w.Show()
	widgets.QApplication_Exec()
}
|
package main
import "fmt"
// main demonstrates select over two buffered channels that are both ready:
// Go picks one of the ready cases pseudo-randomly, so the program prints
// either 1 or 2 (the default case is unreachable here since both sends
// have already completed).
func main() {
	ch1, ch2 := make(chan int, 1), make(chan int, 1)
	ch1 <- 1
	ch2 <- 2
	select {
	case v := <-ch1:
		fmt.Println(v)
	case v := <-ch2:
		fmt.Println(v)
	default:
		fmt.Println("chan")
	}
}
|
package controller
import (
"github.com/reechou/robot-manager/models"
)
// API result codes returned in Response.Code.
// NOTE(review): ALL_CAPS names deviate from Go naming convention, but they
// are exported and referenced elsewhere, so renaming would break callers.
const (
	RESPONSE_OK = iota
	RESPONSE_ERR
)

// Response is the generic JSON envelope for every API reply.
type Response struct {
	Code int64       `json:"code"`
	Msg  string      `json:"msg,omitempty"`
	Data interface{} `json:"data,omitempty"`
}

// GetRobotGroupsRsp is the paged reply listing robot groups.
type GetRobotGroupsRsp struct {
	Count int64               `json:"count"`
	List  []models.RobotGroup `json:"list"`
}

// GetRobotGroupMassListRsp is the paged reply listing group mass-send records.
type GetRobotGroupMassListRsp struct {
	Count int64                   `json:"count"`
	List  []models.RobotGroupMass `json:"list"`
}

// RobotSaveGroupsReq asks to persist the given groups for a robot account.
type RobotSaveGroupsReq struct {
	RobotWx string    `json:"robotWx"`
	Groups  []WxGroup `json:"groups"`
}

// RobotCreateManagerReq asks to create a manager for a robot account.
type RobotCreateManagerReq struct {
	RobotWx  string `json:"robotWx"`
	Nickname string `json:"nickname"`
}

// GetRobotGroupsReq pages through a robot's groups.
type GetRobotGroupsReq struct {
	RobotId int64 `json:"robotId"`
	Offset  int64 `json:"offset"`
	Num     int64 `json:"num"`
}

// GetRobotGroupChatNewReq fetches group chat messages newer than Timestamp.
type GetRobotGroupChatNewReq struct {
	RobotId   int64 `json:"robotId"`
	Timestamp int64 `json:"timestamp"`
}

// GetRobotGroupChatFromGroupReq fetches messages for one group newer than
// Timestamp.
type GetRobotGroupChatFromGroupReq struct {
	RobotId   int64 `json:"robotId"`
	GroupId   int64 `json:"groupId"`
	Timestamp int64 `json:"timestamp"`
}

// GetRobotGroupMassReq pages through mass-send records.
type GetRobotGroupMassReq struct {
	Offset int64 `json:"offset"`
	Num    int64 `json:"num"`
}

// GetRobotGroupMassFromRobotReq fetches mass-send records for one robot.
type GetRobotGroupMassFromRobotReq struct {
	RobotWx string `json:"robotWx"`
}

// SendGroupMsgReq asks a robot to send a message into a group.
type SendGroupMsgReq struct {
	RobotWx       string `json:"robotWx"`
	GroupUserName string `json:"groupUserName"`
	GroupNickName string `json:"groupNickName"`
	MsgType       string `json:"msgType"`
	Msg           string `json:"msg"`
}
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"context"
"flag"
"fmt"
"math"
"regexp"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/metrics/testutil"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/podlogs"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
var (
	// migratedPlugins is populated from the -storage.migratedPlugins flag
	// registered in init below.
	migratedPlugins *string
	// minValidSize and maxValidSize are the defaults substituted when a
	// SizeRange leaves Min or Max unset (see getSizeRangesIntersection).
	minValidSize = "1Ki"
	maxValidSize = "10Ei"
)
// init registers the flag that lists in-tree plugins migrated to CSI.
func init() {
	migratedPlugins = flag.String("storage.migratedPlugins", "", "comma separated list of in-tree plugin names of form 'kubernetes.io/{pluginName}' migrated to CSI")
}
type opCounts map[string]int64
// BaseSuites is a list of storage test suites that work for both in-tree
// and CSI drivers. Each entry is a constructor so a fresh suite instance
// is created per driver/pattern combination.
var BaseSuites = []func() TestSuite{
	InitVolumesTestSuite,
	InitVolumeIOTestSuite,
	InitVolumeModeTestSuite,
	InitSubPathTestSuite,
	InitProvisioningTestSuite,
	InitMultiVolumeTestSuite,
	InitVolumeExpandTestSuite,
	InitDisruptiveTestSuite,
	InitVolumeLimitsTestSuite,
	InitTopologyTestSuite,
	InitStressTestSuite,
}
// CSISuites is a list of storage test suites that work only for CSI
// drivers: all of BaseSuites plus the CSI-specific ones.
// NOTE(review): this append is safe only while BaseSuites' literal has
// len == cap; if BaseSuites ever gains spare capacity, append could alias
// its backing array.
var CSISuites = append(BaseSuites,
	InitEphemeralTestSuite,
	InitSnapshottableTestSuite,
)
// TestSuite represents an interface for a set of tests which works with TestDriver.
type TestSuite interface {
	// GetTestSuiteInfo returns the TestSuiteInfo for this TestSuite.
	GetTestSuiteInfo() TestSuiteInfo
	// DefineTests defines tests of the testpattern for the driver.
	// Called inside a Ginkgo context that reflects the current driver and test pattern,
	// so the test suite can define tests directly with ginkgo.It.
	DefineTests(TestDriver, testpatterns.TestPattern)
	// SkipRedundantSuite will skip the test suite based on the given TestPattern and TestDriver.
	SkipRedundantSuite(TestDriver, testpatterns.TestPattern)
}

// TestSuiteInfo represents a set of parameters for TestSuite.
type TestSuiteInfo struct {
	Name               string                     // name of the TestSuite
	FeatureTag         string                     // featureTag for the TestSuite
	TestPatterns       []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
	SupportedSizeRange e2evolume.SizeRange        // Size range supported by the test suite
}
// getTestNameStr composes the ginkgo context name for a suite/pattern
// pair, e.g. "[Testpattern: <name>]<tag> <suite><suiteTag>".
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
	info := suite.GetTestSuiteInfo()
	patternPart := fmt.Sprintf("[Testpattern: %s]%s", pattern.Name, pattern.FeatureTag)
	return patternPart + " " + info.Name + info.FeatureTag
}
// DefineTestSuite defines tests for all testpatterns and all testSuites
// for a driver: one ginkgo.Context per (suite, pattern) combination, with
// a BeforeEach that skips unsupported combinations before any resources
// are initialized.
func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) {
	for _, testSuiteInit := range tsInits {
		suite := testSuiteInit()
		for _, pattern := range suite.GetTestSuiteInfo().TestPatterns {
			// Copy the loop variable so each closure captures its own pattern.
			p := pattern
			ginkgo.Context(getTestNameStr(suite, p), func() {
				ginkgo.BeforeEach(func() {
					// Skip unsupported tests to avoid unnecessary resource initialization
					suite.SkipRedundantSuite(driver, p)
					skipUnsupportedTest(driver, p)
				})
				suite.DefineTests(driver, p)
			})
		}
	}
}
// skipUnsupportedTest skips tests when the combination of driver and
// testpattern is not suitable to be tested.
// Whether it needs to be skipped is checked in the following order:
// 1. Whether the SnapshotType is supported by the driver (via interface assertion)
// 2. Whether the volType is supported by the driver (via interface assertion)
// 3. Whether the fsType is supported
// 4. Driver-specific logic
//
// Test suites can also skip tests inside their own DefineTests function or in
// individual tests.
func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
	dInfo := driver.GetDriverInfo()
	var isSupported bool
	// 1. Snapshot support: a non-empty SnapshotType takes precedence over
	// the volType checks below.
	if len(pattern.SnapshotType) > 0 {
		switch pattern.SnapshotType {
		case testpatterns.DynamicCreatedSnapshot:
			_, isSupported = driver.(SnapshottableTestDriver)
		default:
			isSupported = false
		}
		if !isSupported {
			e2eskipper.Skipf("Driver %s doesn't support snapshot type %v -- skipping", dInfo.Name, pattern.SnapshotType)
		}
	} else {
		// 2. Volume type support, determined by which driver interfaces are
		// implemented.
		switch pattern.VolType {
		case testpatterns.InlineVolume:
			_, isSupported = driver.(InlineVolumeTestDriver)
		case testpatterns.PreprovisionedPV:
			_, isSupported = driver.(PreprovisionedPVTestDriver)
		case testpatterns.DynamicPV:
			_, isSupported = driver.(DynamicPVTestDriver)
		case testpatterns.CSIInlineVolume:
			_, isSupported = driver.(EphemeralTestDriver)
		default:
			isSupported = false
		}
		if !isSupported {
			e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
		}
		// 3. Filesystem support: the driver's declared set plus distro quirks.
		if !dInfo.SupportedFsType.Has(pattern.FsType) {
			e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
		}
		if pattern.FsType == "xfs" && framework.NodeOSDistroIs("gci", "cos", "windows") {
			e2eskipper.Skipf("Distro doesn't support xfs -- skipping")
		}
		if pattern.FsType == "ntfs" && !framework.NodeOSDistroIs("windows") {
			e2eskipper.Skipf("Distro %s doesn't support ntfs -- skipping", framework.TestContext.NodeOSDistro)
		}
	}
	// 4. Driver-specific skip logic.
	driver.SkipUnsupportedTest(pattern)
}
// VolumeResource is a generic implementation of TestResource that will be able to
// be used in most of TestSuites.
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
// Also, see subpath.go in the same directory for how to extend and use it.
type VolumeResource struct {
	Config    *PerTestConfig           // per-test configuration this resource was created with
	Pattern   testpatterns.TestPattern // the pattern that determined which fields below are set
	VolSource *v1.VolumeSource
	Pvc       *v1.PersistentVolumeClaim
	Pv        *v1.PersistentVolume
	Sc        *storagev1.StorageClass
	Volume    TestVolume // driver-specific pre-provisioned volume, if any
}
// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
// different test pattern volume types: inline volumes, pre-provisioned PVs,
// dynamically provisioned PVs and CSI ephemeral inline volumes. If the
// driver does not support the requested volume type, the test is skipped.
func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
	r := VolumeResource{
		Config:  config,
		Pattern: pattern,
	}
	dInfo := driver.GetDriverInfo()
	f := config.Framework
	cs := f.ClientSet
	// Create volume for pre-provisioned volume tests
	r.Volume = CreateVolume(driver, config, pattern.VolType)
	switch pattern.VolType {
	case testpatterns.InlineVolume:
		framework.Logf("Creating resource for inline volume")
		if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
			r.VolSource = iDriver.GetVolumeSource(false, pattern.FsType, r.Volume)
		}
	case testpatterns.PreprovisionedPV:
		framework.Logf("Creating resource for pre-provisioned PV")
		if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
			pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume)
			if pvSource != nil {
				r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes)
				r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
			}
		}
	case testpatterns.DynamicPV:
		framework.Logf("Creating resource for dynamic PV")
		if dDriver, ok := driver.(DynamicPVTestDriver); ok {
			var err error
			// The claim size must satisfy both the suite's and the driver's
			// supported ranges.
			driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
			claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
			framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
			framework.Logf("Using claimSize:%s, test suite supported size:%v, driver(%s) supported size:%v ", claimSize, testVolumeSizeRange, dDriver.GetDriverInfo().Name, testVolumeSizeRange)
			r.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)
			if pattern.BindingMode != "" {
				r.Sc.VolumeBindingMode = &pattern.BindingMode
			}
			if pattern.AllowExpansion != false {
				r.Sc.AllowVolumeExpansion = &pattern.AllowExpansion
			}
			ginkgo.By("creating a StorageClass " + r.Sc.Name)
			r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			if r.Sc != nil {
				r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC(
					f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, dInfo.RequiredAccessModes)
				r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
			}
		}
	case testpatterns.CSIInlineVolume:
		framework.Logf("Creating resource for CSI ephemeral inline volume")
		if eDriver, ok := driver.(EphemeralTestDriver); ok {
			attributes, _, _ := eDriver.GetVolume(config, 0)
			r.VolSource = &v1.VolumeSource{
				CSI: &v1.CSIVolumeSource{
					Driver:           eDriver.GetCSIDriverName(config),
					VolumeAttributes: attributes,
				},
			}
		}
	default:
		framework.Failf("VolumeResource doesn't support: %s", pattern.VolType)
	}
	// A nil VolSource here means the driver did not implement the interface
	// needed for this volume type.
	if r.VolSource == nil {
		e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
	}
	return &r
}
// createVolumeSource builds a VolumeSource that mounts the named PVC.
func createVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
	claim := v1.PersistentVolumeClaimVolumeSource{
		ClaimName: pvcName,
		ReadOnly:  readOnly,
	}
	return &v1.VolumeSource{PersistentVolumeClaim: &claim}
}
// CleanupResource cleans up everything a VolumeResource created: PVC/PV
// (depending on the test pattern), StorageClass, and the driver-specific
// pre-provisioned volume. Individual failures are collected and returned
// as a single aggregate error.
func (r *VolumeResource) CleanupResource() error {
	f := r.Config.Framework
	var cleanUpErrs []error
	if r.Pvc != nil || r.Pv != nil {
		switch r.Pattern.VolType {
		case testpatterns.PreprovisionedPV:
			ginkgo.By("Deleting pv and pvc")
			if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) != 0 {
				framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
			}
		case testpatterns.DynamicPV:
			ginkgo.By("Deleting pvc")
			// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
			if r.Pv != nil && r.Pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
				framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
					r.Pv.Name, v1.PersistentVolumeReclaimDelete)
			}
			if r.Pvc != nil {
				cs := f.ClientSet
				pv := r.Pv
				if pv == nil && r.Pvc.Name != "" {
					// This happens for late binding. Check whether we have a volume now that we need to wait for.
					pvc, err := cs.CoreV1().PersistentVolumeClaims(r.Pvc.Namespace).Get(context.TODO(), r.Pvc.Name, metav1.GetOptions{})
					switch {
					case err == nil:
						if pvc.Spec.VolumeName != "" {
							pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
							if err != nil {
								cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PV %v", pvc.Spec.VolumeName))
							}
						}
					case apierrors.IsNotFound(err):
						// Without the PVC, we cannot locate the corresponding PV. Let's
						// hope that it is gone.
					default:
						cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PVC %v", r.Pvc.Name))
					}
				}
				err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.Pvc.Name, f.Namespace.Name)
				if err != nil {
					cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete PVC %v", r.Pvc.Name))
				}
				if pv != nil {
					err = e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, pv.Name, 5*time.Second, 5*time.Minute)
					if err != nil {
						cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err,
							"Persistent Volume %v not deleted by dynamic provisioner", pv.Name))
					}
				}
			}
		default:
			framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.Pvc, r.Pv)
		}
	}
	if r.Sc != nil {
		ginkgo.By("Deleting sc")
		if err := deleteStorageClass(f.ClientSet, r.Sc.Name); err != nil {
			cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete StorageClass %v", r.Sc.Name))
		}
	}
	// Cleanup volume for pre-provisioned volume tests
	if r.Volume != nil {
		if err := tryFunc(r.Volume.DeleteVolume); err != nil {
			cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume"))
		}
	}
	return utilerrors.NewAggregate(cleanUpErrs)
}
// createPVCPV creates a pre-provisioned PV from pvSource plus a matching
// PVC in the framework's namespace, waits for the pair to bind, and
// returns both. The namespace name is used as the StorageClass name so
// the PV/PVC pair only binds to each other.
func createPVCPV(
	f *framework.Framework,
	name string,
	pvSource *v1.PersistentVolumeSource,
	volumeNodeAffinity *v1.VolumeNodeAffinity,
	volMode v1.PersistentVolumeMode,
	accessModes []v1.PersistentVolumeAccessMode,
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
	pvConfig := e2epv.PersistentVolumeConfig{
		NamePrefix:       fmt.Sprintf("%s-", name),
		StorageClassName: f.Namespace.Name,
		PVSource:         *pvSource,
		NodeAffinity:     volumeNodeAffinity,
		AccessModes:      accessModes,
	}
	pvcConfig := e2epv.PersistentVolumeClaimConfig{
		StorageClassName: &f.Namespace.Name,
		AccessModes:      accessModes,
	}
	// An empty volMode means "use the cluster default" — leave it unset.
	if volMode != "" {
		pvConfig.VolumeMode = &volMode
		pvcConfig.VolumeMode = &volMode
	}
	framework.Logf("Creating PVC and PV")
	pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
	framework.ExpectNoError(err, "PVC, PV creation failed")
	err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
	framework.ExpectNoError(err, "PVC, PV failed to bind")
	return pv, pvc
}
// createPVCPVFromDynamicProvisionSC creates a PVC against the given
// StorageClass and, unless the class uses delayed (WaitForFirstConsumer)
// binding, waits for it to bind and resolves the provisioned PV. With
// delayed binding the returned PV is nil.
func createPVCPVFromDynamicProvisionSC(
	f *framework.Framework,
	name string,
	claimSize string,
	sc *storagev1.StorageClass,
	volMode v1.PersistentVolumeMode,
	accessModes []v1.PersistentVolumeAccessMode,
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
	cs := f.ClientSet
	ns := f.Namespace.Name
	ginkgo.By("creating a claim")
	pvcCfg := e2epv.PersistentVolumeClaimConfig{
		NamePrefix:       name,
		ClaimSize:        claimSize,
		StorageClassName: &(sc.Name),
		AccessModes:      accessModes,
		VolumeMode:       &volMode,
	}
	pvc := e2epv.MakePersistentVolumeClaim(pvcCfg, ns)
	var err error
	pvc, err = e2epv.CreatePVC(cs, ns, pvc)
	framework.ExpectNoError(err)
	if !isDelayedBinding(sc) {
		err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
		framework.ExpectNoError(err)
	}
	// Re-fetch the claim to observe the bound VolumeName (if any).
	pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	var pv *v1.PersistentVolume
	if !isDelayedBinding(sc) {
		pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
		framework.ExpectNoError(err)
	}
	return pv, pvc
}
// isDelayedBinding reports whether sc uses WaitForFirstConsumer volume
// binding; an unset binding mode counts as immediate.
func isDelayedBinding(sc *storagev1.StorageClass) bool {
	mode := sc.VolumeBindingMode
	return mode != nil && *mode == storagev1.VolumeBindingWaitForFirstConsumer
}
// deleteStorageClass deletes className, treating "not found" as success so
// cleanup is idempotent.
func deleteStorageClass(cs clientset.Interface, className string) error {
	err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		return nil
	}
	return err
}
// convertTestConfig returns a framework test config with the
// parameters specified for the testsuite or (if available) the
// dynamically created config for the volume server.
//
// This is done because TestConfig is the public API for
// the testsuites package whereas volume.TestConfig is merely
// an implementation detail. It contains fields that have no effect,
// which makes it unsuitable for use in the testsuites public API.
func convertTestConfig(in *PerTestConfig) e2evolume.TestConfig {
	if serverCfg := in.ServerConfig; serverCfg != nil {
		return *serverCfg
	}
	return e2evolume.TestConfig{
		Namespace:           in.Framework.Namespace.Name,
		Prefix:              in.Prefix,
		ClientNodeSelection: in.ClientNodeSelection,
	}
}
// getSizeRangesIntersection takes two instances of storage size ranges and determines the
// intersection of the intervals (if it exists) and return the minimum of the intersection
// to be used as the claim size for the test.
// if value not set, that means there's no minimum or maximum size limitation and we set default size for it.
//
// NOTE(review): the comparison converts int64 quantities to float64, which
// loses precision above 2^53; quantities near maxValidSize (10Ei) that
// differ by less than the float mantissa granularity may compare as equal.
// A pure-int64 comparison would be exact, but would also orphan this
// file's "math" import if nothing else uses it — left as-is for review.
func getSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
	var firstMin, firstMax, secondMin, secondMax resource.Quantity
	var err error
	// Unset bounds default to the widest valid range.
	if len(first.Min) == 0 {
		first.Min = minValidSize
	}
	if len(first.Max) == 0 {
		first.Max = maxValidSize
	}
	if len(second.Min) == 0 {
		second.Min = minValidSize
	}
	if len(second.Max) == 0 {
		second.Max = maxValidSize
	}
	if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
		return "", err
	}
	if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
		return "", err
	}
	if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
		return "", err
	}
	if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
		return "", err
	}
	interSectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
	intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))
	// the minimum of the intersection shall be returned as the claim size
	var intersectionMin resource.Quantity
	if intersectionEnd-interSectionStart >= 0 { //have intersection
		intersectionMin = *resource.NewQuantity(int64(interSectionStart), "BinarySI") //convert value to BinarySI format. E.g. 5Gi
		// return the minimum of the intersection as the claim size
		return intersectionMin.String(), nil
	}
	return "", fmt.Errorf("intersection of size ranges %+v, %+v is null", first, second)
}
// getSnapshot builds an unstructured VolumeSnapshot object in namespace ns
// that snapshots claimName using the given snapshot class; the name is
// left to the server via generateName "snapshot-".
func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.Unstructured {
	obj := map[string]interface{}{
		"kind":       "VolumeSnapshot",
		"apiVersion": snapshotAPIVersion,
		"metadata": map[string]interface{}{
			"generateName": "snapshot-",
			"namespace":    ns,
		},
		"spec": map[string]interface{}{
			"volumeSnapshotClassName": snapshotClassName,
			"source": map[string]interface{}{
				"persistentVolumeClaimName": claimName,
			},
		},
	}
	return &unstructured.Unstructured{Object: obj}
}
// StartPodLogs begins capturing log output and events from current
// and future pods running in the namespace of the framework. That
// ends when the returned cleanup function is called.
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := f.ClientSet
	ns := driverNamespace.Name
	to := podlogs.LogOutput{
		StatusWriter: ginkgo.GinkgoWriter,
	}
	if framework.TestContext.ReportDir == "" {
		to.LogWriter = ginkgo.GinkgoWriter
	} else {
		test := ginkgo.CurrentGinkgoTestDescription()
		// Sanitize the test name into something filesystem-safe.
		reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
		// We end the prefix with a slash to ensure that all logs
		// end up in a directory named after the current test.
		//
		// TODO: use a deeper directory hierarchy once gubernator
		// supports that (https://github.com/kubernetes/test-infra/issues/10289).
		to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
			reg.ReplaceAllString(test.FullTestText, "_") + "/"
	}
	podlogs.CopyAllLogs(ctx, cs, ns, to)
	// pod events are something that the framework already collects itself
	// after a failed test. Logging them live is only useful for interactive
	// debugging, not when we collect reports.
	if framework.TestContext.ReportDir == "" {
		podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
	}
	// Cancelling the context stops both log copying and pod watching.
	return cancel
}
// getVolumeOpsFromMetricsForPlugin tallies, per operation name, the
// "storage_operation_status_count" samples that belong to the given
// volume plugin.
func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) opCounts {
	result := opCounts{}
	for _, sample := range ms["storage_operation_status_count"] {
		if string(sample.Metric["volume_plugin"]) != pluginName {
			continue
		}
		opName := string(sample.Metric["operation_name"])
		// We ignore verify_controller_attached_volume because it does not call into
		// the plugin. It only watches Node API and updates Actual State of World cache
		if opName == "verify_controller_attached_volume" {
			continue
		}
		result[opName] += int64(sample.Value)
	}
	return result
}
// getVolumeOpCounts gathers per-operation volume op counts for the given
// plugin from the controller-manager and, on small clusters, from every
// kubelet. Only supported on gce/gke/aws; other providers get an empty map.
func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
	if !framework.ProviderIs("gce", "gke", "aws") {
		return opCounts{}
	}
	// Gathering kubelet metrics from more than nodeLimit nodes is too slow,
	// so node metrics are skipped entirely above this size.
	nodeLimit := 25
	metricsGrabber, err := e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
	// ExpectNoError already no-ops on a nil error; the previous
	// `if err != nil { ... }` wrapper was redundant.
	framework.ExpectNoError(err, "Error creating metrics grabber: %v", err)
	if !metricsGrabber.HasRegisteredMaster() {
		framework.Logf("Warning: Environment does not support getting controller-manager metrics")
		return opCounts{}
	}
	controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
	framework.ExpectNoError(err, "Error getting c-m metrics : %v", err)
	totOps := getVolumeOpsFromMetricsForPlugin(testutil.Metrics(controllerMetrics), pluginName)
	framework.Logf("Node name not specified for getVolumeOpCounts, falling back to listing nodes from API Server")
	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	framework.ExpectNoError(err, "Error listing nodes: %v", err)
	if len(nodes.Items) > nodeLimit {
		// For large clusters with > nodeLimit nodes it is too time consuming to
		// gather metrics from all nodes, so we just ignore node metrics there.
		framework.Logf("Skipping operation metrics gathering from nodes in getVolumeOpCounts, greater than %v nodes", nodeLimit)
		return totOps
	}
	for _, node := range nodes.Items {
		nodeMetrics, err := metricsGrabber.GrabFromKubelet(node.GetName())
		framework.ExpectNoError(err, "Error getting Kubelet %v metrics: %v", node.GetName(), err)
		totOps = addOpCounts(totOps, getVolumeOpsFromMetricsForPlugin(testutil.Metrics(nodeMetrics), pluginName))
	}
	return totOps
}
// addOpCounts returns a new opCounts holding the per-operation sum of o1
// and o2. Neither input is modified.
func addOpCounts(o1 opCounts, o2 opCounts) opCounts {
	// Two straight additive passes are equivalent to the original
	// seen-set bookkeeping (missing keys read as zero) and simpler.
	totOps := opCounts{}
	for op, count := range o1 {
		totOps[op] += count
	}
	for op, count := range o2 {
		totOps[op] += count
	}
	return totOps
}
// getMigrationVolumeOpCounts returns the op counts recorded for the
// in-tree plugin and for its CSI-migrated counterpart, in that order.
// For a native CSI driver (empty pluginName) both results are empty.
func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCounts, opCounts) {
	if len(pluginName) == 0 {
		// Not an in-tree driver
		framework.Logf("Test running for native CSI Driver, not checking metrics")
		return opCounts{}, opCounts{}
	}
	migratedOps := opCounts{}
	translator := csitrans.New()
	if csiName, err := translator.GetCSINameFromInTreeName(pluginName); err != nil {
		framework.Logf("Could not find CSI Name for in-tree plugin %v", pluginName)
	} else {
		migratedOps = getVolumeOpCounts(cs, "kubernetes.io/csi:"+csiName)
	}
	return getVolumeOpCounts(cs, pluginName), migratedOps
}
// validateMigrationVolumeOpCounts fails the test when a migrated in-tree
// plugin has recorded new in-tree volume operations since the old
// snapshot was taken. Native CSI drivers and un-migrated plugins are not
// validated.
func validateMigrationVolumeOpCounts(cs clientset.Interface, pluginName string, oldInTreeOps, oldMigratedOps opCounts) {
	if len(pluginName) == 0 {
		// This is a native CSI Driver and we don't check ops
		return
	}
	migrated := sets.NewString(strings.Split(*migratedPlugins, ",")...).Has(pluginName)
	if !migrated {
		// In-tree plugin is not migrated
		framework.Logf("In-tree plugin %v is not migrated, not validating any metrics", pluginName)
		// We don't check in-tree plugin metrics because some negative test
		// cases may not do any volume operations and therefore not emit any
		// metrics
		// We don't check counts for the Migrated version of the driver because
		// if tests are running in parallel a test could be using the CSI Driver
		// natively and increase the metrics count
		// TODO(dyzz): Add a dimension to OperationGenerator metrics for
		// "migrated"->true/false so that we can disambiguate migrated metrics
		// and native CSI Driver metrics. This way we can check the counts for
		// migrated version of the driver for stronger negative test case
		// guarantees (as well as more informative metrics).
		return
	}
	// This plugin is migrated based on the test flag storage.migratedPlugins:
	// no new in-tree operations may have happened.
	newInTreeOps, _ := getMigrationVolumeOpCounts(cs, pluginName)
	for op, count := range newInTreeOps {
		if count != oldInTreeOps[op] {
			framework.Failf("In-tree plugin %v migrated to CSI Driver, however found %v %v metrics for in-tree plugin", pluginName, count-oldInTreeOps[op], op)
		}
	}
	// We don't check for migrated metrics because some negative test cases
	// may not do any volume operations and therefore not emit any metrics
}
// skipVolTypePatterns skips the current test when the driver supports
// dynamic provisioning and the pattern's volume type appears in
// skipVolTypes.
func skipVolTypePatterns(pattern testpatterns.TestPattern, driver TestDriver, skipVolTypes map[testpatterns.TestVolType]bool) {
	if _, supportsProvisioning := driver.(DynamicPVTestDriver); !supportsProvisioning {
		return
	}
	if skipVolTypes[pattern.VolType] {
		e2eskipper.Skipf("Driver supports dynamic provisioning, skipping %s pattern", pattern.VolType)
	}
}
// tryFunc runs f and converts a panic in f into an error. A nil f is a
// no-op. Returns nil when f completes without panicking.
func tryFunc(f func()) (err error) {
	// err MUST be a named result: when a panic is recovered in the deferred
	// function, the function returns with the current values of its result
	// parameters. The previous local `var err error` meant a recovered
	// panic still returned nil to the caller.
	if f == nil {
		return nil
	}
	defer func() {
		if recoverError := recover(); recoverError != nil {
			err = fmt.Errorf("%v", recoverError)
		}
	}()
	f()
	return nil
}
|
package jsonv
import (
"reflect"
)
// Pather lazily produces the JSON path of the value being parsed.
// It is a function rather than a pre-built string to avoid expensive
// path string formatting in the 99.9999% of cases where it's not needed.
type Pather func() string
/*
SchemaType is used by Parser for parsing and validation of JSON types.

Parse can return either a ValidationError or a general error if encountered.
This is used to allow the parser and its clients to differentiate between
validation errors and IO errors.

If the error is just a validation error, but parsing can continue, the
implementation should return a ValidationError, otherwise any other error type
will be collected up with all errors accumulated so far and parsing stopped.
*/
type SchemaType interface {
	Parse(Pather, *Scanner, interface{}) error
}
/*
PreparedSchemaType can be implemented by SchemaTypes that need one-time
initialisation once the Go type they parse into is known.

Anything that embeds other types must implement this and call it on embedded
types to allow them to initialise once the type is known. E.g. Object types
use this to pre-cache all the fields they need and call it on each of their
ObjectProp.Types.
*/
type PreparedSchemaType interface {
	Prepare(reflect.Type) error
}
|
package main
// NOTE(review): this comment originally asked "how to detect a cycle in a
// linked list", which is unrelated to the step-sequence code below — verify
// intent; it may be a leftover from another exercise.
import (
"fmt"
)
// fib returns the n-th element (1-indexed) of the sequence 1, 2, 3, 5, 8, ...
// where each element is the sum of the previous two. For n < 1 it returns 2,
// matching the original loop's behaviour (zero iterations).
func fib(n int) int {
	x, y := 1, 2
	switch {
	case n == 1:
		return x
	case n == 2:
		return y
	default:
		for i := 0; i < n-2; i++ {
			x, y = y, x+y
		}
		return y
	}
}

// setp prompts for a step number on stdin and returns the corresponding
// element of the step sequence. The pure computation lives in fib so it
// is testable apart from the I/O.
func setp() int {
	var n int
	fmt.Print("please input step number: ")
	fmt.Scanln(&n)
	return fib(n)
}
// main reads a step number and prints the computed sequence value.
func main() {
	fmt.Println(setp())
}
|
package ui
import (
"fmt"
"github.com/jroimartin/gocui"
"github.com/ryo-ma/coronaui/lib"
)
// TextPanel is the gocui panel that shows the textual details of the
// currently selected country.
type TextPanel struct {
	ViewName string // name used to register and look up the gocui view
	viewPosition ViewPosition // relative on-screen placement of the view
}
// NewTextPanel constructs the text panel with its fixed view name and
// the relative screen region it occupies.
func NewTextPanel() (*TextPanel, error) {
	position := ViewPosition{
		x0: Position{0.3, 0},
		y0: Position{0.0, 0},
		x1: Position{1.0, 2},
		y1: Position{0.9, 2},
	}
	panel := &TextPanel{ViewName: "text", viewPosition: position}
	return panel, nil
}
// DrawView creates or re-positions the panel's gocui view. On first
// creation (gocui.ErrUnknownView) the view's colors and title are
// initialised once; any other SetView error is propagated.
func (textPanel *TextPanel) DrawView(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	x0, y0, x1, y1 := textPanel.viewPosition.GetCoordinates(maxX, maxY)
	if v, err := g.SetView(textPanel.ViewName, x0, y0, x1, y1); err != nil {
		if err != gocui.ErrUnknownView {
			// Previously ANY error fell into the styling branch, where v may
			// be nil (nil dereference) and real errors were swallowed.
			return err
		}
		v.SelFgColor = gocui.ColorBlack
		v.SelBgColor = gocui.ColorGreen
		v.Title = " text "
	}
	return nil
}
// DrawText replaces the panel's content with the given country's summary
// and retitles the view after the country.
func (textPanel *TextPanel) DrawText(g *gocui.Gui, country *lib.Country) error {
	view, err := g.View(textPanel.ViewName)
	if err != nil {
		return err
	}
	view.Clear()
	view.Title = " " + country.Name + " "
	fmt.Fprintln(view, country.String())
	return nil
}
|
package data
import "testing"
func TestCheckValidation(t *testing.T){
p:= &Product{
Name: "ProductName",
Price: 1,
SKU: "abcd-def",
}
err:= p.Validate()
if err!= nil{
t.Fatal(err)
}
} |
package oauthstore
import (
"io/ioutil"
"os"
"reflect"
"testing"
"time"
"golang.org/x/oauth2"
)
// TestFileStorage_GetToken checks that reading a token from an empty
// storage file fails and yields no token.
func TestFileStorage_GetToken(t *testing.T) {
	// The TempFile error was previously discarded with _, which would
	// cause a nil dereference below if creation failed.
	fname, err := ioutil.TempFile(".", "example")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(fname.Name())
	tests := []struct {
		name    string
		f       *FileStorage
		want    *oauth2.Token
		wantErr bool
	}{
		// An empty file contains no token, so GetToken must error.
		{"base-case", &FileStorage{Path: fname.Name()}, nil, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.f.GetToken()
			if (err != nil) != tt.wantErr {
				t.Errorf("FileStorage.GetToken() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("FileStorage.GetToken() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestFileStorage_SetToken checks that a valid token is stored without
// error and a nil token is rejected.
func TestFileStorage_SetToken(t *testing.T) {
	// The TempFile error was previously discarded with _, which would
	// cause a nil dereference below if creation failed.
	fname, err := ioutil.TempFile(".", "example")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(fname.Name())
	type args struct {
		t *oauth2.Token
	}
	tests := []struct {
		name    string
		f       *FileStorage
		args    args
		wantErr bool
	}{
		{"base-case", &FileStorage{Path: fname.Name()}, args{&oauth2.Token{AccessToken: "123", Expiry: time.Now().Add(30 * time.Minute)}}, false},
		{"nil token", &FileStorage{Path: fname.Name()}, args{nil}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := tt.f.SetToken(tt.args.t); (err != nil) != tt.wantErr {
				t.Errorf("FileStorage.SetToken() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
package cmd
import (
"fmt"
"github.com/oberd/ecsy/ecs"
"github.com/spf13/cobra"
)
// logsStatusFilter selects which tasks logs are shown for.
// Can be "all", "running", "stopped"; set via the --status flag in init.
var logsStatusFilter = "all"
// logsCmd represents the logs command: "logs [cluster] [service]".
var logsCmd = &cobra.Command{
	Use: "logs [cluster] [service]",
	Short: "Show recent logs for a service in a cluster (must be cloudwatch based)",
	Long: `Show recent logs for a service in a cluster`,
	// Run may index args[0] and args[1] directly because PreRunE below
	// guarantees exactly two positional arguments.
	Run: func(cmd *cobra.Command, args []string) {
		err := ecs.GetLogs(args[0], args[1], logsStatusFilter)
		failOnError(err, "")
	},
	// PreRunE validates the positional argument count before Run fires.
	PreRunE: func(cmd *cobra.Command, args []string) error {
		if len(args) != 2 {
			return fmt.Errorf("Incorrect number of arguments supplied! (%d / 2)", len(args))
		}
		return nil
	},
}
// init registers the logs command on the root command and wires up the
// --status/-s flag that backs logsStatusFilter.
func init() {
	RootCmd.AddCommand(logsCmd)
	logsCmd.Flags().StringVarP(&logsStatusFilter, "status", "s", "all", "Limit to only tasks of [status] (stopped|running|all)")
}
|
package main
import "fmt"
// vehicle holds the attributes shared by every vehicle kind.
type vehicle struct {
	doors int
	colour string
}

// truck embeds vehicle (field promotion) and adds truck-specific data.
type truck struct {
	vehicle
	fourWheel bool
}

// sedan embeds vehicle (field promotion) and adds sedan-specific data.
type sedan struct {
	vehicle
	luxury bool
}
// main demonstrates struct embedding: both truck and sedan reuse the
// vehicle fields and access them through field promotion.
func main() {
	hmmvw := truck{
		vehicle:   vehicle{doors: 4, colour: "army green"},
		fourWheel: true,
	}
	passat := sedan{
		vehicle: vehicle{doors: 5, colour: "black"},
		luxury:  true,
	}
	fmt.Println(hmmvw, hmmvw.fourWheel)
	fmt.Println(passat, passat.doors)
}
|
/*
Tests basic communication between client and kvnode using kvservice.
Creates 2 non-overlapping transactions and commits them.
Usage:
go run 1_TwoNonOverlappingTransactions.go
*/
package main
import "../kvservice"
import (
"fmt"
)
// main exercises kvservice end-to-end against a fixed set of kvnodes:
// it opens a connection, runs two non-overlapping transactions (T1
// writes and reads key "A", T2 writes and reads key "B"), commits each,
// and closes the connection. Every call result is printed for manual
// inspection rather than asserted.
func main() {
	var nodes []string
	// Hard-coded addresses of the deployed kvnode cluster.
	nodes = []string{"52.233.45.243:2222", "52.175.29.87:2222", "40.69.195.111:2222", "13.65.91.243:2222", "51.140.126.235:2222", "52.233.190.164:2222"}
	c := kvservice.NewConnection(nodes)
	fmt.Printf("NewConnection returned: %v\n", c)
	// children := c.GetChildren(nodes[3], "")
	// fmt.Printf("GetChildren returned: %x\n", children)
	t1, err := c.NewTX()
	fmt.Printf("NewTX returned: %v, %v\n", t1, err)
	success, err := t1.Put("A", "T1")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, v, err := t1.Get("A")
	fmt.Printf("Get returned: %v, %v, %v\n", success, v, err)
	// Commit with validation-delay argument 0.
	success, txID, err := t1.Commit(0)
	fmt.Printf("Commit returned: %v, %v, %v\n", success, txID, err)
	t2, err := c.NewTX()
	fmt.Printf("NewTX returned: %v, %v\n", t2, err)
	success, err = t2.Put("B", "T2")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, v, err = t2.Get("B")
	fmt.Printf("Get returned: %v, %v, %v\n", success, v, err)
	// Commit with validation-delay argument 3.
	success, txID, err = t2.Commit(3)
	fmt.Printf("Commit returned: %v, %v, %v\n", success, txID, err)
	c.Close()
}
|
package consul
import (
"fmt"
consulAPI "github.com/hashicorp/consul/api"
consulWatch "github.com/hashicorp/consul/api/watch"
)
// watcher ties a consul watch plan for one server type to the channel
// where health-filtered server lists are published.
type watcher struct {
	serverType string // logical service name being watched
	plan *consulWatch.Plan // the running consul watch plan
	noticeChan chan AvailableServers // receives the refreshed server list
}
// handler is the consul watch-plan callback: it filters the service
// entries down to instances whose aggregated health check passes and
// publishes the resulting "host:port" list on noticeChan. Non-service
// payloads (including nil) are ignored.
func (d *watcher) handler(index uint64, raw interface{}) {
	if raw == nil {
		return
	}
	entries, ok := raw.([]*consulAPI.ServiceEntry)
	if !ok {
		return
	}
	var servers []string
	for _, entry := range entries {
		// healthy check fail, continue anyway
		if entry.Checks.AggregatedStatus() != consulAPI.HealthPassing {
			continue
		}
		servers = append(servers, fmt.Sprintf("%s:%d", entry.Service.Address, entry.Service.Port))
	}
	d.noticeChan <- AvailableServers{
		ServerType: d.serverType,
		Servers:    servers,
	}
}
|
package logdata
import (
"encoding/json"
"github.com/google/gopacket/layers"
)
// ICMPv6LogData is the struct describing the logged data for ICMPv6 packets.
type ICMPv6LogData struct {
	TypeCode layers.ICMPv6TypeCode `json:"type_code"` // combined type/code value from gopacket
	Type uint8 `json:"type"` // ICMPv6 message type
	Code uint8 `json:"code"` // ICMPv6 message code
	TypeCodeName string `json:"type_code_name"` // human-readable name of the type/code pair
	Checksum uint16 `json:"checksum"`
	Payload Payload `json:"payload"`
}
// ICMPv6EventLog is the event log struct for ICMPv6 packets, combining
// the ICMPv6 layer data, its enclosing IPv6 data and the common fields.
type ICMPv6EventLog struct {
	ICMPv6 ICMPv6LogData `json:"icmpv6"`
	IP IPv6LogData `json:"ip"`
	BaseLogData
}
// String serialises the event log to its JSON representation, returning
// an error if marshalling fails.
func (eventLog ICMPv6EventLog) String() (string, error) {
	encoded, err := json.Marshal(eventLog)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
|
// ˅
package main
// ˄
// Mediator coordinates a group of colleagues: it creates them and is
// notified whenever one of them changes state.
type Mediator interface {
	ColleagueChanged()
	CreateColleagues()
	// ˅
	// ˄
}
// ˅
// ˄
|
package main
import (
"fmt"
"strconv"
)
// main extracts up to `width` digits of `number` (least significant
// first), zero-pads to the requested width, reverses the digit string
// and prints both forms.
func main() {
	number := 0
	width := 1

	// Pop digits least-significant first, capped at width digits.
	digits := ""
	for n := number; n != 0 && len(digits) != width; n /= 10 {
		digits += strconv.Itoa(n % 10)
	}
	// Zero-pad up to the requested width.
	for len(digits) < width {
		digits += "0"
	}
	// Reverse the digit string.
	reversed := ""
	for i := len(digits) - 1; i >= 0; i-- {
		reversed += string(digits[i])
	}
	fmt.Println(digits)
	fmt.Println(reversed)
}
|
package pgsql
import (
	"database/sql"
	"database/sql/driver"
	"math"
	"strconv"
)
// MoneyFromInt64 returns a driver.Valuer that produces a PostgreSQL money from the given Go int64.
// The int64 is interpreted as an amount in cents (hundredths of the currency unit).
func MoneyFromInt64(val int64) driver.Valuer {
	return moneyFromInt64{val: val}
}
// MoneyToInt64 returns an sql.Scanner that converts a PostgreSQL money into a Go int64 and sets it to val.
// The scanned value is expressed in cents (hundredths of the currency unit).
func MoneyToInt64(val *int64) sql.Scanner {
	return moneyToInt64{val: val}
}
// moneyFromInt64 adapts an int64 amount in cents to driver.Valuer.
type moneyFromInt64 struct {
	val int64
}

// Value renders the amount as a PostgreSQL money literal such as "$12.34".
// It uses integer arithmetic instead of the previous float64 formatting,
// which lost precision for amounts beyond 2^53 cents.
func (v moneyFromInt64) Value() (driver.Value, error) {
	cents := v.val
	out := []byte{'$'}
	if cents < 0 {
		out = append(out, '-')
		// NOTE(review): math.MinInt64 cannot be negated; assumed out of
		// range for realistic money values.
		cents = -cents
	}
	out = strconv.AppendInt(out, cents/100, 10)
	out = append(out, '.')
	if frac := cents % 100; frac < 10 {
		out = append(out, '0')
		out = strconv.AppendInt(out, frac, 10)
	} else {
		out = strconv.AppendInt(out, frac, 10)
	}
	return out, nil
}
// moneyToInt64 adapts a scanned PostgreSQL money value into an int64
// amount in cents.
type moneyToInt64 struct {
	val *int64
}

// Scan parses a money literal such as "$12.34" into cents. A nil source
// leaves the destination untouched.
//
// NOTE(review): assumes lc_monetary output without thousands separators
// ("$1234.56", not "$1,234.56") — ParseFloat fails otherwise; confirm
// against the server's locale settings.
func (v moneyToInt64) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	data = data[1:] // drop $
	f64, err := strconv.ParseFloat(string(data), 64)
	if err != nil {
		return err
	}
	// Round instead of truncating: 12.34 is not exactly representable in
	// float64, so int64(12.34*100) previously yielded 1233.
	*v.val = int64(math.Round(f64 * 100.0))
	return nil
}
|
package main
// main demonstrates if-statement initializers: a function call in the
// init clause, and short variable declarations used in a comparison.
func main() {
	x := 10
	if xinit(); x == 0 {
		println("a")
	}
	a, b := x+1, x+10
	if a < b {
		println(a)
	} else {
		println(b)
	}
}
// xinit logs that it was invoked and always reports success.
func xinit() bool {
	println("In xinit ...")
	return true
}
|
// Copyright © 2018 Taavi Kivisik
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package games
import (
"crypto/rand"
"fmt"
"math/big"
"strconv"
"strings"
)
// Game tuning constants.
const (
	letters string = "abcdefghijklmnopqrstuvwxyz" // column labels; only the first boardSideLength are valid
	boardSideLength uint8 = 4 // the board is boardSideLength x boardSideLength cells
	MaxShips int = 2 // ships per board
	maxShots int = 100 // presumably an upper bound on shots per game — TODO confirm, unused in this file
)
// LegendStruct holds the characters used to render each kind of board cell.
type LegendStruct struct {
	Terrain string
	Ship string
	Hit string
	Miss string
}
// Legend is the rendering legend used by Board printing.
var Legend = LegendStruct{
	Terrain: "~",
	Ship: "0",
	Hit: "X",
	Miss: "*",
}
// String returns a printable, right-aligned listing of the legend symbols.
func (l *LegendStruct) String() string {
	rows := []struct {
		label, symbol string
	}{
		{"Terrain", l.Terrain},
		{"Ship", l.Ship},
		{"Hit", l.Hit},
		{"Miss", l.Miss},
	}
	var b strings.Builder
	for _, row := range rows {
		fmt.Fprintf(&b, "%10s '%s'\n", row.label, row.symbol)
	}
	return b.String()
}
// Coordinate expresses a location on a map using x and y
// (0-based column and row indices respectively).
type Coordinate struct {
	x byte
	y byte
}
// String renders the coordinate in the human letter+number form
// (column letter, 1-based row), e.g. "d3".
func (c *Coordinate) String() string {
	return fmt.Sprintf("Human representation: %c%d", letters[c.x], c.y+1)
}
// Read prompts the user for a coordinate such as "d3" until a valid one
// is entered (at most 100 attempts) and stores it 0-based in c.
func (c *Coordinate) Read() {
	var s string
	for i := 0; i < 100; i++ {
		fmt.Println("Please enter a coordinate (e.g. 'd3'):")
		fmt.Scanln(&s)
		if s == "" {
			continue
		}
		if strings.Contains(letters[boardSideLength:], string(s[0])) {
			// Fixed off-by-one: letters[boardSideLength-1] is the LAST valid
			// column letter; the message previously advertised one past it.
			fmt.Printf("Please use letters from %c-%c\n", letters[0], letters[boardSideLength-1])
			continue
		}
		if strings.Contains("0123456789", string(s[0])) {
			fmt.Printf("First character must be a letter from %c-%c\n", letters[0], letters[boardSideLength-1])
			continue
		}
		x := strings.IndexRune(letters[:boardSideLength], rune(s[0]))
		if x < 0 {
			// Anything else (uppercase, punctuation, ...) used to slip through
			// and store IndexRune's -1 as column 255; reject it instead.
			fmt.Printf("First character must be a letter from %c-%c\n", letters[0], letters[boardSideLength-1])
			continue
		}
		y, err := strconv.Atoi(s[1:])
		if err != nil {
			fmt.Println("Please make sure number follows the letter immediately")
			continue
		}
		if y <= 0 || uint8(y) > boardSideLength {
			fmt.Printf("Please use numbers from 1-%d\n", boardSideLength)
			continue
		}
		c.x = byte(x)
		c.y = byte(y - 1) // store the row 0-based
		break
	}
}
// layer is a bitboard with one byte per row: bit x of l[y] marks column x.
type layer [boardSideLength]byte

// StringRaw dumps the layer's rows as binary strings (debugging aid).
func (l *layer) StringRaw() string {
	var str strings.Builder
	for row := uint8(0); row < boardSideLength; row++ {
		fmt.Fprintf(&str, "%08b\n", l[row])
	}
	return str.String()
}
// coordinateToOne turns a Coordinate on a layer to 1
// (sets bit c.x of row c.y).
func (l *layer) coordinateToOne(c Coordinate) {
	l[c.y] |= 1 << c.x
}
// Board is an object for ships and shots, each kept in its own bit layer.
type Board struct {
	ships layer // cells occupied by ships
	shots layer // cells that have been fired at
	HitCount int8 // number of hit ships; recomputed by String
}
// NewBoard returns an empty board: no ships, no shots, zero hits.
// (The zero value of Board is exactly that.)
func NewBoard() *Board {
	return &Board{}
}
// String returns the current battleship board as string.
// When enemy is true, un-hit ships are rendered as terrain so the
// opponent's fleet stays hidden.
// Side effect: b.HitCount is recomputed from scratch while rendering.
func (b *Board) String(enemy bool) string {
	b.HitCount = 0
	var str strings.Builder
	str.WriteString("  ") // space instead of a row number
	// Column header: the letters of the valid columns.
	for column := uint8(0); column < boardSideLength; column++ {
		fmt.Fprintf(&str, "%2s", letters[column:column+1])
	}
	str.WriteString("\n")
	for row := uint8(0); row < boardSideLength; row++ {
		fmt.Fprintf(&str, "%2d", row+1) // 1-based row label
		for column := uint8(0); column < boardSideLength; column++ {
			coord := Coordinate{column, row}
			if b.hasShip(coord) {
				if b.hasShot(coord) {
					fmt.Fprintf(&str, " %s", Legend.Hit)
					b.HitCount++
				} else {
					if enemy { // hide enemy ships until hit
						fmt.Fprintf(&str, " %s", Legend.Terrain)
					} else {
						fmt.Fprintf(&str, " %s", Legend.Ship)
					}
				}
			} else { // not a ship
				if b.hasShot(coord) {
					fmt.Fprintf(&str, " %s", Legend.Miss)
				} else {
					fmt.Fprintf(&str, " %s", Legend.Terrain)
				}
			}
		}
		str.WriteString("\n")
	}
	return str.String()
}
// Print prints the board according to the Legend; enemy hides un-hit ships.
func (b *Board) Print(enemy bool) {
	fmt.Print(b.String(enemy))
}
// hasShip reports whether a ship occupies the given coordinate.
func (b *Board) hasShip(c Coordinate) bool {
	return b.ships[c.y]&(1<<c.x) != 0
}
// hasShot reports whether the given coordinate has already been fired at.
func (b *Board) hasShot(c Coordinate) bool {
	return b.shots[c.y]&(1<<c.x) != 0
}
// isSurroundedByWater reports whether no ship occupies c or any of its
// eight neighbouring cells (edges clamped to the board).
func (b *Board) isSurroundedByWater(c Coordinate) bool {
	for row := -1; row < 2; row++ {
		if int(c.y)+row < 0 {
			// Above the top edge: skip this neighbour row.
			continue
		}
		if c.y+byte(row) >= boardSideLength {
			// Past the bottom edge. Rows -1 and 0 were already inspected and
			// row +1 is the last, so returning here is equivalent to breaking.
			return true
		}
		for col := -1; col < 2; col++ {
			if int(c.x)+col < 0 {
				continue
			}
			if c.x+byte(col) >= boardSideLength {
				continue
			}
			if b.ships[c.y+byte(row)]&(1<<(c.x+byte(col))) != 0 {
				return false
			}
		}
	}
	return true
}
// randomLocation returns a uniformly random coordinate on the board,
// using crypto/rand as the entropy source. A failure of the entropy
// source is unrecoverable here and panics.
func (b *Board) randomLocation() Coordinate {
	nBig, err := rand.Int(rand.Reader, big.NewInt(int64(boardSideLength*boardSideLength)))
	if err != nil {
		panic(err)
	}
	// The original re-checked err after the panic above — dead code, removed.
	n := uint8(nBig.Int64())
	return Coordinate{
		x: n % boardSideLength,
		y: n / boardSideLength,
	}
}
// AddShipBy places a single ship, either at a random free location
// (random == true) or at a user-entered coordinate. It keeps retrying
// until a cell is found that is free and not adjacent to another ship.
// NOTE(review): loops forever if no legal cell remains — verify callers
// keep the ship count within what the board can hold.
func (b *Board) AddShipBy(random bool) {
	var coord Coordinate
	for {
		if random {
			coord = b.randomLocation()
		} else {
			coord.Read()
		}
		// (`== true` / `== false` comparisons replaced with direct boolean use.)
		if b.hasShip(coord) {
			if !random {
				fmt.Println("There already is a ship in that location.")
			}
			continue
		}
		if !b.isSurroundedByWater(coord) {
			if !random {
				fmt.Println("Ships must have space between them.")
			}
			continue
		}
		b.ships.coordinateToOne(coord)
		return
	}
}
// ShootThisBoard records one shot on b, chosen randomly when automatic,
// otherwise read from the user. Retries until an un-shot cell is picked,
// giving up after 1000 attempts.
func (b *Board) ShootThisBoard(automatic bool) {
	var coord Coordinate
	// BUG FIX: the loop variable was never incremented, so the 1000-attempt
	// cap never took effect and a full board spun forever.
	for i := 0; i < 1000; i++ {
		if automatic {
			coord = b.randomLocation()
		} else {
			coord.Read()
		}
		if b.hasShot(coord) {
			continue
		}
		b.shots.coordinateToOne(coord)
		if automatic {
			fmt.Println("ENEMY just shot.")
		}
		break
	}
}
|
package main
import (
"fmt"
"os"
"runtime"
"sync"
)
func printArg (wg *sync.WaitGroup, val string) {
defer wg.Done()
fmt.Println(val)
}
func main() {
runtime.GOMAXPROCS(2)
args := os.Args[1:]
var wg sync.WaitGroup
for i := range args {
wg.Add(1)
go printArg(&wg, args[i])
}
wg.Wait()
} |
package leetcode
/*Given a string and an integer k, you need to reverse the first k characters for every 2k characters counting from the start of the string. If there are less than k characters left, reverse all of them. If there are less than 2k but greater than or equal to k characters, then reverse the first k characters and left the other as original.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/reverse-string-ii
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
// reverseStr reverses the first k characters of every 2k-character chunk
// of s. A tail shorter than k is reversed entirely; a tail of length
// [k, 2k) has only its first k characters reversed. (LeetCode 541.)
func reverseStr(s string, k int) string {
	b := []byte(s)
	// Walk chunk starts; each chunk reverses min(k, remaining) characters,
	// which covers the full-round, >=k and <k leftover cases uniformly.
	for start := 0; start < len(b); start += 2 * k {
		end := start + k - 1
		if end >= len(b) {
			end = len(b) - 1
		}
		reverse(b, start, end)
	}
	return string(b)
}

// reverse flips s[begin..end] in place (inclusive bounds).
func reverse(s []byte, begin, end int) {
	for ; begin < end; begin, end = begin+1, end-1 {
		s[begin], s[end] = s[end], s[begin]
	}
}
|
package main
import (
	// buffered reading of input/output streams
"bufio"
"fmt"
	// net package: TCP socket operations
"net"
"os"
"strings"
)
// checkError panics on any non-nil error — crude fail-fast handling for
// this demo client. (Reformatted: the original was not gofmt-clean and
// used superfluous semicolons.)
func checkError(err error) {
	if err != nil {
		panic(err)
	}
}
// messagesend reads lines from stdin and writes them to conn until the
// user types EXIT (case-insensitive) or a write fails; the connection is
// closed on either exit path. (Reformatted to gofmt style; Chinese
// comments translated.)
func messagesend(conn net.Conn) {
	// Create the buffered stdin reader ONCE. Rebuilding it every loop
	// iteration (as before) could silently drop input that was already
	// buffered by the previous reader.
	reader := bufio.NewReader(os.Stdin)
	for {
		// Read one line of user input.
		data, _, _ := reader.ReadLine()
		input := string(data)
		if strings.ToUpper(input) == "EXIT" {
			conn.Close()
			break
		}
		// Forward the line over the connection.
		if _, err := conn.Write([]byte(input)); err != nil {
			conn.Close()
			fmt.Printf("fail:%s\n", err.Error())
			break
		}
	}
}
func main(){
conn,err:=net.Dial("tcp","127.0.0.1:8080");
checkError(err);
defer conn.Close();
// conn.Write([]byte("hello iam client"));
go messagesend(conn);
buf:=make([]byte,1024);
for{
_,err:=conn.Read(buf);
checkError(err);
fmt.Printf("收到服务器消息"+string(buf));
}
fmt.Println("客户端结束");
} |
package models
import (
"database/sql"
"git.hoogi.eu/snafu/go-blog/logger"
"strings"
"time"
)
// SQLiteArticleDatasource providing an implementation of ArticleDatasourceService for SQLite
type SQLiteArticleDatasource struct {
	SQLConn *sql.DB // open SQLite connection used by all queries
}
// Create creates an article and returns the generated row id.
// The article is always inserted as an unpublished draft: published is
// forced to false and published_on to NULL, regardless of a's values;
// last_modified is set to now.
func (rdb *SQLiteArticleDatasource) Create(a *Article) (int, error) {
	res, err := rdb.SQLConn.Exec("INSERT INTO article (headline, teaser, content, slug, published_on, published, last_modified, category_id, user_id) "+
		"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
		a.Headline,
		a.Teaser,
		a.Content,
		a.Slug,
		nil,
		false,
		time.Now(),
		a.CID,
		a.Author.ID)
	if err != nil {
		return 0, err
	}
	id, err := res.LastInsertId()
	if err != nil {
		return 0, err
	}
	return int(id), nil
}
// List returns a slice of articles; if the user is not nil (and not an
// admin) only that explicit user's articles are returned, if the category
// is not nil results are restricted to it, and p optionally paginates.
// The PublishedCriteria specifies which articles should be considered.
func (rdb *SQLiteArticleDatasource) List(u *User, c *Category, p *Pagination, pc PublishedCriteria) ([]Article, error) {
	rows, err := selectArticlesStmt(rdb.SQLConn, u, c, p, pc)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err := rows.Close(); err != nil {
			logger.Log.Error(err)
		}
	}()
	articles := []Article{}
	for rows.Next() {
		var a Article
		var ru User
		// Column order must match selectArticlesStmt's SELECT list.
		if err := rows.Scan(&a.ID, &a.Headline, &a.Teaser, &a.Content, &a.Published, &a.PublishedOn, &a.Slug, &a.LastModified, &ru.ID, &ru.DisplayName,
			&ru.Email, &ru.Username, &ru.IsAdmin, &a.CID, &a.CName); err != nil {
			return nil, err
		}
		a.Author = &ru
		articles = append(articles, a)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return articles, nil
}
// Count returns the number of articles found; if the user is not nil (and
// not an admin) only that explicit user's articles are counted, and a
// non-nil category restricts the count to it.
// The PublishedCriteria specifies which articles should be considered.
func (rdb *SQLiteArticleDatasource) Count(u *User, c *Category, pc PublishedCriteria) (int, error) {
	var total int
	var stmt strings.Builder
	var args []interface{}
	stmt.WriteString("SELECT count(a.id) FROM article a ")
	// INNER JOIN when filtering by category (rows must have one),
	// LEFT JOIN otherwise so uncategorised articles are still counted.
	if c != nil {
		stmt.WriteString("INNER JOIN category c ON (c.id = a.category_id) ")
	} else {
		stmt.WriteString("LEFT JOIN category c ON (c.id = a.category_id) ")
	}
	stmt.WriteString("WHERE ")
	if c != nil {
		stmt.WriteString("c.name = ? AND ")
		args = append(args, c.Name)
	}
	if u != nil {
		// Admins see everything; non-admins only their own articles.
		if !u.IsAdmin {
			stmt.WriteString("a.user_id=? AND ")
			args = append(args, u.ID)
		}
	}
	// The published predicate always comes last so it terminates any
	// dangling "AND " written above.
	if pc == NotPublished {
		stmt.WriteString("a.published = '0' ")
	} else if pc == All {
		stmt.WriteString("(a.published='0' OR a.published='1') ")
	} else {
		stmt.WriteString("a.published = '1' ")
	}
	if err := rdb.SQLConn.QueryRow(stmt.String(), args...).Scan(&total); err != nil {
		return -1, err
	}
	return total, nil
}
// Get returns an article by its id; if the user is not nil (and not an
// admin) only that explicit user's article is returned.
// The PublishedCriteria specifies which articles should be considered.
func (rdb *SQLiteArticleDatasource) Get(articleID int, u *User, pc PublishedCriteria) (*Article, error) {
	var a Article
	var ru User
	// Empty slug selects lookup-by-id inside selectArticleStmt.
	if err := selectArticleStmt(rdb.SQLConn, articleID, "", u, pc).Scan(&a.ID, &a.Headline, &a.PublishedOn, &a.Published, &a.Slug, &a.Teaser, &a.Content,
		&a.LastModified, &ru.ID, &ru.DisplayName, &ru.Email, &ru.Username, &ru.IsAdmin, &a.CID, &a.CName); err != nil {
		return nil, err
	}
	a.Author = &ru
	return &a, nil
}
// GetBySlug returns an article by its slug; if the user is not nil (and
// not an admin) only that explicit user's article is returned.
// The PublishedCriteria specifies which articles should be considered.
func (rdb *SQLiteArticleDatasource) GetBySlug(slug string, u *User, pc PublishedCriteria) (*Article, error) {
	var a Article
	var ru User
	// articleID -1 selects lookup-by-slug inside selectArticleStmt.
	if err := selectArticleStmt(rdb.SQLConn, -1, slug, u, pc).Scan(&a.ID, &a.Headline, &a.PublishedOn, &a.Published, &a.Slug, &a.Teaser, &a.Content,
		&a.LastModified, &ru.ID, &ru.DisplayName, &ru.Email, &ru.Username, &ru.IsAdmin, &a.CID, &a.CName); err != nil {
		return nil, err
	}
	a.Author = &ru
	return &a, nil
}
// Update updates an article's editable fields (headline, teaser, slug,
// content, category) and refreshes last_modified.
func (rdb *SQLiteArticleDatasource) Update(a *Article) error {
	// a.Teaser was previously passed as &a.Teaser. The driver's default
	// parameter converter dereferences pointers so it worked, but the value
	// is now passed directly, consistent with every sibling argument.
	if _, err := rdb.SQLConn.Exec("UPDATE article SET headline=?, teaser=?, slug=?, content=?, last_modified=?, category_id=? WHERE id=? ", a.Headline, a.Teaser, a.Slug,
		a.Content, time.Now(), a.CID, a.ID); err != nil {
		return err
	}
	return nil
}
// Publish checks if the article is published or not - switches the appropriate status.
// When publishing, published_on is set to now; when un-publishing it is
// cleared (stored as NULL via an invalid NullTime). last_modified is
// refreshed either way.
func (rdb *SQLiteArticleDatasource) Publish(a *Article) error {
	publishOn := NullTime{Valid: false}
	if !a.Published {
		// Transitioning to published: record the publish timestamp.
		publishOn = NullTime{Time: time.Now(), Valid: true}
	}
	if _, err := rdb.SQLConn.Exec("UPDATE article SET published=?, last_modified=?, published_on=? WHERE id=? ", !a.Published, time.Now(),
		publishOn, a.ID); err != nil {
		return err
	}
	return nil
}
// Delete deletes the article specified by the articleID.
func (rdb *SQLiteArticleDatasource) Delete(articleID int) error {
	_, err := rdb.SQLConn.Exec("DELETE FROM article WHERE id=? ", articleID)
	return err
}
// selectArticleStmt builds and runs the single-article query. A non-empty
// slug selects by slug, otherwise by articleID; a non-nil non-admin user
// restricts to that user's articles; pc restricts by publication state.
func selectArticleStmt(db *sql.DB, articleID int, slug string, u *User, pc PublishedCriteria) *sql.Row {
	var stmt strings.Builder
	var args []interface{}
	// Column order here must match the Scan calls in Get/GetBySlug.
	stmt.WriteString("SELECT a.id, a.headline, a.published_on, a.published, a.slug, a.teaser, a.content, a.last_modified, ")
	stmt.WriteString("u.id, u.display_name, u.email, u.username, u.is_admin, ")
	stmt.WriteString("c.id, c.name ")
	stmt.WriteString("FROM article a ")
	stmt.WriteString("INNER JOIN user u ON (a.user_id = u.id) ")
	// LEFT JOIN so articles without a category are still returned.
	stmt.WriteString("LEFT JOIN category c ON (c.id = a.category_id) ")
	stmt.WriteString("WHERE ")
	if pc == NotPublished {
		stmt.WriteString("a.published='0' ")
	} else if pc == All {
		stmt.WriteString("(a.published='0' OR a.published='1') ")
	} else {
		stmt.WriteString("a.published='1' ")
	}
	if len(slug) > 0 {
		stmt.WriteString("AND a.slug = ? ")
		args = append(args, slug)
	} else {
		stmt.WriteString("AND a.id=? ")
		args = append(args, articleID)
	}
	if u != nil {
		// Admins may fetch any article; non-admins only their own.
		if !u.IsAdmin {
			stmt.WriteString("AND a.user_id=? ")
			args = append(args, u.ID)
		}
	}
	stmt.WriteString("LIMIT 1")
	return db.QueryRow(stmt.String(), args...)
}
// selectArticlesStmt builds and runs the article-list query, optionally
// filtered by category and (for non-admin users) by author, restricted
// by publication state, and optionally paginated via p.
func selectArticlesStmt(db *sql.DB, u *User, c *Category, p *Pagination, pc PublishedCriteria) (*sql.Rows, error) {
	var stmt strings.Builder
	var args []interface{}
	// Column order here must match the Scan call in List.
	stmt.WriteString("SELECT a.id, a.headline, a.teaser, a.content, a.published, a.published_on, a.slug, a.last_modified, ")
	stmt.WriteString("u.id, u.display_name, u.email, u.username, u.is_admin, ")
	stmt.WriteString("c.id, c.name ")
	stmt.WriteString("FROM article a ")
	stmt.WriteString("INNER JOIN user u ON (a.user_id = u.id) ")
	// INNER JOIN when filtering by category (rows must have one),
	// LEFT JOIN otherwise so uncategorised articles are still listed.
	if c != nil {
		stmt.WriteString("INNER JOIN category c ON (c.id = a.category_id) ")
	} else {
		stmt.WriteString("LEFT JOIN category c ON (c.id = a.category_id) ")
	}
	stmt.WriteString("WHERE ")
	if c != nil {
		stmt.WriteString("c.name = ? AND ")
		args = append(args, c.Name)
	}
	if u != nil {
		// Admins list all articles; non-admins only their own.
		if !u.IsAdmin {
			stmt.WriteString("a.user_id=? AND ")
			args = append(args, u.ID)
		}
	}
	// The published predicate always comes last so it terminates any
	// dangling "AND " written above.
	if pc == NotPublished {
		stmt.WriteString("a.published='0' ")
	} else if pc == All {
		stmt.WriteString("(a.published='0' OR a.published='1') ")
	} else {
		stmt.WriteString("a.published='1' ")
	}
	stmt.WriteString("ORDER BY a.published_on DESC, a.published ASC, a.last_modified DESC ")
	if p != nil {
		stmt.WriteString("LIMIT ? OFFSET ? ")
		args = append(args, p.Limit, p.Offset())
	}
	return db.Query(stmt.String(), args...)
}
|
package main
import (
"fmt"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
)
// NewKeypair generates an ECDSA key on the NIST P-256 curve (secp256r1;
// Bitcoin uses secp256k1 instead) and returns the private key together
// with the raw public key encoded as the X||Y coordinate bytes.
//
// NOTE(review): X.Bytes()/Y.Bytes() drop leading zero bytes, so the
// public-key encoding is not fixed-width; confirm downstream consumers
// tolerate that before relying on it.
func NewKeypair() (ecdsa.PrivateKey, []byte) {
	curve := elliptic.P256()
	private, err := ecdsa.GenerateKey(curve, rand.Reader)
	if err != nil {
		// The error was previously only printed and the nil key dereferenced
		// on the next line; fail fast instead.
		panic(err)
	}
	pubkey := append(private.PublicKey.X.Bytes(), private.PublicKey.Y.Bytes()...)
	return *private, pubkey
}
// main signs a sample message with a freshly generated P-256 key and
// prints the raw signature (r||s concatenated) in hex. (Chinese comments
// translated; reformatted to gofmt style.)
func main() {
	// Generate the curve keypair.
	privatekey, _ := NewKeypair()
	hash := sha256.Sum256([]byte("hello tony"))
	// rand.Reader supplies the per-signature nonce entropy.
	r, s, err := ecdsa.Sign(rand.Reader, &privatekey, hash[:])
	if err != nil {
		fmt.Println(err)
		// Previously execution fell through here and dereferenced nil r/s.
		return
	}
	// The signature is the concatenation of the two curve scalars.
	signature := append(r.Bytes(), s.Bytes()...)
	fmt.Printf("%X\n", signature)
}
|
package gvabe
import (
"context"
"encoding/json"
"fmt"
"log"
"reflect"
"regexp"
"sort"
"strings"
"time"
"github.com/btnguyen2k/consu/reddo"
"golang.org/x/oauth2"
"main/src/goapi"
"main/src/gvabe/bo/app"
"main/src/gvabe/bo/user"
"main/src/itineris"
)
/*
initApiHandlers sets up the API handlers: the application registers its
api-handlers by calling router.SetHandler(apiName, apiHandlerFunc).

- an api-handler function must have the following signature:
  func (itineris.ApiContext, itineris.ApiAuth, itineris.ApiParams) *itineris.ApiResult
*/
func initApiHandlers(router *itineris.ApiRouter) {
	router.SetHandler("info", apiInfo)
	router.SetHandler("login", apiLogin)
	router.SetHandler("verifyLoginToken", apiVerifyLoginToken)
	router.SetHandler("systemInfo", apiSystemInfo)
	router.SetHandler("getApp", apiGetApp)
	router.SetHandler("myAppList", apiMyAppList)
	router.SetHandler("getMyApp", apiGetMyApp)
	router.SetHandler("registerApp", apiRegisterApp)
	router.SetHandler("updateMyApp", apiUpdateMyApp)
	router.SetHandler("deleteMyApp", apiDeleteMyApp)
}
/*------------------------------ shared variables and functions ------------------------------*/
var (
	// publicApis lists the APIs that do not need authentication:
	//   - false: the client still needs to send its app-id along with the API call
	//   - true: the API is free for public call
	// NOTE(review): "loginChannelList" is listed here but initApiHandlers does
	// not register a handler with that name — confirm it is registered elsewhere.
	publicApis = map[string]bool{
		"login":            false,
		"info":             true,
		"getApp":           false,
		"verifyLoginToken": true,
		"loginChannelList": true,
	}
)
// _parseLoginTokenFromApi validates a login token supplied as an API parameter.
// It returns (errorResult, nil, nil) when validation fails, or (nil, claims, user)
// when the token is a valid, unexpired login-session token for an existing user.
func _parseLoginTokenFromApi(_token interface{}) (*itineris.ApiResult, *SessionClaims, *user.User) {
	stoken, ok := _token.(string)
	if !ok || stoken == "" {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage("empty token"), nil, nil
	}
	claim, err := parseLoginToken(stoken)
	if err != nil {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(err.Error()), nil, nil
	}
	if claim.isExpired() {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(errorExpiredJwt.Error()), nil, nil
	}
	if claim.Type != sessionTypeLogin {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage("invalid session type"), nil, nil
	}
	// u: renamed from "user", which shadowed the imported "user" package.
	u, err := userDao.Get(claim.UserId)
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error()), nil, nil
	}
	if u == nil {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage("session user not found"), nil, nil
	}
	return nil, claim, u
}
// extractAppAttrsPublic converts an app's public attributes into a generic map
// (via a JSON round-trip) and replaces the "sources" entry with only the
// identity sources that are enabled both for the app and globally.
func extractAppAttrsPublic(myApp *app.App) map[string]interface{} {
	result := make(map[string]interface{})
	attrsPublic := myApp.GetAttrsPublic()
	// JSON round-trip: struct -> map[string]interface{}. Errors are ignored on
	// purpose: the attrs struct is JSON-serializable by construction.
	js, _ := json.Marshal(attrsPublic)
	json.Unmarshal(js, &result)
	loginChannels := make(map[string]bool)
	// idiom: `for s := range` instead of `for s, _ := range`
	for s := range attrsPublic.IdentitySources {
		if attrsPublic.IdentitySources[s] && enabledLoginChannels[s] {
			loginChannels[s] = true
		}
	}
	result["sources"] = loginChannels
	return result
}
/*------------------------------ public APIs ------------------------------*/
// apiInfo handles API call "info": it returns public application metadata,
// the sorted list of enabled login channels, the server's RSA public keys
// (PKCS1 and PKIX PEM), and the client ids of the configured OAuth providers.
func apiInfo(_ *itineris.ApiContext, _ *itineris.ApiAuth, _ *itineris.ApiParams) *itineris.ApiResult {
	appInfo := map[string]interface{}{
		"name":        goapi.AppConfig.GetString("app.name"),
		"shortname":   goapi.AppConfig.GetString("app.shortname"),
		"version":     goapi.AppConfig.GetString("app.version"),
		"description": goapi.AppConfig.GetString("app.desc"),
	}
	loginChannels := make([]string, 0)
	// idiom: `for channel := range` instead of `for channel, _ := range`
	for channel := range enabledLoginChannels {
		loginChannels = append(loginChannels, channel)
	}
	// map iteration order is random; sort for a stable response
	sort.Strings(loginChannels)
	result := map[string]interface{}{
		"app":                appInfo,
		"login_channels":     loginChannels,
		"rsa_public_key":     string(rsaPubKeyPemPKCS1),
		"public_key":         string(rsaPubKeyPemPKIX),
		"facebook_app_id":    fbOAuthConf.ClientID,
		"github_client_id":   githubOAuthConf.ClientID,
		"google_client_id":   googleOAuthConf.ClientID,
		"linkedin_client_id": linkedinOAuthConf.ClientID,
	}
	return itineris.NewApiResult(itineris.StatusOk).SetData(result)
}
// apiSystemInfo handles API call "systemInfo": it returns the most recently
// collected system statistics as-is.
func apiSystemInfo(_ *itineris.ApiContext, _ *itineris.ApiAuth, _ *itineris.ApiParams) *itineris.ApiResult {
	return itineris.NewApiResult(itineris.StatusOk).SetData(lastSystemInfo())
}
/*------------------------------ login & session APIs ------------------------------*/
// _doLoginFacebook exchanges a Facebook access token for an exter pre-login
// session JWT. On success the JWT is the result data, "${token}" in returnUrl
// is substituted with it, and the Facebook profile is fetched asynchronously.
func _doLoginFacebook(_ *itineris.ApiContext, _ *itineris.ApiAuth, accessToken string, app *app.App, returnUrl string) *itineris.ApiResult {
	if DEBUG {
		log.Printf("[DEBUG] START _doLoginFacebook")
		t := time.Now().UnixNano()
		defer func() {
			d := time.Now().UnixNano() - t
			log.Printf("[DEBUG] END _doLoginFacebook: %d ms", d/1000000)
		}()
	}
	// BUG FIX: the cancel function returned by context.WithTimeout was
	// discarded (go vet warning); it must be called to release resources.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// firstly exchange for long-live token
	if token, err := fbExchangeForLongLiveToken(ctx, accessToken); err != nil {
		if DEBUG {
			// NOTE(review): this slicing panics when len(accessToken) < 4 — confirm callers guarantee length.
			log.Printf("[DEBUG] ERROR _doLoginFacebook: %s / %s", "***"+accessToken[len(accessToken)-4:], err)
		}
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(err.Error())
	} else if token == nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage("Error: exchanged token is nil")
	} else {
		// secondly embed accessToken into exter's session as a JWT
		js, _ := json.Marshal(token)
		now := time.Now()
		claims, err := genPreLoginClaims(&Session{
			ClientId:  app.GetId(),
			Channel:   loginChannelFacebook,
			CreatedAt: now,
			ExpiredAt: token.Expiry,
			Data:      js, // JSON-serialization of oauth2.Token
		})
		if err != nil {
			return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
		}
		_, jwt, err := saveSession(claims)
		if err != nil {
			return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
		}
		// lastly use accessToken to fetch Facebook profile info
		go goFetchFacebookProfile(claims.Id)
		returnUrl = strings.ReplaceAll(returnUrl, "${token}", jwt)
		return itineris.NewApiResult(itineris.StatusOk).SetData(jwt).SetExtras(map[string]interface{}{apiResultExtraReturnUrl: returnUrl})
	}
}
// _doLoginGitHub exchanges a GitHub OAuth authorization code for an exter
// pre-login session JWT. On success the JWT is the result data, "${token}" in
// returnUrl is substituted with it, and the GitHub profile is fetched
// asynchronously.
func _doLoginGitHub(_ *itineris.ApiContext, _ *itineris.ApiAuth, authCode string, app *app.App, returnUrl string) *itineris.ApiResult {
	if DEBUG {
		log.Printf("[DEBUG] START _doLoginGitHub")
		t := time.Now().UnixNano()
		defer func() {
			d := time.Now().UnixNano() - t
			log.Printf("[DEBUG] END _doLoginGitHub: %d ms", d/1000000)
		}()
	}
	// BUG FIX: the cancel function returned by context.WithTimeout was
	// discarded (go vet warning); it must be called to release resources.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// firstly exchange authCode for accessToken
	if token, err := githubOAuthConf.Exchange(ctx, authCode, oauth2.AccessTypeOnline); err != nil {
		if DEBUG {
			// NOTE(review): this slicing panics when len(authCode) < 4 — confirm callers guarantee length.
			log.Printf("[DEBUG] ERROR _doLoginGithub: %s / %s", "***"+authCode[len(authCode)-4:], err)
		}
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(err.Error())
	} else if token == nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage("Error: exchanged token is nil")
	} else {
		now := time.Now()
		// GitHub's OAuth token does not set expiry, which causes the token to
		// be treated as expired immediately. Hence we explicitly set expiry.
		token.Expiry = now.Add(1 * time.Hour)
		// secondly embed accessToken into exter's session as a JWT
		js, _ := json.Marshal(token)
		claims, err := genPreLoginClaims(&Session{
			ClientId:  app.GetId(),
			Channel:   loginChannelGithub,
			CreatedAt: now,
			ExpiredAt: token.Expiry,
			Data:      js, // JSON-serialization of oauth2.Token
		})
		if err != nil {
			return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
		}
		_, jwt, err := saveSession(claims)
		if err != nil {
			return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
		}
		// lastly use accessToken to fetch GitHub profile info
		go goFetchGitHubProfile(claims.Id)
		returnUrl = strings.ReplaceAll(returnUrl, "${token}", jwt)
		return itineris.NewApiResult(itineris.StatusOk).SetData(jwt).SetExtras(map[string]interface{}{apiResultExtraReturnUrl: returnUrl})
	}
}
// _doLoginGoogle exchanges a Google OAuth authorization code for an exter
// pre-login session JWT. On success the JWT is the result data, "${token}" in
// returnUrl is substituted with it, and the Google profile is fetched
// asynchronously.
func _doLoginGoogle(_ *itineris.ApiContext, _ *itineris.ApiAuth, authCode string, app *app.App, returnUrl string) *itineris.ApiResult {
	if DEBUG {
		log.Printf("[DEBUG] START _doLoginGoogle")
		t := time.Now().UnixNano()
		defer func() {
			d := time.Now().UnixNano() - t
			log.Printf("[DEBUG] END _doLoginGoogle: %d ms", d/1000000)
		}()
	}
	// BUG FIX: the cancel function returned by context.WithTimeout was
	// discarded (go vet warning); it must be called to release resources.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// firstly exchange authCode for accessToken
	if token, err := googleOAuthConf.Exchange(ctx, authCode, oauth2.AccessTypeOnline); err != nil {
		if DEBUG {
			// NOTE(review): this slicing panics when len(authCode) < 4 — confirm callers guarantee length.
			log.Printf("[DEBUG] ERROR _doLoginGoogle: %s / %s", "***"+authCode[len(authCode)-4:], err)
		}
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(err.Error())
	} else if token == nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage("Error: exchanged token is nil")
	} else {
		// secondly embed accessToken into exter's session as a JWT
		js, _ := json.Marshal(token)
		now := time.Now()
		claims, err := genPreLoginClaims(&Session{
			ClientId:  app.GetId(),
			Channel:   loginChannelGoogle,
			CreatedAt: now,
			ExpiredAt: token.Expiry,
			Data:      js, // JSON-serialization of oauth2.Token
		})
		if err != nil {
			return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
		}
		_, jwt, err := saveSession(claims)
		if err != nil {
			return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
		}
		// lastly use accessToken to fetch Google profile info
		go goFetchGoogleProfile(claims.Id)
		returnUrl = strings.ReplaceAll(returnUrl, "${token}", jwt)
		return itineris.NewApiResult(itineris.StatusOk).SetData(jwt).SetExtras(map[string]interface{}{apiResultExtraReturnUrl: returnUrl})
	}
}
// _doLoginLinkedin exchanges a LinkedIn OAuth authorization code for an exter
// pre-login session JWT. On success the JWT is the result data, "${token}" in
// returnUrl is substituted with it, and the LinkedIn profile is fetched
// asynchronously.
func _doLoginLinkedin(_ *itineris.ApiContext, _ *itineris.ApiAuth, authCode string, app *app.App, returnUrl string) *itineris.ApiResult {
	if DEBUG {
		log.Printf("[DEBUG] START _doLoginLinkedin")
		t := time.Now().UnixNano()
		defer func() {
			d := time.Now().UnixNano() - t
			log.Printf("[DEBUG] END _doLoginLinkedin: %d ms", d/1000000)
		}()
	}
	// BUG FIX: the cancel function returned by context.WithTimeout was
	// discarded (go vet warning); it must be called to release resources.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// firstly exchange authCode for accessToken
	if token, err := linkedinOAuthConf.Exchange(ctx, authCode, oauth2.AccessTypeOnline); err != nil {
		if DEBUG {
			// NOTE(review): this slicing panics when len(authCode) < 4 — confirm callers guarantee length.
			log.Printf("[DEBUG] ERROR _doLoginLinkedin: %s / %s", "***"+authCode[len(authCode)-4:], err)
		}
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(err.Error())
	} else if token == nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage("Error: exchanged token is nil")
	} else {
		now := time.Now()
		// secondly embed accessToken into exter's session as a JWT
		js, _ := json.Marshal(token)
		claims, err := genPreLoginClaims(&Session{
			ClientId:  app.GetId(),
			Channel:   loginChannelLinkedin,
			CreatedAt: now,
			ExpiredAt: token.Expiry,
			Data:      js, // JSON-serialization of oauth2.Token
		})
		if err != nil {
			return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
		}
		_, jwt, err := saveSession(claims)
		if err != nil {
			return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
		}
		// lastly use accessToken to fetch LinkedIn profile info
		go goFetchLinkedInProfile(claims.Id)
		returnUrl = strings.ReplaceAll(returnUrl, "${token}", jwt)
		return itineris.NewApiResult(itineris.StatusOk).SetData(jwt).SetExtras(map[string]interface{}{apiResultExtraReturnUrl: returnUrl})
	}
}
/*
apiLogin handles API call "login".
- Upon login successfully, this API returns the login token as JWT.
*/
func apiLogin(ctx *itineris.ApiContext, auth *itineris.ApiAuth, params *itineris.ApiParams) *itineris.ApiResult {
	appId := _extractParam(params, "app", reddo.TypeString, "", nil)
	// clientApp: renamed from "app", which shadowed the imported "app" package.
	clientApp, err := appDao.Get(appId.(string))
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	} else if clientApp == nil {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(fmt.Sprintf("App [%s] not found", appId))
	} else if !clientApp.GetAttrsPublic().IsActive {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(fmt.Sprintf("App [%s] is not active", appId))
	}
	requestReturnUrl := _extractParam(params, "return_url", reddo.TypeString, "", nil)
	// An empty generated url means the requested return-url is not allowed for this app.
	if returnUrl := clientApp.GenerateReturnUrl(requestReturnUrl.(string)); returnUrl == "" && requestReturnUrl != "" {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(fmt.Sprintf("Return url [%s] is not allowed for app [%s]", requestReturnUrl, appId))
	} else {
		requestReturnUrl = returnUrl
	}
	// Dispatch to the login implementation for the requested identity source.
	source := _extractParam(params, "source", reddo.TypeString, "", nil)
	switch strings.ToLower(source.(string)) {
	case loginChannelGoogle:
		authCode := _extractParam(params, "code", reddo.TypeString, "", nil)
		return _doLoginGoogle(ctx, auth, authCode.(string), clientApp, requestReturnUrl.(string))
	case loginChannelGithub:
		authCode := _extractParam(params, "code", reddo.TypeString, "", nil)
		return _doLoginGitHub(ctx, auth, authCode.(string), clientApp, requestReturnUrl.(string))
	case loginChannelFacebook:
		authCode := _extractParam(params, "code", reddo.TypeString, "", nil)
		return _doLoginFacebook(ctx, auth, authCode.(string), clientApp, requestReturnUrl.(string))
	case loginChannelLinkedin:
		authCode := _extractParam(params, "code", reddo.TypeString, "", nil)
		return _doLoginLinkedin(ctx, auth, authCode.(string), clientApp, requestReturnUrl.(string))
	}
	return itineris.NewApiResult(itineris.StatusErrorClient).SetMessage(fmt.Sprintf("Login source is not supported: %s", source))
}
/*
apiVerifyLoginToken handles API call "verifyLoginToken".
This API expects an input map:
{
"token": login token (returned by apiLogin/apiVerifyLoginToken),
"app": application's id,
}
- Upon successful, this API returns the login-token.
*/
func apiVerifyLoginToken(_ *itineris.ApiContext, _ *itineris.ApiAuth, params *itineris.ApiParams) *itineris.ApiResult {
	// firstly extract JWT token from request and convert it into claims
	token := _extractParam(params, "token", reddo.TypeString, "", nil)
	if token == "" {
		return itineris.NewApiResult(itineris.StatusErrorClient).SetMessage("empty token")
	}
	claims, err := parseLoginToken(token.(string))
	if err != nil {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(err.Error())
	}
	if claims.isExpired() {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(errorExpiredJwt.Error())
	}
	// secondly verify the client app
	// (clientApp: renamed from "app", which shadowed the imported "app" package)
	appId := _extractParam(params, "app", reddo.TypeString, "", nil)
	clientApp, err := appDao.Get(appId.(string))
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	} else if clientApp == nil || !clientApp.GetAttrsPublic().IsActive {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage("invalid app")
	}
	// also verify 'return-url' (the system app is exempt)
	returnUrl := _extractParam(params, "return_url", reddo.TypeString, "", nil)
	if returnUrl = clientApp.GenerateReturnUrl(returnUrl.(string)); returnUrl == "" && clientApp.GetId() != systemAppId {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(fmt.Sprintf("Return url [%s] is not allowed for app [%s]", returnUrl, appId))
	}
	// thirdly verify the session
	sess, err := sessionDao.Get(claims.Id)
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	if sess == nil || sess.IsExpired() {
		// BUG FIX: the original used a no-argument fmt.Sprintf (go vet warning)
		// with the garbled message "Session not exists not expired".
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage("session does not exist or is expired")
	}
	// lastly return the session encoded as JWT
	if sess.GetSessionType() == sessionTypePreLogin {
		// login is still being finalized asynchronously; client should retry
		return itineris.NewApiResult(302).SetMessage("please try again after a moment")
	}
	returnUrl = strings.ReplaceAll(returnUrl.(string), "${token}", sess.GetSessionData())
	return itineris.NewApiResult(itineris.StatusOk).SetData(sess.GetSessionData()).SetExtras(map[string]interface{}{apiResultExtraReturnUrl: returnUrl})
}
/* app APIs */
/*
API handler "myAppList"
This API expects an input map:
{
"token": login token (returned by apiLogin/apiVerifyLoginToken),
}
Notes:
- This API returns only app's public info.
*/
func apiMyAppList(_ *itineris.ApiContext, _ *itineris.ApiAuth, params *itineris.ApiParams) *itineris.ApiResult {
	token, _ := params.GetParamAsType("token", reddo.TypeString)
	// u: renamed from "user", which shadowed the imported "user" package.
	errResult, _, u := _parseLoginTokenFromApi(token)
	if errResult != nil {
		return errResult
	}
	appList, err := appDao.GetUserApps(u)
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	// expose only each app's id and its public attributes
	result := make([]map[string]interface{}, 0)
	for _, myApp := range appList {
		attrsPublic := extractAppAttrsPublic(myApp)
		appInfo := map[string]interface{}{"id": myApp.GetId(), "public_attrs": attrsPublic}
		result = append(result, appInfo)
	}
	return itineris.NewApiResult(itineris.StatusOk).SetData(result)
}
/*
API handler "getMyApp": returns the id, domains and public attributes of an
app owned by the currently logged-in user.
*/
func apiGetMyApp(ctx *itineris.ApiContext, _ *itineris.ApiAuth, params *itineris.ApiParams) *itineris.ApiResult {
	id, _ := params.GetParamAsType("id", reddo.TypeString)
	if id == nil || strings.TrimSpace(id.(string)) == "" {
		return itineris.NewApiResult(itineris.StatusNotFound).SetMessage(fmt.Sprintf("App [%s] not found", id))
	}
	myApp, err := appDao.Get(id.(string))
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	if myApp == nil {
		return itineris.NewApiResult(itineris.StatusNotFound).SetMessage(fmt.Sprintf("App [%s] not found", id))
	}
	sessionClaim, ok := ctx.GetContextValue(ctxFieldSession).(*SessionClaims)
	if !ok || sessionClaim == nil {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage("Cannot obtain current logged in user info")
	}
	if myApp.GetOwnerId() != sessionClaim.UserId {
		// purposely report "not found" so callers cannot probe other users' app ids
		return itineris.NewApiResult(itineris.StatusNotFound).SetMessage(fmt.Sprintf("App [%s] not found", id))
	}
	return itineris.NewApiResult(itineris.StatusOk).SetData(map[string]interface{}{
		"id":           myApp.GetId(),
		"domains":      myApp.GetDomains(),
		"public_attrs": extractAppAttrsPublic(myApp),
	})
}
/*
API handler "getApp".

Notes:
  - This API returns only app's public info.
*/
func apiGetApp(_ *itineris.ApiContext, _ *itineris.ApiAuth, params *itineris.ApiParams) *itineris.ApiResult {
	id, _ := params.GetParamAsType("id", reddo.TypeString)
	if id == nil || strings.TrimSpace(id.(string)) == "" {
		return itineris.NewApiResult(itineris.StatusNotFound).SetMessage(fmt.Sprintf("App [%s] not found", id))
	}
	myApp, err := appDao.Get(id.(string))
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	if myApp == nil {
		return itineris.NewApiResult(itineris.StatusNotFound).SetMessage(fmt.Sprintf("App [%s] not found", id))
	}
	return itineris.NewApiResult(itineris.StatusOk).SetData(map[string]interface{}{
		"id":           myApp.GetId(),
		"public_attrs": extractAppAttrsPublic(myApp),
	})
}
// _extractParam fetches parameter paramName from params, converted to typ,
// falling back to defValue when absent. String values are trimmed; when re is
// non-nil and the trimmed string does not match it, nil is returned.
// (re: renamed from "regexp", which shadowed the imported "regexp" package.)
func _extractParam(params *itineris.ApiParams, paramName string, typ reflect.Type, defValue interface{}, re *regexp.Regexp) interface{} {
	v, _ := params.GetParamAsType(paramName, typ)
	if v == nil {
		v = defValue
	}
	if s, ok := v.(string); ok {
		s = strings.TrimSpace(s)
		// idiom: MatchString instead of Match([]byte(...)) — avoids a copy
		if re != nil && !re.MatchString(s) {
			return nil
		}
		v = s
	}
	return v
}
// Patterns used by _extractAppParams, compiled once at package init instead
// of on every call (the original recompiled four regexes per invocation).
var (
	appIdPattern      = regexp.MustCompile("^[0-9A-Za-z_]+$")
	httpUrlPattern    = regexp.MustCompile("^(?i)https?://.*$")
	domainsSepPattern = regexp.MustCompile(`[,;\s]+`)
	tagsSepPattern    = regexp.MustCompile(`[,;]+`)
)

// _extractAppParams validates API parameters and assembles an app.App owned by
// the currently logged-in user. It returns (nil, errorResult) on any
// validation failure, or (app, nil) on success.
func _extractAppParams(ctx *itineris.ApiContext, params *itineris.ApiParams) (*app.App, *itineris.ApiResult) {
	id := _extractParam(params, "id", reddo.TypeString, nil, appIdPattern)
	if id == nil {
		return nil, itineris.NewApiResult(itineris.StatusErrorClient).SetMessage("Missing or invalid value for parameter [id]")
	}
	// app ids are normalized to lowercase
	id = strings.ToLower(id.(string))
	isActive := _extractParam(params, "is_active", reddo.TypeBool, false, nil)
	desc := _extractParam(params, "description", reddo.TypeString, "", nil)
	defaultReturnUrl := _extractParam(params, "default_return_url", reddo.TypeString, "", nil)
	if defaultReturnUrl != "" && !httpUrlPattern.MatchString(defaultReturnUrl.(string)) {
		return nil, itineris.NewApiResult(itineris.StatusErrorClient).SetMessage("Invalid value for parameter [default_return_url]")
	}
	defaultCancelUrl := _extractParam(params, "default_cancel_url", reddo.TypeString, "", nil)
	if defaultCancelUrl != "" && !httpUrlPattern.MatchString(defaultCancelUrl.(string)) {
		return nil, itineris.NewApiResult(itineris.StatusErrorClient).SetMessage("Invalid value for parameter [default_cancel_url]")
	}
	// domains are separated by commas, semicolons or whitespace; lowercased
	domainsStr := _extractParam(params, "domains", reddo.TypeString, "", nil)
	domains := domainsSepPattern.Split(domainsStr.(string), -1)
	for i, domain := range domains {
		domains[i] = strings.ToLower(strings.TrimSpace(domain))
	}
	// tags are separated by commas or semicolons
	tagsStr := _extractParam(params, "tags", reddo.TypeString, "", nil)
	tags := tagsSepPattern.Split(tagsStr.(string), -1)
	for i, tag := range tags {
		tags[i] = strings.TrimSpace(tag)
	}
	idSources := _extractParam(params, "id_sources", reflect.TypeOf(map[string]bool{}), make(map[string]bool), nil)
	// rsaPublicKeyPem: fixed typo in local name (was rsaPubicKeyPem)
	rsaPublicKeyPem := _extractParam(params, "rsa_public_key", reddo.TypeString, "", nil)
	if rsaPublicKeyPem != "" {
		// validate the PEM block early so callers get a client error
		if _, err := parseRsaPublicKeyFromPem(rsaPublicKeyPem.(string)); err != nil {
			return nil, itineris.NewApiResult(itineris.StatusErrorClient).SetMessage(err.Error())
		}
	}
	sessionClaim, ok := ctx.GetContextValue(ctxFieldSession).(*SessionClaims)
	if !ok || sessionClaim == nil {
		return nil, itineris.NewApiResult(itineris.StatusNoPermission).SetMessage("Cannot obtain current logged in user info")
	}
	ownerId := sessionClaim.UserId
	boApp := app.NewApp(goapi.AppVersionNumber, id.(string), ownerId, desc.(string))
	boApp.SetDomains(domains)
	boApp.SetAttrsPublic(app.AppAttrsPublic{
		IsActive:         isActive.(bool),
		Description:      desc.(string),
		DefaultReturnUrl: defaultReturnUrl.(string),
		DefaultCancelUrl: defaultCancelUrl.(string),
		IdentitySources:  idSources.(map[string]bool),
		Tags:             tags,
		RsaPublicKey:     rsaPublicKeyPem.(string),
	})
	return boApp, nil
}
// apiRegisterApp handles API call "registerApp": it creates a new app owned
// by the currently logged-in user, rejecting ids that already exist.
func apiRegisterApp(ctx *itineris.ApiContext, _ *itineris.ApiAuth, params *itineris.ApiParams) *itineris.ApiResult {
	newApp, errResult := _extractAppParams(ctx, params)
	if errResult != nil {
		return errResult
	}
	existing, err := appDao.Get(newApp.GetId())
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	if existing != nil {
		return itineris.NewApiResult(itineris.StatusErrorClient).SetMessage(fmt.Sprintf("App [%s] already exist", newApp.GetId()))
	}
	created, err := appDao.Create(newApp)
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	if !created {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(fmt.Sprintf("Unknown error while registering app [%s]", newApp.GetId()))
	}
	return itineris.NewApiResult(itineris.StatusOk).SetMessage(fmt.Sprintf("App [%s] has been registered successfully", newApp.GetId()))
}
// apiUpdateMyApp handles API call "updateMyApp": it updates an existing app,
// but only when the app belongs to the currently logged-in user.
func apiUpdateMyApp(ctx *itineris.ApiContext, _ *itineris.ApiAuth, params *itineris.ApiParams) *itineris.ApiResult {
	submitApp, errResult := _extractAppParams(ctx, params)
	if errResult != nil {
		return errResult
	}
	appId := submitApp.GetId()
	existing, err := appDao.Get(appId)
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	if existing == nil {
		return itineris.NewApiResult(itineris.StatusErrorClient).SetMessage(fmt.Sprintf("App [%s] does not exist", appId))
	}
	if existing.GetOwnerId() != submitApp.GetOwnerId() {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(fmt.Sprintf("App [%s] does not belong to user", appId))
	}
	updated, err := appDao.Update(submitApp)
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	if !updated {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(fmt.Sprintf("Unknown error while updating app [%s]", appId))
	}
	return itineris.NewApiResult(itineris.StatusOk).SetMessage(fmt.Sprintf("App [%s] has been updated successfully", appId))
}
// apiDeleteMyApp handles API call "deleteMyApp": it removes an existing app
// that belongs to the currently logged-in user; the system app is protected.
func apiDeleteMyApp(ctx *itineris.ApiContext, _ *itineris.ApiAuth, params *itineris.ApiParams) *itineris.ApiResult {
	submitApp, errResult := _extractAppParams(ctx, params)
	if errResult != nil {
		return errResult
	}
	appId := submitApp.GetId()
	existing, err := appDao.Get(appId)
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	if existing == nil {
		return itineris.NewApiResult(itineris.StatusErrorClient).SetMessage(fmt.Sprintf("App [%s] does not exist", appId))
	}
	if existing.GetOwnerId() != submitApp.GetOwnerId() {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(fmt.Sprintf("App [%s] does not belong to user", appId))
	}
	// the built-in system app must never be removed
	if appId == systemAppId {
		return itineris.NewApiResult(itineris.StatusNoPermission).SetMessage(fmt.Sprintf("App [%s] can not be deleted", appId))
	}
	deleted, err := appDao.Delete(submitApp)
	if err != nil {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(err.Error())
	}
	if !deleted {
		return itineris.NewApiResult(itineris.StatusErrorServer).SetMessage(fmt.Sprintf("Unknown error while deleting app [%s]", appId))
	}
	return itineris.NewApiResult(itineris.StatusOk).SetMessage(fmt.Sprintf("App [%s] has been deleted successfully", appId))
}
|
package problem0647
// countSubstrings2 returns the number of palindromic substrings of s, using
// center expansion over every odd and even center.
func countSubstrings2(s string) int {
	if len(s) == 0 {
		return 0
	}
	// Seed with 1: the single-letter palindrome at the last index is the only
	// palindrome whose center the loop below never visits.
	count := 1
	for center := 0; center+1 < len(s); center++ {
		count = expand(center, center, s, count)   // odd-length palindromes
		count = expand(center, center+1, s, count) // even-length palindromes
	}
	return count
}

// expand grows a palindrome outward from the center (left, right), adding one
// to result for every successful expansion, and returns the updated tally.
func expand(left int, right int, s string, result int) int {
	for left >= 0 && right < len(s) && s[left] == s[right] {
		result++
		left--
		right++
	}
	return result
}
|
package str
import (
"regexp"
"strings"
)
// IPReg matches whole strings shaped like dotted-quad IPv4 addresses.
// NOTE: it does not validate that each octet is in range 0-255.
// BUG FIX: use MustCompile instead of Compile with an ignored error — a bad
// pattern previously left a nil regexp that would panic on first use.
var IPReg = regexp.MustCompile(`^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$`)

// MailReg matches an e-mail-like token anywhere within the string (unanchored).
var MailReg = regexp.MustCompile(`\w[-._\w]*@\w[-._\w]*\.\w+`)
// IsMatch reports whether s matches the regular expression pattern.
// It returns false when the pattern fails to compile.
// NOTE: the pattern is compiled on every call; for hot paths prefer a
// package-level MustCompile'd regexp.
func IsMatch(s, pattern string) bool {
	// idiom: MatchString instead of Match(pattern, []byte(s)) — no byte copy
	match, err := regexp.MatchString(pattern, s)
	if err != nil {
		return false
	}
	return match
}
// IsIdentifier reports whether s is a valid identifier: letters, digits,
// '-', '_' and '.' by default, or the caller-supplied pattern when given.
func IsIdentifier(s string, pattern ...string) bool {
	p := "^[a-zA-Z0-9\\-\\_\\.]+$"
	if len(pattern) > 0 {
		p = pattern[0]
	}
	matched, err := regexp.MatchString(p, s)
	return err == nil && matched
}
// IsMail reports whether s contains an e-mail-like token (per MailReg).
// The match is unanchored, so a string merely containing an address passes.
func IsMail(s string) bool {
	return MailReg.MatchString(s)
}
// IsPhone reports whether s looks like a phone number: exactly 11 digits, or
// a "+" prefix followed by exactly 13 digits.
func IsPhone(s string) bool {
	pattern, target := `^\d{11}$`, s
	if strings.HasPrefix(s, "+") {
		pattern, target = `^\d{13}$`, s[1:]
	}
	matched, err := regexp.MatchString(pattern, target)
	return err == nil && matched
}
// IsIP reports whether the whole of s is shaped like a dotted-quad IPv4
// address (per IPReg; octet ranges 0-255 are NOT validated).
func IsIP(s string) bool {
	return IPReg.MatchString(s)
}
// Dangerous reports whether s contains any character sequence considered
// unsafe: HTML/XML markup characters, quotes, a URL scheme separator, or a
// parent-directory path segment.
func Dangerous(s string) bool {
	for _, marker := range []string{"<", ">", "&", "'", "\"", "://", "../"} {
		if strings.Contains(s, marker) {
			return true
		}
	}
	return false
}
|
package main
import (
"fmt"
"testing"
"time"
)
// TestX is a placeholder test: it only prints "Text" and asserts nothing.
func TestX(t *testing.T) {
	fmt.Println("Text")
}
// BenchmarkX prints "X" once and sleeps for one second.
// NOTE(review): the body ignores b.N, so it does not measure per-iteration
// cost the way a conventional Go benchmark does — confirm this is intentional
// (e.g. a demo of how `go test -bench` schedules benchmarks).
func BenchmarkX(b *testing.B) {
	fmt.Println("X")
	time.Sleep(time.Second * 1)
}
// BenchmarkY prints "Y" once and sleeps for two seconds.
// NOTE(review): the body ignores b.N — see the note on BenchmarkX's sibling
// pattern; confirm this is intentional.
func BenchmarkY(b *testing.B) {
	fmt.Println("Y")
	time.Sleep(time.Second * 2)
}
// BenchmarkZ prints "Z" once and sleeps for two seconds.
// NOTE(review): the body ignores b.N — confirm this is intentional.
func BenchmarkZ(b *testing.B) {
	fmt.Println("Z")
	time.Sleep(time.Second * 2)
}
|
package chapter3
import "fmt"
// init demos the Car and Truck types by printing each value and the result of
// its getDoors() method. Car/Truck/oneTon are declared elsewhere in this package.
// NOTE(review): this output runs on ANY import of the package because it lives
// in init(); consider an explicitly-called function if that is not intended.
func init() {
	fmt.Println("=== Car and Truck")
	c := Car{4, 6}
	fmt.Println(c)
	fmt.Println(c.getDoors())
	t := Truck{2, "full", oneTon}
	fmt.Println(t)
	fmt.Println(t.getDoors())
}
package main
import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"os/signal"
	"strings"
	"sync"
	"syscall"
	"time"

	ctd "GossipServer/CTData"

	"github.com/golang/glog"
)
// port is the port this gossip server listens on.
var port string

// peers holds the base URLs ("http://localhost:<port>/ct/v1") of known peers.
var peers []string

// messages maps a CTData identifier to the CTData received under it.
var messages map[string]ctd.CTData
// main parses the command line (<MY-PORT> <PEER-1-PORT> ...), installs a
// SIGINT/SIGTERM handler that flushes logs before exiting, and serves the
// gossip endpoint on the given port.
func main() {
	// channel to signify when the server is shut down with ctrl+c
	done := make(chan os.Signal, 1)
	signal.Notify(done, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) // notify the channel when program terminated
	go func() {
		// when notified, print debug info, make sure all logs get written and exit
		<-done
		glog.Infoln("kill recived")
		glog.Flush()
		os.Exit(1)
	}()
	flag.Parse()       // read the cmd line flags
	defer glog.Flush() // if the program ends unexpectedly make sure all debug info is printed
	args := flag.Args()
	if len(args) < 2 {
		fmt.Println("use: gossipServer <FLAGS> <MY-PORT> <PEER-1-PORT> ... <PEER-n-PORT>")
		return
	}
	// BUG FIX: use `=` (not `:=`) so the package-level `port` is assigned
	// instead of being shadowed by a new local variable.
	port = args[0]
	for i := 1; i < len(args); i++ {
		peers = append(peers, fmt.Sprintf("http://localhost:%v/ct/v1", args[i])) // fill array of peers with addresses
		glog.Infoln(peers[i-1])                                                  // for debug
	}
	messages = make(map[string]ctd.CTData)
	http.HandleFunc("/ct/v1/gossip", GossipHandler) // call GossipHandler on post to /gossip
	glog.Infof("Starting server on %v\n", port)
	if err := http.ListenAndServe(fmt.Sprintf(":%v", port), nil); err != nil {
		// BUG FIX: fmt.Errorf only constructs an error value and discards it;
		// actually log the listen failure.
		glog.Errorf("err: %v", err)
	}
}
// messagesMu guards the package-level messages map: the HTTP server invokes
// GossipHandler on multiple goroutines concurrently (one per request), and an
// unsynchronized map mutated from several goroutines is a data race.
var messagesMu sync.Mutex

// GossipHandler is called on a post request to /ct/v1/gossip.
// It handles the logic of gossip within a network system: duplicates are
// rejected, conflicting digests trigger a Proof-of-Misbehavior broadcast, and
// new data is stored and re-gossiped to all peers.
func GossipHandler(w http.ResponseWriter, req *http.Request) {
	data := ctd.CTData{} // create an empty CTData struct
	// fill that struct using the JSON encoded struct sent via the post
	if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	glog.Infof("CTData recived from source: %s\n\n", data.ToDebugString()) // print contents of message (for debugging)
	messagesMu.Lock()
	message, have := messages[data.Identifier()]
	messagesMu.Unlock()
	if have { // if I have the message already, check for conflict
		// idiom: bytes.Equal instead of bytes.Compare(...) == 0
		if bytes.Equal(data.TBS.Digest, message.TBS.Digest) {
			// no conflict: send back "duplicate item" with a bad-request status
			http.Error(w, "duplicate item\n", http.StatusBadRequest)
		} else {
			glog.Infof("misbehavior detected\n\n") // if conflict, send a PoM to all peers
			PoM := ctd.NewCTData("PoM", time.Now().Unix(), []byte{0, 1, 2, 3}) // new dummy Proof of Misbehavior
			messagesMu.Lock()
			messages[PoM.Identifier()] = *PoM // store PoM
			messagesMu.Unlock()
			for i := 0; i < len(peers); i++ {
				glog.Infof("gossiping PoM to peer: %v\n", peers[i])
				post(fmt.Sprintf("%v/gossip", peers[i]), PoM, false) // gossip new PoM to all peers (false: send without blob)
			}
		}
		return
	}
	if data.TBS.Blob == nil { // the message does not contain the blob
		fmt.Fprintf(w, "blob-request") // ask the sender to resend with the blob
	} else {
		fmt.Fprintf(w, "new data")
		messagesMu.Lock()
		messages[data.Identifier()] = data // message is new: add it to the messages map
		messagesMu.Unlock()
		for i := 0; i < len(peers); i++ {
			glog.Infof("gossiping new info to peer: %v\n", peers[i])
			post(fmt.Sprintf("%v/gossip", peers[i]), &data, false) // gossip new message to all peers (false: send without blob)
		}
	}
}
// post takes in an address as a string and a pointer to a CTData struct
// and makes a post request to that address with the JSON encoded version of
// that struct. When withBlob is false the blob is stripped before sending;
// if the recipient replies "blob-request", the message is resent with the blob.
func post(address string, data *ctd.CTData, withBlob bool) {
	var toSend *ctd.CTData
	if withBlob {
		toSend = data
	} else {
		toSend = data.CopyWithoutBlob()
	}
	jsonStr, _ := json.Marshal(toSend) // JSON bytes of the struct (w/o the blob unless withBlob)
	req, err := http.NewRequest("POST", address, bytes.NewBuffer(jsonStr))
	if err != nil {
		// BUG FIX: this error was previously shadowed by client.Do's err and
		// never checked, so a nil request could be passed to client.Do.
		panic(err)
	}
	req.Header.Set("X-Custom-Header", "myvalue")
	req.Header.Set("Content-Type", "application/json")
	// NOTE(review): http.Client has no Timeout set, so a stalled peer blocks
	// this goroutine indefinitely — confirm whether a timeout is acceptable.
	client := &http.Client{}
	resp, err := client.Do(req) // make the request
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// print info for debug
	glog.Infoln("response Status:", resp.Status)
	glog.Infoln("response Headers:", resp.Header)
	body, _ := ioutil.ReadAll(resp.Body)
	sbody := string(body)
	glog.Infoln("response Body:", sbody)
	if strings.ToLower(sbody) == "blob-request" {
		glog.Infof("sending blob to peer: %v\n\n", address)
		post(address, data, true) // the recipient asked for the blob: resend the message with it
	}
}
|
package dbx
import (
"context"
"database/sql"
"fmt"
)
// DQLExecutor abstracts query execution with a named-argument (DQLArgument)
// binding style. Methods come in Context/no-Context and Must/non-Must pairs;
// the Must variants return no error (presumably they panic on failure —
// confirm against the implementation).
type DQLExecutor interface {
	// DQLExecContext executes a query without returning any rows, honoring ctx.
	DQLExecContext(ctx context.Context, query string, argument DQLArgument) (sql.Result, error)
	// DQLExec executes a query without returning any rows.
	DQLExec(query string, argument DQLArgument) (sql.Result, error)
	DQLMustExecContext(ctx context.Context, query string, argument DQLArgument) sql.Result
	DQLMustExec(query string, argument DQLArgument) sql.Result
	// DQLFirstContext scans the first matching row into dest.
	DQLFirstContext(ctx context.Context, dest interface{}, query string, argument DQLArgument) error
	DQLFirst(dest interface{}, query string, argument DQLArgument) error
	DQLMustFirstContext(ctx context.Context, dest interface{}, query string, argument DQLArgument)
	DQLMustFirst(dest interface{}, query string, argument DQLArgument)
	// DQLFindContext scans all matching rows into dest.
	DQLFindContext(ctx context.Context, dest interface{}, query string, argument DQLArgument) error
	DQLFind(dest interface{}, query string, argument DQLArgument) error
	DQLMustFindContext(ctx context.Context, dest interface{}, query string, argument DQLArgument)
	DQLMustFind(dest interface{}, query string, argument DQLArgument)
}
// SQLExecutor abstracts query execution with positional placeholder args.
// Methods come in Context/no-Context and Must/non-Must pairs; the Must
// variants return no error (presumably they panic on failure — confirm
// against the implementation).
type SQLExecutor interface {
	// SQLPrepareContext prepares a statement for later executions, honoring ctx.
	SQLPrepareContext(ctx context.Context, query string) (*SQLStmt, error)
	SQLPrepare(query string) (*SQLStmt, error)
	// SQLExecContext executes a query without returning any rows.
	// The args are for any placeholder parameters in the query.
	SQLExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	// SQLExec executes a query without returning any rows.
	// The args are for any placeholder parameters in the query.
	SQLExec(query string, args ...interface{}) (sql.Result, error)
	SQLMustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result
	SQLMustExec(query string, args ...interface{}) sql.Result
	// SQLFirstContext scans the first matching row into dest.
	SQLFirstContext(ctx context.Context, dest interface{}, query string, args ...interface{}) (err error)
	SQLFirst(dest interface{}, query string, args ...interface{}) (err error)
	SQLMustFirstContext(ctx context.Context, dest interface{}, query string, args ...interface{})
	SQLMustFirst(dest interface{}, query string, args ...interface{})
	// SQLFindContext scans all matching rows into dest.
	SQLFindContext(ctx context.Context, dest interface{}, query string, args ...interface{}) (err error)
	SQLFind(dest interface{}, query string, args ...interface{}) (err error)
	SQLMustFindContext(ctx context.Context, dest interface{}, query string, args ...interface{})
	SQLMustFind(dest interface{}, query string, args ...interface{})
}
type StructExecutor interface {
StructInsertContext(ctx context.Context, value interface{}) (rs sql.Result, err error)
StructMustInsertContext(ctx context.Context, value interface{}) (rs sql.Result)
StructInsert(value interface{}) (sql.Result, error)
StructMustInsert(value interface{}) sql.Result
StructUpdateContext(ctx context.Context, value interface{}) (rs sql.Result, err error)
StructUpdate(value interface{}) (sql.Result, error)
StructMustUpdateContext(ctx context.Context, value interface{}) (rs sql.Result)
StructMustUpdate(value interface{}) (rs sql.Result)
}
type ComplexExecutor interface {
StructExecutor
SQLExecutor
DQLExecutor
}
// ComplexExec implemented SQLExecutor, DQLExecutor and StructExecutor
type ComplexExec struct {
option *Options
preparer preparer
}
// newComplexExec wraps the given preparer and options in a ComplexExec.
func newComplexExec(preparer preparer, option *Options) *ComplexExec {
	exec := &ComplexExec{
		preparer: preparer,
		option:   option,
	}
	return exec
}
// StructInsertContext generates and executes an INSERT statement for the
// given struct value. When the struct carries an auto-increment field, the
// id generated by the database is written back into that field.
func (e *ComplexExec) StructInsertContext(ctx context.Context, value interface{}) (rs sql.Result, err error) {
	autoField, query, args, err := e.option.generator.InsertSQL(value)
	if err != nil {
		return
	}
	if rs, err = e.SQLExecContext(ctx, query, args...); err != nil {
		return rs, err
	}
	lastID, err := rs.LastInsertId()
	if err != nil {
		return nil, err
	}
	// Propagate the generated id back into the auto-increment field, if any.
	if autoField != nil {
		autoField.SetInt(lastID)
	}
	return
}
// StructMustInsertContext is like StructInsertContext but panics on error.
func (e *ComplexExec) StructMustInsertContext(ctx context.Context, value interface{}) (rs sql.Result) {
	var err error
	if rs, err = e.StructInsertContext(ctx, value); err != nil {
		panic(err)
	}
	return
}

// StructInsert inserts a struct using a background context.
func (e *ComplexExec) StructInsert(value interface{}) (sql.Result, error) {
	ctx := context.Background()
	return e.StructInsertContext(ctx, value)
}

// StructMustInsert is like StructInsert but panics if the insert fails.
func (e *ComplexExec) StructMustInsert(value interface{}) sql.Result {
	ctx := context.Background()
	return e.StructMustInsertContext(ctx, value)
}
// StructUpdateContext generates and executes an UPDATE statement for the
// given struct value.
func (e *ComplexExec) StructUpdateContext(ctx context.Context, value interface{}) (rs sql.Result, err error) {
	query, args, err := e.option.generator.UpdateSQL(value)
	if err != nil {
		return
	}
	return e.SQLExecContext(ctx, query, args...)
}

// StructUpdate updates a struct using a background context.
func (e *ComplexExec) StructUpdate(value interface{}) (sql.Result, error) {
	ctx := context.Background()
	return e.StructUpdateContext(ctx, value)
}

// StructMustUpdateContext is like StructUpdateContext but panics on error.
func (e *ComplexExec) StructMustUpdateContext(ctx context.Context, value interface{}) (rs sql.Result) {
	var err error
	if rs, err = e.StructUpdateContext(ctx, value); err != nil {
		panic(err)
	}
	return
}

// StructMustUpdate is like StructUpdate but panics if the update fails.
func (e *ComplexExec) StructMustUpdate(value interface{}) (rs sql.Result) {
	ctx := context.Background()
	return e.StructMustUpdateContext(ctx, value)
}
// SQLPrepare creates a prepared statement for the given query.
func (e *ComplexExec) SQLPrepare(query string) (*SQLStmt, error) {
	stmt, err := newStmt(e.preparer, query, e.option)
	return stmt, err
}

// SQLPrepareContext creates a prepared statement bound to ctx.
func (e *ComplexExec) SQLPrepareContext(ctx context.Context, query string) (*SQLStmt, error) {
	stmt, err := newStmtContext(ctx, e.preparer, query, e.option)
	return stmt, err
}
// SQLFirstContext prepares the query, scans the first matching row into
// dest, and closes the statement before returning.
func (e *ComplexExec) SQLFirstContext(ctx context.Context, dest interface{}, query string, args ...interface{}) (err error) {
	var stmt *SQLStmt
	if stmt, err = e.SQLPrepareContext(ctx, query); err != nil {
		return
	}
	// Closing can fail independently of the scan; it is logged, not returned.
	defer func() {
		if cerr := stmt.Close(); cerr != nil {
			fmt.Printf("close stmt err:%v", cerr)
		}
	}()
	return stmt.FirstContext(ctx, dest, args...)
}

// SQLFirst scans the first matching row into dest using a background context.
func (e *ComplexExec) SQLFirst(dest interface{}, query string, args ...interface{}) (err error) {
	ctx := context.Background()
	return e.SQLFirstContext(ctx, dest, query, args...)
}

// SQLMustFirstContext is like SQLFirstContext but panics on error.
func (e *ComplexExec) SQLMustFirstContext(ctx context.Context, dest interface{}, query string, args ...interface{}) {
	if err := e.SQLFirstContext(ctx, dest, query, args...); err != nil {
		panic(err)
	}
}

// SQLMustFirst is like SQLFirst but panics on error.
func (e *ComplexExec) SQLMustFirst(dest interface{}, query string, args ...interface{}) {
	ctx := context.Background()
	e.SQLMustFirstContext(ctx, dest, query, args...)
}
// SQLFindContext prepares the query, scans all matching rows into dest,
// and closes the statement before returning.
func (e *ComplexExec) SQLFindContext(ctx context.Context, dest interface{}, query string, args ...interface{}) (err error) {
	var stmt *SQLStmt
	if stmt, err = e.SQLPrepareContext(ctx, query); err != nil {
		return
	}
	// Closing can fail independently of the scan; it is logged, not returned.
	defer func() {
		if cerr := stmt.Close(); cerr != nil {
			fmt.Printf("close stmt err:%v", cerr)
		}
	}()
	return stmt.FindContext(ctx, dest, args...)
}

// SQLFind scans all matching rows into dest using a background context.
func (e *ComplexExec) SQLFind(dest interface{}, query string, args ...interface{}) (err error) {
	ctx := context.Background()
	return e.SQLFindContext(ctx, dest, query, args...)
}

// SQLMustFindContext is like SQLFindContext but panics on error.
func (e *ComplexExec) SQLMustFindContext(ctx context.Context, dest interface{}, query string, args ...interface{}) {
	if err := e.SQLFindContext(ctx, dest, query, args...); err != nil {
		panic(err)
	}
}

// SQLMustFind is like SQLFind but panics on error.
func (e *ComplexExec) SQLMustFind(dest interface{}, query string, args ...interface{}) {
	ctx := context.Background()
	e.SQLMustFindContext(ctx, dest, query, args...)
}
// SQLExecContext executes a query without returning any rows, after logging
// the SQL and its args to the configured output.
func (e *ComplexExec) SQLExecContext(ctx context.Context, query string, args ...interface{}) (rs sql.Result, err error) {
	printSQL(query, args, e.option.out)
	return e.preparer.ExecContext(ctx, query, args...)
}

// SQLExec executes a query without returning any rows.
func (e *ComplexExec) SQLExec(query string, args ...interface{}) (rs sql.Result, err error) {
	ctx := context.Background()
	return e.SQLExecContext(ctx, query, args...)
}

// SQLMustExecContext is like SQLExecContext but panics if the query cannot
// be executed.
func (e *ComplexExec) SQLMustExecContext(ctx context.Context, query string, args ...interface{}) (rs sql.Result) {
	var err error
	if rs, err = e.SQLExecContext(ctx, query, args...); err != nil {
		panic(err)
	}
	return
}

// SQLMustExec is like SQLExec but panics if the query cannot be executed.
func (e *ComplexExec) SQLMustExec(query string, args ...interface{}) (rs sql.Result) {
	ctx := context.Background()
	return e.SQLMustExecContext(ctx, query, args...)
}
// DQLExecContext compiles the DQL query with the given argument and
// executes the resulting SQL without returning any rows.
func (e *ComplexExec) DQLExecContext(ctx context.Context, query string, argument DQLArgument) (sql.Result, error) {
	compiled, args, err := DSLCompile(query, argument)
	if err != nil {
		return nil, err
	}
	return e.SQLExecContext(ctx, compiled, args...)
}

// DQLExec executes a DQL query without returning any rows, using a
// background context.
func (e *ComplexExec) DQLExec(query string, argument DQLArgument) (sql.Result, error) {
	ctx := context.Background()
	return e.DQLExecContext(ctx, query, argument)
}

// DQLMustExecContext is like DQLExecContext but panics on error.
func (e *ComplexExec) DQLMustExecContext(ctx context.Context, query string, argument DQLArgument) (r sql.Result) {
	var err error
	if r, err = e.DQLExecContext(ctx, query, argument); err != nil {
		panic(err)
	}
	return
}

// DQLMustExec is like DQLExec but panics on error.
func (e *ComplexExec) DQLMustExec(query string, argument DQLArgument) sql.Result {
	ctx := context.Background()
	return e.DQLMustExecContext(ctx, query, argument)
}
// DQLFirstContext compiles the DQL query with the given argument and scans
// the first matching row into dest.
//
// Bug fix: the original called err.Error() immediately after DSLCompile,
// before the nil check. On the success path err is a nil interface, so that
// method call panics; on the failure path the string was discarded anyway.
// The spurious call is removed.
func (e *ComplexExec) DQLFirstContext(ctx context.Context, dest interface{}, query string, argument DQLArgument) (err error) {
	query, args, err := DSLCompile(query, argument)
	if err != nil {
		return
	}
	return e.SQLFirstContext(ctx, dest, query, args...)
}
// DQLFirst scans the first matching row into dest using a background context.
func (e *ComplexExec) DQLFirst(dest interface{}, query string, argument DQLArgument) (err error) {
	ctx := context.Background()
	return e.DQLFirstContext(ctx, dest, query, argument)
}

// DQLMustFirstContext is like DQLFirstContext but panics on error.
func (e *ComplexExec) DQLMustFirstContext(ctx context.Context, dest interface{}, query string, argument DQLArgument) {
	if err := e.DQLFirstContext(ctx, dest, query, argument); err != nil {
		panic(err)
	}
}

// DQLMustFirst is like DQLFirst but panics on error.
func (e *ComplexExec) DQLMustFirst(dest interface{}, query string, argument DQLArgument) {
	ctx := context.Background()
	e.DQLMustFirstContext(ctx, dest, query, argument)
}
// DQLFindContext compiles the DQL query with the given argument and scans
// all matching rows into dest.
func (e *ComplexExec) DQLFindContext(ctx context.Context, dest interface{}, query string, argument DQLArgument) (err error) {
	compiled, args, err := DSLCompile(query, argument)
	if err != nil {
		return
	}
	return e.SQLFindContext(ctx, dest, compiled, args...)
}

// DQLFind scans all matching rows into dest using a background context.
func (e *ComplexExec) DQLFind(dest interface{}, query string, argument DQLArgument) (err error) {
	ctx := context.Background()
	return e.DQLFindContext(ctx, dest, query, argument)
}

// DQLMustFindContext is like DQLFindContext but panics on error.
func (e *ComplexExec) DQLMustFindContext(ctx context.Context, dest interface{}, query string, argument DQLArgument) {
	if err := e.DQLFindContext(ctx, dest, query, argument); err != nil {
		panic(err)
	}
}

// DQLMustFind is like DQLFind but panics on error.
func (e *ComplexExec) DQLMustFind(dest interface{}, query string, argument DQLArgument) {
	ctx := context.Background()
	e.DQLMustFindContext(ctx, dest, query, argument)
}
|
package main
import "fmt"
// x has package-level scope: it is declared outside any function block, so
// every function in this package — including other .go files of the same
// package — can read it.
var x int = 1001
// main demonstrates block-level scope: package-level x is visible here,
// while y exists only inside this function's braces.
func main() {
	fmt.Println(x)
	foo()
	y := 17
	// y is accessible here because it is declared inside this block.
	fmt.Println(y)
}
// foo can read the package-scoped x, but not main's local y — y lives only
// inside main's block. That visibility rule is what "scope" refers to here.
func foo() {
	fmt.Println(x)
}
|
package main
import (
"testing"
)
// testdata bundles one test fixture: an input file plus the expected answer
// for each of the two puzzle tasks.
type testdata struct {
	fname1        string // input file for task 1
	expectedtask1 int    // expected task-1 result
	fname2        string // input file for task 2
	expectedtask2 int    // expected task-2 result
}

// testset lists the example fixtures both tests iterate over.
var testset []*testdata = []*testdata{{"example1.txt", 165, "example2.txt", 208}}
// TestTaskOne runs task1 against every fixture in testset.
func TestTaskOne(t *testing.T) {
	for _, tc := range testset {
		raw := readdata(tc.fname1)
		parsed := parsedata(raw)
		got := task1(parsed)
		if got != tc.expectedtask1 {
			t.Fatalf("Test '%s' failed. Got '%d' - Wanted: '%d'", tc.fname1, got, tc.expectedtask1)
		}
	}
}
// TestTaskTwo runs task2 against every fixture in testset.
func TestTaskTwo(t *testing.T) {
	for _, tc := range testset {
		raw := readdata(tc.fname2)
		parsed := parsedata(raw)
		got := task2(parsed)
		if got != tc.expectedtask2 {
			t.Fatalf("Test '%s' failed. Got '%d' - Wanted: '%d'", tc.fname2, got, tc.expectedtask2)
		}
	}
}
|
package main
//假设你正在爬楼梯。需要 n 阶你才能到达楼顶。
//
//每次你可以爬 1 或 2 个台阶。你有多少种不同的方法可以爬到楼顶呢?
//
//注意:给定 n 是一个正整数。
//
//示例 1:
//
//输入: 2
//输出: 2
//解释: 有两种方法可以爬到楼顶。
//1. 1 阶 + 1 阶
//2. 2 阶
//示例 2:
//
//输入: 3
//输出: 3
//解释: 有三种方法可以爬到楼顶。
//1. 1 阶 + 1 阶 + 1 阶
//2. 1 阶 + 2 阶
//3. 2 阶 + 1 阶
// main is intentionally empty; climbStairs below is the exercise solution.
func main() {
}
// climbStairs returns the number of distinct ways to climb a staircase of
// n steps taking 1 or 2 steps at a time. The count follows the Fibonacci
// recurrence ways(n) = ways(n-1) + ways(n-2); for n < 3 the answer is n.
func climbStairs(n int) int {
	if n < 3 {
		return n
	}
	prev, curr := 1, 2
	for step := 3; step <= n; step++ {
		prev, curr = curr, prev+curr
	}
	return curr
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2022/1/12 8:55 上午
# @File : lt_43_字符串相乘.go
# @Description :
# @Attention :
*/
package hot100
import "strconv"
/*
// 解题关键:
注意点:
1. 边界条件: 有一个数为0,直接返回0
2.遍历计算乘积的时候,要注意多出来的数, 如 4*5=20 ,多出来的是2,要充分记得考虑这个值
// 采用加法:
// num1的数 * num2 的每个数, 然后和再累加 (这里唯一需要注意的点是,记得结果跟上0)
// 如 num1=1234 ,num2=456
// 1. 1234 *6
1234
6
7404
2. 1234 *5 // 因为5 为倒数第2个数,后面还有个数,所以最后要加上一个0
1234
5
6170+0=> 61700
3. 1234*4 // 因为4为倒数第3个数,后面有2个数,所以要加上2个0
1234
4
493600
4. 最终将所有结果相加: 7404+61700+493600 => 562704
*/
// multiply returns the product of the two non-negative decimal integers
// given as strings, using schoolbook long multiplication: each digit of
// num2 produces a zero-padded partial product, and the partials are summed
// with string addition (addStrings).
func multiply(num1 string, num2 string) string {
	// If either factor is zero the product is zero.
	if num1 == "0" || num2 == "0" {
		return "0"
	}
	n1, n2 := len(num1), len(num2)
	total := ""
	// Walk num2 from its least significant digit.
	for i := n2 - 1; i >= 0; i-- {
		partial := ""
		// Pad with one trailing zero per place value of this digit.
		for pad := n2 - 1; pad > i; pad-- {
			partial += "0"
		}
		// carry holds the overflow of each digit product, e.g. 3*4=12 -> 1.
		carry := 0
		digit2 := int(num2[i] - '0')
		// Multiply every digit of num1 by this digit of num2.
		for j := n1 - 1; j >= 0; j-- {
			digit1 := int(num1[j] - '0')
			prod := digit1*digit2 + carry
			// Keep the low digit, carry the rest.
			partial = strconv.Itoa(prod%10) + partial
			carry = prod / 10
		}
		// Flush any remaining carry digits.
		for ; carry != 0; carry /= 10 {
			partial = strconv.Itoa(carry%10) + partial
		}
		// Accumulate the partial product into the running total.
		total = addStrings(partial, total)
	}
	return total
}
// addStrings returns the sum of the two non-negative decimal integers
// given as strings, adding digit by digit from the right with a carry.
func addStrings(num1, num2 string) string {
	p1, p2 := len(num1)-1, len(num2)-1
	carry := 0
	sum := ""
	// Continue while either number has digits left or a carry remains.
	for p1 >= 0 || p2 >= 0 || carry != 0 {
		d := carry
		if p1 >= 0 {
			d += int(num1[p1] - '0')
			p1--
		}
		if p2 >= 0 {
			d += int(num2[p2] - '0')
			p2--
		}
		sum = strconv.Itoa(d%10) + sum
		carry = d / 10
	}
	return sum
}
|
package main
import (
"fmt"
"github.com/jackytck/projecteuler/tools"
)
// extract joins the digits slice[start:end] into a single integer.
func extract(slice []int, start, end int) int {
	digits := slice[start:end]
	return tools.JoinInts(digits)
}
// check reports whether n is evenly divisible by d.
func check(n, d int) bool {
	remainder := n % d
	return remainder == 0
}
// solve sums every 0-9 pandigital number whose overlapping 3-digit groups
// d2d3d4 .. d8d9d10 are divisible by 2, 3, 5, 7, 11, 13, 17 respectively
// (Project Euler 43). The digit 5 is fixed at position d6 (the divisible-
// by-5 constraint), so only the other nine digits are permuted.
func solve() int {
	var sum int
	// v is a permutation of the nine digits excluding 5; 5 is inserted at
	// index 5 (becoming d6) just before the final number is assembled.
	for v := range tools.Perms([]int{0, 1, 2, 3, 4, 6, 7, 8, 9}) {
		p2 := extract(v, 1, 4) // d2d3d4: positions before the insertion point are unchanged
		p3 := extract(v, 2, 5) // d3d4d5
		p7 := v[4]*100 + 50 + v[5]    // d5d6d7 with d6 = 5
		p11 := 500 + extract(v, 5, 7) // d6d7d8 with d6 = 5
		p13 := extract(v, 5, 8) // d7d8d9: v[5:] are the digits after the inserted 5
		p17 := extract(v, 6, 9) // d8d9d10
		if check(p2, 2) && check(p3, 3) && check(p7, 7) && check(p11, 11) && check(p13, 13) && check(p17, 17) {
			// Build the full 10-digit number by inserting 5 at index 5.
			// NOTE(review): append(v[:5], ...) relies on the appended data
			// being longer than v's spare capacity (fresh backing array);
			// statement order here is deliberate — do not reorder.
			t := append(v[:5], append([]int{5}, v[5:]...)...)
			sum += tools.JoinInts(t)
		}
	}
	return sum
}
// main prints the puzzle answer computed by solve.
func main() {
	answer := solve()
	fmt.Println(answer)
}
// Sum of all 0 to 9 pandigital numbers, where its sub-digits are divisible by
// 2, 3, 5, 7, 11, 13 and 17 respectively.
|
package main
import "fmt"
/*
This example shows how to create variables and constants in Go.
*/
// main runs the variable demonstration followed by the constant one.
func main() {
	variables()
	constants()
}
// variables demonstrates the ways to declare and assign variables in Go:
// short declaration, grouped var blocks, multi-variable declarations and
// one-at-a-time declarations.
func variables() {
	fmt.Println("==> Variables section:")

	// Short declaration: declaration and assignment in one step, type inferred.
	name, location := "Prince Oberyn", "Dorne"
	fmt.Println("variables are:", name, location)

	// Grouped declaration first, assignment afterwards.
	var (
		name1 string
		age1  int
	)
	name1, age1 = "test", 1
	fmt.Println("variables are:", name1, age1)

	// Several variables of one type in a single declaration (zero-valued).
	var a, b, c int
	println("variables a,b,c, are:", a, b, c)

	// Variables declared one by one, with an explicit initializer for third.
	var one string
	var second int
	var third bool = false
	one, second = "hello", 100
	fmt.Println("variables are:", one, second, third)
}
// constants demonstrates constant declarations. Constants can only be
// character, string, boolean, or numeric values, and cannot be declared
// with the := syntax.
func constants() {
	fmt.Println("==> Constants section:")
	const Pi = 3.14
	const (
		StatusOK      = 200
		StatusCreated = 201
		Big           = 1 << 62
	)
	fmt.Printf("Constants values are: Pi = %f, StatusOK = %d, StatusCreated = %d"+
		", Big = %d", Pi, StatusOK, StatusCreated, Big)
}
|
package proto
// PubMsg is a published message. Field order is load-bearing for any
// positional struct literals elsewhere in the project — do not reorder.
type PubMsg struct {
	RawID     []byte // raw message id as received
	ID        []byte // message id
	Topic     []byte // destination topic
	Payload   []byte // message body
	Acked     bool   // whether the message has been acknowledged
	Type      int8   // message type code — semantics defined elsewhere; TODO confirm
	QoS       int8   // quality-of-service level — presumably MQTT-style 0/1/2; verify
	TTL       int64  // time to live — units not visible here; confirm against sender
	Sender    []byte // sender identifier
	Timestamp []byte // byte-encoded timestamp — encoding not visible here; verify
}

// TimerMsg is a message scheduled for later delivery.
type TimerMsg struct {
	ID      []byte // message id
	Topic   []byte // destination topic
	Payload []byte // message body
	Trigger int64  // trigger time — presumably a unix timestamp; verify
	Delay   int    // delay before delivery — units not visible here; verify
}

// Ack acknowledges a message on a topic.
type Ack struct {
	Topic []byte // topic the message was delivered on
	Msgid []byte // id of the acknowledged message
}
|
package main
import "fmt"
import "math"
// main prints the first triangular number with more than 500 divisors
// (Project Euler 12). Triangular numbers are the running sums 1, 3, 6, 10…
func main() {
	n := 0
	triangle := 0
	for {
		n++
		triangle += n
		if factors(triangle) > 500 {
			fmt.Println(triangle)
			return
		}
	}
}
// factors returns the number of positive divisors of n.
//
// Divisors come in pairs (i, n/i) with i <= sqrt(n), so each small divisor
// found is counted twice. Bug fix: when n is a perfect square the root
// pairs with itself, and the original double-counted it (e.g. it reported
// 6 divisors for 16 instead of 5); the correction subtracts one in that case.
func factors(n int) (facCount int) {
	root := int(math.Sqrt(float64(n)))
	for i := 1; i <= root; i++ {
		if n%i == 0 {
			facCount += 2
		}
	}
	// A perfect square's root was counted twice above.
	if root*root == n {
		facCount--
	}
	return facCount
}
|
package main
import (
"fmt"
"math"
)
// checkPrime reports whether num is a prime number.
//
// Bug fix: the original returned true for 0, 1 and negative inputs because
// its trial-division loop never ran for them; numbers below 2 are not
// prime. The trial-division bound is also tightened from num/2 to sqrt(num)
// — any composite has a factor no larger than its square root.
func checkPrime(num int) bool {
	if num < 2 {
		return false
	}
	limit := int(math.Sqrt(float64(num)))
	for i := 2; i <= limit; i++ {
		if num%i == 0 {
			return false
		}
	}
	return true
}
// main reads an integer from stdin and reports whether it is prime,
// printing "Bukan Bilangan Prima" (not a prime) or "Bilangan Prima".
func main() {
	var num int
	fmt.Print("Input: ")
	_, _ = fmt.Scanln(&num)
	fmt.Print("Output: ")
	prime := checkPrime(num)
	if !prime {
		fmt.Print("Bukan ")
	}
	fmt.Println("Bilangan Prima")
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package temporarytabletest
import (
"fmt"
"sort"
"strconv"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
// TestLocalTemporaryTableInsert checks INSERT into a session-local temporary
// table: auto-increment ids, duplicate-key errors on both the primary key
// and the unique index (inside and outside explicit transactions), and that
// commit and rollback behave as expected.
func TestLocalTemporaryTableInsert(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create temporary table tmp1 (id int primary key auto_increment, u int unique, v int)")
	tk.MustExec("insert into tmp1 (u, v) values(11, 101)")
	tk.MustExec("insert into tmp1 (u, v) values(12, 102)")
	tk.MustExec("insert into tmp1 values(3, 13, 102)")
	// Asserts the three seeded rows exist (ids 1 and 2 were auto-generated)
	// and that a never-inserted id is absent.
	checkRecordOneTwoThreeAndNonExist := func() {
		tk.MustQuery("select * from tmp1 where id=1").Check(testkit.Rows("1 11 101"))
		tk.MustQuery("select * from tmp1 where id=2").Check(testkit.Rows("2 12 102"))
		tk.MustQuery("select * from tmp1 where id=3").Check(testkit.Rows("3 13 102"))
		tk.MustQuery("select * from tmp1 where id=99").Check(testkit.Rows())
	}
	// inserted records exist
	checkRecordOneTwoThreeAndNonExist()
	// insert dup records out of txn must be a duplicate-key error (on the
	// primary key, then on the unique index) and leave the rows untouched
	require.True(t, kv.ErrKeyExists.Equal(tk.ExecToErr("insert into tmp1 values(1, 999, 9999)")))
	checkRecordOneTwoThreeAndNonExist()
	require.True(t, kv.ErrKeyExists.Equal(tk.ExecToErr("insert into tmp1 values(99, 11, 999)")))
	checkRecordOneTwoThreeAndNonExist()
	// insert dup records in txn must be error
	tk.MustExec("begin")
	require.True(t, kv.ErrKeyExists.Equal(tk.ExecToErr("insert into tmp1 values(1, 999, 9999)")))
	checkRecordOneTwoThreeAndNonExist()
	require.True(t, kv.ErrKeyExists.Equal(tk.ExecToErr("insert into tmp1 values(99, 11, 9999)")))
	checkRecordOneTwoThreeAndNonExist()
	// a row inserted within the txn also conflicts with later duplicates
	tk.MustExec("insert into tmp1 values(4, 14, 104)")
	tk.MustQuery("select * from tmp1 where id=4").Check(testkit.Rows("4 14 104"))
	require.True(t, kv.ErrKeyExists.Equal(tk.ExecToErr("insert into tmp1 values(4, 999, 9999)")))
	require.True(t, kv.ErrKeyExists.Equal(tk.ExecToErr("insert into tmp1 values(99, 14, 9999)")))
	checkRecordOneTwoThreeAndNonExist()
	tk.MustExec("commit")
	// check committed insert works
	checkRecordOneTwoThreeAndNonExist()
	tk.MustQuery("select * from tmp1 where id=4").Check(testkit.Rows("4 14 104"))
	// check rollback works
	tk.MustExec("begin")
	tk.MustExec("insert into tmp1 values(5, 15, 105)")
	tk.MustQuery("select * from tmp1 where id=5").Check(testkit.Rows("5 15 105"))
	tk.MustExec("rollback")
	tk.MustQuery("select * from tmp1 where id=5").Check(testkit.Rows())
}
// TestLocalTemporaryTableUpdate checks UPDATE on a session-local temporary
// table across every access path (point get, batch point get, index scan,
// table scan) and across autocommit / commit / rollback combinations.
// Each case is run four times: in a rolled-back txn over txn-inserted rows,
// outside a txn, in a rolled-back txn over pre-inserted rows, and in a
// committed txn.
func TestLocalTemporaryTableUpdate(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create temporary table tmp1 (id int primary key, u int unique, v int)")
	idList := []int{1, 2, 3, 4, 5, 6, 7, 8, 9}
	// insertRecords seeds one row (id, id+100, id+1000) per id.
	insertRecords := func(idList []int) {
		for _, id := range idList {
			tk.MustExec("insert into tmp1 values (?, ?, ?)", id, id+100, id+1000)
		}
	}
	// checkNoChange asserts the table still holds exactly the seeded rows.
	checkNoChange := func() {
		expect := make([]string, 0)
		for _, id := range idList {
			expect = append(expect, fmt.Sprintf("%d %d %d", id, id+100, id+1000))
		}
		tk.MustQuery("select * from tmp1").Check(testkit.Rows(expect...))
	}
	// checkUpdatesAndDeletes asserts the table content after applying the
	// given updated rows ("id u v" strings) and deleted ids to the seed data.
	checkUpdatesAndDeletes := func(updates []string, deletes []int) {
		modifyMap := make(map[int]string)
		for _, m := range updates {
			parts := strings.Split(strings.TrimSpace(m), " ")
			require.NotZero(t, len(parts))
			id, err := strconv.Atoi(parts[0])
			require.NoError(t, err)
			modifyMap[id] = m
		}
		// A deleted id maps to the empty string.
		for _, d := range deletes {
			modifyMap[d] = ""
		}
		// First, rows that kept an id inside the seeded range.
		expect := make([]string, 0)
		for _, id := range idList {
			modify, exist := modifyMap[id]
			if !exist {
				expect = append(expect, fmt.Sprintf("%d %d %d", id, id+100, id+1000))
				continue
			}
			if modify != "" {
				expect = append(expect, modify)
			}
			delete(modifyMap, id)
		}
		// Then rows whose id moved outside the seeded range, ordered by id.
		otherIds := make([]int, 0)
		for id := range modifyMap {
			otherIds = append(otherIds, id)
		}
		sort.Ints(otherIds)
		for _, id := range otherIds {
			modify, exist := modifyMap[id]
			require.True(t, exist)
			expect = append(expect, modify)
		}
		tk.MustQuery("select * from tmp1").Check(testkit.Rows(expect...))
	}
	// checkSuccess describes an expected successful statement: the rows it
	// updates and the seeded ids it removes.
	type checkSuccess struct {
		update []string
		delete []int
	}
	// checkError describes an expected failing statement.
	type checkError struct {
		err error
	}
	cases := []struct {
		sql             string
		checkResult     interface{}
		additionalCheck func(error)
	}{
		// update with point get for primary key
		{"update tmp1 set v=999 where id=1", checkSuccess{[]string{"1 101 999"}, nil}, nil},
		{"update tmp1 set id=12 where id=1", checkSuccess{[]string{"12 101 1001"}, []int{1}}, nil},
		{"update tmp1 set id=1 where id=1", checkSuccess{nil, nil}, nil},
		{"update tmp1 set u=101 where id=1", checkSuccess{nil, nil}, nil},
		{"update tmp1 set v=999 where id=100", checkSuccess{nil, nil}, nil},
		{"update tmp1 set u=102 where id=100", checkSuccess{nil, nil}, nil},
		{"update tmp1 set u=21 where id=1", checkSuccess{[]string{"1 21 1001"}, nil}, func(_ error) {
			// check index deleted
			tk.MustQuery("select /*+ use_index(tmp1, u) */ * from tmp1 where u=101").Check(testkit.Rows())
			tk.MustQuery("show warnings").Check(testkit.Rows())
		}},
		{"update tmp1 set id=2 where id=1", checkError{kv.ErrKeyExists}, nil},
		{"update tmp1 set u=102 where id=1", checkError{kv.ErrKeyExists}, nil},
		// update with batch point get for primary key
		{"update tmp1 set v=v+1000 where id in (1, 3, 5)", checkSuccess{[]string{"1 101 2001", "3 103 2003", "5 105 2005"}, nil}, nil},
		{"update tmp1 set u=u+1 where id in (9, 100)", checkSuccess{[]string{"9 110 1009"}, nil}, nil},
		{"update tmp1 set u=101 where id in (100, 101)", checkSuccess{nil, nil}, nil},
		{"update tmp1 set id=id+1 where id in (8, 9)", checkError{kv.ErrKeyExists}, nil},
		{"update tmp1 set u=u+1 where id in (8, 9)", checkError{kv.ErrKeyExists}, nil},
		{"update tmp1 set id=id+20 where id in (1, 3, 5)", checkSuccess{[]string{"21 101 1001", "23 103 1003", "25 105 1005"}, []int{1, 3, 5}}, nil},
		{"update tmp1 set u=u+100 where id in (1, 3, 5)", checkSuccess{[]string{"1 201 1001", "3 203 1003", "5 205 1005"}, nil}, func(_ error) {
			// check index deleted
			tk.MustQuery("select /*+ use_index(tmp1, u) */ * from tmp1 where u in (101, 103, 105)").Check(testkit.Rows())
			tk.MustQuery("show warnings").Check(testkit.Rows())
		}},
		// update with point get for unique key
		{"update tmp1 set v=888 where u=101", checkSuccess{[]string{"1 101 888"}, nil}, nil},
		{"update tmp1 set id=21 where u=101", checkSuccess{[]string{"21 101 1001"}, []int{1}}, nil},
		{"update tmp1 set v=888 where u=201", checkSuccess{nil, nil}, nil},
		{"update tmp1 set u=201 where u=101", checkSuccess{[]string{"1 201 1001"}, nil}, nil},
		{"update tmp1 set id=2 where u=101", checkError{kv.ErrKeyExists}, nil},
		{"update tmp1 set u=102 where u=101", checkError{kv.ErrKeyExists}, nil},
		// update with batch point get for unique key
		{"update tmp1 set v=v+1000 where u in (101, 103)", checkSuccess{[]string{"1 101 2001", "3 103 2003"}, nil}, nil},
		{"update tmp1 set v=v+1000 where u in (201, 203)", checkSuccess{nil, nil}, nil},
		{"update tmp1 set v=v+1000 where u in (101, 110)", checkSuccess{[]string{"1 101 2001"}, nil}, nil},
		{"update tmp1 set id=id+1 where u in (108, 109)", checkError{kv.ErrKeyExists}, nil},
		// update with table scan and index scan
		{"update tmp1 set v=v+1000 where id<3", checkSuccess{[]string{"1 101 2001", "2 102 2002"}, nil}, nil},
		{"update /*+ use_index(tmp1, u) */ tmp1 set v=v+1000 where u>107", checkSuccess{[]string{"8 108 2008", "9 109 2009"}, nil}, nil},
		{"update tmp1 set v=v+1000 where v>=1007 or v<=1002", checkSuccess{[]string{"1 101 2001", "2 102 2002", "7 107 2007", "8 108 2008", "9 109 2009"}, nil}, nil},
		{"update tmp1 set v=v+1000 where id>=10", checkSuccess{nil, nil}, nil},
		{"update tmp1 set id=id+1 where id>7", checkError{kv.ErrKeyExists}, nil},
		{"update tmp1 set id=id+1 where id>8", checkSuccess{[]string{"10 109 1009"}, []int{9}}, nil},
		{"update tmp1 set u=u+1 where u>107", checkError{kv.ErrKeyExists}, nil},
		{"update tmp1 set u=u+1 where u>108", checkSuccess{[]string{"9 110 1009"}, nil}, nil},
		{"update /*+ use_index(tmp1, u) */ tmp1 set v=v+1000 where u>108 or u<102", checkSuccess{[]string{"1 101 2001", "9 109 2009"}, nil}, nil},
	}
	// executeSQL runs one case: success cases must leave the expected rows,
	// error cases must fail with the expected error and change nothing.
	executeSQL := func(sql string, checkResult interface{}, additionalCheck func(error)) (err error) {
		switch check := checkResult.(type) {
		case checkSuccess:
			tk.MustExec(sql)
			tk.MustQuery("show warnings").Check(testkit.Rows())
			checkUpdatesAndDeletes(check.update, check.delete)
		case checkError:
			err = tk.ExecToErr(sql)
			require.Error(t, err)
			expectedErr, _ := check.err.(*terror.Error)
			require.True(t, expectedErr.Equal(err))
			checkNoChange()
		default:
			t.Fail()
		}
		if additionalCheck != nil {
			additionalCheck(err)
		}
		return
	}
	for _, sqlCase := range cases {
		// update records in txn and records are inserted in txn
		tk.MustExec("begin")
		insertRecords(idList)
		_ = executeSQL(sqlCase.sql, sqlCase.checkResult, sqlCase.additionalCheck)
		tk.MustExec("rollback")
		tk.MustQuery("select * from tmp1").Check(testkit.Rows())
		// update records out of txn
		insertRecords(idList)
		_ = executeSQL(sqlCase.sql, sqlCase.checkResult, sqlCase.additionalCheck)
		tk.MustExec("delete from tmp1")
		// update records in txn and rollback
		insertRecords(idList)
		tk.MustExec("begin")
		_ = executeSQL(sqlCase.sql, sqlCase.checkResult, sqlCase.additionalCheck)
		tk.MustExec("rollback")
		// rollback left records unmodified
		checkNoChange()
		// update records in txn and commit
		tk.MustExec("begin")
		err := executeSQL(sqlCase.sql, sqlCase.checkResult, sqlCase.additionalCheck)
		tk.MustExec("commit")
		if err != nil {
			checkNoChange()
		} else {
			r, _ := sqlCase.checkResult.(checkSuccess)
			checkUpdatesAndDeletes(r.update, r.delete)
		}
		if sqlCase.additionalCheck != nil {
			sqlCase.additionalCheck(err)
		}
		tk.MustExec("delete from tmp1")
		tk.MustQuery("select * from tmp1").Check(testkit.Rows())
	}
}
// TestTemporaryTableSize checks the @@tidb_tmp_table_max_size system
// variable: its default, its 1M minimum (with truncation warning), that the
// session-scope value is independent of the global scope, and that inserts
// exceeding the limit fail with ErrRecordFileFull for both global and local
// temporary tables.
func TestTemporaryTableSize(t *testing.T) {
	// Test the @@tidb_tmp_table_max_size system variable.
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create global temporary table t (c1 int, c2 mediumtext) on commit delete rows")
	tk.MustExec("create temporary table tl (c1 int, c2 mediumtext)")
	tk.MustQuery("select @@global.tidb_tmp_table_max_size").Check(testkit.Rows(strconv.Itoa(variable.DefTiDBTmpTableMaxSize)))
	require.Equal(t, int64(variable.DefTiDBTmpTableMaxSize), tk.Session().GetSessionVars().TMPTableSize)
	// Min value 1M, so the result is change to 1M, with a warning.
	tk.MustExec("set @@global.tidb_tmp_table_max_size = 123")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_tmp_table_max_size value: '123'"))
	// Change the session scope value to 2M.
	tk.MustExec("set @@session.tidb_tmp_table_max_size = 2097152")
	require.Equal(t, int64(2097152), tk.Session().GetSessionVars().TMPTableSize)
	// Check in another session, change session scope value does not affect the global scope.
	tk1 := testkit.NewTestKit(t, store)
	tk1.MustQuery("select @@global.tidb_tmp_table_max_size").Check(testkit.Rows(strconv.Itoa(1 << 20)))
	// The value is now 1M, check the error when table size exceed it.
	tk.MustExec(fmt.Sprintf("set @@session.tidb_tmp_table_max_size = %d", 1<<20))
	tk.MustExec("begin")
	tk.MustExec("insert into t values (1, repeat('x', 512*1024))")
	tk.MustExec("insert into t values (1, repeat('x', 512*1024))")
	// The third 512KB row pushes the table past the 1M limit.
	tk.MustGetErrCode("insert into t values (1, repeat('x', 512*1024))", errno.ErrRecordFileFull)
	tk.MustExec("rollback")
	// Check local temporary table
	tk.MustExec("begin")
	tk.MustExec("insert into tl values (1, repeat('x', 512*1024))")
	tk.MustExec("insert into tl values (1, repeat('x', 512*1024))")
	tk.MustGetErrCode("insert into tl values (1, repeat('x', 512*1024))", errno.ErrRecordFileFull)
	tk.MustExec("rollback")
	// Check local temporary table with some data in session
	tk.MustExec("insert into tl values (1, repeat('x', 512*1024))")
	tk.MustExec("begin")
	tk.MustExec("insert into tl values (1, repeat('x', 512*1024))")
	tk.MustGetErrCode("insert into tl values (1, repeat('x', 512*1024))", errno.ErrRecordFileFull)
	tk.MustExec("rollback")
}
// TestGlobalTemporaryTable checks reads on a global temporary table through
// every executor path (table scan, index reader, index lookup, point get,
// batch point get) and that its rows are discarded on commit.
func TestGlobalTemporaryTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create global temporary table g_tmp (a int primary key, b int, c int, index i_b(b)) on commit delete rows")
	tk.MustExec("begin")
	tk.MustExec("insert into g_tmp values (3, 3, 3)")
	tk.MustExec("insert into g_tmp values (4, 7, 9)")
	// Cover table scan.
	tk.MustQuery("select * from g_tmp").Check(testkit.Rows("3 3 3", "4 7 9"))
	// Cover index reader.
	tk.MustQuery("select b from g_tmp where b > 3").Check(testkit.Rows("7"))
	// Cover index lookup.
	tk.MustQuery("select c from g_tmp where b = 3").Check(testkit.Rows("3"))
	// Cover point get.
	tk.MustQuery("select * from g_tmp where a = 3").Check(testkit.Rows("3 3 3"))
	// Cover batch point get.
	tk.MustQuery("select * from g_tmp where a in (2,3,4)").Check(testkit.Rows("3 3 3", "4 7 9"))
	tk.MustExec("commit")
	// The global temporary table data is discarded after the transaction commit.
	tk.MustQuery("select * from g_tmp").Check(testkit.Rows())
}
// TestRetryGlobalTemporaryTable checks that statements reading a global
// temporary table (insert-select and multi-table update into a normal
// table) survive TiDB's internal transaction retry after a write conflict
// created by a second session.
func TestRetryGlobalTemporaryTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	setTxnTk := testkit.NewTestKit(t, store)
	setTxnTk.MustExec("set global tidb_txn_mode=''")
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists normal_table")
	tk.MustExec("create table normal_table(a int primary key, b int)")
	defer tk.MustExec("drop table if exists normal_table")
	tk.MustExec("drop table if exists temp_table")
	tk.MustExec("create global temporary table temp_table(a int primary key, b int) on commit delete rows")
	defer tk.MustExec("drop table if exists temp_table")
	// insert select
	tk.MustExec("set tidb_disable_txn_auto_retry = 0")
	tk.MustExec("insert normal_table value(100, 100)")
	tk.MustExec("set @@autocommit = 0")
	// used to make conflicts
	tk.MustExec("update normal_table set b=b+1 where a=100")
	tk.MustExec("insert temp_table value(1, 1)")
	tk.MustExec("insert normal_table select * from temp_table")
	require.Equal(t, 3, session.GetHistory(tk.Session()).Count())
	// try to conflict with tk
	tk1 := testkit.NewTestKit(t, store)
	tk1.MustExec("use test")
	tk1.MustExec("update normal_table set b=b+1 where a=100")
	// It will retry internally.
	tk.MustExec("commit")
	tk.MustQuery("select a, b from normal_table order by a").Check(testkit.Rows("1 1", "100 102"))
	// temp_table rows are gone after commit (on commit delete rows).
	tk.MustQuery("select a, b from temp_table order by a").Check(testkit.Rows())
	// update multi-tables
	tk.MustExec("update normal_table set b=b+1 where a=100")
	tk.MustExec("insert temp_table value(1, 2)")
	// before update: normal_table=(1 1) (100 102), temp_table=(1 2)
	tk.MustExec("update normal_table, temp_table set normal_table.b=temp_table.b where normal_table.a=temp_table.a")
	require.Equal(t, 3, session.GetHistory(tk.Session()).Count())
	// try to conflict with tk
	tk1.MustExec("update normal_table set b=b+1 where a=100")
	// It will retry internally.
	tk.MustExec("commit")
	tk.MustQuery("select a, b from normal_table order by a").Check(testkit.Rows("1 2", "100 104"))
}
// TestRetryLocalTemporaryTable checks that an optimistic transaction mixing a
// local (session-scoped) temporary table with a normal table is transparently
// retried on write conflict, and that — unlike a global temporary table with
// ON COMMIT DELETE ROWS — the local temporary table keeps its rows after
// commit within the same session.
func TestRetryLocalTemporaryTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	setTxnTk := testkit.NewTestKit(t, store)
	setTxnTk.MustExec("set global tidb_txn_mode=''")
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists normal_table")
	tk.MustExec("create table normal_table(a int primary key, b int)")
	defer tk.MustExec("drop table if exists normal_table")
	// Pre-clean the table we are about to create. The original dropped
	// "temp_table" here — a copy/paste leftover from the global temporary
	// table test; the table used below is l_temp_table.
	tk.MustExec("drop table if exists l_temp_table")
	tk.MustExec("create temporary table l_temp_table(a int primary key, b int)")
	defer tk.MustExec("drop table if exists l_temp_table")
	// insert select
	tk.MustExec("set tidb_disable_txn_auto_retry = 0")
	tk.MustExec("insert normal_table value(100, 100)")
	tk.MustExec("set @@autocommit = 0")
	// used to make conflicts
	tk.MustExec("update normal_table set b=b+1 where a=100")
	tk.MustExec("insert l_temp_table value(1, 2)")
	tk.MustExec("insert normal_table select * from l_temp_table")
	// three statements (update + two inserts) are recorded for auto-retry
	require.Equal(t, 3, session.GetHistory(tk.Session()).Count())
	// try to conflict with tk
	tk1 := testkit.NewTestKit(t, store)
	tk1.MustExec("use test")
	tk1.MustExec("update normal_table set b=b+1 where a=100")
	// It will retry internally.
	tk.MustExec("commit")
	tk.MustQuery("select a, b from normal_table order by a").Check(testkit.Rows("1 2", "100 102"))
	// the local temporary table keeps its rows across the commit
	tk.MustQuery("select a, b from l_temp_table order by a").Check(testkit.Rows("1 2"))
	// update multi-tables
	tk.MustExec("update normal_table set b=b+1 where a=100")
	tk.MustExec("insert l_temp_table value(3, 4)")
	// before the multi-table update: normal_table=(1 2) (100 103), l_temp_table=(1 2) (3 4)
	tk.MustExec("update normal_table, l_temp_table set normal_table.b=l_temp_table.b where normal_table.a=l_temp_table.a")
	require.Equal(t, 3, session.GetHistory(tk.Session()).Count())
	// try to conflict with tk
	tk1.MustExec("update normal_table set b=b+1 where a=100")
	// It will retry internally.
	tk.MustExec("commit")
	tk.MustQuery("select a, b from normal_table order by a").Check(testkit.Rows("1 2", "100 104"))
}
// TestLocalTemporaryTableInsertOnDuplicateKeyUpdate exercises INSERT ... ON
// DUPLICATE KEY UPDATE against a local temporary table: outside a transaction,
// inside a rolled-back transaction, and inside a committed one.
func TestLocalTemporaryTableInsertOnDuplicateKeyUpdate(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create temporary table tmp1 (id int primary key auto_increment, u int unique, v int)")
tk.MustExec("insert into tmp1 values(1, 11, 101)")
tk.MustExec("insert into tmp1 values(2, 12, 102)")
// test outside transaction
// the update itself collides on the unique key u, so with INSERT IGNORE the
// row is left untouched and only a warning is reported
tk.MustExec("insert ignore into tmp1 values(1, 100, 1000) on duplicate key update u=12")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1062 Duplicate entry '12' for key 'tmp1.u'"))
tk.MustQuery("select * from tmp1 where id=1").Check(testkit.Rows("1 11 101"))
tk.MustExec("insert into tmp1 values(2, 100, 1000) on duplicate key update v=202")
tk.MustQuery("show warnings").Check(testkit.Rows())
tk.MustQuery("select * from tmp1 where id=2").Check(testkit.Rows("2 12 202"))
tk.MustExec("insert into tmp1 values(3, 13, 103) on duplicate key update v=203")
tk.MustQuery("show warnings").Check(testkit.Rows())
tk.MustQuery("select * from tmp1 where id=3").Check(testkit.Rows("3 13 103"))
// test in transaction and rollback
tk.MustExec("begin")
tk.MustExec("insert ignore into tmp1 values(1, 100, 1000) on duplicate key update u=12")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1062 Duplicate entry '12' for key 'tmp1.u'"))
tk.MustQuery("select * from tmp1 where id=1").Check(testkit.Rows("1 11 101"))
tk.MustExec("insert into tmp1 values(2, 100, 1000) on duplicate key update v=302")
tk.MustQuery("show warnings").Check(testkit.Rows())
tk.MustQuery("select * from tmp1 where id=2").Check(testkit.Rows("2 12 302"))
tk.MustExec("insert into tmp1 values(4, 14, 104) on duplicate key update v=204")
tk.MustQuery("show warnings").Check(testkit.Rows())
tk.MustQuery("select * from tmp1 where id=4").Check(testkit.Rows("4 14 104"))
tk.MustExec("rollback")
// rollback restores the pre-transaction contents of the temporary table
tk.MustQuery("select * from tmp1").Check(testkit.Rows("1 11 101", "2 12 202", "3 13 103"))
// test commit
tk.MustExec("begin")
tk.MustExec("insert ignore into tmp1 values(1, 100, 1000) on duplicate key update u=12")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1062 Duplicate entry '12' for key 'tmp1.u'"))
tk.MustExec("insert into tmp1 values(2, 100, 1000) on duplicate key update v=302")
tk.MustExec("insert into tmp1 values(4, 14, 104) on duplicate key update v=204")
tk.MustExec("commit")
tk.MustQuery("select * from tmp1").Check(testkit.Rows("1 11 101", "2 12 302", "3 13 103", "4 14 104"))
}
// TestLocalTemporaryTableReplace exercises REPLACE INTO against a local
// temporary table outside a transaction, inside a rolled-back transaction,
// and inside a committed one. A REPLACE that collides on both the primary
// key and the unique key removes the old conflicting rows.
func TestLocalTemporaryTableReplace(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create temporary table tmp1 (id int primary key auto_increment, u int unique, v int)")
tk.MustExec("insert into tmp1 values(1, 11, 101)")
tk.MustExec("insert into tmp1 values(2, 12, 102)")
tk.MustExec("insert into tmp1 values(3, 13, 103)")
// out of transaction
// (1, 12, 1000) conflicts with id=1 and with u=12 (id=2): both old rows go away
tk.MustExec("replace into tmp1 values(1, 12, 1000)")
tk.MustQuery("select * from tmp1").Check(testkit.Rows("1 12 1000", "3 13 103"))
tk.MustExec("replace into tmp1 values(4, 14, 104)")
tk.MustQuery("select * from tmp1 where id=4").Check(testkit.Rows("4 14 104"))
// in transaction and rollback
tk.MustExec("begin")
tk.MustExec("replace into tmp1 values(1, 13, 999)")
tk.MustQuery("select * from tmp1").Check(testkit.Rows("1 13 999", "4 14 104"))
tk.MustExec("replace into tmp1 values(5, 15, 105)")
tk.MustQuery("select * from tmp1 where id=5").Check(testkit.Rows("5 15 105"))
tk.MustExec("rollback")
tk.MustQuery("select * from tmp1").Check(testkit.Rows("1 12 1000", "3 13 103", "4 14 104"))
// in transaction and commit
tk.MustExec("begin")
tk.MustExec("replace into tmp1 values(1, 13, 999)")
tk.MustExec("replace into tmp1 values(5, 15, 105)")
tk.MustExec("commit")
tk.MustQuery("select * from tmp1").Check(testkit.Rows("1 13 999", "4 14 104", "5 15 105"))
}
// TestLocalTemporaryTableDelete runs a battery of DELETE statements (by
// primary key, by unique key, by range, with index hints) against a local
// temporary table, verifying each one inside rolled-back, committed, and
// autocommit contexts, and checking that the unique index stays in sync.
func TestLocalTemporaryTableDelete(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create temporary table tmp1 (id int primary key, u int unique, v int)")
// insertRecords inserts one row (id, id+100, id+1000) per id.
insertRecords := func(idList []int) {
for _, id := range idList {
tk.MustExec("insert into tmp1 values (?, ?, ?)", id, id+100, id+1000)
}
}
// checkAllExistRecords asserts that both the table data and the u index
// contain exactly the rows for idList (so index entries were deleted too).
checkAllExistRecords := func(idList []int) {
sort.Ints(idList)
expectedResult := make([]string, 0, len(idList))
expectedIndexResult := make([]string, 0, len(idList))
for _, id := range idList {
expectedResult = append(expectedResult, fmt.Sprintf("%d %d %d", id, id+100, id+1000))
expectedIndexResult = append(expectedIndexResult, fmt.Sprintf("%d", id+100))
}
tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows(expectedResult...))
// check index deleted
tk.MustQuery("select /*+ use_index(tmp1, u) */ u from tmp1 order by u").Check(testkit.Rows(expectedIndexResult...))
tk.MustQuery("show warnings").Check(testkit.Rows())
}
// assertDelete executes sql in several transactional contexts and verifies
// that exactly the ids in `deleted` are removed each time.
assertDelete := func(sql string, deleted []int) {
idList := []int{1, 2, 3, 4, 5, 6, 7, 8, 9}
deletedMap := make(map[int]bool)
for _, id := range deleted {
deletedMap[id] = true
}
keepList := make([]int, 0)
for _, id := range idList {
if _, exist := deletedMap[id]; !exist {
keepList = append(keepList, id)
}
}
// delete records in txn and records are inserted in txn
tk.MustExec("begin")
insertRecords(idList)
tk.MustExec(sql)
tk.MustQuery("show warnings").Check(testkit.Rows())
checkAllExistRecords(keepList)
tk.MustExec("rollback")
checkAllExistRecords([]int{})
// delete records out of txn
insertRecords(idList)
tk.MustExec(sql)
checkAllExistRecords(keepList)
// delete records in txn
insertRecords(deleted)
tk.MustExec("begin")
tk.MustExec(sql)
checkAllExistRecords(keepList)
// test rollback
tk.MustExec("rollback")
checkAllExistRecords(idList)
// test commit
tk.MustExec("begin")
tk.MustExec(sql)
tk.MustExec("commit")
checkAllExistRecords(keepList)
tk.MustExec("delete from tmp1")
checkAllExistRecords([]int{})
}
assertDelete("delete from tmp1 where id=1", []int{1})
assertDelete("delete from tmp1 where id in (1, 3, 5)", []int{1, 3, 5})
assertDelete("delete from tmp1 where u=102", []int{2})
assertDelete("delete from tmp1 where u in (103, 107, 108)", []int{3, 7, 8})
assertDelete("delete from tmp1 where id=10", []int{})
assertDelete("delete from tmp1 where id in (10, 12)", []int{})
assertDelete("delete from tmp1 where u=110", []int{})
assertDelete("delete from tmp1 where u in (111, 112)", []int{})
assertDelete("delete from tmp1 where id in (1, 11, 5)", []int{1, 5})
assertDelete("delete from tmp1 where u in (102, 121, 106)", []int{2, 6})
assertDelete("delete from tmp1 where id<3", []int{1, 2})
assertDelete("delete from tmp1 where u>107", []int{8, 9})
assertDelete("delete /*+ use_index(tmp1, u) */ from tmp1 where u>105 and u<107", []int{6})
assertDelete("delete from tmp1 where v>=1006 or v<=1002", []int{1, 2, 6, 7, 8, 9})
}
// TestLocalTemporaryTablePointGet verifies point-get reads (by primary key and
// by unique key) on a local temporary table before, during, and after a
// transaction, including visibility of in-transaction inserts, updates, and
// deletes.
func TestLocalTemporaryTablePointGet(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create temporary table tmp1 (id int primary key auto_increment, u int unique, v int)")
tk.MustExec("insert into tmp1 values(1, 11, 101)")
tk.MustExec("insert into tmp1 values(2, 12, 102)")
tk.MustExec("insert into tmp1 values(4, 14, 104)")
// check point get out transaction
tk.MustQuery("select * from tmp1 where id=1").Check(testkit.Rows("1 11 101"))
tk.MustQuery("select * from tmp1 where u=11").Check(testkit.Rows("1 11 101"))
tk.MustQuery("select * from tmp1 where id=2").Check(testkit.Rows("2 12 102"))
tk.MustQuery("select * from tmp1 where u=12").Check(testkit.Rows("2 12 102"))
// check point get in transaction
tk.MustExec("begin")
tk.MustQuery("select * from tmp1 where id=1").Check(testkit.Rows("1 11 101"))
tk.MustQuery("select * from tmp1 where u=11").Check(testkit.Rows("1 11 101"))
tk.MustQuery("select * from tmp1 where id=2").Check(testkit.Rows("2 12 102"))
tk.MustQuery("select * from tmp1 where u=12").Check(testkit.Rows("2 12 102"))
tk.MustExec("insert into tmp1 values(3, 13, 103)")
tk.MustQuery("select * from tmp1 where id=3").Check(testkit.Rows("3 13 103"))
tk.MustQuery("select * from tmp1 where u=13").Check(testkit.Rows("3 13 103"))
tk.MustExec("update tmp1 set v=999 where id=2")
tk.MustQuery("select * from tmp1 where id=2").Check(testkit.Rows("2 12 999"))
tk.MustExec("delete from tmp1 where id=4")
tk.MustQuery("select * from tmp1 where id=4").Check(testkit.Rows())
tk.MustQuery("select * from tmp1 where u=14").Check(testkit.Rows())
tk.MustExec("commit")
// check point get after transaction
tk.MustQuery("select * from tmp1 where id=3").Check(testkit.Rows("3 13 103"))
tk.MustQuery("select * from tmp1 where u=13").Check(testkit.Rows("3 13 103"))
tk.MustQuery("select * from tmp1 where id=2").Check(testkit.Rows("2 12 999"))
tk.MustQuery("select * from tmp1 where id=4").Check(testkit.Rows())
tk.MustQuery("select * from tmp1 where u=14").Check(testkit.Rows())
}
// TestLocalTemporaryTableBatchPointGet verifies batch point-get reads
// (IN-lists on the primary key and on the unique key) on a local temporary
// table before, during, and after a transaction. Keys that do not exist are
// simply absent from the result.
func TestLocalTemporaryTableBatchPointGet(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create temporary table tmp1 (id int primary key auto_increment, u int unique, v int)")
tk.MustExec("insert into tmp1 values(1, 11, 101)")
tk.MustExec("insert into tmp1 values(2, 12, 102)")
tk.MustExec("insert into tmp1 values(3, 13, 103)")
tk.MustExec("insert into tmp1 values(4, 14, 104)")
// check point get out transaction
tk.MustQuery("select * from tmp1 where id in (1, 3)").Check(testkit.Rows("1 11 101", "3 13 103"))
tk.MustQuery("select * from tmp1 where u in (11, 13)").Check(testkit.Rows("1 11 101", "3 13 103"))
tk.MustQuery("select * from tmp1 where id in (1, 3, 5)").Check(testkit.Rows("1 11 101", "3 13 103"))
tk.MustQuery("select * from tmp1 where u in (11, 13, 15)").Check(testkit.Rows("1 11 101", "3 13 103"))
// check point get in transaction
tk.MustExec("begin")
tk.MustQuery("select * from tmp1 where id in (1, 3)").Check(testkit.Rows("1 11 101", "3 13 103"))
tk.MustQuery("select * from tmp1 where u in (11, 13)").Check(testkit.Rows("1 11 101", "3 13 103"))
tk.MustQuery("select * from tmp1 where id in (1, 3, 5)").Check(testkit.Rows("1 11 101", "3 13 103"))
tk.MustQuery("select * from tmp1 where u in (11, 13, 15)").Check(testkit.Rows("1 11 101", "3 13 103"))
tk.MustExec("insert into tmp1 values(6, 16, 106)")
tk.MustQuery("select * from tmp1 where id in (1, 6)").Check(testkit.Rows("1 11 101", "6 16 106"))
tk.MustQuery("select * from tmp1 where u in (11, 16)").Check(testkit.Rows("1 11 101", "6 16 106"))
tk.MustExec("update tmp1 set v=999 where id=3")
tk.MustQuery("select * from tmp1 where id in (1, 3)").Check(testkit.Rows("1 11 101", "3 13 999"))
tk.MustQuery("select * from tmp1 where u in (11, 13)").Check(testkit.Rows("1 11 101", "3 13 999"))
tk.MustExec("delete from tmp1 where id=4")
tk.MustQuery("select * from tmp1 where id in (1, 4)").Check(testkit.Rows("1 11 101"))
tk.MustQuery("select * from tmp1 where u in (11, 14)").Check(testkit.Rows("1 11 101"))
tk.MustExec("commit")
// check point get after transaction
tk.MustQuery("select * from tmp1 where id in (1, 3, 6)").Check(testkit.Rows("1 11 101", "3 13 999", "6 16 106"))
tk.MustQuery("select * from tmp1 where u in (11, 13, 16)").Check(testkit.Rows("1 11 101", "3 13 999", "6 16 106"))
tk.MustQuery("select * from tmp1 where id in (1, 4)").Check(testkit.Rows("1 11 101"))
tk.MustQuery("select * from tmp1 where u in (11, 14)").Check(testkit.Rows("1 11 101"))
}
// TestLocalTemporaryTableScan verifies range scans over a local temporary
// table through the different reader paths (TableReader, IndexLookUpReader,
// IndexReader) and confirms IndexMerge is rejected for temporary tables,
// both before and after in-transaction modifications.
func TestLocalTemporaryTableScan(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create temporary table tmp1 (id int primary key auto_increment, u int unique, v int)")
tk.MustExec("insert into tmp1 values" +
"(1, 101, 1001), (3, 113, 1003), (5, 105, 1005), (7, 117, 1007), (9, 109, 1009)," +
"(10, 110, 1010), (12, 112, 1012), (14, 114, 1014), (16, 116, 1016), (18, 118, 1018)",
)
// assertSelectAsUnModified checks the reader paths against the original rows.
assertSelectAsUnModified := func() {
// For TableReader
tk.MustQuery("select * from tmp1 where id>3 order by id").Check(testkit.Rows(
"5 105 1005", "7 117 1007", "9 109 1009",
"10 110 1010", "12 112 1012", "14 114 1014", "16 116 1016", "18 118 1018",
))
// For IndexLookUpReader
tk.MustQuery("select /*+ use_index(tmp1, u) */ * from tmp1 where u>101 order by u").Check(testkit.Rows(
"5 105 1005", "9 109 1009", "10 110 1010",
"12 112 1012", "3 113 1003", "14 114 1014", "16 116 1016", "7 117 1007", "18 118 1018",
))
tk.MustQuery("show warnings").Check(testkit.Rows())
// For IndexReader
tk.MustQuery("select /*+ use_index(tmp1, u) */ id,u from tmp1 where u>101 order by id").Check(testkit.Rows(
"3 113", "5 105", "7 117", "9 109", "10 110",
"12 112", "14 114", "16 116", "18 118",
))
tk.MustQuery("show warnings").Check(testkit.Rows())
// For IndexMerge, temporary table should not use index merge
tk.MustQuery("select /*+ use_index_merge(tmp1, primary, u) */ * from tmp1 where id>5 or u>110 order by u").Check(testkit.Rows(
"9 109 1009", "10 110 1010",
"12 112 1012", "3 113 1003", "14 114 1014", "16 116 1016", "7 117 1007", "18 118 1018",
))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 IndexMerge is inapplicable or disabled. Cannot use IndexMerge on temporary table."))
}
// doModify applies a mix of inserts, updates, and a delete inside the
// current transaction scope.
doModify := func() {
tk.MustExec("insert into tmp1 values(2, 100, 1002)")
tk.MustExec("insert into tmp1 values(4, 104, 1004)")
tk.MustExec("insert into tmp1 values(11, 111, 1011)")
tk.MustExec("update tmp1 set v=9999 where id=7")
tk.MustExec("update tmp1 set u=132 where id=12")
tk.MustExec("delete from tmp1 where id=16")
}
// assertSelectAsModified checks the reader paths against the rows as
// changed by doModify.
assertSelectAsModified := func() {
// For TableReader
tk.MustQuery("select * from tmp1 where id>3 order by id").Check(testkit.Rows(
"4 104 1004", "5 105 1005", "7 117 9999", "9 109 1009",
"10 110 1010", "11 111 1011", "12 132 1012", "14 114 1014", "18 118 1018",
))
// For IndexLookUpReader
tk.MustQuery("select /*+ use_index(tmp1, u) */ * from tmp1 where u>101 order by u").Check(testkit.Rows(
"4 104 1004", "5 105 1005", "9 109 1009", "10 110 1010", "11 111 1011",
"3 113 1003", "14 114 1014", "7 117 9999", "18 118 1018", "12 132 1012",
))
tk.MustQuery("show warnings").Check(testkit.Rows())
// For IndexReader
tk.MustQuery("select /*+ use_index(tmp1, u) */ id,u from tmp1 where u>101 order by id").Check(testkit.Rows(
"3 113", "4 104", "5 105", "7 117", "9 109",
"10 110", "11 111", "12 132", "14 114", "18 118",
))
tk.MustQuery("show warnings").Check(testkit.Rows())
// For IndexMerge, temporary table should not use index merge
tk.MustQuery("select /*+ use_index_merge(tmp1, primary, u) */ * from tmp1 where id>5 or u>110 order by u").Check(testkit.Rows(
"9 109 1009", "10 110 1010", "11 111 1011",
"3 113 1003", "14 114 1014", "7 117 9999", "18 118 1018", "12 132 1012",
))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 IndexMerge is inapplicable or disabled. Cannot use IndexMerge on temporary table."))
}
assertSelectAsUnModified()
tk.MustExec("begin")
assertSelectAsUnModified()
doModify()
tk.MustExec("rollback")
assertSelectAsUnModified()
tk.MustExec("begin")
doModify()
assertSelectAsModified()
tk.MustExec("commit")
assertSelectAsModified()
}
// TestSchemaCheckerTempTable verifies that concurrent DDL on a global
// temporary table does not invalidate an open transaction in another session
// (with auto-retry disabled via SchemaChangedWithoutRetry), while DDL on a
// normal table touched by the transaction still fails the commit with
// ErrInfoSchemaChanged.
func TestSchemaCheckerTempTable(t *testing.T) {
store := testkit.CreateMockStoreWithSchemaLease(t, 1*time.Second)
tk1 := testkit.NewTestKit(t, store)
tk2 := testkit.NewTestKit(t, store)
tk1.MustExec("use test")
tk1.MustExec("set global tidb_enable_metadata_lock=0")
tk2.MustExec("use test")
// create table
tk1.MustExec(`drop table if exists normal_table`)
tk1.MustExec(`create table normal_table (id int, c int);`)
defer tk1.MustExec(`drop table if exists normal_table`)
tk1.MustExec(`drop table if exists temp_table`)
tk1.MustExec(`create global temporary table temp_table (id int primary key, c int) on commit delete rows;`)
defer tk1.MustExec(`drop table if exists temp_table`)
// The schema version is out of date in the first transaction, and the SQL can't be retried.
atomic.StoreUint32(&session.SchemaChangedWithoutRetry, 1)
defer func() {
atomic.StoreUint32(&session.SchemaChangedWithoutRetry, 0)
}()
// It's fine to change the schema of temporary tables.
tk1.MustExec(`begin;`)
tk2.MustExec(`alter table temp_table modify column c tinyint;`)
tk1.MustExec(`insert into temp_table values(3, 3);`)
tk1.MustExec(`commit;`)
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table temp_table modify column c int;`)
tk1.MustQuery(`select * from temp_table for update;`).Check(testkit.Rows())
tk1.MustExec(`commit;`)
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table temp_table modify column c smallint;`)
tk1.MustExec(`insert into temp_table values(3, 4);`)
tk1.MustQuery(`select * from temp_table for update;`).Check(testkit.Rows("3 4"))
tk1.MustExec(`commit;`)
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table temp_table modify column c bigint;`)
tk1.MustQuery(`select * from temp_table where id=1 for update;`).Check(testkit.Rows())
tk1.MustExec(`commit;`)
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table temp_table modify column c smallint;`)
tk1.MustExec("insert into temp_table values (1, 2), (2, 3), (4, 5)")
tk1.MustQuery(`select * from temp_table where id=1 for update;`).Check(testkit.Rows("1 2"))
tk1.MustExec(`commit;`)
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table temp_table modify column c int;`)
tk1.MustQuery(`select * from temp_table where id=1 for update;`).Check(testkit.Rows())
tk1.MustExec(`commit;`)
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table temp_table modify column c bigint;`)
tk1.MustQuery(`select * from temp_table where id in (1, 2, 3) for update;`).Check(testkit.Rows())
tk1.MustExec(`commit;`)
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table temp_table modify column c int;`)
tk1.MustExec("insert into temp_table values (1, 2), (2, 3), (4, 5)")
tk1.MustQuery(`select * from temp_table where id in (1, 2, 3) for update;`).Check(testkit.Rows("1 2", "2 3"))
tk1.MustExec(`commit;`)
tk1.MustExec("insert into normal_table values(1, 2)")
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table temp_table modify column c int;`)
tk1.MustExec(`insert into temp_table values(1, 5);`)
tk1.MustQuery(`select * from temp_table, normal_table where temp_table.id = normal_table.id for update;`).Check(testkit.Rows("1 5 1 2"))
tk1.MustExec(`commit;`)
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table normal_table modify column c bigint;`)
tk1.MustQuery(`select * from temp_table, normal_table where temp_table.id = normal_table.id for update;`).Check(testkit.Rows())
tk1.MustExec(`commit;`)
// Truncate will modify table ID.
tk1.MustExec(`begin;`)
tk2.MustExec(`truncate table temp_table;`)
tk1.MustExec(`insert into temp_table values(3, 3);`)
tk1.MustExec(`commit;`)
// It reports error when also changing the schema of a normal table.
tk1.MustExec(`begin;`)
tk2.MustExec(`alter table normal_table modify column c bigint;`)
tk1.MustExec(`insert into temp_table values(3, 3);`)
tk1.MustExec(`insert into normal_table values(3, 3);`)
err := tk1.ExecToErr(`commit;`)
require.True(t, terror.ErrorEqual(err, domain.ErrInfoSchemaChanged), fmt.Sprintf("err %v", err))
tk1.MustExec("begin pessimistic")
tk2.MustExec(`alter table normal_table modify column c int;`)
tk1.MustExec(`insert into temp_table values(1, 6);`)
tk1.MustQuery(`select * from temp_table, normal_table where temp_table.id = normal_table.id for update;`).Check(testkit.Rows("1 6 1 2"))
err = tk1.ExecToErr(`commit;`)
require.True(t, terror.ErrorEqual(err, domain.ErrInfoSchemaChanged), fmt.Sprintf("err %v", err))
}
// TestSameNameObjectWithLocalTemporaryTable verifies name shadowing: a local
// temporary table with the same name as an existing table, view, or sequence
// takes precedence in SHOW CREATE TABLE, while SHOW CREATE VIEW / SEQUENCE
// still resolve the original object until it is dropped.
func TestSameNameObjectWithLocalTemporaryTable(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("drop sequence if exists s1")
tk.MustExec("drop view if exists v1")
// prepare
tk.MustExec("create table t1 (a int)")
defer tk.MustExec("drop table if exists t1")
tk.MustQuery("show create table t1").Check(testkit.Rows(
"t1 CREATE TABLE `t1` (\n" +
" `a` int(11) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("create view v1 as select 1")
defer tk.MustExec("drop view if exists v1")
tk.MustQuery("show create view v1").Check(testkit.Rows("v1 CREATE ALGORITHM=UNDEFINED DEFINER=``@`` SQL SECURITY DEFINER VIEW `v1` (`1`) AS SELECT 1 AS `1` utf8mb4 utf8mb4_bin"))
tk.MustQuery("show create table v1").Check(testkit.Rows("v1 CREATE ALGORITHM=UNDEFINED DEFINER=``@`` SQL SECURITY DEFINER VIEW `v1` (`1`) AS SELECT 1 AS `1` utf8mb4 utf8mb4_bin"))
tk.MustExec("create sequence s1")
defer tk.MustExec("drop sequence if exists s1")
tk.MustQuery("show create sequence s1").Check(testkit.Rows("s1 CREATE SEQUENCE `s1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB"))
tk.MustQuery("show create table s1").Check(testkit.Rows("s1 CREATE SEQUENCE `s1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB"))
// temp table
// a local temporary table named like an existing object shadows it for
// SHOW CREATE TABLE, but not for SHOW CREATE VIEW / SHOW CREATE SEQUENCE
tk.MustExec("create temporary table t1 (ct1 int)")
tk.MustQuery("show create table t1").Check(testkit.Rows(
"t1 CREATE TEMPORARY TABLE `t1` (\n" +
" `ct1` int(11) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("create temporary table v1 (cv1 int)")
tk.MustQuery("show create view v1").Check(testkit.Rows("v1 CREATE ALGORITHM=UNDEFINED DEFINER=``@`` SQL SECURITY DEFINER VIEW `v1` (`1`) AS SELECT 1 AS `1` utf8mb4 utf8mb4_bin"))
tk.MustQuery("show create table v1").Check(testkit.Rows(
"v1 CREATE TEMPORARY TABLE `v1` (\n" +
" `cv1` int(11) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("create temporary table s1 (cs1 int)")
tk.MustQuery("show create sequence s1").Check(testkit.Rows("s1 CREATE SEQUENCE `s1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB"))
tk.MustQuery("show create table s1").Check(testkit.Rows(
"s1 CREATE TEMPORARY TABLE `s1` (\n" +
" `cs1` int(11) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// drop
// after dropping the original object, only the temporary table remains
tk.MustExec("drop view v1")
tk.MustGetErrMsg("show create view v1", "[schema:1146]Table 'test.v1' doesn't exist")
tk.MustQuery("show create table v1").Check(testkit.Rows(
"v1 CREATE TEMPORARY TABLE `v1` (\n" +
" `cv1` int(11) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("drop sequence s1")
tk.MustGetErrMsg("show create sequence s1", "[schema:1146]Table 'test.s1' doesn't exist")
tk.MustQuery("show create table s1").Check(testkit.Rows(
"s1 CREATE TEMPORARY TABLE `s1` (\n" +
" `cs1` int(11) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
}
// TestLocalTemporaryTableInsertIgnore exercises INSERT IGNORE against a local
// temporary table: a duplicate primary key produces a warning and leaves the
// existing row untouched, outside a transaction, inside a rolled-back
// transaction, and inside a committed one.
func TestLocalTemporaryTableInsertIgnore(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create temporary table tmp1 (id int primary key auto_increment, u int unique, v int)")
tk.MustExec("insert into tmp1 values(1, 11, 101)")
tk.MustExec("insert into tmp1 values(2, 12, 102)")
// test outside transaction
tk.MustExec("insert ignore into tmp1 values(1, 100, 1000)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'tmp1.PRIMARY'"))
tk.MustQuery("select * from tmp1 where id=1").Check(testkit.Rows("1 11 101"))
tk.MustExec("insert ignore into tmp1 values(5, 15, 105)")
tk.MustQuery("show warnings").Check(testkit.Rows())
tk.MustQuery("select * from tmp1 where id=5").Check(testkit.Rows("5 15 105"))
// test in transaction and rollback
tk.MustExec("begin")
tk.MustExec("insert ignore into tmp1 values(1, 100, 1000)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'tmp1.PRIMARY'"))
tk.MustQuery("select * from tmp1 where id=1").Check(testkit.Rows("1 11 101"))
tk.MustExec("insert ignore into tmp1 values(3, 13, 103)")
tk.MustQuery("show warnings").Check(testkit.Rows())
tk.MustQuery("select * from tmp1 where id=3").Check(testkit.Rows("3 13 103"))
// the row just inserted in this transaction is itself a duplicate target
tk.MustExec("insert ignore into tmp1 values(3, 100, 1000)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1062 Duplicate entry '3' for key 'tmp1.PRIMARY'"))
tk.MustQuery("select * from tmp1 where id=3").Check(testkit.Rows("3 13 103"))
tk.MustExec("rollback")
tk.MustQuery("select * from tmp1").Check(testkit.Rows("1 11 101", "2 12 102", "5 15 105"))
// test commit
tk.MustExec("begin")
tk.MustExec("insert ignore into tmp1 values(1, 100, 1000)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'tmp1.PRIMARY'"))
tk.MustExec("insert ignore into tmp1 values(3, 13, 103)")
tk.MustQuery("show warnings").Check(testkit.Rows())
tk.MustExec("insert ignore into tmp1 values(3, 100, 1000)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1062 Duplicate entry '3' for key 'tmp1.PRIMARY'"))
tk.MustExec("commit")
tk.MustQuery("select * from tmp1").Check(testkit.Rows("1 11 101", "2 12 102", "3 13 103", "5 15 105"))
}
|
package main
import (
"fmt"
)
// information_ring_nodes dumps every node registered in the global
// node_dictionary: its id, successor/predecessor links, non-empty finger
// table entries, and bucket contents. A value of -1 is printed as "nil".
func information_ring_nodes() {
	fmt.Println("LIST OF NODES")
	for id, n := range node_dictionary.node_dictionary {
		fmt.Println("START OF NODE INFORMATION")
		fmt.Printf("\n Node %d present in ring\n", id)
		fmt.Printf("Contents of Nodes %d", n.ChannelID)
		if n.Successor == -1 {
			fmt.Printf("Successor Id: nil\n")
		} else {
			fmt.Printf("Successor Id: %d\n", n.Successor)
		}
		if n.Predecessor == -1 {
			fmt.Printf("Predecessor Id: nil\n")
		} else {
			fmt.Printf("Predecessor Id: %d\n", n.Predecessor)
		}
		if n.Finger_table == nil {
			fmt.Println("Finger Table Empty")
		} else {
			for slot, entry := range n.Finger_table {
				if entry != -1 {
					fmt.Printf("Finger Table at %d is %d\n", slot, entry)
				}
			}
		}
		if n.Bucket == nil {
			fmt.Println("Bucket Empty")
		} else {
			for key, value := range n.Bucket {
				fmt.Println("Buckets: Data Key:", key, "&", "Data Value:", value)
			}
		}
		fmt.Println("END OF NODE INFORMATION")
	}
}
|
package model
import (
"time"
)
/*************************/
/*   Menu route struct   */
/*************************/
// Route models one menu-route record of an application, including its
// position in the menu tree (Parent), an ordering weight (Priority), and an
// arbitrary schema payload for parameter configuration.
type Route struct {
Id int `json:"id"` // record ID
AppId string `json:"appid" binding:"required,max=32"` // owning application
Name string `json:"name" binding:"required,min=1"` // display name
Route string `json:"item" binding:"required"` // route path
Type int `json:"action" binding:"required,min=1"` // route type
Parent int `json:"parent" binding:"required,min=-1"` // parent route id
Priority int `json:"priority" binding:"required"` // ordering weight
Schema interface{} `json:"schema"` // parameter configuration
SchemaTo string `json:"_schema"` // schema as returned by the API (presumably the serialized form of Schema — confirm with the handlers)
Remark string `json:"remark"` // description
CreatedAt time.Time `json:"created_at"` // creation time
UpdatedAt time.Time `json:"updated_at"` // last update time
}
// Menu Router query condition
// type RouteWhere struct {
// Name string `json:"name,omitempty"`
// CreatedAt DateRang `json:"created_at,omitempty"`
// UpdatedAt DateRang `json:"updated_at,omitempty"`
// }
// // 菜单列表查询体参数
// // Search menu list by condition
// type RouteQueryBody struct {
// QueryParams
// Where RouteWhere `json:"where"`
// }
// RouteUpdate carries the fields a client may change when updating an
// existing menu route (the "item" json tag maps to the route path).
type RouteUpdate struct {
Name string `json:"name"` // display name
Route string `json:"item"` // route path
Parent int `json:"parent" binding:"required"` // parent route id
Priority int `json:"priority"` // ordering weight
Schema interface{} `json:"schema"` // parameter configuration
}
package util
import (
"errors"
"regexp"
)
// ErrBadFormat is returned when an address fails the syntax check.
var ErrBadFormat = errors.New("invalid_email")

// emailRegexp accepts a dotted local part followed by '@' and one or more
// dot-separated domain labels of at most 63 characters each.
var emailRegexp = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")

// ValidateFormat reports whether email is syntactically valid; it returns
// ErrBadFormat when the pattern does not match, and nil otherwise.
func ValidateFormat(email string) error {
	if emailRegexp.MatchString(email) {
		return nil
	}
	return ErrBadFormat
}
|
package main
import "fmt"
// main prints the spiral traversal of two sample matrices: a 3x3 square and
// a 2x1 column vector.
func main() {
	samples := [][][]int{
		{
			{1, 2, 3},
			{4, 5, 6},
			{7, 8, 9},
		},
		{
			{3},
			{2},
		},
	}
	for _, m := range samples {
		fmt.Println(spiralOrder(m))
	}
}
// spiralOrder returns the elements of matrix in clockwise spiral order.
// It maintains four shrinking boundaries (row low/high, column low/high) and
// walks right, down, left, and up in turn, stopping as soon as the remaining
// window collapses.
func spiralOrder(matrix [][]int) []int {
	ans := make([]int, 0)
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return ans
	}
	rl, rh := 0, len(matrix)-1
	cl, ch := 0, len(matrix[0])-1
	for {
		// walk right along the top row
		for c := cl; c <= ch; c++ {
			ans = append(ans, matrix[rl][c])
		}
		rl++
		if rl > rh {
			break
		}
		// walk down the right column
		for r := rl; r <= rh; r++ {
			ans = append(ans, matrix[r][ch])
		}
		ch--
		// Compare against the moving left boundary, not 0: the original
		// `ch < 0` let a wide-but-short matrix (e.g. 2x3) re-visit cells.
		if ch < cl {
			break
		}
		// walk left along the bottom row
		for c := ch; c >= cl; c-- {
			ans = append(ans, matrix[rh][c])
		}
		rh--
		// Likewise compare against the moving top boundary, not 0.
		if rh < rl {
			break
		}
		// walk up the left column
		for r := rh; r >= rl; r-- {
			ans = append(ans, matrix[r][cl])
		}
		cl++
		if cl > ch {
			break
		}
	}
	return ans
}
|
package math
import "testing"
// TestAverage checks Average over integral and fractional float32 inputs
// (the second expectation bakes in float32 rounding).
func TestAverage(t *testing.T) {
	cases := []struct {
		name  string
		input []float32
		want  float32
	}{
		{name: "test 1", input: []float32{3, 4, 3, 2}, want: 3},
		{name: "test 2", input: []float32{1.5, 6.7, 2.6}, want: 3.5999997},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := Average(tc.input...); got != tc.want {
				t.Errorf("Average() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestMax checks that Max returns the largest of its float32 arguments.
func TestMax(t *testing.T) {
	cases := []struct {
		name  string
		input []float32
		want  float32
	}{
		{name: "test 1", input: []float32{3, 5, 7, 10}, want: 10},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := Max(tc.input...); got != tc.want {
				t.Errorf("Max() = %v, want %v", got, tc.want)
			}
		})
	}
}
func TestMin(t *testing.T) {
type args struct {
xs []float32
}
tests := []struct {
name string
args args
want float32
}{
// TODO: Add test cases.
{
name: "min test 1",
args: args{
xs: []float32{4.5 , 7, 9, 3.4},
},
want: 3.4,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := Min(tt.args.xs...); got != tt.want {
t.Errorf("Min() = %v, want %v", got, tt.want)
}
})
}
} |
package query
import (
"github.com/jinzhu/gorm"
)
// Query is a composable transformation applied to a *gorm.DB; chaining
// several of them builds up a query (WHERE, LIMIT, ORDER BY, ...) piecewise.
type Query func(db *gorm.DB) *gorm.DB
// Transform applies each Query in order to db and returns the resulting
// *gorm.DB handle.
func Transform(db *gorm.DB, queries ...Query) *gorm.DB {
	result := db
	for _, apply := range queries {
		result = apply(result)
	}
	return result
}
// Equal builds a Query that filters rows whose field equals value
// (parameterized, so value is escaped by the driver).
func Equal(field string, value interface{}) Query {
	condition := field + " = ?"
	return func(db *gorm.DB) *gorm.DB {
		return db.Where(condition, value)
	}
}
// Limit implements limit query.
func Limit(limit int64) Query {
	return func(db *gorm.DB) *gorm.DB {
		limited := db.Limit(limit)
		return limited
	}
}
// Offset implements offset query.
func Offset(offset int64) Query {
	return func(db *gorm.DB) *gorm.DB {
		shifted := db.Offset(offset)
		return shifted
	}
}
// OrderBy implements ORDER BY query.
func OrderBy(field string, asc bool) Query {
	direction := " DESC"
	if asc {
		direction = " ASC"
	}
	return func(db *gorm.DB) *gorm.DB {
		return db.Order(field + direction)
	}
}
|
package knothash
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestKnotHash checks KnotHash on the documented example input.
func TestKnotHash(t *testing.T) {
	assert := require.New(t)
	cases := []struct {
		alen    int
		lengths []byte
		result  int
	}{
		{5, []byte{3, 4, 1, 5}, 12},
	}
	for _, tc := range cases {
		out := KnotHash(tc.alen, tc.lengths)
		assert.Equal(tc.result, out[0]*out[1])
	}
}
// TestSolveKnotHash checks KnotHash against the puzzle input and known answer.
func TestSolveKnotHash(t *testing.T) {
	assert := require.New(t)
	lengths := []byte{106, 118, 236, 1, 130, 0, 235, 254, 59, 205, 2, 87, 129, 25, 255, 118}
	result := KnotHash(256, lengths)
	assert.Equal(6909, result[0]*result[1])
}
// TestHash checks Hash against the documented example vectors.
func TestHash(t *testing.T) {
	assert := require.New(t)
	cases := []struct {
		str  string
		want string
	}{
		{"", "a2582a3a0e66e6e86e3812dcb672a272"},
		{"AoC 2017", "33efeb34ea91902bb2f59c9920caa6cd"},
		{"1,2,3", "3efbe78a8d82f29979031a4aa0b16a9d"},
		{"1,2,4", "63960835bcdc130f0b66d7ff4f6a5a8e"},
	}
	for _, tc := range cases {
		assert.Equal(tc.want, Hash(256, tc.str))
	}
}
// TestSolveHash checks Hash against the puzzle input and known answer.
func TestSolveHash(t *testing.T) {
	assert := require.New(t)
	got := Hash(256, "106,118,236,1,130,0,235,254,59,205,2,87,129,25,255,118")
	assert.Equal("9d5f4561367d379cfbf04f8c471c0095", got)
}
|
// Good morning! Here's your coding interview problem for today.
// This problem was asked by Google.
// Given the root to a binary tree, implement serialize(root), which serializes the tree into a string, and deserialize(s), which deserializes the string back into the tree.
// For example, given the following Node class
// class Node:
// def __init__(self, val, left=None, right=None):
// self.val = val
// self.left = left
// self.right = right
// The following test should pass:
// node = Node('root', Node('left', Node('left.left')), Node('right'))
// assert deserialize(serialize(node)).left.left.val == 'left.left'
package problems
import (
"encoding/xml"
"fmt"
)
// ThreePrint returns a string representing the tree rooted at the provided Node.
func ThreePrint(node *Node) string {
	if node == nil {
		return ""
	}
	left := ThreePrint(node.Left)
	right := ThreePrint(node.Right)
	return fmt.Sprintf("(%s, l%s, r%s)", node.Val, left, right)
}
// ThreeSerialize returns an XML string representing the tree rooted at the
// provided Node. On marshal failure the error is printed and the (empty)
// output returned.
func ThreeSerialize(node *Node) string {
	data, err := xml.MarshalIndent(node, " ", " ")
	if err != nil {
		fmt.Printf("error: %v\n", err)
	}
	return string(data)
}
// ThreeDeserialize returns a Node that is the root of the tree represented by
// the provided XML string. On unmarshal failure the error is printed and the
// zero Node returned.
func ThreeDeserialize(node string) Node {
	var root Node
	if err := xml.Unmarshal([]byte(node), &root); err != nil {
		fmt.Printf("error: %v\n", err)
	}
	return root
}
|
package main
import (
"container/heap"
"fmt"
)
// MedianFinder tracks a stream of ints and reports their running median.
// max holds the smaller half as a max-heap, min holds the larger half as a
// min-heap; AddNum keeps len(max) == len(min) or len(max) == len(min)+1.
type MedianFinder struct {
	// max-heap over the smaller half of the values
	max *intHeap
	// min-heap over the larger half of the values
	min *intHeap
	// n counts how many values have been added
	n int
}
// Constructor initializes an empty MedianFinder.
func Constructor() MedianFinder {
	minHalf := &intHeap{min: true}
	maxHalf := &intHeap{min: false}
	return MedianFinder{min: minHalf, max: maxHalf}
}
// AddNum inserts num into the data structure.
func (mr *MedianFinder) AddNum(num int) {
	mr.n++
	// Route the value to the half it belongs to.
	if mr.max.Len() > 0 && num > mr.max.top() {
		heap.Push(mr.min, num)
	} else {
		heap.Push(mr.max, num)
	}
	// Rebalance: max must hold as many elements as min, or exactly one more.
	for mr.min.Len() > mr.max.Len() {
		heap.Push(mr.max, heap.Pop(mr.min).(int))
	}
	for mr.max.Len() > mr.min.Len()+1 {
		heap.Push(mr.min, heap.Pop(mr.max).(int))
	}
}
// FindMedian returns the median of all values added so far.
func (mr *MedianFinder) FindMedian() float64 {
	if mr.n%2 == 1 {
		// Odd count: the extra element always sits on the max side.
		return float64(mr.max.top())
	}
	lo, hi := mr.max.top(), mr.min.top()
	return float64(lo+hi) / 2
}
var _ heap.Interface = &intHeap{}

// intHeap is an int heap usable via container/heap. When min is true it
// behaves as a min-heap, otherwise as a max-heap.
type intHeap struct {
	items []int
	min   bool
}

// Len returns the number of stored elements.
// All methods use a pointer receiver for consistency with Push/Pop
// (the original mixed value and pointer receivers).
func (h *intHeap) Len() int {
	return len(h.items)
}

// Less orders ascending for a min-heap and descending for a max-heap.
func (h *intHeap) Less(i, j int) bool {
	if h.min {
		return h.items[i] < h.items[j]
	}
	return h.items[i] > h.items[j]
}

// Swap exchanges the elements at indexes i and j.
func (h *intHeap) Swap(i, j int) {
	h.items[i], h.items[j] = h.items[j], h.items[i]
}

// Push appends x to the backing slice; call via heap.Push, not directly.
func (h *intHeap) Push(x interface{}) {
	h.items = append(h.items, x.(int))
}

// Pop removes and returns the last element, or nil when empty; call via
// heap.Pop, not directly.
func (h *intHeap) Pop() interface{} {
	n := len(h.items)
	if n == 0 {
		return nil
	}
	top := h.items[n-1]
	h.items = h.items[:n-1]
	return top
}

// top returns the heap root without removing it, or 0 when empty.
func (h *intHeap) top() int {
	if len(h.items) == 0 {
		return 0
	}
	return h.items[0]
}
/**
 * Your MedianFinder object will be instantiated and called as such:
 * obj := Constructor();
 * obj.AddNum(num);
 * param_2 := obj.FindMedian();
 */

// main exercises MedianFinder on a tiny input stream.
func main() {
	finder := Constructor()
	finder.AddNum(1)
	finder.AddNum(2)
	finder.FindMedian()
	finder.AddNum(3)
	finder.FindMedian()

	cases := [][]int{
		{},
		{},
	}
	for i, c := range cases[0:] {
		fmt.Println("## case", i)
		// solve
		fmt.Println(c)
	}
}
|
//~0
//~1
//~2
//~3
//~4
//~5
//~6
//~7
//~8
//~9
package main
// main prints the integers 0 through 9, one per line.
func main() {
	for n := 0; n < 10; n++ {
		println(n)
	}
}
package ziface
/*
IRouter is the abstract routing interface.
All data handed to a router is an IRequest.
*/
type IRouter interface {
	// PreHandle is the hook called before the connection's main business logic.
	PreHandle(request IRequest)
	// Handler is the main method processing the connection's business logic.
	// NOTE(review): Handler/PostHandler are inconsistently named next to
	// PreHandle (Handle/PostHandle would match); renaming would break
	// implementers, so the names are kept.
	Handler(request IRequest)
	// PostHandler is the hook called after the connection's main business logic.
	PostHandler(request IRequest)
}
|
package main
import "fmt"
// main demonstrates reassigning a typed variable.
func main() {
	i := 10
	i = 30
	fmt.Println("i=", i)
	// i = 1.2 would not compile: assignment cannot change the declared type.
	// var i int = 60 would not compile: no redeclaration in the same scope.
}
|
package status
import (
"path/filepath"
"sort"
"github.com/go-task/task/v2/internal/execext"
"github.com/mattn/go-zglob"
)
// glob resolves each pattern in globs relative to dir, expands it via
// execext.Expand, and returns the sorted union of all matches. A pattern
// whose glob fails is skipped silently (best effort).
func glob(dir string, globs []string) ([]string, error) {
	var files []string
	for _, pattern := range globs {
		if !filepath.IsAbs(pattern) {
			pattern = filepath.Join(dir, pattern)
		}
		expanded, err := execext.Expand(pattern)
		if err != nil {
			return nil, err
		}
		// A glob that matches nothing is not fatal; move on.
		matches, err := zglob.Glob(expanded)
		if err != nil {
			continue
		}
		files = append(files, matches...)
	}
	sort.Strings(files)
	return files, nil
}
|
package repositories
import "github.com/ariel17/railgun/api/entities"
// DomainsRepository is the behaviour contract for all Domain's repository
// implementations.
type DomainsRepository interface {
	// GetByID returns the domain identified by id, or an error when it cannot be fetched.
	GetByID(id int64) (*entities.Domain, error)
	// GetByURL returns the domain matching url, or an error when it cannot be fetched.
	GetByURL(url string) (*entities.Domain, error)
	// Add persists a new domain.
	Add(domain *entities.Domain) error
	// Update persists changes to an existing domain.
	Update(domain *entities.Domain) error
	// DeleteByID removes the domain identified by id.
	DeleteByID(id int64) error
}
|
package crypto
import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"encoding/base64"
	"encoding/binary"
	"errors"
)
// Decryptor wraps the credentials and derived AES parameters used to decrypt
// WeChat open-platform messages.
// NOTE(review): the original comment referred to "MessageCrypter", which
// appears to be a stale name for this type.
type Decryptor struct {
	token string
	appId string
	key []byte
	iv []byte
}
// NewDecryptor creates a Decryptor.
//
// token is the Token configured on the WeChat open platform,
// encodingAESKey is the EncodingAESKey configured on the WeChat open
// platform, and appId identifies the application.
func NewDecryptor(appId, token, encodingAESKey string) (*Decryptor, error) {
	// EncodingAESKey is base64 without padding; restore the trailing "=".
	key, err := base64.StdEncoding.DecodeString(encodingAESKey + "=")
	if err != nil {
		return nil, err
	}
	if len(key) != 32 {
		return nil, ENCODING_AES_KEY_INVALID
	}
	return &Decryptor{
		token: token,
		appId: appId,
		key:   key,
		iv:    key[:16],
	}, nil
}
// Decrypt decrypts a base64-encoded ciphertext produced by the WeChat open
// platform.
//
// It returns the decrypted message body and the appId recovered from the
// ciphertext, or an error when the input is malformed.
func (decryptor Decryptor) Decrypt(text string) (decryptData []byte, appId string, err error) {
	deciphered, err := base64.StdEncoding.DecodeString(text)
	if err != nil {
		return nil, "", err
	}
	c, err := aes.NewCipher(decryptor.key)
	if err != nil {
		return nil, "", err
	}
	// CryptBlocks panics unless the input is a whole number of AES blocks;
	// validate instead of panicking on attacker-controlled input.
	if len(deciphered) == 0 || len(deciphered)%aes.BlockSize != 0 {
		return nil, "", errors.New("ciphertext length is not a multiple of the AES block size")
	}
	cbc := cipher.NewCBCDecrypter(c, decryptor.iv)
	cbc.CryptBlocks(deciphered, deciphered)
	// Layout after PKCS#7 unpadding: 16 random bytes | 4-byte big-endian
	// message length | message | appId.
	decoded := PKCS7Decode(deciphered)
	if len(decoded) < 20 {
		return nil, "", errors.New("decrypted payload is too short")
	}
	var msgLen int32
	if err := binary.Read(bytes.NewBuffer(decoded[16:20]), binary.BigEndian, &msgLen); err != nil {
		return nil, "", err
	}
	if msgLen < 0 || int(msgLen) > len(decoded)-20 {
		return nil, "", errors.New("decrypted payload declares an invalid message length")
	}
	decryptData = decoded[20 : 20+msgLen]
	appId = string(decoded[20+msgLen:])
	return decryptData, appId, nil
}
|
package goSolution
func sumOfFlooredPairs(nums []int) int {
m := max(nums...)
b := make([]int, m + 1)
for _, num := range nums {
b[num] += 1
}
s := GetPrefixSum(b)
ret := 0
for _, num := range nums {
if b[num] != 0 {
t := m / num
for j := 1; j <= t; j++ {
k := s[min((j + 1) * num, m + 1)] - s[j * num]
if k != 0 {
k = (k * j * b[num]) % MODULO
ret = (ret + k) % MODULO
}
}
b[num] = 0
}
}
return ret
}
|
// Package fail - geojson.go Gets data from servers
package main
import "encoding/json"
import "fmt"
// Geojson feature results
type Result struct {
Type string `json: "type"`
Features []interface{} `json: "features"`
}
// Geojson reports object
type Reports struct {
StatusCode float64 `json: "statusCode"`
Result Result `json: "result"`
}
// GetJSON fetches the JSON document at address and decodes it into a Reports
// value. On any failure the (possibly zero) Reports is returned with the error.
// TODO return json object proper
// TODO documentation standard
// TODO code layout
func GetJSON(address string) (Reports, error) {
	reports := Reports{}
	logger.Printf("Contacting server %s\n", address)
	// Send request.
	resp, err := client.Get(address)
	if err != nil {
		return reports, err
	}
	// Always release the response body.
	defer resp.Body.Close()
	// Decode the json stream straight off the body.
	if err := json.NewDecoder(resp.Body).Decode(&reports); err != nil {
		// NOTE(review): this both prints and returns the error; kept for
		// output compatibility, but one of the two should eventually go.
		fmt.Printf("Error %+v\n", err)
		return reports, err
	}
	return reports, nil
}
|
package main
import (
	"encoding/json"
	"io/ioutil"
	"log"
	"net/http"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)
// User is the JSON payload exchanged by the /user endpoints.
type User struct {
	Username string `json:"username"`
	Email string `json:"email"`
}
// main wires the HTTP routes and serves the API on port 3001.
func main() {
	r := mux.NewRouter()
	// 1 routing per 1 handler
	r.Handle("/health", HealthHandler).Methods("GET")
	r.Handle("/user", GetUserHandler).Methods("GET")
	r.Handle("/user", CreateUserHandler).Methods("POST")
	// CORS policy for cross-origin callers.
	corsObj := handlers.AllowedOrigins([]string{"*"})
	method := handlers.AllowedMethods([]string{"POST"})
	header := handlers.AllowedHeaders([]string{"Content-Type", "X-Requested-With"})
	// ListenAndServe only returns on failure; surface that instead of
	// silently exiting (the original discarded the error).
	log.Fatal(http.ListenAndServe(":3001", handlers.CORS(corsObj, method, header)(r)))
}
// HealthHandler return api health
var HealthHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//เรียกใช้ service อื่นที่ต้องใช้งาน
w.Write([]byte("API is up and running"))
})
// GetUserHandler returns a canned user encoded as JSON.
var GetUserHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	u := User{Username: "ploy", Email: "chonnikan@gmail.com"}
	payload, _ := json.Marshal(u)
	// Reply as JSON.
	w.Header().Set("Content-Type", "application/json")
	w.Write(payload)
})
// CreateUserHandler decodes a User from the request body and echoes a
// confirmation. The original ignored the ReadAll/Unmarshal errors, silently
// "creating" an empty user on malformed input; it now responds 400 instead.
var CreateUserHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	var user User
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "unable to read request body", http.StatusBadRequest)
		return
	}
	if err := json.Unmarshal(body, &user); err != nil {
		http.Error(w, "invalid JSON payload", http.StatusBadRequest)
		return
	}
	response := "created user: " + user.Username + " email: " + user.Email
	w.Write([]byte(response))
})
|
package database
import (
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
var (
DB *gorm.DB
)
// ConnectDB opens the database described by the hard-coded DSN and stores
// the handle in the package-level DB variable.
func ConnectDB() error {
	// TODO: Refactor dsn
	dsn := "host=localhost user=postgres password=postgres dbname=tgtc_user_coupon port=3306 sslmode=disable TimeZone=Asia/Shanghai"
	conn, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	if err != nil {
		return err
	}
	DB = conn
	return nil
}
|
package testing
import "github.com/selectel/go-selvpcclient/selvpcclient/resell/v2/quotas"
// TestGetAllQuotasResponseRaw represents a raw response from the GetAll request.
const TestGetAllQuotasResponseRaw = `
{
"quotas": {
"compute_cores": [
{
"region": "ru-1",
"value": 20,
"zone": "ru-1b"
},
{
"region": "ru-3",
"value": 12,
"zone": "ru-3a"
}
],
"image_gigabytes": [
{
"region": "ru-2",
"value": 8
},
{
"region": "ru-3",
"value": 24
}
]
}
}
`
// TestGetAllQuotasResponseSingleRaw represents a raw response with a single quota from the GetAll request.
const TestGetAllQuotasResponseSingleRaw = `
{
"quotas": {
"compute_cores": [
{
"region": "ru-1",
"value": 20,
"zone": "ru-1b"
}
]
}
}
`
// TestGetAllQuotasResponseSingle represents the unmarshalled TestGetAllQuotasResponseSingleRaw response.
var TestGetAllQuotasResponseSingle = []*quotas.Quota{
{
Name: "compute_cores",
ResourceQuotasEntities: []quotas.ResourceQuotaEntity{
{
Region: "ru-1",
Zone: "ru-1b",
Value: 20,
},
},
},
}
// TestGetFreeQuotasResponseRaw represents a raw response from the GetFree request.
const TestGetFreeQuotasResponseRaw = `
{
"quotas": {
"compute_cores": [
{
"region": "ru-2",
"value": 40,
"zone": "ru-2a"
},
{
"region": "ru-3",
"value": 100,
"zone": "ru-3a"
}
],
"compute_ram": [
{
"region": "ru-2",
"zone": "ru-2a",
"value": 2560
},
{
"region": "ru-3",
"zone": "ru-3a",
"value": 10240
}
]
}
}
`
// TestGetFreeQuotasResponseSingleRaw represents a raw response with a single quota from the GetFree request.
const TestGetFreeQuotasResponseSingleRaw = `
{
"quotas": {
"compute_cores": [
{
"region": "ru-2",
"value": 40,
"zone": "ru-2a"
}
]
}
}
`
// TestGetFreeQuotasResponseSingle represents the unmarshalled TestGetFreeQuotasResponseSingleRaw response.
var TestGetFreeQuotasResponseSingle = []*quotas.Quota{
{
Name: "compute_cores",
ResourceQuotasEntities: []quotas.ResourceQuotaEntity{
{
Region: "ru-2",
Zone: "ru-2a",
Value: 40,
},
},
},
}
// TestGetProjectsQuotasResponseRaw represents a raw response from the GetProjectsQuotas request.
const TestGetProjectsQuotasResponseRaw = `
{
"quotas": {
"c83243b3c18a4d109a5f0fe45336af85": {
"compute_cores": [
{
"region": "ru-2",
"value": 40,
"zone": "ru-2a"
},
{
"region": "ru-3",
"value": 100,
"zone": "ru-3a"
}
],
"compute_ram": [
{
"region": "ru-2",
"zone": "ru-2a",
"value": 2560
},
{
"region": "ru-3",
"zone": "ru-3a",
"value": 10240
}
]
},
"fe4cde3ee844415098edb570f381c190": {
"compute_cores": [
{
"region": "ru-1",
"value": 40,
"zone": "ru-1b"
}
],
"image_gigabytes": [
{
"region": "ru-1",
"value": 24
}
]
}
}
}
`
// TestGetProjectsQuotasResponseSingleRaw represents a raw response with a single quota from the GetProjectsQuotas request.
const TestGetProjectsQuotasResponseSingleRaw = `
{
"quotas": {
"c83243b3c18a4d109a5f0fe45336af85": {
"compute_cores": [
{
"region": "ru-2",
"value": 40,
"zone": "ru-2a"
}
]
}
}
}
`
// TestGetProjectsQuotasResponseSingle represents the unmarshalled TestGetProjectsQuotasResponseSingleRaw response.
var TestGetProjectsQuotasResponseSingle = []*quotas.ProjectQuota{
{
ID: "c83243b3c18a4d109a5f0fe45336af85",
ProjectQuotas: []quotas.Quota{
{
Name: "compute_cores",
ResourceQuotasEntities: []quotas.ResourceQuotaEntity{
{
Region: "ru-2",
Zone: "ru-2a",
Value: 40,
},
},
},
},
},
}
// TestGetProjectQuotasResponseRaw represents a raw response from the GetProject request.
const TestGetProjectQuotasResponseRaw = `
{
"quotas": {
"network_subnets_29_vrrp": [
{
"value": 1
}
],
"network_floatingips": [
{
"region": "ru-3",
"value": 2
}
]
}
}
`
// TestGetProjectQuotasResponseSingleRaw represents a raw response with a single quota from the GetProject request.
const TestGetProjectQuotasResponseSingleRaw = `
{
"quotas": {
"compute_ram": [
{
"region": "ru-3",
"value": 51200,
"zone": "ru-3a"
}
]
}
}
`
// TestGetProjectQuotasResponseSingle represents the unmarshalled TestGetProjectQuotasResponseSingleRaw response.
var TestGetProjectQuotasResponseSingle = []*quotas.Quota{
{
Name: "compute_ram",
ResourceQuotasEntities: []quotas.ResourceQuotaEntity{
{
Region: "ru-3",
Zone: "ru-3a",
Value: 51200,
},
},
},
}
var ramQuotaValue = 64000
// TestUpdateQuotasOpts represents options for the UpdateProjectQuotas request.
var TestUpdateQuotasOpts = quotas.UpdateProjectQuotasOpts{
QuotasOpts: []quotas.QuotaOpts{
{
Name: "compute_ram",
ResourceQuotasOpts: []quotas.ResourceQuotaOpts{
{
Region: "ru-2",
Zone: "ru-2a",
Value: &ramQuotaValue,
},
},
},
},
}
// TestUpdateQuotasOptsRaw represents unmarshalled options for the UpdateProjectQuotas request.
const TestUpdateQuotasOptsRaw = `
{
"quotas": {
"compute_ram": [
{
"region": "ru-2",
"value": 64000,
"zone": "ru-2a"
}
]
}
}
`
// TestUpdateProjectQuotasResponseRaw represents a raw response from the UpdateProjectQuotas request.
const TestUpdateProjectQuotasResponseRaw = `
{
"quotas": {
"compute_ram": [
{
"region": "ru-2",
"value": 64000,
"zone": "ru-2a"
}
]
}
}
`
// TestUpdateProjectQuotasResponse represents the unmarshalled TestUpdateProjectQuotasResponseRaw response.
var TestUpdateProjectQuotasResponse = []*quotas.Quota{
{
Name: "compute_ram",
ResourceQuotasEntities: []quotas.ResourceQuotaEntity{
{
Region: "ru-2",
Zone: "ru-2a",
Value: 64000,
},
},
},
}
// TestQuotasInvalidResponseRaw represents a raw invalid quotas response.
const TestQuotasInvalidResponseRaw = `
{
"quotas": {
111: [
{
"region": "ru-2",
"value": 64000,
"zone": "ru-2a"
}
]
}
}
`
// TestUpdateQuotasInvalidOptsRaw represents a raw request body without quotas.
const TestUpdateQuotasInvalidOptsRaw = `
{
"quotas": {}
}
`
// TestUpdateQuotasInvalidOpts represents update opts without quotas.
var TestUpdateQuotasInvalidOpts = quotas.UpdateProjectQuotasOpts{
QuotasOpts: []quotas.QuotaOpts{},
}
|
package git
/*
#include <git2.h>
*/
import "C"
import (
"runtime"
"unsafe"
)
type ReferenceType int
const (
ReferenceSymbolic ReferenceType = C.GIT_REF_SYMBOLIC
ReferenceOid ReferenceType = C.GIT_REF_OID
)
type Reference struct {
doNotCompare
ptr *C.git_reference
repo *Repository
}
type ReferenceCollection struct {
doNotCompare
repo *Repository
}
func (c *ReferenceCollection) Lookup(name string) (*Reference, error) {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
var ptr *C.git_reference
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ecode := C.git_reference_lookup(&ptr, c.repo.ptr, cname)
runtime.KeepAlive(c)
if ecode < 0 {
return nil, MakeGitError(ecode)
}
return newReferenceFromC(ptr, c.repo), nil
}
func (c *ReferenceCollection) Create(name string, id *Oid, force bool, msg string) (*Reference, error) {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
var cmsg *C.char
if msg == "" {
cmsg = nil
} else {
cmsg = C.CString(msg)
defer C.free(unsafe.Pointer(cmsg))
}
var ptr *C.git_reference
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ecode := C.git_reference_create(&ptr, c.repo.ptr, cname, id.toC(), cbool(force), cmsg)
runtime.KeepAlive(c)
runtime.KeepAlive(id)
if ecode < 0 {
return nil, MakeGitError(ecode)
}
return newReferenceFromC(ptr, c.repo), nil
}
func (c *ReferenceCollection) CreateSymbolic(name, target string, force bool, msg string) (*Reference, error) {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
ctarget := C.CString(target)
defer C.free(unsafe.Pointer(ctarget))
var cmsg *C.char
if msg == "" {
cmsg = nil
} else {
cmsg = C.CString(msg)
defer C.free(unsafe.Pointer(cmsg))
}
var ptr *C.git_reference
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ecode := C.git_reference_symbolic_create(&ptr, c.repo.ptr, cname, ctarget, cbool(force), cmsg)
runtime.KeepAlive(c)
if ecode < 0 {
return nil, MakeGitError(ecode)
}
return newReferenceFromC(ptr, c.repo), nil
}
// EnsureLog ensures that there is a reflog for the given reference
// name and creates an empty one if necessary.
func (c *ReferenceCollection) EnsureLog(name string) error {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_ensure_log(c.repo.ptr, cname)
runtime.KeepAlive(c)
if ret < 0 {
return MakeGitError(ret)
}
return nil
}
// HasLog returns whether there is a reflog for the given reference
// name
func (c *ReferenceCollection) HasLog(name string) (bool, error) {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_has_log(c.repo.ptr, cname)
runtime.KeepAlive(c)
if ret < 0 {
return false, MakeGitError(ret)
}
return ret == 1, nil
}
// Dwim looks up a reference by DWIMing its short name
func (c *ReferenceCollection) Dwim(name string) (*Reference, error) {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
runtime.LockOSThread()
defer runtime.UnlockOSThread()
var ptr *C.git_reference
ret := C.git_reference_dwim(&ptr, c.repo.ptr, cname)
runtime.KeepAlive(c)
if ret < 0 {
return nil, MakeGitError(ret)
}
return newReferenceFromC(ptr, c.repo), nil
}
func newReferenceFromC(ptr *C.git_reference, repo *Repository) *Reference {
ref := &Reference{ptr: ptr, repo: repo}
runtime.SetFinalizer(ref, (*Reference).Free)
return ref
}
func (v *Reference) SetSymbolicTarget(target string, msg string) (*Reference, error) {
var ptr *C.git_reference
ctarget := C.CString(target)
defer C.free(unsafe.Pointer(ctarget))
runtime.LockOSThread()
defer runtime.UnlockOSThread()
var cmsg *C.char
if msg == "" {
cmsg = nil
} else {
cmsg = C.CString(msg)
defer C.free(unsafe.Pointer(cmsg))
}
ret := C.git_reference_symbolic_set_target(&ptr, v.ptr, ctarget, cmsg)
runtime.KeepAlive(v)
if ret < 0 {
return nil, MakeGitError(ret)
}
return newReferenceFromC(ptr, v.repo), nil
}
func (v *Reference) SetTarget(target *Oid, msg string) (*Reference, error) {
var ptr *C.git_reference
runtime.LockOSThread()
defer runtime.UnlockOSThread()
var cmsg *C.char
if msg == "" {
cmsg = nil
} else {
cmsg = C.CString(msg)
defer C.free(unsafe.Pointer(cmsg))
}
ret := C.git_reference_set_target(&ptr, v.ptr, target.toC(), cmsg)
runtime.KeepAlive(v)
if ret < 0 {
return nil, MakeGitError(ret)
}
return newReferenceFromC(ptr, v.repo), nil
}
func (v *Reference) Resolve() (*Reference, error) {
var ptr *C.git_reference
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_resolve(&ptr, v.ptr)
runtime.KeepAlive(v)
if ret < 0 {
return nil, MakeGitError(ret)
}
return newReferenceFromC(ptr, v.repo), nil
}
func (v *Reference) Rename(name string, force bool, msg string) (*Reference, error) {
var ptr *C.git_reference
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
var cmsg *C.char
if msg == "" {
cmsg = nil
} else {
cmsg = C.CString(msg)
defer C.free(unsafe.Pointer(cmsg))
}
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_rename(&ptr, v.ptr, cname, cbool(force), cmsg)
runtime.KeepAlive(v)
if ret < 0 {
return nil, MakeGitError(ret)
}
return newReferenceFromC(ptr, v.repo), nil
}
func (v *Reference) Target() *Oid {
ret := newOidFromC(C.git_reference_target(v.ptr))
runtime.KeepAlive(v)
return ret
}
// SymbolicTarget returns the full name of the reference a symbolic reference
// points to, or "" for a direct reference.
func (v *Reference) SymbolicTarget() string {
	var ret string
	cstr := C.git_reference_symbolic_target(v.ptr)
	if cstr != nil {
		ret = C.GoString(cstr)
	}
	// KeepAlive must run after the last use of v.ptr. The previous version
	// returned early inside the if, skipping KeepAlive and allowing the
	// finalizer to free the underlying reference while cstr was being copied.
	runtime.KeepAlive(v)
	return ret
}
func (v *Reference) Delete() error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_delete(v.ptr)
runtime.KeepAlive(v)
if ret < 0 {
return MakeGitError(ret)
}
return nil
}
func (v *Reference) Peel(t ObjectType) (*Object, error) {
var cobj *C.git_object
runtime.LockOSThread()
defer runtime.UnlockOSThread()
err := C.git_reference_peel(&cobj, v.ptr, C.git_object_t(t))
runtime.KeepAlive(v)
if err < 0 {
return nil, MakeGitError(err)
}
return allocObject(cobj, v.repo), nil
}
// Owner returns a weak reference to the repository which owns this reference.
// This won't keep the underlying repository alive, but it should still be
// Freed.
func (v *Reference) Owner() *Repository {
repo := newRepositoryFromC(C.git_reference_owner(v.ptr))
runtime.KeepAlive(v)
repo.weak = true
return repo
}
// Cmp compares v to ref2. It returns 0 on equality, otherwise a
// stable sorting.
func (v *Reference) Cmp(ref2 *Reference) int {
ret := int(C.git_reference_cmp(v.ptr, ref2.ptr))
runtime.KeepAlive(v)
runtime.KeepAlive(ref2)
return ret
}
// Shorthand returns a "human-readable" short reference name.
func (v *Reference) Shorthand() string {
ret := C.GoString(C.git_reference_shorthand(v.ptr))
runtime.KeepAlive(v)
return ret
}
// Name returns the full name of v.
func (v *Reference) Name() string {
ret := C.GoString(C.git_reference_name(v.ptr))
runtime.KeepAlive(v)
return ret
}
func (v *Reference) Type() ReferenceType {
ret := ReferenceType(C.git_reference_type(v.ptr))
runtime.KeepAlive(v)
return ret
}
func (v *Reference) IsBranch() bool {
ret := C.git_reference_is_branch(v.ptr) == 1
runtime.KeepAlive(v)
return ret
}
func (v *Reference) IsRemote() bool {
ret := C.git_reference_is_remote(v.ptr) == 1
runtime.KeepAlive(v)
return ret
}
func (v *Reference) IsTag() bool {
ret := C.git_reference_is_tag(v.ptr) == 1
runtime.KeepAlive(v)
return ret
}
// IsNote checks if the reference is a note.
func (v *Reference) IsNote() bool {
ret := C.git_reference_is_note(v.ptr) == 1
runtime.KeepAlive(v)
return ret
}
func (v *Reference) Free() {
runtime.SetFinalizer(v, nil)
C.git_reference_free(v.ptr)
}
type ReferenceIterator struct {
doNotCompare
ptr *C.git_reference_iterator
repo *Repository
}
type ReferenceNameIterator struct {
doNotCompare
*ReferenceIterator
}
// NewReferenceIterator creates a new iterator over reference names
func (repo *Repository) NewReferenceIterator() (*ReferenceIterator, error) {
var ptr *C.git_reference_iterator
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_iterator_new(&ptr, repo.ptr)
if ret < 0 {
return nil, MakeGitError(ret)
}
return newReferenceIteratorFromC(ptr, repo), nil
}
// NewReferenceNameIterator creates a new iterator over reference names
func (repo *Repository) NewReferenceNameIterator() (*ReferenceNameIterator, error) {
var ptr *C.git_reference_iterator
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_iterator_new(&ptr, repo.ptr)
if ret < 0 {
return nil, MakeGitError(ret)
}
iter := newReferenceIteratorFromC(ptr, repo)
return iter.Names(), nil
}
// NewReferenceIteratorGlob creates an iterator over reference names
// that match the specified glob. The glob is of the usual fnmatch
// type.
func (repo *Repository) NewReferenceIteratorGlob(glob string) (*ReferenceIterator, error) {
cstr := C.CString(glob)
defer C.free(unsafe.Pointer(cstr))
var ptr *C.git_reference_iterator
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_iterator_glob_new(&ptr, repo.ptr, cstr)
if ret < 0 {
return nil, MakeGitError(ret)
}
return newReferenceIteratorFromC(ptr, repo), nil
}
func (i *ReferenceIterator) Names() *ReferenceNameIterator {
return &ReferenceNameIterator{ReferenceIterator: i}
}
// Next retrieves the next reference name. If the iteration is over,
// the returned error code is git.ErrorCodeIterOver
func (v *ReferenceNameIterator) Next() (string, error) {
var ptr *C.char
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_next_name(&ptr, v.ptr)
if ret < 0 {
return "", MakeGitError(ret)
}
return C.GoString(ptr), nil
}
// Next retrieves the next reference. If the iteration is over, the
// returned error code is git.ErrorCodeIterOver
func (v *ReferenceIterator) Next() (*Reference, error) {
var ptr *C.git_reference
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ret := C.git_reference_next(&ptr, v.ptr)
if ret < 0 {
return nil, MakeGitError(ret)
}
return newReferenceFromC(ptr, v.repo), nil
}
func newReferenceIteratorFromC(ptr *C.git_reference_iterator, r *Repository) *ReferenceIterator {
iter := &ReferenceIterator{
ptr: ptr,
repo: r,
}
runtime.SetFinalizer(iter, (*ReferenceIterator).Free)
return iter
}
// Free the reference iterator
func (v *ReferenceIterator) Free() {
runtime.SetFinalizer(v, nil)
C.git_reference_iterator_free(v.ptr)
}
// ReferenceNameIsValid returns whether the reference name is well-formed.
//
// Valid reference names must follow one of two patterns:
//
// 1. Top-level names must contain only capital letters and underscores,
// and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD").
//
// 2. Names prefixed with "refs/" can be almost anything. You must avoid
// the characters '~', '^', ':', ' \ ', '?', '[', and '*', and the sequences
// ".." and " @ {" which have special meaning to revparse.
func ReferenceNameIsValid(name string) (bool, error) {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
runtime.LockOSThread()
defer runtime.UnlockOSThread()
var valid C.int
ret := C.git_reference_name_is_valid(&valid, cname)
if ret < 0 {
return false, MakeGitError(ret)
}
return valid == 1, nil
}
const (
// This should match GIT_REFNAME_MAX in src/refs.h
_refnameMaxLength = C.size_t(1024)
)
type ReferenceFormat uint
const (
ReferenceFormatNormal ReferenceFormat = C.GIT_REFERENCE_FORMAT_NORMAL
ReferenceFormatAllowOnelevel ReferenceFormat = C.GIT_REFERENCE_FORMAT_ALLOW_ONELEVEL
ReferenceFormatRefspecPattern ReferenceFormat = C.GIT_REFERENCE_FORMAT_REFSPEC_PATTERN
ReferenceFormatRefspecShorthand ReferenceFormat = C.GIT_REFERENCE_FORMAT_REFSPEC_SHORTHAND
)
// ReferenceNormalizeName normalizes the reference name and checks validity.
//
// This will normalize the reference name by removing any leading slash '/'
// characters and collapsing runs of adjacent slashes between name components
// into a single slash.
//
// See git_reference_symbolic_create() for rules about valid names.
func ReferenceNormalizeName(name string, flags ReferenceFormat) (string, error) {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
buf := (*C.char)(C.malloc(_refnameMaxLength))
defer C.free(unsafe.Pointer(buf))
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ecode := C.git_reference_normalize_name(buf, _refnameMaxLength, cname, C.uint(flags))
if ecode < 0 {
return "", MakeGitError(ecode)
}
return C.GoString(buf), nil
}
|
package filter
import (
"errors"
"io"
"strings"
"github.com/sensu/sensu-go/cli"
"github.com/sensu/sensu-go/cli/commands/helpers"
"github.com/sensu/sensu-go/cli/elements/list"
"github.com/sensu/sensu-go/types"
"github.com/spf13/cobra"
)
// InfoCommand defines the 'filter info' subcommand
func InfoCommand(cli *cli.SensuCli) *cobra.Command {
	cmd := &cobra.Command{
		Use:          "info [NAME]",
		Short:        "show detailed filter information",
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			format, _ := cmd.Flags().GetString("format")
			// Exactly one filter name is required.
			if len(args) != 1 {
				_ = cmd.Help()
				return errors.New("invalid argument(s) received")
			}
			// Fetch the filter from the API.
			filter, err := cli.Client.FetchFilter(args[0])
			if err != nil {
				return err
			}
			if format == "json" {
				return helpers.PrintJSON(filter, cmd.OutOrStdout())
			}
			return printToList(filter, cmd.OutOrStdout())
		},
	}
	helpers.AddFormatFlag(cmd.Flags())
	return cmd
}
// printToList renders a filter as a labelled list on writer.
func printToList(filter *types.EventFilter, writer io.Writer) error {
	rows := []*list.Row{
		{Label: "Name", Value: filter.Name},
		{Label: "Action", Value: filter.Action},
		{Label: "Statements", Value: strings.Join(filter.Statements, " && ")},
		{Label: "Organization", Value: filter.Organization},
		{Label: "Environment", Value: filter.Environment},
	}
	cfg := &list.Config{
		Title: filter.Name,
		Rows:  rows,
	}
	return list.Print(writer, cfg)
}
|
package main
import (
"log"
"os"
"os/signal"
"syscall"
nats "github.com/nats-io/nats.go"
"github.com/nats-io/stan.go"
)
// main connects to a TLS-secured NATS streaming server, subscribes to the
// "sub" channel, and logs messages until SIGINT/SIGTERM is received.
func main() {
	opts := []nats.Option{
		nats.ClientCert("cert.pem", "key.pem"),
		nats.MaxReconnects(10),
	}
	nc, err := nats.Connect("tls://localhost:4443", opts...)
	if err != nil {
		log.Fatalln(err)
	}
	conn, err := stan.Connect(
		"clusterID",
		"clientID",
		stan.NatsConn(nc),
		stan.SetConnectionLostHandler(func(_ stan.Conn, reason error) {
			log.Fatalf("connection lost, reason: %v", reason)
		}))
	if err != nil {
		log.Fatalln(err)
	}
	sub, err := conn.Subscribe("sub", func(m *stan.Msg) {
		log.Printf("received a message: %s\n", string(m.Data))
	})
	if err != nil {
		log.Fatalln(err)
	}
	// signal.Notify does not block when delivering, so an unbuffered
	// channel can miss the signal; a buffer of 1 is required (go vet
	// flags the unbuffered form).
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	<-quit
	// Best-effort teardown; log instead of silently discarding the error.
	if err := sub.Unsubscribe(); err != nil {
		log.Println(err)
	}
	conn.Close()
}
|
package gogo
import (
"errors"
)
var (
	// Sentinel errors returned by the framework. Strings follow the Go
	// convention: lower case, no trailing punctuation (the originals were
	// capitalized sentences ending in "!").
	ErrHeaderFlushed = errors.New("response headers have been written")
	ErrConfigSection = errors.New("config section does not exist")
	ErrSettingsKey   = errors.New("settings key is duplicated")
	ErrHash          = errors.New("the hash function is not linked into the binary")
)
|
// +build integration
package tests
import (
"fmt"
"os/exec"
"regexp"
"strings"
"testing"
"github.com/deis/deis/tests/utils"
)
var (
limitsListCmd = "limits:list --app={{.AppName}}"
limitsSetMemCmd = "limits:set --app={{.AppName}} web=256M"
limitsSetCPUCmd = "limits:set --app={{.AppName}} -c web=512"
limitsUnsetMemCmd = "limits:unset --app={{.AppName}} --memory web"
limitsUnsetCPUCmd = "limits:unset --app={{.AppName}} -c web"
output1 = `(?s)"CpuShares": 512,.*"Memory": 0,`
output2 = `(?s)"CpuShares": 512,.*"Memory": 268435456,`
output3 = `(?s)"CpuShares": 0,.*"Memory": 268435456,`
output4 = `(?s)"CpuShares": 0,.*"Memory": 0,`
)
// limitsSetTest verifies that "deis limits:set" applies CPU and memory
// limits and that the app container is restarted with them.
func limitsSetTest(t *testing.T, cfg *utils.DeisTestConfig, ver int) {
	cpuCmd, memCmd := limitsSetCPUCmd, limitsSetMemCmd
	// regression test for https://github.com/deis/deis/issues/1563
	// previously the client would throw a stack trace with empty limits
	utils.Execute(t, limitsListCmd, cfg, false, "Unlimited")
	if strings.Contains(cfg.ExampleApp, "dockerfile") {
		cpuCmd = strings.Replace(cpuCmd, "web", "cmd", 1)
		memCmd = strings.Replace(memCmd, "web", "cmd", 1)
	}
	utils.Execute(t, cpuCmd, cfg, false, "512")
	out := dockerInspect(t, cfg, ver)
	// MatchString returns (matched, err); the old code only checked err,
	// so a non-matching inspect output never failed the test.
	if matched, err := regexp.MatchString(output1, out); err != nil {
		t.Fatal(err)
	} else if !matched {
		t.Fatalf("docker inspect output %q does not match %q", out, output1)
	}
	utils.Execute(t, limitsListCmd, cfg, false, "512")
	utils.Execute(t, memCmd, cfg, false, "256M")
	out = dockerInspect(t, cfg, ver+1)
	if matched, err := regexp.MatchString(output2, out); err != nil {
		t.Fatal(err)
	} else if !matched {
		t.Fatalf("docker inspect output %q does not match %q", out, output2)
	}
	utils.Execute(t, limitsListCmd, cfg, false, "256M")
}
// limitsUnsetTest removes the CPU and memory limits again and verifies via
// "docker inspect" that the container reverts to unlimited settings.
func limitsUnsetTest(t *testing.T, cfg *utils.DeisTestConfig, ver int) {
	cpuCmd, memCmd := limitsUnsetCPUCmd, limitsUnsetMemCmd
	// Dockerfile apps run a "cmd" process type instead of "web".
	if strings.Contains(cfg.ExampleApp, "dockerfile") {
		cpuCmd = strings.Replace(cpuCmd, "web", "cmd", 1)
		memCmd = strings.Replace(memCmd, "web", "cmd", 1)
	}
	utils.Execute(t, cpuCmd, cfg, false, "Unlimited")
	out := dockerInspect(t, cfg, ver)
	// Bug fix: MatchString returns (matched, err); the previous code only
	// checked err, so a non-matching inspect output never failed the test.
	matched, err := regexp.MatchString(output3, out)
	if err != nil {
		t.Fatal(err)
	}
	if !matched {
		t.Fatalf("docker inspect output did not match %q:\n%s", output3, out)
	}
	utils.Execute(t, limitsListCmd, cfg, false, "Unlimited")
	utils.Execute(t, memCmd, cfg, false, "Unlimited")
	// Unsetting a limit redeploys the app, so the release version advances.
	out = dockerInspect(t, cfg, ver+1)
	matched, err = regexp.MatchString(output4, out)
	if err != nil {
		t.Fatal(err)
	}
	if !matched {
		t.Fatalf("docker inspect output did not match %q:\n%s", output4, out)
	}
	utils.Execute(t, limitsListCmd, cfg, false, "Unlimited")
}
// dockerInspect creates an SSH session to the Deis controller and runs
// "docker inspect" on the first app container of release version ver,
// returning the raw inspect output. Any SSH failure aborts the test.
func dockerInspect(
	t *testing.T, cfg *utils.DeisTestConfig, ver int) string {
	cmd := fmt.Sprintf("docker inspect %s_v%d.web.1", cfg.AppName, ver)
	sshCmd := exec.Command("ssh",
		"-o", "StrictHostKeyChecking=no",
		"-o", "UserKnownHostsFile=/dev/null",
		"-o", "PasswordAuthentication=no",
		"core@deis."+cfg.Domain, cmd)
	out, err := sshCmd.Output()
	if err != nil {
		// Bug fix: t.Fatal(out, err) printed the []byte as a list of byte
		// values; convert to string so the failure message is readable.
		t.Fatal(string(out), err)
	}
	return string(out)
}
|
package main
import "fmt"
// main demonstrates slice splicing, rune slices, and appending an array's
// elements onto a slice.
func main() {
	words := []string{"Welcome", "for", "Tianjin", "Have", "a", "good", "journey"}
	extra := []string{"It", "is", "a", "big", "city"}
	const at = 3
	// Splice extra into words at index `at`; the inner append builds the
	// combined tail first, so the original tail is not clobbered.
	words = append(words[:at], append(extra, words[at:]...)...)
	fmt.Printf("result:%v\n", words)
	runes := []rune{'a', 'c', 'm', '龢'}
	fmt.Println(runes[1:])
	var zeros [10]rune
	runes = append(runes, zeros[:]...)
	fmt.Println(len(runes))
	fmt.Println(runes)
}
|
package scsprotov1
import (
"encoding/binary"
"math"
"time"
)
// handleMessage dispatches one inbound protocol message from a device,
// identified by cmd, and reacts: keepalives answer with a config PDU,
// time requests answer with the current Unix time, and the remaining
// commands are forwarded to the registered callbacks.
func (c *scsv1) handleMessage(cmd byte, deviceid string, payload []byte) {
	switch cmd {
	// Combined comma case replaces the previous case + fallthrough pair.
	case CMD_KEEPALIVE, CMD_KEEPALIVE_POSITION:
		model, _, err := readBinString(payload)
		if err != nil {
			panic(err)
		}
		version, _, err := readBinString(payload[1+len(model):])
		if err != nil {
			// Bug fix: this error was previously dropped, which could leave
			// version empty and mis-slice the position payload below.
			panic(err)
		}
		var lat float32
		var lng float32
		if cmd == CMD_KEEPALIVE_POSITION {
			// Skip the two length-prefixed strings to reach the coordinates.
			payload = payload[1+len(model)+1+len(version):]
			lat = math.Float32frombits(binary.LittleEndian.Uint32(payload[:4]))
			lng = math.Float32frombits(binary.LittleEndian.Uint32(payload[4:8]))
			c.log.Debugf("[%s] Keepalive with position, model %s version %s lat %f lng %f", deviceid, model, version,
				lat, lng)
		} else {
			c.log.Debugf("[%s] Keepalive, model %s version %s", deviceid, model, version)
		}
		c.cb.Alive(deviceid, model, version, false, lat, lng)
		sigma := c.cb.GetSigma(deviceid)
		// TODO: get config for updates
		cfgpayload := make([]byte, 1+4+1+1)
		cfgpayload[0] = CMD_CFG
		copy(cfgpayload[1:5], Float32bytes(sigma))
		// Hostname length
		cfgpayload[5] = 0
		// Path length
		cfgpayload[6] = 0
		c.mqttc.Publish("device-"+deviceid, 0, false, cfgpayload).Wait()
	case CMD_QUAKE:
		c.log.Debugf("[%s] QUAKE", deviceid)
		c.cb.Quake(deviceid)
	case CMD_TIMEREQ:
		tm := make([]byte, 1+4)
		tm[0] = CMD_TIMEREPLY
		binary.LittleEndian.PutUint32(tm[1:], uint32(time.Now().Unix()))
		c.mqttc.Publish("device-"+deviceid, 0, false, tm).Wait()
		c.log.Debugf("[%s] Time requested and sent", deviceid)
	case CMD_DISC_WILL:
		c.cb.Disconnect(deviceid)
	case CMD_TEMP:
		floatsize := payload[0]
		// Only 4-byte (float32) temperatures are supported; other sizes
		// are silently ignored, matching the original behavior.
		if floatsize == 4 {
			payload = payload[1:]
			temperature := math.Float32frombits(binary.LittleEndian.Uint32(payload))
			c.log.Debugf("[%s] Temperature received: %f", deviceid, temperature)
			c.cb.TemperatureReceived(deviceid, temperature)
		}
	default:
		c.log.Debugf("[%s] Message not implemented: %d", deviceid, cmd)
	}
}
|
package data
import (
"time"
)
// Products is the persistence model for a store product, including its
// pricing, popularity counters, and rendered description assets.
type Products struct {
	Id            int
	Name          string
	Summary       string
	Price         float64
	Sold          int
	Comments      string
	Score         float64
	Collected     int
	Category      int
	Specification string    // specification JSON string (options offered to the user)
	Squarepic     string    // filename of the square thumbnail image
	Carouselpics  string    // comma-separated filenames of the carousel images
	Params        string    // specification parameters (HTML)
	Detail        string    // product detail page content (HTML)
	CreateTime    time.Time // creation time
	LastEditTime  time.Time // last modification time
}
|
package hateoas
import ()
// Error is the error representation returned to REST hateoas clients.
// It implements the error interface via Error().
type Error struct {
	Status           int    `json:"status"`             // HTTP status code
	Code             int    `json:"code"`               // application-specific error code
	Property         string `json:"property,omitempty"` // offending property, when applicable
	Message          string `json:"message"`            // user-facing message
	DeveloperMessage string `json:"developerMessage"`   // diagnostic message for developers
	MoreInfo         string `json:"moreInfo,omitempty"` // pointer to further documentation
}

// Error returns the developer-facing message, satisfying the error interface.
func (e *Error) Error() string {
	return e.DeveloperMessage
}
// Url represents an HREF link.
type Url string

// Page is the paginated collection representation returned to REST
// hateoas clients, with first/previous/next/last navigation links.
type Page struct {
	Href   Url
	Offset int
	Limit  int
	// Navigation links are nil when the corresponding page does not exist.
	First      *Url
	Previous   *Url
	Next       *Url
	Last       *Url
	Items      []hateoasResource
	TotalItems int
}

// PageOpts holds the page options extracted from the GET parameters.
type PageOpts struct {
	Offset int
	Limit  int
}

// Resource is the abstract interface for a REST resource.
type Resource interface {
	GetId() string
}

// ResourceHandler is the abstract interface for a REST resource handler:
// CRUD operations plus listing and counting for one resource type.
type ResourceHandler interface {
	ResourceName() string
	GetOne(string) (Resource, *Error)
	GetAll(PageOpts) ([]Resource, *Error)
	Create(Resource) (string, *Error)
	Update(string, Resource) (Resource, *Error)
	Delete(string) *Error
	Count() (int, *Error)
}
|
package testdata
import (
"time"
"github.com/frk/gosql/internal/testdata/common"
)
// SelectWithWhereBlockBetweenQuery is a test fixture exercising the
// "isbetween" predicate in a WHERE block.
// NOTE(review): the struct tags are consumed by the gosql analyzer;
// presumably changing them requires updating the expected test output.
type SelectWithWhereBlockBetweenQuery struct {
	Users []*common.User `rel:"test_user:u"`
	Where struct {
		// After/Before supply the x and y bounds of the between predicate
		// on u.created_at.
		CreatedAt struct {
			After  time.Time `sql:"x"`
			Before time.Time `sql:"y"`
		} `sql:"u.created_at isbetween"`
	}
}
|
package helm
import (
"fmt"
"strings"
"time"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kumahq/kuma/pkg/config/core"
. "github.com/kumahq/kuma/test/framework"
"github.com/kumahq/kuma/test/framework/deployments/testserver"
)
// AppDeploymentWithHelmChart is a ginkgo suite body: it installs Kuma via
// its HELM chart on a single k8s cluster (HA control plane, CNI enabled),
// deploys a demo client and a test server into a sidecar-injected
// namespace, and verifies service-to-service connectivity through the mesh.
// NOTE(review): raw-string YAML indentation below is restored to the
// conventional layout; confirm against the upstream file.
func AppDeploymentWithHelmChart() {
	// namespaceWithSidecarInjection renders a Namespace manifest annotated
	// so that Kuma injects sidecars into its pods.
	namespaceWithSidecarInjection := func(namespace string) string {
		return fmt.Sprintf(`
apiVersion: v1
kind: Namespace
metadata:
  name: %s
  annotations:
    kuma.io/sidecar-injection: "enabled"
`, namespace)
	}
	defaultMesh := `
apiVersion: kuma.io/v1alpha1
kind: Mesh
metadata:
  name: default
`
	var cluster Cluster
	var deployOptsFuncs = KumaK8sDeployOpts
	BeforeEach(func() {
		c, err := NewK8sClusterWithTimeout(
			NewTestingT(),
			Kuma1,
			Silent,
			6*time.Second)
		Expect(err).ToNot(HaveOccurred())
		cluster = c.WithRetries(60)
		// Unique release name so repeated runs do not collide.
		releaseName := fmt.Sprintf(
			"kuma-%s",
			strings.ToLower(random.UniqueId()),
		)
		deployOptsFuncs = append(deployOptsFuncs,
			WithInstallationMode(HelmInstallationMode),
			WithHelmReleaseName(releaseName),
			WithSkipDefaultMesh(true), // it's common case for HELM deployments that Mesh is also managed by HELM therefore it's not created by default
			WithCPReplicas(3),         // test HA capability
			WithCNI())
		// Install the control plane and the default mesh first...
		err = NewClusterSetup().
			Install(Kuma(core.Standalone, deployOptsFuncs...)).
			Install(YamlK8s(defaultMesh)).
			Setup(cluster)
		Expect(err).ToNot(HaveOccurred())
		// ...then the test workloads in a sidecar-injected namespace.
		err = NewClusterSetup().
			Install(YamlK8s(namespaceWithSidecarInjection(TestNamespace))).
			Install(DemoClientK8s("default")).
			Install(testserver.Install()).
			Setup(cluster)
		Expect(err).ToNot(HaveOccurred())
	})
	AfterEach(func() {
		if ShouldSkipCleanup() {
			return
		}
		// tear down apps
		Expect(cluster.DeleteNamespace(TestNamespace)).To(Succeed())
		// tear down Kuma
		Expect(cluster.DeleteKuma(deployOptsFuncs...)).To(Succeed())
		// tear down cluster
		Expect(cluster.DismissCluster()).To(Succeed())
	})
	It("Should deploy two apps", func() {
		// Locate the single demo-client pod to exec curl from.
		pods, err := k8s.ListPodsE(
			cluster.GetTesting(),
			cluster.GetKubectlOptions(TestNamespace),
			metav1.ListOptions{
				LabelSelector: fmt.Sprintf("app=%s", "demo-client"),
			},
		)
		Expect(err).ToNot(HaveOccurred())
		Expect(pods).To(HaveLen(1))
		clientPod := pods[0]
		// curl writes its status line to stderr with -v, hence stderr checks.
		Eventually(func() (string, error) {
			_, stderr, err := cluster.ExecWithRetries(TestNamespace, clientPod.GetName(), "demo-client",
				"curl", "-v", "-m", "3", "--fail", "test-server")
			return stderr, err
		}, "10s", "1s").Should(ContainSubstring("HTTP/1.1 200 OK"))
		Eventually(func() (string, error) {
			_, stderr, err := cluster.ExecWithRetries(TestNamespace, clientPod.GetName(), "demo-client",
				"curl", "-v", "-m", "3", "--fail", "test-server_kuma-test_svc_80.mesh")
			return stderr, err
		}, "10s", "1s").Should(ContainSubstring("HTTP/1.1 200 OK"))
		Eventually(func() (string, error) { // should access a service with . instead of _
			_, stderr, err := cluster.ExecWithRetries(TestNamespace, clientPod.GetName(), "demo-client",
				"curl", "-v", "-m", "3", "--fail", "test-server.kuma-test.svc.80.mesh")
			return stderr, err
		}, "10s", "1s").Should(ContainSubstring("HTTP/1.1 200 OK"))
	})
}
|
package main
import "fmt"
//func main() {
// sum := sum(7,8,8)
// fmt.Print(sum)
//}
//
//func sum(params ...int) int {
// sum :=0;
// for _,i :=range params {
// sum +=i
// }
// return sum
//}
// main demonstrates closure state: one closure instance keeps its counter
// across calls, while printing the function value itself (not a call)
// shows a function address.
func main() {
	next := colsure()
	for i := 0; i < 3; i++ {
		fmt.Println(next())
	}
	// Each expression below creates a fresh closure and prints it as a
	// function value rather than invoking it.
	for i := 0; i < 3; i++ {
		fmt.Println(colsure())
	}
}
// colsure returns a counter closure: every invocation of the returned
// function increments and yields an internal counter starting at 1.
// (The misspelled name is kept for compatibility with existing callers.)
func colsure() func() int {
	count := 0
	return func() int {
		count++
		return count
	}
}
|
package rtrserver
import (
"bytes"
"encoding/binary"
"errors"
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/jsonutil"
)
// ParseToSerialNotify decodes the remainder of an RTR Serial Notify PDU
// (after the header fields already consumed by the caller): a 2-byte
// session id, a 4-byte total length that must equal 12, and a 4-byte
// serial number, all big-endian. On any failure it returns an RtrError
// carrying the CORRUPT_DATA error code so the caller can answer with an
// Error Report PDU.
func ParseToSerialNotify(buf *bytes.Reader, protocolVersion uint8) (rtrPduModel RtrPduModel, err error) {
	var sessionId uint16
	var serialNumber uint32
	var length uint32
	// get sessionId
	err = binary.Read(buf, binary.BigEndian, &sessionId)
	if err != nil {
		belogs.Error("ParseToSerialNotify(): PDU_TYPE_SERIAL_NOTIFY get sessionId fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get SessionId")
		return rtrPduModel, rtrError
	}
	// get length
	err = binary.Read(buf, binary.BigEndian, &length)
	if err != nil {
		belogs.Error("ParseToSerialNotify(): PDU_TYPE_SERIAL_NOTIFY get length fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get length")
		return rtrPduModel, rtrError
	}
	// A Serial Notify PDU has a fixed total length of 12 octets.
	if length != 12 {
		belogs.Error("ParseToSerialNotify():PDU_TYPE_SERIAL_NOTIFY, length must be 12, buf:", buf, " length:", length)
		rtrError := NewRtrError(
			errors.New("pduType is SERIAL NOTIFY, length must be 12"),
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get length")
		return rtrPduModel, rtrError
	}
	// get serialNumber
	err = binary.Read(buf, binary.BigEndian, &serialNumber)
	if err != nil {
		belogs.Error("ParseToSerialNotify(): PDU_TYPE_SERIAL_NOTIFY get serialNumber fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get serialNumber")
		return rtrPduModel, rtrError
	}
	sq := NewRtrSerialNotifyModel(protocolVersion, sessionId, serialNumber)
	belogs.Debug("ParseToSerialNotify():get PDU_TYPE_SERIAL_NOTIFY, buf:", buf, " sq:", jsonutil.MarshalJson(sq))
	return sq, nil
}
// ProcessSerialNotify builds a Serial Notify PDU model from the session id
// and serial number currently stored in the database. A database failure is
// wrapped in an RtrError with the INTERNAL_ERROR code.
func ProcessSerialNotify(protocolVersion uint8) (rtrPduModel RtrPduModel, err error) {
	sessionId, serialNumber, dbErr := getSessionIdAndSerialNumberDb()
	if dbErr != nil {
		belogs.Error("ProcessSerialNotify():getSessionIdAndSerialNumberDb fail:", dbErr)
		return rtrPduModel, NewRtrError(
			dbErr,
			false, protocolVersion, PDU_TYPE_ERROR_CODE_INTERNAL_ERROR,
			nil, "")
	}
	notify := NewRtrSerialNotifyModel(protocolVersion, sessionId, serialNumber)
	belogs.Debug("ProcessSerialNotify(): rtrSerialNotifyModel : ", jsonutil.MarshalJson(notify))
	return notify, nil
}
|
package auth
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01200101 is the XML document wrapper for the ISO 20022
// auth.012.001.01 message.
type Document01200101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:auth.012.001.01 Document"`
	Message *MoneyMarketSecuredMarketStatisticalReportV01 `xml:"MnyMktScrdMktSttstclRpt"`
}

// AddMessage allocates the document's message payload and returns it for
// further population.
func (d *Document01200101) AddMessage() *MoneyMarketSecuredMarketStatisticalReportV01 {
	d.Message = new(MoneyMarketSecuredMarketStatisticalReportV01)
	return d.Message
}

// The MoneyMarketSecuredMarketStatisticalReport message is sent by the reporting agents to the relevant competent authority, to report all relevant secured money market transactions.
type MoneyMarketSecuredMarketStatisticalReportV01 struct {

	// Provides the elements specific to the report.
	ReportHeader *iso20022.MoneyMarketReportHeader1 `xml:"RptHdr"`

	// Provides the reason why no activity is reported or the required list of transactions for the secured market segment.
	SecuredMarketReport *iso20022.SecuredMarketReport3Choice `xml:"ScrdMktRpt"`

	// Additional information that can not be captured in the structured fields and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}

// AddReportHeader allocates and returns the report header element.
func (m *MoneyMarketSecuredMarketStatisticalReportV01) AddReportHeader() *iso20022.MoneyMarketReportHeader1 {
	m.ReportHeader = new(iso20022.MoneyMarketReportHeader1)
	return m.ReportHeader
}

// AddSecuredMarketReport allocates and returns the secured market report element.
func (m *MoneyMarketSecuredMarketStatisticalReportV01) AddSecuredMarketReport() *iso20022.SecuredMarketReport3Choice {
	m.SecuredMarketReport = new(iso20022.SecuredMarketReport3Choice)
	return m.SecuredMarketReport
}

// AddSupplementaryData appends and returns a fresh supplementary data entry.
func (m *MoneyMarketSecuredMarketStatisticalReportV01) AddSupplementaryData() *iso20022.SupplementaryData1 {
	newValue := new(iso20022.SupplementaryData1)
	m.SupplementaryData = append(m.SupplementaryData, newValue)
	return newValue
}
|
package producers
import (
"errors"
"os"
"testing"
"time"
c "github.com/pedromss/kafli/config"
"github.com/pedromss/kafli/contracts"
"github.com/pedromss/kafli/model"
)
// TestProduceMode verifies that produce() routes to exactly one of the two
// handlers: the throttled producer when the config carries a Rate, and the
// burst producer otherwise. Stub handlers record whether they were invoked.
func TestProduceMode(t *testing.T) {
	// checker records whether a stub handler was called.
	type checker struct {
		called bool
	}
	setChecker := func(idx int, checker *checker) {
		// Guard against a handler being invoked twice (or a reused checker).
		if checker.called != false {
			t.Errorf("Case %d | Checker was already set. Unsuited for this test", idx)
		}
		checker.called = true
	}
	// burst builds a BurstProducer stub that only flips its checker.
	burst := func(idx int, checker *checker) BurstProducer {
		return func(p contracts.Producer, ch chan *model.RecordToSend) {
			setChecker(idx, checker)
		}
	}
	// throttle builds a ThrottledProducer stub that only flips its checker.
	throttle := func(idx int, checker *checker) ThrottledProducer {
		return func(p contracts.Producer, ch chan *model.RecordToSend, ticker <-chan time.Time, stopper func()) {
			setChecker(idx, checker)
		}
	}
	oneMinute := time.Minute
	var tests = []struct {
		conf               *c.AppConf
		burstHandler       BurstProducer
		throttledHandler   ThrottledProducer
		burstChecker       *checker
		throttleChecker    *checker
		shouldCallBurst    bool
		shouldCallThrottle bool
	}{
		// Rate present -> throttled path.
		{
			conf:               &c.AppConf{Rate: &model.Rate{Duration: &oneMinute}, Ticker: time.NewTicker(time.Second)},
			burstChecker:       &checker{},
			throttleChecker:    &checker{},
			shouldCallBurst:    false,
			shouldCallThrottle: true,
		},
		// No rate -> burst path.
		{
			conf:               &c.AppConf{Rate: nil},
			burstChecker:       &checker{},
			throttleChecker:    &checker{},
			shouldCallBurst:    true,
			shouldCallThrottle: false,
		},
	}
	for i, testCase := range tests {
		// Sanity-check the table: exactly one handler must be expected.
		if testCase.shouldCallBurst == testCase.shouldCallThrottle {
			t.Errorf("Case %d | Impossible test case. Call burst and call throttle must be different", i)
		}
		testCase.burstHandler = burst(i, testCase.burstChecker)
		testCase.throttledHandler = throttle(i, testCase.throttleChecker)
		produce(nil, testCase.conf, nil, testCase.burstHandler, testCase.throttledHandler)
		if testCase.shouldCallBurst != testCase.burstChecker.called {
			t.Errorf("Case %d | Expected burst handler to be called", i)
		}
		if testCase.shouldCallThrottle != testCase.throttleChecker.called {
			t.Errorf("Case %d | Expected throttled handler to be called", i)
		}
	}
}
// TestPrepareInputData verifies that prepareInputData surfaces an error
// exactly when the configuration has no input source or when the extractor
// itself fails.
func TestPrepareInputData(t *testing.T) {
	okExtractor := func(*c.AppConf) (chan *model.RecordToSend, error) {
		return nil, nil // not meant to be used
	}
	failingExtractor := func(*c.AppConf) (chan *model.RecordToSend, error) {
		return nil, errors.New("Expected error for test")
	}
	cases := []struct {
		conf      *c.AppConf
		extractor InputExtractor
		wantErr   bool
	}{
		{&c.AppConf{In: nil}, okExtractor, true},
		{&c.AppConf{In: os.Stdin}, okExtractor, false},
		{&c.AppConf{In: os.Stdin}, failingExtractor, true},
	}
	for i, tc := range cases {
		if _, err := prepareInputData(tc.conf, tc.extractor); (err != nil) != tc.wantErr {
			t.Errorf("Case %d | Error condition not met. Expected error: %t, but was the oposite", i, tc.wantErr)
		}
	}
}
|
package main
import (
"fmt"
"testing"
"time"
)
// TestNewClock exercises NewClock construction.
// Bug fix: `clock := NewClock()` with the result never referenced is a
// compile error in Go ("declared and not used"); logging the value keeps
// the declaration used until the mock comparison below is implemented.
// This also makes the file's fmt and time imports used.
func TestNewClock(t *testing.T) {
	clock := NewClock()
	// TODO: finish mocking clock and compare the times
	t.Log(fmt.Sprintf("clock created at %s: %v", time.Now().Format(time.RFC3339), clock))
}

// TestLoadSound is a placeholder; sound loading is not covered yet.
func TestLoadSound(t *testing.T) {
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
)
// gen performs a full site build: it loads the base template and all
// content from disk, renders every live page to HTML, and copies static
// assets into the build directory. Any failure aborts the process via die.
func gen() {
	loadBaseTemplate()
	errMsg := loadAllFromDisk()
	if errMsg != "" {
		die(errMsg)
	}
	// TODO fix me
	generateAndWriteHTML()
	// And the rest should work
	errMsg = copyStatic()
	if errMsg != "" {
		die(errMsg)
	}
}
// TODO MUST be combined with local functions!
// generateAndWriteHTML renders each live page from the global pages index
// into validated HTML and writes it under the page's build directory.
// Draft pages (Meta.Live == false) are skipped; any render, validation, or
// write failure aborts the process via die.
func generateAndWriteHTML() {
	// Loop over all pages in the index:
	for _, p := range pages {
		if !p.Meta.Live {
			log.Printf("Skipping draft page %s", p.FullPath)
			continue
		}
		// Make this file's base dir (no error if already exists,
		// same behavior as mkdir -p)
		err := os.MkdirAll(p.BuildDir, 0755)
		if err != nil {
			die("Failed to create dir %s", err) // Haven't seen this happen
		}
		finalPage := renderHTML(templateData(p))
		err = validateHTML(finalPage)
		if err != nil {
			die("%s Resulted in invalid html (%s)", p.FullPath, err)
		}
		// Pretty print after we validate since this stupid lib won't check crap!
		// Don't use this for now as it screws up the pre formatted code blocks
		// finalPage = gohtml.FormatBytes(finalPage)
		err = ioutil.WriteFile(p.BuildFullPath, finalPage, 0644)
		if err != nil {
			die("Couldn't write file (%s), err: (%v)", p.BuildFullPath, err)
		}
	}
}
// copyStatic copies ./static into the build directory by shelling out to
// the system cp command (which also creates the target directory).
// It returns an empty string on success, or an error description.
func copyStatic() string {
	// This is lazy but leverage system cp. Will also create dir
	if err := exec.Command("cp", "-r", "./static", "build").Run(); err != nil {
		return fmt.Sprintf("Failed to copyStatic using system cp cmd: (%v)", err)
	}
	return ""
}
|
package middleware
import (
"fmt"
"net/http"
jwt "github.com/dgrijalva/jwt-go"
"github.com/dgrijalva/jwt-go/request"
"github.com/looyun/feedall/controllers"
"github.com/looyun/feedall/models"
macaron "gopkg.in/macaron.v1"
"gopkg.in/mgo.v2/bson"
)
// ValidateJWTToken returns a macaron middleware that authenticates the
// request via a JWT bearer token (Authorization header). On success the
// matching user document is stored in ctx.Data["user"] and the chain
// continues; any failure answers 401 Unauthorized and stops the chain.
func ValidateJWTToken() macaron.Handler {
	return func(ctx *macaron.Context) {
		token, err := request.ParseFromRequest(ctx.Req.Request, request.AuthorizationHeaderExtractor,
			func(token *jwt.Token) (interface{}, error) {
				return controllers.TokenSecure, nil
			})
		if err != nil {
			fmt.Println(err)
			ctx.Status(http.StatusUnauthorized)
			return
		}
		if !token.Valid {
			fmt.Println("Token not valid")
			ctx.Status(http.StatusUnauthorized)
			return
		}
		claims := token.Claims.(jwt.MapClaims)
		username := claims["username"].(string)
		user := models.User{}
		if err := models.FindOne(models.Users, bson.M{"username": username}, &user); err != nil {
			fmt.Println(err)
			ctx.Status(http.StatusUnauthorized)
			// Bug fix: previously execution fell through to ctx.Next() after
			// the failed lookup, letting unauthenticated requests continue
			// down the handler chain with an empty user.
			return
		}
		ctx.Data["user"] = user
		ctx.Next()
	}
}
|
package main
import "fmt"
// Vector is a 2D integer coordinate with unexported components.
type Vector struct {
	x int
	y int
}

// Player identifies a player by numeric ID and display name.
type Player struct {
	ID   int
	Name string
}
// main demonstrates struct construction and field access on value types.
func main() {
	vec := Vector{x: 1, y: 10}
	fmt.Println(vec)
	fmt.Println("X =", vec.x)
	fmt.Println("Y =", vec.y)
	p := Player{ID: 1, Name: "Depa"}
	fmt.Println(p.ID)
	fmt.Println(p.Name)
}
|
package hw04_lru_cache //nolint:golint,stylecheck
import "sync"
// Key is the cache key type.
type Key string

// Cache is a size-bounded key/value store with LRU eviction.
type Cache interface {
	// Set stores value under key and reports whether the key already existed.
	Set(key Key, value interface{}) bool
	// Get returns the stored value and whether it was present.
	Get(key Key) (interface{}, bool)
	// Clear removes all entries.
	Clear()
}

// lruCache implements Cache with a map for lookups plus a queue that
// tracks recency (front = most recently used). The embedded Mutex guards
// both structures.
type lruCache struct {
	sync.Mutex
	capacity int
	queue    List
	items    map[Key]cacheItem
}

// cacheItem couples a stored value with its node in the recency queue.
// Note: despite its name, Key holds the queue node, not the map key.
type cacheItem struct {
	Key   *listItem
	Value interface{}
}
// Set stores value under key and reports whether the key already existed.
// The entry is promoted to the front of the recency queue; when capacity
// is exceeded the least recently used entry is evicted.
func (c *lruCache) Set(key Key, value interface{}) bool {
	c.Lock()
	defer c.Unlock()
	existing, exists := c.items[key]
	var node *listItem
	if exists {
		node = existing.Key
		c.queue.MoveToFront(node)
	} else {
		node = c.queue.PushFront(key)
	}
	if c.queue.Len() > c.capacity {
		oldest := c.queue.Back()
		delete(c.items, oldest.Value.(Key))
		c.queue.Remove(oldest)
	}
	c.items[key] = cacheItem{Value: value, Key: node}
	return exists
}
// Get returns the cached value and whether it was present; a hit promotes
// the entry to the front of the recency queue. On a miss the zero-valued
// item yields (nil, false).
func (c *lruCache) Get(key Key) (interface{}, bool) {
	c.Lock()
	defer c.Unlock()
	item, ok := c.items[key]
	if ok {
		c.queue.MoveToFront(item.Key)
	}
	return item.Value, ok
}
// Clear removes every entry from the cache.
func (c *lruCache) Clear() {
	// Bug fix: Clear previously mutated the queue and map without holding
	// the mutex, racing with concurrent Set/Get calls.
	c.Lock()
	defer c.Unlock()
	c.queue = NewList()
	for key := range c.items {
		delete(c.items, key)
	}
}
// NewCache creates an empty LRU cache bounded to capacity entries.
func NewCache(capacity int) Cache {
	return &lruCache{
		capacity: capacity,
		queue:    NewList(),
		items:    make(map[Key]cacheItem),
	}
}
|
package encoding
import (
"bytes"
"encoding/binary"
"errors"
"io"
)
// WriteUint16 writes an uint16 into a byte buffer
func WriteUint16(w *bytes.Buffer, i uint16) {
w.WriteByte(byte(i >> 8))
w.WriteByte(byte(i))
}
// WriteBool writes a boolean value into a byte buffer
func WriteBool(w *bytes.Buffer, b bool) {
if b {
w.WriteByte(1)
} else {
w.WriteByte(0)
}
}
// ReadBool reads a bool from a byte buffer
func ReadBool(r *bytes.Buffer) (bool, error) {
b, err := r.ReadByte()
if err != nil {
return false, err
}
if b == 0 {
return false, nil
}
return true, nil
}
// WriteString writes a string into a byte buffer as a length-value
func WriteString(w *bytes.Buffer, s []byte) {
WriteUint16(w, uint16(len(s)))
w.Write(s)
}
// ReadString reads a string from a byte buffer
func ReadString(r *bytes.Buffer) (b []byte, err error) {
l := make([]byte, 2)
_, err = io.ReadFull(r, l)
if err != nil {
return nil, err
}
length := int(binary.BigEndian.Uint16(l))
paylaod := make([]byte, length)
_, err = io.ReadFull(r, paylaod)
if err != nil {
return nil, err
}
return paylaod, nil
}
// WriteUint32 writes an uint into a byte buffer
func WriteUint32(w *bytes.Buffer, i uint32) {
w.WriteByte(byte(i >> 24))
w.WriteByte(byte(i >> 16))
w.WriteByte(byte(i >> 8))
w.WriteByte(byte(i))
}
// ReadUint16 reads an uint16 from a byte buffer
func ReadUint16(r *bytes.Buffer) (uint16, error) {
if r.Len() < 2 {
return 0, errors.New("invalid length")
}
return binary.BigEndian.Uint16(r.Next(2)), nil
}
// ReadUint32 reads an uint32 from a byte buffer
func ReadUint32(r *bytes.Buffer) (uint32, error) {
if r.Len() < 4 {
return 0, errors.New("invalid length")
}
return binary.BigEndian.Uint32(r.Next(4)), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.