// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package api
import (
"crypto/rand"
"encoding/base64"
"encoding/binary"
"fmt"
"net"
"sort"
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/Azure/aks-engine/pkg/api/common"
"github.com/Azure/aks-engine/pkg/helpers"
"github.com/Azure/aks-engine/pkg/versions"
)
// DistroValues is the set of Distro values accepted by the API, including the
// empty string (which means "use the default distro for the profile").
var DistroValues = []Distro{"", Ubuntu, Ubuntu2004, Ubuntu2004Gen2, Ubuntu1804, Flatcar, AKSUbuntu1604, AKSUbuntu1804, Ubuntu1804Gen2, AKSUbuntu2004, ACC1604}
// PropertiesDefaultsParams is the parameters when we set the properties defaults for ContainerService.
type PropertiesDefaultsParams struct {
    IsUpgrade  bool // true when defaults are applied in the context of a cluster upgrade
    IsScale    bool // true when defaults are applied in the context of a scale operation
    PkiKeySize int  // RSA key size in bits used when generating PKI certificates
}
// SetPropertiesDefaults for the container Properties, returns true if certs are generated
// SetPropertiesDefaults for the container Properties, returns true if certs are generated
func (cs *ContainerService) SetPropertiesDefaults(params PropertiesDefaultsParams) (bool, error) {
    p := cs.Properties
    // Custom cloud profile defaults must be applied first when a custom cloud profile is present.
    if cs.Properties.IsCustomCloudProfile() {
        if err := cs.setCustomCloudProfileDefaults(CustomCloudProfileDefaultsParams{
            IsUpgrade: params.IsUpgrade,
            IsScale:   params.IsScale,
        }); err != nil {
            return false, err
        }
    }
    // Master profile defaults only apply when this configuration includes master node(s).
    if cs.Properties.MasterProfile != nil {
        p.setMasterProfileDefaults()
    }
    p.setAgentProfileDefaults(params.IsUpgrade, params.IsScale)
    p.setStorageDefaults()
    cs.setOrchestratorDefaults(params.IsUpgrade, params.IsScale)
    // Linux profile defaults only apply when this configuration includes Linux nodes.
    if cs.Properties.LinuxProfile != nil {
        p.setLinuxProfileDefaults()
    }
    p.setExtensionDefaults()
    if cs.Properties.WindowsProfile != nil {
        cs.setWindowsProfileDefaults(params.IsUpgrade, params.IsScale)
        cs.setCSIProxyDefaults()
    }
    p.setTelemetryProfileDefaults()
    certsGenerated, _, err := cs.SetDefaultCerts(DefaultCertParams{
        PkiKeySize: params.PkiKeySize,
    })
    if err != nil {
        return false, err
    }
    return certsGenerated, nil
}
// setOrchestratorDefaults for orchestrators
func (cs *ContainerService) setOrchestratorDefaults(isUpgrade, isScale bool) {
isUpdate := isUpgrade || isScale
a := cs.Properties
cloudSpecConfig := cs.GetCloudSpecConfig()
if a.OrchestratorProfile == nil {
a.OrchestratorProfile = &OrchestratorProfile{
OrchestratorType: Kubernetes,
}
}
o := a.OrchestratorProfile
o.OrchestratorVersion = common.GetValidPatchVersion(
o.OrchestratorType,
o.OrchestratorVersion, isUpdate, a.HasWindows(), a.IsAzureStackCloud())
switch o.OrchestratorType {
case Kubernetes:
if o.KubernetesConfig == nil {
o.KubernetesConfig = &KubernetesConfig{}
}
// For backwards compatibility with original, overloaded "NetworkPolicy" config vector
// we translate deprecated NetworkPolicy usage to the NetworkConfig equivalent
// and set a default network policy enforcement configuration
switch o.KubernetesConfig.NetworkPolicy {
case NetworkPolicyAzure:
if o.KubernetesConfig.NetworkPlugin == "" {
o.KubernetesConfig.NetworkPlugin = NetworkPluginAzure
o.KubernetesConfig.NetworkPolicy = DefaultNetworkPolicy
}
case NetworkPolicyNone:
o.KubernetesConfig.NetworkPlugin = NetworkPluginKubenet
o.KubernetesConfig.NetworkPolicy = DefaultNetworkPolicy
case NetworkPolicyCalico:
if o.KubernetesConfig.NetworkPlugin == "" {
// If not specified, then set the network plugin to be kubenet
// for backwards compatibility. Otherwise, use what is specified.
o.KubernetesConfig.NetworkPlugin = NetworkPluginKubenet
}
case NetworkPolicyCilium:
o.KubernetesConfig.NetworkPlugin = NetworkPluginCilium
case NetworkPolicyAntrea:
if o.KubernetesConfig.NetworkPlugin == "" {
o.KubernetesConfig.NetworkPlugin = NetworkPluginAzure
}
}
if a.IsAzureStackCloud() {
// Azure Stack's custom hyperkube image is now hosted along with MCR images.
// Forcing KubernetesImageBase/KubernetesImageBaseType.
mcrKubernetesImageBase := cloudSpecConfig.KubernetesSpecConfig.MCRKubernetesImageBase
if !strings.EqualFold(o.KubernetesConfig.KubernetesImageBase, mcrKubernetesImageBase) {
log.Warnf("apimodel: orchestratorProfile.kubernetesConfig.kubernetesImageBase forced to \"%s\"\n", mcrKubernetesImageBase)
}
o.KubernetesConfig.KubernetesImageBase = cloudSpecConfig.KubernetesSpecConfig.MCRKubernetesImageBase
if !strings.EqualFold(o.KubernetesConfig.KubernetesImageBaseType, common.KubernetesImageBaseTypeMCR) {
log.Warnf("apimodel: orchestratorProfile.kubernetesConfig.kubernetesImageBaseType forced to \"%s\"\n", common.KubernetesImageBaseTypeMCR)
}
o.KubernetesConfig.KubernetesImageBaseType = common.KubernetesImageBaseTypeMCR
if isUpgrade && strings.EqualFold(o.KubernetesConfig.MCRKubernetesImageBase, "mcr.microsoft.com/k8s/core/") {
log.Warn("apimodel: clearing deprecated orchestratorProfile.kubernetesConfig.mcrKubernetesImageBase value\n")
o.KubernetesConfig.MCRKubernetesImageBase = ""
}
}
if isUpgrade {
if (o.KubernetesConfig.KubernetesImageBase == "" || o.KubernetesConfig.KubernetesImageBase == cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase) &&
(o.KubernetesConfig.KubernetesImageBaseType == "" || o.KubernetesConfig.KubernetesImageBaseType == common.KubernetesImageBaseTypeGCR) {
o.KubernetesConfig.KubernetesImageBase = cloudSpecConfig.KubernetesSpecConfig.MCRKubernetesImageBase
o.KubernetesConfig.KubernetesImageBaseType = common.KubernetesImageBaseTypeMCR
}
}
if o.KubernetesConfig.KubernetesImageBase == "" {
o.KubernetesConfig.KubernetesImageBase = cloudSpecConfig.KubernetesSpecConfig.MCRKubernetesImageBase
} else {
if o.KubernetesConfig.KubernetesImageBase[len(o.KubernetesConfig.KubernetesImageBase)-1:] != "/" {
o.KubernetesConfig.KubernetesImageBase += "/"
}
}
if o.KubernetesConfig.KubernetesImageBaseType == "" {
o.KubernetesConfig.KubernetesImageBaseType = common.KubernetesImageBaseTypeMCR
}
if o.KubernetesConfig.MCRKubernetesImageBase == "" {
o.KubernetesConfig.MCRKubernetesImageBase = cloudSpecConfig.KubernetesSpecConfig.MCRKubernetesImageBase
}
if o.KubernetesConfig.EtcdVersion == "" {
o.KubernetesConfig.EtcdVersion = DefaultEtcdVersion
} else if isUpgrade {
if o.KubernetesConfig.EtcdVersion != DefaultEtcdVersion {
// Override (i.e., upgrade) the etcd version if the default is newer in an upgrade scenario
if common.GetMinVersion([]string{o.KubernetesConfig.EtcdVersion, DefaultEtcdVersion}, true) == o.KubernetesConfig.EtcdVersion {
log.Warnf("etcd will be upgraded to version %s\n", DefaultEtcdVersion)
o.KubernetesConfig.EtcdVersion = DefaultEtcdVersion
}
}
}
if !isUpgrade && !isScale &&
!cs.Properties.IsCustomCloudProfile() &&
!cs.Properties.MasterProfile.IsVirtualMachineScaleSets() &&
o.KubernetesConfig.UseManagedIdentity == nil {
o.KubernetesConfig.UseManagedIdentity = to.BoolPtr(true)
}
if a.HasWindows() {
if o.KubernetesConfig.NetworkPlugin == "" {
o.KubernetesConfig.NetworkPlugin = DefaultNetworkPluginWindows
}
} else {
if o.KubernetesConfig.NetworkPlugin == "" {
o.KubernetesConfig.NetworkPlugin = DefaultNetworkPlugin
}
}
if o.KubernetesConfig.NetworkPlugin == NetworkPluginAzure {
if o.KubernetesConfig.NetworkMode == "" {
o.KubernetesConfig.NetworkMode = NetworkModeTransparent
}
}
if o.KubernetesConfig.ContainerRuntime == "" {
o.KubernetesConfig.ContainerRuntime = DefaultContainerRuntime
if a.IsAzureStackCloud() && common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.24.0") {
log.Warnf("The docker runtime is no longer supported for v1.24+ clusters, setting ContainerRuntime to 'containerd'")
o.KubernetesConfig.ContainerRuntime = Containerd
}
}
switch o.KubernetesConfig.ContainerRuntime {
case Docker:
if o.KubernetesConfig.MobyVersion == "" || isUpdate {
if o.KubernetesConfig.MobyVersion != DefaultMobyVersion {
if isUpgrade {
log.Warnf("Moby will be upgraded to version %s\n", DefaultMobyVersion)
} else if isScale {
log.Warnf("Any new nodes will have Moby version %s\n", DefaultMobyVersion)
}
}
o.KubernetesConfig.MobyVersion = DefaultMobyVersion
}
// Moby versions >= 19.03 depend on containerd packaging (instead of the moby packages supplying their own containerd)
// For that case we'll need to specify the containerd version.
if versions.GreaterThanOrEqualTo(o.KubernetesConfig.MobyVersion, "19.03") && (o.KubernetesConfig.ContainerdVersion == "" || isUpdate) {
if o.KubernetesConfig.ContainerdVersion != DefaultContainerdVersion {
if isUpgrade {
log.Warnf("containerd will be upgraded to version %s\n", DefaultContainerdVersion)
}
if isScale {
log.Warnf("Any new nodes will have containerd version %s\n", DefaultContainerdVersion)
}
}
o.KubernetesConfig.ContainerdVersion = DefaultContainerdVersion
}
case Containerd:
if o.KubernetesConfig.ContainerdVersion == "" || isUpdate {
if o.KubernetesConfig.ContainerdVersion != DefaultContainerdVersion {
if isUpgrade {
log.Warnf("containerd will be upgraded to version %s\n", DefaultContainerdVersion)
} else if isScale {
log.Warnf("Any new nodes will have containerd version %s\n", DefaultContainerdVersion)
}
}
o.KubernetesConfig.ContainerdVersion = DefaultContainerdVersion
}
if o.KubernetesConfig.WindowsContainerdURL == "" {
o.KubernetesConfig.WindowsContainerdURL = DefaultWindowsContainerdURL
}
}
if o.KubernetesConfig.ClusterSubnet == "" {
if o.IsAzureCNI() {
// When Azure CNI is enabled, all masters, agents and pods share the same large subnet.
// Except when master is VMSS, then masters and agents have separate subnets within the same large subnet.
o.KubernetesConfig.ClusterSubnet = DefaultKubernetesSubnet
if cs.Properties.FeatureFlags.IsFeatureEnabled("EnableIPv6DualStack") {
o.KubernetesConfig.ClusterSubnet = strings.Join([]string{DefaultKubernetesSubnet, cs.getDefaultKubernetesClusterSubnetIPv6()}, ",")
}
} else {
o.KubernetesConfig.ClusterSubnet = DefaultKubernetesClusterSubnet
// ipv6 only cluster
if cs.Properties.FeatureFlags.IsFeatureEnabled("EnableIPv6Only") {
o.KubernetesConfig.ClusterSubnet = DefaultKubernetesClusterSubnetIPv6
}
// ipv4 and ipv6 subnet for dual stack
if cs.Properties.FeatureFlags.IsFeatureEnabled("EnableIPv6DualStack") {
o.KubernetesConfig.ClusterSubnet = strings.Join([]string{DefaultKubernetesClusterSubnet, cs.getDefaultKubernetesClusterSubnetIPv6()}, ",")
}
}
} else {
// ensure 2 subnets exists if ipv6 dual stack feature is enabled
if cs.Properties.FeatureFlags.IsFeatureEnabled("EnableIPv6DualStack") {
clusterSubnets := strings.Split(o.KubernetesConfig.ClusterSubnet, ",")
if len(clusterSubnets) == 1 {
// if error exists, then it'll be caught by validate
ip, _, err := net.ParseCIDR(clusterSubnets[0])
if err == nil {
if ip.To4() != nil {
// the first cidr block is ipv4, so append ipv6
clusterSubnets = append(clusterSubnets, cs.getDefaultKubernetesClusterSubnetIPv6())
} else {
// first cidr has to be ipv4
if o.IsAzureCNI() {
clusterSubnets = append([]string{DefaultKubernetesSubnet}, clusterSubnets...)
} else {
clusterSubnets = append([]string{DefaultKubernetesClusterSubnet}, clusterSubnets...)
}
}
// only set the cluster subnet if no error has been encountered
}
o.KubernetesConfig.ClusterSubnet = strings.Join(clusterSubnets, ",")
}
}
}
if o.KubernetesConfig.GCHighThreshold == 0 {
o.KubernetesConfig.GCHighThreshold = DefaultKubernetesGCHighThreshold
}
if o.KubernetesConfig.GCLowThreshold == 0 {
o.KubernetesConfig.GCLowThreshold = DefaultKubernetesGCLowThreshold
}
if o.KubernetesConfig.DNSServiceIP == "" {
o.KubernetesConfig.DNSServiceIP = DefaultKubernetesDNSServiceIP
if cs.Properties.FeatureFlags.IsFeatureEnabled("EnableIPv6Only") {
o.KubernetesConfig.DNSServiceIP = DefaultKubernetesDNSServiceIPv6
}
}
if o.KubernetesConfig.DockerBridgeSubnet == "" {
o.KubernetesConfig.DockerBridgeSubnet = DefaultDockerBridgeSubnet
}
if o.KubernetesConfig.ServiceCIDR == "" {
o.KubernetesConfig.ServiceCIDR = DefaultKubernetesServiceCIDR
if cs.Properties.FeatureFlags.IsFeatureEnabled("EnableIPv6Only") {
o.KubernetesConfig.ServiceCIDR = DefaultKubernetesServiceCIDRIPv6
}
if cs.Properties.FeatureFlags.IsFeatureEnabled("EnableIPv6DualStack") {
o.KubernetesConfig.ServiceCIDR = strings.Join([]string{DefaultKubernetesServiceCIDR, DefaultKubernetesServiceCIDRIPv6}, ",")
}
} else {
if cs.Properties.FeatureFlags.IsFeatureEnabled("EnableIPv6DualStack") {
serviceAddrs := strings.Split(o.KubernetesConfig.ServiceCIDR, ",")
if len(serviceAddrs) == 1 {
ip, _, err := net.ParseCIDR(serviceAddrs[0])
if err == nil {
if ip.To4() != nil {
// the first cidr block is ipv4, so append ipv6
serviceAddrs = append(serviceAddrs, DefaultKubernetesServiceCIDRIPv6)
} else {
// first cidr has to be ipv4
serviceAddrs = append([]string{DefaultKubernetesServiceCIDR}, serviceAddrs...)
}
}
o.KubernetesConfig.ServiceCIDR = strings.Join(serviceAddrs, ",")
}
}
}
if common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.14.0") {
o.KubernetesConfig.CloudProviderBackoffMode = CloudProviderBackoffModeV2
if o.KubernetesConfig.CloudProviderBackoff == nil {
o.KubernetesConfig.CloudProviderBackoff = to.BoolPtr(true)
}
} else {
o.KubernetesConfig.CloudProviderBackoffMode = "v1"
if o.KubernetesConfig.CloudProviderBackoff == nil {
o.KubernetesConfig.CloudProviderBackoff = to.BoolPtr(false)
}
}
// Enforce sane cloudprovider backoff defaults.
a.SetCloudProviderBackoffDefaults()
if o.KubernetesConfig.CloudProviderRateLimit == nil {
o.KubernetesConfig.CloudProviderRateLimit = to.BoolPtr(DefaultKubernetesCloudProviderRateLimit)
}
// Enforce sane cloudprovider rate limit defaults.
a.SetCloudProviderRateLimitDefaults()
if o.KubernetesConfig.PrivateCluster == nil {
o.KubernetesConfig.PrivateCluster = &PrivateCluster{}
}
if o.KubernetesConfig.PrivateCluster.Enabled == nil {
o.KubernetesConfig.PrivateCluster.Enabled = to.BoolPtr(DefaultPrivateClusterEnabled)
}
if o.KubernetesConfig.PrivateCluster.EnableHostsConfigAgent == nil {
o.KubernetesConfig.PrivateCluster.EnableHostsConfigAgent = to.BoolPtr(DefaultPrivateClusterHostsConfigAgentEnabled)
}
if "" == a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB {
switch {
case a.TotalNodes() > 20:
if a.IsAzureStackCloud() {
// Currently on Azure Stack max size of managed disk size is 1023GB.
a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = MaxAzureStackManagedDiskSize
} else {
a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = DefaultEtcdDiskSizeGT20Nodes
}
case a.TotalNodes() > 10:
if a.IsAzureStackCloud() {
// Currently on Azure Stack max size of managed disk size is 1023GB.
a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = MaxAzureStackManagedDiskSize
} else {
a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = DefaultEtcdDiskSizeGT10Nodes
}
case a.TotalNodes() > 3:
a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = DefaultEtcdDiskSizeGT3Nodes
default:
a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = DefaultEtcdDiskSize
}
}
if a.OrchestratorProfile.KubernetesConfig.EtcdStorageLimitGB == 0 {
a.OrchestratorProfile.KubernetesConfig.EtcdStorageLimitGB = DefaultEtcdStorageLimitGB
}
if to.Bool(o.KubernetesConfig.EnableDataEncryptionAtRest) {
if "" == a.OrchestratorProfile.KubernetesConfig.EtcdEncryptionKey {
a.OrchestratorProfile.KubernetesConfig.EtcdEncryptionKey = generateEtcdEncryptionKey()
}
}
if a.OrchestratorProfile.KubernetesConfig.PrivateJumpboxProvision() && a.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.OSDiskSizeGB == 0 {
a.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.OSDiskSizeGB = DefaultJumpboxDiskSize
}
if a.OrchestratorProfile.KubernetesConfig.PrivateJumpboxProvision() && a.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.Username == "" {
a.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.Username = DefaultJumpboxUsername
}
if a.OrchestratorProfile.KubernetesConfig.PrivateJumpboxProvision() && a.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.StorageProfile == "" {
a.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.StorageProfile = ManagedDisks
}
if a.OrchestratorProfile.KubernetesConfig.EnableRbac == nil {
a.OrchestratorProfile.KubernetesConfig.EnableRbac = to.BoolPtr(DefaultRBACEnabled)
}
if a.OrchestratorProfile.KubernetesConfig.MicrosoftAptRepositoryURL == "" {
a.OrchestratorProfile.KubernetesConfig.MicrosoftAptRepositoryURL = DefaultMicrosoftAptRepositoryURL
}
// Upgrade scenario:
// We need to force set EnableRbac to true for upgrades to 1.15.0 and greater if it was previously set to false (AKS Engine only)
if !a.OrchestratorProfile.KubernetesConfig.IsRBACEnabled() && common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.15.0") && isUpgrade {
log.Warnf("RBAC will be enabled during upgrade to version %s\n", o.OrchestratorVersion)
a.OrchestratorProfile.KubernetesConfig.EnableRbac = to.BoolPtr(true)
}
if a.OrchestratorProfile.KubernetesConfig.IsRBACEnabled() {
a.OrchestratorProfile.KubernetesConfig.EnableAggregatedAPIs = true
} else if isUpdate && a.OrchestratorProfile.KubernetesConfig.EnableAggregatedAPIs {
// Upgrade scenario:
// We need to force set EnableAggregatedAPIs to false if RBAC was previously disabled
a.OrchestratorProfile.KubernetesConfig.EnableAggregatedAPIs = false
}
if a.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet == nil {
a.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet = to.BoolPtr(DefaultSecureKubeletEnabled)
}
if a.OrchestratorProfile.KubernetesConfig.UseInstanceMetadata == nil {
if a.IsAzureStackCloud() {
a.OrchestratorProfile.KubernetesConfig.UseInstanceMetadata = to.BoolPtr(DefaultAzureStackUseInstanceMetadata)
} else {
a.OrchestratorProfile.KubernetesConfig.UseInstanceMetadata = to.BoolPtr(DefaultUseInstanceMetadata)
}
}
if a.IsAzureStackCloud() && a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != DefaultAzureStackLoadBalancerSku {
if a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "" {
log.Warnf("apimodel: orchestratorProfile.kubernetesConfig.LoadBalancerSku forced to \"%s\"\n", DefaultAzureStackLoadBalancerSku)
}
a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = DefaultAzureStackLoadBalancerSku
} else if a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == "" {
a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = StandardLoadBalancerSku
}
if strings.EqualFold(a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, BasicLoadBalancerSku) {
a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = BasicLoadBalancerSku
} else if strings.EqualFold(a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, StandardLoadBalancerSku) {
a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = StandardLoadBalancerSku
}
if a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == StandardLoadBalancerSku && a.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB == nil {
a.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB = to.BoolPtr(DefaultExcludeMasterFromStandardLB)
}
if a.OrchestratorProfile.IsAzureCNI() {
if a.HasWindows() {
a.OrchestratorProfile.KubernetesConfig.AzureCNIVersion = AzureCniPluginVerWindows
} else {
a.OrchestratorProfile.KubernetesConfig.AzureCNIVersion = AzureCniPluginVerLinux
}
}
if a.OrchestratorProfile.KubernetesConfig.MaximumLoadBalancerRuleCount == 0 {
a.OrchestratorProfile.KubernetesConfig.MaximumLoadBalancerRuleCount = DefaultMaximumLoadBalancerRuleCount
}
if a.OrchestratorProfile.KubernetesConfig.ProxyMode == "" {
a.OrchestratorProfile.KubernetesConfig.ProxyMode = DefaultKubeProxyMode
}
if a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == StandardLoadBalancerSku &&
a.OrchestratorProfile.KubernetesConfig.OutboundRuleIdleTimeoutInMinutes == 0 {
a.OrchestratorProfile.KubernetesConfig.OutboundRuleIdleTimeoutInMinutes = DefaultOutboundRuleIdleTimeoutInMinutes
}
if o.KubernetesConfig.LoadBalancerSku == StandardLoadBalancerSku {
if o.KubernetesConfig.CloudProviderDisableOutboundSNAT == nil {
o.KubernetesConfig.CloudProviderDisableOutboundSNAT = to.BoolPtr(false)
}
} else {
// CloudProviderDisableOutboundSNAT is only valid in the context of Standard LB, statically set to false if not Standard LB
o.KubernetesConfig.CloudProviderDisableOutboundSNAT = to.BoolPtr(false)
}
if o.KubernetesConfig.ContainerRuntimeConfig == nil {
o.KubernetesConfig.ContainerRuntimeConfig = make(map[string]string)
}
// Master-specific defaults that depend upon OrchestratorProfile defaults
if cs.Properties.MasterProfile != nil {
if !cs.Properties.MasterProfile.IsCustomVNET() {
if cs.Properties.OrchestratorProfile.IsAzureCNI() {
// When VNET integration is enabled, all masters, agents and pods share the same large subnet.
cs.Properties.MasterProfile.Subnet = o.KubernetesConfig.ClusterSubnet
clusterSubnets := strings.Split(o.KubernetesConfig.ClusterSubnet, ",")
if cs.Properties.IsAzureCNIDualStack() && len(clusterSubnets) > 1 {
cs.Properties.MasterProfile.Subnet = clusterSubnets[0]
}
cs.Properties.MasterProfile.SubnetIPv6 = DefaultKubernetesMasterSubnetIPv6
// FirstConsecutiveStaticIP is not reset if it is upgrade and some value already exists
if !isUpgrade || len(cs.Properties.MasterProfile.FirstConsecutiveStaticIP) == 0 {
if cs.Properties.MasterProfile.IsVirtualMachineScaleSets() {
cs.Properties.MasterProfile.FirstConsecutiveStaticIP = DefaultFirstConsecutiveKubernetesStaticIPVMSS
cs.Properties.MasterProfile.Subnet = DefaultKubernetesMasterSubnet
cs.Properties.MasterProfile.AgentSubnet = DefaultKubernetesAgentSubnetVMSS
} else {
cs.Properties.MasterProfile.FirstConsecutiveStaticIP = cs.Properties.MasterProfile.GetFirstConsecutiveStaticIPAddress(cs.Properties.MasterProfile.Subnet)
}
}
} else {
cs.Properties.MasterProfile.Subnet = DefaultKubernetesMasterSubnet
cs.Properties.MasterProfile.SubnetIPv6 = DefaultKubernetesMasterSubnetIPv6
// FirstConsecutiveStaticIP is not reset if it is upgrade and some value already exists
if !isUpgrade || len(cs.Properties.MasterProfile.FirstConsecutiveStaticIP) == 0 {
if cs.Properties.MasterProfile.IsVirtualMachineScaleSets() {
cs.Properties.MasterProfile.FirstConsecutiveStaticIP = DefaultFirstConsecutiveKubernetesStaticIPVMSS
cs.Properties.MasterProfile.AgentSubnet = DefaultKubernetesAgentSubnetVMSS
} else {
cs.Properties.MasterProfile.FirstConsecutiveStaticIP = DefaultFirstConsecutiveKubernetesStaticIP
}
}
}
}
// Distro assignment for masterProfile
if cs.Properties.MasterProfile.ImageRef == nil {
if cs.Properties.MasterProfile.Distro == "" {
cs.Properties.MasterProfile.Distro = AKSUbuntu1804
if cs.Properties.IsAzureStackCloud() {
cs.Properties.MasterProfile.Distro = AKSUbuntu2004
}
} else if isUpgrade || isScale {
if cs.Properties.MasterProfile.Distro == AKSDockerEngine || cs.Properties.MasterProfile.Distro == AKS1604Deprecated {
cs.Properties.MasterProfile.Distro = AKSUbuntu1604
} else if cs.Properties.MasterProfile.Distro == AKS1804Deprecated {
cs.Properties.MasterProfile.Distro = AKSUbuntu1804
}
}
// The AKS Distro is not available in Azure German Cloud.
if cloudSpecConfig.CloudName == AzureGermanCloud {
cs.Properties.MasterProfile.Distro = Ubuntu1804
if cs.Properties.IsAzureStackCloud() {
cs.Properties.MasterProfile.Distro = Ubuntu2004
}
}
}
}
// Pool-specific defaults that depend upon OrchestratorProfile defaults
for _, profile := range cs.Properties.AgentPoolProfiles {
// configure the subnets if not in custom VNET
if cs.Properties.MasterProfile != nil && !cs.Properties.MasterProfile.IsCustomVNET() {
subnetCounter := 0
for _, profile := range cs.Properties.AgentPoolProfiles {
if !cs.Properties.MasterProfile.IsVirtualMachineScaleSets() {
profile.Subnet = cs.Properties.MasterProfile.Subnet
}
subnetCounter++
}
}
// Distro assignment for pools
if profile.OSType != Windows {
if profile.ImageRef == nil {
if profile.Distro == "" {
if profile.OSDiskSizeGB != 0 && profile.OSDiskSizeGB < VHDDiskSizeAKS {
profile.Distro = Ubuntu1804
if cs.Properties.IsAzureStackCloud() {
profile.Distro = Ubuntu2004
}
} else {
profile.Distro = AKSUbuntu1804
if cs.Properties.IsAzureStackCloud() {
profile.Distro = AKSUbuntu2004
}
}
// Ensure deprecated distros are overridden
// Previous versions of aks-engine required the docker-engine distro for N series vms,
// so we need to hard override it in order to produce a working cluster in upgrade/scale contexts.
} else if isUpgrade || isScale {
if profile.Distro == AKSDockerEngine || profile.Distro == AKS1604Deprecated {
profile.Distro = AKSUbuntu1604
} else if profile.Distro == AKS1804Deprecated {
profile.Distro = AKSUbuntu1804
}
}
// The AKS Distro is not available in Azure German Cloud.
if cloudSpecConfig.CloudName == AzureGermanCloud {
profile.Distro = Ubuntu1804
if cs.Properties.IsAzureStackCloud() {
profile.Distro = Ubuntu2004
}
}
}
}
// Ensure that all VMSS pools have SinglePlacementGroup set to false in Standard LB cluster scenarios
if profile.AvailabilityProfile == VirtualMachineScaleSets && profile.SinglePlacementGroup == nil {
if cs.Properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == StandardLoadBalancerSku {
profile.SinglePlacementGroup = to.BoolPtr(false)
} else {
profile.SinglePlacementGroup = to.BoolPtr(DefaultSinglePlacementGroup)
}
}
}
// Configure kubelet
cs.setKubeletConfig(isUpgrade)
// Configure addons
cs.setAddonsConfig(isUpgrade)
// Master-specific defaults that depend upon kubelet defaults
// Set the default number of IP addresses allocated for masters.
if cs.Properties.MasterProfile != nil {
if cs.Properties.MasterProfile.IPAddressCount == 0 {
// Allocate one IP address for the node.
cs.Properties.MasterProfile.IPAddressCount = 1
// Allocate IP addresses for pods if VNET integration is enabled.
if cs.Properties.OrchestratorProfile.IsAzureCNI() {
masterMaxPods, _ := strconv.Atoi(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--max-pods"])
cs.Properties.MasterProfile.IPAddressCount += masterMaxPods
}
}
}
// Pool-specific defaults that depend upon kubelet defaults
for _, profile := range cs.Properties.AgentPoolProfiles {
// Set the default number of IP addresses allocated for agents.
if profile.IPAddressCount == 0 {
// Allocate one IP address for the node.
profile.IPAddressCount = 1
// Allocate IP addresses for pods if VNET integration is enabled.
if cs.Properties.OrchestratorProfile.IsAzureCNI() {
agentPoolMaxPods, _ := strconv.Atoi(profile.KubernetesConfig.KubeletConfig["--max-pods"])
if profile.IsWindows() {
profile.IPAddressCount += agentPoolMaxPods
} else {
profile.IPAddressCount = getPodIPAddressCountForAzureCNI(agentPoolMaxPods, o.KubernetesConfig)
}
}
}
}
// Configure controller-manager
cs.setControllerManagerConfig()
// Configure cloud-controller-manager
cs.setCloudControllerManagerConfig()
// Configure apiserver
cs.setAPIServerConfig()
// Configure scheduler
cs.setSchedulerConfig()
// Configure components
cs.setComponentsConfig(isUpgrade)
// Configure Linux kernel runtime values via sysctl.d
cs.setSysctlDConfig()
}
}
// setExtensionDefaults fills in the default extensions root URL for any
// extension profile that does not specify one. A nil profile list is a no-op.
func (p *Properties) setExtensionDefaults() {
    if p.ExtensionProfiles == nil {
        return
    }
    for i := range p.ExtensionProfiles {
        if p.ExtensionProfiles[i].RootURL == "" {
            p.ExtensionProfiles[i].RootURL = DefaultExtensionsRootURL
        }
    }
}
// setMasterProfileDefaults applies default values to the master profile:
// availability profile, placement group, static IPs, source address prefix,
// Cosmos-backed etcd, fault/update domains, and OS disk caching.
func (p *Properties) setMasterProfileDefaults() {
    m := p.MasterProfile
    // Default to VMAS for now; Azure Stack uses its own default profile.
    if m.AvailabilityProfile == "" {
        if p.IsAzureStackCloud() {
            m.AvailabilityProfile = DefaultAzureStackAvailabilityProfile
        } else {
            m.AvailabilityProfile = AvailabilitySet
        }
    }
    if m.IsVirtualMachineScaleSets() && m.SinglePlacementGroup == nil {
        m.SinglePlacementGroup = to.BoolPtr(DefaultSinglePlacementGroup)
    }
    // VMSS masters on a custom VNET derive the first static IP from the VNET CIDR.
    if m.IsCustomVNET() && m.IsVirtualMachineScaleSets() {
        m.FirstConsecutiveStaticIP = m.GetFirstConsecutiveStaticIPAddress(m.VnetCidr)
    }
    if m.HTTPSourceAddressPrefix == "" {
        m.HTTPSourceAddressPrefix = "*"
    }
    if m.CosmosEtcd == nil {
        m.CosmosEtcd = to.BoolPtr(DefaultUseCosmos)
    }
    // Update default fault domain value for Azure Stack
    if p.IsAzureStackCloud() && m.PlatformFaultDomainCount == nil {
        m.PlatformFaultDomainCount = to.IntPtr(DefaultAzureStackFaultDomainCount)
    }
    if m.PlatformUpdateDomainCount == nil {
        m.PlatformUpdateDomainCount = to.IntPtr(3)
    }
    if m.OSDiskCachingType == "" {
        m.OSDiskCachingType = string(compute.CachingTypesReadWrite)
    }
}
// setLinuxProfileDefaults applies unattended-upgrade and MTU defaults to the
// Linux profile. Azure Stack clouds get distinct unattended-upgrade defaults.
func (p *Properties) setLinuxProfileDefaults() {
    lp := p.LinuxProfile
    if lp.RunUnattendedUpgradesOnBootstrap == nil && !p.IsAzureStackCloud() {
        lp.RunUnattendedUpgradesOnBootstrap = to.BoolPtr(DefaultRunUnattendedUpgradesOnBootstrap)
    }
    if lp.EnableUnattendedUpgrades == nil {
        enable := DefaultEnableUnattendedUpgrades
        if p.IsAzureStackCloud() {
            enable = DefaultEnableUnattendedUpgradesAzureStack
        }
        lp.EnableUnattendedUpgrades = to.BoolPtr(enable)
    }
    // Azure CNI benefits from a specific eth0 MTU default.
    if p.OrchestratorProfile.IsAzureCNI() && lp.Eth0MTU == 0 {
        lp.Eth0MTU = DefaultEth0MTU
    }
}
// setAgentProfileDefaults applies per-pool defaults: availability profile,
// VMSS eviction/spot/overprovisioning settings, OS type, fault/update domains,
// accelerated networking, auditd, node-property preservation, public IP, and
// disk caching types. isUpgrade/isScale suppress opt-in defaults on updates.
func (p *Properties) setAgentProfileDefaults(isUpgrade, isScale bool) {
    isUpdate := isUpgrade || isScale
    for i, pool := range p.AgentPoolProfiles {
        if pool.AvailabilityProfile == "" {
            pool.AvailabilityProfile = VirtualMachineScaleSets
        }
        if pool.AvailabilityProfile == VirtualMachineScaleSets {
            // Low-priority/spot pools default to Delete eviction.
            lowOrSpot := pool.ScaleSetPriority == ScaleSetPriorityLow || pool.ScaleSetPriority == ScaleSetPrioritySpot
            if pool.ScaleSetEvictionPolicy == "" && lowOrSpot {
                pool.ScaleSetEvictionPolicy = ScaleSetEvictionPolicyDelete
            }
            if pool.ScaleSetPriority == ScaleSetPrioritySpot && pool.SpotMaxPrice == nil {
                // -1 means "pay up to the on-demand price".
                spotMaxPrice := float64(-1)
                pool.SpotMaxPrice = &spotMaxPrice
            }
            if pool.VMSSOverProvisioningEnabled == nil {
                pool.VMSSOverProvisioningEnabled = to.BoolPtr(DefaultVMSSOverProvisioningEnabled && !isUpdate)
            }
            pool.VMSSName = p.GetAgentVMPrefix(pool, i)
        }
        // set default OSType to Linux
        if pool.OSType == "" {
            pool.OSType = Linux
        }
        // Update default fault domain value for Azure Stack
        if p.IsAzureStackCloud() && pool.PlatformFaultDomainCount == nil {
            pool.PlatformFaultDomainCount = to.IntPtr(DefaultAzureStackFaultDomainCount)
        }
        if pool.PlatformUpdateDomainCount == nil {
            pool.PlatformUpdateDomainCount = to.IntPtr(3)
        }
        // Accelerated Networking is supported on most general purpose and compute-optimized instance sizes with 2 or more vCPUs.
        // These supported series are: D/DSv2 and F/Fs // All the others are not supported
        // On instances that support hyperthreading, Accelerated Networking is supported on VM instances with 4 or more vCPUs.
        // Supported series are: D/DSv3, E/ESv3, Fsv2, and Ms/Mms.
        if pool.AcceleratedNetworkingEnabled == nil {
            if p.IsAzureStackCloud() {
                pool.AcceleratedNetworkingEnabled = to.BoolPtr(DefaultAzureStackAcceleratedNetworking)
            } else {
                pool.AcceleratedNetworkingEnabled = to.BoolPtr(DefaultAcceleratedNetworking && !isUpdate && helpers.AcceleratedNetworkingSupported(pool.VMSize))
            }
        }
        if pool.AcceleratedNetworkingEnabledWindows == nil {
            if p.IsAzureStackCloud() {
                // Here we are using same default variable. We will change once we will start supporting AcceleratedNetworking feature in general.
                pool.AcceleratedNetworkingEnabledWindows = to.BoolPtr(DefaultAzureStackAcceleratedNetworking)
            } else {
                pool.AcceleratedNetworkingEnabledWindows = to.BoolPtr(DefaultAcceleratedNetworkingWindowsEnabled && !isUpdate && helpers.AcceleratedNetworkingSupported(pool.VMSize))
            }
        }
        if pool.AuditDEnabled == nil {
            pool.AuditDEnabled = to.BoolPtr(DefaultAuditDEnabled && !isUpdate)
        }
        if pool.PreserveNodesProperties == nil {
            pool.PreserveNodesProperties = to.BoolPtr(DefaultPreserveNodesProperties)
        }
        if pool.EnableVMSSNodePublicIP == nil {
            pool.EnableVMSSNodePublicIP = to.BoolPtr(DefaultEnableVMSSNodePublicIP)
        }
        // Ephemeral OS disks only support read-only caching.
        if pool.OSDiskCachingType == "" {
            if pool.IsEphemeral() {
                pool.OSDiskCachingType = string(compute.CachingTypesReadOnly)
            } else {
                pool.OSDiskCachingType = string(compute.CachingTypesReadWrite)
            }
        }
        if pool.DataDiskCachingType == "" {
            pool.DataDiskCachingType = string(compute.CachingTypesReadOnly)
        }
    }
}
// ImagePublisherAndOfferMatch reports whether the WindowsProfile's publisher
// and offer both equal those of the given image configuration.
func ImagePublisherAndOfferMatch(wp *WindowsProfile, imageConfig AzureOSImageConfig) bool {
	if wp.WindowsPublisher != imageConfig.ImagePublisher {
		return false
	}
	return wp.WindowsOffer == imageConfig.ImageOffer
}
// setWindowsProfileDefaults sets default WindowsProfile values. Behavior
// depends on the operation: on create (neither upgrade nor scale) all
// defaults are applied; on upgrade only a safe subset is refreshed; on scale
// nothing is changed so new nodes match the existing ones.
func (cs *ContainerService) setWindowsProfileDefaults(isUpgrade, isScale bool) {
	cloudSpecConfig := cs.GetCloudSpecConfig()
	windowsProfile := cs.Properties.WindowsProfile
	if !isUpgrade && !isScale {
		// Allow non-default values of windowsProfile.ProvisioningScriptsPackageURL to allow for testing of updates to the scripts.
		if len(windowsProfile.ProvisioningScriptsPackageURL) == 0 {
			windowsProfile.ProvisioningScriptsPackageURL = cloudSpecConfig.KubernetesSpecConfig.WindowsProvisioningScriptsPackageURL
		}
		if len(windowsProfile.WindowsPauseImageURL) == 0 {
			windowsProfile.WindowsPauseImageURL = cloudSpecConfig.KubernetesSpecConfig.WindowsPauseImageURL
		}
		if windowsProfile.AlwaysPullWindowsPauseImage == nil {
			windowsProfile.AlwaysPullWindowsPauseImage = to.BoolPtr(cloudSpecConfig.KubernetesSpecConfig.AlwaysPullWindowsPauseImage)
		}
		if windowsProfile.SSHEnabled == nil {
			windowsProfile.SSHEnabled = to.BoolPtr(DefaultWindowsSSHEnabled)
		}
		// Default to the aks-engine Windows Server 2019 docker image
		defaultImageConfig := AKSWindowsServer2019OSImageConfig
		if ImagePublisherAndOfferMatch(windowsProfile, WindowsServer2019OSImageConfig) {
			// Use 'vanilla' Windows Server 2019 images
			defaultImageConfig = WindowsServer2019OSImageConfig
		} else if cs.Properties.OrchestratorProfile.KubernetesConfig.NeedsContainerd() {
			// Use the aks-engine Windows Server 2019 containerd VHD
			defaultImageConfig = AKSWindowsServer2019ContainerDOSImageConfig
		}
		// This allows caller to use the latest ImageVersion and WindowsSku for adding a new Windows pool to an existing cluster.
		// We must assure that same WindowsPublisher and WindowsOffer are used in an existing cluster.
		if ImagePublisherAndOfferMatch(windowsProfile, defaultImageConfig) {
			if windowsProfile.WindowsSku == "" {
				windowsProfile.WindowsSku = defaultImageConfig.ImageSku
			}
			if windowsProfile.ImageVersion == "" {
				if windowsProfile.WindowsSku == defaultImageConfig.ImageSku {
					windowsProfile.ImageVersion = defaultImageConfig.ImageVersion
				} else {
					// A non-default sku has no pinned version; take the newest.
					windowsProfile.ImageVersion = "latest"
				}
			}
		} else {
			if windowsProfile.WindowsPublisher == "" {
				windowsProfile.WindowsPublisher = defaultImageConfig.ImagePublisher
			}
			if windowsProfile.WindowsOffer == "" {
				windowsProfile.WindowsOffer = defaultImageConfig.ImageOffer
			}
			if windowsProfile.WindowsSku == "" {
				windowsProfile.WindowsSku = defaultImageConfig.ImageSku
			}
			if windowsProfile.ImageVersion == "" {
				// default versions are specific to a publisher/offer/sku for aks-engine VHDs
				aksEngineImageConfigs := []AzureOSImageConfig{AKSWindowsServer2019ContainerDOSImageConfig, AKSWindowsServer2019OSImageConfig}
				for _, imageConfig := range aksEngineImageConfigs {
					if ImagePublisherAndOfferMatch(windowsProfile, imageConfig) && windowsProfile.WindowsSku == imageConfig.ImageSku {
						windowsProfile.ImageVersion = imageConfig.ImageVersion
						break
					}
				}
				// set imageVersion to 'latest' if still unset
				if windowsProfile.ImageVersion == "" {
					windowsProfile.ImageVersion = "latest"
				}
			}
		}
	} else if isUpgrade {
		// Always set windowsProfile.ProvisioningScriptsPackageURL to the default value during upgrade.
		// The contents on this package must stay in sync with other powershell code in /parts/k8s and the best way to ensure that is to update the value here.
		windowsProfile.ProvisioningScriptsPackageURL = cloudSpecConfig.KubernetesSpecConfig.WindowsProvisioningScriptsPackageURL
		if len(windowsProfile.WindowsPauseImageURL) == 0 {
			windowsProfile.WindowsPauseImageURL = cloudSpecConfig.KubernetesSpecConfig.WindowsPauseImageURL
		}
		if windowsProfile.AlwaysPullWindowsPauseImage == nil {
			windowsProfile.AlwaysPullWindowsPauseImage = to.BoolPtr(cloudSpecConfig.KubernetesSpecConfig.AlwaysPullWindowsPauseImage)
		}
		// Image reference publisher and offer only can be set when you create the scale set so we keep the old values.
		// Reference: https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-upgrade-scale-set#create-time-properties
		windowsImageConfigs := []AzureOSImageConfig{AKSWindowsServer2019OSImageConfig, WindowsServer2019OSImageConfig}
		if cs.Properties.OrchestratorProfile.KubernetesConfig.NeedsContainerd() {
			windowsImageConfigs = []AzureOSImageConfig{AKSWindowsServer2019ContainerDOSImageConfig, WindowsServer2019OSImageConfig}
		}
		for _, imageConfig := range windowsImageConfigs {
			if ImagePublisherAndOfferMatch(windowsProfile, imageConfig) {
				if windowsProfile.ImageVersion == "" {
					windowsProfile.ImageVersion = imageConfig.ImageVersion
				}
				if windowsProfile.WindowsSku == "" {
					windowsProfile.WindowsSku = imageConfig.ImageSku
				}
				break
			}
		}
	}
	// Scale: Keep the same version to match other nodes because we have no way to rollback
}
// setStorageDefaults assigns the ManagedDisks storage profile to the master
// profile and to every agent pool that has not specified one.
func (p *Properties) setStorageDefaults() {
	if master := p.MasterProfile; master != nil && len(master.StorageProfile) == 0 {
		master.StorageProfile = ManagedDisks
	}
	for _, pool := range p.AgentPoolProfiles {
		if len(pool.StorageProfile) == 0 {
			pool.StorageProfile = ManagedDisks
		}
	}
}
// setTelemetryProfileDefaults ensures a non-nil TelemetryProfile so callers
// can dereference it without nil checks.
func (p *Properties) setTelemetryProfileDefaults() {
	if p.TelemetryProfile == nil {
		p.TelemetryProfile = &TelemetryProfile{}
	}
	// The previous branch that reset an empty ApplicationInsightsKey to ""
	// was a no-op and has been removed.
}
// DefaultCertParams is the params when we set the default certs.
type DefaultCertParams struct {
	// PkiKeySize is the key size (in bits) used when generating PKI key/cert pairs.
	PkiKeySize int
}
// SetDefaultCerts generates and sets defaults for the container certificateProfile, returns true if certs are generated.
// It also returns the list of IP SANs baked into the generated certificates. Cert/key
// pairs supplied by the user are preserved; only the missing ones are generated.
func (cs *ContainerService) SetDefaultCerts(params DefaultCertParams) (bool, []net.IP, error) {
	p := cs.Properties
	// Certificates are only needed for clusters that include master node(s).
	if p.MasterProfile == nil {
		return false, nil, nil
	}
	provided := certsAlreadyPresent(p.CertificateProfile, p.MasterProfile.Count)
	if areAllTrue(provided) {
		// Every cert/key pair was user-provided; nothing to generate.
		return false, nil, nil
	}
	// Collect the public apiserver FQDNs (one per location) for the cert SANs.
	var azureProdFQDNs []string
	for _, location := range cs.GetLocations() {
		azureProdFQDNs = append(azureProdFQDNs, FormatProdFQDNByLocation(p.MasterProfile.DNSPrefix, location, p.GetCustomCloudName()))
	}
	masterExtraFQDNs := append(azureProdFQDNs, p.MasterProfile.SubjectAltNames...)
	masterExtraFQDNs = append(masterExtraFQDNs, "localhost")
	firstMasterIP := net.ParseIP(p.MasterProfile.FirstConsecutiveStaticIP).To4()
	localhostIP := net.ParseIP("127.0.0.1").To4()
	if firstMasterIP == nil {
		return false, nil, errors.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", p.MasterProfile.FirstConsecutiveStaticIP)
	}
	ips := []net.IP{firstMasterIP, localhostIP}
	// Include the Internal load balancer as well
	if p.MasterProfile.IsVirtualMachineScaleSets() {
		// NOTE(review): for VMSS masters the internal LB IP is assumed to sit in
		// the x.y.255.0/24 range at the fixed offset — confirm against the
		// template that allocates it.
		ips = append(ips, net.IP{firstMasterIP[0], firstMasterIP[1], byte(255), byte(DefaultInternalLbStaticIPOffset)})
	} else {
		// Add the Internal Loadbalancer IP which is always at a known offset from the firstMasterIP
		// NOTE(review): byte addition wraps on overflow of the last octet; callers
		// presumably validate the IP range elsewhere — confirm.
		ips = append(ips, net.IP{firstMasterIP[0], firstMasterIP[1], firstMasterIP[2], firstMasterIP[3] + byte(DefaultInternalLbStaticIPOffset)})
	}
	// VMSS masters consume IPAddressCount addresses per node; VM masters are consecutive.
	var offsetMultiplier int
	if p.MasterProfile.IsVirtualMachineScaleSets() {
		offsetMultiplier = p.MasterProfile.IPAddressCount
	} else {
		offsetMultiplier = 1
	}
	// Derive one SAN IP per additional master node.
	addr := binary.BigEndian.Uint32(firstMasterIP)
	for i := 1; i < p.MasterProfile.Count; i++ {
		newAddr := getNewAddr(addr, i, offsetMultiplier)
		ip := make(net.IP, 4)
		binary.BigEndian.PutUint32(ip, newAddr)
		ips = append(ips, ip)
	}
	if p.CertificateProfile == nil {
		p.CertificateProfile = &CertificateProfile{}
	}
	// use the specified Certificate Authority pair, or generate a new pair
	var caPair *helpers.PkiKeyCertPair
	if provided["ca"] {
		caPair = &helpers.PkiKeyCertPair{CertificatePem: p.CertificateProfile.CaCertificate, PrivateKeyPem: p.CertificateProfile.CaPrivateKey}
	} else {
		var err error
		pkiKeyCertPairParams := helpers.PkiKeyCertPairParams{
			CommonName: "ca",
			PkiKeySize: params.PkiKeySize,
		}
		caPair, err = helpers.CreatePkiKeyCertPair(pkiKeyCertPairParams)
		if err != nil {
			return false, ips, err
		}
		p.CertificateProfile.CaCertificate = caPair.CertificatePem
		p.CertificateProfile.CaPrivateKey = caPair.PrivateKeyPem
	}
	serviceCIDR := p.OrchestratorProfile.KubernetesConfig.ServiceCIDR
	// all validation for dual stack done with primary service cidr as that is considered
	// the default ip family for cluster.
	if cs.Properties.FeatureFlags.IsFeatureEnabled("EnableIPv6DualStack") {
		// split service cidrs
		serviceCIDRs := strings.Split(serviceCIDR, ",")
		serviceCIDR = serviceCIDRs[0]
	}
	// The first IP of the service CIDR is the in-cluster apiserver service IP; it
	// must be a SAN on the apiserver certificate.
	cidrFirstIP, err := common.CidrStringFirstIP(serviceCIDR)
	if err != nil {
		return false, ips, err
	}
	ips = append(ips, cidrFirstIP)
	pkiParams := helpers.PkiParams{}
	pkiParams.CaPair = caPair
	pkiParams.ClusterDomain = DefaultKubernetesClusterDomain
	pkiParams.ExtraFQDNs = masterExtraFQDNs
	pkiParams.ExtraIPs = ips
	pkiParams.MasterCount = p.MasterProfile.Count
	pkiParams.PkiKeySize = params.PkiKeySize
	apiServerPair, clientPair, kubeConfigPair, etcdServerPair, etcdClientPair, etcdPeerPairs, err :=
		helpers.CreatePki(pkiParams)
	if err != nil {
		return false, ips, err
	}
	// If no Certificate Authority pair or no cert/key pair was provided, use generated cert/key pairs signed by provided Certificate Authority pair
	if !provided["apiserver"] || !provided["ca"] {
		p.CertificateProfile.APIServerCertificate = apiServerPair.CertificatePem
		p.CertificateProfile.APIServerPrivateKey = apiServerPair.PrivateKeyPem
	}
	if !provided["client"] || !provided["ca"] {
		p.CertificateProfile.ClientCertificate = clientPair.CertificatePem
		p.CertificateProfile.ClientPrivateKey = clientPair.PrivateKeyPem
	}
	if !provided["kubeconfig"] || !provided["ca"] {
		p.CertificateProfile.KubeConfigCertificate = kubeConfigPair.CertificatePem
		p.CertificateProfile.KubeConfigPrivateKey = kubeConfigPair.PrivateKeyPem
	}
	if !provided["etcd"] || !provided["ca"] {
		p.CertificateProfile.EtcdServerCertificate = etcdServerPair.CertificatePem
		p.CertificateProfile.EtcdServerPrivateKey = etcdServerPair.PrivateKeyPem
		p.CertificateProfile.EtcdClientCertificate = etcdClientPair.CertificatePem
		p.CertificateProfile.EtcdClientPrivateKey = etcdClientPair.PrivateKeyPem
		p.CertificateProfile.EtcdPeerCertificates = make([]string, p.MasterProfile.Count)
		p.CertificateProfile.EtcdPeerPrivateKeys = make([]string, p.MasterProfile.Count)
		for i, v := range etcdPeerPairs {
			p.CertificateProfile.EtcdPeerCertificates[i] = v.CertificatePem
			p.CertificateProfile.EtcdPeerPrivateKeys[i] = v.PrivateKeyPem
		}
	}
	return true, ips, nil
}
// areAllTrue reports whether every value in m is true. An empty (or nil) map
// is vacuously all-true.
func areAllTrue(m map[string]bool) bool {
	for _, ok := range m {
		if ok {
			continue
		}
		return false
	}
	return true
}
// getNewAddr returns addr advanced by count multiples of offsetMultiplier.
func getNewAddr(addr uint32, count int, offsetMultiplier int) uint32 {
	return addr + uint32(count*offsetMultiplier)
}
// certsAlreadyPresent returns a map keyed by cert type whose value is true
// when that cert/key pair was user-provided. m is the expected number of etcd
// peer certificates (one per master).
func certsAlreadyPresent(c *CertificateProfile, m int) map[string]bool {
	provided := map[string]bool{
		"ca":         false,
		"apiserver":  false,
		"kubeconfig": false,
		"client":     false,
		"etcd":       false,
	}
	if c == nil {
		return provided
	}
	// etcd peers count as provided only when both slices have exactly one
	// non-empty entry per master.
	etcdPeerOK := len(c.EtcdPeerCertificates) == m && len(c.EtcdPeerPrivateKeys) == m
	if etcdPeerOK {
		for i := range c.EtcdPeerCertificates {
			if len(c.EtcdPeerCertificates[i]) == 0 || len(c.EtcdPeerPrivateKeys[i]) == 0 {
				etcdPeerOK = false
			}
		}
	}
	provided["ca"] = len(c.CaCertificate) > 0 && len(c.CaPrivateKey) > 0
	provided["apiserver"] = len(c.APIServerCertificate) > 0 && len(c.APIServerPrivateKey) > 0
	provided["kubeconfig"] = len(c.KubeConfigCertificate) > 0 && len(c.KubeConfigPrivateKey) > 0
	provided["client"] = len(c.ClientCertificate) > 0 && len(c.ClientPrivateKey) > 0
	provided["etcd"] = etcdPeerOK && len(c.EtcdClientCertificate) > 0 && len(c.EtcdClientPrivateKey) > 0 && len(c.EtcdServerCertificate) > 0 && len(c.EtcdServerPrivateKey) > 0
	return provided
}
// addDefaultFeatureGates merges the default --feature-gates values into m,
// keeping any user-provided values. When minVersion is non-empty, defaults
// are only added for clusters at or above that Kubernetes version; below it
// the user's value is simply re-normalized.
func addDefaultFeatureGates(m map[string]string, version string, minVersion string, defaults string) {
	gates := defaults
	if minVersion != "" && !common.IsKubernetesVersionGe(version, minVersion) {
		gates = ""
	}
	m["--feature-gates"] = combineValues(m["--feature-gates"], gates)
}
// combineValues merges a variadic list of comma-delimited "key=val" strings
// into one sorted, comma-delimited string. When the same key appears in more
// than one input, the earliest occurrence wins.
func combineValues(inputs ...string) string {
	merged := make(map[string]string)
	for _, in := range inputs {
		applyValueStringToMap(merged, in)
	}
	return mapToString(merged)
}
// removeInvalidFeatureGates strips the given invalid gate names from the
// --feature-gates entry of m.
func removeInvalidFeatureGates(m map[string]string, invalidFeatureGates []string) {
	current := m["--feature-gates"]
	m["--feature-gates"] = removeKeys(current, invalidFeatureGates)
}
// removeKeys parses input (a comma-delimited "key=val" list), deletes the
// given keys, and returns the remaining pairs re-serialized in sorted order.
func removeKeys(input string, keysToRemove []string) string {
	remaining := make(map[string]string)
	applyValueStringToMap(remaining, input)
	for _, k := range keysToRemove {
		delete(remaining, k)
	}
	return mapToString(remaining)
}
// applyValueStringToMap parses input as a comma-delimited list of "key=val"
// pairs and stores them in valueMap. Malformed entries are skipped and the
// first occurrence of a key wins.
func applyValueStringToMap(valueMap map[string]string, input string) {
	for _, raw := range strings.Split(input, ",") {
		// Drop surrounding spaces (e.g. "foo=true, bar=true").
		pair := strings.Split(strings.Trim(raw, " "), "=")
		if len(pair) != 2 {
			continue
		}
		key, val := pair[0], pair[1]
		if _, seen := valueMap[key]; !seen {
			valueMap[key] = val
		}
	}
}
// mapToString serializes valueMap as "k1=v1,k2=v2,..." with keys in sorted
// order for deterministic output.
func mapToString(valueMap map[string]string) string {
	keys := make([]string, 0, len(valueMap))
	for k := range valueMap {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, k+"="+valueMap[k])
	}
	return strings.Join(pairs, ",")
}
// generateEtcdEncryptionKey returns a fresh 32-byte random key, base64
// (standard) encoded, suitable for etcd encryption-at-rest configuration.
func generateEtcdEncryptionKey() string {
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		// A crypto/rand failure means the platform CSPRNG is broken; an
		// encryption key must never be built from partial or zeroed bytes,
		// so fail hard rather than silently return a weak key.
		panic(err)
	}
	return base64.StdEncoding.EncodeToString(b)
}
// getDefaultKubernetesClusterSubnetIPv6 returns the default IPv6 cluster
// subnet for the configured orchestrator version.
func (cs *ContainerService) getDefaultKubernetesClusterSubnetIPv6() string {
	version := cs.Properties.OrchestratorProfile.OrchestratorVersion
	// Before 1.17 the default IPv6 node mask size is /24, which forces the
	// cluster subnet mask size to be strictly >= /8.
	if !common.IsKubernetesVersionGe(version, "1.17.0") {
		return "fc00::/8"
	}
	// From 1.17 on the default IPv6 node mask size is /64, so the cluster
	// subnet mask size must be >= /48.
	return DefaultKubernetesClusterSubnetIPv6
}
// setCSIProxyDefaults enables the Windows CSI proxy and fills in its download
// URL when appropriate. We should enable CSI proxy if:
//  1. enableCSIProxy is not defined and cloud-controller-manager is being
//     used on a Windows cluster, or
//  2. enableCSIProxy is true, or
//  3. csiProxyURL is defined.
func (cs *ContainerService) setCSIProxyDefaults() {
	p := cs.Properties
	w := p.WindowsProfile
	// Robustness: nothing to default when the cluster has no Windows profile
	// (the original dereferenced w unconditionally and would panic here).
	if w == nil {
		return
	}
	useCloudControllerManager := p.OrchestratorProfile.KubernetesConfig != nil && to.Bool(p.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager)
	shouldEnableCSIProxy := (w.EnableCSIProxy == nil && useCloudControllerManager) ||
		w.IsCSIProxyEnabled() ||
		w.CSIProxyURL != ""
	if !shouldEnableCSIProxy {
		return
	}
	w.EnableCSIProxy = to.BoolPtr(true)
	if w.CSIProxyURL == "" {
		// Fall back to the cloud-specific default download location.
		cloudSpecConfig := cs.GetCloudSpecConfig()
		w.CSIProxyURL = cloudSpecConfig.KubernetesSpecConfig.CSIProxyDownloadURL
	}
}
// getPodIPAddressCountForAzureCNI returns the number of IP addresses to
// reserve per node for Azure CNI: one for the node NIC (eth0) plus one per
// kubelet pod slot, minus one per enabled addon whose pods do not consume an
// Azure CNI address (presumably because they run in the host network
// namespace, as the counter name suggests).
func getPodIPAddressCountForAzureCNI(kubeletMaxPods int, k *KubernetesConfig) int {
	// We need at least one IP address, for eth0.
	ret := 1
	// Data-driven replacement for the previous 12-way repeated if chain.
	hostNetworkAddons := []string{
		common.AADPodIdentityAddonName,
		common.AntreaAddonName,
		common.AzureNetworkPolicyAddonName,
		common.AzureDiskCSIDriverAddonName,
		common.AzureFileCSIDriverAddonName,
		common.CalicoAddonName,
		common.CiliumAddonName,
		common.CloudNodeManagerAddonName,
		common.FlannelAddonName,
		common.IPMASQAgentAddonName,
		common.KubeProxyAddonName,
		common.SecretsStoreCSIDriverAddonName,
	}
	numHostNetworkPods := 0
	for _, addon := range hostNetworkAddons {
		if k.IsAddonEnabled(addon) {
			numHostNetworkPods++
		}
	}
	ret += kubeletMaxPods - numHostNetworkPods
	return ret
}
|
package dht_pb
import (
"testing"
)
// TestBadAddrsDontReturnNil checks that Addresses drops byte slices that do
// not parse as multiaddrs instead of returning entries for them.
func TestBadAddrsDontReturnNil(t *testing.T) {
	mp := &Message_Peer{}
	mp.Addrs = [][]byte{[]byte("NOT A VALID MULTIADDR")}
	if len(mp.Addresses()) > 0 {
		t.Fatal("shouldnt have any multiaddrs")
	}
}
|
package publisher
import (
"github.com/google/martian/log"
"go-rabbitmq/infra/mq"
)
// Publish abstracts message publication to the message queue.
type Publish interface {
	// PublishMessage publishes msg to the given exchange and routing key with
	// the supplied mandatory/immediate delivery flags.
	PublishMessage(exchanged, name string, mandatory, immediate bool, msg string)
}
// publisher implements Publish on top of an embedded MQ connection.
type publisher struct {
	mq.MQInterface
}
// PublishMessage is an example implementation of simple message publishing:
// it forwards msg to the underlying MQ publisher and logs any failure.
func (p publisher) PublishMessage(exchanged, name string, mandatory, immediate bool, msg string) {
	err := p.Publisher(
		exchanged,
		name,
		mandatory,
		immediate,
		[]byte(msg),
	)
	if err != nil {
		// Pass the error as an argument rather than as the format string:
		// the previous log.Errorf(err.Error()) would misinterpret any '%'
		// verbs contained in the error text.
		log.Errorf("publish message: %v", err)
	}
}
// NewPublisherMQ wraps the given MQ implementation in a Publish.
func NewPublisherMQ(mqInterface mq.MQInterface) Publish {
	return publisher{MQInterface: mqInterface}
}
|
package gossip
import (
"fmt"
"net"
"net/http"
"net/rpc"
)
const (
	// ServerPort is the TCP port the gossip RPC server listens on.
	ServerPort = 3030
)
// Server exposes the gossip RPC endpoint.
type Server struct{}

// MakeServer returns a fresh, unstarted Server.
func MakeServer() *Server {
	return &Server{}
}
// Start registers the gossip RPC handler on the default HTTP RPC mux and
// begins serving on ServerPort. It returns an error if the handler cannot be
// registered or the listen socket cannot be opened; the serve loop itself
// runs in a background goroutine.
func (s *Server) Start(g *Gossip) error {
	r := new(GRPC)
	r.g = g
	// The original discarded rpc.Register's error, silently losing a failed
	// registration (e.g. an unsuitable method set).
	if err := rpc.Register(r); err != nil {
		return err
	}
	rpc.HandleHTTP()
	l, err := net.Listen("tcp", fmt.Sprintf(":%d", ServerPort))
	if err != nil {
		return err
	}
	// NOTE(review): http.Serve's eventual error is dropped; there is no
	// logger available here to report it — consider plumbing one through.
	go http.Serve(l, nil)
	return nil
}
|
package main
import (
"github.com/fananchong/go-xserver/services/internal/protocol"
"github.com/fananchong/gotcp"
)
// User represents a logged-in player session on the gateway.
type User struct {
	gotcp.Session
}
// OnRecv is triggered when a network packet is received for this session.
// Unverified sessions must pass verification before any other message is
// processed; otherwise the packet is dropped.
func (user *User) OnRecv(data []byte, flag byte) {
	cmd := gotcp.GetCmd(data)
	// Idiomatic boolean negation instead of the previous `== false` checks.
	if !user.IsVerified() && !user.doVerify(protocol.CMD_GATEWAY_ENUM(cmd), data, flag) {
		return
	}
	switch protocol.CMD_GATEWAY_ENUM(cmd) {
	default:
		Ctx.Log.Errorln("Unknown message number, message number is", cmd)
	}
}
// OnClose is triggered when the connection is closed. No per-user cleanup is
// currently required.
func (user *User) OnClose() {
}
// doVerify validates the first message of an unverified session.
// NOTE(review): this is a stub that accepts every session unconditionally;
// real verification must be implemented before production use.
func (user *User) doVerify(cmd protocol.CMD_GATEWAY_ENUM, data []byte, flag byte) bool {
	return true
}
|
// Copyright 2013 http://gumuz.nl/. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package treap/iterator.go provides an iterator for the Treap.
Inspired by: http://en.wikipedia.org/wiki/Tree_traversal#In-order
The Iterator has 4 constructors: sliced/unsliced * reversed/non-reversed
And 1 public method: Next()
Create a new Iterator with a treap value:
treap := New(uint64(1))
treap = Set(treap, uint64(5))
treap = Set(treap, uint64(2))
treap = Set(treap, uint64(8))
// regular iterator
i := NewIterator(treap)
i.Next() // 1, true
i.Next() // 2, true
i.Next() // 5, true
i.Next() // 8, true
i.Next() // 0, false
// reversed iterator
i := NewIteratorReversed(treap)
i.Next() // 8, true
i.Next() // 5, true
i.Next() // 2, true
i.Next() // 1, true
i.Next() // 0, false
// slice iterator
i := NewSliceIterator(treap, 1, 2)
i.Next() // 2, true
i.Next() // 5, true
i.Next() // 0, false
// reversed slice iterator
i := NewSliceIteratorReversed(treap, 1, 2)
i.Next() // 5, true
i.Next() // 2, true
i.Next() // 0, false
*/
package treap
// Iterator walks a Treap in ascending (or, when reversed, descending) key
// order using an explicit stack, optionally restricted to a slice window of
// the key sequence.
type Iterator struct {
	stack *Stack // ancestors whose visit has been deferred
	node *Treap // next subtree to descend into; nil when the stack drives
	start int // number of leading keys to skip
	length int // keys to emit after start; 0 means unbounded
	count int // keys visited so far, including skipped ones
	reversed bool // true for descending order
}
// Next advances the traversal and returns the next key. The boolean result
// is false once the iterator is exhausted or the configured slice window
// [start, start+length) has been fully emitted.
func (i *Iterator) Next() (uint64, bool) {
	// Idiomatic `!...Empty()` instead of the previous `Empty() == false`.
	for !i.stack.Empty() || i.node != nil {
		// Stop early once the slice window has been fully consumed.
		if i.length > 0 && i.count == i.start+i.length {
			break
		}
		if i.node != nil {
			// Descend toward the next key in order, deferring ancestors.
			i.stack.Push(i.node)
			if i.reversed {
				i.node = i.node.right
			} else {
				i.node = i.node.left
			}
		} else {
			// Visit the most recently deferred ancestor, then continue in
			// its other subtree.
			i.node = i.stack.Pop().(*Treap)
			key := i.node.key
			if i.reversed {
				i.node = i.node.left
			} else {
				i.node = i.node.right
			}
			i.count++
			// Suppress the first `start` keys when slicing.
			if i.count > i.start {
				return key, true
			}
		}
	}
	return 0, false
}
// NewIterator returns an ascending-order iterator over the whole treap n.
func NewIterator(n *Treap) *Iterator {
	return &Iterator{
		stack: &Stack{},
		node:  n,
	}
}
// NewIteratorReversed returns a descending-order iterator over the whole
// treap n.
func NewIteratorReversed(n *Treap) *Iterator {
	return &Iterator{
		stack:    &Stack{},
		node:     n,
		reversed: true,
	}
}
// NewSliceIterator returns an ascending-order iterator over n that skips the
// first start keys and yields at most length keys.
func NewSliceIterator(n *Treap, start, length int) *Iterator {
	return &Iterator{
		stack:  &Stack{},
		node:   n,
		start:  start,
		length: length,
	}
}
// NewSliceIteratorReversed returns a descending-order iterator over n that
// skips the first start keys and yields at most length keys.
func NewSliceIteratorReversed(n *Treap, start, length int) *Iterator {
	return &Iterator{
		stack:    &Stack{},
		node:     n,
		start:    start,
		length:   length,
		reversed: true,
	}
}
|
package gov
import (
"bytes"
"fmt"
"log"
"sort"
"testing"
"github.com/ColorPlatform/color-sdk/codec"
"github.com/ColorPlatform/color-sdk/store"
sdk "github.com/ColorPlatform/color-sdk/types"
"github.com/ColorPlatform/color-sdk/x/auth"
"github.com/ColorPlatform/color-sdk/x/bank"
distr "github.com/ColorPlatform/color-sdk/x/distribution"
"github.com/ColorPlatform/color-sdk/x/distribution/types"
"github.com/ColorPlatform/color-sdk/x/mint"
"github.com/ColorPlatform/color-sdk/x/mock"
"github.com/ColorPlatform/color-sdk/x/params"
"github.com/ColorPlatform/color-sdk/x/staking"
abci "github.com/ColorPlatform/prism/abci/types"
"github.com/ColorPlatform/prism/crypto"
"github.com/ColorPlatform/prism/crypto/ed25519"
dbm "github.com/ColorPlatform/prism/libs/db"
logm "github.com/ColorPlatform/prism/libs/log"
"github.com/stretchr/testify/require"
)
// getMockApp initializes the mock application for this module: it wires the
// gov keeper (plus its staking/bank/distribution/mint dependencies) into a
// mock.App, registers the gov routes and blockers, and seeds numGenAccs
// genesis accounts unless an explicit genAccs list is supplied.
func getMockApp(t *testing.T, numGenAccs int, genState GenesisState, genAccs []auth.Account) (
	mapp *mock.App, keeper Keeper, sk staking.Keeper, addrs []sdk.AccAddress,
	pubKeys []crypto.PubKey, privKeys []crypto.PrivKey) {
	mapp = mock.NewApp()
	staking.RegisterCodec(mapp.Cdc)
	RegisterCodec(mapp.Cdc)
	keyStaking := sdk.NewKVStoreKey(staking.StoreKey)
	tkeyStaking := sdk.NewTransientStoreKey(staking.TStoreKey)
	keyGov := sdk.NewKVStoreKey(StoreKey)
	keyDistr := sdk.NewKVStoreKey(distr.StoreKey)
	keyMinting := sdk.NewKVStoreKey(mint.StoreKey)
	pk := mapp.ParamsKeeper
	ck := bank.NewBaseKeeper(mapp.AccountKeeper, mapp.ParamsKeeper.Subspace(bank.DefaultParamspace), bank.DefaultCodespace)
	sk = staking.NewKeeper(mapp.Cdc, keyStaking, tkeyStaking, ck, pk.Subspace(staking.DefaultParamspace), staking.DefaultCodespace)
	feeKeeper := auth.NewFeeCollectionKeeper(mapp.Cdc, mapp.KeyFeeCollection)
	distrKeeper := distr.NewKeeper(mapp.Cdc, keyDistr, pk.Subspace(distr.DefaultParamspace), ck, &sk, feeKeeper, distr.DefaultCodespace)
	minKeeper := mint.NewKeeper(mapp.Cdc, keyMinting, pk.Subspace(mint.DefaultParamspace), &sk, feeKeeper)
	keeper = NewKeeper(mapp.Cdc, distrKeeper, minKeeper, keyGov, pk, pk.Subspace("testgov"), ck, sk, sk, DefaultCodespace)
	mapp.Router().AddRoute(RouterKey, NewHandler(keeper))
	mapp.QueryRouter().AddRoute(QuerierRoute, NewQuerier(keeper))
	mapp.SetEndBlocker(getEndBlocker(keeper))
	mapp.SetInitChainer(getInitChainer(mapp, keeper, sk, genState))
	require.NoError(t, mapp.CompleteSetup(keyStaking, tkeyStaking, keyGov))
	valTokens := sdk.TokensFromTendermintPower(10000000000000)
	// Only generate fresh accounts when the caller did not supply any.
	if genAccs == nil || len(genAccs) == 0 {
		genAccs, addrs, pubKeys, privKeys = mock.CreateGenAccounts(numGenAccs,
			sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, valTokens)})
	}
	mock.SetGenesis(mapp, genAccs)
	return mapp, keeper, sk, addrs, pubKeys, privKeys
}
// getEndBlocker adapts the gov EndBlocker for the mock app, translating its
// tags into an ABCI end-block response.
func getEndBlocker(keeper Keeper) sdk.EndBlocker {
	return func(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock {
		return abci.ResponseEndBlock{
			Tags: EndBlocker(ctx, keeper),
		}
	}
}
// getInitChainer returns the gov and staking initchainer: it seeds the
// staking genesis not-bonded pool, initializes gov genesis (the supplied
// genState, or the module default when it is empty), and returns the
// resulting validator set.
func getInitChainer(mapp *mock.App, keeper Keeper, stakingKeeper staking.Keeper, genState GenesisState) sdk.InitChainer {
	return func(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain {
		mapp.InitChainer(ctx, req)
		stakingGenesis := staking.DefaultGenesisState()
		tokens := sdk.TokensFromTendermintPower(2000000)
		stakingGenesis.Pool.NotBondedTokens = tokens
		validators, err := staking.InitGenesis(ctx, stakingKeeper, stakingGenesis)
		if err != nil {
			// Genesis failure is unrecoverable in a test chain; abort loudly.
			panic(err)
		}
		if genState.IsEmpty() {
			InitGenesis(ctx, keeper, DefaultGenesisState())
		} else {
			InitGenesis(ctx, keeper, genState)
		}
		return abci.ResponseInitChain{
			Validators: validators,
		}
	}
}
// SortValAddresses sorts validator addresses in place by raw byte order.
// TODO: Remove once address interface has been implemented (ref: #2186)
func SortValAddresses(addrs []sdk.ValAddress) {
	byteAddrs := make([][]byte, 0, len(addrs))
	for _, a := range addrs {
		byteAddrs = append(byteAddrs, a.Bytes())
	}
	SortByteArrays(byteAddrs)
	for i := range byteAddrs {
		addrs[i] = byteAddrs[i]
	}
}
// SortAddresses sorts account addresses in place by raw byte order.
func SortAddresses(addrs []sdk.AccAddress) {
	byteAddrs := make([][]byte, 0, len(addrs))
	for _, a := range addrs {
		byteAddrs = append(byteAddrs, a.Bytes())
	}
	SortByteArrays(byteAddrs)
	for i := range byteAddrs {
		addrs[i] = byteAddrs[i]
	}
}
// sortByteArrays implements sort.Interface over a slice of byte slices,
// ordering them lexicographically via bytes.Compare.
type sortByteArrays [][]byte

func (b sortByteArrays) Len() int {
	return len(b)
}

func (b sortByteArrays) Less(i, j int) bool {
	// bytes package already implements Comparable for []byte.
	cmp := bytes.Compare(b[i], b[j])
	if cmp < -1 || cmp > 1 {
		// Unreachable: bytes.Compare is bounded to [-1, 1].
		log.Panic("not fail-able with `bytes.Comparable` bounded [-1, 1].")
		return false
	}
	return cmp == -1
}

func (b sortByteArrays) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}

// SortByteArrays sorts src in place lexicographically and returns it.
func SortByteArrays(src [][]byte) [][]byte {
	sort.Sort(sortByteArrays(src))
	return src
}
// testProposal builds a TextProposal attributed to a freshly generated
// account address, with a small fixed deposit.
func testProposal() TextProposal {
	addr := sdk.AccAddress(ed25519.GenPrivKey().PubKey().Address())
	deposit := sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 5)}
	return NewTextProposal("Test", "description", deposit, 4, addr)
}
// ProposalEqual checks if two proposals are equal by comparing their binary
// encodings (note: slow, for tests only).
func ProposalEqual(proposalA Proposal, proposalB Proposal) bool {
	encodedA := msgCdc.MustMarshalBinaryBare(proposalA)
	encodedB := msgCdc.MustMarshalBinaryBare(proposalB)
	return bytes.Equal(encodedA, encodedB)
}
// MakeTestCodec creates an amino codec used only for testing, with the bank,
// staking, auth, sdk, crypto, and distribution types registered.
func MakeTestCodec() *codec.Codec {
	var cdc = codec.New()
	bank.RegisterCodec(cdc)
	staking.RegisterCodec(cdc)
	auth.RegisterCodec(cdc)
	sdk.RegisterCodec(cdc)
	codec.RegisterCrypto(cdc)
	types.RegisterCodec(cdc) // distr
	return cdc
}
// DummyFeeCollectionKeeper is an empty placeholder fee-collection keeper for tests.
type DummyFeeCollectionKeeper struct{}
// Shared test fixtures: ed25519 keys and the addresses derived from them.
// Values are regenerated on every test binary run.
var (
	delPk1 = ed25519.GenPrivKey().PubKey()
	delPk2 = ed25519.GenPrivKey().PubKey()
	delPk3 = ed25519.GenPrivKey().PubKey()
	delAddr1 = sdk.AccAddress(delPk1.Address())
	delAddr2 = sdk.AccAddress(delPk2.Address())
	delAddr3 = sdk.AccAddress(delPk3.Address())
	valOpPk1 = ed25519.GenPrivKey().PubKey()
	valOpPk2 = ed25519.GenPrivKey().PubKey()
	valOpPk3 = ed25519.GenPrivKey().PubKey()
	valOpAddr1 = sdk.ValAddress(valOpPk1.Address())
	valOpAddr2 = sdk.ValAddress(valOpPk2.Address())
	valOpAddr3 = sdk.ValAddress(valOpPk3.Address())
	valAccAddr1 = sdk.AccAddress(valOpPk1.Address()) // generate acc addresses for these validator keys too
	valAccAddr2 = sdk.AccAddress(valOpPk2.Address())
	valAccAddr3 = sdk.AccAddress(valOpPk3.Address())
	valConsPk1 = ed25519.GenPrivKey().PubKey()
	valConsPk2 = ed25519.GenPrivKey().PubKey()
	valConsPk3 = ed25519.GenPrivKey().PubKey()
	valConsAddr1 = sdk.ConsAddress(valConsPk1.Address())
	valConsAddr2 = sdk.ConsAddress(valConsPk2.Address())
	valConsAddr3 = sdk.ConsAddress(valConsPk3.Address())
	// test addresses
	TestAddrs = []sdk.AccAddress{
		delAddr1, delAddr2, delAddr3,
		valAccAddr1, valAccAddr2, valAccAddr3,
	}
	// Zero values for negative-path tests.
	emptyDelAddr sdk.AccAddress
	emptyValAddr sdk.ValAddress
	emptyPubkey crypto.PubKey
)
// testMsg is a mock transaction that has a validation which can fail.
type testMsg struct {
	signers []sdk.AccAddress
	// positiveNum presumably must be positive for validation to pass —
	// confirm against this type's ValidateBasic implementation.
	positiveNum int64
}
// hogpodge of all sorts of input required for testing
func CreateTestInputAdvanced1(t *testing.T, isCheckTx bool, initPower int64,
communityTax sdk.Dec, numGenAccs int, genState GenesisState, genAccs []auth.Account) (mapp *mock.App, ctx sdk.Context, keeper Keeper, sk staking.Keeper, addrs []sdk.AccAddress,
pubKeys []crypto.PubKey, privKeys []crypto.PrivKey) {
mapp = mock.NewApp()
bank.RegisterCodec(mapp.Cdc)
staking.RegisterCodec(mapp.Cdc)
types.RegisterCodec(mapp.Cdc)
RegisterCodec(mapp.Cdc)
initCoins := sdk.TokensFromTendermintPower(initPower)
keyDistr := sdk.NewKVStoreKey(distr.StoreKey)
keyStaking := sdk.NewKVStoreKey(staking.StoreKey)
tkeyStaking := sdk.NewTransientStoreKey(staking.TStoreKey)
keyAcc := sdk.NewKVStoreKey(auth.StoreKey)
keyFeeCollection := sdk.NewKVStoreKey(auth.FeeStoreKey)
keyParams := sdk.NewKVStoreKey(params.StoreKey)
tkeyParams := sdk.NewTransientStoreKey(params.TStoreKey)
keyGov := sdk.NewKVStoreKey(StoreKey)
keyMinting := sdk.NewKVStoreKey(mint.StoreKey)
db := dbm.NewMemDB()
ms := store.NewCommitMultiStore(db)
ms.MountStoreWithDB(keyDistr, sdk.StoreTypeIAVL, db)
ms.MountStoreWithDB(tkeyStaking, sdk.StoreTypeTransient, nil)
ms.MountStoreWithDB(keyStaking, sdk.StoreTypeIAVL, db)
ms.MountStoreWithDB(keyAcc, sdk.StoreTypeIAVL, db)
ms.MountStoreWithDB(keyFeeCollection, sdk.StoreTypeIAVL, db)
ms.MountStoreWithDB(keyParams, sdk.StoreTypeIAVL, db)
ms.MountStoreWithDB(tkeyParams, sdk.StoreTypeTransient, db)
err := ms.LoadLatestVersion()
require.Nil(t, err)
pk := params.NewKeeper(mapp.Cdc, keyParams, tkeyParams)
ctx = sdk.NewContext(ms, abci.Header{ChainID: "foochainid"}, isCheckTx, logm.NewNopLogger())
accountKeeper := auth.NewAccountKeeper(mapp.Cdc, keyAcc, pk.Subspace(auth.DefaultParamspace), auth.ProtoBaseAccount)
bankKeeper := bank.NewBaseKeeper(accountKeeper, pk.Subspace(bank.DefaultParamspace), bank.DefaultCodespace)
sk = staking.NewKeeper(mapp.Cdc, keyStaking, tkeyStaking, bankKeeper, pk.Subspace(staking.DefaultParamspace), staking.DefaultCodespace)
feeKeeper := auth.NewFeeCollectionKeeper(mapp.Cdc, keyFeeCollection)
distrKeeper := distr.NewKeeper(mapp.Cdc, keyDistr, pk.Subspace(DefaultParamspace), bankKeeper, &sk, feeKeeper, distr.DefaultCodespace)
minKeeper := mint.NewKeeper(mapp.Cdc, keyMinting, pk.Subspace(mint.DefaultParamspace), &sk, feeKeeper)
keeper = NewKeeper(mapp.Cdc, distrKeeper, minKeeper, keyGov, pk, pk.Subspace("testgov"), bankKeeper, sk, sk, DefaultCodespace)
sk.SetPool(ctx, staking.InitialPool())
sk.SetParams(ctx, staking.DefaultParams())
sk.SetHooks(distrKeeper.Hooks())
// set genesis items required for distribution
distrKeeper.SetFeePool(ctx, types.InitialFeePool())
distrKeeper.SetCommunityTax(ctx, communityTax)
distrKeeper.SetBaseProposerReward(ctx, sdk.NewDecWithPrec(1, 2))
distrKeeper.SetBonusProposerReward(ctx, sdk.NewDecWithPrec(4, 2))
fmt.Println("community tax", distrKeeper.GetCommunityTax(ctx))
mapp.Router().AddRoute(RouterKey, NewHandler(keeper))
mapp.QueryRouter().AddRoute(QuerierRoute, NewQuerier(keeper))
mapp.SetEndBlocker(getEndBlocker(keeper))
mapp.SetInitChainer(getInitChainer(mapp, keeper, sk, genState))
require.NoError(t, mapp.CompleteSetup(keyStaking, tkeyStaking, keyGov))
// fill all the addresses with some coins, set the loose pool tokens simultaneously
// set the distribution hooks on staking
valTokens := sdk.TokensFromTendermintPower(10000000000000)
if genAccs == nil || len(genAccs) == 0 {
genAccs, addrs, pubKeys, privKeys = mock.CreateGenAccounts(numGenAccs,
sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, valTokens)})
}
mock.SetGenesis(mapp, genAccs)
for _, addr := range TestAddrs {
pool := sk.GetPool(ctx)
_, _, err := bankKeeper.AddCoins(ctx, addr, sdk.Coins{
sdk.NewCoin(sk.GetParams(ctx).BondDenom, initCoins),
})
require.Nil(t, err)
pool.NotBondedTokens = pool.NotBondedTokens.Add(initCoins)
sk.SetPool(ctx, pool)
}
return mapp, ctx, keeper, sk, addrs, pubKeys, privKeys
}
// CreateTestInputAdvanced is a hodgepodge of all sorts of input required for
// testing. It builds a stand-alone context backed by an in-memory multistore
// with params, auth, bank, staking, and distribution keepers wired together.
// Every address in TestAddrs is funded with initPower worth of bond-denom
// tokens (mirrored into the not-bonded pool), and the distribution genesis
// values (fee pool, community tax, proposer rewards) are initialized before
// the context is returned. numGenAccs, genState, and genAccs are accepted
// for signature parity with sibling helpers but are not used here.
func CreateTestInputAdvanced(t *testing.T, isCheckTx bool, initPower int64,
	communityTax sdk.Dec, numGenAccs int, genState GenesisState, genAccs []auth.Account) sdk.Context {

	initialCoins := sdk.TokensFromTendermintPower(initPower)

	// Store keys for every module this helper touches.
	distrKey := sdk.NewKVStoreKey(types.StoreKey)
	stakingKey := sdk.NewKVStoreKey(staking.StoreKey)
	stakingTKey := sdk.NewTransientStoreKey(staking.TStoreKey)
	accKey := sdk.NewKVStoreKey(auth.StoreKey)
	feeKey := sdk.NewKVStoreKey(auth.FeeStoreKey)
	paramsKey := sdk.NewKVStoreKey(params.StoreKey)
	paramsTKey := sdk.NewTransientStoreKey(params.TStoreKey)

	// Mount everything on a fresh in-memory commit multistore.
	db := dbm.NewMemDB()
	ms := store.NewCommitMultiStore(db)
	ms.MountStoreWithDB(distrKey, sdk.StoreTypeIAVL, db)
	ms.MountStoreWithDB(stakingTKey, sdk.StoreTypeTransient, nil)
	ms.MountStoreWithDB(stakingKey, sdk.StoreTypeIAVL, db)
	ms.MountStoreWithDB(accKey, sdk.StoreTypeIAVL, db)
	ms.MountStoreWithDB(feeKey, sdk.StoreTypeIAVL, db)
	ms.MountStoreWithDB(paramsKey, sdk.StoreTypeIAVL, db)
	ms.MountStoreWithDB(paramsTKey, sdk.StoreTypeTransient, db)
	require.Nil(t, ms.LoadLatestVersion())

	cdc := MakeTestCodec()
	paramsKeeper := params.NewKeeper(cdc, paramsKey, paramsTKey)
	ctx := sdk.NewContext(ms, abci.Header{ChainID: "foochainid"}, isCheckTx, logm.NewNopLogger())

	accountKeeper := auth.NewAccountKeeper(cdc, accKey, paramsKeeper.Subspace(auth.DefaultParamspace), auth.ProtoBaseAccount)
	bankKeeper := bank.NewBaseKeeper(accountKeeper, paramsKeeper.Subspace(bank.DefaultParamspace), bank.DefaultCodespace)
	stakingKeeper := staking.NewKeeper(cdc, stakingKey, stakingTKey, bankKeeper, paramsKeeper.Subspace(staking.DefaultParamspace), staking.DefaultCodespace)
	stakingKeeper.SetPool(ctx, staking.InitialPool())
	stakingKeeper.SetParams(ctx, staking.DefaultParams())

	// Fill all the test addresses with coins, growing the loose (not-bonded)
	// pool tokens by the same amount each time.
	for _, addr := range TestAddrs {
		pool := stakingKeeper.GetPool(ctx)
		_, _, err := bankKeeper.AddCoins(ctx, addr, sdk.Coins{
			sdk.NewCoin(stakingKeeper.GetParams(ctx).BondDenom, initialCoins),
		})
		require.Nil(t, err)
		pool.NotBondedTokens = pool.NotBondedTokens.Add(initialCoins)
		stakingKeeper.SetPool(ctx, pool)
	}

	feeKeeper := auth.NewFeeCollectionKeeper(cdc, feeKey)
	distrKeeper := distr.NewKeeper(cdc, distrKey, paramsKeeper.Subspace(DefaultParamspace), bankKeeper, stakingKeeper, feeKeeper, types.DefaultCodespace)

	// Route staking lifecycle events into the distribution keeper.
	stakingKeeper.SetHooks(distrKeeper.Hooks())

	// Genesis items required by the distribution module.
	distrKeeper.SetFeePool(ctx, types.InitialFeePool())
	distrKeeper.SetCommunityTax(ctx, communityTax)
	distrKeeper.SetBaseProposerReward(ctx, sdk.NewDecWithPrec(1, 2))
	distrKeeper.SetBonusProposerReward(ctx, sdk.NewDecWithPrec(4, 2))
	fmt.Println(distrKeeper.GetCommunityTax(ctx))

	return ctx
}
// getMockApp1 wires a mock gov app together with staking, bank, mint, and
// distribution keepers, registers the gov handler/querier and block hooks on
// the mock app, and seeds numGenAccs generated accounts (unless the caller
// supplies genAccs). It returns the app plus the keepers, addresses, and
// keys a test needs to drive it.
func getMockApp1(t *testing.T, isCheckTx bool, initPower int64,
	communityTax sdk.Dec, numGenAccs int, genState GenesisState, genAccs []auth.Account) (
	mapp *mock.App, keeper Keeper, sk staking.Keeper, addrs []sdk.AccAddress,
	pubKeys []crypto.PubKey, privKeys []crypto.PrivKey) {

	mapp = mock.NewApp()
	staking.RegisterCodec(mapp.Cdc)
	bank.RegisterCodec(mapp.Cdc)
	types.RegisterCodec(mapp.Cdc)
	RegisterCodec(mapp.Cdc)

	keyStaking := sdk.NewKVStoreKey(staking.StoreKey)
	tkeyStaking := sdk.NewTransientStoreKey(staking.TStoreKey)
	keyGov := sdk.NewKVStoreKey(StoreKey)
	keyDistr := sdk.NewKVStoreKey(distr.StoreKey)
	keyMinting := sdk.NewKVStoreKey(mint.StoreKey)
	keyParams := sdk.NewKVStoreKey(params.StoreKey)
	tkeyParams := sdk.NewTransientStoreKey(params.TStoreKey)
	keyAcc := sdk.NewKVStoreKey(auth.StoreKey)

	db := dbm.NewMemDB()
	ms := store.NewCommitMultiStore(db)
	ms.MountStoreWithDB(keyAcc, sdk.StoreTypeIAVL, db)
	ms.MountStoreWithDB(keyDistr, sdk.StoreTypeIAVL, db)
	ms.MountStoreWithDB(tkeyStaking, sdk.StoreTypeTransient, nil)
	ms.MountStoreWithDB(keyStaking, sdk.StoreTypeIAVL, db)
	// Bug fix: keyMinting was created but never mounted even though the mint
	// keeper is constructed over it below.
	ms.MountStoreWithDB(keyMinting, sdk.StoreTypeIAVL, db)
	ms.MountStoreWithDB(keyParams, sdk.StoreTypeIAVL, db)
	ms.MountStoreWithDB(tkeyParams, sdk.StoreTypeTransient, db)
	// Bug fix: the multistore was used to build a context without ever being
	// loaded; CreateTestInputAdvanced in this file loads it first.
	require.NoError(t, ms.LoadLatestVersion())

	pk := mapp.ParamsKeeper
	accountKeeper := auth.NewAccountKeeper(mapp.Cdc, keyAcc, pk.Subspace(auth.DefaultParamspace), auth.ProtoBaseAccount)
	ck := bank.NewBaseKeeper(accountKeeper, pk.Subspace(bank.DefaultParamspace), bank.DefaultCodespace)
	sk = staking.NewKeeper(mapp.Cdc, keyStaking, tkeyStaking, ck, pk.Subspace(staking.DefaultParamspace), staking.DefaultCodespace)
	feeKeeper := auth.NewFeeCollectionKeeper(mapp.Cdc, mapp.KeyFeeCollection)
	distrKeeper := distr.NewKeeper(mapp.Cdc, keyDistr, pk.Subspace(distr.DefaultParamspace), ck, &sk, feeKeeper, distr.DefaultCodespace)
	minKeeper := mint.NewKeeper(mapp.Cdc, keyMinting, pk.Subspace(mint.DefaultParamspace), &sk, feeKeeper)
	keeper = NewKeeper(mapp.Cdc, distrKeeper, minKeeper, keyGov, pk, pk.Subspace("testgov"), ck, sk, sk, DefaultCodespace)
	// NOTE: a dead reassignment `pk = params.NewKeeper(...)` used to sit
	// here; pk was never read again, so it has been removed.

	ctx := sdk.NewContext(ms, abci.Header{ChainID: "foochainid"}, isCheckTx, logm.NewNopLogger())
	sk.SetPool(ctx, staking.InitialPool())
	sk.SetParams(ctx, staking.DefaultParams())
	// Route staking lifecycle events into the distribution keeper.
	sk.SetHooks(distrKeeper.Hooks())

	// Genesis items required for distribution.
	distrKeeper.SetFeePool(ctx, types.InitialFeePool())
	distrKeeper.SetCommunityTax(ctx, communityTax)
	distrKeeper.SetBaseProposerReward(ctx, sdk.NewDecWithPrec(1, 2))
	distrKeeper.SetBonusProposerReward(ctx, sdk.NewDecWithPrec(4, 2))
	fmt.Println("community tax", distrKeeper.GetCommunityTax(ctx))

	mapp.Router().AddRoute(RouterKey, NewHandler(keeper))
	mapp.QueryRouter().AddRoute(QuerierRoute, NewQuerier(keeper))
	mapp.SetEndBlocker(getEndBlocker(keeper))
	mapp.SetInitChainer(getInitChainer(mapp, keeper, sk, genState))
	require.NoError(t, mapp.CompleteSetup(keyStaking, tkeyStaking, keyGov))

	// Fill the generated addresses with coins unless the caller supplied
	// genesis accounts. len() is nil-safe, so the explicit nil check the
	// original carried was redundant.
	valTokens := sdk.TokensFromTendermintPower(10000000000000)
	if len(genAccs) == 0 {
		genAccs, addrs, pubKeys, privKeys = mock.CreateGenAccounts(numGenAccs,
			sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, valTokens)})
	}
	mock.SetGenesis(mapp, genAccs)
	return mapp, keeper, sk, addrs, pubKeys, privKeys
}
|
package ims_api_connector
import (
"testing"
"time"
"net/http"
"reflect"
)
// assertStringsEqual fails the test when was differs from expected.
func assertStringsEqual(t *testing.T, was, expected string) {
	if was == expected {
		return
	}
	t.Errorf("'%v' != '%v'", was, expected)
}
func assertDurationsEqual(t *testing.T, was, expected time.Duration) {
if was != expected {
t.Errorf("%v != %v", was, expected)
}
}
// assertErrorIsNil fails the test when was is a non-nil error.
func assertErrorIsNil(t *testing.T, was error) {
	if was == nil {
		return
	}
	t.Errorf("%v != nil", was)
}
// assertBoolEqual fails the test when was differs from expected.
//
// Bug fix: the failure message previously printed the arguments in the
// order (expected, was), the reverse of every sibling helper in this file
// (all print "was != expected"); it now matches them.
func assertBoolEqual(t *testing.T, was, expected bool) {
	if was != expected {
		t.Errorf("%v != %v", was, expected)
	}
}
// assertDeepEqual fails the test when was and expected are not deeply equal
// (as defined by reflect.DeepEqual).
func assertDeepEqual(t *testing.T, was, expected interface{}) {
	if reflect.DeepEqual(was, expected) {
		return
	}
	t.Errorf("%v != %v", was, expected)
}
// setUp returns a connector configured with fixed test credentials and a
// fixed test endpoint, shared by every test in this file.
func setUp() *IMSAPIConnector {
	const (
		username = "some_username"
		password = "some_password"
		hostPort = "192.168.137.253:8000"
		timeout  = 5
	)
	return New(username, password, hostPort, timeout)
}
// Test_New verifies that New stores the credentials, derives the API base
// URL from the host, and configures the HTTP client timeout.
func Test_New(t *testing.T) {
	c := setUp()
	assertStringsEqual(t, c.Username, "some_username")
	assertStringsEqual(t, c.Password, "some_password")
	assertStringsEqual(t, c.BaseURL, "http://192.168.137.253:8000/api/")
	assertDurationsEqual(t, c.client.Timeout, 5*time.Second)
}
// Test_prepareResource verifies that buildResource joins the base URL with
// the given path and appends a trailing slash.
func Test_prepareResource(t *testing.T) {
	c := setUp()
	got := c.buildResource("auth/login")
	assertStringsEqual(t, got, "http://192.168.137.253:8000/api/auth/login/")
}
// Test_buildRequest verifies the headers buildRequest sets before and after
// authentication: form-encoded content type while unauthenticated, then
// JSON content type plus a token Authorization header once a key is set.
func Test_buildRequest(t *testing.T) {
	c := setUp()

	reqBefore, errBefore := c.buildRequest(http.MethodGet, "some url", "some body")
	assertStringsEqual(t, reqBefore.URL.String(), "some%20url")
	assertErrorIsNil(t, errBefore)
	assertStringsEqual(t, reqBefore.Header.Get("Content-Type"), "application/x-www-form-urlencoded")

	c.Authenticated = true
	c.Key = "some_key"
	reqAfter, errAfter := c.buildRequest(http.MethodGet, "some url", "some body")
	assertStringsEqual(t, reqAfter.URL.String(), "some%20url")
	assertErrorIsNil(t, errAfter)
	assertStringsEqual(t, reqAfter.Header.Get("Content-Type"), "application/json")
	assertStringsEqual(t, reqAfter.Header.Get("Authorization"), "Token some_key")
}
// Test_buildUsernameAndPasswordParams verifies the credentials are encoded
// as URL form parameters.
func Test_buildUsernameAndPasswordParams(t *testing.T) {
	c := setUp()
	got := string(c.buildUsernameAndPasswordParams())
	assertStringsEqual(t, got, "password=some_password&username=some_username")
}
// Test_handleAuthenticationResponse verifies a successful login body marks
// the connector authenticated and stores the returned key.
func Test_handleAuthenticationResponse(t *testing.T) {
	c := setUp()
	payload := []byte(`{"key": "1c1a552f5b013bd76b7d6acd731a8e46955f4b13"}`)
	ok := c.handleAuthenticationResponse(payload)
	assertBoolEqual(t, ok, true)
	assertBoolEqual(t, c.Authenticated, true)
	assertStringsEqual(t, c.Key, "1c1a552f5b013bd76b7d6acd731a8e46955f4b13")
}
// Test_handleGetAssetsResponse verifies that a two-element JSON asset
// payload is decoded into []Asset with every field preserved.
func Test_handleGetAssetsResponse(t *testing.T) {
	subject := setUp()
	body := []byte(`[{"id":1,"name":"Asset 1","is_deleted":false,"last_updated":"1991-02-06T00:00:00.000000+00:00","note":null,"json_data":null,"type_id":3,"primary_ip_device_id":5,"site_id":1,"tags":[7]},` +
		`{"id":2,"name":"Asset 2","is_deleted":false,"last_updated":"1991-02-06T00:00:00.000000+00:00","note":null,"json_data":null,"type_id":4,"primary_ip_device_id":6,"site_id":1,"tags":[8]}]`)
	// NOTE(review): the second return value is still discarded here, as in
	// the original — confirm whether the parse error should be asserted nil.
	was, _ := subject.handleGetAssetsResponse(body)

	lastUpdated := time.Date(1991, 2, 6, 0, 0, 0, 0, time.UTC)
	expected := []Asset{
		{ID: 1, Name: "Asset 1", IsDeleted: false, LastUpdated: lastUpdated, Note: "", JSONData: nil, TypeID: 3, PrimaryIPDeviceID: 5, SiteID: 1, Tags: []int{7}},
		{ID: 2, Name: "Asset 2", IsDeleted: false, LastUpdated: lastUpdated, Note: "", JSONData: nil, TypeID: 4, PrimaryIPDeviceID: 6, SiteID: 1, Tags: []int{8}},
	}

	// Robustness fix: guard the element loop so a short result fails with a
	// message instead of panicking on an out-of-range index.
	if len(was) != len(expected) {
		t.Fatalf("expected %d assets, got %d", len(expected), len(was))
	}
	for i := range expected {
		if expected[i].ID != was[i].ID {
			t.Errorf("ID not equal")
		}
		if expected[i].Name != was[i].Name {
			t.Errorf("Name not equal")
		}
		if expected[i].IsDeleted != was[i].IsDeleted {
			t.Errorf("IsDeleted not equal")
		}
		if expected[i].LastUpdated.Unix() != was[i].LastUpdated.Unix() {
			t.Log(expected[i].LastUpdated, was[i].LastUpdated)
			t.Errorf("LastUpdated not equal")
		}
		if expected[i].Note != was[i].Note {
			t.Errorf("Note not equal")
		}
		if expected[i].JSONData != was[i].JSONData {
			t.Errorf("JSONData not equal")
		}
		if expected[i].TypeID != was[i].TypeID {
			t.Errorf("TypeID not equal")
		}
		if expected[i].PrimaryIPDeviceID != was[i].PrimaryIPDeviceID {
			t.Errorf("PrimaryIPDeviceID not equal")
		}
		if expected[i].SiteID != was[i].SiteID {
			t.Errorf("SiteID not equal")
		}
		// Bug fix: this previously compared expected[i].Tags against itself
		// (expected vs expected), so Tags were never actually checked.
		if !reflect.DeepEqual(expected[i].Tags, was[i].Tags) {
			t.Errorf("Tags not equal")
		}
	}
}
|
// Package base provides a simple, unregistered gRPC server
// with functional options not included in the base library.
// Primarily useful to synchornize configurations across
// development teams.
package base
import (
"net"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// Server for gRPC without any registered services.
// The embedded S exposes the underlying *grpc.Server so callers can
// register their service implementations before calling ListenAndServe.
type Server struct {
	S *grpc.Server
}
// Option provides a functional interface for constructing grpc.ServerOptions.
// Each Option yields zero or more grpc.ServerOption values, or an error when
// its inputs are invalid (for example, unreadable TLS certificate files).
type Option func() ([]grpc.ServerOption, error)
// NewServer returns an unregistered gRPC Server with sensible defaults for
// configuration and middlewares. The supplied Options are evaluated in
// order and their grpc.ServerOptions concatenated; the first Option that
// errors aborts construction.
func NewServer(opts ...Option) (*Server, error) {
	var serverOpts []grpc.ServerOption
	for _, build := range opts {
		built, err := build()
		if err != nil {
			return nil, err
		}
		serverOpts = append(serverOpts, built...)
	}
	srv := grpc.NewServer(serverOpts...)
	return &Server{S: srv}, nil
}
// ListenAndServe opens a TCP listener on addr and serves gRPC traffic until
// the server stops. It refuses to serve when no services have been
// registered on the underlying server.
func (s *Server) ListenAndServe(addr string) error {
	if len(s.S.GetServiceInfo()) == 0 {
		return errors.New("no services registered to this server")
	}
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return errors.Wrapf(err, "unable to listen on %s", addr)
	}
	return s.S.Serve(lis)
}
// Bundle adapts standard gRPC options into our custom Option.
// The variadic slice is copied once up front (replacing the original
// element-by-element loop) so later mutation of the caller's slice cannot
// leak into the returned Option.
func Bundle(opts ...grpc.ServerOption) Option {
	bundled := append([]grpc.ServerOption(nil), opts...)
	return func() ([]grpc.ServerOption, error) { return bundled, nil }
}
// TLS parses certs for a valid TLS config. The returned Option reads the
// certificate/key pair at certPath/keyPath and yields a grpc.Creds server
// option; any parse error is returned unchanged.
func TLS(certPath, keyPath string) Option {
	return func() ([]grpc.ServerOption, error) {
		creds, err := credentials.NewServerTLSFromFile(certPath, keyPath)
		if err != nil {
			return nil, err
		}
		// Fix: return a literal nil on the success path instead of the
		// (necessarily nil) err variable, which read as if it could be set.
		return []grpc.ServerOption{grpc.Creds(creds)}, nil
	}
}
// Log for server and other basic, non-intrusive interceptors: ctxtags,
// zap logging, and panic recovery, installed on both the unary and the
// streaming paths. A nil logger falls back to zap's example logger so the
// middleware chain is always valid.
func Log(l *zap.Logger) Option {
	return func() ([]grpc.ServerOption, error) {
		if l == nil {
			l = zap.NewExample()
		}
		extractor := grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.CodeGenRequestFieldExtractor)
		unaryChain := grpc_middleware.WithUnaryServerChain(
			grpc_ctxtags.UnaryServerInterceptor(extractor),
			grpc_zap.UnaryServerInterceptor(l),
			grpc_recovery.UnaryServerInterceptor(),
		)
		streamChain := grpc_middleware.WithStreamServerChain(
			grpc_ctxtags.StreamServerInterceptor(extractor),
			grpc_zap.StreamServerInterceptor(l),
			grpc_recovery.StreamServerInterceptor(),
		)
		return []grpc.ServerOption{unaryChain, streamChain}, nil
	}
}
|
/*
Copyright (C) 2018 Synopsys, Inc.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package hubdashperf
import (
"time"
"github.com/prometheus/client_golang/prometheus"
)
// linkTypeDurationHistogram records hub API call durations in milliseconds,
// labeled by link type. Created and registered in init.
var linkTypeDurationHistogram *prometheus.HistogramVec

// errorCounter counts errors encountered while scraping the Hub API,
// labeled by error name. Created and registered in init.
var errorCounter *prometheus.CounterVec
// recordLinkTypeDuration observes one hub API call duration, in
// milliseconds, in the histogram bucket for the given link type.
func recordLinkTypeDuration(linkType string, duration time.Duration) {
	// Fix: divide as floats. The previous integer division
	// (duration / time.Millisecond) truncated sub-millisecond precision
	// to whole milliseconds before converting, so e.g. 0.8ms observed as 0.
	milliseconds := float64(duration) / float64(time.Millisecond)
	linkTypeDurationHistogram.With(prometheus.Labels{"linkType": linkType}).Observe(milliseconds)
}
func recordError(name string) {
errorCounter.With(prometheus.Labels{"name": name}).Inc()
}
// init builds the package metrics and registers them with the default
// Prometheus registry; MustRegister panics on duplicate registration.
func init() {
	linkTypeDurationHistogram = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "perceptor",
			Subsystem: "hubdashperf",
			Name:      "hub_api_link_duration",
			Help:      "durations for hub API calls in milliseconds, grouped by link type",
			// 20 exponential buckets from 1ms doubling up to ~524s.
			Buckets: prometheus.ExponentialBuckets(1, 2, 20),
		},
		[]string{"linkType"},
	)
	errorCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "perceptor",
			Subsystem: "hubdashperf",
			Name:      "hub_api_link_duration_errors",
			Help:      "errors encountered when scraping the Hub API",
		},
		[]string{"name"},
	)
	prometheus.MustRegister(linkTypeDurationHistogram, errorCounter)
}
|
package utils
import (
"net/http"
tr "github.com/ebikode/eLearning-core/translation"
validation "github.com/go-ozzo/ozzo-validation"
"github.com/go-ozzo/ozzo-validation/is"
)
// NotNilRule builds a NotNil rule for the field whose translation key is
// given; the "required" error message is localized from the request.
// Unlike Required, NotNil lets zero values (0, "") pass.
func NotNilRule(r *http.Request, key string) []validation.Rule {
	field := Translate(tr.TParam{Key: key, TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.NotNil.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": field},
					PluralCount:  nil,
				},
				r,
			),
		),
	}
}
// RequiredRule builds a Required rule for the field whose translation key
// is given; the error message is localized from the request.
func RequiredRule(r *http.Request, key string) []validation.Rule {
	field := Translate(tr.TParam{Key: key, TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": field},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{validation.Required.Error(requiredMsg)}
}
// RequiredIntRule builds a Required rule for an integer field.
// NOTE(review): despite the name, the is.Int check is commented out below,
// so nothing verifies the value is actually numeric — confirm intended.
func RequiredIntRule(r *http.Request, key string) []validation.Rule {
	field := Translate(tr.TParam{Key: key, TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.Required.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": field},
					PluralCount:  nil,
				},
				r,
			),
		),
		// is.Int.Error(
		// 	Translate(
		// 		tr.TParam{
		// 			Key: "validation.number",
		// 			TemplateData: map[string]interface{}{"Field": field},
		// 			PluralCount: nil,
		// 		},
		// 		r,
		// 	),
		// ),
	}
}
// IDRule validates an ID: it cannot be empty. The message uses the literal
// field name "id".
func IDRule(r *http.Request) []validation.Rule {
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": "id"},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{validation.Required.Error(requiredMsg)}
}
// PositionRule validates a position: it cannot be empty.
// NOTE(review): the comment-of-record says position "must be an integer",
// but the is.Int check is commented out below — confirm intended.
func PositionRule(r *http.Request) []validation.Rule {
	position := Translate(tr.TParam{Key: "general.position", TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.Required.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": position},
					PluralCount:  nil,
				},
				r,
			),
		),
		// is.Int.Error(
		// 	Translate(
		// 		tr.TParam{
		// 			Key: "validation.number",
		// 			TemplateData: map[string]interface{}{"Field": position},
		// 			PluralCount: nil,
		// 		},
		// 		r,
		// 	),
		// ),
	}
}
// ScoreRule validates a score: it cannot be nil. NotNil (rather than
// Required) is used, so a zero score passes. (The previous header said
// "position", a copy-paste left-over from PositionRule.)
// NOTE(review): the is.Int check is commented out below — the value is not
// verified to be numeric.
func ScoreRule(r *http.Request) []validation.Rule {
	score := Translate(tr.TParam{Key: "general.score", TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.NotNil.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": score},
					PluralCount:  nil,
				},
				r,
			),
		),
		// is.Int.Error(
		// 	Translate(
		// 		tr.TParam{
		// 			Key: "validation.number",
		// 			TemplateData: map[string]interface{}{"Field": score},
		// 			PluralCount: nil,
		// 		},
		// 		r,
		// 	),
		// ),
	}
}
// PhoneRule validates a mobile phone number: it cannot be empty and its
// length must be between 7 and 20.
func PhoneRule(r *http.Request) []validation.Rule {
	phone := Translate(tr.TParam{Key: "general.phone", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": phone},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": phone, "Min": 7, "Max": 20},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(7, 20).Error(lengthMsg),
	}
}
// CardNoRule validates a credit card number: it cannot be empty and must be
// exactly 16 characters long.
func CardNoRule(r *http.Request) []validation.Rule {
	cardNo := Translate(tr.TParam{Key: "general.card_no", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": cardNo},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": cardNo, "Min": 16, "Max": 16},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(16, 16).Error(lengthMsg),
	}
}
// PinRule validates a PIN: it cannot be empty and must be exactly 4
// characters long.
func PinRule(r *http.Request) []validation.Rule {
	pin := Translate(tr.TParam{Key: "general.pin", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": pin},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": pin, "Min": 4, "Max": 4},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(4, 4).Error(lengthMsg),
	}
}
// OTPRule validates a one-time password: it cannot be empty and must be
// exactly 6 characters long. (The previous header said "PIN ... 4 digits",
// a copy-paste left-over from PinRule.)
func OTPRule(r *http.Request) []validation.Rule {
	otp := Translate(tr.TParam{Key: "general.otp", TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.Required.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": otp},
					PluralCount:  nil,
				},
				r,
			),
		),
		validation.Length(6, 6).Error(
			Translate(
				tr.TParam{
					Key:          "validation.length",
					TemplateData: map[string]interface{}{"Field": otp, "Min": 6, "Max": 6},
					PluralCount:  nil,
				},
				r,
			),
		),
	}
}
// CardMonthRule validates a credit-card expiry month: it cannot be empty
// and must be exactly 2 characters long.
func CardMonthRule(r *http.Request) []validation.Rule {
	cardMonth := Translate(tr.TParam{Key: "general.expiry_month", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": cardMonth},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": cardMonth, "Min": 2, "Max": 2},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(2, 2).Error(lengthMsg),
	}
}
// CardYearRule validates a credit-card expiry year: it cannot be empty and
// must be exactly 2 characters long.
func CardYearRule(r *http.Request) []validation.Rule {
	cardYear := Translate(tr.TParam{Key: "general.expiry_year", TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.Required.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": cardYear},
					PluralCount:  nil,
				},
				r,
			),
		),
		validation.Length(2, 2).Error(
			Translate(
				tr.TParam{
					Key:          "validation.length",
					TemplateData: map[string]interface{}{"Field": cardYear, "Min": 2, "Max": 2},
					PluralCount:  nil,
				},
				r,
			),
		),
	}
}
// CardCvvRule validates a card CVV: it cannot be empty and must be exactly
// 3 characters long.
func CardCvvRule(r *http.Request) []validation.Rule {
	// NOTE(review): unlike the sibling rules, the field label is the
	// literal "cvv", not a translated key — confirm intended.
	cvv := "cvv"
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": cvv},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": cvv, "Min": 3, "Max": 3},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		// Bug fix: the rule enforced Length(2, 2) while its own error
		// message (and CVV semantics) say 3 digits; it now enforces 3.
		validation.Length(3, 3).Error(lengthMsg),
	}
}
// YearRule validates a year: when present, its length must be exactly 4
// characters. The field itself is optional (no Required rule).
func YearRule(r *http.Request) []validation.Rule {
	year := Translate(tr.TParam{Key: "general.year", TemplateData: nil, PluralCount: nil}, r)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": year, "Min": 4, "Max": 4},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{validation.Length(4, 4).Error(lengthMsg)}
}
// CustomernameRule validates a customer name: it cannot be empty and its
// length must be between 3 and 20.
// NOTE(review): the original comment also claimed "Alpha Numeric", but no
// alphanumeric rule is applied here — confirm whether is.Alphanumeric is
// missing or the comment was wrong.
func CustomernameRule(r *http.Request) []validation.Rule {
	customername := Translate(tr.TParam{Key: "general.customername", TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.Required.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": customername},
					PluralCount:  nil,
				},
				r,
			),
		),
		validation.Length(3, 20).Error(
			Translate(
				tr.TParam{
					Key:          "validation.length",
					TemplateData: map[string]interface{}{"Field": customername, "Min": 3, "Max": 20},
					PluralCount:  nil,
				},
				r,
			),
		),
	}
}
// SearchPlanNameRule validates a plan name used in search: it cannot be
// empty and its length must be between 3 and 50.
func SearchPlanNameRule(r *http.Request) []validation.Rule {
	name := Translate(tr.TParam{Key: "general.name", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": name},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": name, "Min": 3, "Max": 50},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(3, 50).Error(lengthMsg),
	}
}
// NameRule validates a name: it cannot be empty and its length must be
// between 3 and 50.
func NameRule(r *http.Request) []validation.Rule {
	name := Translate(tr.TParam{Key: "general.name", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": name},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": name, "Min": 3, "Max": 50},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(3, 50).Error(lengthMsg),
	}
}
// ShortCodeRule validates a short code: it cannot be empty and its length
// must be between 2 and 20.
func ShortCodeRule(r *http.Request) []validation.Rule {
	shortCode := Translate(tr.TParam{Key: "general.short_code", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": shortCode},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key: "validation.length",
			// Bug fix: the message template said Min 3 while the rule (and
			// the function's documented contract) enforce Length(2, 20);
			// the template now matches the rule.
			TemplateData: map[string]interface{}{"Field": shortCode, "Min": 2, "Max": 20},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(2, 20).Error(lengthMsg),
	}
}
// QuestionRule validates a question: it cannot be empty and its length must
// be between 10 and 100.
func QuestionRule(r *http.Request) []validation.Rule {
	question := Translate(tr.TParam{Key: "general.question", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": question},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": question, "Min": 10, "Max": 100},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(10, 100).Error(lengthMsg),
	}
}
// KeyRule validates a resource's unique key: it cannot be empty and its
// length must be between 3 and 50.
func KeyRule(r *http.Request) []validation.Rule {
	resourceKey := Translate(tr.TParam{Key: "general.key", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": resourceKey},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": resourceKey, "Min": 3, "Max": 50},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(3, 50).Error(lengthMsg),
	}
}
// LangKeyRule validates a language key: it cannot be empty and its length
// must be between 3 and 50.
func LangKeyRule(r *http.Request) []validation.Rule {
	langKey := Translate(tr.TParam{Key: "general.lang_key", TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.Required.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": langKey},
					PluralCount:  nil,
				},
				r,
			),
		),
		validation.Length(3, 50).Error(
			Translate(
				tr.TParam{
					Key:          "validation.length",
					TemplateData: map[string]interface{}{"Field": langKey, "Min": 3, "Max": 50},
					PluralCount:  nil,
				},
				r,
			),
		),
	}
}
// DescLangKeyRule validates a description language key: it cannot be empty
// and its length must be between 3 and 50.
func DescLangKeyRule(r *http.Request) []validation.Rule {
	langKey := Translate(tr.TParam{Key: "general.desc_lang_key", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": langKey},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": langKey, "Min": 3, "Max": 50},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(3, 50).Error(lengthMsg),
	}
}
// EmailRule validates an email field: it cannot be empty and its length
// must be between 5 and 100.
// NOTE(review): the is.Email format check is commented out below, so the
// address format is NOT actually validated — confirm this is intended.
func EmailRule(r *http.Request) []validation.Rule {
	email := Translate(tr.TParam{Key: "general.email", TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		// is.Email.Error(
		// 	Translate(
		// 		tr.TParam{
		// 			Key: "validation.email",
		// 			TemplateData: nil,
		// 			PluralCount: nil,
		// 		},
		// 		r,
		// 	),
		// ),
		validation.Required.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": email},
					PluralCount:  nil,
				},
				r,
			),
		),
		validation.Length(5, 100).Error(
			Translate(
				tr.TParam{
					Key:          "validation.length",
					TemplateData: map[string]interface{}{"Field": email, "Min": 5, "Max": 100},
					PluralCount:  nil,
				},
				r,
			),
		),
	}
}
// AboutRule validates an "about" text: it cannot be empty and its length
// must be between 5 and 150. (The previous header said "Email", a
// copy-paste left-over from EmailRule.)
func AboutRule(r *http.Request) []validation.Rule {
	about := Translate(tr.TParam{Key: "general.about", TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.Required.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": about},
					PluralCount:  nil,
				},
				r,
			),
		),
		validation.Length(5, 150).Error(
			Translate(
				tr.TParam{
					Key:          "validation.length",
					TemplateData: map[string]interface{}{"Field": about, "Min": 5, "Max": 150},
					PluralCount:  nil,
				},
				r,
			),
		),
	}
}
// LangRule validates a language code string: it cannot be empty and must be
// exactly 2 characters long (e.g. an ISO 639-1 style code).
func LangRule(r *http.Request) []validation.Rule {
	language := Translate(tr.TParam{Key: "general.language", TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": language},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": language, "Min": 2, "Max": 2},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(2, 2).Error(lengthMsg),
	}
}
// AvatarRule validates an avatar value: when present it must be a URL and
// its length must be between 5 and 200. The field itself is optional (no
// Required rule).
func AvatarRule(r *http.Request) []validation.Rule {
	avatar := Translate(tr.TParam{Key: "general.avatar", TemplateData: nil, PluralCount: nil}, r)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": avatar, "Min": 5, "Max": 200},
			PluralCount:  nil,
		},
		r,
	)
	urlMsg := Translate(
		tr.TParam{
			Key:          "validation.url",
			TemplateData: nil,
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Length(5, 200).Error(lengthMsg),
		is.URL.Error(urlMsg),
	}
}
// EnumRule validates an enum field: it cannot be empty and must be one of
// the supplied allowed values. The field label is translated from
// fieldNameKey.
func EnumRule(r *http.Request, fieldNameKey string, enum ...interface{}) []validation.Rule {
	field := Translate(tr.TParam{Key: fieldNameKey, TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": field},
			PluralCount:  nil,
		},
		r,
	)
	enumMsg := Translate(
		tr.TParam{
			Key:          "validation.enum",
			TemplateData: map[string]interface{}{"Field": field, "Values": enum},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.In(enum...).Error(enumMsg),
	}
}
// EnumNotNilRule is like EnumRule but uses NotNil instead of Required, so
// zero values (e.g. 0, "") pass the presence check while still having to
// be one of the supplied allowed values.
func EnumNotNilRule(r *http.Request, fieldNameKey string, enum ...interface{}) []validation.Rule {
	field := Translate(tr.TParam{Key: fieldNameKey, TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.NotNil.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": field},
					PluralCount:  nil,
				},
				r,
			),
		),
		validation.In(enum...).Error(
			Translate(
				tr.TParam{
					Key:          "validation.enum",
					TemplateData: map[string]interface{}{"Field": field, "Values": enum},
					PluralCount:  nil,
				},
				r,
			),
		),
	}
}
// BankNameRule validates a bank name: it cannot be empty and its length
// must be between 3 and 100. The field label is translated from the
// supplied key.
func BankNameRule(r *http.Request, key string) []validation.Rule {
	bankName := Translate(tr.TParam{Key: key, TemplateData: nil, PluralCount: nil}, r)
	requiredMsg := Translate(
		tr.TParam{
			Key:          "validation.required",
			TemplateData: map[string]interface{}{"Field": bankName},
			PluralCount:  nil,
		},
		r,
	)
	lengthMsg := Translate(
		tr.TParam{
			Key:          "validation.length",
			TemplateData: map[string]interface{}{"Field": bankName, "Min": 3, "Max": 100},
			PluralCount:  nil,
		},
		r,
	)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(3, 100).Error(lengthMsg),
	}
}
// BankaccountRule builds the validation rules for a bank account
// number: required, with length between 3 and 50 characters.
// NOTE(review): Go convention would name this BankAccountRule; renaming
// would break callers, so the name is left as-is.
func BankaccountRule(r *http.Request) []validation.Rule {
	label := Translate(tr.TParam{Key: "general.account_number"}, r)
	requiredMsg := Translate(tr.TParam{
		Key:          "validation.required",
		TemplateData: map[string]interface{}{"Field": label},
	}, r)
	lengthMsg := Translate(tr.TParam{
		Key:          "validation.length",
		TemplateData: map[string]interface{}{"Field": label, "Min": 3, "Max": 50},
	}, r)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(3, 50).Error(lengthMsg),
	}
}
// BankRoutingRule builds the validation rules for a routing number:
// length between 3 and 50 characters. The Required rule is deliberately
// absent (it was previously commented out), so empty values pass.
func BankRoutingRule(r *http.Request, key string) []validation.Rule {
	label := Translate(tr.TParam{Key: key}, r)
	lengthMsg := Translate(tr.TParam{
		Key:          "validation.length",
		TemplateData: map[string]interface{}{"Field": label, "Min": 3, "Max": 50},
	}, r)
	return []validation.Rule{
		validation.Length(3, 50).Error(lengthMsg),
	}
}
// AnswerRule builds the validation rules for a security answer:
// required, with length between 3 and 50 characters. (The code
// enforces a minimum of 3, not the 2 an older comment claimed.)
func AnswerRule(r *http.Request) []validation.Rule {
	label := Translate(tr.TParam{Key: "general.answer"}, r)
	requiredMsg := Translate(tr.TParam{
		Key:          "validation.required",
		TemplateData: map[string]interface{}{"Field": label},
	}, r)
	lengthMsg := Translate(tr.TParam{
		Key:          "validation.length",
		TemplateData: map[string]interface{}{"Field": label, "Min": 3, "Max": 50},
	}, r)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(3, 50).Error(lengthMsg),
	}
}
// SolutionRule builds the validation rules for a solution field:
// required, with length between 5 and 200 characters.
func SolutionRule(r *http.Request) []validation.Rule {
	label := Translate(tr.TParam{Key: "general.solution"}, r)
	requiredMsg := Translate(tr.TParam{
		Key:          "validation.required",
		TemplateData: map[string]interface{}{"Field": label},
	}, r)
	lengthMsg := Translate(tr.TParam{
		Key:          "validation.length",
		TemplateData: map[string]interface{}{"Field": label, "Min": 5, "Max": 200},
	}, r)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(5, 200).Error(lengthMsg),
	}
}
// PasswordRule builds the validation rules for a password: required,
// with length between 7 and 50 characters.
func PasswordRule(r *http.Request) []validation.Rule {
	label := Translate(tr.TParam{Key: "general.password"}, r)
	requiredMsg := Translate(tr.TParam{
		Key:          "validation.required",
		TemplateData: map[string]interface{}{"Field": label},
	}, r)
	lengthMsg := Translate(tr.TParam{
		Key:          "validation.length",
		TemplateData: map[string]interface{}{"Field": label, "Min": 7, "Max": 50},
	}, r)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(7, 50).Error(lengthMsg),
	}
}
// MessageRule builds the validation rules for a message: required,
// with length between 1 and 1000 characters.
func MessageRule(r *http.Request) []validation.Rule {
	label := Translate(tr.TParam{Key: "general.message"}, r)
	requiredMsg := Translate(tr.TParam{
		Key:          "validation.required",
		TemplateData: map[string]interface{}{"Field": label},
	}, r)
	lengthMsg := Translate(tr.TParam{
		Key:          "validation.length",
		TemplateData: map[string]interface{}{"Field": label, "Min": 1, "Max": 1000},
	}, r)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(1, 1000).Error(lengthMsg),
	}
}
// CommentRule builds the validation rules for an optional comment:
// when present, its length must be between 3 and 200 characters.
func CommentRule(r *http.Request) []validation.Rule {
	label := Translate(tr.TParam{Key: "general.comment"}, r)
	lengthMsg := Translate(tr.TParam{
		Key:          "validation.length",
		TemplateData: map[string]interface{}{"Field": label, "Min": 3, "Max": 200},
	}, r)
	return []validation.Rule{
		validation.Length(3, 200).Error(lengthMsg),
	}
}
// RequiredCommentRule builds the validation rules for a mandatory
// comment: required, with length between 3 and 200 characters.
func RequiredCommentRule(r *http.Request) []validation.Rule {
	label := Translate(tr.TParam{Key: "general.comment"}, r)
	requiredMsg := Translate(tr.TParam{
		Key:          "validation.required",
		TemplateData: map[string]interface{}{"Field": label},
	}, r)
	lengthMsg := Translate(tr.TParam{
		Key:          "validation.length",
		TemplateData: map[string]interface{}{"Field": label, "Min": 3, "Max": 200},
	}, r)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
		validation.Length(3, 200).Error(lengthMsg),
	}
}
// MoneyRule builds the validation rules for a money amount: required.
// The numeric-format check (is.Float) is deliberately absent — it was
// previously commented out; re-enable it if format validation is
// wanted here.
func MoneyRule(r *http.Request) []validation.Rule {
	label := Translate(tr.TParam{Key: "general.money"}, r)
	requiredMsg := Translate(tr.TParam{
		Key:          "validation.required",
		TemplateData: map[string]interface{}{"Field": label},
	}, r)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
	}
}
// DurationRule builds the validation rules for a duration: required.
// The numeric-format check (is.Int) is deliberately absent — it was
// previously commented out; re-enable it if format validation is
// wanted here.
func DurationRule(r *http.Request) []validation.Rule {
	label := Translate(tr.TParam{Key: "general.duration"}, r)
	requiredMsg := Translate(tr.TParam{
		Key:          "validation.required",
		TemplateData: map[string]interface{}{"Field": label},
	}, r)
	return []validation.Rule{
		validation.Required.Error(requiredMsg),
	}
}
// DateRule builds the validation rules for a date field: required, and
// must parse as a "YYYY-MM-DD hh:mm:ss" timestamp.
func DateRule(r *http.Request) []validation.Rule {
	date := Translate(tr.TParam{Key: "general.date", TemplateData: nil, PluralCount: nil}, r)
	return []validation.Rule{
		validation.Required.Error(
			Translate(
				tr.TParam{
					Key:          "validation.required",
					TemplateData: map[string]interface{}{"Field": date},
					PluralCount:  nil,
				},
				r,
			),
		),
		// BUG FIX: the layout was "2006-01-01 15:04:05", which repeats the
		// month token ("01") and omits the day-of-month token ("02"). With
		// that layout any date whose day is outside 01..12 failed to parse.
		// Go's reference layout is "2006-01-02 15:04:05".
		validation.Date("2006-01-02 15:04:05").Error(
			Translate(
				tr.TParam{
					Key:          "validation.date",
					TemplateData: map[string]interface{}{"Field": date},
					PluralCount:  nil,
				},
				r,
			),
		),
	}
}
|
package runmon
import (
"fmt"
"io/ioutil"
"os"
"github.com/kasworld/nonkey/interpreter/evaluator"
"github.com/kasworld/nonkey/interpreter/lexer"
"github.com/kasworld/nonkey/interpreter/object"
"github.com/kasworld/nonkey/interpreter/parser"
)
// RunFile loads the script at filename and evaluates it in env. When
// the file cannot be read, the failure is reported on stderr and env is
// returned unchanged.
func RunFile(filename string, env *object.Environment) *object.Environment {
	src, err := ioutil.ReadFile(filename)
	if err != nil {
		fmt.Fprintf(os.Stderr, "fail to load %v %v\n", filename, err)
		return env
	}
	return RunString(string(src), env)
}
// RunString lexes, parses, and evaluates input inside env. Parse errors
// and runtime errors are printed to stderr; for a runtime error the
// offending source line is printed as well. env is returned in all
// cases.
func RunString(input string, env *object.Environment) *object.Environment {
	lx := lexer.New(input)
	ps := parser.New(lx)
	program := ps.ParseProgram()
	if errs := ps.Errors(); len(errs) != 0 {
		for _, msg := range errs {
			fmt.Fprintf(os.Stderr, "%v\n", msg)
		}
		return env
	}
	result := evaluator.Eval(program, env)
	if result != nil {
		if runtimeErr, ok := result.(*object.Error); ok {
			fmt.Fprintf(os.Stderr, "%v\n", result.Inspect())
			fmt.Fprintf(os.Stderr, "%v\n", lx.GetLineStr(runtimeErr.Node.GetToken().Line))
		} else {
			fmt.Fprintf(os.Stderr, "%v\n", result.Inspect())
		}
	}
	return env
}
|
package payment
import (
"reflect"
"testing"
"github.com/stretchr/testify/mock"
"github.com/satori/go.uuid"
"github.com/tppgit/we_service/core"
)
// mockPaymentRepo is a testify-based test double for PaymentRepository.
// Only the methods overridden below are backed by mock expectations;
// the embedded PaymentRepository nominally satisfies the rest of the
// interface (calling an un-overridden method would hit the nil embedded
// value — presumably never exercised by these tests).
type mockPaymentRepo struct {
	mock.Mock
	PaymentRepository
}
// InsertPayment records the invocation with testify and returns the
// error configured for this expectation (argument 0 of Return).
func (m *mockPaymentRepo) InsertPayment(model *Payment) (err error) {
	return m.Called(model).Error(0)
}
// Test_implPaymentService_InsertPayment is a table test for
// implPaymentService.InsertPayment covering the success path plus the
// validation failures (token, order id, email, amount) that the service
// surfaces via the Status field of the response.
// NOTE(review): the `fields` struct is declared but no case populates
// it — each run wires a fresh mockPaymentRepo instead; consider
// removing it.
// NOTE(review): the "Test Ok" case sets wantErr: true and expects an
// OrderId different from the input — verify these expectations are
// intentional and not copy-paste artifacts.
func Test_implPaymentService_InsertPayment(t *testing.T) {
	type fields struct {
		Repo PaymentRepository
	}
	type args struct {
		model *Payment
	}
	tests := []struct {
		name        string
		fields      fields
		args        args
		wantRespone *core.PaymentStripeRespone
		wantErr     bool
	}{
		{
			name: "Test Ok",
			args: args{
				model: &Payment{
					Token:   "tok_visa",
					Email:   "payment@gmail.com",
					OrderId: uuid.FromStringOrNil("1e13af24-c7d8-42be-9cc7-f7659441bf32"),
					Amount:  int64(100),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				OrderId: "0d632dad-b087-4d5e-b724-b30c48bb76f3",
				Amount:  int64(100),
				Status:  STATUS_PAYMENT_SUCCESS,
			},
			wantErr: true,
		},
		{
			name: "Test Fail Token",
			args: args{
				model: &Payment{
					Token:   "xxxxxx",
					Email:   "payment@gmail.com",
					OrderId: uuid.FromStringOrNil("1e13af24-c7d8-42be-9cc7-f7659441bf32"),
					Amount:  int64(100),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				Status: STATUS_PAYMENT_FAIL,
			},
			wantErr: false,
		},
		{
			name: "Test Fail Amount",
			args: args{
				model: &Payment{
					Token:   "tok_visa",
					Email:   "payment@gmail.com",
					OrderId: uuid.FromStringOrNil("1e13af24-c7d8-42be-9cc7-f7659441bf32"),
					Amount:  int64(1),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				Status: STATUS_PAYMENT_FAIL,
			},
			wantErr: false,
		},
		{
			name: "Test Token must not be null",
			args: args{
				model: &Payment{
					Token:   "",
					Email:   "payment@gmail.com",
					OrderId: uuid.FromStringOrNil("1e13af24-c7d8-42be-9cc7-f7659441bf32"),
					Amount:  int64(100),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				Status: "Token must not be null",
			},
			wantErr: false,
		},
		{
			name: "Test OrderId must not be null",
			args: args{
				model: &Payment{
					Token:   "tok_visa",
					Email:   "payment@gmail.com",
					OrderId: uuid.FromStringOrNil(""),
					Amount:  int64(100),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				Status: "OrderId must not be null",
			},
			wantErr: false,
		},
		{
			name: "Test Email must not be null",
			args: args{
				model: &Payment{
					Token:   "tok_visa",
					Email:   "",
					OrderId: uuid.FromStringOrNil("1e13af24-c7d8-42be-9cc7-f7659441bf32"),
					Amount:  int64(100),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				Status: "Email must not be null",
			},
			wantErr: false,
		},
		{
			name: "Test Email wrong format",
			args: args{
				model: &Payment{
					Token:   "tok_visa",
					Email:   "payment.com",
					OrderId: uuid.FromStringOrNil("1e13af24-c7d8-42be-9cc7-f7659441bf32"),
					Amount:  int64(100),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				Status: "Email Email wrong format",
			},
			wantErr: false,
		},
		{
			name: "Test Amount must be larger zero",
			args: args{
				model: &Payment{
					Token:   "tok_visa",
					Email:   "payment@gmail.com",
					OrderId: uuid.FromStringOrNil("1e13af24-c7d8-42be-9cc7-f7659441bf32"),
					Amount:  int64(0),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				Status: "Amount must be larger zero",
			},
			wantErr: false,
		},
		{
			name: "Test Amount must be equal with order price",
			args: args{
				model: &Payment{
					Token:   "tok_visa",
					Email:   "payment@gmail.com",
					OrderId: uuid.FromStringOrNil("1e13af24-c7d8-42be-9cc7-f7659441bf32"),
					Amount:  int64(50),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				Status: "Amount must be equal with order price",
			},
			wantErr: false,
		},
		{
			name: "Test Order must be existed",
			args: args{
				model: &Payment{
					Token:   "tok_visa",
					Email:   "payment@gmail.com",
					OrderId: uuid.FromStringOrNil("xxxx"),
					Amount:  int64(1),
				},
			},
			wantRespone: &core.PaymentStripeRespone{
				Status: "Order must be existed",
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		// Each case gets a fresh mock whose InsertPayment succeeds; the
		// behavior under test lives entirely in the service layer.
		mockRepo := &mockPaymentRepo{}
		mockRepo.On("InsertPayment", mock.Anything).Return(nil)
		t.Run(tt.name, func(t *testing.T) {
			p := &implPaymentService{
				Repo: mockRepo,
			}
			gotRespone, err := p.InsertPayment(tt.args.model)
			if (err != nil) != tt.wantErr {
				t.Errorf("implPaymentService.InsertPayment() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(gotRespone, tt.wantRespone) {
				t.Errorf("implPaymentService.InsertPayment() = %v, want %v", gotRespone, tt.wantRespone)
			}
		})
	}
}
|
/*
* @lc app=leetcode.cn id=1 lang=golang
*
* [1] 两数之和
*/
// @lc code=start
package main
import "fmt"
// twoSum returns the indices of two numbers in nums that sum to target,
// with the later index first. When no pair exists it returns an empty
// (non-nil) slice.
func twoSum(nums []int, target int) []int {
	seen := map[int]int{}
	result := []int{}
	for idx, num := range nums {
		if prev, ok := seen[target-num]; ok {
			result = append(result, idx, prev)
			break
		}
		seen[num] = idx
	}
	return result
}
// @lc code=end
// main exercises twoSum on a few sample inputs, printing each input
// slice alongside the resulting index pair.
func main() {
	cases := []struct {
		nums   []int
		target int
	}{
		{[]int{2, 7, 11, 15}, 9},
		{[]int{3, 0, -2, -1, 1, 2}, 0},
		{[]int{-4, -2, 1, -5, -4, -4, 4, -2, 0, 4, 0, -2, 3, 1, -5, 0}, 0},
	}
	for _, c := range cases {
		fmt.Printf("%v, %v\n", c.nums, twoSum(c.nums, c.target))
	}
}
|
package cherry
import (
"errors"
"golang.org/x/net/context"
)
// Context keys used to stash pipeline objects on a context.Context.
// NOTE(review): plain string keys can collide with values set by other
// packages; an unexported key type is the conventional fix, but
// changing the keys could affect any code relying on these literals.
// NOTE(review): middleNodeKey is not referenced in this file — verify
// it is used elsewhere or remove it.
const (
	nodeKey = "node"
	middleNodeKey = "middleNode"
	prevContextKey = "prevContext"
)
// queueSize is the buffer capacity of each per-pipeline message queue.
const (
	queueSize = 1000
)
// SessionNilError is returned when a write reaches the session layer
// but no session is stored on the node's context.
// NOTE(review): Go convention would name this ErrSessionNil; renaming
// would break external callers.
var (
	SessionNilError = errors.New("session null")
)
// Node is one stage of a message pipeline. Inbound traffic enters via
// Receive and travels onward via Forward; responses travel back toward
// the origin via Reply and Send.
type Node interface {
	// Receive accepts an inbound message for processing.
	Receive(msg *Message) error
	// Send queues a message on the write path.
	Send(msg *Message) error
	// Forward hands a message to the next node in the chain.
	Forward(msg *Message) error
	// Reply queues a response heading back up the chain.
	Reply(msg *Message) error
	// Error reports a processing error.
	Error(err error)
	// Start runs the node's dispatch loop (blocking).
	Start()
	// Close releases the node's resources.
	Close() error
}
// NodeValue returns the Node stored on ctx under nodeKey, or nil when
// the value is absent or not a Node.
func NodeValue(ctx context.Context) Node {
	node, _ := ctx.Value(nodeKey).(Node)
	return node
}
// WithNode returns a child of parent that carries node under nodeKey,
// retrievable via NodeValue.
func WithNode(parent context.Context, node Node) context.Context {
	return context.WithValue(parent, nodeKey, node)
}
// PrevContextValue returns the "previous" context stored on ctx under
// prevContextKey, or nil when absent or of the wrong type.
func PrevContextValue(ctx context.Context) context.Context {
	prev, _ := ctx.Value(prevContextKey).(context.Context)
	return prev
}
// WithPrevContext returns a child of parent that carries ctx under
// prevContextKey, retrievable via PrevContextValue.
func WithPrevContext(parent context.Context, ctx context.Context) context.Context {
	return context.WithValue(parent, prevContextKey, ctx)
}
// NewPipelienNode builds a PipelineNode wired with the given receive
// (rh), write (wh), forward (fh) and reply (rph) handlers, buffered
// queues of queueSize, and the downstream node `next`.
// NOTE(review): the name is a typo of "NewPipelineNode"; renaming would
// break callers, so it is left as-is.
// NOTE(review): the cancel function from context.WithCancel is
// discarded (`go vet` flags this as a lost cancel), so the node's
// context can only be cancelled through the parent; Close does not
// cancel it either.
func NewPipelienNode(parent context.Context, rh Handler, wh Handler, fh Handler, rph Handler, next Node) Node {
	p := &PipelineNode{}
	p.ctx, _ = context.WithCancel(parent)
	p.rh = rh
	p.rq = make(chan *Message, queueSize)
	p.wh = wh
	p.wq = make(chan *Message, queueSize)
	p.fh = fh
	p.fq = make(chan *Message, queueSize)
	p.rph = rph
	p.rpq = make(chan *Message, queueSize)
	p.next = next
	return p
}
// PipelineNode is a queue-backed pipeline stage: each of the four
// operations (receive, write, forward, reply) pairs a buffered channel
// with a Handler, all drained by the single Start loop.
type PipelineNode struct {
	ctx context.Context
	// read (inbound) queue and its handler.
	rq chan *Message
	rh Handler
	// write queue and its handler.
	wq chan *Message
	wh Handler
	// forward queue and its handler.
	fq chan *Message
	fh Handler
	// reply queue and its handler.
	rpq chan *Message
	rph Handler
	// next is the downstream node that Forward results are delivered to.
	next Node
}
// Receive enqueues an inbound message on the read queue; it blocks when
// the queue (capacity queueSize) is full and always returns nil.
// NOTE(review): sending on rq after Close panics (send on closed chan).
func (dn *PipelineNode) Receive(msg *Message) error {
	dn.rq <- msg
	logger.Println("receive end")
	return nil
}
// Send enqueues a message on the write queue; blocks when full and
// always returns nil.
func (dn *PipelineNode) Send(msg *Message) error {
	dn.wq <- msg
	return nil
}
// Reply enqueues a response message on the reply queue; blocks when
// full and always returns nil.
func (dn *PipelineNode) Reply(msg *Message) error {
	dn.rpq <- msg
	return nil
}
// Forward enqueues a message on the forward queue; blocks when full and
// always returns nil.
func (dn *PipelineNode) Forward(msg *Message) error {
	dn.fq <- msg
	return nil
}
// Error logs a handler error; it does not stop the dispatch loop.
func (dn *PipelineNode) Error(err error) {
	logger.Printf("err %s\n", err.Error())
}
// Start runs the node's dispatch loop: it drains the read, write,
// forward and reply queues, invoking the matching handler for each
// message. Handler errors are reported via Error. The loop never
// returns; run it in its own goroutine.
// NOTE(review): there is no ctx.Done case, so the loop cannot be
// stopped by cancellation; after Close it spins on nil messages from
// the closed channels.
func (dn *PipelineNode) Start() {
	logger.Printf("pipeline start")
	for {
		var err error
		select {
		case rm := <-dn.rq:
			err = dn.handleReceiveMsg(rm)
		case wm := <-dn.wq:
			err = dn.handleWriteMsg(wm)
		case fm := <-dn.fq:
			err = dn.handleForwardMsg(fm)
		case rpm := <-dn.rpq:
			err = dn.handleReplyMsg(rpm)
		}
		if err != nil {
			dn.Error(err)
		}
	}
	// BUG FIX: removed an unreachable logger.Printf("pipeline end") that
	// followed the infinite loop (flagged by `go vet` as unreachable code).
}
// handleReceiveMsg runs the receive handler on an inbound message. The
// handler sees a fresh context that carries this node (nodeKey) and the
// message's own context as the "previous context"; a non-nil handler
// result is forwarded down the chain.
func (dn *PipelineNode) handleReceiveMsg(msg *Message) (err error) {
	logger.Printf("rece")
	tempCtx := WithPrevContext(context.Background(), msg.Ctx)
	tempCtx = WithNode(tempCtx, dn)
	result, err := dn.rh.Handle(tempCtx, msg.Msg)
	if err != nil {
		return
	}
	logger.Println(result)
	if result != nil {
		tc := &Message{}
		tc.Ctx = tempCtx
		tc.Msg = result
		err = dn.Forward(tc)
	}
	return
}
// handleWriteMsg processes a write-path message. When a write handler
// is configured its output replaces the raw message. A non-nil result
// is either sent to the session (when this node has no previous
// context, i.e. it is the outermost node) or replied up the chain to
// the previous node.
func (dn *PipelineNode) handleWriteMsg(wm *Message) (err error) {
	logger.Println("wr")
	var result interface{}
	if dn.wh == nil {
		result = wm.Msg
	} else {
		result, err = dn.wh.Handle(wm.Ctx, wm.Msg)
		if err != nil {
			return
		}
	}
	if result != nil {
		pctx := PrevContextValue(dn.ctx)
		if pctx == nil {
			sess := SessionValue(dn.ctx)
			if sess == nil {
				err = SessionNilError
				return
			}
			// BUG FIX: previously this asserted wm.Msg — the unprocessed
			// input — so the write handler's result was computed and then
			// discarded; and on a failed assertion it still called
			// sess.Send(nil). Use the handler result and fail fast instead.
			content, ok := result.([]byte)
			if !ok {
				logger.Println("write error")
				err = errors.New("write message is not []byte")
				return
			}
			err = sess.Send(content)
			logger.Println("wr done")
			return err
		}
		pNode := NodeValue(pctx)
		tm := &Message{}
		tm.Ctx = dn.ctx
		tm.Msg = result
		err = pNode.Reply(tm)
	}
	logger.Printf("wr done")
	return
}
// handleForwardMsg runs the forward handler and, when it yields a
// non-nil result, delivers it to the next node's Receive.
// NOTE(review): dn.next is dereferenced without a nil check — a tail
// node constructed with next == nil would panic here; confirm
// construction always supplies a next node (or a forward handler that
// returns nil).
func (dn *PipelineNode) handleForwardMsg(fm *Message) (err error) {
	logger.Printf("forward")
	result, err := dn.fh.Handle(fm.Ctx, fm.Msg)
	if err != nil {
		return
	}
	if result != nil {
		tm := &Message{}
		tm.Ctx = dn.ctx
		tm.Msg = result
		err = dn.next.Receive(tm)
	}
	return
}
// handleReplyMsg runs the reply handler against the previous context
// and, when it yields a non-nil result, sends it back to the node that
// originated the request (the node stored on the previous context).
func (dn *PipelineNode) handleReplyMsg(rpm *Message) (err error) {
	logger.Printf("reply")
	cctx := PrevContextValue(dn.ctx)
	result, err := dn.rph.Handle(cctx, rpm.Msg)
	if err != nil {
		return
	}
	if result != nil {
		n := NodeValue(cctx)
		// BUG FIX: NodeValue can return nil (no previous context, or no
		// node stored on it); previously that nil was dereferenced.
		if n == nil {
			err = errors.New("reply: no node on previous context")
			return
		}
		tm := &Message{}
		tm.Ctx = cctx
		tm.Msg = result
		// BUG FIX: the Send error was silently discarded; propagate it so
		// Start can report it via Error.
		err = n.Send(tm)
	}
	return
}
// Close closes all four message queues and always returns nil.
// NOTE(review): this closes channels from the receiving side. Any
// concurrent Receive/Send/Forward/Reply will panic (send on closed
// channel), and a still-running Start loop then receives nil messages
// from the closed channels in a tight spin. A context/Done-based
// shutdown would be safer; fixing it requires coordinated changes to
// Start and the queue senders.
func (dn *PipelineNode) Close() error {
	close(dn.rq)
	close(dn.wq)
	close(dn.fq)
	close(dn.rpq)
	return nil
}
|
package handler
import (
"encoding/json"
"golang-api/model"
"log"
"net/http"
)
// ProcessStats serves the statistics for previous password-hash
// requests. GET returns model.Stats as JSON; any other method gets a
// 400 response.
func ProcessStats(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		// BUG FIX: marshal before touching the response. Previously the
		// code called WriteHeader(200) first, which (a) made the later
		// Header().Set("Content-Type", ...) a no-op — headers must be set
		// before WriteHeader — and (b) made the http.Error on a marshal
		// failure unable to change the already-committed 200 status.
		data, err := json.Marshal(model.Stats)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		if _, err := w.Write(data); err != nil {
			// BUG FIX: was log.Fatal, which terminated the whole server
			// whenever a client disconnected mid-write.
			log.Printf("writing stats response: %v", err)
		}
	default:
		// NOTE(review): 405 Method Not Allowed would be more accurate, but
		// 400 is preserved for backward compatibility with existing clients.
		w.WriteHeader(http.StatusBadRequest)
		log.Printf("Unsupported HTTP method '%v'", r.Method)
	}
}
|
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strings"
"github.com/google/go-github/github"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const (
	// envSecret names the env var holding the GitHub webhook secret.
	// NOTE(review): declared but not referenced in this file — verify it
	// is used elsewhere or remove it.
	envSecret = "GITHUB_SECRET_TOKEN"
)
// Result captures the minimal fields the interceptor needs from a
// webhook payload: the action and the repository clone URL.
type Result struct {
	Action string `json:"action"`
	Repository struct {
		CloneURL string `json:"clone_url"`
	} `json:"repository"`
}
// PushPayload is a GitHub push event enriched with the branch and
// suggested image tag computed by this interceptor.
type PushPayload struct {
	github.PushEvent
	WebhookBranch string `json:"webhooks-tekton-git-branch"`
	WebhookSuggestedImageTag string `json:"webhooks-tekton-image-tag"`
}
// PullRequestPayload is a GitHub pull-request event enriched with the
// branch and suggested image tag computed by this interceptor.
type PullRequestPayload struct {
	github.PullRequestEvent
	WebhookBranch string `json:"webhooks-tekton-git-branch"`
	WebhookSuggestedImageTag string `json:"webhooks-tekton-image-tag"`
}
// main starts the webhook interceptor: an HTTP server on :8080 that
// validates each incoming GitHub webhook (secret signature via a
// Kubernetes secret, repository URL, and optional event-type/action
// filters supplied in Wext-* headers) and, when validation passes,
// echoes the payload back enriched with branch/image-tag extras.
func main() {
	log.Print("Interceptor started")
	http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
		foundTriggerName := request.Header.Get("Wext-Trigger-Name")
		config, err := rest.InClusterConfig()
		if err != nil {
			log.Printf("[%s] Error creating in cluster config: %s", foundTriggerName, err.Error())
			http.Error(writer, fmt.Sprint(err), http.StatusInternalServerError)
			return
		}
		clientset, err := kubernetes.NewForConfig(config)
		if err != nil {
			log.Printf("[%s] Error creating new clientset: %s", foundTriggerName, err.Error())
			http.Error(writer, fmt.Sprint(err), http.StatusInternalServerError)
			return
		}
		foundNamespace := os.Getenv("INSTALLED_NAMESPACE")
		foundSecretName := request.Header.Get("Wext-Secret-Name")
		foundSecret, err := clientset.CoreV1().Secrets(foundNamespace).Get(foundSecretName, metav1.GetOptions{})
		if err != nil {
			log.Printf("[%s] Error getting the secret %s to validate: %s", foundTriggerName, foundSecretName, err.Error())
			http.Error(writer, fmt.Sprint(err), http.StatusBadRequest)
			return
		}
		wantedRepoURL := request.Header.Get("Wext-Repository-Url")
		// Verifies the webhook HMAC signature against the stored secret.
		payload, err := github.ValidatePayload(request, foundSecret.Data["secretToken"])
		if err != nil {
			log.Printf("[%s] Validation FAIL (error %s validating payload)", foundTriggerName, err.Error())
			http.Error(writer, fmt.Sprint(err), http.StatusExpectationFailed)
			return
		}
		var result Result
		err = json.Unmarshal(payload, &result)
		if err != nil {
			log.Printf("[%s] Validation FAIL (error %s marshalling payload as JSON)", foundTriggerName, err.Error())
			http.Error(writer, fmt.Sprint(err), http.StatusInternalServerError)
			return
		}
		cloneURL := result.Repository.CloneURL
		log.Printf("[%s] Clone URL coming in as JSON: %s", foundTriggerName, cloneURL)
		id := github.DeliveryID(request)
		// NOTE(review): this err check is dead — err was last assigned by
		// json.Unmarshal above and is nil on every path reaching here
		// (github.DeliveryID does not return an error).
		if err != nil {
			log.Printf("[%s] Error handling GitHub Event with delivery ID %s: %s", foundTriggerName, id, err.Error())
			http.Error(writer, fmt.Sprint(err), http.StatusInternalServerError)
			return
		}
		log.Printf("[%s] Handling GitHub Event with delivery ID: %s", foundTriggerName, id)
		validationPassed := false
		if sanitizeGitInput(cloneURL) == sanitizeGitInput(wantedRepoURL) {
			if request.Header.Get("Wext-Incoming-Event") != "" {
				wantedEvent := request.Header.Get("Wext-Incoming-Event")
				foundEvent := request.Header.Get("X-Github-Event")
				if wantedEvent == foundEvent { // Wanted GitHub event type provided AND repository URL matches so all is well
					wantedActions := request.Header["Wext-Incoming-Actions"]
					if len(wantedActions) == 0 {
						validationPassed = true
						log.Printf("[%s] Validation PASS (repository URL, secret payload, event type checked)", foundTriggerName)
					} else {
						actions := strings.Split(wantedActions[0], ",")
						for _, action := range actions {
							if action == result.Action {
								validationPassed = true
								log.Printf("[%s] Validation PASS (repository URL, secret payload, event type, action:%s checked)", foundTriggerName, action)
							}
						}
					}
				} else {
					log.Printf("[%s] Validation FAIL (event type does not match, got %s but wanted %s)", foundTriggerName, foundEvent, wantedEvent)
					// NOTE(review): err is nil here, so the response body from
					// fmt.Sprint(err) is the literal "<nil>" — a descriptive
					// message would be clearer.
					http.Error(writer, fmt.Sprint(err), http.StatusExpectationFailed)
					return
				}
			} else { // No wanted GitHub event type provided, but the repository URL matches so all is well
				log.Printf("[%s] Validation PASS (repository URL and secret payload checked)", foundTriggerName)
				validationPassed = true
			}
			if validationPassed {
				returnPayload, err := addExtrasToPayload(request.Header.Get("X-Github-Event"), payload)
				if err != nil {
					log.Printf("[%s] Failed to add branch to payload processing Github event ID: %s. Error: %s", foundTriggerName, id, err.Error())
					http.Error(writer, fmt.Sprint(err), http.StatusInternalServerError)
					return
				}
				log.Printf("[%s] Validation PASS so writing response", foundTriggerName)
				_, err = writer.Write(returnPayload)
				if err != nil {
					log.Printf("[%s] Failed to write response for Github event ID: %s. Error: %s", foundTriggerName, id, err.Error())
					http.Error(writer, fmt.Sprint(err), http.StatusInternalServerError)
					return
				}
			} else {
				http.Error(writer, "Validation failed", http.StatusExpectationFailed)
			}
		} else {
			log.Printf("[%s] Validation FAIL (repository URL does not match, got %s but wanted %s): ",
				foundTriggerName,
				sanitizeGitInput(cloneURL),
				sanitizeGitInput(wantedRepoURL))
			// NOTE(review): err is nil in this branch as well, so the client
			// again receives "<nil>" as the error body.
			http.Error(writer, fmt.Sprint(err), http.StatusExpectationFailed)
			return
		}
	})
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", 8080), nil))
}
// Adds branch and a suggested image tag
func addExtrasToPayload(event string, payload []byte) ([]byte, error) {
if "push" == event {
var toReturn PushPayload
var p github.PushEvent
err := json.Unmarshal(payload, &p)
if err != nil {
return nil, err
}
toReturn = PushPayload{
PushEvent: p,
WebhookBranch: p.GetRef()[strings.LastIndex(p.GetRef(), "/")+1:],
WebhookSuggestedImageTag: getSuggestedTag(p.GetRef(), *p.HeadCommit.ID),
}
return json.Marshal(toReturn)
} else if "pull_request" == event {
var toReturn PullRequestPayload
var pr github.PullRequestEvent
err := json.Unmarshal(payload, &pr)
if err != nil {
return nil, err
}
ref := pr.GetPullRequest().GetHead().GetRef()
toReturn = PullRequestPayload{
PullRequestEvent: pr,
WebhookBranch: ref[strings.LastIndex(ref, "/")+1:],
WebhookSuggestedImageTag: getSuggestedTag(ref, *pr.PullRequest.Head.SHA),
}
return json.Marshal(toReturn)
} else {
return payload, nil
}
}
// sanitizeGitInput normalizes a repository URL for comparison: it
// strips a trailing ".git", lowercases the remainder, and removes a
// leading "https://" or "http://" scheme.
func sanitizeGitInput(input string) string {
	s := strings.ToLower(strings.TrimSuffix(input, ".git"))
	for _, scheme := range []string{"https://", "http://"} {
		s = strings.TrimPrefix(s, scheme)
	}
	return s
}
// getSuggestedTag suggests an image tag for a build: the tag name when
// ref points at a tag (refs/tags/...), otherwise the first 7
// characters of the commit SHA.
func getSuggestedTag(ref, commit string) string {
	if strings.HasPrefix(ref, "refs/tags/") {
		return ref[strings.LastIndex(ref, "/")+1:]
	}
	// BUG FIX: commit[0:7] panicked when the commit string was shorter
	// than 7 characters (e.g. empty SHA on malformed payloads).
	if len(commit) < 7 {
		return commit
	}
	return commit[:7]
}
|
package main
import (
"context"
"fmt"
"golang.org/x/sync/errgroup"
"net/http"
"os"
"os/signal"
)
// sErr stages the terminal error from http.Server.ListenAndServe.
var sErr chan error
// sigC receives OS signals that trigger graceful shutdown.
var sigC chan os.Signal
// fd is the HTTP server shared by serve and signalListen.
var fd http.Server
// main starts an HTTP server on :8080 and a signal listener that shuts
// it down gracefully, then waits for both goroutines to finish.
func main() {
	g := errgroup.Group{}
	sErr = make(chan error, 1)
	sigC = make(chan os.Signal, 1)
	fd = http.Server{Addr: ":8080"}
	g.Go(serve)
	g.Go(signalListen)
	// BUG FIX: this was `select {}`, which blocks forever — even after a
	// signal has shut the server down and both goroutines have returned,
	// the process would hang. Wait returns once serve and signalListen
	// complete, surfacing the first non-nil error.
	if err := g.Wait(); err != nil {
		fmt.Println(err)
	}
}
// serve runs the HTTP server until it stops, then returns its terminal
// error. ListenAndServe blocks; its result is staged in the buffered
// sErr channel and immediately drained below.
// NOTE(review): serve closes sigC — a channel it never sends on — so a
// blocked signalListen receives the zero value from the closed channel.
// Closing a channel from the non-sending side is fragile; confirm the
// intended shutdown ordering.
func serve() error {
	sErr <- fd.ListenAndServe()
	select {
	case err := <- sErr:
		close(sigC)
		close(sErr)
		return err
	}
}
// signalListen blocks until a signal arrives on sigC, stops relaying
// signals, and gracefully shuts down the server (which in turn makes
// serve's ListenAndServe return).
// NOTE(review): signal.Notify with no signal arguments relays ALL
// incoming signals to sigC — confirm catching everything (including
// e.g. SIGURG/SIGCHLD) is intended rather than just SIGINT/SIGTERM.
func signalListen() error{
	signal.Notify(sigC)
	select {
	case s:= <- sigC:
		fmt.Println("get signal:",s)
		signal.Stop(sigC)
		return fd.Shutdown(context.TODO())
	}
}
|
package slack
import (
"errors"
"net/url"
)
// oAuthResponseFull models Slack's oauth.access response: the issued
// access token and granted scope, plus the common ok/error envelope
// from the embedded SlackResponse.
type oAuthResponseFull struct {
	AccessToken string `json:"access_token"`
	Scope string `json:"scope"`
	SlackResponse
}
// GetOAuthToken exchanges an OAuth authorization code for an access
// token via Slack's oauth.access endpoint, returning the token and the
// scope that was granted.
func GetOAuthToken(clientID, clientSecret, code, redirectURI string, debug bool) (accessToken string, scope string, err error) {
	form := url.Values{
		"client_id":     {clientID},
		"client_secret": {clientSecret},
		"code":          {code},
		"redirect_uri":  {redirectURI},
	}
	var resp oAuthResponseFull
	if err = post("oauth.access", form, &resp, debug); err != nil {
		return "", "", err
	}
	if !resp.Ok {
		return "", "", errors.New(resp.Error)
	}
	return resp.AccessToken, resp.Scope, nil
}
|
/*
Copyright 2015 Google Inc. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/
package oid
import (
"fmt"
"strconv"
"strings"
"github.com/google/cups-connector/cdd"
)
// Some MIB definitions from Printer-MIB (RFC 3805).
// Each group below extends the Printer-MIB root (1.3.6.1.2.1.43) with a
// sub-table OID; the trailing {…, 1} selects the first row/device.
var (
	PrinterMIB OID = OID{1, 3, 6, 1, 2, 1, 43}
	// General printer information (serial number, etc.).
	PrinterMIBGeneral = append(PrinterMIB, OID{5}...)
	PrinterGeneralSerialNumber = append(PrinterMIBGeneral, OID{1, 1, 17, 1}...)
	// Cover/door descriptions and states.
	PrinterMIBCover = append(PrinterMIB, OID{6}...)
	PrinterCoverDescription = append(PrinterMIBCover, OID{1, 1, 2, 1}...)
	PrinterCoverStatus = append(PrinterMIBCover, OID{1, 1, 3, 1}...)
	// Input trays: capacity, fill level, status, name.
	PrinterMIBInput = append(PrinterMIB, OID{8}...)
	PrinterInputMaxCapacity = append(PrinterMIBInput, OID{2, 1, 9, 1}...)
	PrinterInputCurrentLevel = append(PrinterMIBInput, OID{2, 1, 10, 1}...)
	PrinterInputStatus = append(PrinterMIBInput, OID{2, 1, 11, 1}...)
	PrinterInputName = append(PrinterMIBInput, OID{2, 1, 13, 1}...)
	// Output bins: capacity, remaining capacity, status, name.
	PrinterMIBOutput = append(PrinterMIB, OID{9}...)
	PrinterOutputMaxCapacity = append(PrinterMIBOutput, OID{2, 1, 4, 1}...)
	PrinterOutputRemainingCapacity = append(PrinterMIBOutput, OID{2, 1, 5, 1}...)
	PrinterOutputStatus = append(PrinterMIBOutput, OID{2, 1, 6, 1}...)
	PrinterOutputName = append(PrinterMIBOutput, OID{2, 1, 7, 1}...)
	// Marker supplies (toner/ink): class, type, description, unit, levels.
	PrinterMIBMarker = append(PrinterMIB, OID{11}...)
	PrinterMarkerSuppliesClass = append(PrinterMIBMarker, OID{1, 1, 4, 1}...)
	PrinterMarkerSuppliesType = append(PrinterMIBMarker, OID{1, 1, 5, 1}...)
	PrinterMarkerSuppliesDescription = append(PrinterMIBMarker, OID{1, 1, 6, 1}...)
	PrinterMarkerSuppliesSupplyUnit = append(PrinterMIBMarker, OID{1, 1, 7, 1}...)
	PrinterMarkerSuppliesMaxCapacity = append(PrinterMIBMarker, OID{1, 1, 8, 1}...)
	PrinterMarkerSuppliesLevel = append(PrinterMIBMarker, OID{1, 1, 9, 1}...)
	// Marker colorant values (e.g. color names).
	PrinterMIBMarkerColorant = append(PrinterMIB, OID{12}...)
	PrinterMarkerColorantValue = append(PrinterMIBMarkerColorant, OID{1, 1, 4, 1}...)
)
// GetSerialNumber gets the printer serial number, if available. The
// boolean reports whether the value was present in the variable set.
func (vs *VariableSet) GetSerialNumber() (string, bool) {
	return vs.GetValue(PrinterGeneralSerialNumber)
}
// Printer cover status TC.
// Maps the raw PrtCoverStatusTC value (as a decimal string) to its
// RFC 3805 textual name; used as the vendor message for failed covers.
var PrinterCoverStatusTC map[string]string = map[string]string{
	"1": "other",
	"2": "unknown",
	"3": "coverOpen",
	"4": "coverClosed",
	"5": "interlockOpen",
	"6": "interlockClosed",
}
// Printer cover status to cdd.CoverStateType.
// "other"/"unknown" map to Failure; open/closed and their interlock
// variants map to Open/OK respectively.
var PrinterCoverStatusToGCP map[string]cdd.CoverStateType = map[string]cdd.CoverStateType{
	"1": cdd.CoverStateFailure,
	"2": cdd.CoverStateFailure,
	"3": cdd.CoverStateOpen,
	"4": cdd.CoverStateOK,
	"5": cdd.CoverStateOpen,
	"6": cdd.CoverStateOK,
}
// GetCovers reports the printer's covers/doors and their states, mapped
// from the Printer-MIB cover description/status columns to CDD types.
// It returns false when the MIB data is missing or inconsistent (no
// covers, or description and status counts differ).
func (vs *VariableSet) GetCovers() (*[]cdd.Cover, *cdd.CoverState, bool) {
	descriptions := vs.GetSubtree(PrinterCoverDescription).Variables()
	statuses := vs.GetSubtree(PrinterCoverStatus).Variables()
	if len(descriptions) < 1 ||
		len(descriptions) != len(statuses) {
		return nil, nil, false
	}
	covers := make([]cdd.Cover, 0, len(descriptions))
	coverState := cdd.CoverState{make([]cdd.CoverStateItem, 0, len(statuses))}
	for i := 0; i < len(descriptions); i++ {
		// The last sub-identifier of the row's OID serves as the vendor
		// index for both the cover and its state item.
		index := cdd.NewSchizophrenicInt64(descriptions[i].Name[len(descriptions[i].Name)-1])
		description := descriptions[i].Value
		state := PrinterCoverStatusToGCP[statuses[i].Value]
		cover := cdd.Cover{
			VendorID: index.String(),
			Index:    index,
		}
		switch strings.ToLower(description) {
		case "cover":
			cover.Type = cdd.CoverTypeCover
		case "door":
			cover.Type = cdd.CoverTypeDoor
		default:
			cover.Type = cdd.CoverTypeCustom
			cover.CustomDisplayNameLocalized = cdd.NewLocalizedString(description)
		}
		covers = append(covers, cover)
		coverStateItem := cdd.CoverStateItem{
			VendorID: index.String(),
			State:    state,
		}
		// Attach the raw MIB status name only for failures, where the
		// state alone does not explain the problem.
		if state == cdd.CoverStateFailure {
			coverStateItem.VendorMessage = PrinterCoverStatusTC[statuses[i].Value]
		}
		coverState.Item = append(coverState.Item, coverStateItem)
	}
	return &covers, &coverState, true
}
// PrinterSubUnitStatusTC mirrors the Printer-MIB PrtSubUnitStatusTC
// textual convention: the value is a bit field whose low three bits
// encode availability detail (see the `status & 7` switch in
// GetInputTrays/GetOutputBins) and whose higher bits are condition
// flags (non-critical, critical, offline, transitioning).
type PrinterSubUnitStatusTC uint8
const (
	PrinterSubUnitAvailableAndIdle PrinterSubUnitStatusTC = 0
	PrinterSubUnitAvailableAndStandby = 2
	PrinterSubUnitAvailableAndActive = 4
	PrinterSubUnitAvailableAndBusy = 6
	// PrinterSubUnitUnavailable (bit mask) intentionally shares value 1
	// with PrinterSubUnitUnavailableAndOnRequest (decoded value): the
	// former is used as `status & PrinterSubUnitUnavailable`, the latter
	// in the `status & 7` switch.
	PrinterSubUnitUnavailable = 1
	PrinterSubUnitUnavailableAndOnRequest = 1
	PrinterSubUnitUnavailableBecauseBroken = 3
	PrinterSubUnitUnknown = 5
	// Flag bits combined with the availability values above.
	// NOTE(review): only the first constant carries the named type; the
	// rest are untyped integer constants — probably harmless since they
	// are only used in masks, but worth confirming.
	PrinterSubUnitNonCritical = 8
	PrinterSubUnitCritical = 16
	PrinterSubUnitOffline = 32
	PrinterSubUnitTransitioning = 64
)
// GetInputTrays reports the printer's input trays and their states from
// the Printer-MIB input sub-tables (max capacity, current level,
// status, name). It returns false when the four columns are missing or
// their row counts disagree, or when a numeric value fails to parse.
func (vs *VariableSet) GetInputTrays() (*[]cdd.InputTrayUnit, *cdd.InputTrayState, bool) {
	levelsMax := vs.GetSubtree(PrinterInputMaxCapacity).Variables()
	levelsCurrent := vs.GetSubtree(PrinterInputCurrentLevel).Variables()
	statuses := vs.GetSubtree(PrinterInputStatus).Variables()
	names := vs.GetSubtree(PrinterInputName).Variables()
	if len(levelsMax) < 1 ||
		len(levelsMax) != len(levelsCurrent) ||
		len(levelsMax) != len(statuses) ||
		len(levelsMax) != len(names) {
		return nil, nil, false
	}
	inputTrayUnits := make([]cdd.InputTrayUnit, 0, len(statuses))
	inputTrayState := cdd.InputTrayState{make([]cdd.InputTrayStateItem, 0, len(statuses))}
	for i := 0; i < len(statuses); i++ {
		// The last sub-identifier of the row OID is the vendor index.
		index := cdd.NewSchizophrenicInt64(statuses[i].Name[len(statuses[i].Name)-1])
		status, err := strconv.ParseUint(statuses[i].Value, 10, 8)
		if err != nil {
			return nil, nil, false
		}
		state := cdd.InputTrayStateOK
		stateMessage := []string{}
		// Decode the PrtSubUnitStatusTC bit field: low three bits carry
		// the availability detail, higher bits are condition flags.
		if (status & PrinterSubUnitUnavailable) != 0 {
			stateMessage = append(stateMessage, "unavailable")
			switch status & 7 {
			case PrinterSubUnitUnavailableAndOnRequest:
				stateMessage = append(stateMessage, "on request")
			case PrinterSubUnitUnavailableBecauseBroken:
				state = cdd.InputTrayStateFailure
				stateMessage = append(stateMessage, "broken")
			case PrinterSubUnitUnknown:
				state = cdd.InputTrayStateFailure
				stateMessage = append(stateMessage, "reason unknown")
			}
		}
		if (status & PrinterSubUnitNonCritical) != 0 {
			stateMessage = append(stateMessage, "non-critical")
		}
		if (status & PrinterSubUnitCritical) != 0 {
			state = cdd.InputTrayStateFailure
			stateMessage = append(stateMessage, "critical")
		}
		if (status & PrinterSubUnitOffline) != 0 {
			state = cdd.InputTrayStateOff
			stateMessage = append(stateMessage, "offline")
		}
		inputState := cdd.InputTrayStateItem{
			VendorID:      index.String(),
			State:         state,
			VendorMessage: strings.Join(stateMessage, ", "),
		}
		levelMax, err := strconv.ParseInt(levelsMax[i].Value, 10, 32)
		if err != nil {
			return nil, nil, false
		}
		levelCurrent, err := strconv.ParseInt(levelsCurrent[i].Value, 10, 32)
		if err != nil {
			return nil, nil, false
		}
		// Negative MIB values mean "unknown"; only report a fill level
		// when both capacity and current level are known.
		if levelMax >= 0 && levelCurrent >= 0 {
			if levelCurrent == 0 && state == cdd.InputTrayStateOK {
				inputState.State = cdd.InputTrayStateEmpty
			}
			var levelPercent int32
			if levelMax > 0 {
				levelPercent = int32(100 * levelCurrent / levelMax)
			}
			inputState.LevelPercent = &levelPercent
		}
		if inputState.State == cdd.InputTrayStateOK ||
			inputState.State == cdd.InputTrayStateEmpty {
			// No message necessary when state says everything.
			inputState.VendorMessage = ""
		}
		inputTrayState.Item = append(inputTrayState.Item, inputState)
		inputTrayUnits = append(inputTrayUnits, cdd.InputTrayUnit{
			VendorID:                   index.String(),
			Type:                       cdd.InputTrayUnitCustom,
			Index:                      index,
			CustomDisplayNameLocalized: cdd.NewLocalizedString(names[i].Value),
		})
	}
	return &inputTrayUnits, &inputTrayState, true
}
// GetOutputBins returns the printer's output bin descriptions and their
// current states, read from the Printer MIB output sub-trees. The boolean
// result is false when the sub-trees are missing, inconsistently sized, or
// contain unparseable values.
func (vs *VariableSet) GetOutputBins() (*[]cdd.OutputBinUnit, *cdd.OutputBinState, bool) {
	capacitiesMax := vs.GetSubtree(PrinterOutputMaxCapacity).Variables()
	capacitiesRemaining := vs.GetSubtree(PrinterOutputRemainingCapacity).Variables()
	statuses := vs.GetSubtree(PrinterOutputStatus).Variables()
	names := vs.GetSubtree(PrinterOutputName).Variables()
	// All four sub-trees must describe the same set of bins.
	if len(names) < 1 ||
		len(names) != len(capacitiesMax) ||
		len(names) != len(capacitiesRemaining) ||
		len(names) != len(statuses) {
		return nil, nil, false
	}

	outputBinUnits := make([]cdd.OutputBinUnit, 0, len(names))
	outputBinState := cdd.OutputBinState{make([]cdd.OutputBinStateItem, 0, len(names))}

	for i := 0; i < len(names); i++ {
		// The last OID component identifies the bin.
		index := cdd.NewSchizophrenicInt64(statuses[i].Name[len(statuses[i].Name)-1])
		status, err := strconv.ParseUint(statuses[i].Value, 10, 8)
		if err != nil {
			return nil, nil, false
		}

		// Decode the sub-unit status bit field into a GCP state plus a
		// human-readable message.
		state := cdd.OutputBinStateOK
		stateMessage := []string{}
		if (status & PrinterSubUnitUnavailable) != 0 {
			stateMessage = append(stateMessage, "unavailable")
			switch status & 7 {
			case PrinterSubUnitUnavailableAndOnRequest:
				stateMessage = append(stateMessage, "on request")
			case PrinterSubUnitUnavailableBecauseBroken:
				state = cdd.OutputBinStateFailure
				stateMessage = append(stateMessage, "broken")
			case PrinterSubUnitUnknown:
				state = cdd.OutputBinStateFailure
				stateMessage = append(stateMessage, "reason unknown")
			}
		}
		if (status & PrinterSubUnitNonCritical) != 0 {
			stateMessage = append(stateMessage, "non-critical")
		}
		if (status & PrinterSubUnitCritical) != 0 {
			state = cdd.OutputBinStateFailure
			stateMessage = append(stateMessage, "critical")
		}
		if (status & PrinterSubUnitOffline) != 0 {
			state = cdd.OutputBinStateOff
			stateMessage = append(stateMessage, "offline")
		}

		outputState := cdd.OutputBinStateItem{
			VendorID: index.String(),
			State:    state,
			// ", " matches the separator used for input tray messages.
			VendorMessage: strings.Join(stateMessage, ", "),
		}

		capacityMax, err := strconv.ParseInt(capacitiesMax[i].Value, 10, 32)
		if err != nil {
			return nil, nil, false
		}
		capacityRemaining, err := strconv.ParseInt(capacitiesRemaining[i].Value, 10, 32)
		if err != nil {
			return nil, nil, false
		}
		// Negative capacities mean "unknown"; only report a fill level when
		// both values are known.
		if capacityMax >= 0 && capacityRemaining >= 0 {
			if capacityRemaining == 0 && state == cdd.OutputBinStateOK {
				outputState.State = cdd.OutputBinStateFull
			}
			// Guard against division by zero: a zero-capacity bin is
			// reported as 100% full.
			levelPercent := int32(100)
			if capacityMax > 0 {
				levelPercent = 100 - int32(100*capacityRemaining/capacityMax)
			}
			outputState.LevelPercent = &levelPercent
		}

		outputBinState.Item = append(outputBinState.Item, outputState)
		outputBinUnits = append(outputBinUnits, cdd.OutputBinUnit{
			VendorID:                   index.String(),
			Type:                       cdd.OutputBinUnitCustom,
			Index:                      index,
			CustomDisplayNameLocalized: cdd.NewLocalizedString(names[i].Value),
		})
	}
	return &outputBinUnits, &outputBinState, true
}
// PrinterMarkerSuppliesSupplyUnitTC maps PrtMarkerSuppliesSupplyUnitTC enum
// values to their textual-convention labels from the Printer MIB (the
// "hundreths" spelling is the MIB's own).
var PrinterMarkerSuppliesSupplyUnitTC = map[string]string{
	"1":  "other",
	"2":  "unknown",
	"3":  "tenThousandthsOfInches",
	"4":  "micrometers",
	"7":  "impressions",
	"8":  "sheets",
	"11": "hours",
	"12": "thousandthsOfOunces",
	"13": "tenthsOfGrams",
	"14": "hundrethsOfFluidOunces",
	"15": "tenthsOfMilliliters",
	"16": "feet",
	"17": "meters",
	"18": "items",
	"19": "percent",
}
// PrinterMarkerSuppliesTypeTC maps PrtMarkerSuppliesTypeTC enum values to
// their textual-convention labels from the Printer MIB.
var PrinterMarkerSuppliesTypeTC = map[string]string{
	"1":  "other",
	"2":  "unknown",
	"3":  "toner",
	"4":  "wasteToner",
	"5":  "ink",
	"6":  "inkCartridge",
	"7":  "inkRibbon",
	"8":  "wasteInk",
	"9":  "opc",
	"10": "developer",
	"11": "fuserOil",
	"12": "solidWax",
	"13": "ribbonWax",
	"14": "wasteWax",
	"15": "fuser",
	"16": "coronaWire",
	"17": "fuserOilWick",
	"18": "cleanerUnit",
	"19": "fuserCleaningPad",
	"20": "transferUnit",
	"21": "tonerCartridge",
	"22": "fuserOiler",
	"23": "water",
	"24": "wasteWater",
	"25": "glueWaterAdditive",
	"26": "wastePaper",
	"27": "bindingSupply",
	"28": "bandingSupply",
	"29": "stitchingWire",
	"30": "shrinkWrap",
	"31": "paperWrap",
	"32": "staples",
	"33": "inserts",
	"34": "covers",
}
// PrinterMarkerSuppliesTypeToGCP maps PrtMarkerSuppliesTypeTC enum values to
// the GCP marker type, where one exists. Entries with an empty value are kept
// deliberately: a present-but-empty entry is distinguishable from a missing
// key via the comma-ok lookup.
var PrinterMarkerSuppliesTypeToGCP = map[string]cdd.MarkerType{
	"1":  "",
	"2":  "",
	"3":  cdd.MarkerToner,
	"4":  "",
	"5":  cdd.MarkerInk,
	"6":  cdd.MarkerInk,
	"7":  cdd.MarkerInk,
	"8":  "",
	"9":  "",
	"10": "",
	"11": "",
	"12": "",
	"13": "",
	"14": "",
	"15": "",
	"16": "",
	"17": "",
	"18": "",
	"19": "",
	"20": "",
	"21": cdd.MarkerToner,
	"22": "",
	"23": "",
	"24": "",
	"25": "",
	"26": "",
	"27": "",
	"28": "",
	"29": "",
	"30": "",
	"31": "",
	"32": cdd.MarkerStaples,
	"33": "",
	"34": "",
}
// PrinterMarkerSuppliesClassTC represents the PrtMarkerSuppliesClassTC
// textual convention: whether a supply sub-unit is consumed or fills up.
type PrinterMarkerSuppliesClassTC string

const (
	// PrinterMarkerSuppliesClassOther is an unclassified supply sub-unit.
	PrinterMarkerSuppliesClassOther PrinterMarkerSuppliesClassTC = "1" // other
	// PrinterMarkerSuppliesClassConsumed is a supply that depletes with use.
	// Previously untyped; explicitly typed so all three constants share the
	// textual-convention type.
	PrinterMarkerSuppliesClassConsumed PrinterMarkerSuppliesClassTC = "3" // supplyThatIsConsumed
	// PrinterMarkerSuppliesClassFilled is a receptacle that fills with use.
	PrinterMarkerSuppliesClassFilled PrinterMarkerSuppliesClassTC = "4" // receptacleThatIsFilled
)
// snmpMarkerColorToGCP maps normalized (lowercased, space/hyphen-stripped)
// colorant name prefixes to GCP marker color types.
var snmpMarkerColorToGCP = map[string]cdd.MarkerColorType{
	"black":        cdd.MarkerColorBlack,
	"color":        cdd.MarkerColorColor,
	"cyan":         cdd.MarkerColorCyan,
	"magenta":      cdd.MarkerColorMagenta,
	"yellow":       cdd.MarkerColorYellow,
	"lightcyan":    cdd.MarkerColorLightCyan,
	"lightmagenta": cdd.MarkerColorLightMagenta,
	"gray":         cdd.MarkerColorGray,
	"lightgray":    cdd.MarkerColorLightGray,
	"pigmentblack": cdd.MarkerColorPigmentBlack,
	"matteblack":   cdd.MarkerColorMatteBlack,
	"photocyan":    cdd.MarkerColorPhotoCyan,
	"photomagenta": cdd.MarkerColorPhotoMagenta,
	"photoyellow":  cdd.MarkerColorPhotoYellow,
	"photogray":    cdd.MarkerColorPhotoGray,
	"red":          cdd.MarkerColorRed,
	"green":        cdd.MarkerColorGreen,
	"blue":         cdd.MarkerColorBlue,
}
// GetMarkers reads the marker-supplies sub-trees and partitions the entries:
// supplies with a GCP marker type (toner, ink, staples) become Markers with
// MarkerState items; everything else is reported as a VendorState item.
// The final bool is false when the sub-trees are missing, inconsistently
// sized, or contain unparseable numbers.
func (vs *VariableSet) GetMarkers() (*[]cdd.Marker, *cdd.MarkerState, *cdd.VendorState, bool) {
	classes := vs.GetSubtree(PrinterMarkerSuppliesClass).Variables()
	types := vs.GetSubtree(PrinterMarkerSuppliesType).Variables()
	descriptions := vs.GetSubtree(PrinterMarkerSuppliesDescription).Variables()
	units := vs.GetSubtree(PrinterMarkerSuppliesSupplyUnit).Variables()
	levelsMax := vs.GetSubtree(PrinterMarkerSuppliesMaxCapacity).Variables()
	levelsCurrent := vs.GetSubtree(PrinterMarkerSuppliesLevel).Variables()
	colors := vs.GetSubtree(PrinterMarkerColorantValue).Variables()
	// All seven sub-trees must describe the same set of supplies.
	if len(classes) < 1 ||
		len(classes) != len(types) ||
		len(classes) != len(descriptions) ||
		len(classes) != len(units) ||
		len(classes) != len(levelsMax) ||
		len(classes) != len(levelsCurrent) ||
		len(classes) != len(colors) {
		return nil, nil, nil, false
	}
	markers := []cdd.Marker{}
	markerState := cdd.MarkerState{}
	vendorState := cdd.VendorState{}
	for i := 0; i < len(classes); i++ {
		// The last OID component identifies the supply.
		index := int64(classes[i].Name[len(classes[i].Name)-1])
		levelMax, err := strconv.ParseInt(levelsMax[i].Value, 10, 32)
		if err != nil {
			return nil, nil, nil, false
		}
		levelCurrent, err := strconv.ParseInt(levelsCurrent[i].Value, 10, 32)
		if err != nil {
			return nil, nil, nil, false
		}
		// levelPercent stays 0 when levelMax <= 0 (zero or "unknown").
		var levelPercent int32
		if levelMax > 0 {
			levelPercent = int32(100 * levelCurrent / levelMax)
		}
		if markerType, exists := PrinterMarkerSuppliesTypeToGCP[types[i].Value]; exists && markerType != "" {
			// GCP calls this a Marker.
			state := cdd.MarkerStateOK
			markerStateItem := cdd.MarkerStateItem{
				VendorID: strconv.FormatInt(index, 10),
				State:    state,
			}
			// Negative values mean "unknown"; only report levels when both
			// capacity and current level are known.
			if levelMax >= 0 && levelCurrent >= 0 {
				if levelPercent <= 10 {
					markerStateItem.State = cdd.MarkerStateExhausted
				}
				markerStateItem.LevelPercent = &levelPercent
				// When the supply is measured in sheets, also expose the raw
				// page count.
				if unit, exists := PrinterMarkerSuppliesSupplyUnitTC[units[i].Value]; exists && unit == "sheets" {
					levelPages := int32(levelCurrent)
					markerStateItem.LevelPages = &levelPages
				}
			}
			// Normalize the colorant name (lowercase, strip spaces/hyphens)
			// before prefix-matching against the known GCP colors.
			rawColor := strings.Replace(strings.Replace(strings.ToLower(colors[i].Value), " ", "", -1), "-", "", -1)
			colorType := cdd.MarkerColorCustom
			for k, v := range snmpMarkerColorToGCP {
				if strings.HasPrefix(rawColor, k) {
					colorType = v
					break
				}
			}
			markerColor := cdd.MarkerColor{Type: colorType}
			if colorType == cdd.MarkerColorCustom {
				// Unrecognized color: present the vendor's name, minus common
				// supply-type suffixes.
				name := colors[i].Value
				name = strings.TrimSuffix(name, " Cartridge")
				name = strings.TrimSuffix(name, " cartridge")
				name = strings.TrimSuffix(name, " Ribbon")
				name = strings.TrimSuffix(name, " ribbon")
				name = strings.TrimSuffix(name, " Toner")
				name = strings.TrimSuffix(name, " toner")
				name = strings.TrimSuffix(name, " Ink")
				name = strings.TrimSuffix(name, " ink")
				name = strings.Replace(name, "-", " ", -1)
				markerColor.CustomDisplayNameLocalized = cdd.NewLocalizedString(name)
			}
			marker := cdd.Marker{
				VendorID: strconv.FormatInt(index, 10),
				Type:     markerType,
				Color:    &markerColor,
			}
			markerState.Item = append(markerState.Item, markerStateItem)
			markers = append(markers, marker)
		} else {
			// Severity scales with how depleted the supply is.
			var state cdd.VendorStateType
			if levelPercent <= 1 {
				state = cdd.VendorStateError
			} else if levelPercent <= 10 {
				state = cdd.VendorStateWarning
			} else {
				state = cdd.VendorStateInfo
			}
			// GCP doesn't call this a Marker, so treat it like a VendorState.
			class := PrinterMarkerSuppliesClassTC(classes[i].Value)
			var description string
			if class == PrinterMarkerSuppliesClassFilled {
				// For receptacles the level counts the other way: full is bad.
				levelPercent = 100 - levelPercent
				description = fmt.Sprintf("%s at %d%%", descriptions[i].Value, levelPercent)
				if levelPercent == 100 {
					description = fmt.Sprintf("%s full", descriptions[i].Value)
				}
			} else { // class == PrinterMarkerSuppliesClassConsumed
				description = fmt.Sprintf("%s at %d%%", descriptions[i].Value, levelPercent)
				if levelPercent == 0 {
					description = fmt.Sprintf("%s empty", descriptions[i].Value)
				}
			}
			vendorState.Item = append(vendorState.Item, cdd.VendorStateItem{
				State:                state,
				DescriptionLocalized: cdd.NewLocalizedString(description),
			})
		}
	}
	return &markers, &markerState, &vendorState, true
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"math"
"sort"
"strconv"
)
// Restaurant is one entry decoded from restaurants.json.
type Restaurant struct {
	ID                  int64    `json:"id"`
	Name                string   `json:"name"`
	Distance            int64    `json:"distance"`
	DistanceFromStation int      `json:"distance_from_station"`
	// Budgets are interface{} because the JSON value may be null; when
	// present, calcTypicalPrice asserts them to float64 (JSON numbers).
	DinnerBudget     interface{} `json:"dinner_budget"`
	LunchBudget      interface{} `json:"lunch_budget"`
	PaymentMethods   []string    `json:"payment_methods"`
	RecommendersCount int64      `json:"recommenders_count"`
	// Status 0 is treated as "closed" by main's scoring.
	Status int `json:"status"`
}
// main scores restaurants from restaurants.json and prints them in
// descending score order ("name:id:score"), followed by the ID of the
// second-ranked restaurant.
func main() {
	raw, err := ioutil.ReadFile("./restaurants.json")
	if err != nil {
		panic(err)
	}
	var restaurants []Restaurant
	if err := json.Unmarshal(raw, &restaurants); err != nil {
		panic(err)
	}

	scores := map[int64]int64{}
	for _, r := range restaurants {
		scores[r.ID] = 0
	}

	// Award 3/2/1 points to the first three entries of the current ordering.
	awardTopThree := func() {
		for i, pts := range []int64{3, 2, 1} {
			scores[restaurants[i].ID] += pts
		}
	}

	// Cheapest typical price first: 3/2/1 points.
	sort.Slice(restaurants, func(i, j int) bool {
		return restaurants[i].calcTypicalPrice() < restaurants[j].calcTypicalPrice()
	})
	awardTopThree()

	// Closest to the current location first: 3/2/1 points.
	sort.Slice(restaurants, func(i, j int) bool {
		return restaurants[i].Distance < restaurants[j].Distance
	})
	awardTopThree()

	// 2 points for QR-code payment support.
	for _, r := range restaurants {
		for _, pm := range r.PaymentMethods {
			if pm == "QR" {
				scores[r.ID] += 2
			}
		}
	}

	// Most recommenders first: 3/2/1 points.
	sort.Slice(restaurants, func(i, j int) bool {
		return restaurants[i].RecommendersCount > restaurants[j].RecommendersCount
	})
	awardTopThree()

	// Closed restaurants (status 0) are forced to zero points.
	for _, r := range restaurants {
		if r.Status == 0 {
			scores[r.ID] = 0
		}
	}

	// Final ranking: highest score first.
	sort.Slice(restaurants, func(i, j int) bool {
		return scores[restaurants[i].ID] > scores[restaurants[j].ID]
	})
	for _, r := range restaurants {
		fmt.Println(r.Name + ":" + strconv.FormatInt(r.ID, 10) + ":" + strconv.FormatInt(scores[r.ID], 10))
	}

	// ID of the restaurant ranked second.
	fmt.Println(strconv.FormatInt(restaurants[1].ID, 10))
}
// calcTypicalPrice returns the restaurant's representative budget:
// lunch*0.4 + dinner*0.6 (rounded) when both are set, the available one when
// only one is set, and 0 when neither is set. A budget that is present but
// not a JSON number is treated as absent instead of panicking.
func (p Restaurant) calcTypicalPrice() float64 {
	lunch, hasLunch := p.LunchBudget.(float64)
	dinner, hasDinner := p.DinnerBudget.(float64)
	switch {
	case hasLunch && hasDinner:
		return math.Round(lunch*0.4 + dinner*0.6)
	case hasLunch:
		return lunch
	case hasDinner:
		return dinner
	default:
		return 0
	}
}
|
package queries
import (
"log"
"github.com/jmoiron/sqlx"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/categories/models"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/configuration"
)
// GET_CATEGORIES_SQL fetches every category with its Polish/English names
// and descriptions.
const GET_CATEGORIES_SQL = `
SELECT
	c."CategoryId"
	, c."CategoryNamePl"
	, c."CategoryNameEn"
	, c."CategoryDescriptionPl"
	, c."CategoryDescriptionEn"
FROM
	categories."Categories" c;`

// GET_HIERARCHY_SQL fetches all (child, parent) category id pairs.
const GET_HIERARCHY_SQL = `
SELECT
	c."CategoryId"
	, c."ParentId"
FROM
	categories."HierarchyOfCategories" c;`

// GET_CATEGORY_BY_ID2_SQL fetches a single category by its id ($1).
const GET_CATEGORY_BY_ID2_SQL = `
SELECT
	q."CategoryId"
	,q."CategoryNamePl"
	,q."CategoryNameEn"
	,q."CategoryDescriptionPl"
	,q."CategoryDescriptionEn"
FROM
	categories."Categories" q
WHERE
	q."CategoryId"=$1;`
// GetCategories loads all categories and resolves each one's parent
// categories via the hierarchy table. Errors are logged and returned to the
// caller; previously log.Fatal terminated the process, making the error
// return value dead code.
func GetCategories() (categories []models.CategoryForm, err error) {
	db, err := sqlx.Open("postgres", configuration.ConnectionString)
	if err != nil {
		log.Print(err)
		return
	}
	// Close only after a successful Open; deferring before the error check
	// risks calling Close on an unusable handle.
	defer db.Close()

	var _categories []models.EditCategory
	if err = db.Select(&_categories, GET_CATEGORIES_SQL); err != nil {
		log.Print(err)
		return
	}

	var hierarchy []models.Hierarchy
	if err = db.Select(&hierarchy, GET_HIERARCHY_SQL); err != nil {
		log.Print(err)
		return
	}

	// For each category, collect its parents by scanning the hierarchy rows.
	// NOTE(review): this issues one query per hierarchy match (N+1 pattern);
	// acceptable for small category sets.
	var buf models.CategoryForm
	for i := 0; i < len(_categories); i++ {
		buf.CategoryId = _categories[i].CategoryId
		buf.CategoryNameEn = _categories[i].CategoryNameEn
		buf.CategoryNamePl = _categories[i].CategoryNamePl
		buf.CategoryDescriptionPl = _categories[i].CategoryDescriptionPl
		buf.CategoryDescriptionEn = _categories[i].CategoryDescriptionEn
		for j := 0; j < len(hierarchy); j++ {
			if _categories[i].CategoryId == hierarchy[j].CategoryId {
				if err = db.Select(&buf.CategoryParents, GET_CATEGORY_BY_ID2_SQL, hierarchy[j].ParentId); err != nil {
					log.Print(err)
					return
				}
			}
		}
		categories = append(categories, buf)
		// Reset parents so the next category does not inherit them.
		buf.CategoryParents = nil
	}
	return
}
|
package main
import (
"bufio"
"fmt"
"io"
"log"
"net/http"
"os"
"strings"
"time"
"strconv"
"encoding/json"
)
const ConfigFile = "config/config.json"
// Settings is the harvester configuration decoded from ConfigFile.
// Scheme/Server/Prefix plus Region/Size/Rotation/Quality/Format are
// assembled into the per-pid image URL in main.
type Settings struct {
	Scheme    string `json:"scheme"`
	Server    string `json:"server"`
	Prefix    string `json:"prefix"`
	Region    string `json:"region"`
	Size      string `json:"size"`
	Rotation  string `json:"rotation"`
	Quality   string `json:"quality"`
	Format    string `json:"format"`
	ImageInfo string `json:"image_info"`
	// PidFile lists one pid per line; DelayInMs throttles requests;
	// ImageDir is the root under which a per-run subdir is created.
	PidFile   string `json:"pid_file"`
	DelayInMs int    `json:"delay_in_ms"`
	ImageDir  string `json:"image_dir"`
}
var settings *Settings = read_settings()
// read_settings loads and decodes the JSON configuration file, terminating
// the program if the file cannot be opened or parsed.
func read_settings() *Settings {
	var settings Settings
	config_file, err := os.Open(ConfigFile)
	if err != nil {
		log.Fatal(err)
	}
	defer config_file.Close() // previously the handle was never closed
	// Previously the decode error was silently dropped, which could return
	// a zero-valued Settings on a malformed config.
	if err := json.NewDecoder(config_file).Decode(&settings); err != nil {
		log.Fatal(err)
	}
	return &settings
}
func setup_date_string() string {
year, month, day := time.Now().Date()
date_string := strconv.Itoa(year) +
month.String() +
strconv.Itoa(day) +
"_" +
fmt.Sprintf("%02d",time.Now().Hour()) +
fmt.Sprintf("%02d",time.Now().Minute()) +
fmt.Sprintf("%02d",time.Now().Second())
return date_string
}
// log_settings writes every configuration value to the current log output.
func log_settings() {
	log.Print("Here are the settings (i.e. constants in code):")
	for _, kv := range []struct{ key, value string }{
		{"Scheme", settings.Scheme},
		{"Server", settings.Server},
		{"Prefix", settings.Prefix},
		{"Region", settings.Region},
		{"Size", settings.Size},
		{"Rotation", settings.Rotation},
		{"Quality", settings.Quality},
		{"Format", settings.Format},
		{"ImageInfo", settings.ImageInfo},
		{"PidFile", settings.PidFile},
		{"DelayInMs", strconv.Itoa(settings.DelayInMs)},
		{"ImageDir", settings.ImageDir},
	} {
		log.Print(kv.key + ": " + kv.value)
	}
}
func main() {
// Read settings from config file
settings := read_settings()
// Setup date_string, used to create log filename and image subdir
date_string := setup_date_string()
// Setup log file for logging
log_filename := date_string + ".log"
log_file, err := os.Create("logs/"+log_filename)
defer log_file.Close()
if err != nil {
log.Fatal(err)
}
log.SetOutput(log_file)
fmt.Println("Output sent to following logfile:")
fmt.Println(log_file.Name())
// Print settings to logfile
log_settings()
// Read in the image pids
pid_file, err := os.Open(settings.PidFile)
defer pid_file.Close()
if err != nil {
log.Fatal(err)
}
// Create and populate a slice containing the pids
pids := make([]string,0,50)
scanner := bufio.NewScanner(pid_file)
for scanner.Scan() {
pid := scanner.Text()
pids = append(pids, pid)
}
// Generate path to image dir
image_dir := settings.ImageDir + date_string + "/"
err = os.Mkdir(image_dir,os.ModePerm)
if err != nil {
log.Fatal(err)
}
log.Print("Images will be saved in " + image_dir)
// Main processing
log.Print("Processing started, " + strconv.Itoa(len(pids)) + " pids.")
for _, pid := range pids {
log.Print("About to process pid " + pid)
// formated_pid will be used in the name of the generated files
formated_pid := strings.Replace(pid,":","_",1)
// Construct the URL to retrieve the jpg from the image server
image_url := settings.Scheme + "://" + settings.Server + "/" + settings.Prefix + "/" +
pid + "/" + settings.Region + "/" + settings.Size + "/" + settings.Rotation +
"/" + settings.Quality + "." + settings.Format
log.Print(image_url)
// Send GET request to server
resp, err := http.Get(image_url)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
}
// fmt.Println(resp.Status)
log.Print(resp.Status)
// if Status Code is 200, create the file
if resp.StatusCode == 200 {
image_filename := image_dir + formated_pid + ".jpg"
image_file, err := os.Create(image_filename)
defer image_file.Close()
if err != nil {
log.Fatal(err)
}
io.Copy(image_file, resp.Body)
log.Print("Generated file: " + image_filename)
} else {
log.Print("File not created due to non-200 status code")
}
// Sleep to lessen burden on server
log.Print("Sleeping for " + strconv.Itoa(settings.DelayInMs) + "ms")
time.Sleep(time.Duration(settings.DelayInMs) * time.Millisecond)
// Get JSON info, not really required
/*
info_url := settings.Scheme + "://" + settings.Server + "/" + settings.Prefix + "/" +
pid + "/" + settings.ImageInfo
fmt.Println(info_url)
resp, err = http.Get(info_url)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
}
// fmt.Println(resp, err)
json_file, err := os.Create("info/"+formated_pid+".json")
defer json_file.Close()
if err != nil {
log.Fatal(err)
}
io.Copy(json_file, resp.Body)
*/
}
}
|
package main
import "fmt"
// isMatch reports whether s matches pattern p, where '.' matches any single
// character and '*' matches zero or more of the preceding element. It uses
// O(len(s)*len(p)) dynamic programming: dp[i][j] is true when s[:i] matches
// p[:j].
func isMatch(s string, p string) bool {
	sLen, pLen := len(s), len(p)
	dp := make([][]bool, sLen+1)
	for i := range dp {
		dp[i] = make([]bool, pLen+1)
	}
	dp[0][0] = true
	// Empty string vs pattern: an "x*" pair can match zero characters.
	// The j >= 2 guard fixes an index-out-of-range panic on a malformed
	// pattern that starts with '*' (previously dp[0][-1] was accessed).
	for j := 1; j <= pLen; j++ {
		if p[j-1] == '*' && j >= 2 {
			dp[0][j] = dp[0][j-2]
		}
	}
	for i := 1; i <= sLen; i++ {
		for j := 1; j <= pLen; j++ {
			if s[i-1] == p[j-1] || p[j-1] == '.' {
				dp[i][j] = dp[i-1][j-1]
			} else if p[j-1] == '*' && j >= 2 {
				if s[i-1] == p[j-2] || p[j-2] == '.' {
					// Either drop the "x*" pair (zero occurrences) or
					// consume one more matching character.
					dp[i][j] = dp[i][j-2] || dp[i-1][j]
				} else {
					// The starred element can't match s[i-1]: only the
					// zero-occurrence interpretation remains.
					dp[i][j] = dp[i][j-2]
				}
			}
		}
	}
	return dp[sLen][pLen]
}
// main exercises isMatch with a simple non-matching example.
func main() {
	matched := isMatch("a", "aa")
	fmt.Println(matched)
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
)
var (
	// token is the Discord bot token, supplied via the -t flag (see init).
	token string
	// motivationalMessages are picked at random and appended to the
	// "+workout" confirmation. One entry is deliberately the empty string,
	// so sometimes no extra message is shown.
	motivationalMessages = []string{
		"Rock on!",
		"You got this!",
		"Whoo!",
		"Whoo hoo!",
		"Those gains!",
		"Bro do you even lift? Wait I guess you do",
		"Virtual flex!",
		"",
		"Keep it up!",
		"Awesome!",
		"Party time!",
		"Amazing!",
		"Wicked Awesome!",
		"Now go get a beer.",
		"https://media1.giphy.com/media/l46CDHTqbmnGZyxKo/giphy.gif?cid=ecf05e479f9e2ff596964bc8df54c38800879a549d4668d6&rid=giphy.gif",
	}
)
const (
	// dir is where per-guild workout files are stored.
	dir = "/tmp/workouts/"
	// ffilename is the per-guild JSON file path template (filled with the
	// guild ID).
	ffilename = dir + "%s.json"
	// help lists the commands understood by messageCreate.
	help = "+workout (or +w for short) to add a workout\n-workout (or -w) to remove your most recent workout\n?me to view your workouts\n?all to view everyones' workouts\n?help to see this message"
)
// init registers and parses the -t flag that carries the bot token.
func init() {
	flag.StringVar(&token, "t", "", "Bot Token")
	flag.Parse()
}
// main ensures the storage directory exists, starts the Discord session,
// and blocks until an interrupt or terminate signal arrives.
func main() {
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		// Previously the Mkdir error was ignored, so every later file write
		// would fail silently.
		if err := os.Mkdir(dir, 0777); err != nil {
			fmt.Println("error creating storage dir,", err)
			return
		}
	}

	dg, err := discordgo.New("Bot " + token)
	if err != nil {
		fmt.Println("error creating Discord session,", err)
		return
	}
	dg.AddHandler(messageCreate)

	if err = dg.Open(); err != nil {
		fmt.Println("error opening connection,", err)
		return
	}

	fmt.Println("Bot is now running. Press CTRL-C to exit.")
	// os.Kill/SIGKILL cannot be trapped (go vet flags it), and SIGINT is
	// the same signal as os.Interrupt, so only the two catchable signals
	// are registered.
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, os.Interrupt, syscall.SIGTERM)
	<-sc

	dg.Close()
}
// messageCreate dispatches chat commands. Messages sent by this bot, or by
// any other bot, are ignored.
func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
	if m.Author.ID == s.State.User.ID || m.Author.Bot {
		return
	}
	switch {
	case strings.HasPrefix(m.Content, "+w"):
		addWorkout(m.Author.Username, m.GuildID, 1)
		pick := rand.Intn(len(motivationalMessages))
		s.ChannelMessageSend(m.ChannelID, "Added. "+motivationalMessages[pick])
	case strings.HasPrefix(m.Content, "-w"):
		addWorkout(m.Author.Username, m.GuildID, -1)
		s.ChannelMessageSend(m.ChannelID, "Latest workout removed. Bummer")
	case strings.HasPrefix(m.Content, "?me"):
		s.ChannelMessageSend(m.ChannelID, queryWorkouts(m.Author.Username, m.GuildID))
	case strings.HasPrefix(m.Content, "?all"):
		s.ChannelMessageSend(m.ChannelID, queryAllWorkouts(m.GuildID))
	case strings.HasPrefix(m.Content, "?help"):
		s.ChannelMessageSend(m.ChannelID, help)
	}
}
type workouts map[string][]time.Time
// addWorkout appends (count > 0) or removes (count < 0) a workout timestamp
// for name in the guild's JSON file, rewriting the file in place. All file
// and encoding errors were previously ignored; they now cause an early
// best-effort return instead of corrupting the file.
func addWorkout(name, guild string, count int) {
	filename := fmt.Sprintf(ffilename, guild)
	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		return // best effort: nothing to do without the file
	}
	defer file.Close()

	data, err := ioutil.ReadAll(file)
	if err != nil {
		return
	}
	var w workouts
	// A decode error leaves w nil, which is handled like an empty file.
	json.Unmarshal(data, &w)
	if w == nil {
		w = make(workouts)
	}

	switch {
	case count > 0:
		// append works on a nil slice, covering the first-workout case.
		w[name] = append(w[name], time.Now())
	case count < 0:
		if len(w[name]) > 0 {
			w[name] = w[name][:len(w[name])-1]
		} else {
			w[name] = []time.Time{}
		}
	}

	data, err = json.Marshal(w)
	if err != nil {
		return
	}
	n, err := file.WriteAt(data, 0)
	if err != nil {
		return
	}
	// Drop any stale bytes from a previously longer file.
	file.Truncate(int64(n))
	file.Sync()
}
// queryWorkouts formats a summary of name's workouts in the given guild.
func queryWorkouts(name, guild string) string {
	filename := fmt.Sprintf(ffilename, guild)
	file, err := os.Open(filename)
	if err != nil {
		// No file yet means no workouts were ever recorded for this guild.
		// Previously this error was ignored and the same answer was reached
		// only by accident via ReadAll failing on the nil file.
		return name + " has no workouts"
	}
	defer file.Close()

	data, _ := ioutil.ReadAll(file)
	var w workouts
	json.Unmarshal(data, &w)
	// Reading from a nil map is safe, so a decode failure falls through to
	// the not-found answer.
	times, ok := w[name]
	if !ok {
		return name + " has no workouts"
	}
	return report(name, times)
}
// queryAllWorkouts formats one summary line per user in the guild.
// NOTE(review): map iteration order is random, so the line order varies
// between calls.
func queryAllWorkouts(guild string) string {
	filename := fmt.Sprintf(ffilename, guild)
	file, err := os.Open(filename)
	if err != nil {
		// No file yet means the guild has never recorded anything.
		return "No workouts found"
	}
	defer file.Close()

	data, _ := ioutil.ReadAll(file)
	var w workouts
	json.Unmarshal(data, &w)
	if len(w) == 0 {
		return "No workouts found"
	}

	// Build the reply with a Builder rather than string concatenation.
	var b strings.Builder
	for k, v := range w {
		b.WriteString(report(k, v))
		b.WriteString("\n")
	}
	return b.String()
}
func report(name string, workouts []time.Time) string {
if workouts == nil || len(workouts) == 0 {
return name + " has no workouts"
}
total := len(workouts)
thisWeek := 0
year, week := time.Now().ISOWeek()
for i := len(workouts) - 1; i >= 0; i-- {
y, w := workouts[i].ISOWeek()
if y == year && w == week {
thisWeek++
} else {
break // assume sorted with most recent at the end of the slice
}
}
return fmt.Sprintf("%s: %d total, %d this week", name, total, thisWeek)
}
|
/*
Description
Ms. Terry is a pre-school art teacher who likes to have her students work with clay. One of her assignments is to form a lump of clay into a block and then measure the dimensions of the block. However, in every class, there is always one child who insists on taking some clay from some other child. Since Ms. Terry always gives every child in a class the same amount of clay to begin with, you can write a program that helps Ms. Terry find the bully and victim after she measures each child's finished block.
Input
There are one or more classes of students, followed by a final line containing only the value -1. Each class starts with a line containing an integer, n, which is the number of students in the class, followed by n lines of student information. Each line of student information consists of three positive integers, representing the dimensions of the clay block, followed by the student's first name. There can never be more than 9 students nor less than 2 students in any class. Each student's name is at most 8 characters. Ms. Terry always gives each student at most 250 cubic units of clay. There is exactly one bully and one victim in each class.
Output
For each class print a single line exactly as shown in the sample output.
Sample Input
3
10 10 2 Jill
5 3 10 Will
5 5 10 Bill
4
2 4 10 Cam
4 3 7 Sam
8 11 1 Graham
6 2 7 Pam
-1
Sample Output
Bill took clay from Will.
Graham took clay from Cam.
Source
Mid-Central USA 2003
*/
package main
import (
"fmt"
"math"
)
func main() {
assert(victim([]Lump{
{10, 10, 2, "Jill"},
{5, 3, 10, "Will"},
{5, 5, 10, "Bill"},
}) == "Bill took clay from Will.")
assert(victim([]Lump{
{2, 4, 10, "Cam"},
{4, 3, 7, "Sam"},
{8, 11, 1, "Graham"},
{6, 2, 7, "Pam"},
}) == "Graham took clay from Cam.")
}
// assert panics when x is false; a minimal test harness for main.
func assert(x bool) {
	if !x {
		panic("assertion failed")
	}
}
// victim identifies the bully (largest clay block) and the victim (smallest
// clay block) in a class and formats the answer line. Returns "" for an
// empty class; ties go to the earliest student in input order. The loop
// variable previously shadowed the slice parameter `l`, which was legal but
// confusing — both are now named distinctly.
func victim(lumps []Lump) string {
	if len(lumps) < 1 {
		return ""
	}
	minVolume, maxVolume := math.MaxInt, math.MinInt
	minIdx, maxIdx := 0, 0
	for i, lump := range lumps {
		v := lump.width * lump.height * lump.depth
		if v > maxVolume {
			maxVolume, maxIdx = v, i
		}
		if v < minVolume {
			minVolume, minIdx = v, i
		}
	}
	return fmt.Sprintf("%v took clay from %v.", lumps[maxIdx].student, lumps[minIdx].student)
}

// Lump is one student's measured clay block.
type Lump struct {
	width   int
	height  int
	depth   int
	student string
}
|
package utils
import (
"glsamaker/pkg/app/handler/authentication/auth_session"
"glsamaker/pkg/database/connection"
"glsamaker/pkg/models/users"
"net/http"
"strings"
)
// utility methods to check whether a user is authenticated
func Only2FAMissing(w http.ResponseWriter, r *http.Request) bool {
sessionID, err := r.Cookie("session")
userIP := getIP(r)
return err == nil && sessionID != nil && auth_session.Only2FAMissing(sessionID.Value, userIP)
}
func IsAuthenticated(w http.ResponseWriter, r *http.Request) bool {
sessionID, err := r.Cookie("session")
userIP := getIP(r)
return err == nil && sessionID != nil && auth_session.IsLoggedIn(sessionID.Value, userIP)
}
func IsAuthenticatedAndNeedsNewPassword(w http.ResponseWriter, r *http.Request) bool {
sessionID, err := r.Cookie("session")
userIP := getIP(r)
return err == nil && sessionID != nil && auth_session.IsLoggedInAndNeedsNewPassword(sessionID.Value, userIP)
}
func IsAuthenticatedAndNeeds2FA(w http.ResponseWriter, r *http.Request) bool {
sessionID, err := r.Cookie("session")
userIP := getIP(r)
return err == nil && sessionID != nil && auth_session.IsLoggedInAndNeeds2FA(sessionID.Value, userIP)
}
func IsAuthenticatedAsAdmin(w http.ResponseWriter, r *http.Request) bool {
sessionID, err := r.Cookie("session")
userIP := getIP(r)
if err != nil || sessionID == nil || !auth_session.IsLoggedIn(sessionID.Value, userIP) {
return false
}
user := GetAuthenticatedUser(r)
return user != nil && user.Permissions.Admin.View
}
// GetAuthenticatedUser resolves the request's session cookie to a database
// user. Both fully logged-in sessions and sessions missing only the second
// factor are accepted. Returns nil when there is no valid session or the
// user record cannot be loaded.
func GetAuthenticatedUser(r *http.Request) *users.User {
	sessionID, err := r.Cookie("session")
	userIP := getIP(r)
	if err != nil || sessionID == nil || !(auth_session.IsLoggedIn(sessionID.Value, userIP) || auth_session.Only2FAMissing(sessionID.Value, userIP)) {
		return nil
	}
	userId := auth_session.GetUserId(sessionID.Value, userIP)
	user := &users.User{Id: userId}
	// Populate the remaining fields from the database by primary key.
	err = connection.DB.Select(user)
	if err != nil {
		return nil
	}
	return user
}
func getIP(r *http.Request) string {
forwarded := r.Header.Get("X-FORWARDED-FOR")
if forwarded != "" {
return strings.Split(forwarded, ":")[0]
}
return strings.Split(r.RemoteAddr, ":")[0]
}
|
package leetcode
// duplicateZeros duplicates each zero in arr in place, shifting the
// remaining elements right; anything pushed past the original length is
// discarded (LeetCode 1089).
func duplicateZeros(arr []int) {
	// Pass 1: count the zeros whose duplicate still fits inside the array
	// (zeroCnt); elements at index >= l-zeroCnt will be shifted out.
	zeroCnt := 0
	l := len(arr)
	// last_dup flags the boundary case where the last surviving element is
	// a zero whose duplicate lands exactly on the final slot, so it is
	// written twice even on the first backward-copy iteration.
	last_dup := false
	for idx, v := range arr {
		if v == 0 {
			if idx+zeroCnt+1 < l {
				if idx+zeroCnt+2 == l {
					last_dup = true
				}
				zeroCnt++
			}
		}
		// Everything past this index is shifted out entirely.
		if idx+zeroCnt >= l {
			break
		}
	}
	// Pass 2: copy the surviving prefix arr[:l-zeroCnt] backwards into its
	// final position, writing each counted zero twice. The i > 0 condition
	// skips duplicating the last element unless last_dup says its duplicate
	// fits.
	t := l - 1
	for i := 0; i < l-zeroCnt; i++ {
		if (i > 0 || last_dup) && arr[l-zeroCnt-1-i] == 0 {
			arr[t] = arr[l-zeroCnt-1-i]
			t--
		}
		arr[t] = arr[l-zeroCnt-1-i]
		t--
	}
}
|
package arraycode
import (
"fmt"
"crypto/sha256"
)
// comparecode hashes both byte slices with SHA-256, prints the two digests,
// whether they are equal, and the digest's Go type, then returns 1
// unconditionally.
func comparecode(b1 []byte, b2 []byte) int {
	sum1 := sha256.Sum256(b1)
	sum2 := sha256.Sum256(b2)
	equal := sum1 == sum2
	fmt.Printf("%x\n%x\n%t\n%T\n", sum1, sum2, equal, sum1)
	return 1
}
|
package seccomp
import (
"errors"
"testing"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pjbgf/go-test/should"
)
// TestBruteForce_ShortCircuit verifies that brute-forcing aborts after a
// single execution when the workload fails even without seccomp filtering.
func TestBruteForce_ShortCircuit(t *testing.T) {
	assertThat := func(assumption string, expectedExecutions int, expectedErr error) {
		should := should.New(t)
		// failAlways simulates a workload that can never run successfully.
		stub := &runnerStub{failAlways: true}
		s := NewBruteForceSource(stub)
		actual, err := s.GetSystemCalls()
		should.BeNil(actual, assumption)
		should.BeEqual(expectedExecutions, stub.totalExecutions, assumption)
		should.BeEqual(expectedErr, err, assumption)
	}
	assertThat("should abort if fails without seccomp", 1,
		errors.New("execution aborted, command could not be executed: could not load container"))
}
// TestBruteForce_GetSystemCalls verifies that the brute-force source
// reports the syscalls the container cannot run without: the injected
// failures plus the baseline calls in the expected allow-list.
func TestBruteForce_GetSystemCalls(t *testing.T) {
	assertThat := func(assumption string, injected []string,
		expected *specs.LinuxSyscall, expectedErr error) {
		should := should.New(t)
		// The stub fails whenever any injected syscall is missing from the
		// candidate profile.
		stub := &runnerStub{callsToFail: injected}
		s := NewBruteForceSource(stub)
		actual, err := s.GetSystemCalls()
		should.BeEqual(expectedErr, err, assumption)
		should.BeEqual(expected.Action, actual.Action, assumption)
		should.HaveSameItems(expected.Names, actual.Names, assumption)
	}
	assertThat("should return all syscalls that the container can't run without",
		[]string{"read", "write", "close"},
		&specs.LinuxSyscall{
			Action: specs.ActAllow,
			Names:  []string{"read", "write", "close", "futex", "exit", "execve", "exit_group"},
		},
		nil)
}
// TestBruteForce_IndexesOf covers indexesOf for the missing, single-match,
// and multi-match cases.
func TestBruteForce_IndexesOf(t *testing.T) {
	assertThat := func(assumption string, source []string, item string, expected []int) {
		should := should.New(t)
		stub := &runnerStub{}
		s := NewBruteForceSource(stub)
		actual := s.indexesOf(source, item)
		should.BeEqual(expected, actual, assumption)
	}
	assertThat("should return empty []int when item not found",
		[]string{"item1", "item2", "item3"},
		"item4",
		[]int{})
	assertThat("should return single index when item found once",
		[]string{"item1", "item2", "item3"},
		"item2",
		[]int{1})
	assertThat("should return two indexes when item found twice",
		[]string{"item1", "item2", "item3", "item2"},
		"item2",
		[]int{1, 3})
}
// TestBruteForce_ExcludeItemFromSlice covers excludeItemFromSlice for the
// missing, start/middle/end, and repeated-occurrence cases.
func TestBruteForce_ExcludeItemFromSlice(t *testing.T) {
	assertThat := func(assumption string, source []string, itemToExclude string, expected []string) {
		should := should.New(t)
		stub := &runnerStub{}
		s := NewBruteForceSource(stub)
		actual := s.excludeItemFromSlice(source, itemToExclude)
		should.BeEqual(expected, actual, assumption)
	}
	assertThat("should not change source slice when item not found",
		[]string{"item1", "item2", "item3"},
		"item4",
		[]string{"item1", "item2", "item3"})
	assertThat("should exclude item from slice when item is found once at the end",
		[]string{"item1", "item2", "item3"},
		"item3",
		[]string{"item1", "item2"})
	assertThat("should exclude item from slice when item is found once in the middle",
		[]string{"item1", "item2", "item3"},
		"item2",
		[]string{"item1", "item3"})
	assertThat("should exclude item from slice when item is found once in the start",
		[]string{"item1", "item2", "item3"},
		"item1",
		[]string{"item2", "item3"})
	assertThat("should exclude item from slice when item is found multiple times",
		[]string{"item1", "item2", "item3", "item2", "item4", "item2"},
		"item2",
		[]string{"item1", "item3", "item4"})
}
// runnerStub is a stub to replace a runner.
type runnerStub struct {
	profile         *specs.LinuxSeccomp // last profile passed to RunWithSeccomp
	callsToFail     []string            // syscalls whose absence from the profile forces a failure
	totalExecutions int                 // number of RunWithSeccomp invocations
	failAlways      bool                // fail regardless of profile contents
}
// RunWithSeccomp records the profile and bumps the execution counter, then
// simulates a container-load failure when the stub is configured to fail.
func (r *runnerStub) RunWithSeccomp(profile *specs.LinuxSeccomp) error {
	r.totalExecutions++
	r.profile = profile
	if !r.shouldFail() {
		return nil
	}
	return errors.New("could not load container")
}
// shouldFail reports whether the stubbed run should simulate a failure:
// always when failAlways is set, otherwise when any syscall in callsToFail
// is absent from the profile currently under test. Only Syscalls[0] is
// inspected. The inner scan now breaks on the first match instead of
// walking the rest of the list.
func (r *runnerStub) shouldFail() bool {
	if r.failAlways {
		return true
	}
	if r.profile == nil {
		return false
	}
	for _, required := range r.callsToFail {
		contains := false
		for _, n := range r.profile.Syscalls[0].Names {
			if required == n {
				contains = true
				break // found; no need to scan further
			}
		}
		if !contains {
			return true
		}
	}
	return false
}
|
package red_test
import (
. "github.com/DennisDenuto/igrb/builds/red"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/concourse/fly/rc/rcfakes"
"github.com/concourse/go-concourse/concourse/concoursefakes"
"github.com/concourse/atc"
"errors"
)
// Builds covers FailedBuildFetcher.Fetch: it should surface every
// non-successful finished build of a pipeline and wrap concourse errors.
var _ = Describe("Builds", func() {
	Describe("ListBuilds", func() {
		var pipelineName string
		var build FailedBuildFetcher
		var target *rcfakes.FakeTarget
		var team *concoursefakes.FakeTeam

		BeforeEach(func() {
			target = &rcfakes.FakeTarget{}
			team = &concoursefakes.FakeTeam{}
			target.TeamReturns(team)
			build = FailedBuildFetcher{
				Target: target,
			}
		})

		Context("A Single pipeline with a Single Job with a Red Build", func() {
			BeforeEach(func() {
				pipelineName = "bosh"
				config := atc.Config{}
				config.Jobs = []atc.JobConfig{
					{Name: "job-1"},
				}
				team.PipelineConfigReturns(config, "", "", false, nil)
				team.JobReturns(atc.Job{FinishedBuild: &atc.Build{Status: "failed"}}, true, nil)
			})

			It("Should return the correct build", func() {
				redBuilds, err := build.Fetch(pipelineName)
				Expect(err).ToNot(HaveOccurred())
				Expect(target.TeamCallCount()).To(Equal(1))
				Expect(team.PipelineConfigCallCount()).To(Equal(1))
				// BUG FIX: these assertions used to shadow pipelineName with
				// `pipelineName := ...` and then compare the value against
				// itself, which can never fail. Use distinct names so the
				// recorded fake arguments are actually checked.
				requestedPipeline := team.PipelineConfigArgsForCall(0)
				Expect(requestedPipeline).To(Equal(pipelineName))
				Expect(team.JobCallCount()).To(Equal(1))
				jobPipeline, jobName := team.JobArgsForCall(0)
				Expect(jobPipeline).To(Equal(pipelineName))
				Expect(jobName).To(Equal("job-1"))
				Expect(redBuilds).To(HaveLen(1))
				Expect(redBuilds[0].Status).To(Equal("failed"))
			})
		})

		Context("A Single pipeline with multiple Jobs all being Red builds", func() {
			BeforeEach(func() {
				pipelineName = "bosh"
				config := atc.Config{}
				config.Jobs = []atc.JobConfig{
					{Name: "job-1"},
					{Name: "job-2"},
				}
				team.PipelineConfigReturns(config, "", "", false, nil)
				team.JobReturns(atc.Job{FinishedBuild: &atc.Build{Status: "failed"}}, true, nil)
			})

			It("Should return the correct build", func() {
				redBuilds, err := build.Fetch(pipelineName)
				Expect(err).ToNot(HaveOccurred())
				Expect(redBuilds).To(HaveLen(2))
				Expect(redBuilds[0].Status).To(Equal("failed"))
				Expect(redBuilds[1].Status).To(Equal("failed"))
			})
		})

		Context("A Single pipeline with multiple Jobs with some Red and Green builds", func() {
			BeforeEach(func() {
				pipelineName = "bosh"
				config := atc.Config{}
				config.Jobs = []atc.JobConfig{
					{Name: "job-1"},
					{Name: "job-2"},
					{Name: "job-3"},
				}
				team.PipelineConfigReturns(config, "", "", false, nil)
				// job-2 succeeded and must be filtered out; failed and
				// errored builds both count as red.
				team.JobStub = func(pipelineName, jobName string) (atc.Job, bool, error) {
					switch jobName {
					case "job-1":
						return atc.Job{FinishedBuild: &atc.Build{Status: "failed"}}, true, nil
					case "job-2":
						return atc.Job{FinishedBuild: &atc.Build{Status: "succeeded"}}, true, nil
					case "job-3":
						return atc.Job{FinishedBuild: &atc.Build{Status: "errored"}}, true, nil
					default:
						return atc.Job{FinishedBuild: &atc.Build{Status: "failed"}}, true, nil
					}
				}
			})

			It("Should return the correct build", func() {
				redBuilds, err := build.Fetch(pipelineName)
				Expect(err).ToNot(HaveOccurred())
				Expect(redBuilds).To(HaveLen(2))
				Expect(redBuilds[0].Status).To(Equal("failed"))
				Expect(redBuilds[1].Status).To(Equal("errored"))
			})
		})

		Context("concourse errors out", func() {
			BeforeEach(func() {
				pipelineName = "bosh"
			})

			It("should return correct error if pipeline fails", func() {
				team.PipelineConfigReturns(atc.Config{}, "", "", false, errors.New("some error"))
				_, err := build.Fetch(pipelineName)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("Unable to get pipeline config: some error"))
			})

			It("should return correct error if getting a job fails", func() {
				config := atc.Config{}
				config.Jobs = []atc.JobConfig{
					{Name: "job-1"},
					{Name: "job-2"},
				}
				team.PipelineConfigReturns(config, "", "", false, nil)
				team.JobReturns(atc.Job{}, true, errors.New("some strange error"))
				_, err := build.Fetch(pipelineName)
				Expect(err).To(HaveOccurred())
				// NOTE(review): the wrapped message still says "pipeline
				// config" even though the Job call failed — this pins the
				// current Fetch behavior; confirm the prefix is intended.
				Expect(err.Error()).To(ContainSubstring("Unable to get pipeline config: some strange error"))
			})
		})
	})
})
|
package myast
import (
"bytes"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/printer"
"go/token"
"log"
"os"
"path/filepath"
"strconv"
"testing"
)
/*
基础结构说明
普通Node,不是特定语法结构,属于某个语法结构的一部分.
Comment 表示一行注释 // 或者 / /
CommentGroup 表示多行注释
Field 表示结构体中的一个定义或者变量,或者函数签名当中的参数或者返回值
FieldList 表示以”{}”或者”()”包围的Filed列表
Expression & Types (都划分成Expr接口)
BadExpr 用来表示错误表达式的占位符
Ident 比如报名,函数名,变量名
Ellipsis 省略号表达式,比如参数列表的最后一个可以写成arg…
BasicLit 基本字面值,数字或者字符串
FuncLit 函数定义
CompositeLit 构造类型,比如{1,2,3,4}
ParenExpr 括号表达式,被括号包裹的表达式
SelectorExpr 选择结构,类似于a.b的结构
IndexExpr 下标结构,类似这样的结构 expr[expr]
SliceExpr 切片表达式,类似这样 expr[low:mid:high]
TypeAssertExpr 类型断言类似于 X.(type)
CallExpr 调用类型,类似于 expr()
StarExpr 指针表达式,类似于 *X
UnaryExpr 一元表达式
BinaryExpr 二元表达式
KeyValueExp 键值表达式 key:value
ArrayType 数组类型
StructType 结构体类型
FuncType 函数类型
InterfaceType 接口类型
MapType map类型
ChanType 管道类型
Statements语句
BadStmt 错误的语句
DeclStmt 在语句列表里的申明
EmptyStmt 空语句
LabeledStmt 标签语句类似于 indent:stmt
ExprStmt 包含单独的表达式语句
SendStmt chan发送语句
IncDecStmt 自增或者自减语句
AssignStmt 赋值语句
GoStmt Go语句
DeferStmt 延迟语句
ReturnStmt return 语句
BranchStmt 分支语句 例如break continue
BlockStmt 块语句 {} 包裹
IfStmt If 语句
CaseClause case 语句
SwitchStmt switch 语句
TypeSwitchStmt 类型switch 语句 switch x:=y.(type)
CommClause 发送或者接受的case语句,类似于 case x <-:
SelectStmt select 语句
ForStmt for 语句
RangeStmt range 语句
Declarations声明
Spec type
Import Spec
Value Spec
Type Spec
BadDecl 错误申明
GenDecl 一般申明(和Spec相关,比如 import “a”,var a,type a)
FuncDecl 函数申明
Files and Packages
File 代表一个源文件节点,包含了顶级元素.
Package 代表一个包,包含了很多文件.
以上内容转载自某篇大神文章，具体出处地址已忘，知道出处的朋友可以补充。
*/
// TestHello parses ./sample/hello.go and prints its imports, comments,
// function names, and every return statement found while inspecting the AST.
func TestHello(t *testing.T) {
	fset := token.NewFileSet()
	node, err := parser.ParseFile(fset, "./sample/hello.go", nil, parser.ParseComments)
	if err != nil {
		// t.Fatal (not log.Fatal) so a parse failure fails only this test
		// instead of calling os.Exit and skipping all remaining tests.
		t.Fatal(err)
	}
	fmt.Println("Imports:================================================")
	for _, imp := range node.Imports {
		fmt.Println(imp.Path.Value)
	}
	fmt.Println("Comments:================================================")
	for _, c := range node.Comments {
		fmt.Print(c.Text())
	}
	fmt.Println("Functions:================================================")
	for _, decl := range node.Decls {
		fn, ok := decl.(*ast.FuncDecl)
		if !ok {
			continue
		}
		fmt.Println(fn.Name.Name)
	}
	fmt.Println("Inspect:================================================")
	ast.Inspect(node, func(n ast.Node) bool {
		// Report every return statement with its source line.
		if ret, ok := n.(*ast.ReturnStmt); ok {
			fmt.Printf("return statement found on line %d:\n\t", fset.Position(ret.Pos()).Line)
			printer.Fprint(os.Stdout, fset, ret)
		}
		return true
	})
}
// Visitor is an ast.Visitor that ensures the "context" package is imported
// (addImport) and injects a context parameter into every interface method
// (addContext); see Visit for the node dispatch.
type Visitor struct {
}
// Visit implements ast.Visitor: import declarations are rewritten to include
// "context", interface types get a ctx parameter injected into their
// methods, and identifiers are printed as they are encountered.
func (v *Visitor) Visit(node ast.Node) ast.Visitor {
	// Bind the concrete node in the type switch instead of switching on
	// node.(type) and re-asserting inside each case.
	switch n := node.(type) {
	case *ast.GenDecl:
		// Check whether the "context" package is already imported.
		// Notice: a file with no import declaration at all is not handled.
		if n.Tok == token.IMPORT {
			v.addImport(n)
			// No need to walk the children of the import declaration.
			return nil
		}
	case *ast.InterfaceType:
		// Inject the context parameter into every interface method.
		addContext(n)
		// No need to walk the interface's children.
		return nil
	case *ast.Ident:
		fmt.Printf("%+v", n)
	}
	return v
}
// addImport ensures the import declaration genDecl contains the "context"
// package, appending an import spec for it when it is missing.
func (v *Visitor) addImport(genDecl *ast.GenDecl) {
	imported := false
	for _, spec := range genDecl.Specs {
		// Specs of an IMPORT GenDecl are always *ast.ImportSpec.
		if spec.(*ast.ImportSpec).Path.Value == strconv.Quote("context") {
			imported = true
		}
	}
	if imported {
		return
	}
	genDecl.Specs = append(genDecl.Specs, &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: strconv.Quote("context"),
		},
	})
}
// AddImport ensures the import declaration genDecl contains the "context"
// package, appending an import spec for it when it is missing.
func AddImport(genDecl *ast.GenDecl) {
	hasImported := false
	for _, v := range genDecl.Specs {
		imptSpec := v.(*ast.ImportSpec)
		// Stop scanning as soon as "context" is found.
		if imptSpec.Path.Value == strconv.Quote("context") {
			hasImported = true
			break
		}
	}
	// Append the import only when it is not already present.
	if !hasImported {
		genDecl.Specs = append(genDecl.Specs, &ast.ImportSpec{
			Path: &ast.BasicLit{
				Kind:  token.STRING,
				Value: strconv.Quote("context"),
			},
		})
	}
}
// addContext prepends a `ctx context.Context` parameter to every method of
// the interface that does not already take a context.Context argument.
func addContext(iface *ast.InterfaceType) {
	// BUG FIX: the original guard used `||` (`iface.Methods != nil ||
	// iface.Methods.List != nil`), which dereferences iface.Methods exactly
	// when it is nil and panics. Bail out unless both are non-nil.
	if iface.Methods == nil || iface.Methods.List == nil {
		return
	}
	// NOTE(review): embedded interfaces (fields whose Type is not an
	// *ast.FuncType) would make this assertion panic — confirm callers only
	// pass plain method lists.
	for _, method := range iface.Methods.List {
		ft := method.Type.(*ast.FuncType)
		hasContext := false
		// Does any parameter already have the type context.Context?
		for _, param := range ft.Params.List {
			if expr, ok := param.Type.(*ast.SelectorExpr); ok {
				if ident, ok := expr.X.(*ast.Ident); ok && ident.Name == "context" {
					hasContext = true
				}
			}
		}
		// Prepend the ctx parameter to methods that lack one.
		if !hasContext {
			ctxField := &ast.Field{
				Names: []*ast.Ident{
					ast.NewIdent("ctx"),
				},
				// Notice: import aliases for "context" are not considered.
				Type: &ast.SelectorExpr{
					X:   ast.NewIdent("context"),
					Sel: ast.NewIdent("Context"),
				},
			}
			ft.Params.List = append([]*ast.Field{ctxField}, ft.Params.List...)
		}
	}
}
// TestDemo parses ./sample/demo.go and dumps its AST.
func TestDemo(t *testing.T) {
	fset := token.NewFileSet()
	// Use an absolute path so positions in the printed tree are clickable
	// in editors.
	path, err := filepath.Abs("./sample/demo.go")
	if err != nil {
		t.Fatal(err)
	}
	f, err := parser.ParseFile(fset, path, nil, parser.AllErrors)
	if err != nil {
		// Fail the test instead of logging and continuing with a possibly
		// nil file node.
		t.Fatal(err)
	}
	// Dump the syntax tree.
	ast.Print(fset, f)
}
func TestDemo2(t *testing.T) {
fset := token.NewFileSet()
// 这里取绝对路径,方便打印出来的语法树可以转跳到编辑器
path, _ := filepath.Abs("./sample/demo.go")
f, err := parser.ParseFile(fset, path, nil, parser.AllErrors)
if err != nil {
log.Println(err)
}
ast.Print(fset, f)
v := Visitor{}
ast.Walk(&v, f)
var output []byte
buffer := bytes.NewBuffer(output)
err = format.Node(buffer, fset, f)
if err != nil {
log.Fatal(err)
}
// 输出Go代码
fmt.Println(buffer.String())
}
// TestDemo3 dumps the AST of a fixed example test file.
func TestDemo3(t *testing.T) {
	fset := token.NewFileSet()
	// Absolute path so the printed tree is clickable in editors.
	path, err := filepath.Abs("/root/goworkspace/nfa_autotest/nfa_gotests/example/example_test.go")
	if err != nil {
		t.Fatal(err)
	}
	f, err := parser.ParseFile(fset, path, nil, parser.AllErrors)
	if err != nil {
		// Fail instead of printing a possibly nil file node.
		t.Fatal(err)
	}
	ast.Print(fset, f)
}
// TestImports logs every import path of ./sample/ast_traversal.go.
func TestImports(t *testing.T) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "./sample/ast_traversal.go", nil, parser.ParseComments)
	if err != nil {
		// t.Fatal already stops this test; the old `return` after it was
		// unreachable, as was the trailing `return` of the function.
		t.Fatal(err)
	}
	for _, imp := range f.Imports {
		t.Logf("import: %s", imp.Path.Value)
	}
}
// TestComments logs every comment group of ./sample/ast_traversal.go.
func TestComments(t *testing.T) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "./sample/ast_traversal.go", nil, parser.ParseComments)
	if err != nil {
		// t.Fatal stops the test; the old unreachable/trailing returns
		// were removed.
		t.Fatal(err)
	}
	for _, cg := range f.Comments {
		t.Logf("comment: %s", cg.Text())
	}
}
// TestFunctions logs the name of every function declared in
// ./sample/ast_traversal.go.
func TestFunctions(t *testing.T) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "./sample/ast_traversal.go", nil, parser.ParseComments)
	if err != nil {
		// t.Fatal stops the test; the old unreachable/trailing returns
		// were removed.
		t.Fatal(err)
	}
	for _, decl := range f.Decls {
		fn, ok := decl.(*ast.FuncDecl)
		if !ok {
			continue
		}
		t.Logf("function: %s", fn.Name.Name)
	}
}
// TestInspect walks ./sample/demo.go, logging general declarations (and
// injecting the "context" import via AddImport), return statements,
// function declarations, and call expressions, then prints the possibly
// rewritten source.
func TestInspect(t *testing.T) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "./sample/demo.go", nil, parser.ParseComments)
	if err != nil {
		t.Fatal(err)
	}
	//ast.Print(fset, f)
	ast.Inspect(f, func(n ast.Node) bool {
		if gen, ok := n.(*ast.GenDecl); ok {
			t.Logf("return GenDecl found on line %d:\n\t", fset.Position(gen.Pos()).Line)
			if gen.Tok == token.IMPORT {
				AddImport(gen)
			}
		}
		if ret, ok := n.(*ast.ReturnStmt); ok {
			t.Logf("return statement found on line %d:\n\t", fset.Position(ret.Pos()).Line)
			printer.Fprint(os.Stdout, fset, ret)
			return true
		}
		// Find function declarations.
		if fn, ok := n.(*ast.FuncDecl); ok {
			var exported string
			if fn.Name.IsExported() {
				exported = "exported "
			}
			t.Logf("%sfunction declaration found on line %d: %s", exported, fset.Position(fn.Pos()).Line, fn.Name.Name)
			return true
		}
		if call, ok := n.(*ast.CallExpr); ok {
			t.Logf("call:%+v", call)
		}
		return true
	})
	var buffer bytes.Buffer
	if err := format.Node(&buffer, fset, f); err != nil {
		t.Fatal(err)
	}
	// Print the (possibly rewritten) Go source.
	fmt.Println(buffer.String())
}
// TestGomonkey locates a top-level `var mocks = func(...) []*gomonkey.Patches`
// declaration in ./sample/demo.go and logs its source text, then prints the
// whole formatted file.
func TestGomonkey(t *testing.T) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "./sample/demo.go", nil, parser.ParseComments)
	if err != nil {
		t.Fatal(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		gen, ok := n.(*ast.GenDecl)
		if !ok {
			return true
		}
		// Each guard below rejects declarations that do not have the shape
		// `var mocks = func(...) []*gomonkey.Patches { ... }`; returning
		// false also stops descending into the rejected declaration.
		if gen.Tok != token.VAR {
			return false
		}
		if len(gen.Specs) < 1 {
			return false
		}
		vs, ok := gen.Specs[0].(*ast.ValueSpec)
		if !ok {
			return false
		}
		if len(vs.Names) < 1 || vs.Names[0].Name != "mocks" {
			return false
		}
		if len(vs.Values) < 1 {
			return false
		}
		fnDecl, ok := vs.Values[0].(*ast.FuncLit)
		if !ok {
			return false
		}
		if len(fnDecl.Type.Results.List) < 1 {
			return false
		}
		retType, ok := fnDecl.Type.Results.List[0].Type.(*ast.ArrayType)
		if !ok {
			return false
		}
		sexpr, ok := retType.Elt.(*ast.StarExpr)
		if !ok {
			return false
		}
		selexpr, ok := sexpr.X.(*ast.SelectorExpr)
		if !ok {
			return false
		}
		pack, ok := selexpr.X.(*ast.Ident)
		if !ok {
			return false
		}
		if pack.Name != "gomonkey" || selexpr.Sel.Name != "Patches" {
			return false
		}
		t.Logf("return GenDecl found on line %d %+v:\n\t", fset.Position(gen.Pos()).Line, gen)
		var buf bytes.Buffer
		// BUG FIX: this error used to be silently dropped into the outer err.
		if err := format.Node(&buf, fset, gen); err != nil {
			t.Fatal(err)
		}
		// BUG FIX: t.Logf(buf.String()) treated the logged source as a
		// format string; t.Log avoids interpreting any '%' in the code.
		t.Log(buf.String())
		return true
	})
	var buffer bytes.Buffer
	if err := format.Node(&buffer, fset, f); err != nil {
		t.Fatal(err)
	}
	// Print the formatted Go source.
	fmt.Println(buffer.String())
}
|
package main
import "fmt"
// main demonstrates Go function features: multiple return values, variadic
// parameters, function values, and anonymous functions.
func main() {
	hello()
	r := sum(1, 2)
	fmt.Println(r)
	rr := sub(1, 2)
	fmt.Println(rr)
	// Multiple return values.
	// BUG FIX: this used to be `_, sub := ...`, shadowing the package-level
	// function `sub` with an int and breaking the later uses of `sub` as a
	// function value (compile error).
	_, diff := SumAndSub(6, 2)
	fmt.Println(diff)
	// Variadic arguments.
	rrr := sum2(1, 2, 3, 4, 5)
	fmt.Println(rrr)
	n := []int{1, 2, 3, 4, 5}
	rrrr := sum2(n...) // pass a slice directly to the variadic parameter with ...
	fmt.Println(rrrr)
	// Storing functions in variables (this shadows the package-level
	// hello(), which was already called above).
	var hello func(a int, b int) int = sum // declare a function-typed variable and assign a function
	world := sum                           // declare and assign at the same time
	fmt.Println(hello(1, 2))               // 3: calls sum via the hello variable
	fmt.Println(world(1, 2))               // 3: calls sum via the world variable
	f := []func(int, int) int{sum, sub} // a slice of functions, initialized with sum and sub
	fmt.Println(f[0](1, 2))             // 3: call through the first element
	fmt.Println(f[1](1, 2))             // -1: call through the second element
	ff := map[string]func(int, int) int{
		"sum": sum,
		"sub": sub,
	}
	fmt.Println(ff["sum"](1, 2)) // 3: call through the "sum" key
	fmt.Println(ff["sub"](1, 2)) // -1: call through the "sub" key
	// Anonymous functions (they reduce code size and are mainly used with
	// closures, deferred calls (defer), and goroutines (go)).
	func() { // a function without a name
		fmt.Println("Hello, world!")
	}()
	func(s string) { // define an anonymous function and
		fmt.Println(s)
	}("Hello, world") // call it immediately
	r5 := func(a int, b int) int { // define an anonymous function and
		return a + b
	}(1, 2) // call it immediately, storing the result in r5
	fmt.Println(r5)
}
// hello prints a greeting to stdout.
func hello() {
	// BUG FIX: the original called fmt.Println장 (a typo with a stray
	// Hangul suffix), which does not compile.
	fmt.Println("hello, world")
}
// sum returns the sum of a and b.
func sum(a int, b int) int {
	result := a + b
	return result
}
// sub returns a minus b via the named result r.
func sub(a int, b int) (r int) {
	// BUG FIX: `r := a - b` does not compile because r is already declared
	// as the named result; plain assignment is required.
	r = a - b
	return
}
// SumAndSub returns both the sum and the difference of a and b.
func SumAndSub(a int, b int) (int, int) {
	total := a + b
	diff := a - b
	return total, diff
}
// SumAndSubNamed is the named-result variant of SumAndSub.
// BUG FIX: it was also declared as SumAndSub, a duplicate declaration in
// the same package, which does not compile; it is renamed here.
func SumAndSubNamed(a int, b int) (sum int, sub int) {
	sum = a + b
	sub = a - b
	return
}
// sum2 returns the sum of all of its variadic int arguments (n is a slice
// inside the function body).
func sum2(n ...int) int {
	total := 0
	for _, value := range n {
		total += value
	}
	// BUG FIX: the accumulator was declared as `totla` but `total` was
	// returned, which did not compile; one spelling is used throughout.
	return total
}
|
package car
import (
"net/http"
"time"
"strconv"
)
// Server is the control surface of the car's HTTP server.
type Server interface {
	// Start begins serving HTTP requests; it blocks and returns the
	// listener's error when serving stops.
	Start() error
	// Wait blocks until Stop is called.
	Wait()
	// Stop signals one Wait caller to return.
	Stop()
}
// server is the concrete Server implementation: a simple path-keyed handler
// table in front of the standard library http.Server.
type server struct {
	// stopListener carries the Stop signal to Wait.
	stopListener chan struct{}
	// port is the TCP port the server listens on.
	port int
	// server is the underlying HTTP server (its Addr is derived from port).
	server *http.Server
	// staticHandler serves files from the static asset directory.
	staticHandler http.Handler
	// steering drives the car in response to requests.
	steering Steering
	// mux maps URL strings to their handler functions.
	mux map[string]func(*server, http.ResponseWriter, *http.Request)
}
// NewServer builds a Server that listens on port, drives the given Steering,
// and serves static assets from staticPath.
func NewServer(port int, steering Steering, staticPath string) Server {
	var srv server
	httpServer := &http.Server{
		Addr:           ":" + strconv.Itoa(port),
		Handler:        &srv,
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	srv = server{
		stopListener:  make(chan struct{}),
		port:          port,
		server:        httpServer,
		staticHandler: http.FileServer(http.Dir(staticPath)),
		steering:      steering,
		mux:           make(map[string]func(*server, http.ResponseWriter, *http.Request)),
	}
	// Register all routes in one table.
	routes := map[string]func(*server, http.ResponseWriter, *http.Request){
		"/":              static,
		"/left":          left,
		"/right":         right,
		"/forward":       forward,
		"/backward":      backward,
		"/resetSteering": resetSteering,
		"/resetThrust":   resetThrust,
	}
	for path, handler := range routes {
		srv.mux[path] = handler
	}
	return &srv
}
// static serves a file from the configured static asset directory.
func static(s *server, w http.ResponseWriter, r *http.Request) {
	s.staticHandler.ServeHTTP(w, r)
}

// left steers the car left.
func left(s *server, w http.ResponseWriter, r *http.Request) {
	s.steering.Left()
}

// right steers the car right.
func right(s *server, w http.ResponseWriter, r *http.Request) {
	s.steering.Right()
}

// forward applies forward thrust.
func forward(s *server, w http.ResponseWriter, r *http.Request) {
	s.steering.Forward()
}

// backward applies backward thrust.
func backward(s *server, w http.ResponseWriter, r *http.Request) {
	s.steering.Backward()
}

// resetSteering centers the steering.
func resetSteering(s *server, w http.ResponseWriter, r *http.Request) {
	s.steering.ResetSteering()
}

// resetThrust stops thrusting.
func resetThrust(s *server, w http.ResponseWriter, r *http.Request) {
	s.steering.ResetThrusting()
}
// ServeHTTP dispatches the request to the handler registered for its path,
// falling back to the static file handler registered at "/".
func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// BUG FIX: the lookup used r.URL.String(), which includes query strings
	// (and fragments), so "/left?x=1" would silently fall through to the
	// static handler. Match on the path only.
	if h, ok := s.mux[r.URL.Path]; ok {
		h(s, w, r)
		return
	}
	s.mux["/"](s, w, r)
}
// Start begins listening and serving; it blocks until serving stops and
// returns the listener's error.
func (s *server) Start() error {
	return s.server.ListenAndServe()
}

// Wait blocks until a Stop signal arrives on stopListener.
func (s *server) Wait() {
	<-s.stopListener
}

// Stop releases one Wait caller.
// NOTE(review): the channel is unbuffered, so Stop blocks until a goroutine
// is actually waiting — confirm that is intended.
func (s *server) Stop() {
	s.stopListener <- struct{}{}
}
|
/*
There is a car with capacity empty seats. The vehicle only drives east (i.e., it cannot turn around and drive west).
You are given the integer capacity and an array trips where trips[i] = [numPassengersi, fromi, toi] indicates that the ith trip has numPassengersi passengers and the locations to pick them up and drop them off are fromi and toi respectively. The locations are given as the number of kilometers due east from the car's initial location.
Return true if it is possible to pick up and drop off all passengers for all the given trips, or false otherwise.
Example 1:
Input: trips = [[2,1,5],[3,3,7]], capacity = 4
Output: false
Example 2:
Input: trips = [[2,1,5],[3,3,7]], capacity = 5
Output: true
Constraints:
1 <= trips.length <= 1000
trips[i].length == 3
1 <= numPassengersi <= 100
0 <= fromi < toi <= 1000
1 <= capacity <= 10^5
Hint: Sort the pickup and dropoff events by location, then process them in order.
*/
package main
import "sort"
// main exercises carpool with the two examples from the problem statement.
func main() {
	trips := [][3]int{
		{2, 1, 5},
		{3, 3, 7},
	}
	// Capacity 4 is exceeded between km 3 and 5 (2 + 3 riders on board).
	assert(!carpool(trips, 4))
	// Capacity 5 is exactly enough.
	assert(carpool(trips, 5))
}
// assert panics when x is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// carpool reports whether all trips can be served by a car with the given
// capacity, using a sweep over passenger-count deltas at each location.
func carpool(trips [][3]int, capacity int) bool {
	// delta[loc] is the net change in passengers at kilometer loc.
	delta := make(map[int]int)
	for _, trip := range trips {
		delta[trip[1]] += trip[0]
		delta[trip[2]] -= trip[0]
	}
	// Visit locations in increasing order (map iteration is unordered).
	locations := make([]int, 0, len(delta))
	for loc := range delta {
		locations = append(locations, loc)
	}
	sort.Ints(locations)
	onboard := 0
	for _, loc := range locations {
		onboard += delta[loc]
		if onboard > capacity {
			return false
		}
	}
	return true
}
|
// ˅
package main
import "fmt"
// ˄
// TextDisplayImpl renders a text string framed by '*' and '.' characters.
type TextDisplayImpl struct {
	// ˅
	// ˄
	// The string to display.
	text string
	// Width of the text in bytes (len(text)); note: bytes, not runes.
	width int
	// ˅
	// ˄
}
// NewTextDisplayImpl returns a TextDisplayImpl for text, with width set to
// the text's byte length.
func NewTextDisplayImpl(text string) *TextDisplayImpl {
	// ˅
	d := &TextDisplayImpl{}
	d.text = text
	d.width = len(text)
	return d
	// ˄
}
// ImplOpen prints the top frame line.
func (d *TextDisplayImpl) ImplOpen() {
	// ˅
	d.printLine()
	// ˄
}

// ImplWrite prints the text enclosed in ':' characters.
func (d *TextDisplayImpl) ImplWrite() {
	// ˅
	fmt.Println(":" + d.text + ":")
	// ˄
}

// ImplClose prints the bottom frame line.
func (d *TextDisplayImpl) ImplClose() {
	// ˅
	d.printLine()
	// ˄
}

// printLine prints one frame line: a '*', then width '.' characters, then
// a closing '*'.
func (d *TextDisplayImpl) printLine() {
	// ˅
	fmt.Print("*")
	for i := 0; i < d.width; i++ {
		fmt.Print(".")
	}
	fmt.Println("*")
	// ˄
}
// ˅
// ˄
|
package library
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// SetControllerRef sets owner as the controller owner-reference on owned,
// using the package-level scheme (declared elsewhere in this package).
func SetControllerRef(owner metav1.Object, owned metav1.Object) error {
	return controllerutil.SetControllerReference(owner, owned, scheme)
}
// Ported from controllerutil
func OwnerRefsMatch(a, b *metav1.OwnerReference) bool {
if a == nil || b == nil {
return false
}
aGV, err := schema.ParseGroupVersion(a.APIVersion)
if err != nil {
return false
}
bGV, err := schema.ParseGroupVersion(b.APIVersion)
if err != nil {
return false
}
return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name
}
|
package routers
import (
"github.com/francescoforesti/appointments/be/logging"
"github.com/francescoforesti/appointments/be/service"
"github.com/francescoforesti/appointments/be/utils"
"github.com/gin-gonic/gin"
)
// Init wires the appointment routes under the configured context path and
// starts the HTTP server; it panics if the server cannot run.
func Init(router *gin.Engine, service service.AppointmentService) {
	router.RedirectTrailingSlash = true
	router.RedirectFixedPath = true
	basePath := utils.GetEnv(utils.CONTEXT_PATH_ENV, utils.CONTEXT_PATH)
	group := router.Group(basePath)
	logging.Debug("Creating routes " + basePath + "...")
	api := CreateAPI(service)
	group.POST("/appointments", api.Create)
	group.GET("/appointments", api.FindAll)
	group.GET("/calendar/appointments", api.FindAllCalendar)
	group.GET("/appointments/:id/", api.FindOne)
	group.DELETE("/appointments/:id/", api.Delete)
	group.PUT("/appointments/:id/", api.Update)
	if err := router.Run(utils.GetEnv(utils.SERVER_PORT_ENV, utils.SERVER_PORT)); err != nil {
		panic(err)
	}
}
|
// Copyright 2019 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package java
import (
"android/soong/android"
"fmt"
)
// init registers the compat-config singleton and both module types with the
// Soong build system.
func init() {
	android.RegisterSingletonType("platform_compat_config_singleton", platformCompatConfigSingletonFactory)
	android.RegisterModuleType("platform_compat_config", PlatformCompatConfigFactory)
	android.RegisterModuleType("global_compat_config", globalCompatConfigFactory)
}
// platformCompatConfigPath is the fixed output location of the merged
// compat config XML produced by the singleton.
func platformCompatConfigPath(ctx android.PathContext) android.OutputPath {
	return android.PathForOutput(ctx, "compat_config", "merged_compat_config.xml")
}

// platformCompatConfigSingleton merges the compat-config metadata of every
// contributing module into a single XML file.
type platformCompatConfigSingleton struct {
	// metadata is the merged output path, set by GenerateBuildActions.
	metadata android.Path
}

// platformCompatConfigProperties are the Android.bp properties of a
// platform_compat_config module.
type platformCompatConfigProperties struct {
	// Src is the source jar the config is extracted from.
	Src *string `android:"path"`
}

// platformCompatConfig extracts a device compat config file and its
// metadata from the module's source jar.
type platformCompatConfig struct {
	android.ModuleBase
	// properties holds the Android.bp inputs.
	properties platformCompatConfigProperties
	// installDirPath is the etc/compatconfig install directory.
	installDirPath android.InstallPath
	// configFile is the extracted device config XML.
	configFile android.OutputPath
	// metadataFile is the extracted metadata XML consumed by the singleton.
	metadataFile android.OutputPath
}

// compatConfigMetadata returns the path of the extracted metadata file.
func (p *platformCompatConfig) compatConfigMetadata() android.OutputPath {
	return p.metadataFile
}

// CompatConfig returns the path of the extracted device config file.
func (p *platformCompatConfig) CompatConfig() android.OutputPath {
	return p.configFile
}

// SubDir returns the install subdirectory under etc.
func (p *platformCompatConfig) SubDir() string {
	return "compatconfig"
}

// PlatformCompatConfigIntf is implemented by modules that contribute a
// compat config to the merged platform config.
type PlatformCompatConfigIntf interface {
	android.Module
	compatConfigMetadata() android.OutputPath
	CompatConfig() android.OutputPath
	// Sub dir under etc dir.
	SubDir() string
}

// Compile-time check that platformCompatConfig satisfies the interface.
var _ PlatformCompatConfigIntf = (*platformCompatConfig)(nil)
// compat singleton rules

// GenerateBuildActions gathers the compat-config metadata emitted by every
// contributing module and emits a rule that merges them into
// merged_compat_config.xml.
func (p *platformCompatConfigSingleton) GenerateBuildActions(ctx android.SingletonContext) {
	var compatConfigMetadata android.Paths
	ctx.VisitAllModules(func(module android.Module) {
		if c, ok := module.(PlatformCompatConfigIntf); ok {
			metadata := c.compatConfigMetadata()
			compatConfigMetadata = append(compatConfigMetadata, metadata)
		}
	})
	if compatConfigMetadata == nil {
		// nothing to do.
		return
	}
	rule := android.NewRuleBuilder()
	outputPath := platformCompatConfigPath(ctx)
	rule.Command().
		BuiltTool(ctx, "process-compat-config").
		FlagForEachInput("--xml ", compatConfigMetadata).
		FlagWithOutput("--merged-config ", outputPath)
	rule.Build(pctx, ctx, "merged-compat-config", "Merge compat config")
	// Remember the merged path so MakeVars can export it.
	p.metadata = outputPath
}

// MakeVars exports the merged config path to the Make side of the build.
func (p *platformCompatConfigSingleton) MakeVars(ctx android.MakeVarsContext) {
	if p.metadata != nil {
		ctx.Strict("INTERNAL_PLATFORM_MERGED_COMPAT_CONFIG", p.metadata.String())
	}
}
// GenerateAndroidBuildActions emits the rule that extracts the device
// compat config and its metadata from the module's source jar, and records
// the etc/compatconfig install location.
func (p *platformCompatConfig) GenerateAndroidBuildActions(ctx android.ModuleContext) {
	rule := android.NewRuleBuilder()
	configFileName := p.Name() + ".xml"
	metadataFileName := p.Name() + "_meta.xml"
	p.configFile = android.PathForModuleOut(ctx, configFileName).OutputPath
	p.metadataFile = android.PathForModuleOut(ctx, metadataFileName).OutputPath
	path := android.PathForModuleSrc(ctx, String(p.properties.Src))
	rule.Command().
		BuiltTool(ctx, "process-compat-config").
		FlagWithInput("--jar ", path).
		FlagWithOutput("--device-config ", p.configFile).
		FlagWithOutput("--merged-config ", p.metadataFile)
	p.installDirPath = android.PathForModuleInstall(ctx, "etc", "compatconfig")
	rule.Build(pctx, ctx, configFileName, "Extract compat/compat_config.xml and install it")
}
// AndroidMkEntries installs the extracted config file as an ETC prebuilt
// under the compatconfig install directory.
func (p *platformCompatConfig) AndroidMkEntries() []android.AndroidMkEntries {
	// Elide the redundant inner element type of the composite literal
	// (gofmt -s / go vet composite-literal simplification).
	return []android.AndroidMkEntries{{
		Class:      "ETC",
		OutputFile: android.OptionalPathForPath(p.configFile),
		Include:    "$(BUILD_PREBUILT)",
		ExtraEntries: []android.AndroidMkExtraEntriesFunc{
			func(entries *android.AndroidMkEntries) {
				entries.SetString("LOCAL_MODULE_PATH", p.installDirPath.ToMakePath().String())
				entries.SetString("LOCAL_INSTALLED_MODULE_STEM", p.configFile.Base())
			},
		},
	}}
}
// platformCompatConfigSingletonFactory creates the singleton that merges
// all compat-config metadata.
func platformCompatConfigSingletonFactory() android.Singleton {
	return &platformCompatConfigSingleton{}
}

// PlatformCompatConfigFactory creates a platform_compat_config module.
func PlatformCompatConfigFactory() android.Module {
	module := &platformCompatConfig{}
	module.AddProperties(&module.properties)
	android.InitAndroidArchModule(module, android.DeviceSupported, android.MultilibFirst)
	return module
}

//============== merged_compat_config =================

// globalCompatConfigProperties are the Android.bp properties of a
// global_compat_config module.
type globalCompatConfigProperties struct {
	// name of the file into which the metadata will be copied.
	Filename *string
}

// globalCompatConfig exposes the merged platform compat config under a
// module-chosen file name.
type globalCompatConfig struct {
	android.ModuleBase
	// properties holds the Android.bp inputs.
	properties globalCompatConfigProperties
	// outputFilePath is the renamed copy of the merged config.
	outputFilePath android.OutputPath
}

// GenerateAndroidBuildActions copies the merged platform config to the
// configured Filename so dependents can reference it by that name.
func (c *globalCompatConfig) GenerateAndroidBuildActions(ctx android.ModuleContext) {
	filename := String(c.properties.Filename)
	inputPath := platformCompatConfigPath(ctx)
	c.outputFilePath = android.PathForModuleOut(ctx, filename).OutputPath
	// This ensures that outputFilePath has the correct name for others to
	// use, as the source file may have a different name.
	ctx.Build(pctx, android.BuildParams{
		Rule:   android.Cp,
		Output: c.outputFilePath,
		Input:  inputPath,
	})
}
// OutputFiles returns the copied config file for the default ("") tag and
// an error for any other tag.
// The receiver is renamed from h to c for consistency with the type's
// other methods.
func (c *globalCompatConfig) OutputFiles(tag string) (android.Paths, error) {
	switch tag {
	case "":
		return android.Paths{c.outputFilePath}, nil
	default:
		return nil, fmt.Errorf("unsupported module reference tag %q", tag)
	}
}
// global_compat_config provides access to the merged compat config xml file generated by the build.

// globalCompatConfigFactory creates a global_compat_config module, which is
// supported on both host and device.
func globalCompatConfigFactory() android.Module {
	module := &globalCompatConfig{}
	module.AddProperties(&module.properties)
	android.InitAndroidArchModule(module, android.HostAndDeviceSupported, android.MultilibCommon)
	return module
}
|
package core
// Stringer is implemented by values that can render themselves as a string.
// NOTE(review): it mirrors fmt.Stringer — consider using fmt.Stringer
// directly unless a local definition is required.
type Stringer interface {
	String() string
}
|
package admin
import (
"firstProject/app/dto"
myjwt "firstProject/app/http/middleware/jwt"
"firstProject/app/http/result"
"firstProject/app/models"
adminRep "firstProject/app/repositories/admin"
"firstProject/app/requests"
"firstProject/app/services/admin"
"firstProject/database"
"fmt"
"reflect"
"strconv"
"net/http"
"time"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
"github.com/go-playground/locales/zh"
ut "github.com/go-playground/universal-translator"
"github.com/go-playground/validator/v10"
zh_translations "github.com/go-playground/validator/v10/translations/zh"
"golang.org/x/crypto/bcrypt"
)
/* type Admin struct {
Username string `form:"username" json:"username" validate:"required" label:"用户名"`
Password string `form:"password" json:"password" validate:"required" label:"密码"`
}
func Register(c *gin.Context) {
var adminAccount Admin
returnData := result.NewResult(c)
erra := c.ShouldBind(&adminAccount)
uni := ut.New(zh.New())
trans, _ := uni.GetTranslator("zh")
validate := validator.New()
validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
name := fld.Tag.Get("label")
return name
})
//验证器注册翻译器
err := zh_translations.RegisterDefaultTranslations(validate, trans)
if err != nil {
fmt.Println(err)
}
err = validate.Struct(adminAccount)
if err != nil {
for _, err := range err.(validator.ValidationErrors) {
result.NewResult(c).Error(err.Translate(trans))
return
}
}
if erra == nil {
err := model.Register(adminAccount.Username, adminAccount.Password)
if err == nil {
returnData.Success("注册成功")
} else {
returnData.Error("注册失败" + err.Error())
}
} else {
returnData.Error("解析数据失败")
}
} */
//RegisterHandle 注册
func RegisterHandle(c *gin.Context) {
request := requests.AdminRegisterRequest{}
returnData := result.NewResult(c)
erra := c.ShouldBind(&request)
uni := ut.New(zh.New())
trans, _ := uni.GetTranslator("zh")
validate := validator.New()
validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
name := fld.Tag.Get("label")
return name
})
//验证器注册翻译器
err := zh_translations.RegisterDefaultTranslations(validate, trans)
if err != nil {
fmt.Println(err)
}
err = validate.Struct(request)
if err != nil {
for _, err := range err.(validator.ValidationErrors) {
result.NewResult(c).Error(err.Translate(trans))
return
}
}
if erra != nil {
fmt.Println(err)
}
userDto := dto.AdminDto{
Username: request.Username,
Password: request.Password,
Name: request.Name,
Phone: request.Phone,
}
model := adminRep.GetAdminByUsername(userDto.Username)
if model.ID != 0 {
returnData.Error("用户名重复")
return
}
service := admin.AdminService{}
err = service.Register(userDto)
if err != nil {
returnData.Error("注册失败:" + err.Error())
return
}
returnData.Success("注册成功")
}
// AdminLogin authenticates an admin by username and password and, on
// success, responds with a freshly generated JWT.
func AdminLogin(c *gin.Context) {
	var loginReq requests.AdminLoginRequest
	returnData := result.NewResult(c)
	if err := c.ShouldBind(&loginReq); err != nil {
		returnData.Error("解析失败")
		return
	}
	account := adminRep.GetAdminByUsername(loginReq.Username)
	fmt.Println(account)
	if account.ID == 0 {
		returnData.Error("帐号错误")
		return
	}
	if err := bcrypt.CompareHashAndPassword([]byte(account.Password), []byte(loginReq.Password)); err != nil {
		returnData.Error("密码错误")
		return
	}
	generateToken(c, account)
}
// Xtoken is the login response payload carrying the signed JWT.
type Xtoken struct {
	Token string `json:"token"`
}
// 生成令牌
func generateToken(c *gin.Context, user models.Admin) {
var mytoken Xtoken
j := &myjwt.JWT{
[]byte("yangpanda"),
}
claims := myjwt.CustomClaims{
user.ID,
user.Username,
user.Name,
user.Phone,
user.Avatar,
jwtgo.StandardClaims{
NotBefore: int64(time.Now().Unix() - 1000), // 签名生效时间
ExpiresAt: int64(time.Now().Unix() + 360000), // 过期时间 一小时
Issuer: "yangpanda", //签名的发行者
},
}
returnData := result.NewResult(c)
token, err := j.CreateToken(claims)
if err != nil {
c.JSON(http.StatusOK, gin.H{
"code": -1,
"msg": err.Error(),
})
return
}
mytoken.Token = token
returnData.Success(mytoken)
}
// Info is a token-protected endpoint that echoes the JWT claims placed on
// the context by the auth middleware. (The previous comment referred to a
// different handler, GetDataByTime.)
func Info(c *gin.Context) {
	claims := c.MustGet("claims").(*myjwt.CustomClaims)
	returnData := result.NewResult(c)
	if claims != nil {
		returnData.Success(claims)
		return
	}
	returnData.Error("解析token错误")
}
// Logout acknowledges a logout request.
// NOTE(review): no token invalidation is visible here — the JWT stays valid
// until it expires; confirm that is acceptable.
func Logout(c *gin.Context) {
	returnData := result.NewResult(c)
	returnData.Success("logout ok")
}
// List returns one page of admin accounts plus the total row count.
// Query parameters: page (1-based) and limit (page size).
func List(c *gin.Context) {
	returnData := result.NewResult(c)
	page, _ := strconv.Atoi(c.Query("page"))
	limit, _ := strconv.Atoi(c.Query("limit"))
	var pageItems []models.Admin
	database.DB.Limit(limit).Offset((page - 1) * limit).Find(&pageItems)
	// BUG FIX: the original re-ran an unbounded Find on the same slice to
	// obtain the total, overwriting the page with every record; count into
	// a separate slice so the paged items are preserved.
	var all []models.Admin
	result := database.DB.Find(&all)
	var data struct {
		Item  []models.Admin `json:"item"`
		Total int            `json:"total"`
	}
	data.Item = pageItems
	data.Total = int(result.RowsAffected)
	returnData.Success(data)
}
// DeleteAdmin removes the admin row identified by the :id path parameter.
func DeleteAdmin(c *gin.Context) {
	returnData := result.NewResult(c)
	id := c.Param("id")
	if res := database.DB.Delete(&models.Admin{}, id); res.Error != nil {
		returnData.Error("删除失败")
		return
	}
	returnData.Success("删除成功")
}
// UpdateAdminPassword changes the logged-in admin's password after
// verifying the old one. Query parameters: oldPassword, password.
func UpdateAdminPassword(c *gin.Context) {
	claims := c.MustGet("claims").(*myjwt.CustomClaims)
	returnData := result.NewResult(c)
	if claims == nil {
		returnData.Error("修改失败")
		return
	}
	var admin models.Admin
	model := database.DB.First(&admin, claims.ID)
	if model.Error != nil {
		returnData.Error("修改失败")
		return
	}
	oldPassword := c.Query("oldPassword")
	if err := bcrypt.CompareHashAndPassword([]byte(admin.Password), []byte(oldPassword)); err != nil {
		returnData.Error("原始密码错误")
		return
	}
	password := c.Query("password")
	// Hash the new password.
	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		// BUG FIX: the original only printed this error and then saved an
		// invalid/empty hash; abort instead.
		returnData.Error("修改失败")
		return
	}
	admin.Password = string(hash)
	if errs := database.DB.Save(&admin); errs.Error != nil {
		returnData.Error("修改失败")
		return
	}
	returnData.Success("成功")
}
|
package orm
import (
"context"
"database/sql"
"log"
)
// Connection bundles the pieces needed to run queries: the context used
// for all operations, an optional dedicated *sql.Conn, an optional open
// transaction, and the logical database name it targets.
type Connection struct {
	ctx context.Context
	conn *sql.Conn // dedicated connection; acquired via getConn (not visible here)
	tx *sql.Tx // non-nil while a transaction is open
	name string // logical database name selected via WithDB
}
// newConnection creates a Connection bound to the background context.
func newConnection() *Connection {
	return &Connection{ctx: context.Background()}
}
// WithDB selects the named logical database for subsequent operations and
// returns the receiver for chaining.
func (c *Connection) WithDB(name string) *Connection {
	c.name = name
	return c
}
// WithContext replaces the context used for subsequent queries and
// returns the receiver for chaining.
func (c *Connection) WithContext(ctx context.Context) *Connection {
	c.ctx = ctx
	return c
}
// Select runs a row-returning query and wraps the result set — or the
// error that prevented it — in a *Rows for the caller to consume.
func (c *Connection) Select(query string, bindings []interface{}) *Rows {
	rs, err := c.queryRows(query, bindings)
	if err != nil {
		return &Rows{rs: nil, lastError: err}
	}
	return &Rows{rs: rs, lastError: nil}
}
// queryRows executes the query with the given bindings, preferring the
// open transaction when there is one and falling back to a dedicated
// connection otherwise.
//
// BUG FIX: the named result was declared as sql.Rows (a value), but both
// Tx.QueryContext and Conn.QueryContext return *sql.Rows — and the error
// branch returned nil for it — so this method did not compile. The result
// is now a pointer.
func (c *Connection) queryRows(query string, bindings []interface{}) (rows *sql.Rows, err error) {
	log.Println("query:", query, "| bindings:", bindings)
	if c.tx != nil {
		// Inside a transaction: the query must run on the tx.
		return c.tx.QueryContext(c.ctx, query, bindings...)
	}
	conn, err := c.getConn()
	if err != nil {
		return nil, err
	}
	return conn.QueryContext(c.ctx, query, bindings...)
}
|
package middleware
import (
"net/http"
"strings"
"muto/context"
"muto/models"
)
// Account middleware will look up the current account via its
// remember_token cookie using the embedded AccountService. If the account
// is found, it is set on the request context.
// Regardless, the next handler is always called.
type Account struct {
	models.AccountService
}
// Apply adapts ApplyFn to the http.Handler interface.
func (mw *Account) Apply(next http.Handler) http.HandlerFunc {
	return mw.ApplyFn(next.ServeHTTP)
}
// ApplyFn wraps next with a best-effort account lookup: when a valid
// remember_token cookie resolves to an account, the account is attached
// to the request context for downstream handlers. next is always invoked,
// whether or not an account was found.
func (mw *Account) ApplyFn(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Static assets and images never need the current account, so
		// skip the lookup entirely for those paths.
		p := r.URL.Path
		if strings.HasPrefix(p, "/assets/") || strings.HasPrefix(p, "/images/") {
			next(w, r)
			return
		}
		// No cookie means no lookup; proceed as a guest.
		cookie, err := r.Cookie("remember_token")
		if err != nil {
			next(w, r)
			return
		}
		acct, err := mw.AccountService.ByRemember(cookie.Value)
		if err != nil {
			next(w, r)
			return
		}
		// Stash the account on a derived context and swap it into the
		// request before calling the next handler.
		r = r.WithContext(context.WithAccount(r.Context(), acct))
		next(w, r)
	}
}
// RequireAccount will redirect a visitor to the /enter page
// if they are not logged in. This middleware assumes
// that Account middleware has already been run, otherwise
// it will always redirect.
type RequireAccount struct{}
// Apply adapts ApplyFn to the http.Handler interface.
func (mw *RequireAccount) Apply(next http.Handler) http.HandlerFunc {
	return mw.ApplyFn(next.ServeHTTP)
}
// ApplyFn guards next: requests whose context carries no account are
// redirected to the /enter page instead of reaching the handler.
func (mw *RequireAccount) ApplyFn(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if account := context.Account(r.Context()); account != nil {
			next(w, r)
			return
		}
		http.Redirect(w, r, "/enter", http.StatusFound)
	}
}
|
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package getter
import (
"crypto/tls"
"crypto/x509"
"fmt"
"net/url"
"helm.sh/helm/v3/pkg/getter"
corev1 "k8s.io/api/core/v1"
)
// ClientOptionsFromSecret constructs a getter.Option slice for the given
// secret, currently consisting of an optional basic-auth option. It
// returns the slice, or an error from the underlying extraction.
func ClientOptionsFromSecret(secret corev1.Secret) ([]getter.Option, error) {
	var opts []getter.Option
	opt, err := BasicAuthFromSecret(secret)
	if err != nil {
		return opts, err
	}
	if opt != nil {
		opts = append(opts, opt)
	}
	return opts, nil
}
// BasicAuthFromSecret attempts to construct a basic auth getter.Option
// for the given v1.Secret and returns the result.
//
// A secret with neither username nor password yields (nil, nil); a secret
// with only one of the two is an error.
func BasicAuthFromSecret(secret corev1.Secret) (getter.Option, error) {
	username := string(secret.Data["username"])
	password := string(secret.Data["password"])
	if username == "" && password == "" {
		return nil, nil
	}
	if username == "" || password == "" {
		return nil, fmt.Errorf("invalid '%s' secret data: required fields 'username' and 'password'", secret.Name)
	}
	return getter.WithBasicAuth(username, password), nil
}
// TLSClientConfigFromSecret attempts to construct a TLS client config
// for the given v1.Secret. It returns the TLS client config or an error.
//
// Secrets with no certFile, keyFile, AND caFile are ignored; if only one
// of certFile/keyFile is defined it returns an error.
func TLSClientConfigFromSecret(secret corev1.Secret, repositoryUrl string) (*tls.Config, error) {
	certBytes, keyBytes, caBytes := secret.Data["certFile"], secret.Data["keyFile"], secret.Data["caFile"]
	switch {
	case len(certBytes)+len(keyBytes)+len(caBytes) == 0:
		// Nothing TLS-related in the secret: not an error, just no config.
		return nil, nil
	case (len(certBytes) > 0 && len(keyBytes) == 0) || (len(keyBytes) > 0 && len(certBytes) == 0):
		return nil, fmt.Errorf("invalid '%s' secret data: fields 'certFile' and 'keyFile' require each other's presence",
			secret.Name)
	}
	tlsConf := &tls.Config{}
	if len(certBytes) > 0 && len(keyBytes) > 0 {
		cert, err := tls.X509KeyPair(certBytes, keyBytes)
		if err != nil {
			return nil, err
		}
		tlsConf.Certificates = append(tlsConf.Certificates, cert)
	}
	if len(caBytes) > 0 {
		cp, err := x509.SystemCertPool()
		if err != nil {
			return nil, fmt.Errorf("cannot retrieve system certificate pool: %w", err)
		}
		if !cp.AppendCertsFromPEM(caBytes) {
			return nil, fmt.Errorf("cannot append certificate into certificate pool: invalid caFile")
		}
		tlsConf.RootCAs = cp
	}
	// The deprecated tlsConf.BuildNameToCertificate() call was removed:
	// since Go 1.14 crypto/tls selects the certificate automatically and
	// ignores the NameToCertificate map (staticcheck SA1019).
	u, err := url.Parse(repositoryUrl)
	if err != nil {
		return nil, fmt.Errorf("cannot parse repository URL: %w", err)
	}
	tlsConf.ServerName = u.Hostname()
	return tlsConf, nil
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package view
import (
"testing"
"time"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
"github.com/stretchr/testify/assert"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/references/cli/top/component"
)
// TestApp spins up an envtest control plane and exercises the top-level
// TUI application: page wiring, keyboard routing, the help view, back
// navigation, and theme switching. The sub-tests share one app instance
// and depend on running in order.
func TestApp(t *testing.T) {
	testEnv := &envtest.Environment{
		ControlPlaneStartTimeout: time.Minute * 3,
		ControlPlaneStopTimeout: time.Minute,
		UseExistingCluster: pointer.Bool(false),
	}
	cfg, err := testEnv.Start()
	assert.NoError(t, err)
	defer func() {
		assert.NoError(t, testEnv.Stop())
	}()
	testClient, err := client.New(cfg, client.Options{Scheme: common.Scheme})
	assert.NoError(t, err)
	app := NewApp(testClient, cfg, "")
	assert.NotEmpty(t, app.config.Theme)
	assert.Equal(t, len(app.Components()), 4)
	// Page wiring and stack bookkeeping after Init.
	t.Run("init", func(t *testing.T) {
		app.Init()
		assert.Equal(t, app.Main.HasPage("main"), true)
		app.Main.SwitchToPage("main")
		name, page := app.Main.GetFrontPage()
		assert.Equal(t, name, "main")
		assert.NotEmpty(t, page)
		flex, ok := page.(*tview.Flex)
		assert.Equal(t, ok, true)
		assert.Equal(t, flex.GetBorderColor(), app.config.Theme.Border.App.Color())
		_, ok = app.HasAction(component.KeyQ)
		assert.Equal(t, ok, true)
		app.content.Stack.RemoveListener(app.Crumbs())
		assert.NotEmpty(t, app.content.Stack.TopView())
		assert.Equal(t, app.content.Stack.Empty(), false)
		assert.Equal(t, app.content.Stack.IsLastView(), true)
	})
	// Bound keys are consumed (nil returned); unbound keys pass through.
	t.Run("keyboard", func(t *testing.T) {
		evt1 := tcell.NewEventKey(component.KeyQ, '/', 0)
		assert.Empty(t, app.keyboard(evt1))
		evt2 := tcell.NewEventKey(tcell.KeyTAB, '/', 0)
		assert.NotEmpty(t, app.keyboard(evt2))
		assert.Equal(t, app.keyboard(evt2), evt2)
	})
	// Toggling the help view twice pushes then pops it off the stack.
	t.Run("help view", func(t *testing.T) {
		assert.Empty(t, app.helpView(nil))
		assert.Equal(t, app.content.IsLastView(), false)
		assert.Empty(t, app.helpView(nil))
		assert.Equal(t, app.content.IsLastView(), true)
	})
	// Back pops the help view pushed just above.
	t.Run("back", func(t *testing.T) {
		assert.Empty(t, app.helpView(nil))
		app.Back(nil)
		assert.Equal(t, app.content.IsLastView(), true)
	})
	t.Run("theme switch", func(t *testing.T) {
		app.SwitchTheme(nil)
		assert.Equal(t, app.Main.HasPage("theme"), true)
		assert.Equal(t, app.Main.GetPageCount(), 2)
	})
}
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package awss3
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"io"
)
// s3ByteBuffer is an in-memory, seekable byte buffer used for s3 target
// objects.
type s3ByteBuffer struct {
	bytes []byte // stored contents
	position int64 // current read/write offset into bytes
}
// newS3ByteBuffer returns an empty buffer positioned at offset zero.
func newS3ByteBuffer() *s3ByteBuffer {
	buf := new(s3ByteBuffer)
	buf.bytes = make([]byte, 0)
	return buf
}
// Bytes returns the buffer's entire contents. The slice is not copied, so
// callers must not mutate it while the buffer is in use. It never fails;
// the error return mirrors the other buffer methods.
func (buf *s3ByteBuffer) Bytes() ([]byte, error) {
	return buf.bytes, nil
}
// CloseAndDelete empties the buffer and rewinds the position; for this
// in-memory implementation there is no file handle to close or delete.
func (buf *s3ByteBuffer) CloseAndDelete() error {
	buf.bytes = make([]byte, 0)
	buf.position = 0
	return nil
}
// Read copies bytes from the current position into p and advances the
// position, returning io.EOF once the end of the buffer is reached.
func (buf *s3ByteBuffer) Read(p []byte) (n int, err error) {
	// BUG FIX: Seek permits positions past the end of the buffer, and the
	// slice expression below would panic for them; report EOF instead.
	if buf.position >= int64(len(buf.bytes)) {
		return 0, io.EOF
	}
	n = copy(p, buf.bytes[buf.position:])
	buf.position += int64(n)
	if buf.position == int64(len(buf.bytes)) {
		return n, io.EOF
	}
	return n, nil
}
// Write copies p into the buffer at the current position and advances the
// position. Note that any existing bytes after the write position are
// discarded (the slice is truncated before appending), so a mid-buffer
// write drops the old tail. It never fails.
func (buf *s3ByteBuffer) Write(p []byte) (n int, err error) {
	buf.bytes = append(buf.bytes[:buf.position], p...)
	buf.position += int64(len(p))
	return len(p), nil
}
// Seek sets the read/write position per the io.Seeker contract and
// returns the new position. It fails when the resulting position would be
// negative, or when whence is not one of the io.Seek* constants.
func (buf *s3ByteBuffer) Seek(offset int64, whence int) (int64, error) {
	var position int64
	switch whence {
	case io.SeekStart:
		position = offset
	case io.SeekCurrent:
		position = buf.position + offset
	case io.SeekEnd:
		position = int64(len(buf.bytes)) + offset
	default:
		// BUG FIX: an unknown whence previously fell through silently and
		// seeked to position 0.
		return 0, fmt.Errorf("s3Buffer invalid whence %d", whence)
	}
	if position < 0 {
		return 0, fmt.Errorf("s3Buffer bad seek result %d", position)
	}
	buf.position = position
	return position, nil
}
// Size returns the total number of bytes stored. It never fails; the
// error return mirrors the other buffer methods.
func (buf *s3ByteBuffer) Size() (int, error) {
	return len(buf.bytes), nil
}
// Sha1 returns the hex-encoded SHA-1 digest of the buffer contents.
func (buf *s3ByteBuffer) Sha1() (string, error) {
	hash := sha1.Sum(buf.bytes)
	return hex.EncodeToString(hash[:]), nil
}
|
package main
import (
"context"
"github.com/dragonflyoss/image-service/contrib/nydusify/pkg/converter"
"github.com/dragonflyoss/image-service/contrib/nydusify/pkg/converter/provider"
)
// main demonstrates a full Nydus image conversion: pull the source image
// from a registry, convert it, and push the converted target image back.
// Any failure aborts the example via panic.
func main() {
	// Conversion parameters; adjust these to your environment.
	workDir := "./tmp"
	nydusImagePath := "/path/to/nydus-image"
	source := "localhost:5000/ubuntu:latest"
	target := "localhost:5000/ubuntu:latest-nydus"
	// Set to empty if no authorization be required
	auth := "<base64_encoded_auth>"
	// Set to false if using https registry
	insecure := true

	// Progress logger for the conversion.
	logger, err := provider.DefaultLogger()
	if err != nil {
		panic(err)
	}

	// Remotes handle registry communication; both reuse the same auth.
	// Alternatively build them from the local docker config:
	//   sourceRemote, err := provider.DefaultRemote(source, insecure)
	sourceRemote, err := provider.DefaultRemoteWithAuth(source, insecure, auth)
	if err != nil {
		panic(err)
	}
	targetRemote, err := provider.DefaultRemoteWithAuth(target, insecure, auth)
	if err != nil {
		panic(err)
	}

	// The source provider fetches the source image manifest, config, and
	// layers for the chosen platform.
	sourceProviders, err := provider.DefaultSource(context.Background(), sourceRemote, workDir, "linux/amd64")
	if err != nil {
		panic(err)
	}

	cvt, err := converter.New(converter.Opt{
		Logger:          logger,
		SourceProviders: sourceProviders,
		TargetRemote:    targetRemote,
		WorkDir:         workDir,
		PrefetchDir:     "/",
		NydusImagePath:  nydusImagePath,
		MultiPlatform:   false,
		DockerV2Format:  true,
	})
	if err != nil {
		panic(err)
	}
	if err := cvt.Convert(context.Background()); err != nil {
		panic(err)
	}
}
|
package main
import (
"strings"
"time"
"github.com/emersion/go-imap"
"github.com/gdamore/tcell"
)
// Message is a locally defined type over imap.Message so that display
// helpers (String, DrawMessage) can be attached to it.
type Message imap.Message
// IsUnseen reports whether the flag set lacks the IMAP \Seen flag.
func IsUnseen(flags []string) bool {
	for _, flag := range flags {
		if flag == imap.SeenFlag {
			return false
		}
	}
	return true
}
func formatDate(date time.Time) string {
date = date.Local()
now := time.Now().Truncate(time.Hour)
// Mimic GMail behavior
now = now.Add(time.Hour * (time.Duration)(now.Hour()/12*12-now.Hour()))
template := "01/02/06"
if date.After(now.Add(-12 * time.Hour)) {
template = "15:04"
} else if date.Year() >= now.Year() {
template = "Jan 02"
}
return date.Format(template)
}
// String renders a one-line summary of the message: unseen marker,
// sender, subject, flags, and a GMail-style date.
func (msg Message) String() string {
	senderStr := ""
	// BUG FIX: Envelope.Sender can be empty on malformed messages; the
	// original indexed sender[0] unconditionally and panicked then.
	if sender := msg.Envelope.Sender; len(sender) > 0 {
		senderStr = sender[0].PersonalName
		if len(senderStr) == 0 {
			// Fall back to the bare address when no display name is set.
			senderStr = sender[0].MailboxName + "@" + sender[0].HostName
		}
	}
	unseenStr := " "
	if IsUnseen(msg.Flags) {
		unseenStr = "*"
	}
	dateStr := formatDate(msg.Envelope.Date)
	return unseenStr + " " + senderStr + " " +
		msg.Envelope.Subject + " " + strings.Join(msg.Flags, " ") + " " +
		dateStr
}
// DrawMessage renders the message as a single screen row at line y:
// unseen marker, fixed-width sender, subject, flags, and a right-aligned
// date. Unseen messages are drawn bold.
func (msg Message) DrawMessage(s tcell.Screen, y int) {
	w, _ := s.Size()
	// NOTE(review): assumes Envelope.Sender is non-empty; sender[0]
	// panics otherwise — confirm the caller guarantees a sender.
	sender := msg.Envelope.Sender
	sender_str := sender[0].PersonalName
	if len(sender_str) == 0 {
		// Fall back to the bare address when no display name is set.
		sender_str = sender[0].MailboxName + "@" + sender[0].HostName
	}
	sender_str = TruncateFillRight(sender_str, 20)
	is_unseen := IsUnseen(msg.Flags)
	unseen_str := " "
	if is_unseen {
		unseen_str = "*"
	}
	str := unseen_str + " " + sender_str + " " +
		msg.Envelope.Subject + " " + strings.Join(msg.Flags, " ")
	date_str := " " + formatDate(msg.Envelope.Date)
	// Pad/truncate so the date hugs the right edge of the screen.
	str = TruncateFillRight(str, w-len(date_str)) + date_str
	_ = EmitStr(s, 0, y, tcell.StyleDefault.Bold(is_unseen), str)
}
|
//go:generate go get -v github.com/mailru/easyjson/...
//go:generate go get github.com/alvaroloes/enumer
//go:generate go get github.com/jteeuwen/go-bindata/...
//go:generate go get github.com/elazarl/go-bindata-assetfs/...
//go:generate go get github.com/golang/protobuf/protoc-gen-go
//go:generate go get github.com/gogo/protobuf/...
//go:generate protoc --gogofaster_out=. -Iproto onnx.proto3
package onnx
|
package handlers
import (
"github.com/assignments-fixed-ssunni12/servers/gateway/models/users"
"github.com/assignments-fixed-ssunni12/servers/gateway/sessions"
)
// Context carries the globals that HTTP handler methods need: the key
// used for signing and verifying SessionIDs, the session store, and the
// user store. It is intended as the receiver on handler functions.
type Context struct {
	Key string
	SessionsStore *sessions.RedisStore /* *sessions.MemStore */
	UsersStore *users.MySQLStore /* *users.MyMockStore */
}
// NewContext validates its inputs and bundles them into a Context. It
// returns nil when the key is empty or either store is missing.
func NewContext(key string, session *sessions.RedisStore /* *sessions.MemStore */, user *users.MySQLStore /* *users.MyMockStore */) *Context {
	if key == "" || session == nil || user == nil {
		return nil
	}
	return &Context{Key: key, SessionsStore: session, UsersStore: user}
}
|
package main
import "fmt"
// main demonstrates fixed-size arrays: per-index assignment, index/value
// iteration, and composite-literal initialization.
func main() {
	var arr [5]float64
	for i := range arr {
		arr[i] = float64(i+1) * 100 // 100, 200, ..., 500
	}
	for i, value := range arr {
		fmt.Println(value, i)
	}
	arr2 := [5]float64{1, 2, 3, 4, 5}
	for _, value := range arr2 {
		fmt.Println(value)
	}
}
|
package dnszeppelin
import (
"encoding/binary"
"fmt"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
mkdns "github.com/miekg/dns"
"github.com/stretchr/testify/assert"
"net"
"testing"
"time"
)
/* Helpers */
// generateUDPPacket wraps payload in UDP/IPv4/Ethernet layers (both ports
// 53, loopback -> 8.8.8.8) and returns it re-decoded as a gopacket.Packet.
func generateUDPPacket(payload []byte) gopacket.Packet {
	opts := gopacket.SerializeOptions{FixLengths: true}
	buf := gopacket.NewSerializeBuffer()
	gopacket.SerializeLayers(buf, opts,
		&layers.Ethernet{
			SrcMAC:       net.HardwareAddr{0xFF, 0xAA, 0xFA, 0xAA, 0xFF, 0xAA},
			DstMAC:       net.HardwareAddr{0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD},
			EthernetType: layers.EthernetTypeIPv4,
		},
		&layers.IPv4{
			Version:  4,
			Protocol: layers.IPProtocolUDP,
			SrcIP:    net.IP{127, 0, 0, 1},
			DstIP:    net.IP{8, 8, 8, 8},
		},
		&layers.UDP{
			SrcPort: 53,
			DstPort: 53,
		},
		gopacket.Payload(payload),
	)
	return gopacket.NewPacket(buf.Bytes(), layers.LayerTypeEthernet, gopacket.Lazy)
}
// packTCP wraps payload in TCP/IPv4/Ethernet layers (both ports 53,
// loopback -> 8.8.8.8) with the given sequence number and SYN flag, and
// returns it re-decoded as a gopacket.Packet.
func packTCP(payload []byte, seq uint32, syn bool) gopacket.Packet {
	opts := gopacket.SerializeOptions{FixLengths: true}
	buf := gopacket.NewSerializeBuffer()
	gopacket.SerializeLayers(buf, opts,
		&layers.Ethernet{
			SrcMAC:       net.HardwareAddr{0xFF, 0xAA, 0xFA, 0xAA, 0xFF, 0xAA},
			DstMAC:       net.HardwareAddr{0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD},
			EthernetType: layers.EthernetTypeIPv4,
		},
		&layers.IPv4{
			Version:  4,
			Protocol: layers.IPProtocolTCP,
			SrcIP:    net.IP{127, 0, 0, 1},
			DstIP:    net.IP{8, 8, 8, 8},
		},
		&layers.TCP{
			Seq:     seq,
			SYN:     syn,
			SrcPort: layers.TCPPort(53),
			DstPort: layers.TCPPort(53),
		},
		gopacket.Payload(payload),
	)
	return gopacket.NewPacket(buf.Bytes(), layers.LayerTypeEthernet, gopacket.Lazy)
}
// readResultOrTiemout waits up to ten seconds for a DNSResult on the
// channel and returns it, or an error when the deadline expires first.
// (The name's "Tiemout" typo is kept: callers in this file use it.)
func readResultOrTiemout(data chan DNSResult) (*DNSResult, error) {
	select {
	case packet := <-data:
		return &packet, nil
	case <-time.After(10 * time.Second):
		return nil, fmt.Errorf("Result read timeout")
	}
}
// TestCaptureDNSParse feeds a single UDP DNS query through the capturer's
// processing channel and checks the question section survives decoding.
func TestCaptureDNSParse(t *testing.T) {
	t.Parallel()
	rChannel, capturer := createDefaultCapturer()
	defer close(capturer.options.Done)
	data := new(mkdns.Msg)
	data.SetQuestion("example.com.", mkdns.TypeA)
	pack, _ := data.Pack()
	capturer.processing <- generateUDPPacket(pack)
	result, err := readResultOrTiemout(rChannel)
	if assert.NoError(t, err) {
		assert.Equal(t, 1, len(result.DNS.Question), "DNS Question decoded incorrectly")
		assert.Equal(t, "example.com.", result.DNS.Question[0].Name, "DNS Question decoded incorrectly")
		assert.Equal(t, mkdns.TypeA, result.DNS.Question[0].Qtype, "DNS Question decoded incorrectly")
	}
}
// TestCaptureIP4 sends a UDP/IPv4 DNS query through the capturer and
// verifies the decoded IP metadata: version, addresses, protocol, and
// packet length.
func TestCaptureIP4(t *testing.T) {
	t.Parallel()
	rChannel, capturer := createDefaultCapturer()
	defer close(capturer.options.Done)
	defer close(rChannel)
	data := new(mkdns.Msg)
	data.SetQuestion("example.com.", mkdns.TypeA)
	pack, _ := data.Pack()
	capturer.processing <- generateUDPPacket(pack)
	result, err := readResultOrTiemout(rChannel)
	if assert.NoError(t, err) {
		assert.Equal(t, uint8(4), result.IPVersion, "DNS IP Version parsed incorrectly")
		assert.Equal(t, net.IPv4(127, 0, 0, 1)[12:], result.SrcIP, "DNS Source IP parsed incorrectly")
		assert.Equal(t, net.IPv4(8, 8, 8, 8)[12:], result.DstIP, "DNS Dest IP parsed incorrectly")
		// BUG FIX: the next two messages were copy-pasted "Dest IP" text,
		// which made failure output misleading.
		assert.Equal(t, "udp", result.Protocol, "DNS protocol parsed incorrectly")
		assert.Equal(t, uint16(len(pack)), result.PacketLength, "DNS packet length parsed incorrectly")
	}
}
// TestCaptureFragmentedIP4 splits a UDP DNS query across two IPv4
// fragments (first with MoreFragments set, second at a fragment offset)
// and checks that the capturer reassembles and decodes them.
func TestCaptureFragmentedIP4(t *testing.T) {
	t.Parallel()
	rChannel, capturer := createDefaultCapturer()
	defer close(capturer.options.Done)
	defer close(rChannel)
	data := new(mkdns.Msg)
	data.SetQuestion("example.com.", mkdns.TypeA)
	pack, _ := data.Pack()
	// Serialize the full UDP datagram first; it is split below.
	var options gopacket.SerializeOptions
	options.FixLengths = true
	buffer := gopacket.NewSerializeBuffer()
	gopacket.SerializeLayers(buffer, options,
		&layers.UDP{
			SrcPort: 53,
			DstPort: 53,
		},
		gopacket.Payload(pack),
	)
	udpPacket := buffer.Bytes()
	// Split into a 16-byte first fragment and the remainder.
	a := udpPacket[:16]
	b := udpPacket[16:]
	// Send fragment a: MoreFragments flag set, offset 0.
	buffer = gopacket.NewSerializeBuffer()
	gopacket.SerializeLayers(buffer, options,
		&layers.Ethernet{
			SrcMAC: net.HardwareAddr{0xFF, 0xAA, 0xFA, 0xAA, 0xFF, 0xAA},
			DstMAC: net.HardwareAddr{0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD},
			EthernetType: layers.EthernetTypeIPv4,
		},
		&layers.IPv4{
			Version: 4,
			Protocol: layers.IPProtocolUDP,
			SrcIP: net.IP{127, 0, 0, 1},
			DstIP: net.IP{8, 8, 8, 8},
			Flags: layers.IPv4MoreFragments,
		},
		gopacket.Payload(a),
	)
	capturer.processing <- gopacket.NewPacket(buffer.Bytes(), layers.LayerTypeEthernet, gopacket.Lazy)
	// Send fragment b: final fragment, FragOffset 2 (units of 8 bytes = 16).
	buffer = gopacket.NewSerializeBuffer()
	gopacket.SerializeLayers(buffer, options,
		&layers.Ethernet{
			SrcMAC: net.HardwareAddr{0xFF, 0xAA, 0xFA, 0xAA, 0xFF, 0xAA},
			DstMAC: net.HardwareAddr{0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD},
			EthernetType: layers.EthernetTypeIPv4,
		},
		&layers.IPv4{
			Version: 4,
			Protocol: layers.IPProtocolUDP,
			SrcIP: net.IP{127, 0, 0, 1},
			DstIP: net.IP{8, 8, 8, 8},
			Flags: 0,
			FragOffset: 2,
		},
		gopacket.Payload(b),
	)
	capturer.processing <- gopacket.NewPacket(buffer.Bytes(), layers.LayerTypeEthernet, gopacket.Lazy)
	result, err := readResultOrTiemout(rChannel)
	if assert.NoError(t, err) {
		assert.Equal(t, net.IPv4(127, 0, 0, 1)[12:], result.SrcIP, "DNS Source IP parsed incorrectly")
		assert.Equal(t, net.IPv4(8, 8, 8, 8)[12:], result.DstIP, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, uint8(4), result.IPVersion, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, "udp", result.Protocol, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, uint16(len(pack)), result.PacketLength, "DNS Dest IP parsed incorrectly")
	}
}
// TestCaptureIP6 sends a UDP/IPv6 DNS query through the capturer and
// verifies the decoded IPv6 metadata and question section.
func TestCaptureIP6(t *testing.T) {
	t.Parallel()
	rChannel, capturer := createDefaultCapturer()
	defer close(capturer.options.Done)
	defer close(rChannel)
	data := new(mkdns.Msg)
	data.SetQuestion("example.com.", mkdns.TypeA)
	pack, _ := data.Pack()
	// Build Ethernet/IPv6/UDP around the DNS payload (::1 -> a 2001:...
	// address spelled out as raw bytes).
	var options gopacket.SerializeOptions
	options.FixLengths = true
	buffer := gopacket.NewSerializeBuffer()
	gopacket.SerializeLayers(buffer, options,
		&layers.Ethernet{
			SrcMAC: net.HardwareAddr{0xFF, 0xAA, 0xFA, 0xAA, 0xFF, 0xAA},
			DstMAC: net.HardwareAddr{0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD},
			EthernetType: layers.EthernetTypeIPv6,
		},
		&layers.IPv6{
			Version: 6,
			NextHeader: layers.IPProtocolUDP,
			SrcIP: net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
			DstIP: net.IP{32, 1, 72, 96, 72, 96, 0, 0, 0, 0, 0, 0, 0, 0, 136, 136},
		},
		&layers.UDP{
			SrcPort: layers.UDPPort(53),
			DstPort: layers.UDPPort(53),
		},
		gopacket.Payload(pack),
	)
	capturer.processing <- gopacket.NewPacket(buffer.Bytes(), layers.LayerTypeEthernet, gopacket.Lazy)
	result, err := readResultOrTiemout(rChannel)
	if assert.NoError(t, err) {
		assert.Equal(t, uint8(6), result.IPVersion, "DNS IP Version parsed incorrectly")
		assert.Equal(t, net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, result.SrcIP, "DNS Source IP parsed incorrectly")
		assert.Equal(t, net.IP{32, 1, 72, 96, 72, 96, 0, 0, 0, 0, 0, 0, 0, 0, 136, 136}, result.DstIP, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, "udp", result.Protocol, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, uint16(len(pack)), result.PacketLength, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, 1, len(result.DNS.Question), "IPv6 dns question have unexpected count")
		assert.Equal(t, "example.com.", result.DNS.Question[0].Name, "IPv6 dns question parsed incorrectly")
	}
}
// TestCaptureIP6Fragmented splits a UDP DNS query across two IPv6
// fragments, hand-building the IPv6 Fragment extension header, and checks
// that the capturer reassembles and decodes them.
func TestCaptureIP6Fragmented(t *testing.T) {
	t.Parallel()
	rChannel, capturer := createDefaultCapturer()
	defer close(capturer.options.Done)
	defer close(rChannel)
	data := new(mkdns.Msg)
	data.SetQuestion("example.com.", mkdns.TypeA)
	pack, _ := data.Pack()
	// Serialize the full UDP datagram first; it is split below.
	var options gopacket.SerializeOptions
	options.FixLengths = true
	buffer := gopacket.NewSerializeBuffer()
	gopacket.SerializeLayers(buffer, options,
		&layers.UDP{
			SrcPort: 53,
			DstPort: 53,
		},
		gopacket.Payload(pack),
	)
	udpPacket := buffer.Bytes()
	// Split into a 16-byte first fragment and the remainder.
	a := udpPacket[:16]
	b := udpPacket[16:]
	// Hand-build the 8-byte IPv6 Fragment extension header:
	// byte 0 = next header, byte 3 low bit = More Fragments flag plus the
	// fragment offset in its upper bits.
	frag := make([]byte, 8)
	frag[0] = 17 // UDP
	frag[3] = 1 // MoreFragments
	// Send fragment a (offset 0, more fragments).
	buffer = gopacket.NewSerializeBuffer()
	gopacket.SerializeLayers(buffer, options,
		&layers.Ethernet{
			SrcMAC: net.HardwareAddr{0xFF, 0xAA, 0xFA, 0xAA, 0xFF, 0xAA},
			DstMAC: net.HardwareAddr{0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD},
			EthernetType: layers.EthernetTypeIPv6,
		},
		&layers.IPv6{
			Version: 6,
			NextHeader: layers.IPProtocolIPv6Fragment,
			SrcIP: net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
			DstIP: net.IP{32, 1, 72, 96, 72, 96, 0, 0, 0, 0, 0, 0, 0, 0, 136, 136},
		},
		gopacket.Payload(frag),
		gopacket.Payload(a),
	)
	capturer.processing <- gopacket.NewPacket(buffer.Bytes(), layers.LayerTypeEthernet, gopacket.Lazy)
	// Send fragment b: offset 2 (8-byte units), More Fragments cleared.
	frag[3] = byte(uint(2) << 3) // Offset 1
	buffer = gopacket.NewSerializeBuffer()
	gopacket.SerializeLayers(buffer, options,
		&layers.Ethernet{
			SrcMAC: net.HardwareAddr{0xFF, 0xAA, 0xFA, 0xAA, 0xFF, 0xAA},
			DstMAC: net.HardwareAddr{0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD},
			EthernetType: layers.EthernetTypeIPv6,
		},
		&layers.IPv6{
			Version: 6,
			NextHeader: layers.IPProtocolIPv6Fragment,
			SrcIP: net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
			DstIP: net.IP{32, 1, 72, 96, 72, 96, 0, 0, 0, 0, 0, 0, 0, 0, 136, 136},
		},
		gopacket.Payload(frag),
		gopacket.Payload(b),
	)
	capturer.processing <- gopacket.NewPacket(buffer.Bytes(), layers.LayerTypeEthernet, gopacket.Lazy)
	result, err := readResultOrTiemout(rChannel)
	if assert.NoError(t, err) {
		assert.Equal(t, uint8(6), result.IPVersion, "DNS IP Version parsed incorrectly")
		assert.Equal(t, net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, result.SrcIP, "DNS Source IP parsed incorrectly")
		assert.Equal(t, net.IP{32, 1, 72, 96, 72, 96, 0, 0, 0, 0, 0, 0, 0, 0, 136, 136}, result.DstIP, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, "udp", result.Protocol, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, uint16(len(pack)), result.PacketLength, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, 1, len(result.DNS.Question), "IPv6 dns question have unexpected count")
		assert.Equal(t, "example.com.", result.DNS.Question[0].Name, "IPv6 dns question parsed incorrectly")
	}
}
// TestCaptureTCP sends a DNS query over TCP — prefixed with the 2-byte
// big-endian length required for DNS-over-TCP (RFC 1035 §4.2.2) — and
// checks stream decoding.
func TestCaptureTCP(t *testing.T) {
	t.Parallel()
	// Generate the data
	data := new(mkdns.Msg)
	data.SetQuestion("example.com.", mkdns.TypeA)
	payload, _ := data.Pack()
	// Prepend the 2-byte message-length header.
	buf := []byte{0, 0}
	binary.BigEndian.PutUint16(buf, uint16(len(payload)))
	buf = append(buf, payload...)
	packet := packTCP(buf, 1, true)
	// Send the packet
	rChannel, capturer := createDefaultCapturer()
	defer close(capturer.options.Done)
	defer close(rChannel)
	capturer.processing <- packet
	result, err := readResultOrTiemout(rChannel)
	if assert.NoError(t, err) {
		assert.Equal(t, 1, len(result.DNS.Question), "TCP Question decoded incorrectly")
		assert.Equal(t, uint8(4), result.IPVersion, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, "tcp", result.Protocol, "DNS Dest IP parsed incorrectly")
		assert.Equal(t, uint16(len(payload)), result.PacketLength, "DNS Dest IP parsed incorrectly")
	}
}
// TestCaptureTCPDivided splits a length-prefixed DNS-over-TCP message
// across two TCP segments delivered out of order, exercising stream
// reassembly in the capturer.
func TestCaptureTCPDivided(t *testing.T) {
	t.Parallel()
	// Generate the data
	data := new(mkdns.Msg)
	data.SetQuestion("example.com.", mkdns.TypeA)
	payload, _ := data.Pack()
	// Prepend the 2-byte message-length header, then split in half.
	buf := []byte{0, 0}
	binary.BigEndian.PutUint16(buf, uint16(len(payload)))
	buf = append(buf, payload...)
	a := buf[:len(buf)/2]
	b := buf[len(buf)/2:]
	// Segment B's sequence is 10+len(a)+1: the SYN on segment A consumes
	// one sequence number before A's data.
	packetA := packTCP(a, 10, true)
	packetB := packTCP(b, 10+uint32(len(a))+1, false)
	// Send B before A to force out-of-order reassembly.
	rChannel, capturer := createDefaultCapturer()
	defer close(capturer.options.Done)
	defer close(rChannel)
	capturer.processing <- packetB
	capturer.processing <- packetA
	result, err := readResultOrTiemout(rChannel)
	if assert.NoError(t, err) {
		assert.Equal(t, 1, len(result.DNS.Question), "TCP Question decoded incorrectly")
	}
}
|
// parse includes/config.txt into a map
//
// format of includes/config.txt:
// key=value
//
// access by:
// value := config["key"]
//
package utils
import (
"bufio"
"log"
"os"
"regexp"
"strings"
)
// config holds the parsed key/value options and confFile the path they
// were read from; both are populated by sample()/ReadConfig.
var config map[string]string
var confFile string
// sample shows typical usage: parse config.txt into the package-level map.
func sample() {
	confFile = "config.txt"
	config = ReadConfig(confFile)
}
//ReadConfig o
func ReadConfig(filenameFullpath string) map[string]string {
prg := "ReadConfig()"
var options map[string]string
options = make(map[string]string)
file, err := os.Open(filenameFullpath)
if err != nil {
log.Printf("%s: os.Open(): %s\n", prg, err)
return options
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, "=") == true {
re, err := regexp.Compile(`([^=]+)=(.*)`)
if err != nil {
log.Printf("%s: regexp.Compile(): error=%s", prg, err)
return options
}
configOption := re.FindStringSubmatch(line)[1]
configValue := re.FindStringSubmatch(line)[2]
options[configOption] = configValue
log.Printf("%s: out[]: %s ... config_option=%s, config_value=%s\n", prg, line, configOption, configValue)
}
}
log.Printf("%s: options[]: %+v\n", prg, options)
if err := scanner.Err(); err != nil {
log.Printf("%s: scanner.Err(): %s\n", prg, err)
return options
}
return options
}
|
package main
import (
"fmt"
"path/filepath"
"github.com/jraams/aoc-2020/helpers"
)
// move describes one toboggan slope step: dx columns right, dy rows down.
type move struct {
	dx int
	dy int
}
// main solves Advent of Code 2020 day 3: count trees hit on a single
// slope (part 1) and the product of trees hit across five slopes (part 2).
func main() {
	// Load input from file
	inputPath, _ := filepath.Abs("input")
	inputValues := helpers.GetInputValues(inputPath)
	// Part 1: the single slope right 3, down 1.
	encounteredTrees, _ := slideDown(inputValues, []move{{dx: 3, dy: 1}})
	fmt.Printf("Solution part 1: encountered %d trees", encounteredTrees)
	fmt.Println()
	// Part 2: product over the five prescribed slopes.
	_, productOfEncounteredTrees := slideDown(inputValues, []move{{dx: 1, dy: 1}, {dx: 3, dy: 1}, {dx: 5, dy: 1}, {dx: 7, dy: 1}, {dx: 1, dy: 2}})
	fmt.Printf("Solution part 2: product of encountered trees is %d", productOfEncounteredTrees)
	fmt.Println()
}
// slideDown runs slideDownOnce for every slope in moves and returns both
// the sum of trees hit (part 1) and the product across slopes (part 2).
func slideDown(input []string, moves []move) (int, int) {
	sum := 0
	product := 1
	for _, m := range moves {
		hits := slideDownOnce(input, m.dx, m.dy)
		sum += hits
		product *= hits
	}
	return sum, product
}
// slideDownOnce counts '#' squares hit while descending the horizontally
// repeating grid with the given step, starting one step below the top row.
func slideDownOnce(input []string, rightVel int, downVel int) int {
	trees := 0
	x := 0
	for y := downVel; y < len(input); y += downVel {
		x += rightVel
		row := input[y]
		// The grid repeats horizontally, so wrap the column index.
		if row[x%len(row)] == '#' {
			trees++
		}
	}
	return trees
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"bytes"
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
"testing"
"time"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/parser/auth"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/types"
"github.com/stretchr/testify/require"
)
// TestExplainPrivileges checks the privileges required to EXPLAIN a view:
// SELECT alone is not enough (ErrViewNoExplain), SHOW VIEW is also needed,
// and revoking SELECT afterwards yields a table-access-denied error.
func TestExplainPrivileges(t *testing.T) {
	store := testkit.CreateMockStore(t)
	se, err := session.CreateSession4Test(store)
	require.NoError(t, err)
	require.NoError(t, se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk := testkit.NewTestKit(t, store)
	tk.SetSession(se)
	// Root session: create a database, a table, a view on it, and a
	// low-privilege user `explain`.
	tk.MustExec("create database explaindatabase")
	tk.MustExec("use explaindatabase")
	tk.MustExec("create table t (id int)")
	tk.MustExec("create view v as select * from t")
	tk.MustExec(`create user 'explain'@'%'`)
	// Second session authenticated as the `explain` user.
	tk1 := testkit.NewTestKit(t, store)
	se, err = session.CreateSession4Test(store)
	require.NoError(t, err)
	require.NoError(t, se.Auth(&auth.UserIdentity{Username: "explain", Hostname: "%"}, nil, nil, nil))
	tk1.SetSession(se)
	tk.MustExec(`grant select on explaindatabase.v to 'explain'@'%'`)
	tk1.MustQuery("show databases").Check(testkit.Rows("INFORMATION_SCHEMA", "explaindatabase"))
	tk1.MustExec("use explaindatabase")
	tk1.MustQuery("select * from v")
	// SELECT alone on a view does not permit EXPLAIN on it.
	err = tk1.ExecToErr("explain format = 'brief' select * from v")
	require.Equal(t, plannercore.ErrViewNoExplain.Error(), err.Error())
	// Adding SHOW VIEW makes EXPLAIN succeed.
	tk.MustExec(`grant show view on explaindatabase.v to 'explain'@'%'`)
	tk1.MustQuery("explain format = 'brief' select * from v")
	// Revoking SELECT breaks EXPLAIN again, now with an access error.
	tk.MustExec(`revoke select on explaindatabase.v from 'explain'@'%'`)
	err = tk1.ExecToErr("explain format = 'brief' select * from v")
	require.Equal(t, plannercore.ErrTableaccessDenied.GenWithStackByArgs("SELECT", "explain", "%", "v").Error(), err.Error())
	// Regression coverage for nested views / partially granted bases:
	// https://github.com/pingcap/tidb/issues/34326
	tk.MustExec("create table t1 (i int)")
	tk.MustExec("create table t2 (j int)")
	tk.MustExec("create table t3 (k int, secret int)")
	tk.MustExec("create view v1 as select * from t1")
	tk.MustExec("create view v2 as select * from v1, t2")
	tk.MustExec("create view v3 as select k from t3")
	tk.MustExec("grant select, show view on explaindatabase.v2 to 'explain'@'%'")
	tk.MustExec("grant show view on explaindatabase.v1 to 'explain'@'%'")
	tk.MustExec("grant select, show view on explaindatabase.t3 to 'explain'@'%'")
	tk.MustExec("grant select, show view on explaindatabase.v3 to 'explain'@'%'")
	// v1: SHOW VIEW but no SELECT -> access denied.
	tk1.MustGetErrMsg("explain select * from v1", "[planner:1142]SELECT command denied to user 'explain'@'%' for table 'v1'")
	// v2: SELECT+SHOW VIEW on v2, but underlying v1 lacks SELECT.
	tk1.MustGetErrCode("explain select * from v2", errno.ErrViewNoExplain)
	tk1.MustQuery("explain select * from t3")
	tk1.MustQuery("explain select * from v3")
}
// TestExplainCartesianJoin verifies that the explain output flags a join
// as CARTESIAN exactly when the plan has no usable join key.
func TestExplainCartesianJoin(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (v int)")
	cases := []struct {
		sql             string
		isCartesianJoin bool
	}{
		{"explain format = 'brief' select * from t t1, t t2", true},
		{"explain format = 'brief' select * from t t1 where exists (select 1 from t t2 where t2.v > t1.v)", true},
		{"explain format = 'brief' select * from t t1 where exists (select 1 from t t2 where t2.v in (t1.v+1, t1.v+2))", true},
		{"explain format = 'brief' select * from t t1, t t2 where t1.v = t2.v", false},
	}
	for _, tc := range cases {
		// Scan every operator row for the CARTESIAN marker.
		hasCartesian := false
		for _, r := range tk.MustQuery(tc.sql).Rows() {
			if strings.Contains(fmt.Sprintf("%v", r), "CARTESIAN") {
				hasCartesian = true
				break
			}
		}
		require.Equal(t, tc.isCartesianJoin, hasCartesian)
	}
}
// TestExplainWrite verifies that `explain analyze` actually executes DML
// statements (insert/update/replace) while plain `explain` does not.
func TestExplainWrite(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (a int)")
	// explain analyze executes the insert, so the row appears.
	tk.MustQuery("explain analyze insert into t select 1")
	tk.MustQuery("select * from t").Check(testkit.Rows("1"))
	tk.MustQuery("explain analyze update t set a=2 where a=1")
	tk.MustQuery("select * from t").Check(testkit.Rows("2"))
	// Plain explain must NOT execute: the table content stays unchanged.
	tk.MustQuery("explain format = 'brief' insert into t select 1")
	tk.MustQuery("select * from t").Check(testkit.Rows("2"))
	tk.MustQuery("explain analyze insert into t select 1")
	tk.MustQuery("explain analyze replace into t values (3)")
	tk.MustQuery("select * from t order by a").Check(testkit.Rows("1", "2", "3"))
}
// TestExplainAnalyzeMemory runs `explain analyze` over a spread of operator
// shapes (sorts, all join hints, aggregations, readers, projections) and
// delegates to checkMemoryInfo to assert the memory column is populated for
// memory-tracking operators and "N/A" otherwise.
func TestExplainAnalyzeMemory(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (v int, k int, key(k))")
	tk.MustExec("insert into t values (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)")
	checkMemoryInfo(t, tk, "explain analyze select * from t order by v")
	checkMemoryInfo(t, tk, "explain analyze select * from t order by v limit 5")
	checkMemoryInfo(t, tk, "explain analyze select /*+ HASH_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.v = t2.v+1")
	checkMemoryInfo(t, tk, "explain analyze select /*+ MERGE_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.k = t2.k+1")
	checkMemoryInfo(t, tk, "explain analyze select /*+ INL_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.k = t2.k and t1.v=1")
	checkMemoryInfo(t, tk, "explain analyze select /*+ INL_HASH_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.k = t2.k and t1.v=1")
	checkMemoryInfo(t, tk, "explain analyze select /*+ INL_MERGE_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.k = t2.k and t1.v=1")
	checkMemoryInfo(t, tk, "explain analyze select sum(k) from t group by v")
	checkMemoryInfo(t, tk, "explain analyze select sum(v) from t group by k")
	checkMemoryInfo(t, tk, "explain analyze select * from t")
	checkMemoryInfo(t, tk, "explain analyze select k from t use index(k)")
	checkMemoryInfo(t, tk, "explain analyze select * from t use index(k)")
	checkMemoryInfo(t, tk, "explain analyze select v+k from t")
}
// checkMemoryInfo runs the given explain-analyze SQL and asserts that the
// memory column (index 6) is set for operators expected to track memory
// (joins, readers, sorts, aggregations, ...) and is "N/A" for all others.
// Rows executed on the cop side are skipped.
func checkMemoryInfo(t *testing.T, tk *testkit.TestKit, sql string) {
	const memCol = 6
	trackedOps := []string{"Join", "Reader", "Top", "Sort", "LookUp", "Projection", "Selection", "Agg"}
	for _, row := range tk.MustQuery(sql).Rows() {
		cols := make([]string, 0, len(row))
		for _, c := range row {
			cols = append(cols, c.(string))
		}
		// Cop tasks do not report root-side memory usage; ignore them.
		if strings.Contains(cols[3], "cop") {
			continue
		}
		expectMem := false
		for _, op := range trackedOps {
			if strings.Contains(cols[0], op) {
				expectMem = true
				break
			}
		}
		if expectMem {
			require.NotEqual(t, "N/A", cols[memCol])
		} else {
			require.Equal(t, "N/A", cols[memCol])
		}
	}
}
// TestMemoryAndDiskUsageAfterClose checks that after a query finishes, its
// memory and disk trackers are fully released (BytesConsumed back to zero)
// while MaxConsumed still records that memory was actually used.
func TestMemoryAndDiskUsageAfterClose(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (v int, k int, key(k))")
	batch := 128
	// More rows than two full chunks so executors really buffer data.
	limit := tk.Session().GetSessionVars().MaxChunkSize*2 + 10
	var buf bytes.Buffer
	// Insert `limit` rows in batches of up to `batch` values per statement.
	for i := 0; i < limit; {
		buf.Reset()
		_, err := buf.WriteString("insert into t values ")
		require.NoError(t, err)
		for j := 0; j < batch && i < limit; i, j = i+1, j+1 {
			if j > 0 {
				_, err = buf.WriteString(", ")
				require.NoError(t, err)
			}
			_, err = buf.WriteString(fmt.Sprintf("(%v,%v)", i, i))
			require.NoError(t, err)
		}
		tk.MustExec(buf.String())
	}
	SQLs := []string{"select v+abs(k) from t",
		"select v from t where abs(v) > 0",
		"select v from t order by v",
		"select count(v) from t",            // StreamAgg
		"select count(v) from t group by v", // HashAgg
	}
	for _, sql := range SQLs {
		tk.MustQuery(sql)
		// All tracked memory must be released once the query closes...
		require.Equal(t, int64(0), tk.Session().GetSessionVars().StmtCtx.MemTracker.BytesConsumed())
		// ...but the high-water mark proves memory was consumed at some point.
		require.Greater(t, tk.Session().GetSessionVars().StmtCtx.MemTracker.MaxConsumed(), int64(0))
		require.Equal(t, int64(0), tk.Session().GetSessionVars().StmtCtx.DiskTracker.BytesConsumed())
	}
}
// TestExplainAnalyzeExecutionInfo asserts that `explain analyze` never emits
// the all-zero execution-info placeholder for any operator, across sorts,
// joins, aggregations, CTEs, and a TPC-H-style multi-table query.
func TestExplainAnalyzeExecutionInfo(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (v int, k int, key(k))")
	tk.MustExec("insert into t values (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)")
	checkExecutionInfo(t, tk, "explain analyze select * from t order by v")
	checkExecutionInfo(t, tk, "explain analyze select * from t order by v limit 5")
	checkExecutionInfo(t, tk, "explain analyze select /*+ HASH_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.v = t2.v+1")
	checkExecutionInfo(t, tk, "explain analyze select /*+ MERGE_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.k = t2.k+1")
	checkExecutionInfo(t, tk, "explain analyze select /*+ INL_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.k = t2.k and t1.v=1")
	checkExecutionInfo(t, tk, "explain analyze select /*+ INL_HASH_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.k = t2.k and t1.v=1")
	checkExecutionInfo(t, tk, "explain analyze select /*+ INL_MERGE_JOIN(t1, t2) */ t1.k from t t1, t t2 where t1.k = t2.k and t1.v=1")
	checkExecutionInfo(t, tk, "explain analyze select sum(k) from t group by v")
	checkExecutionInfo(t, tk, "explain analyze select sum(v) from t group by k")
	checkExecutionInfo(t, tk, "explain analyze select * from t")
	checkExecutionInfo(t, tk, "explain analyze select k from t use index(k)")
	checkExecutionInfo(t, tk, "explain analyze select * from t use index(k)")
	checkExecutionInfo(t, tk, "explain analyze with recursive cte(a) as (select 1 union select a + 1 from cte where a < 1000) select * from cte;")
	// Build a TPC-H-like schema; FK checks are disabled because the tables
	// reference each other (and `customer` is never created at all).
	tk.MustExec("set @@foreign_key_checks=0")
	tk.MustExec("CREATE TABLE IF NOT EXISTS nation ( N_NATIONKEY BIGINT NOT NULL,N_NAME CHAR(25) NOT NULL,N_REGIONKEY BIGINT NOT NULL,N_COMMENT VARCHAR(152),PRIMARY KEY (N_NATIONKEY));")
	tk.MustExec("CREATE TABLE IF NOT EXISTS part ( P_PARTKEY BIGINT NOT NULL,P_NAME VARCHAR(55) NOT NULL,P_MFGR CHAR(25) NOT NULL,P_BRAND CHAR(10) NOT NULL,P_TYPE VARCHAR(25) NOT NULL,P_SIZE BIGINT NOT NULL,P_CONTAINER CHAR(10) NOT NULL,P_RETAILPRICE DECIMAL(15,2) NOT NULL,P_COMMENT VARCHAR(23) NOT NULL,PRIMARY KEY (P_PARTKEY));")
	tk.MustExec("CREATE TABLE IF NOT EXISTS supplier ( S_SUPPKEY BIGINT NOT NULL,S_NAME CHAR(25) NOT NULL,S_ADDRESS VARCHAR(40) NOT NULL,S_NATIONKEY BIGINT NOT NULL,S_PHONE CHAR(15) NOT NULL,S_ACCTBAL DECIMAL(15,2) NOT NULL,S_COMMENT VARCHAR(101) NOT NULL,PRIMARY KEY (S_SUPPKEY),CONSTRAINT FOREIGN KEY SUPPLIER_FK1 (S_NATIONKEY) references nation(N_NATIONKEY));")
	tk.MustExec("CREATE TABLE IF NOT EXISTS partsupp ( PS_PARTKEY BIGINT NOT NULL,PS_SUPPKEY BIGINT NOT NULL,PS_AVAILQTY BIGINT NOT NULL,PS_SUPPLYCOST DECIMAL(15,2) NOT NULL,PS_COMMENT VARCHAR(199) NOT NULL,PRIMARY KEY (PS_PARTKEY,PS_SUPPKEY),CONSTRAINT FOREIGN KEY PARTSUPP_FK1 (PS_SUPPKEY) references supplier(S_SUPPKEY),CONSTRAINT FOREIGN KEY PARTSUPP_FK2 (PS_PARTKEY) references part(P_PARTKEY));")
	tk.MustExec("CREATE TABLE IF NOT EXISTS orders ( O_ORDERKEY BIGINT NOT NULL,O_CUSTKEY BIGINT NOT NULL,O_ORDERSTATUS CHAR(1) NOT NULL,O_TOTALPRICE DECIMAL(15,2) NOT NULL,O_ORDERDATE DATE NOT NULL,O_ORDERPRIORITY CHAR(15) NOT NULL,O_CLERK CHAR(15) NOT NULL,O_SHIPPRIORITY BIGINT NOT NULL,O_COMMENT VARCHAR(79) NOT NULL,PRIMARY KEY (O_ORDERKEY),CONSTRAINT FOREIGN KEY ORDERS_FK1 (O_CUSTKEY) references customer(C_CUSTKEY));")
	tk.MustExec("CREATE TABLE IF NOT EXISTS lineitem ( L_ORDERKEY BIGINT NOT NULL,L_PARTKEY BIGINT NOT NULL,L_SUPPKEY BIGINT NOT NULL,L_LINENUMBER BIGINT NOT NULL,L_QUANTITY DECIMAL(15,2) NOT NULL,L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL,L_DISCOUNT DECIMAL(15,2) NOT NULL,L_TAX DECIMAL(15,2) NOT NULL,L_RETURNFLAG CHAR(1) NOT NULL,L_LINESTATUS CHAR(1) NOT NULL,L_SHIPDATE DATE NOT NULL,L_COMMITDATE DATE NOT NULL,L_RECEIPTDATE DATE NOT NULL,L_SHIPINSTRUCT CHAR(25) NOT NULL,L_SHIPMODE CHAR(10) NOT NULL,L_COMMENT VARCHAR(44) NOT NULL,PRIMARY KEY (L_ORDERKEY,L_LINENUMBER),CONSTRAINT FOREIGN KEY LINEITEM_FK1 (L_ORDERKEY) references orders(O_ORDERKEY),CONSTRAINT FOREIGN KEY LINEITEM_FK2 (L_PARTKEY,L_SUPPKEY) references partsupp(PS_PARTKEY, PS_SUPPKEY));")
	checkExecutionInfo(t, tk, "select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%dim%' ) as profit group by nation, o_year order by nation, o_year desc;")
	tk.MustExec("drop table if exists nation")
	tk.MustExec("drop table if exists part")
	tk.MustExec("drop table if exists supplier")
	tk.MustExec("drop table if exists partsupp")
	tk.MustExec("drop table if exists orders")
	tk.MustExec("drop table if exists lineitem")
}
// checkExecutionInfo runs the given SQL and asserts that the execution-info
// column (index 4) of every operator row contains real runtime statistics,
// i.e. is never the all-zero placeholder.
func checkExecutionInfo(t *testing.T, tk *testkit.TestKit, sql string) {
	const execInfoCol = 4
	for _, row := range tk.MustQuery(sql).Rows() {
		cols := make([]string, 0, len(row))
		for _, c := range row {
			cols = append(cols, c.(string))
		}
		require.NotEqual(t, "time:0s, loops:0, rows:0", cols[execInfoCol])
	}
}
// TestExplainAnalyzeActRowsNotEmpty asserts that `explain analyze` fills in
// the actRows column for every operator, even when the join produces no
// matching rows (t1.b = 2333 matches nothing).
func TestExplainAnalyzeActRowsNotEmpty(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (a int, b int, index (a))")
	tk.MustExec("insert into t values (1, 1)")
	checkActRowsNotEmpty(t, tk, "explain analyze select * from t t1, t t2 where t1.b = t2.a and t1.b = 2333")
}
// checkActRowsNotEmpty runs the given SQL and asserts the actRows column
// (index 2) of every operator row is non-empty.
func checkActRowsNotEmpty(t *testing.T, tk *testkit.TestKit, sql string) {
	const actRowsCol = 2
	for _, row := range tk.MustQuery(sql).Rows() {
		cols := make([]string, 0, len(row))
		for _, c := range row {
			cols = append(cols, c.(string))
		}
		require.NotEqual(t, "", cols[actRowsCol])
	}
}
// checkActRows prefixes the SQL with `explain analyze` and compares the
// actRows column (index 2) of each operator row, top-down, against the
// expected values. The row count must match exactly.
func checkActRows(t *testing.T, tk *testkit.TestKit, sql string, expected []string) {
	const actRowsCol = 2
	rows := tk.MustQuery("explain analyze " + sql).Rows()
	require.Equal(t, len(expected), len(rows))
	for i, row := range rows {
		cols := make([]string, 0, len(row))
		for _, c := range row {
			cols = append(cols, c.(string))
		}
		require.Equal(t, expected[i], cols[actRowsCol], fmt.Sprintf("error comparing %s", sql))
	}
}
// TestCheckActRowsWithUnistore pins the exact actRows values reported by
// `explain analyze` for a range of plans (scans, aggregations, CTEs, window
// functions, merge joins) when execution-info collection is enabled on the
// unistore-backed mock store.
func TestCheckActRowsWithUnistore(t *testing.T) {
	// Enable runtime-stats collection for the duration of the test and
	// restore the global config afterwards.
	defer config.RestoreFunc()()
	config.UpdateGlobal(func(conf *config.Config) {
		conf.EnableCollectExecutionInfo = true
	})
	store := testkit.CreateMockStore(t)
	// testSuite1 use default mockstore which is unistore
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("drop table if exists t_unistore_act_rows")
	tk.MustExec("create table t_unistore_act_rows(a int, b int, index(a, b))")
	tk.MustExec("insert into t_unistore_act_rows values (1, 0), (1, 0), (2, 0), (2, 1)")
	// Analyze so the optimizer picks deterministic plans for the cases below.
	tk.MustExec("analyze table t_unistore_act_rows")
	tk.MustExec("set @@tidb_merge_join_concurrency= 5;")
	type testStruct struct {
		sql      string
		expected []string
	}
	// `expected` lists the actRows column top-down for each plan row.
	tests := []testStruct{
		{
			sql:      "select * from t_unistore_act_rows",
			expected: []string{"4", "4"},
		},
		{
			sql:      "select * from t_unistore_act_rows where a > 1",
			expected: []string{"2", "2"},
		},
		{
			sql:      "select * from t_unistore_act_rows where a > 1 and b > 0",
			expected: []string{"1", "1", "2"},
		},
		{
			sql:      "select b from t_unistore_act_rows",
			expected: []string{"4", "4"},
		},
		{
			sql:      "select * from t_unistore_act_rows where b > 0",
			expected: []string{"1", "1", "4"},
		},
		{
			sql:      "select count(*) from t_unistore_act_rows",
			expected: []string{"1", "1", "1", "4"},
		},
		{
			sql:      "select count(*) from t_unistore_act_rows group by a",
			expected: []string{"2", "2", "2", "4"},
		},
		{
			sql:      "select count(*) from t_unistore_act_rows group by b",
			expected: []string{"2", "4", "4"},
		},
		{
			sql:      "with cte(a) as (select a from t_unistore_act_rows) select (select 1 from cte limit 1) from cte;",
			expected: []string{"4", "1", "1", "1", "4", "4", "4", "4", "4"},
		},
		{
			sql:      "select a, row_number() over (partition by b) from t_unistore_act_rows;",
			expected: []string{"4", "4", "4", "4", "4", "4", "4"},
		},
		{
			sql:      "select /*+ merge_join(t1, t2) */ * from t_unistore_act_rows t1 join t_unistore_act_rows t2 on t1.b = t2.b;",
			expected: []string{"10", "10", "4", "4", "4", "4", "4", "4", "4", "4", "4", "4"},
		},
	}
	// Default RPC encoding may cause statistics explain result differ and then the test unstable.
	tk.MustExec("set @@tidb_enable_chunk_rpc = on")
	for _, test := range tests {
		checkActRows(t, tk, test.sql, test.expected)
	}
}
// TestExplainAnalyzeCTEMemoryAndDiskInfo checks the memory/disk columns of
// the CTE operator in explain-analyze output: with no memory quota the CTE
// uses memory only (disk is "0 Bytes"); with a tight quota it spills, so
// both columns report real values.
// NOTE(review): rows[4] is presumably the CTE storage operator and columns
// 7/8 the memory/disk columns of the explain-analyze format — confirm
// against the explain output layout if the format changes.
func TestExplainAnalyzeCTEMemoryAndDiskInfo(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (a int)")
	tk.MustExec("insert into t with recursive cte(a) as (select 1 union select a + 1 from cte where a < 1000) select * from cte;")
	rows := tk.MustQuery("explain analyze with recursive cte(a) as (select 1 union select a + 1 from cte where a < 1000)" +
		" select * from cte, t;").Rows()
	// Unlimited quota: memory is tracked, nothing spills to disk.
	require.NotEqual(t, "N/A", rows[4][7].(string))
	require.Equal(t, "0 Bytes", rows[4][8].(string))
	// A small per-query quota forces the CTE to spill to disk.
	tk.MustExec("set @@tidb_mem_quota_query=10240;")
	rows = tk.MustQuery("explain analyze with recursive cte(a) as (select 1 union select a + 1 from cte where a < 1000)" +
		" select * from cte, t;").Rows()
	require.NotEqual(t, "N/A", rows[4][7].(string))
	require.NotEqual(t, "N/A", rows[4][8].(string))
}
// TestExplainStatementsSummary pins the plans produced for queries on
// information_schema.statements_summary: digest equality/IN predicates are
// pushed into the MemTableScan as `digests`, while other predicates (e.g.
// `digest is null`) remain a separate Selection.
func TestExplainStatementsSummary(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustQuery("desc select * from information_schema.statements_summary").Check(testkit.Rows(
		`MemTableScan_4 10000.00 root table:STATEMENTS_SUMMARY `))
	tk.MustQuery("desc select * from information_schema.statements_summary where digest is null").Check(testkit.RowsWithSep("|",
		`Selection_5|8000.00|root| isnull(Column#5)`, `└─MemTableScan_6|10000.00|root|table:STATEMENTS_SUMMARY|`))
	tk.MustQuery("desc select * from information_schema.statements_summary where digest = 'abcdefg'").Check(testkit.RowsWithSep(" ",
		`MemTableScan_5 10000.00 root table:STATEMENTS_SUMMARY digests: ["abcdefg"]`))
	tk.MustQuery("desc select * from information_schema.statements_summary where digest in ('a','b','c')").Check(testkit.RowsWithSep(" ",
		`MemTableScan_5 10000.00 root table:STATEMENTS_SUMMARY digests: ["a","b","c"]`))
}
// TestFix29401 is a regression test for https://github.com/pingcap/tidb/issues/29401:
// explaining an INL_HASH_JOIN whose join key needs a type conversion
// (char = json) must not fail.
func TestFix29401(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists tt123;")
	tk.MustExec(`CREATE TABLE tt123 (
	id int(11) NOT NULL,
	a bigint(20) DEFAULT NULL,
	b char(20) DEFAULT NULL,
	c datetime DEFAULT NULL,
	d double DEFAULT NULL,
	e json DEFAULT NULL,
	f decimal(40,6) DEFAULT NULL,
	PRIMARY KEY (id) /*T![clustered_index] CLUSTERED */,
	KEY a (a),
	KEY b (b),
	KEY c (c),
	KEY d (d),
	KEY f (f)
	) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;`)
	// Only needs to not error; the resulting plan is not asserted.
	tk.MustExec(" explain select /*+ inl_hash_join(t1) */ * from tt123 t1 join tt123 t2 on t1.b=t2.e;")
}
// TestIssue35296AndIssue43024 is a regression test for issues 35296/43024:
// every partial path of an IndexMerge plan must report a real execution
// time (column 5) rather than "time:0s" in explain-analyze output.
func TestIssue35296AndIssue43024(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int , c int, d int, e int, primary key(a), index ib(b), index ic(c), index idd(d), index ie(e));")
	// Five OR-ed indexed predicates force an IndexMerge plan.
	rows := tk.MustQuery("explain analyze select * from t where a = 10 or b = 30 or c = 10 or d = 1 or e = 90;").Rows()
	require.Contains(t, rows[0][0], "IndexMerge")
	require.NotRegexp(t, "^time:0s", rows[1][5])
	require.NotRegexp(t, "^time:0s", rows[2][5])
	require.NotRegexp(t, "^time:0s", rows[3][5])
	require.NotRegexp(t, "^time:0s", rows[4][5])
	require.NotRegexp(t, "^time:0s", rows[5][5])
}
// TestIssue35911 is a regression test for https://github.com/pingcap/tidb/issues/35911,
// covering two runtime-stats problems with Apply executors:
// 1) the reported duration of an IndexLookUp must include its build side;
// 2) with parallel apply enabled, the concurrency in table_task stats must
//    not be aggregated across workers.
func TestIssue35911(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1")
	tk.MustExec("drop table if exists t2")
	tk.MustExec("create table t1(a int, b int);")
	tk.MustExec("create table t2(a int, b int, index ia(a));")
	tk.MustExec("insert into t1 value (1,1), (2,2), (3,3), (4,4), (5,5), (6,6);")
	tk.MustExec("insert into t2 value (1,1), (2,2), (3,3), (4,4), (5,5), (6,6);")
	tk.MustExec("set @@tidb_executor_concurrency = 5;")
	// case 1 of #35911
	tk.MustExec("set @@tidb_enable_parallel_apply = 0;")
	rows := tk.MustQuery("explain analyze select * from t1 where exists (select tt1.* from (select * from t2 where a = t1.b) as tt1 join (select * from t2 where a = t1.b) as tt2 on tt1.b = tt2.b);").Rows()
	// Extract the leading "time:<dur>," from the execution-info column.
	extractTime, err := regexp.Compile("^time:(.*?),")
	require.NoError(t, err)
	timeStr1 := extractTime.FindStringSubmatch(rows[4][5].(string))[1]
	time1, err := time.ParseDuration(timeStr1)
	require.NoError(t, err)
	timeStr2 := extractTime.FindStringSubmatch(rows[5][5].(string))[1]
	time2, err := time.ParseDuration(timeStr2)
	require.NoError(t, err)
	// The duration of IndexLookUp should be longer than its build side child
	require.LessOrEqual(t, time2, time1)
	// case 2 of #35911
	tk.MustExec("set @@tidb_enable_parallel_apply = 1;")
	rows = tk.MustQuery("explain analyze select * from t1 where exists (select tt1.* from (select * from t2 where a = t1.b) as tt1 join (select * from t2 where a = t1.b) as tt2 on tt1.b = tt2.b);").Rows()
	extractConcurrency, err := regexp.Compile(`table_task: [{].*concurrency: (\d+)[}]`)
	require.NoError(t, err)
	concurrencyStr := extractConcurrency.FindStringSubmatch(rows[4][5].(string))[1]
	concurrency, err := strconv.ParseInt(concurrencyStr, 10, 64)
	require.NoError(t, err)
	// To be consistent with other operators, we should not aggregate the concurrency in the runtime stats.
	require.EqualValues(t, 5, concurrency)
}
// TestIssue35105 is a regression test for https://github.com/pingcap/tidb/issues/35105:
// when `explain analyze insert` hits a duplicate-key error (with
// tidb_constraint_check_in_place on), the statement must fail and none of
// the rows may be left behind.
func TestIssue35105(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (a int primary key)")
	tk.MustExec("insert into t values (2)")
	// Check uniqueness eagerly so the insert fails during execution.
	tk.MustExec("set @@tidb_constraint_check_in_place=1")
	require.Error(t, tk.ExecToErr("explain analyze insert into t values (1), (2), (3)"))
	// The failed insert must not have written any row (not even (1)).
	tk.MustQuery("select * from t").Check(testkit.Rows("2"))
}
// flatJSONPlan flattens the explain operator tree rooted at j into a
// pre-order list: the node itself first, then its sub-operators, each
// flattened recursively. A nil root yields a nil slice.
func flatJSONPlan(j *plannercore.ExplainInfoForEncode) (res []*plannercore.ExplainInfoForEncode) {
	if j == nil {
		return nil
	}
	res = []*plannercore.ExplainInfoForEncode{j}
	for _, sub := range j.SubOperators {
		res = append(res, flatJSONPlan(sub)...)
	}
	return res
}
// TestExplainJSON verifies the `tidb_json` explain format: the syntax is
// accepted in all casings/quotings, and for each case the flattened JSON
// operator tree matches the row-format output column by column, both for
// plain explain and explain analyze.
func TestExplainJSON(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1, t2")
	tk.MustExec("create table t1(id int, key(id))")
	tk.MustExec("create table t2(id int, key(id))")
	cases := []string{
		"select * from t1",
		"select count(*) from t2",
		"select * from t1, t2 where t1.id = t2.id",
		"select /*+ merge_join(t1, t2)*/ * from t1, t2 where t1.id = t2.id",
		"with top10 as ( select * from t1 order by id desc limit 10 ) select * from top10 where id in (1,2)",
		"insert into t1 values(1)",
		"delete from t2 where t2.id > 10",
		"update t2 set id = 1 where id =2",
		"select * from t1 where t1.id < (select sum(t2.id) from t2 where t2.id = t1.id)",
	}
	// test syntax
	tk.MustExec("explain format = 'tidb_json' select * from t1")
	tk.MustExec("explain format = tidb_json select * from t1")
	tk.MustExec("explain format = 'TIDB_JSON' select * from t1")
	tk.MustExec("explain format = TIDB_JSON select * from t1")
	tk.MustExec("explain analyze format = 'tidb_json' select * from t1")
	tk.MustExec("explain analyze format = tidb_json select * from t1")
	tk.MustExec("explain analyze format = 'TIDB_JSON' select * from t1")
	tk.MustExec("explain analyze format = TIDB_JSON select * from t1")
	// explain
	for _, sql := range cases {
		jsonForamt := "explain format = tidb_json " + sql
		rowForamt := "explain format = row " + sql
		resJSON := tk.MustQuery(jsonForamt).Rows()
		resRow := tk.MustQuery(rowForamt).Rows()
		// The JSON output is a single cell: decode and flatten it so it can
		// be compared against the row-format output one operator at a time.
		j := new([]*plannercore.ExplainInfoForEncode)
		require.NoError(t, json.Unmarshal([]byte(resJSON[0][0].(string)), j))
		var flatJSONRows []*plannercore.ExplainInfoForEncode
		for _, row := range *j {
			flatJSONRows = append(flatJSONRows, flatJSONPlan(row)...)
		}
		require.Equal(t, len(flatJSONRows), len(resRow))
		// Row format (plain explain): id, estRows, task, access object, operator info.
		for i, row := range resRow {
			require.Contains(t, row[0], flatJSONRows[i].ID)
			require.Equal(t, flatJSONRows[i].EstRows, row[1])
			require.Equal(t, flatJSONRows[i].TaskType, row[2])
			require.Equal(t, flatJSONRows[i].AccessObject, row[3])
			require.Equal(t, flatJSONRows[i].OperatorInfo, row[4])
		}
	}
	// explain analyze
	for _, sql := range cases {
		jsonForamt := "explain analyze format = tidb_json " + sql
		rowForamt := "explain analyze format = row " + sql
		resJSON := tk.MustQuery(jsonForamt).Rows()
		resRow := tk.MustQuery(rowForamt).Rows()
		j := new([]*plannercore.ExplainInfoForEncode)
		require.NoError(t, json.Unmarshal([]byte(resJSON[0][0].(string)), j))
		var flatJSONRows []*plannercore.ExplainInfoForEncode
		for _, row := range *j {
			flatJSONRows = append(flatJSONRows, flatJSONPlan(row)...)
		}
		require.Equal(t, len(flatJSONRows), len(resRow))
		// Analyze row format adds actRows (col 2) and shifts the rest;
		// operator info sits at col 6 (col 5 is execution info).
		for i, row := range resRow {
			require.Contains(t, row[0], flatJSONRows[i].ID)
			require.Equal(t, flatJSONRows[i].EstRows, row[1])
			require.Equal(t, flatJSONRows[i].ActRows, row[2])
			require.Equal(t, flatJSONRows[i].TaskType, row[3])
			require.Equal(t, flatJSONRows[i].AccessObject, row[4])
			require.Equal(t, flatJSONRows[i].OperatorInfo, row[6])
			// executeInfo, memory, disk maybe vary in multi execution
			require.NotEqual(t, flatJSONRows[i].ExecuteInfo, "")
			require.NotEqual(t, flatJSONRows[i].MemoryInfo, "")
			require.NotEqual(t, flatJSONRows[i].DiskInfo, "")
		}
	}
}
// TestExplainFormatInCtx checks that the statement context records both
// InExplainStmt and the explain format used, and that only the
// 'plan_cache' format lets `explain analyze` hit the non-prepared plan
// cache.
func TestExplainFormatInCtx(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int)")
	tk.MustExec("set @@session.tidb_enable_non_prepared_plan_cache = 1")
	explainFormats := []string{
		types.ExplainFormatBrief,
		types.ExplainFormatDOT,
		types.ExplainFormatHint,
		types.ExplainFormatROW,
		types.ExplainFormatVerbose,
		types.ExplainFormatTraditional,
		types.ExplainFormatBinary,
		types.ExplainFormatTiDBJSON,
		types.ExplainFormatCostTrace,
		types.ExplainFormatPlanCache,
	}
	// Prime the plan cache, then explain-analyze without an explicit
	// format: the default format is ROW and the cache must not be hit.
	tk.MustExec("select * from t")
	tk.MustExec("explain analyze select * from t")
	require.Equal(t, tk.Session().GetSessionVars().StmtCtx.InExplainStmt, true)
	require.Equal(t, tk.Session().GetSessionVars().StmtCtx.ExplainFormat, types.ExplainFormatROW)
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	for _, format := range explainFormats {
		tk.MustExec(fmt.Sprintf("explain analyze format = '%v' select * from t", format))
		require.Equal(t, tk.Session().GetSessionVars().StmtCtx.InExplainStmt, true)
		require.Equal(t, tk.Session().GetSessionVars().StmtCtx.ExplainFormat, format)
		// Only the plan_cache format is allowed to reuse a cached plan.
		if format != types.ExplainFormatPlanCache {
			tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
		} else {
			tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
		}
	}
}
// TestExplainFormatPlanCache exercises `explain format = 'plan_cache'`:
// uncacheable queries (sub-queries) produce a skip warning and miss the
// cache, cacheable ones hit it, and every other explain format bypasses
// the plan cache entirely.
func TestExplainFormatPlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int)")
	tk.MustExec("set @@session.tidb_enable_non_prepared_plan_cache = 1")
	// Warm up the cache with a cacheable statement.
	tk.MustExec("select * from t limit 1")
	tk.MustExec("select * from t limit 1")
	// miss
	tk.MustExec("explain format = 'plan_cache' select * from (select * from t) t1 limit 1")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip non-prepared plan-cache: queries that have sub-queries are not supported"))
	tk.MustExec("explain format = 'plan_cache' select * from (select * from t) t1 limit 1")
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	tk.MustExec("explain analyze format = 'plan_cache' select * from (select * from t) t1 limit 1")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip non-prepared plan-cache: queries that have sub-queries are not supported"))
	tk.MustExec("explain analyze format = 'plan_cache' select * from (select * from t) t1 limit 1")
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	// hit
	tk.MustExec("explain format = 'plan_cache' select * from t")
	tk.MustQuery("show warnings").Check(testkit.Rows())
	tk.MustExec("explain format = 'plan_cache' select * from t")
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
	tk.MustExec("explain analyze format = 'plan_cache' select * from t")
	tk.MustQuery("show warnings").Check(testkit.Rows())
	tk.MustExec("explain analyze format = 'plan_cache' select * from t")
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
	// will not use plan cache
	explainFormats := []string{
		types.ExplainFormatBrief,
		types.ExplainFormatDOT,
		types.ExplainFormatHint,
		types.ExplainFormatROW,
		types.ExplainFormatVerbose,
		types.ExplainFormatTraditional,
		types.ExplainFormatBinary,
		types.ExplainFormatTiDBJSON,
		types.ExplainFormatCostTrace,
	}
	tk.MustExec("explain select * from t")
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	for _, format := range explainFormats {
		tk.MustExec(fmt.Sprintf("explain format = '%v' select * from t", format))
		tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	}
}
|
package drouter
// DefaultPrefix is the default command prefix used by plugins that do not
// set their own via SetPrefix.
var DefaultPrefix = "/"
// Plugin defines the structure of a disgord plugin.
type Plugin struct {
	// ImportName defines the import name of the plugin.
	ImportName string
	// Prefix defines the commands prefix.
	Prefix string
	// RootCommand defines the base plugin's root/ base command
	RootCommand Command
	// Listeners defines the different event handlers
	// of the plugin (see https://godoc.org/github.com/andersfylling/disgord/event),
	// keyed by event name.
	Listeners map[string][]interface{}
	// Commands contains the registered sub-commands of the plugin.
	Commands []*Command
	// IsReady is true if the module was loaded and installed into the client.
	IsReady bool
}
// Use appends given callbacks to a plugin to call
// whenever a command is being invoked. It returns the plugin so calls
// can be chained.
func (plugin *Plugin) Use(callbackFuncs ...callbackFunc) *Plugin {
	// FIXME: we should put them as global wrappers in Plugin
	// instead of the root command.
	plugin.RootCommand.Use(callbackFuncs...)
	return plugin
}
// SetPrefix sets the plugin commands prefix (can be empty for no prefix).
// It returns the plugin so calls can be chained.
func (plugin *Plugin) SetPrefix(prefix string) *Plugin {
	plugin.Prefix = prefix
	return plugin
}
// Handler defines the function to invoke whenever the plugin command
// is being invoked; it is installed on the root command. It returns the
// plugin so calls can be chained.
func (plugin *Plugin) Handler(callbackFunc callbackFunc) *Plugin {
	plugin.RootCommand.Handler(callbackFunc)
	return plugin
}
// On registers given handlers to be invoked whenever the named event is
// fired, appending to any handlers already registered for that event.
// It returns the plugin so calls can be chained.
func (plugin *Plugin) On(eventName string, inputs ...interface{}) *Plugin {
	// Lazily create the map: Plugin's zero value leaves Listeners nil,
	// and writing to a nil map panics.
	if plugin.Listeners == nil {
		plugin.Listeners = make(map[string][]interface{})
	}
	// append covers both the first registration and later additions.
	plugin.Listeners[eventName] = append(plugin.Listeners[eventName], inputs...)
	return plugin
}
// Command creates a new sub-command with the given names, registers it on
// the plugin, and returns it for further configuration.
func (plugin *Plugin) Command(names ...string) *Command {
	cmd := &Command{Names: NewStringSet(names...)}
	plugin.Commands = append(plugin.Commands, cmd)
	return cmd
}
// Help sets the help text of a command. The first line is
// the short and straightforward documentation. The whole text
// is the long and descriptive documentation. It returns the plugin
// so calls can be chained.
func (plugin *Plugin) Help(helpText string) *Plugin {
	plugin.RootCommand.Help(helpText)
	return plugin
}
// Activate marks a plugin as ready (loaded and installed into the client).
func (plugin *Plugin) Activate() {
	// TODO: we should dispatch setUp(...)
	plugin.IsReady = true
}
|
package transport
import (
"net/http"
"github.com/sumelms/microservice-course/internal/course/endpoints"
"github.com/sumelms/microservice-course/pkg/errors"
kittransport "github.com/go-kit/kit/transport"
kithttp "github.com/go-kit/kit/transport/http"
"github.com/go-kit/log"
"github.com/gorilla/mux"
"github.com/sumelms/microservice-course/internal/course/domain"
)
// NewHTTPHandler wires the course and subscription endpoints onto the given
// mux router. All go-kit servers share the same error handler (logging via
// the provided logger) and error encoder.
func NewHTTPHandler(r *mux.Router, s domain.ServiceInterface, logger log.Logger) {
	serverOpts := []kithttp.ServerOption{
		kithttp.ServerErrorHandler(kittransport.NewLogErrorHandler(logger)),
		kithttp.ServerErrorEncoder(errors.EncodeError),
	}
	// Course routes
	r.Handle("/courses", endpoints.NewCreateCourseHandler(s, serverOpts...)).Methods(http.MethodPost)
	r.Handle("/courses", endpoints.NewListCourseHandler(s, serverOpts...)).Methods(http.MethodGet)
	r.Handle("/courses/{uuid}", endpoints.NewFindCourseHandler(s, serverOpts...)).Methods(http.MethodGet)
	r.Handle("/courses/{uuid}", endpoints.NewUpdateCourseHandler(s, serverOpts...)).Methods(http.MethodPut)
	r.Handle("/courses/{uuid}", endpoints.NewDeleteCourseHandler(s, serverOpts...)).Methods(http.MethodDelete)
	// Subscription routes
	r.Handle("/subscriptions", endpoints.NewListSubscriptionHandler(s, serverOpts...)).Methods(http.MethodGet)
	r.Handle("/subscriptions", endpoints.NewCreateSubscriptionHandler(s, serverOpts...)).Methods(http.MethodPost)
	r.Handle("/subscriptions/{uuid}", endpoints.NewFindSubscriptionHandler(s, serverOpts...)).Methods(http.MethodGet)
	r.Handle("/subscriptions/{uuid}", endpoints.NewDeleteSubscriptionHandler(s, serverOpts...)).Methods(http.MethodDelete)
	r.Handle("/subscriptions/{uuid}", endpoints.NewUpdateSubscriptionHandler(s, serverOpts...)).Methods(http.MethodPut)
}
|
package main
import (
"fmt"
"time"
)
// Go has a special statement called select; it works like switch but operates on channels.
func main() {
	// Unbuffered channels synchronize: a send blocks until a receive is ready.
	c1 := make(chan string)
	c2 := make(chan string)
	/* A buffered channel is created by passing a capacity as the second
	argument to make. Buffered sends are asynchronous: they proceed without
	waiting as long as the buffer has room.
	c3 := make(chan int, 1) yields a buffered channel with capacity 1.
	*/
	c3 := make(chan int, 1)
	go func() {
		for {
			c1 <- "from 1"
			// send "from 1" every 2 seconds
			time.Sleep(time.Second * 2)
		}
	}()
	go func() {
		for {
			c2 <- "from 2"
			// send "from 2" every 3 seconds
			time.Sleep(time.Second * 3)
		}
	}()
	go func() {
		for {
			c3 <- 00
			time.Sleep(time.Second * 4)
		}
	}()
	go func() {
		for {
			select {
			// receive the value stored in channel c1 into msg1
			case msg1 := <-c1:
				fmt.Println("Message 1", msg1)
			case msg2 := <-c2:
				fmt.Println("Message 2", msg2)
			case msg3 := <-c3:
				fmt.Println("Channel : ", msg3)
			// select is commonly used to implement timeouts
			case <-time.After(time.Second):
				fmt.Println("timeout")
			/* time.After creates a channel and sends on it once the duration
			has elapsed.
			A default case may also be specified; default runs immediately
			whenever no other channel is ready. */
			default:
				fmt.Println("nothing ready")
			}
		}
	}()
	// Block until the user presses Enter, keeping the goroutines running.
	var input string
	fmt.Scanln(&input)
}
|
//source: https://doc.qt.io/qt-5/qtgui-openglwindow-example.html
package main
import (
"os"
"github.com/therecipe/qt/gui"
)
const (
	// vertexShaderSource transforms each vertex by the "matrix" uniform and
	// forwards its per-vertex color to the fragment stage.
	vertexShaderSource = "attribute highp vec4 posAttr;\n" +
		"attribute lowp vec4 colAttr;\n" +
		"varying lowp vec4 col;\n" +
		"uniform highp mat4 matrix;\n" +
		"void main() {\n" +
		" col = colAttr;\n" +
		" gl_Position = matrix * posAttr;\n" +
		"}\n"
	// fragmentShaderSource paints each fragment with the interpolated color.
	fragmentShaderSource = "varying lowp vec4 col;\n" +
		"void main() {\n" +
		" gl_FragColor = col;\n" +
		"}\n"
)
// TriangleWindow renders a rotating colored triangle with the shader program
// above. It embeds OpenGLWindow, which supplies the GL context and the
// expose/render plumbing.
type TriangleWindow struct {
	OpenGLWindow
	_ func() `constructor:"init"`

	// Attribute/uniform locations, resolved once in initialize().
	m_posAttr       uint
	m_colAttr       uint
	m_matrixUniform int

	m_program *gui.QOpenGLShaderProgram // compiled and linked shader program
	m_frame   float32                   // frame counter that drives the rotation
}
// init runs as the Qt constructor (see the struct tag): it wires the lazy
// initialize/render hooks and event handlers into the embedded OpenGLWindow.
func (w *TriangleWindow) init() {
	w.OpenGLWindow.initializeLazy = w.initialize
	w.OpenGLWindow.renderLazy = w.render
	w.ConnectEvent(w.OpenGLWindow.event)
	w.ConnectExposeEvent(w.OpenGLWindow.exposeEvent)
}
// initialize compiles and links the shader program, then caches the
// attribute and uniform locations used every frame by render.
func (w *TriangleWindow) initialize() {
	w.m_program = gui.NewQOpenGLShaderProgram(w)
	w.m_program.AddShaderFromSourceCode(gui.QOpenGLShader__Vertex, vertexShaderSource)
	w.m_program.AddShaderFromSourceCode(gui.QOpenGLShader__Fragment, fragmentShaderSource)
	w.m_program.Link()
	w.m_posAttr = uint(w.m_program.AttributeLocation("posAttr"))
	w.m_colAttr = uint(w.m_program.AttributeLocation("colAttr"))
	w.m_matrixUniform = w.m_program.UniformLocation("matrix")
}
// render draws one frame: a colored triangle spinning about the Y axis.
// The rotation angle advances with m_frame, scaled by the screen refresh
// rate so the speed is display-independent.
func (w *TriangleWindow) render(painter *gui.QPainter) {
	// Account for high-DPI displays when sizing the viewport.
	retinaScale := int(w.DevicePixelRatio())
	w.GlViewport(0, 0, w.Width()*retinaScale, w.Height()*retinaScale)
	w.GlClear(GL_COLOR_BUFFER_BIT)
	w.m_program.Bind()
	// Model-view-projection: 60° perspective, pushed back 2 units, rotating.
	matrix := gui.NewQMatrix4x4()
	matrix.Perspective(60, float32(4)/float32(3), 0.1, 100)
	matrix.Translate3(0, 0, -2)
	matrix.Rotate2(100*w.m_frame/float32(w.Screen().RefreshRate()), 0, 1, 0)
	w.m_program.SetUniformValue23(w.m_matrixUniform, matrix)
	// Feed positions (2 floats/vertex) and colors (3 floats/vertex).
	w.GlVertexAttribPointer(w.m_posAttr, 2, GL_FLOAT, GL_FALSE, 0, vertices)
	w.GlVertexAttribPointer(w.m_colAttr, 3, GL_FLOAT, GL_FALSE, 0, colors)
	w.GlEnableVertexAttribArray(0)
	w.GlEnableVertexAttribArray(1)
	w.GlDrawArrays(GL_TRIANGLES, 0, 3)
	w.GlDisableVertexAttribArray(1)
	w.GlDisableVertexAttribArray(0)
	w.m_program.Release()
	w.m_frame++
}
// main creates the Qt GUI application and the triangle window with a
// multisampled surface format, then enters the Qt event loop.
func main() {
	gui.NewQGuiApplication(len(os.Args), os.Args)
	format := gui.NewQSurfaceFormat()
	format.SetSamples(16) // 16x multisampling for smoother triangle edges
	window := NewTriangleWindow(nil)
	window.SetFormat(format)
	window.Resize2(640, 480)
	window.Show()
	window.setAnimating(true) // request continuous repaints
	gui.QGuiApplication_Exec()
}
|
package main
import (
"bufio"
"fmt"
"os"
)
// main reads a 4x4 board of tile values from stdin followed by a move
// direction (0=left, 1=up, 2=right, 3=down), applies one 2048-style
// slide-and-merge in that direction, and prints the resulting board.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Split(bufio.ScanLines)
	// arr holds the board in row-major order (arr[row*4+col]).
	var arr [16]int
	for i := 0; i < 4; i++ {
		scanner.Scan()
		var a, b, c, d int
		fmt.Sscanf(scanner.Text(), "%d %d %d %d", &a, &b, &c, &d)
		arr[i*4] = a
		arr[i*4+1] = b
		arr[i*4+2] = c
		arr[i*4+3] = d
	}
	scanner.Scan()
	var mv int
	fmt.Sscanf(scanner.Text(), "%d", &mv)
	var res [16]int
	// Each of the four lines (rows or columns) is processed independently.
	for i := 0; i < 4; i++ {
		// a..d are the board indices of the line's cells, ordered from the
		// edge the tiles slide toward to the opposite edge.
		var a, b, c, d int
		if mv == 0 { // left: row i, left to right
			a = i * 4
			b = i*4 + 1
			c = i*4 + 2
			d = i*4 + 3
		} else if mv == 1 { // up: column i, top to bottom
			a = i
			b = i + 4
			c = i + 8
			d = i + 12
		} else if mv == 2 { // right: row i, right to left
			a = i*4 + 3
			b = i*4 + 2
			c = i*4 + 1
			d = i * 4
		} else { // down: column i, bottom to top
			a = i + 12
			b = i + 8
			c = i + 4
			d = i
		}
		// Compact the line: copy non-zero tiles into temp, preserving order.
		// Unfilled slots of temp stay 0, representing empty cells.
		var temp [4]int
		x := 0
		if arr[a] != 0 {
			temp[x] = arr[a]
			x++
		}
		if arr[b] != 0 {
			temp[x] = arr[b]
			x++
		}
		if arr[c] != 0 {
			temp[x] = arr[c]
			x++
		}
		if arr[d] != 0 {
			temp[x] = arr[d]
			x++
		}
		// Merge adjacent equal pairs once, front to back, as in 2048:
		// a pair that merges cannot merge again this move.
		if temp[0] == temp[1] {
			res[a] = 2 * temp[0]
			res[b] = temp[2]
			res[c] = temp[3]
			res[d] = 0
			// The remaining pair may merge as well.
			if temp[2] == temp[3] {
				res[b] = 2 * temp[2]
				res[c] = 0
			}
		} else {
			res[a] = temp[0]
			if temp[1] == temp[2] {
				res[b] = 2 * temp[1]
				res[c] = temp[3]
				res[d] = 0
			} else {
				res[b] = temp[1]
				if temp[2] == temp[3] {
					res[c] = 2 * temp[2]
					res[d] = 0
				} else {
					res[c] = temp[2]
					res[d] = temp[3]
				}
			}
		}
	}
	for i := 0; i < 4; i++ {
		fmt.Printf("%d %d %d %d\n", res[i*4], res[i*4+1], res[i*4+2], res[i*4+3])
	}
}
|
package init
import (
_ "github.com/GM-Publicchain/gm/plugin/mempool/para" //auto gen
_ "github.com/GM-Publicchain/gm/plugin/mempool/price" //auto gen
_ "github.com/GM-Publicchain/gm/plugin/mempool/score" //auto gen
)
|
package plugins
import (
"io"
"os"
"os/exec"
"path/filepath"
"github.com/hashicorp/go-multierror"
)
// Load starts the plugin executable at the given path and connects to it
// through its stdin/stdout pipes.
// Returns a valid plugin or a non-nil error.
func Load(path string) (Plugin, error) {
	cmd := exec.Command(path)
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	// Mirror the plugin's stderr onto ours; this is best-effort only.
	stderr, err := cmd.StderrPipe()
	if err == nil {
		go io.Copy(os.Stderr, stderr)
	}
	// BUG FIX: cmd.Start is already non-blocking; the original `go cmd.Start()`
	// discarded the start error and raced against the pipe reads below.
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return NewRemotePlugin(stdout, stdin)
}
// LoadAll loads every plugin found directly inside the directory at path.
// It returns the successfully loaded plugins together with a multierror
// aggregating any individual load failures.
func LoadAll(path string) ([]Plugin, error) {
	dir, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	dirinfo, err := dir.Stat()
	if err != nil {
		return nil, err
	}
	if !dirinfo.IsDir() {
		return nil, NotDirectory
	}
	files, err := dir.Readdir(0)
	if err != nil {
		return nil, err
	}
	plugins := make([]Plugin, 0, len(files))
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		// Not a directory. A file. Attempt execution.
		pl, plErr := Load(filepath.Join(path, file.Name()))
		// BUG FIX: the original tested the stale outer `err` (always nil here)
		// instead of the per-plugin error, so failed loads were appended as
		// nil plugins and their errors lost.
		if plErr == nil {
			plugins = append(plugins, pl)
		} else {
			err = multierror.Append(err, plErr)
		}
	}
	return plugins, err
}
|
package main
import (
	"encoding/json"
	"flag"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
	"github.com/sideshow/apns2"
	"github.com/sideshow/apns2/certificate"
)
// Config mirrors config.toml: the p12 certificate location and credentials
// plus the push target.
type Config struct {
	Filename    string `toml:"filename"`     // path to the .p12 certificate file
	Password    string `toml:"password"`     // certificate password
	DeviceToken string `toml:"device_token"` // APNs device token to push to
	Topic       string `toml:"topic"`        // APNs topic (app bundle id)
	Mode        string `toml:"mode"`         // "production" selects the production gateway; anything else, development
}
// Client bundles an apns2 client with the topic it pushes notifications to.
type Client struct {
	apnsClient *apns2.Client
	topic      string
}
// NewClient loads the p12 certificate at certPath and builds a push client
// aimed at the production gateway when mode is "production", otherwise at
// the development gateway.
func NewClient(certPath, password, topic, mode string) (*Client, error) {
	cert, err := certificate.FromP12File(certPath, password)
	if err != nil {
		return nil, err
	}
	apnsClient := apns2.NewClient(cert)
	if mode == "production" {
		apnsClient = apnsClient.Production()
	} else {
		apnsClient = apnsClient.Development()
	}
	return &Client{apnsClient: apnsClient, topic: topic}, nil
}
// Push sends message as an alert notification to the given device token.
// BUG FIX: the payload was previously assembled with fmt.Sprintf, so a
// message containing quotes, backslashes or control characters produced
// invalid JSON. Build it with encoding/json so the alert text is escaped.
func (c *Client) Push(message, deviceToken string) (*apns2.Response, error) {
	payload, err := json.Marshal(map[string]interface{}{
		"aps": map[string]interface{}{"alert": message},
	})
	if err != nil {
		return nil, err
	}
	return c.apnsClient.Push(&apns2.Notification{
		DeviceToken: deviceToken,
		Topic:       c.topic,
		Payload:     payload,
	})
}
// main loads config.toml, builds the APNs client, pushes the text given via
// the -message flag to the configured device, and prints the response.
func main() {
	message := flag.String("message", "Hello!", "")
	flag.Parse()

	var cfg Config
	if _, err := toml.DecodeFile("config.toml", &cfg); err != nil {
		log.Fatal(err)
	}

	client, err := NewClient(cfg.Filename, cfg.Password, cfg.Topic, cfg.Mode)
	if err != nil {
		log.Fatal(err)
	}

	resp, err := client.Push(*message, cfg.DeviceToken)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%v %v %v\n", resp.StatusCode, resp.ApnsID, resp.Reason)
}
|
package cache
import (
"container/list"
"testing"
)
// newBase constructs a base cache bounded to size entries.
func newBase(size int) *base {
	b := &base{
		size:      size,
		evictList: list.New(),
	}
	b.items = make(map[interface{}]*list.Element, size+1)
	return b
}
// TestBase inserts 256 entries into a size-128 cache and verifies eviction
// order, ordered key iteration, idempotent deletion, and purge.
func TestBase(t *testing.T) {
	c := newBase(128)
	for i := 0; i < 256; i++ {
		c.set(i, i, 0)
	}
	if l := c.length(); l != 128 {
		t.Fatalf("bad length: %v", l)
	}
	// Surviving keys are 128..255 and keys() yields them in insertion order.
	for i, k := range c.keys() {
		if v, ok := c.get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	// The oldest half must have been evicted.
	for i := 0; i < 128; i++ {
		_, ok := c.get(i)
		if ok {
			t.Fatal("should be evicted")
		}
	}
	// The newest half must still be present.
	for i := 128; i < 256; i++ {
		_, ok := c.get(i)
		if !ok {
			t.Fatal("should not be evicted")
		}
	}
	// delete succeeds once, then misses; the entry is really gone.
	for i := 128; i < 192; i++ {
		ok := c.delete(i)
		if !ok {
			t.Fatal("should be contained")
		}
		ok = c.delete(i)
		if ok {
			t.Fatal("should not be contained")
		}
		_, ok = c.get(i)
		if ok {
			t.Fatal("should be deleted")
		}
	}
	c.get(192) // expect 192 to be last key in l.Keys()
	for i, k := range c.keys() {
		if (i < 63 && k != i+193) || (i == 63 && k != 192) {
			t.Fatalf("out of order key: %v", k)
		}
	}
	// purge empties the cache entirely.
	c.purge()
	if l := c.length(); l != 0 {
		t.Fatalf("bad len: %v", l)
	}
	if _, ok := c.get(200); ok {
		t.Fatal("should contain nothing")
	}
}
// TestBase_Contains checks that contain reports membership without
// refreshing an entry's recent-ness.
func TestBase_Contains(t *testing.T) {
	cache := newBase(2)
	cache.set(1, 1, 0)
	cache.set(2, 2, 0)
	if !cache.contain(1) {
		t.Fatal("1 should be contained")
	}
	// Adding a third entry evicts the least recently used one. Had contain
	// bumped key 1, key 2 would have been evicted instead.
	cache.set(3, 3, 0)
	if cache.contain(1) {
		t.Fatal("Contains should not have updated recent-ness of 1")
	}
}
// TestLRU_Peek checks that peek returns the stored value without refreshing
// an entry's recent-ness.
func TestLRU_Peek(t *testing.T) {
	cache := newBase(2)
	cache.set(1, 1, 0)
	cache.set(2, 2, 0)
	if v, ok := cache.peek(1); !ok || v != 1 {
		t.Fatalf("1 should be set to 1: %v, %v", v, ok)
	}
	// A third insertion must evict key 1, proving peek did not bump it.
	cache.set(3, 3, 0)
	if cache.contain(1) {
		t.Fatal("should not have updated recent-ness of 1")
	}
}
|
package constants
const (
	// DaySeconds is the number of seconds in one day (24 * 60 * 60).
	DaySeconds = 86400
)
|
package client
import (
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"thorium-go/requests"
)
import "bytes"
import "io/ioutil"
// address and port locate the master server that all client requests target.
var address string = "52.25.124.72"
var port int = 6960
// PingMaster checks whether the master server's status endpoint is reachable
// and healthy, returning true on a 200 response.
func PingMaster() (bool, error) {
	endpoint := fmt.Sprintf("http://%s:%d/status", address, port)
	// A GET needs no request body; the original sent an empty buffer.
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return false, err
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Print("ping master - error:\n", err)
		return false, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return false, errors.New("http status error")
	}
	return true, nil
}
// LoginRequest authenticates username/password against the master server and
// returns the account token contained in the response body.
func LoginRequest(username string, password string) (string, error) {
	var loginReq request.Authentication
	loginReq.Username = username
	loginReq.Password = password
	jsonBytes, err := json.Marshal(&loginReq)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:%d/clients/login", address, port), bytes.NewBuffer(jsonBytes))
	if err != nil {
		log.Print("error with request: ", err)
		return "err", err
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Print("error with sending request", err)
		return "err", err
	}
	defer resp.Body.Close()
	// BUG FIX: the body read error was silently discarded.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	tokenString := string(body)
	log.Print("account token:\n", tokenString)
	return tokenString, nil
}
// CharacterSelectRequest asks the master server to select the character with
// the given id for the account identified by token, returning the raw
// response body.
func CharacterSelectRequest(token string, id int) (string, error) {
	var selectReq request.SelectCharacter
	selectReq.AccountToken = token
	selectReq.ID = id
	jsonBytes, err := json.Marshal(&selectReq)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:%d/characters/%d/select", address, port, id), bytes.NewBuffer(jsonBytes))
	// BUG FIX: this error was previously ignored; dereferencing a nil req
	// below would panic.
	if err != nil {
		log.Print("error with request: ", err)
		return "err", err
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Print("Error with request 2: ", err)
		return "err", err
	}
	defer resp.Body.Close()
	// BUG FIX: surface body read errors instead of discarding them.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// CharacterCreateRequest asks the master server to create a character with
// the given name for the account identified by token, returning the raw
// response body.
func CharacterCreateRequest(token string, name string) (string, error) {
	var charCreateReq request.CreateCharacter
	charCreateReq.AccountToken = token
	charCreateReq.Name = name
	jsonBytes, err := json.Marshal(&charCreateReq)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:%d/characters/new", address, port), bytes.NewBuffer(jsonBytes))
	// BUG FIX: this error was previously ignored; dereferencing a nil req
	// below would panic.
	if err != nil {
		log.Print("error with request: ", err)
		return "err", err
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Print("Error with request 2: ", err)
		return "err", err
	}
	defer resp.Body.Close()
	// BUG FIX: surface body read errors instead of discarding them.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	log.Print("create character response: ", string(body))
	return string(body), nil
}
// DisconnectRequest notifies the master server that the client identified by
// token is disconnecting, returning the raw response body.
func DisconnectRequest(token string) (string, error) {
	buf := []byte(token)
	req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:%d/clients/disconnect", address, port), bytes.NewBuffer(buf))
	if err != nil {
		log.Print("error with request: ", err)
		return "err", err
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Print("error with sending request", err)
		return "err", err
	}
	defer resp.Body.Close()
	// BUG FIX: surface body read errors instead of discarding them.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// Typo fix in the log message: "disonnect" -> "disconnect".
	log.Print("disconnect response: ", string(body))
	return string(body), nil
}
|
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"os"
)
// delBranchCmd represents the del_branch command, which deletes local git
// branches whose names match a user-supplied regular expression.
var delBranchCmd = &cobra.Command{
	Use:   "del_branch",
	Short: "del_branch is used to delete a branch.",
	Long:  `del_branch is used to delete a branch.`,
	Run:   delBranch,
}
// init declares the --target/-t flag (a regular expression selecting the
// branches to delete) and registers del_branch under the root command.
func init() {
	delBranchCmd.PersistentFlags().StringP("target", "t", "", "Set the branch name to be deleted with a regular expression.")
	rootCmd.AddCommand(delBranchCmd)
}
// delBranch implements del_branch: it lists local branches, then
// force-deletes those whose names match the --target regular expression.
func delBranch(cmd *cobra.Command, args []string) {
	if target, err := cmd.PersistentFlags().GetString("target"); err == nil {
		if target == "" {
			fmt.Println("target should not be blank")
			os.Exit(2)
		}
		var gitBranchResult *GitCmdResult
		// git branch: collect the current branch list.
		{
			cmd, err := newGitCmdExecutor([]string{""}, []string{}, []string{}, "", false, false)
			if err != nil {
				fmt.Println(err.Error())
				os.Exit(1)
			}
			gitBranchResult, err = cmd.ExecuteCmd(&GitBranchRunner{})
			if err != nil {
				fmt.Println(err.Error())
				os.Exit(1)
			}
		}
		// git branch -D: delete the branches matching target.
		{
			cmd, err := newGitCmdExecutor([]string{"D"}, gitBranchResult.result, []string{}, target, true, dryRun)
			if err != nil {
				fmt.Println(err.Error())
				os.Exit(1)
			}
			// BUG FIX: the original discarded ExecuteCmd's return values and
			// re-checked the stale constructor err, so deletion failures were
			// silently ignored.
			if _, err = cmd.ExecuteCmd(&GitBranchRunner{}); err != nil {
				fmt.Println(err.Error())
				os.Exit(1)
			}
		}
	}
}
|
package gohdk
import (
"encoding/json"
"runtime"
"syscall/js"
"unsafe"
)
|
package npm
import (
"path/filepath"
"strings"
"github.com/omeid/gonzo"
"github.com/omeid/gonzo/context"
"github.com/go-gonzo/archive/tar"
"github.com/go-gonzo/compress/gzip"
"github.com/go-gonzo/fs"
"github.com/go-gonzo/npm/commonjs/package"
"github.com/go-gonzo/npm/commonjs/registry/client"
"github.com/go-gonzo/path"
"github.com/go-gonzo/util"
"github.com/go-gonzo/web"
)
// Install returns a task that resolves the given npm package specs, fetches
// their tarballs, and writes the extracted files beneath dist.
func Install(dist string, packages ...string) func(ctx context.Context) error {
	return func(ctx context.Context) error {
		resolved, err := Packages(packages...)
		if err != nil {
			return err
		}
		pipe := Get(ctx, resolved...)
		return pipe.Then(fs.Dest(dist))
	}
}
// Packages resolves an array of package specs in "PRODUCT@tag/version/range"
// format against the registry; a missing or empty version defaults to
// "latest".
func Packages(packages ...string) ([]pkg.Package, error) {
	pkgs := make([]pkg.Package, 0, len(packages))
	for _, spec := range packages {
		parts := strings.Split(spec, "@")
		name := parts[0]
		version := "latest"
		if len(parts) > 1 && parts[1] != "" {
			version = parts[1]
		}
		resolved, err := client.Get(name, version)
		if err != nil {
			return nil, err
		}
		pkgs = append(pkgs, *resolved)
	}
	return pkgs, nil
}
// Get fetches every package that carries a dist tarball URL and merges the
// resulting pipes into a single one; packages without a tarball are logged
// and skipped.
func Get(ctx context.Context, pkgs ...pkg.Package) gonzo.Pipe {
	pipes := make([]gonzo.Pipe, 0, len(pkgs))
	for _, p := range pkgs {
		if p.Dist.Tarball == "" {
			ctx.Info("EMPTY", p.Name)
			continue
		}
		pipes = append(pipes, get(ctx, p))
	}
	return util.Merge(ctx, pipes...)
}
// get downloads one package tarball, gunzips and untars it (stripping the
// leading path component npm adds), and prefixes every extracted file path
// with the package name.
func get(ctx context.Context, pkg pkg.Package) gonzo.Pipe {
	return web.Get(ctx, pkg.Dist.Tarball).Pipe(
		gzip.Uncompress(),
		tar.Untar(tar.Options{
			// NOTE: "StripComponenets" is the upstream library's own
			// (misspelled) field name; it cannot be corrected here.
			StripComponenets: 1,
		}),
		path.Rename(func(old string) string {
			return filepath.Join(pkg.Name, old)
		}),
	)
}
|
// Copyright 2021 Akamai Technologies, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugin
import (
"context"
"fmt"
dns "github.com/akamai/AkamaiOPEN-edgegrid-golang/configdns-v2"
"github.com/akamai/edgedns-registrar-coordinator/registrar"
log "github.com/apex/log"
"path/filepath"
"plugin"
"sync"
)
const ()

var (
	// pluginMutex serializes every call into the plugin library, which
	// exchanges data through shared package-level argument/result objects.
	pluginMutex = &sync.Mutex{}
	// TODO Create mutex map with plugin name as index?
)
// PluginRegistrar adapts a Go plugin shared library to the registrar
// provider interface. Arguments and results are exchanged through the
// library's exported LibPluginArgs/LibPluginResult objects, so all calls
// must be serialized (see pluginMutex).
type PluginRegistrar struct {
	registrar.BaseRegistrarProvider
	pluginConfig *registrar.PluginConfig
	pluginArgs   *registrar.PluginFuncArgs   // shared in-argument object
	pluginResult *registrar.PluginFuncResult // shared result/error object
	// Entry points resolved from the plugin library by lookupSymbols.
	pluginGetDomains        func()
	pluginGetDomain         func()
	pluginGetMasterIPs      func()
	pluginGetTsigKey        func()
	pluginGetServeAlgorithm func()
	pluginTest bool // flag for testing.
}
// lookupSymbols resolves the symbols the registrar contract requires from
// the plugin library and wires them into reg. It returns an error when a
// symbol is missing or has an unexpected type; previously a wrong type made
// the unchecked type assertion panic.
func lookupSymbols(plug *plugin.Plugin, reg *PluginRegistrar) error {
	sym, err := plug.Lookup("LibPluginArgs")
	if err != nil {
		log.Errorf("Plugin library does not support RegistrarProvider interface. Error: %s", err.Error())
		return err
	}
	args, ok := sym.(*registrar.PluginFuncArgs)
	if !ok {
		return fmt.Errorf("plugin symbol LibPluginArgs has unexpected type %T", sym)
	}
	reg.pluginArgs = args
	sym, err = plug.Lookup("LibPluginResult")
	if err != nil {
		log.Errorf("Plugin library does not support RegistrarProvider interface. Error: %s", err.Error())
		return err
	}
	result, ok := sym.(*registrar.PluginFuncResult)
	if !ok {
		return fmt.Errorf("plugin symbol LibPluginResult has unexpected type %T", sym)
	}
	reg.pluginResult = result
	// The five entry points all share the func() shape; resolve them in a loop.
	entryPoints := map[string]*func(){
		"GetDomains":        &reg.pluginGetDomains,
		"GetDomain":         &reg.pluginGetDomain,
		"GetTsigKey":        &reg.pluginGetTsigKey,
		"GetServeAlgorithm": &reg.pluginGetServeAlgorithm,
		"GetMasterIPs":      &reg.pluginGetMasterIPs,
	}
	for name, target := range entryPoints {
		sym, err = plug.Lookup(name)
		if err != nil {
			log.Errorf("Plugin library does not support RegistrarProvider interface. Error: %s", err.Error())
			return err
		}
		fn, ok := sym.(func())
		if !ok {
			return fmt.Errorf("plugin symbol %s has unexpected type %T", name, sym)
		}
		*target = fn
	}
	return nil
}
// NewPluginRegistrar initializes a new plugin registrar: it opens the shared
// library at pluginConfig.PluginLibPath, resolves the required symbols, and
// invokes the library's constructor, whose outcome is reported through the
// shared result object.
func NewPluginRegistrar(ctx context.Context, pluginConfig registrar.PluginConfig) (*PluginRegistrar, error) {
	var err error
	pluginMutex.Lock()
	defer pluginMutex.Unlock()
	log := ctx.Value("appLog").(*log.Entry)
	log.Debugf("Entering NewPluginRegistrar")
	pluginRegistrar := PluginRegistrar{pluginConfig: &pluginConfig}
	// Parse validation should ensure path is not empty
	regPlugin, err := plugin.Open(pluginConfig.PluginLibPath)
	if err != nil {
		log.Errorf("Failed to open provided plugin library. Error: %s", err.Error())
		return nil, err
	}
	// Get plugin in name
	pluginConfig.PluginName = filepath.Base(pluginConfig.PluginLibPath)
	if err = lookupSymbols(regPlugin, &pluginRegistrar); err != nil {
		log.Errorf("Plugin library failed validation. Error: %s", err.Error())
		return nil, err
	}
	// initialize the plugin
	newPluginLibRegistrar, err := regPlugin.Lookup("NewPluginLibRegistrar")
	if err != nil {
		log.Errorf("Plugin library does not support RegistrarProvider interface. Error: %s", err.Error())
		return nil, err
	}
	pluginRegistrar.pluginArgs.PluginArg = pluginConfig
	if !pluginRegistrar.pluginTest {
		// clear ResultObj
		pluginRegistrar.pluginResult.PluginError = nil
		pluginRegistrar.pluginResult.PluginResult = nil
	}
	// Invoke the library constructor; it reports back via pluginResult.
	newPluginLibRegistrar.(func())()
	if pluginRegistrar.pluginResult.PluginError != nil {
		log.Errorf("Plugin library failed to initialize. Error: %s", pluginRegistrar.pluginResult.PluginError.Error())
		return nil, pluginRegistrar.pluginResult.PluginError
	}
	pluginConfig.Registrar = regPlugin
	return &pluginRegistrar, nil
}
// GetDomains asks the plugin library for the full list of domain names.
func (r *PluginRegistrar) GetDomains(ctx context.Context) ([]string, error) {
	domainsList := []string{}
	log := ctx.Value("appLog").(*log.Entry)
	log.Debug("Entering Plugin registrar GetDomains")
	// Library calls share package-level argument/result objects and must be
	// serialized.
	pluginMutex.Lock()
	defer pluginMutex.Unlock()
	if !r.pluginTest {
		// Reset the shared result object before invoking the library.
		r.pluginResult.PluginError = nil
		r.pluginResult.PluginResult = nil
	}
	log.Debugf("Invoking %s library GetDomains", r.pluginConfig.PluginName)
	r.pluginGetDomains()
	if perr := r.pluginResult.PluginError; perr != nil {
		log.Errorf("Plugin library GetDomains failed. %s", perr.Error())
		return domainsList, fmt.Errorf("Plugin library GetDomains failed.")
	}
	dl, ok := r.pluginResult.PluginResult.([]string)
	if !ok {
		log.Debugf("Unexpected Plugin library GetDomains return value: %v", r.pluginResult.PluginResult)
		return domainsList, fmt.Errorf("Unexpected Plugin library GetDomains return value type")
	}
	domainsList = append(domainsList, dl...)
	log.Debugf("Plugin GetDomains result: %v", domainsList)
	return domainsList, nil
}
func (r *PluginRegistrar) GetDomain(ctx context.Context, domain string) (*registrar.Domain, error) {
log := ctx.Value("appLog").(*log.Entry)
log.Debug("Entering Plugin registrar GetDomain")
// Synchronize library calls
pluginMutex.Lock()
defer pluginMutex.Unlock()
if !r.pluginTest {
// clear ResultObj
r.pluginResult.PluginError = nil
r.pluginResult.PluginResult = nil
r.pluginArgs.PluginArg = domain
}
log.Debugf("Invoking %s library GetDomain", r.pluginConfig.PluginName)
r.pluginGetDomain()
if r.pluginResult.PluginError != nil {
return nil, r.pluginResult.PluginError
}
log.Debugf("Plugin GetDomain result: %v", r.pluginResult.PluginResult)
libDom, ok := r.pluginResult.PluginResult.(registrar.Domain)
if !ok {
return nil, fmt.Errorf("Unexpected Plugin library GetDomain return value type")
}
return ®istrar.Domain{
Name: libDom.Name,
Type: libDom.Type,
SignAndServe: libDom.SignAndServe,
SignAndServeAlgorithm: libDom.SignAndServeAlgorithm,
Masters: libDom.Masters,
TsigKey: libDom.TsigKey,
}, nil
}
// GetTsigKey invokes the plugin library's GetTsigKey entry point for the
// given domain and returns a freshly allocated TSIG key.
func (r *PluginRegistrar) GetTsigKey(ctx context.Context, domain string) (tsigKey *dns.TSIGKey, err error) {
	log := ctx.Value("appLog").(*log.Entry)
	log.Debug("Entering Plugin registrar GetTsigKey")
	// Synchronize library calls
	pluginMutex.Lock()
	defer pluginMutex.Unlock()
	if !r.pluginTest {
		// clear ResultObj
		r.pluginResult.PluginError = nil
		r.pluginResult.PluginResult = nil
		r.pluginArgs.PluginArg = domain
	}
	log.Debugf("Invoking %s library GetTsigKey", r.pluginConfig.PluginName)
	r.pluginGetTsigKey()
	if r.pluginResult.PluginError != nil {
		return nil, r.pluginResult.PluginError
	}
	libTsig, ok := r.pluginResult.PluginResult.(dns.TSIGKey)
	if !ok {
		log.Debugf("Unexpected Plugin library GetTsigKey return value: %v", r.pluginResult.PluginResult)
		return nil, fmt.Errorf("Unexpected Plugin library GetTsigKey return value type")
	}
	// BUG FIX: tsigKey (the named return) was nil here, so assigning to its
	// fields dereferenced a nil pointer and panicked. Allocate the key first.
	tsigKey = &dns.TSIGKey{
		Name:      libTsig.Name,
		Algorithm: libTsig.Algorithm,
		Secret:    libTsig.Secret,
	}
	log.Debugf("Returning Registrar GetTsigKey result")
	return
}
// GetServeAlgorithm asks the plugin library for the sign-and-serve algorithm
// configured for the given domain.
func (r *PluginRegistrar) GetServeAlgorithm(ctx context.Context, domain string) (algo string, err error) {
	log := ctx.Value("appLog").(*log.Entry)
	log.Debug("Entering Plugin registrar GetServeAlgorithm")
	// Library calls share package-level argument/result objects and must be
	// serialized.
	pluginMutex.Lock()
	defer pluginMutex.Unlock()
	if !r.pluginTest {
		// Reset the shared result object and stage the argument.
		r.pluginResult.PluginError = nil
		r.pluginResult.PluginResult = nil
		r.pluginArgs.PluginArg = domain
	}
	log.Debugf("Invoking %s library GetServeAlgorithm", r.pluginConfig.PluginName)
	r.pluginGetServeAlgorithm()
	if perr := r.pluginResult.PluginError; perr != nil {
		return "", perr
	}
	algo, ok := r.pluginResult.PluginResult.(string)
	if !ok {
		log.Debugf("Unexpected Plugin library GetServeAlgorithm return value: %v", r.pluginResult.PluginResult)
		return "", fmt.Errorf("Unexpected Plugin library GetServeAlgorithm return value type")
	}
	log.Debugf("Returning Registrar GetServeAlgorithm result")
	return
}
// GetMasterIPs asks the plugin library for the master nameserver IPs.
func (r *PluginRegistrar) GetMasterIPs(ctx context.Context) ([]string, error) {
	masters := []string{}
	log := ctx.Value("appLog").(*log.Entry)
	log.Debug("Entering Plugin registrar GetMasterIPs")
	// Library calls share package-level argument/result objects and must be
	// serialized.
	pluginMutex.Lock()
	defer pluginMutex.Unlock()
	if !r.pluginTest {
		// Reset the shared result object before invoking the library.
		r.pluginResult.PluginError = nil
		r.pluginResult.PluginResult = nil
	}
	log.Debugf("Invoking %s library GetMasterIPs", r.pluginConfig.PluginName)
	r.pluginGetMasterIPs()
	if perr := r.pluginResult.PluginError; perr != nil {
		return masters, perr
	}
	mlist, ok := r.pluginResult.PluginResult.([]string)
	if !ok {
		log.Debugf("Unexpected Plugin library GetMasterIPs return value: %v", r.pluginResult.PluginResult)
		return masters, fmt.Errorf("Unexpected Plugin library GetMasterIPs return value type")
	}
	log.Debugf("Plugin GetMasterIPs result: %v", mlist)
	masters = append(masters, mlist...)
	return masters, nil
}
|
package main
import (
"context"
"fmt"
"os"
"reflect"
"strings"
"github.com/govim/govim"
"github.com/govim/govim/cmd/govim/config"
"github.com/govim/govim/cmd/govim/internal/golang_org_x_tools/lsp/protocol"
"github.com/govim/govim/cmd/govim/internal/golang_org_x_tools/span"
"github.com/kr/pretty"
)
// Keys of the settings map sent to gopls in Configuration responses.
const (
	goplsConfigNoDocsOnHover  = "noDocsOnHover"
	goplsConfigHoverKind      = "hoverKind"
	goplsDeepCompletion       = "deepCompletion"
	goplsCompletionMatcher    = "matcher"
	goplsStaticcheck          = "staticcheck"
	goplsCompleteUnimported   = "completeUnimported"
	goplsGoImportsLocalPrefix = "local"
	goplsCompletionBudget     = "completionBudget"
	goplsTempModfile          = "tempModfile"
	goplsVerboseOutput        = "verboseOutput"
	goplsEnv                  = "env"
	goplsAnalyses             = "analyses"
)
// Compile-time assertion that *govimplugin implements protocol.Client.
var _ protocol.Client = (*govimplugin)(nil)
// ShowMessage surfaces gopls error and warning messages as a Vim popup;
// informational messages are dropped.
func (g *govimplugin) ShowMessage(ctxt context.Context, params *protocol.ShowMessageParams) error {
	defer absorbShutdownErr()
	g.logGoplsClientf("ShowMessage callback: %v", params.Message)
	var hl string
	switch params.Type {
	case protocol.Error:
		hl = "ErrorMsg"
	case protocol.Warning:
		hl = "WarningMsg"
	default:
		return nil
	}
	g.Schedule(func(govim.Govim) error {
		// Popup options: dismiss on any mouse movement, cursor movement or
		// click, with padding, wrapping and the severity highlight group.
		opts := make(map[string]interface{})
		opts["mousemoved"] = "any"
		opts["moved"] = "any"
		opts["padding"] = []int{0, 1, 0, 1}
		opts["wrap"] = true
		opts["border"] = []int{}
		opts["highlight"] = hl
		opts["line"] = 1
		opts["close"] = "click"
		g.ChannelCall("popup_create", strings.Split(params.Message, "\n"), opts)
		return nil
	})
	return nil
}
// ShowMessageRequest is part of protocol.Client; govim does not support it yet.
func (g *govimplugin) ShowMessageRequest(context.Context, *protocol.ShowMessageRequestParams) (*protocol.MessageActionItem, error) {
	defer absorbShutdownErr()
	panic("ShowMessageRequest not implemented yet")
}
// LogMessage forwards gopls log messages to the govim client log.
func (g *govimplugin) LogMessage(ctxt context.Context, params *protocol.LogMessageParams) error {
	defer absorbShutdownErr()
	g.logGoplsClientf("LogMessage callback: %v", pretty.Sprint(params))
	return nil
}
// Telemetry is part of protocol.Client; govim does not support it yet.
func (g *govimplugin) Telemetry(context.Context, interface{}) error {
	defer absorbShutdownErr()
	panic("Telemetry not implemented yet")
}
// RegisterCapability acknowledges dynamic capability registrations from
// gopls by logging them; no further action is taken.
func (g *govimplugin) RegisterCapability(ctxt context.Context, params *protocol.RegistrationParams) error {
	defer absorbShutdownErr()
	g.logGoplsClientf("RegisterCapability: %v", pretty.Sprint(params))
	return nil
}
// UnregisterCapability is part of protocol.Client; govim does not support it yet.
func (g *govimplugin) UnregisterCapability(context.Context, *protocol.UnregistrationParams) error {
	defer absorbShutdownErr()
	panic("UnregisterCapability not implemented yet")
}
// WorkspaceFolders is part of protocol.Client; govim does not support it yet.
func (g *govimplugin) WorkspaceFolders(context.Context) ([]protocol.WorkspaceFolder, error) {
	defer absorbShutdownErr()
	panic("WorkspaceFolders not implemented yet")
}
// Configuration answers gopls's workspace/configuration request by building
// the "gopls" settings map from the current vimstate config. Only settings
// the user explicitly configured (non-nil) are forwarded.
func (g *govimplugin) Configuration(ctxt context.Context, params *protocol.ParamConfiguration) ([]interface{}, error) {
	defer absorbShutdownErr()
	g.logGoplsClientf("Configuration: %v", pretty.Sprint(params))
	// Snapshot the config under the lock; Vim events may swap it.
	g.vimstate.configLock.Lock()
	conf := g.vimstate.config
	defer g.vimstate.configLock.Unlock()
	// gopls now sends params.Items for each of the configured
	// workspaces. For now, we assume that the first item will be
	// for the section "gopls" and only configure that. We will
	// configure further workspaces when we add support for them.
	if len(params.Items) == 0 || params.Items[0].Section != "gopls" {
		return nil, fmt.Errorf("govim gopls client: expected at least one item, with the first section \"gopls\"")
	}
	res := make([]interface{}, len(params.Items))
	goplsConfig := make(map[string]interface{})
	goplsConfig[goplsConfigHoverKind] = "FullDocumentation"
	if conf.CompletionDeepCompletions != nil {
		goplsConfig[goplsDeepCompletion] = *conf.CompletionDeepCompletions
	}
	if conf.CompletionMatcher != nil {
		goplsConfig[goplsCompletionMatcher] = *conf.CompletionMatcher
	}
	if conf.Staticcheck != nil {
		goplsConfig[goplsStaticcheck] = *conf.Staticcheck
	}
	if conf.CompleteUnimported != nil {
		goplsConfig[goplsCompleteUnimported] = *conf.CompleteUnimported
	}
	if conf.GoImportsLocalPrefix != nil {
		goplsConfig[goplsGoImportsLocalPrefix] = *conf.GoImportsLocalPrefix
	}
	if conf.CompletionBudget != nil {
		goplsConfig[goplsCompletionBudget] = *conf.CompletionBudget
	}
	if g.vimstate.config.TempModfile != nil {
		goplsConfig[goplsTempModfile] = *conf.TempModfile
	}
	// Verbose gopls output is driven by an environment variable, not config.
	if os.Getenv(string(config.EnvVarGoplsVerbose)) == "true" {
		goplsConfig[goplsVerboseOutput] = true
	}
	if conf.Analyses != nil {
		goplsConfig[goplsAnalyses] = *conf.Analyses
	}
	if g.vimstate.config.GoplsEnv != nil {
		// It is safe not to copy the map here because a new config setting from
		// Vim creates a new map.
		goplsConfig[goplsEnv] = *conf.GoplsEnv
	}
	res[0] = goplsConfig
	g.logGoplsClientf("Configuration response: %v", pretty.Sprint(res))
	return res, nil
}
// ApplyEdit is part of protocol.Client; govim does not support it yet.
func (g *govimplugin) ApplyEdit(context.Context, *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResponse, error) {
	defer absorbShutdownErr()
	panic("ApplyEdit not implemented yet")
}
// Event is part of protocol.Client; govim does not support it yet.
func (g *govimplugin) Event(context.Context, *interface{}) error {
	defer absorbShutdownErr()
	panic("Event not implemented yet")
}
// PublishDiagnostics records the diagnostics gopls pushed for a file and
// schedules a UI refresh, unless the report is an effective no-op.
func (g *govimplugin) PublishDiagnostics(ctxt context.Context, params *protocol.PublishDiagnosticsParams) error {
	defer absorbShutdownErr()
	g.logGoplsClientf("PublishDiagnostics callback: %v", pretty.Sprint(params))
	g.diagnosticsChangedLock.Lock()
	uri := span.URI(params.URI)
	curr, ok := g.rawDiagnostics[uri]
	g.rawDiagnostics[uri] = params
	g.diagnosticsChanged = true
	g.diagnosticsChangedLock.Unlock()
	if !ok {
		// First report for this file: an empty list changes nothing visible.
		if len(params.Diagnostics) == 0 {
			return nil
		}
	} else if reflect.DeepEqual(curr, params) {
		// Whilst we await a solution https://github.com/golang/go/issues/32443
		// use reflect.DeepEqual to avoid hard-coding the comparison
		return nil
	}
	// Refresh on the Vim side, but never while the user is busy.
	g.Schedule(func(govim.Govim) error {
		v := g.vimstate
		if v.userBusy {
			return nil
		}
		return v.handleDiagnosticsChanged()
	})
	return nil
}
// Progress is part of protocol.Client; govim does not support it yet.
func (g *govimplugin) Progress(ctxt context.Context, params *protocol.ProgressParams) error {
	defer absorbShutdownErr()
	panic("Progress not implemented yet")
}
// WorkDoneProgressCreate implements the LSP client's
// window/workDoneProgress/create callback. Not implemented yet: any call panics.
func (g *govimplugin) WorkDoneProgressCreate(ctxt context.Context, params *protocol.WorkDoneProgressCreateParams) error {
	defer absorbShutdownErr()
	panic("WorkDoneProgressCreate not implemented yet")
}
// absorbShutdownErr is deferred by the LSP callbacks: it swallows the
// govim.ErrShuttingDown pseudo-panic raised during shutdown and re-panics
// on anything else.
func absorbShutdownErr() {
	if r := recover(); r != nil && r != govim.ErrShuttingDown {
		panic(r)
	}
}
// logGoplsClientf logs a gopls client message via g.Logf, wrapped in
// start/end marker lines so gopls traffic stands out in the log. A trailing
// newline is appended to format when missing so the end marker always
// begins on its own line.
func (g *govimplugin) logGoplsClientf(format string, args ...interface{}) {
	// Guard the empty string: format[len(format)-1] would panic on "".
	if len(format) == 0 || format[len(format)-1] != '\n' {
		format = format + "\n"
	}
	g.Logf("gopls client start =======================\n"+format+"gopls client end =======================\n", args...)
}
|
package dial
import (
"github.com/go-redis/redis/v7"
)
// RedisDB is the shared Redis client, populated by InitRedisClient.
var RedisDB *redis.Client

// InitRedisClient connects to the Redis server at addr with the supplied
// password, selecting database 1, and stores the client in RedisDB.
func InitRedisClient(addr string, password string) {
	RedisDB = redis.NewClient(&redis.Options{
		Addr:     addr,
		Password: password,
		DB:       1,
	})
}
|
package fs
import (
"io/ioutil"
"os"
"testing"
)
// TestMkfs formats a sparse 1 GiB scratch file with Mkfs and fails the
// test on any error along the way.
func TestMkfs(t *testing.T) {
	fh, err := ioutil.TempFile("", "btrfs-testing.")
	if err != nil {
		// Fatal, not Error: fh is nil here and every later step would
		// dereference it and panic.
		t.Fatal(err)
	}
	// Register cleanup as soon as the file exists, so it also runs when a
	// later step fails the test.
	defer os.Remove(fh.Name())
	// get a 1GB non-allocated (sparse) file
	if err := fh.Truncate(1 * 1024 * 1024 * 1024); err != nil {
		t.Error(err)
	}
	if err := fh.Close(); err != nil {
		t.Error(err)
	}
	if err := Mkfs(fh.Name()); err != nil {
		t.Error(err)
	}
}
|
package usecase
import (
"crypto/md5"
"crypto/tls"
"fmt"
"net/http"
"regexp"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/djimenez/iconv-go"
"github.com/pkg/errors"
"github.com/kenshaw/baseconv"
)
// Fetcher fetches and identifies web documents for one configured host.
type Fetcher struct {
	tls bool // NOTE(review): stored by NewFetcher but never read in the visible methods — confirm
	dns *string // host name this fetcher is restricted to (compared in Match)
	dnsReg *regexp.Regexp // extracts the "scheme://host/" prefix of a URL
}
// NewFetcher initializes the receiver in place (despite the
// constructor-like name it does not allocate): it records the target host,
// the TLS flag, and compiles the host-matching regexp.
func (f *Fetcher) NewFetcher(tls bool, dns *string) {
	f.dns = dns
	f.tls = tls
	f.dnsReg = regexp.MustCompile("https?://.*?/")
}
// Match reports whether the host part of url equals the fetcher's
// configured DNS name. URLs without a "scheme://host/" prefix, and a
// fetcher with no DNS configured, yield false (the original indexed the
// regexp result unconditionally and panicked on both).
func (f *Fetcher) Match(url *string) bool {
	matches := f.dnsReg.FindAllString(*url, 1)
	if len(matches) == 0 || f.dns == nil {
		return false
	}
	getDNS := matches[0]
	if strings.Contains(getDNS, "https://") {
		getDNS = strings.Replace(getDNS, "https://", "", 1)
	} else {
		getDNS = strings.Replace(getDNS, "http://", "", 1)
	}
	// Drop the trailing "/" captured by the regexp before comparing.
	return getDNS[:len(getDNS)-1] == *f.dns
}
// FetchDoc downloads url (TLS certificate verification disabled) and
// parses the response body into a goquery document.
func (f *Fetcher) FetchDoc(url *string) (*goquery.Document, error) {
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(*url)
	if err != nil {
		return nil, errors.Wrap(err, "When FetchDoc client.Get")
	}
	defer resp.Body.Close()
	document, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return nil, errors.Wrap(err, "When FetchDoc")
	}
	return document, nil
}
// FetchDocWithEncoding downloads url (TLS certificate verification
// disabled), transcodes the body from the given charset to UTF-8, and
// returns the parsed document together with the response cookies.
func (f *Fetcher) FetchDocWithEncoding(url *string, charset string) (*goquery.Document, []*http.Cookie, error) {
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(*url)
	if err != nil {
		return nil, nil, errors.Wrap(err, "When FetchDocWithEncoding client.Get")
	}
	defer resp.Body.Close()
	// Transcode the designated charset to UTF-8; `charset` must be one of
	// the charsets known by the iconv package.
	utfBody, err := iconv.NewReader(resp.Body, charset, "utf-8")
	if err != nil {
		return nil, nil, errors.Wrap(err, "When FetchDocWithEncoding Convert UTF-8")
	}
	document, err := goquery.NewDocumentFromReader(utfBody)
	if err != nil {
		return nil, nil, errors.Wrap(err, "When FetchDocWithEncoding")
	}
	return document, resp.Cookies(), nil
}
// GenerateID derives a short identifier from url: md5 -> hex -> base62,
// truncated to 7 characters. If the base conversion fails or yields fewer
// than 7 characters (the original ignored the error and would panic on a
// short result), the first 7 hex digits are used instead — the md5 hex
// digest is always 32 characters, so the fallback is safe and deterministic.
func (f *Fetcher) GenerateID(url *string) *string {
	hash := md5.Sum([]byte(*url))
	hexDigest := fmt.Sprintf("%x", hash)
	id, err := baseconv.Convert(hexDigest, baseconv.DigitsHex, baseconv.Digits62)
	if err != nil || len(id) < 7 {
		id = hexDigest
	}
	id = id[:7]
	return &id
}
|
package inttest
import (
"github.com/Sirupsen/logrus"
"github.com/joatmon08/ovs_exporter/utils"
"time"
"errors"
"testing"
)
// Connection modes and fixtures for the OVS exporter integration tests.
const (
	TCP = "tcp" // exporter reaches OVSDB over TCP
	UNIX = "unix" // exporter reaches OVSDB over a unix socket bind-mount
	CREATE_WAIT_TIME = 2 * time.Second // settle time after starting containers
	EXEC_WAIT_TIME = 5 * time.Second // settle time after exec'ing setup commands
	INTTEST_NETWORK = "ovs_exporter_inttest_network"
	INTTEST_NETWORK_CIDR = "172.19.0.0"
	OPENVSWITCH_IP = "172.19.0.2"
	OPENVSWITCH_PORT = ":6640"
	EXPORTER_PORT = ":9177"
	OPENVSWITCH_JSON = "openvswitch"
	EXPORTER_JSON = "ovs_exporter"
	BRIDGE_ID = "br0"
	PORT_ID = "eth0"
	IP = "192.168.128.5"
	OVS_STATE = "openvswitch_up"
	OVS_INTERFACES = "openvswitch_interfaces_total"
	OVS_PORTS = "openvswitch_ports_total"
)
// Shell command fragments and metric names assembled from the constants above.
var (
	BridgeMetric = "openvswitch_interfaces_statistics{name=\"" + BRIDGE_ID + "\",stat=\"rx_bytes\"}"
	AddBridge = "ovs-vsctl add-br " + BRIDGE_ID
	SetDatapath = "ovs-vsctl set bridge " + BRIDGE_ID + " datapath_type=netdev"
	AddPort = "ovs-vsctl add-port " + BRIDGE_ID + " " + PORT_ID
	CreateBridge = AddBridge + " && " + SetDatapath + " && " + AddPort
	ConfigureBridge = "ifconfig " + BRIDGE_ID + " " + IP
	OVSUNIXCommand = "app -listen-port " + EXPORTER_PORT
	OVSTCPCommand = OVSUNIXCommand + " -uri " + OPENVSWITCH_IP + OPENVSWITCH_PORT
)
// testSetupObject carries the state shared between Setup, RetrieveMetrics
// and Teardown for one integration-test run.
type testSetupObject struct {
	ovsConnectionMode string // TCP or UNIX
	containerExecCmd string // optional command exec'd inside the OVS container
	ovsContainerID string
	ovsExporterContainerID string
	networkID string
	metrics map[string]string // scraped metric line -> value
}
// createContainers starts an Open vSwitch container and an ovs_exporter
// container on the integration-test network and returns both container
// ids. In UNIX mode the OVS run directory is bind-mounted into both
// containers so the exporter can reach the OVSDB socket. Any docker
// failure panics, aborting the test run.
func createContainers(exporterCmd string) (ovsContainerID string, ovsExporterContainerID string) {
	var err error
	ovsArgs := &utils.OptionalContainerArgs{
		Network: INTTEST_NETWORK,
	}
	if exporterCmd == OVSUNIXCommand {
		ovsArgs.HostBinds = []string{
			"/tmp/openvswitch:/usr/local/var/run/openvswitch",
		}
	}
	ovsContainerID, err = utils.CreateContainer(OPENVSWITCH_JSON, ovsArgs)
	if err != nil {
		panic(err)
	}
	err = utils.StartContainer(ovsContainerID)
	if err != nil {
		panic(err)
	}
	logrus.Debugf("created ovs container %s", ovsContainerID)
	exporterArgs := &utils.OptionalContainerArgs{
		Network: INTTEST_NETWORK,
		Cmd: exporterCmd,
	}
	if exporterCmd == OVSUNIXCommand {
		exporterArgs.HostBinds = []string{
			"/tmp/openvswitch:/var/run/openvswitch",
		}
	}
	ovsExporterContainerID, err = utils.CreateContainer(EXPORTER_JSON, exporterArgs)
	if err != nil {
		panic(err)
	}
	err = utils.StartContainer(ovsExporterContainerID)
	if err != nil {
		panic(err)
	}
	logrus.Debugf("created ovs exporter container %s", ovsExporterContainerID)
	// Give both containers time to come up before callers poke at them.
	time.Sleep(CREATE_WAIT_TIME)
	return ovsContainerID, ovsExporterContainerID
}
// RetrieveMetrics scrapes the exporter's local HTTP endpoint and stores
// the resulting metric map on testSetup. It returns an error when the
// scrape fails or yields no metrics at all.
func RetrieveMetrics(testSetup *testSetupObject) error {
	client := utils.NewOVSExporterClient("http://localhost:9177")
	metrics, err := client.GetExporterMetrics()
	if err != nil {
		return err
	}
	if len(metrics) == 0 {
		return errors.New("no metrics, metrics map is empty")
	}
	testSetup.metrics = metrics
	return nil
}
// Setup creates the test network and both containers for the connection
// mode recorded on testSetup, optionally exec's a preparatory command in
// the OVS container, and returns the populated testSetup.
func Setup(t *testing.T, testSetup *testSetupObject) (*testSetupObject) {
	var ovsEntrypoint string
	networkID, err := utils.CreateNetwork(INTTEST_NETWORK, INTTEST_NETWORK_CIDR)
	if err != nil {
		t.Error(err)
	}
	testSetup.networkID = networkID
	// Pick the exporter command matching the requested OVSDB transport.
	switch connection := testSetup.ovsConnectionMode; connection {
	case TCP:
		ovsEntrypoint = OVSTCPCommand
	case UNIX:
		ovsEntrypoint = OVSUNIXCommand
	default:
		t.Error("Specify unix or tcp mode for OVS container")
	}
	ovs, exporter := createContainers(ovsEntrypoint)
	testSetup.ovsExporterContainerID = exporter
	testSetup.ovsContainerID = ovs
	// No preparatory command: the fixture is complete.
	if testSetup.containerExecCmd == "" {
		return testSetup
	}
	commands := []string{utils.SHELL, utils.COMMAND_OPTION, testSetup.containerExecCmd}
	if err := utils.ExecuteContainer(ovs, commands); err != nil {
		t.Error(err)
	}
	// Give the exec'd command time to take effect before tests scrape.
	time.Sleep(EXEC_WAIT_TIME)
	return testSetup
}
// Teardown deletes both test containers (exporter first, then OVS) and the
// test network, logging — but not aborting on — any cleanup failure.
func Teardown(ovsContainerID string, ovsExporterContainerID string, networkID string) {
	for _, containerID := range []string{ovsExporterContainerID, ovsContainerID} {
		if err := utils.DeleteContainer(containerID); err != nil {
			logrus.Error(err)
		}
	}
	if err := utils.DeleteNetwork(networkID); err != nil {
		logrus.Error(err)
	}
}
|
package mongoapi
import (
"fmt"
"time"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// Student is the document shape stored in the test collection.
type Student struct{
	Name string `bson:"name"` // query key used by main
	Age uint8 `bson:"age"`
	ID uint8 `bson:"id"`
}
// main measures the latency of one insert and one count query against a
// MongoDB collection, panicking on any connection or query error.
//
// Fixes over the original: each error is now checked immediately after the
// operation (the original printed a timing and a success message even when
// the operation had failed, only panicking afterwards), and the Println
// arguments no longer embed "\n" (Println adds its own newline).
func main() {
	const (
		Host       = "10.47.2.8:27017"
		Username   = ""
		Password   = ""
		Database   = "NewTest"
		Collection = "TestStudent"
	)
	student := Student{
		Name: "Anthony",
		Age:  21,
		ID:   0, // the original's 0000 is just the int literal 0
	}
	session, err := mgo.DialWithInfo(&mgo.DialInfo{
		Addrs:    []string{Host},
		Username: Username,
		Password: Password,
		Database: Database,
	})
	if err != nil {
		panic(err)
	}
	defer session.Close()
	fmt.Println("\nConnected to", session.LiveServers())
	coll := session.DB(Database).C(Collection)

	// Time the insert; check the error before reporting anything.
	start := time.Now()
	err = coll.Insert(student)
	insertNanos := time.Since(start).Nanoseconds()
	if err != nil {
		panic(err)
	}
	fmt.Println("\nTime to insert the document:", insertNanos, "ns")
	fmt.Println("Document inserted successfully!")

	// Time the count query for the student we just inserted.
	stud := "Anthony"
	start = time.Now()
	count, err := coll.Find(bson.M{"name": stud}).Count()
	readNanos := time.Since(start).Nanoseconds()
	if err != nil {
		panic(err)
	}
	fmt.Println("\nTime to read the document:", readNanos, "ns")
	fmt.Printf("%s appears %d times.\n", stud, count)
}
|
package main
import "fmt"
import "time"
// processStuff simulates one second of work, then reports value back on
// channel. Used by main to fan out many concurrent "jobs".
func processStuff(value int, channel chan<- int) {
	time.Sleep(time.Second * 1)
	channel <- value
}
// main launches one goroutine per "second in a day" (86400), drains all of
// their results from a shared buffered channel, and prints the wall-clock
// time the whole fan-out/fan-in took.
func main() {
	start := time.Now()
	const numSeconds = 86400 // number of seconds in a day
	results := make(chan int, numSeconds)
	for i := 0; i < numSeconds; i++ {
		go processStuff(i, results)
	}
	for i := 0; i < numSeconds; i++ {
		fmt.Println(<-results)
	}
	fmt.Println(time.Since(start))
}
|
package merchants
import "github.com/mixnote/mixnote-api-go/src/core/models"
// IMerchant abstracts a merchant/payment-provider integration.
type IMerchant interface {
	// Name returns the human-readable provider name.
	Name() string
	// Identifier returns the provider's unique key.
	Identifier() string
	// Customer resolves the provider-side customer record for the given
	// user. NOTE(review): the payload is provider-specific, hence
	// interface{} — confirm the concrete shape with each implementation.
	Customer(*models.User) (interface{},error)
}
|
/*
You have 2 numbers, each stored separately as a numeric data type.
First number is always 6 digits long.
Second number can vary between 1 and 4 digits. If it's less than 4 digits, it needs to be padded with numeric value 0.
End result always needs to be 10 digits number.
Order has to be respected. n1|n2
Example #1:
n1 = 111111
n2 = 2222
result = 1111112222
Example #2:
n1 = 333333
n2 = 44
result = 3333330044
The rule is that you can only use numeric data types (number, int, float, decimal) to get the desired result.
*/
package main
import (
"fmt"
"strconv"
)
// main checks concat against the two worked examples from the puzzle
// statement above; assert panics on any mismatch.
func main() {
	assert(concat(111111, 2222) == 1111112222)
	assert(concat(333333, 44) == 3333330044)
}
// assert panics when its condition is false; a minimal test helper.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// concat joins x (a 6-digit number) and y (1-4 digits, zero-padded to 4)
// into one 10-digit number, e.g. concat(333333, 44) == 3333330044.
//
// Pure integer arithmetic: y occupies the last four decimal digits, so the
// result is x shifted left by four digits plus y. This honours the
// puzzle's "numeric data types only" rule and removes the string
// round-trip whose ParseUint error was silently discarded.
func concat(x, y uint64) uint64 {
	return x*10000 + y
}
|
/*
创建者 : Luke
日期 : 2020年3月17日
联系方式: soekchl@163.com
*/
package main
import (
. "github.com/soekchl/myUtils"
nl "github.com/soekchl/networkLayer"
"time"
)
// main runs the echo demo: the server listens on :1111 in this goroutine
// while a client connects to it one second later from another goroutine.
func main() {
	go func() {
		time.Sleep(time.Second)
		Client()
	}()
	Server()
}
// Client dials the demo server on :1111 and runs one send/receive
// exchange. NOTE(review): the third nl.Dial argument (5) is presumably a
// timeout — confirm its units against the networkLayer package.
func Client() {
	client, err := nl.Dial("tcp", ":1111", 5)
	if err != nil {
		Error(err)
		return
	}
	Notice("客户端连接成功!") // "client connected successfully"
	clientLoop(client)
}
// clientLoop sends a single fixed frame to the server and prints the echo
// it receives back.
func clientLoop(session *nl.Session) {
	data := &nl.FormatData{
		Id: 1,
		Seq: 2,
		Body: []byte{1, 3, 4, 5, 4},
	}
	err := session.Send(data)
	if err != nil {
		Error(err)
		return
	}
	Notice("Send Ok!")
	data, err = session.Receive()
	if err != nil {
		Error(err)
		return
	}
	Notice("Client Recv: ", data)
}
// Server listens on :1111 and serves each accepted session with
// serverLoop. Serve blocks for the lifetime of the listener.
func Server() {
	server, err := nl.Listen("tcp", ":1111", 5, nl.HandlerFunc(serverLoop))
	if err != nil {
		Error(err)
		return
	}
	Notice("服务器开启!") // "server started"
	server.Serve()
}
// serverLoop handles one accepted session: it echoes every received frame
// back to the client until Receive or Send fails, then closes the session.
func serverLoop(session *nl.Session) {
	defer session.Close()
	Notice("服务器 接收连接:", session.RemoteAddr())
	for {
		data, err := session.Receive()
		if err != nil {
			Error(err)
			return
		}
		Notice(data)
		// The original discarded the Send error; a failed echo means the
		// connection is unusable, so log it and end the loop.
		if err := session.Send(data); err != nil {
			Error(err)
			return
		}
	}
}
|
package main
import (
"fmt"
)
// main demonstrates draining a channel with range: a producer goroutine
// sends 0..9 and then closes the channel, which is what terminates the
// consuming range loop.
func main() {
	c := make(chan int)
	go func() {
		defer close(c)
		for i := 0; i < 10; i++ {
			c <- i
		}
	}()
	for n := range c {
		fmt.Println(n)
	}
}
/*
remember to close your channel
if you do not close your channel, you will receive this error
fatal error: all goroutines are asleep - deadlock!
************** IMPORTANT **************
YOU NEED GO VERSION 1.5.2 OR GREATER
otherwise you will receive this error
fatal error: all goroutines are asleep - deadlock!
*/
// go run main.go
// 0
// 1
// 2
// 3
// 4
// 5
// 6
// 7
// 8
// 9
|
package util
import (
"github.com/golang/glog"
coreapi "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metaapi "k8s.io/apimachinery/pkg/apis/meta/v1"
coreset "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/retry"
regopclient "github.com/openshift/cluster-image-registry-operator/pkg/client"
)
// CreateOrUpdateSecret ensures the "<name>-private-configuration" secret
// in namespace carries every key/value pair in data: the secret is created
// when missing and merged into when present. Update conflicts are retried
// with the default backoff.
func CreateOrUpdateSecret(name string, namespace string, data map[string]string) error {
	kubeconfig, err := regopclient.GetConfig()
	if err != nil {
		return err
	}
	client, err := coreset.NewForConfig(kubeconfig)
	if err != nil {
		return err
	}
	secretName := name + "-private-configuration"
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		cur, err := client.Secrets(namespace).Get(secretName, metaapi.GetOptions{})
		if err != nil {
			if !errors.IsNotFound(err) {
				return err
			}
			glog.Warningf("secret %s/%s not found: %s, creating", namespace, secretName, err)
			cur = &coreapi.Secret{
				ObjectMeta: metaapi.ObjectMeta{
					// Use secretName so the created name can never drift
					// from the name used for Get/Update above and below
					// (the original re-derived it inline).
					Name:      secretName,
					Namespace: namespace,
				},
			}
		}
		if cur.StringData == nil {
			cur.StringData = make(map[string]string)
		}
		for k, v := range data {
			cur.StringData[k] = v
		}
		// err still holds the Get result: NotFound means we built a fresh
		// object above and must Create rather than Update.
		if errors.IsNotFound(err) {
			_, err := client.Secrets(namespace).Create(cur)
			return err
		}
		_, err = client.Secrets(namespace).Update(cur)
		return err
	})
}
|
package main
// TreeNode is a binary tree node (LeetCode-style).
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// main builds the sample tree and prints whether it is height-balanced.
func main() {
	testTree := initTree()
	println(isBalanced(testTree))
}
// initTree builds the sample tree 3,9,20,null,null,15,7 used by main.
func initTree() *TreeNode {
	right := &TreeNode{
		Val:   20,
		Left:  &TreeNode{Val: 15},
		Right: &TreeNode{Val: 7},
	}
	return &TreeNode{Val: 3, Left: &TreeNode{Val: 9}, Right: right}
}
// isBalanced reports whether root is height-balanced: for every node, the
// depths of its two subtrees differ by at most one.
func isBalanced(root *TreeNode) bool {
	_, isBalance := getNodeDepth(root)
	return isBalance
}
// getNodeDepth returns the depth of the subtree rooted at node together
// with whether that subtree is height-balanced. Unbalanced subtrees report
// depth 0 (the depth is unused in that case).
func getNodeDepth(node *TreeNode) (int, bool) {
	if node == nil {
		return 0, true
	}
	leftDepth, leftOK := getNodeDepth(node.Left)
	rightDepth, rightOK := getNodeDepth(node.Right)
	diff := leftDepth - rightDepth
	if diff < 0 {
		diff = -diff
	}
	if !leftOK || !rightOK || diff > 1 {
		return 0, false
	}
	deeper := leftDepth
	if rightDepth > leftDepth {
		deeper = rightDepth
	}
	return deeper + 1, true
}
|
package leetcode
import "fmt"
// listPrint prints a singly linked list front-to-back as "v1->v2->...";
// a nil list prints the nil value itself.
func listPrint(node *ListNode) {
	if node == nil {
		fmt.Println(node)
		return
	}
	for node != nil {
		fmt.Print(node.Val)
		if node.Next != nil {
			fmt.Print("->")
		}
		node = node.Next
	}
	fmt.Println()
}
// printComplexList prints a complex linked list (Next + Random pointers)
// as a slice of [value, randomIndex] pairs, where randomIndex is the
// position (in Next order) of the node that Random points at, or nil.
func printComplexList(node *ComplexNode) {
	// Index nodes by identity (pointer), not by Val: the original keyed
	// this map on curr.Val, which silently resolves Random to the wrong
	// position whenever two nodes share the same value.
	indexOf := make(map[*ComplexNode]int)
	idx := 0
	for curr := node; curr != nil; curr = curr.Next {
		indexOf[curr] = idx
		idx++
	}
	var res [][]interface{}
	for curr := node; curr != nil; curr = curr.Next {
		if curr.Random != nil {
			res = append(res, []interface{}{curr.Val, indexOf[curr.Random]})
		} else {
			res = append(res, []interface{}{curr.Val, nil})
		}
	}
	fmt.Println(res)
}
// getListLen counts the nodes reachable from head via Next.
func getListLen(head *ListNode) int {
	n := 0
	for node := head; node != nil; node = node.Next {
		n++
	}
	return n
}
|
/*
The localfile backend, for dealing with a Vagrant catalog on a local filesystem
*/
package caryatid
import (
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"github.com/mrled/caryatid/internal/util"
)
// CaryatidLocalFileBackend stores a Vagrant catalog and its box files on
// the local filesystem.
type CaryatidLocalFileBackend struct {
	VagrantCatalogRootPath string // directory containing the catalog file
	VagrantCatalogPath string // full local path of the catalog JSON file
	Manager *BackendManager // owning manager, attached via SetManager
}
// SetManager attaches the owning BackendManager and derives the local
// catalog path (and its parent directory) from the manager's catalog URI.
func (backend *CaryatidLocalFileBackend) SetManager(manager *BackendManager) (err error) {
	backend.Manager = manager
	backend.VagrantCatalogPath, err = getValidLocalPath(backend.Manager.CatalogUri)
	if err != nil {
		fmt.Printf("Error trying to parse local catalog path from URI: %v\n", err)
		return
	}
	backend.VagrantCatalogRootPath, _ = path.Split(backend.VagrantCatalogPath)
	return
}
// GetManager returns the BackendManager previously attached via
// SetManager, or an error when none has been set. (Receiver renamed from
// "cb" to "backend" for consistency with every other method of this type.)
func (backend *CaryatidLocalFileBackend) GetManager() (manager *BackendManager, err error) {
	manager = backend.Manager
	if manager == nil {
		err = fmt.Errorf("The Manager property was not set")
	}
	return
}
// GetCatalogBytes reads the raw catalog JSON from disk. A missing catalog
// file is not an error: it yields an empty JSON object so a fresh catalog
// can be built up from nothing.
func (backend *CaryatidLocalFileBackend) GetCatalogBytes() (catalogBytes []byte, err error) {
	catalogBytes, err = ioutil.ReadFile(backend.VagrantCatalogPath)
	if os.IsNotExist(err) {
		log.Printf("No file at '%v'; starting with empty catalog\n", backend.VagrantCatalogPath)
		catalogBytes = []byte("{}")
		err = nil
	} else if err != nil {
		log.Printf("Error trying to read catalog: %v\n", err)
	}
	return
}
// SetCatalogBytes writes serializedCatalog to the catalog path, creating
// the catalog root directory first when necessary.
func (backend *CaryatidLocalFileBackend) SetCatalogBytes(serializedCatalog []byte) (err error) {
	err = os.MkdirAll(backend.VagrantCatalogRootPath, 0777)
	if err != nil {
		// Fix: the original format string ended in "\b" (a backspace
		// control character) instead of "\n".
		log.Printf("Error trying to create the catalog root path at '%v': %v\n", backend.VagrantCatalogRootPath, err)
		return
	}
	err = ioutil.WriteFile(backend.VagrantCatalogPath, serializedCatalog, 0666)
	if err != nil {
		log.Println("Error trying to write catalog: ", err)
		return
	}
	// Plain Println; the original wrapped a constant string in fmt.Sprintf
	// for no reason.
	log.Println("Catalog updated on disk to reflect new value")
	return
}
// CopyBoxFile copies the box file at localPath into its canonical location
// in the catalog tree, derived from the box name/version/provider,
// creating the destination directory as needed.
func (backend *CaryatidLocalFileBackend) CopyBoxFile(localPath string, boxName string, boxVersion string, boxProvider string) (err error) {
	var boxUri string
	boxUri, err = BoxUriFromCatalogUri(backend.VagrantCatalogPath, boxName, boxVersion, boxProvider)
	if err != nil {
		fmt.Printf("Error trying to determine box URI: %v\n", err)
		return
	}
	// "remote" here still means a local filesystem path — this backend's
	// remote store IS the local disk.
	remoteBoxPath, err := getValidLocalPath(boxUri)
	if err != nil {
		fmt.Printf("Error trying to parse local artifact path from URI: %v\n", err)
		return
	}
	remoteBoxParentPath, _ := path.Split(remoteBoxPath)
	err = os.MkdirAll(remoteBoxParentPath, 0777)
	if err != nil {
		log.Println("Error trying to create the box directory: ", err)
		return
	}
	log.Printf("Successfully created directory at %v\n", remoteBoxParentPath)
	written, err := util.CopyFile(localPath, remoteBoxPath)
	if err != nil {
		log.Printf("Error trying to copy '%v' to '%v' file: %v\n", localPath, remoteBoxPath, err)
		return
	}
	log.Printf("Copied %v bytes from original path at '%v' to new location at '%v'\n", written, localPath, remoteBoxPath)
	return
}
// DeleteFile removes the local file identified by uri, which must use this
// backend's "file" scheme.
func (backend *CaryatidLocalFileBackend) DeleteFile(uri string) (err error) {
	var u *url.URL
	u, err = url.Parse(uri)
	if err != nil {
		return fmt.Errorf("Could not parse '%v' as URI: %v", uri, err)
	}
	if u.Scheme != backend.Scheme() {
		return fmt.Errorf("Expected scheme '%v' but was given a URI with scheme '%v'", backend.Scheme(), u.Scheme)
	}
	// Renamed from "path": that identifier shadowed the imported "path"
	// package inside this function.
	var localPath string
	if localPath, err = getValidLocalPath(uri); err != nil {
		return
	}
	if err = os.Remove(localPath); err != nil {
		return
	}
	return
}
// Scheme returns the URI scheme ("file") handled by this backend.
func (backend *CaryatidLocalFileBackend) Scheme() string {
	return "file"
}
// Get a valid local path from a URI
// Converts URI paths (with '/' separator) to Windows paths (with '\' separator) when on Windows
// On Windows, a URI will sometimes be in the form 'file:///C:/path/to/something' (or 'file:///C:\\path\\to\\something')
// getValidLocalPath converts a file:// URI into a usable local filesystem
// path.
//
// On Windows, URIs look like 'file:///C:/path' (or 'file:///C:\\path'),
// so the URI's path component carries a spurious leading slash
// ("/C:/path"); that slash is stripped before cleaning. filepath.Clean
// also normalizes separators for the host OS.
func getValidLocalPath(uri string) (outpath string, err error) {
	u, err := url.Parse(uri)
	if err != nil {
		return
	}
	outpath = u.Path
	if outpath == "" {
		err = fmt.Errorf("No valid path information was provided in the URI '%v'", uri)
		return
	}
	// Strip the leading slash from Windows-style "/C:..." paths.
	matched, err := regexp.MatchString("^/[a-zA-Z]:", outpath)
	if err != nil {
		return
	}
	if matched {
		outpath = outpath[1:]
	}
	outpath = filepath.Clean(outpath)
	return
}
|
package main
import (
"bufio"
"fmt"
"net/http"
"os"
"strings"
)
// main fetches the URL given as the single CLI argument and prints every
// href target found in the response body, one per line.
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Provide one url")
		return
	}
	url := os.Args[1]
	resp, err := http.Get(url)
	// Check the error before touching resp: the original deferred
	// resp.Body.Close() first, which panics with a nil response when the
	// request fails, and then scanned the nil body anyway.
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	var resultUrls []string
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		if href := scanner.Text(); strings.Contains(href, "href=") {
			for _, val := range strings.Split(href, " ") {
				if strings.Contains(val, "href") {
					// Guard the quote split: attributes written without
					// quotes would have panicked on index 1.
					parts := strings.Split(val, "\"")
					if len(parts) < 2 {
						continue
					}
					val = parts[1]
					if val != "#" {
						resultUrls = append(resultUrls, val)
					}
				}
			}
		}
	}
	for _, val := range resultUrls {
		fmt.Println(val)
	}
}
|
package main
import (
"fmt"
"math/rand"
"sync"
"time"
)
// Use a fixed number of goroutines to work through a fixed amount of work.
const (
	grNum = 4 // number of worker goroutines
	taskLoad = 10 // number of tasks to enqueue
)
var wg5 sync.WaitGroup // waits for all worker goroutines to finish
func init() {
	rand.Seed(time.Now().Unix()) // seed the default source so per-task delays vary run to run
}
// main starts grNum workers sharing one buffered task channel, enqueues
// taskLoad tasks, closes the channel, then waits for the workers to drain
// it and exit.
func main() {
	tasks := make(chan string, taskLoad) // buffered channel of task descriptions
	wg5.Add(grNum) // one WaitGroup slot per worker (grNum, not 2 as the old comment claimed)
	for gr := 1; gr <= grNum; gr++ { // start the workers
		go worker(tasks, gr)
	}
	// enqueue the work
	for post := 1; post <= taskLoad; post++ {
		tasks <- fmt.Sprintf("Task : %d", post)
	}
	close(tasks) // no more sends: receivers drain the buffer, then observe the close instead of blocking
	wg5.Wait() // block until every worker has called Done
}
// worker consumes tasks from the shared channel until it is closed and
// drained, simulating a random amount of work per task.
func worker(tasks chan string, worker int) {
	defer wg5.Done() // decrement the WaitGroup when this worker exits
	for {
		// wait for the next piece of work
		task, ok := <-tasks
		if !ok { // channel closed and its buffer fully received
			fmt.Printf("Worker: %d : Shutting Down\n", worker)
			break
		}
		fmt.Printf("Worker: %d : Started %s\n", worker, task) // start the task
		time.Sleep(time.Duration(rand.Int63n(100)) * time.Millisecond) // simulate work
		fmt.Printf("Worker: %d : Completed %s\n", worker, task) // done
	}
}
/*
1.带缓冲通道
1.从通道接收数据前能存储数据
2.不强制要求收发数据的goroutine必须同时准备好/完成
3.只有通道内可用缓冲区 < 发送数据时, 发送goroutine才会阻塞 | 只有通道中没有数据时, 接收goroutine才会阻塞
*/
|
// Package gmachine implements a simple virtual CPU, known as the G-machine.
package gmachine
// DefaultMemSize is the number of 64-bit words of memory which will be
// allocated to a new G-machine by default.
const DefaultMemSize = 1024
// Opcodes understood by the G-machine. OpSETA is the only two-word
// instruction: its operand is read from the memory word that follows it.
const (
	// OpHALT terminates Gmachine
	OpHALT uint64 = iota
	// OpNOOP does nothing
	OpNOOP
	// OpINCA increments A
	OpINCA
	// OpDECA decrements A
	OpDECA
	// OpSETA sets A to the value stored in the next memory word
	OpSETA
)
// Gmachine represents a G-machine.
type Gmachine struct {
	P uint64 // P is the program counter: index of the next memory word to execute
	A uint64 // A is the accumulator, holding the result of the last operation
	Memory []uint64 // Memory is the machine's word-addressed main memory
}
// New creates and initializes a new G-machine: zeroed registers and
// DefaultMemSize words of zeroed memory.
func New() *Gmachine {
	return &Gmachine{
		Memory: make([]uint64, DefaultMemSize),
	}
}
// Run executes instructions from memory, starting at the current program
// counter, until OpHALT is hit or the program counter runs off the end of
// memory. Unknown opcodes are treated as no-ops.
func (gm *Gmachine) Run() {
	for gm.P < DefaultMemSize {
		gm.P++
		switch gm.Memory[gm.P-1] {
		case OpHALT:
			return
		case OpNOOP:
			// do nothing
		case OpINCA:
			gm.A++
		case OpDECA:
			gm.A--
		case OpSETA:
			// OpSETA reads its operand from the next word. Guard against
			// an OpSETA sitting in the final memory word, which would
			// index one past the end of Memory.
			if gm.P >= DefaultMemSize {
				return
			}
			gm.A = gm.Memory[gm.P]
			gm.P++
		default:
			// unknown opcode: skip
		}
	}
}
// RunProgram loads the program into the start of G-machine memory, then
// starts it by calling Run. Programs longer than memory are truncated
// (copy stops at len(gm.Memory)) instead of panicking on an out-of-range
// index, as the original element-by-element loop did.
func (gm *Gmachine) RunProgram(mem []uint64) {
	copy(gm.Memory, mem)
	gm.Run()
}
|
package main
import (
"encoding/json"
"log"
"net/http"
"strconv"
"github.com/gorilla/mux"
"tarot"
)
// SucceedJson is the generic success/failure response payload.
type SucceedJson struct {
	Succeed bool `json:"succeed"`
}
// NewPartyJson is returned when a new party starts; Seat is the seat
// assigned to the requesting player.
type NewPartyJson struct {
	Succeed bool `json:"succeed"`
	Seat int `json:"seat"`
}
// ReadyJson reports whether every seat is taken and the party can begin.
type ReadyJson struct {
	Ready bool `json:"ready"`
}
// PlayerTurnJson carries the id of the player whose turn it is.
type PlayerTurnJson struct {
	PlayerTurn int `json:"playerTurn"`
}
// HistoryJson describes one past trick: who led it and the cards played.
type HistoryJson struct {
	FirstPlayer int `json:"firstPlayer"`
	Cards [tarot.NB_PLAYERS]tarot.Card `json:"cards"`
}
// party is the single in-memory game served by this process.
// NOTE(review): package-level mutable state shared by all handlers with no
// locking — confirm requests for one party are effectively serialized.
var party tarot.Party
/**
* @api {get} /hand/:id Request Hand information.
* @apiName GetHandEndpoint
* @apiGroup Hand
*
* @apiParam {Number} id Users unique ID.
*
* @apiSuccess {List} cards List of cards.
*/
func GetHandEndpoint(w http.ResponseWriter, req *http.Request) {
	// NOTE(review): the Atoi error is discarded, so a non-numeric id
	// becomes 0, and an out-of-range id panics in Players[id] — confirm
	// ids are validated upstream.
	id, _ := strconv.Atoi(mux.Vars(req)["id"])
	json.NewEncoder(w).Encode(party.Players[id].CardsToJson())
}
/**
* @api {get} /newparty Start a new party
* @apiName GetNewpartyEndpoint
* @apiGroup Newparty
*
* @apiSuccess {Boolean} succeed Does the party successfuly start.
*/
func GetNewpartyEndpoint(w http.ResponseWriter, req *http.Request) {
	// Replace the global party with a fresh one and give the requesting
	// player seat 0.
	party = tarot.NewParty()
	party.Seats.AvailableSeats[0] = false
	npJson := NewPartyJson{Succeed: true, Seat: 0}
	json.NewEncoder(w).Encode(npJson)
}
/**
* @api {get} /newparty/status Request if all seats are ready.
* @apiName GetNewpartyStatusEndpoint
* @apiGroup Newparty
*
* @apiSuccess {Boolean} ready Readyness of the party.
*/
// The party is ready exactly when no seat is still available.
func GetNewpartyStatusEndpoint(w http.ResponseWriter, req *http.Request) {
	allTaken := true
	for _, available := range party.Seats.AvailableSeats {
		if available {
			allTaken = false
			break
		}
	}
	json.NewEncoder(w).Encode(ReadyJson{Ready: allTaken})
}
/**
* @api {get} /newparty/available_seats Request seats availability.
* @apiName GetNewpartyAvailableseatsEndpoint
* @apiGroup Newparty
*
* @apiSuccess {List} availableSeats List of booleans.
*/
func GetNewpartyAvailableseatsEndpoint(w http.ResponseWriter, req *http.Request) {
	// Serialize the seat-availability structure directly as the response.
	json.NewEncoder(w).Encode(party.Seats)
}
/**
* @api {post} /newparty/available_seats/:id Take place in the 'id' seat.
* @apiName PostNewpartyAvailableseatsEndpoint
* @apiGroup Newparty
*
* @apiParam {Number} id Users unique ID.
*
* @apiSuccess {List} availableSeats List of booleans.
*/
func PostNewpartyAvailableseatsEndpoint(w http.ResponseWriter, req *http.Request) {
	// The original indexed AvailableSeats with an unchecked id, so a
	// non-numeric or out-of-range URL parameter crashed the handler with
	// an index-out-of-range panic. Reject such requests with 400 instead.
	id, err := strconv.Atoi(mux.Vars(req)["id"])
	if err != nil || id < 0 || id >= len(party.Seats.AvailableSeats) {
		http.Error(w, "invalid seat id", http.StatusBadRequest)
		return
	}
	party.Seats.AvailableSeats[id] = false
	json.NewEncoder(w).Encode(party.Seats)
}
/**
* @api {get} /table Request Hand information.
* @apiName GetTableEndpoint
* @apiGroup Table
*
* @apiSuccess {[2]float32} scores Actual score of defender/taker.
* @apiSuccess {[NB_PLAYERS]Card} cards on the table.
* @apiSuccess {Integer} playerTurn ID of the player turn.
* @apiSuccess {Integer} firstPlayer ID of the first player who played.
* @apiSuccess {Integer} trickNb Trick's number.
* @apiSuccess {[NB_PLAYERS]int} isTaker Return the taker status of players.
*/
func GetTableEndpoint(w http.ResponseWriter, req *http.Request) {
	// Serialize the whole table state (scores, cards, turn, history ids)
	// as described in the apidoc block above.
	json.NewEncoder(w).Encode(party.Table)
}
/**
* @api {get} /table/valid_cards/:id Request valid cards in Hand.
* @apiName GetTableValidCardsEndpoint
* @apiGroup Table
*
* @apiSuccess {Boolean} isYourTurn It is your turn to play.
* @apiSuccess {[]Card} validCards Return valid cards.
*/
func GetTableValidCardsEndpoint(w http.ResponseWriter, req *http.Request) {
	// NOTE(review): the Atoi error is discarded — a bad id silently
	// becomes 0; confirm ValidCards tolerates any int.
	id, _ := strconv.Atoi(mux.Vars(req)["id"])
	json.NewEncoder(w).Encode(party.ValidCards(id))
}
/**
* @api {post} /table/:id/:color/:number Play a card.
* @apiName PostTableEndpoint
* @apiGroup Table
*
* @apiParam {Number} id Users unique ID.
* @apiParam {Number} color Color of the playing card.
* @apiParam {Number} number Number of the playing card.
*
* @apiSuccess {Boolean} succeed Does the card can be played.
*/
func PostTableEndpoint(w http.ResponseWriter, req *http.Request) {
	// Validate all three untrusted URL parameters up front: the original
	// discarded every Atoi error, silently turning garbage into card 0
	// played by player 0. Reject malformed requests with 400 instead.
	vars := mux.Vars(req)
	id, errID := strconv.Atoi(vars["id"])
	color, errColor := strconv.Atoi(vars["color"])
	number, errNumber := strconv.Atoi(vars["number"])
	if errID != nil || errColor != nil || errNumber != nil {
		http.Error(w, "id, color and number must be integers", http.StatusBadRequest)
		return
	}
	c := tarot.Card{Color: tarot.Color(color), Number: number}
	b := party.PlayCard(c, id)
	json.NewEncoder(w).Encode(SucceedJson{Succeed: b})
}
/**
* @api {get} /table/trick Request Trick information.
* @apiName GetTablePlayerTurnEndpoint
* @apiGroup Table
*
* @apiSuccess {Boolean} playerTurn Current trick.
*/
func GetTablePlayerTurnEndpoint(w http.ResponseWriter, req *http.Request) {
	// Report only whose turn it currently is.
	trick := PlayerTurnJson{PlayerTurn: party.Table.PlayerTurn}
	json.NewEncoder(w).Encode(trick)
}
//TODO: Ready to play the next trick
/**
* @api {get} /table/:trick/:id Get ready for the next trick.
* @apiName GetTableTrickIdEndpoint
* @apiGroup Table
*
* @apiParam {Number} trick Trick Number.
* @apiParam {Number} id Users unique ID.
*
* @apiSuccess {Empty} Empty Empty brace.
*/
// Stub: acknowledges the request with an empty payload; the trick/id
// parameters are parsed but unused pending the TODO above.
func GetTableTrickIdEndpoint(w http.ResponseWriter, req *http.Request) {
	// trick, _ := strconv.Atoi(mux.Vars(req)["trick"])
	// id, _ := strconv.Atoi(mux.Vars(req)["id"])
	// fmt.Println(trick, id)
	json.NewEncoder(w).Encode("{}")
}
/**
* @api {get} /history/:trickNb Get old trick.
* @apiName GetHistoryEndpoint
* @apiGroup History
*
* @apiParam {Number} trickNb Trick number.
*
* @apiSuccess {Number} firstPlayer Trick first player.
* @apiSuccess {[3]Cards} cards Trick cards.
*/
func GetHistoryEndpoint(w http.ResponseWriter, req *http.Request) {
	// NOTE(review): the Atoi error is discarded; garbage becomes 0, which
	// passes the bounds check and returns trick 0's history.
	trickNb, _ := strconv.Atoi(mux.Vars(req)["trickNb"])
	if trickNb >= 0 && trickNb < tarot.NB_CARDS_PER_PLAYER {
		json.NewEncoder(w).Encode(
			HistoryJson{
				FirstPlayer: party.Table.HistoryFirstPlayer[trickNb],
				Cards: party.Table.HistoryCards[trickNb],
			})
	} else {
		// Out-of-range tricks answer with a zero-valued history object.
		json.NewEncoder(w).Encode(HistoryJson{})
	}
}
// main creates the initial party, registers all HTTP routes, and serves
// the API on port 12345 until the listener fails.
func main() {
	party = tarot.NewParty()
	router := mux.NewRouter()
	router.HandleFunc("/hand/{id}", GetHandEndpoint).Methods("GET")
	router.HandleFunc("/newparty", GetNewpartyEndpoint).Methods("GET")
	router.HandleFunc("/newparty/status", GetNewpartyStatusEndpoint).Methods("GET")
	router.HandleFunc("/newparty/available_seats", GetNewpartyAvailableseatsEndpoint).Methods("GET")
	router.HandleFunc("/newparty/available_seats/{id}", PostNewpartyAvailableseatsEndpoint).Methods("POST")
	router.HandleFunc("/table", GetTableEndpoint).Methods("GET")
	router.HandleFunc("/table/valid_cards/{id}", GetTableValidCardsEndpoint).Methods("GET")
	router.HandleFunc("/table/{id}/{color}/{number}", PostTableEndpoint).Methods("POST")
	router.HandleFunc("/table/trick", GetTablePlayerTurnEndpoint).Methods("GET")
	router.HandleFunc("/table/{trick}/{id}", GetTableTrickIdEndpoint).Methods("GET")
	router.HandleFunc("/history/{trickNb}", GetHistoryEndpoint).Methods("GET")
	log.Fatal(http.ListenAndServe(":12345", router))
}
|
/* Cliente del chat server que consuma los datos de las
goroutines y channles */
package main
import (
"io"
"log"
"net"
"os"
)
// main connects to the chat server on localhost:8005, streams the server's
// output to stdout from a background goroutine, and copies stdin to the
// connection until EOF; it then waits for the background copy to finish.
func main() {
	conn, err := net.Dial("tcp", "localhost:8005")
	if err != nil {
		log.Fatal(err)
	}
	// done signals that the server->stdout copy has finished.
	done := make(chan struct{})
	// Background goroutine: stream everything the server sends to stdout.
	go func() {
		io.Copy(os.Stdout, conn) // copy errors deliberately ignored
		log.Println("Terminamos")
		// notify the main goroutine
		done <- struct{}{}
	}()
	mustCopy(conn, os.Stdin)
	conn.Close()
	// wait for the background goroutine to finish
	<-done
}
// mustCopy copies src to dst in full and aborts the process on any error.
func mustCopy(dst io.Writer, src io.Reader) {
	_, err := io.Copy(dst, src)
	if err != nil {
		log.Fatal(err)
	}
}
|
package main
import "testing"
// Test_race exercises race() (defined elsewhere in this package); it is
// intended to be run under `go test -race` to surface data races.
func Test_race(t *testing.T) {
	race()
}
|
package etcd
import (
"context"
"fmt"
"time"
"github.com/coreos/etcd/clientv3"
//"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"grm-service/log"
. "grm-service/util"
)
var (
	dialTimeout = 5 * time.Second // timeout for establishing the etcd connection
	requestTimeout = 10 * time.Second // NOTE(review): appears unused in the visible code — confirm
)
// etcd key-space layout used by the service.
const (
	KEY_SPLIT = "/"
	KEY_SESSION = "/titanauth/session"
	KEY_USERS = "/titanauth/users"
	KEY_CAPTCHA = "/titanauth/captcha"
	KEY_GRM_EXPLORER = "/titangrm/explorer"
	KEY_GRM_DATAWORKER = "/titangrm/dataworker"
	KEY_GRM_SEARCHER_HISTORY = "/titangrm/searcher/history"
	KEY_GRM_USERDATA = "/titangrm/user_data"
	KEY_GRM_STORAGE = "/titangrm/storage"
	// NOTE(review): "KYE" looks like a typo for "KEY", but renaming this
	// exported constant would break callers — left as-is.
	KYE_GRM_COMMENT = "/titangrm/comments"
	KEY_GRM_TYPE_AGGR = "/titangrm/aggregation"
)
// DynamicEtcd wraps an etcd v3 client and the endpoints it connects to.
type DynamicEtcd struct {
	Endpoints []string // etcd endpoints to dial
	Cli *clientv3.Client // nil until Connect succeeds
}
// Connect lazily establishes the etcd client connection; it is a no-op
// when a client already exists.
func (e *DynamicEtcd) Connect() error {
	if e.Cli != nil {
		return nil
	}
	var err error
	e.Cli, err = clientv3.New(clientv3.Config{
		Endpoints: e.Endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Error(err)
		return err
	}
	return nil
}
// IsConnect reports whether an etcd client connection has been
// established (i.e. Connect has succeeded).
func (e *DynamicEtcd) IsConnect() bool {
	// Fix: the original returned e.Cli == nil, i.e. true when NOT
	// connected, inverting the method's meaning.
	return e.Cli != nil
}
// DisConnect closes the etcd client connection. It is a no-op when no
// connection was ever established (the original dereferenced a nil Cli
// and panicked in that case).
func (e *DynamicEtcd) DisConnect() error {
	if e.Cli == nil {
		return nil
	}
	return e.Cli.Close()
}
// 获取用户id
func (e *DynamicEtcd) GetUserId(session string) (string, error) {
if len(session) == 0 {
return "", fmt.Errorf(TR("Invalid user session:%s", session))
}
//fmt.Println("session key:", KEY_SESSION+"/"+session)
resp, err := e.Cli.Get(context.Background(), KEY_SESSION+"/"+session)
if err != nil {
log.Println("Failed to get user id (%s):", session, err)
return "", fmt.Errorf(TR("Failed to get user id (%s):", session, err))
}
if len(resp.Kvs) == 0 {
return "", fmt.Errorf(TR("Session is timeout:%s", session))
}
return string(resp.Kvs[0].Value), nil
}
// 获取用户名
func (e *DynamicEtcd) GetUserName(id string) (string, error) {
if len(id) == 0 {
return "", fmt.Errorf(TR("Invalid user id:%s", id))
}
key := fmt.Sprintf("%s/%s", KEY_USERS, id)
resp, err := e.Cli.Get(context.Background(), key+"/name")
if err != nil {
log.Printf("Failed to get user name (%s):\n", key, err)
return "", err
}
if len(resp.Kvs) == 0 {
log.Println("Failed to get user name :", key)
return "", fmt.Errorf(TR("Invalid user id:", id))
}
return string(resp.Kvs[0].Value), nil
}
|
package http
import (
	"encoding/json"
	"fmt"
	"net/http"
	"reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"

	"github.com/twainy/goban"
	"github.com/twainy/tiroler/crawler"
	"github.com/zenazn/goji"
	"github.com/zenazn/goji/web"
)
// Start wires the routes into goji's default mux and runs the HTTP server.
// goji.Serve blocks for the lifetime of the server.
func Start() {
	Setup(goji.DefaultMux)
	goji.Serve()
}
// Setup registers the routes on the given mux and initializes the goban
// Redis-backed cache from its JSON config file.
func Setup(m *web.Mux) {
	route(m)
	goban.Setup("./conf/redis.json")
}
// route registers all HTTP handlers and middleware on the mux.
func route(m *web.Mux) {
	// Fallback route: this API has no index page.
	setGetHandler(m, "/", Root)
	// Sinatra-style URL patterns; both endpoints are wrapped with the
	// Redis response cache.
	setGetHandler(m, "/novel/:ncode", responseCache(getNovelInfo))
	setGetHandler(m, "/novel_content/:ncode/:sublist_id", responseCache(getNovelContent))
	// Json (defined below) stamps the Content-Type header on every response.
	m.Use(Json)
}
// Root route (GET "/"). Print a list of greets.
func Root(w http.ResponseWriter, r *http.Request) {
http.Error(w, http.StatusText(404), 404)
}
// setGetHandler registers handler for GET requests matching pattern.
// Both arguments are interface{} because goji's Mux.Get accepts several
// pattern and handler shapes.
func setGetHandler(m *web.Mux, pattern interface{}, handler interface{}) {
	m.Get(pattern, handler)
}
// responseCache wraps a string-producing handler with the goban (Redis)
// cache. The cache key combines a version prefix, the handler's function
// name, and its URL parameters; entries expire after 180 seconds.
func responseCache(handler func(c web.C, w http.ResponseWriter, r *http.Request) string) interface{} {
	return func(c web.C, w http.ResponseWriter, r *http.Request) {
		// Bug fix: the old code built the key by ranging over the URLParams
		// map (random iteration order) and re-prepended the "v2" prefix on
		// every iteration, so requests with multiple params produced an
		// unstable key and spurious cache misses. Sort the parameter pairs
		// and apply the version prefix exactly once.
		params := make([]string, 0, len(c.URLParams))
		for k, v := range c.URLParams {
			params = append(params, k+"_"+v)
		}
		sort.Strings(params)
		cacheKey := "v2" + runtime.FuncForPC(reflect.ValueOf(handler).Pointer()).Name() + strings.Join(params, "")
		jsonStr, err := goban.Get(cacheKey)
		if err != nil || jsonStr == "" {
			// Cache miss (or Redis error): compute and store for 180s.
			fmt.Println("use crawling")
			jsonStr = handler(c, w, r)
			goban.Setex(cacheKey, 180, jsonStr)
		}
		fmt.Println("output" + jsonStr)
		fmt.Fprint(w, jsonStr)
	}
}
// getNovelInfo handles GET /novel/:ncode and returns the novel's metadata
// serialized as a JSON string.
func getNovelInfo(c web.C, w http.ResponseWriter, r *http.Request) string {
	ncode := c.URLParams["ncode"]
	fmt.Println("get novel info novel", ncode)
	novel, _ := crawler.GetNovel(ncode)
	encoded, _ := json.Marshal(novel)
	return string(encoded)
}
// getNovelContent handles GET /novel_content/:ncode/:sublist_id and returns
// one chapter of the novel serialized as a JSON string.
func getNovelContent(c web.C, w http.ResponseWriter, r *http.Request) string {
	ncode := c.URLParams["ncode"]
	episode, _ := strconv.Atoi(c.URLParams["sublist_id"])
	fmt.Println("get novel content novel", ncode, episode)
	content := crawler.GetNovelContent(ncode, episode)
	encoded, _ := json.Marshal(content)
	return string(encoded)
}
// PlainText sets the content-type of responses to text/plain.
func Json(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/json")
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
|
package main
import (
"bufio"
"fmt"
"io"
"log"
"os"
"path/filepath"
)
// main prompts for a file name on stdin and creates that file in the
// current directory, re-prompting until a non-empty name is entered.
func main() {
	// File-handling practice: create a file with a user-supplied name.
	scanner := bufio.NewScanner(os.Stdin)
	fmt.Printf("作成するファイル名を入力してください\n > ")
	for scanner.Scan() {
		filename := scanner.Text()
		if len(filename) > 0 {
			createFile(filename)
			break
		} else {
			// Empty input: tell the user it is invalid and read again.
			fmt.Printf("入力が不正です\n")
		}
	}
}
func createFile(name string) {
filepath := filepath.Join(name)
fmt.Printf("outputfile: %s/%s\n", getCurrent(), filepath)
df, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
log.Fatal(err.Error())
}
defer func() {
if err := df.Close(); err != nil {
log.Fatal(err.Error())
}
}()
if _, err := io.WriteString(df, "go!go!"); err != nil {
log.Fatal(err.Error())
}
}
func getCurrent() string {
var err error
var current string
if current, err = os.Getwd(); err != nil {
log.Fatal(err.Error())
}
return current
}
|
package examples
import (
"net/http"
"html/template"
"github.com/kilfu0701/gogae/upload"
"google.golang.org/appengine"
"google.golang.org/appengine/log"
)
const (
	// uploadEntry is the endpoint that generates blobstore upload URLs.
	uploadEntry = "/api/photo/upload"
	// uploadTemplateHTML renders a multipart upload form posting to the
	// generated upload URL (the template's "." value).
	uploadTemplateHTML = `
<html><body>
<form action="{{.}}" method="POST" enctype="multipart/form-data">
Upload File: <input type="file" name="file"><br>
<input type="submit" name="submit" value="Submit">
</form></body></html>
`
	// indexTemplateHTML renders the landing page linking to uploadEntry;
	// it is executed with a Data value.
	indexTemplateHTML = `
<html><body>
Generate Upload URL API => <a href="{{.UploadEntry}}">{{.UploadEntry}}</a>
</body></html>
`
	// apiTemplateHTML echoes the generated upload URL (the "." value).
	apiTemplateHTML = `
<html><body>
Your upload URL is => <b>{{.}}</b>
</body></html>
`
)
// Data carries the values rendered by the page templates.
type Data struct {
	UploadEntry string // path of the upload-URL-generating endpoint
	UploadURL   string // generated upload URL (not populated by handleIndex)
}
// init registers the example's HTTP handlers on the default mux, which is
// how App Engine standard routes requests into the app.
func init() {
	http.HandleFunc("/", handleIndex)
	http.HandleFunc(uploadEntry, handleGenerateUrl)
}
// handleIndex serves the landing page that links to the upload-URL API.
func handleIndex(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	w.Header().Set("Content-Type", "text/html")
	tmpl := template.Must(template.New("index").Parse(indexTemplateHTML))
	page := Data{UploadEntry: uploadEntry}
	if err := tmpl.Execute(w, &page); err != nil {
		log.Errorf(ctx, "%v", err)
	}
}
// handleGenerateUrl generates a blobstore upload URL for the configured
// bucket/folder and renders it back to the client.
func handleGenerateUrl(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	settings := upload.Settings{
		Bucket:  "asd",
		Folder:  "customer/photo",
		MaxSize: 1024 * 1024 * 10, // 10MB
		BlobUrl: uploadEntry,
	}
	// Bug fix: the error from GenerateUploadURL was silently discarded, so
	// failures rendered an empty URL. Log it and fail the request instead.
	url, err := upload.GenerateUploadURL(ctx, &settings)
	if err != nil {
		log.Errorf(ctx, "GenerateUploadURL: %v", err)
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}
	var apiTemplate = template.Must(template.New("api").Parse(apiTemplateHTML))
	if err := apiTemplate.Execute(w, url); err != nil {
		log.Errorf(ctx, "%v", err)
	}
}
|
/*
* @lc app=leetcode.cn id=92 lang=golang
*
* [92] 反转链表 II
*/
// @lc code=start
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
package main
import "fmt"
// ListNode is a singly-linked list node (standard LeetCode definition).
type ListNode struct {
	Val  int       // payload value
	Next *ListNode // next node; nil at the tail
}
// main builds the list 1->2->3->4->5, prints it, reverses positions 2..4,
// and prints the result.
func main() {
	v1 := ListNode{ 1, nil}
	v2 := ListNode{ 2, nil}
	v3 := ListNode{ 3, nil}
	v4 := ListNode{ 4, nil}
	v5 := ListNode{ 5, nil}
	// Link the nodes into 1 -> 2 -> 3 -> 4 -> 5.
	v1.Next = &v2
	v2.Next = &v3
	v3.Next = &v4
	v4.Next = &v5
	print(&v1)
	l1 := reverseBetween(&v1, 2, 4)
	fmt.Println("reverse list")
	print(l1)
}
// print writes the list's values to stdout, space-separated, followed by a
// newline. A nil head prints nothing at all.
func print(head *ListNode) {
	if head == nil {
		return
	}
	for node := head; node != nil; node = node.Next {
		fmt.Printf("%d ", node.Val)
	}
	fmt.Println()
}
// reverseBetween reverses the nodes of the list from position m to position
// n (1-indexed, inclusive) and returns the resulting head. Assumes
// 1 <= m <= n <= length of the list.
//
// Bug fix: both loop conditions used `count >= m` / `count >= n` where
// `count < m` / `count < n` is required. As written, for m > 1 the first
// loop never ran (leaving prev1 nil and panicking below), and for m == 1
// it ran off the end of the list.
func reverseBetween(head *ListNode, m int, n int) *ListNode {
	cur := head
	count := 1
	// Advance cur to position m; prev1 trails at position m-1 (nil if m==1).
	var prev1 *ListNode
	for cur != nil && count < m {
		prev1 = cur
		cur = cur.Next
		count++
	}
	tmp := cur // first node of the segment; becomes its tail after reversal
	var next *ListNode
	var prev2 *ListNode = nil
	count--
	// Pointer-flip exactly n-m+1 nodes; prev2 ends as the segment's new head.
	for cur != nil && count < n {
		next = cur.Next
		cur.Next = prev2
		prev2 = cur
		cur = next
		count++
	}
	print(prev2) // debug: show the reversed segment
	if m > 1 {
		prev1.Next = prev2 // reconnect the prefix to the reversed segment
		tmp.Next = cur     // reconnect the segment tail to the suffix
		return head
	}
	// m == 1: the reversed segment starts the list.
	return prev2
}
// @lc code=end
|
package email
import (
"../ses"
"../config"
)
// ConfirmEmail sends the "whitelist application received" email containing
// an email-confirmation link built from the given token. Both a plain-text
// and an HTML body are provided.
func ConfirmEmail(to string, token string) {
	emailData := ses.Email{
		To:   to,
		From: config.Config.NoReplyEmail,
		// Bug fix: the plain-text body contained a literal "/n" instead of
		// the "\n" newline escape after "following the link".
		Text: "Your whitelist submission is well received.\n\n" +
			"To finish the whitelist application process please confirm your email by following the link\n" +
			"https://mdl.life/whitelist/confirm_email?token=" + token + "\n\n" +
			"The instructions of how to purchase the MDL Tokens to be send soon is confirmation that you have passed the whitelist.\n\n" +
			"For inquiries and support please contact support@mdl.life",
		HTML: "<h3 style=\"color:purple;\">Your whitelist submission is well received.</h3><br>" +
			"To finish the whitelist application process please confirm your email by clicking the link<br>" +
			"<a href=\"https://mdl.life/whitelist/confirm_email?token=" + token + "\">" + "https://mdl.life/whitelist/confirm_email?token=" + token + "</a><br><br>" +
			"The instructions of how to purchase the MDL Tokens to be send soon is confirmation that you have passed the whitelist.<br><br>" +
			"For inquiries and support please contact <a href=\"mailto:support@mdl.life\">support@mdl.life</a>",
		Subject: "MDL Talent Hub: Whitelist application received",
		ReplyTo: config.Config.ReplyEmail,
	}
	ses.SendEmail(emailData)
}
|
package main
import (
"errors"
"fmt"
"log"
"net/http"
"github.com/dikhan/http_goclient"
"github.com/go-openapi/loads"
"github.com/go-openapi/spec"
"github.com/hashicorp/terraform/helper/schema"
)
// API_KEY_HEADER_NAME is the provider-schema property for an API key sent as an HTTP header.
const API_KEY_HEADER_NAME = "api_key_header"
// API_KEY_QUERY_NAME is the provider-schema property for an API key sent as a query parameter.
const API_KEY_QUERY_NAME = "api_key_query"
// ProviderFactory builds a terraform provider from a remotely discovered
// swagger/OpenAPI document.
type ProviderFactory struct {
	Name            string // provider name; used to prefix resource names
	DiscoveryApiUrl string // URL of the swagger document to load
}

// ApiKey is one authentication credential: its wire name and its value.
type ApiKey struct {
	Name  string
	Value string
}

// SecuritySchemaDefinition groups the header-based and query-based API
// keys for one swagger security definition.
type SecuritySchemaDefinition struct {
	ApiKeyHeader ApiKey
	ApiKeyQuery  ApiKey
}

// ProviderConfig is the value produced by the provider's ConfigureFunc and
// handed to resource operations.
type ProviderConfig struct {
	SecuritySchemaDefinitions map[string]SecuritySchemaDefinition
}
// createProvider fetches the API spec and derives the terraform provider
// from it. Failures are fatal, since the plugin is unusable without a spec.
func (p ProviderFactory) createProvider() *schema.Provider {
	apiSpecAnalyser, err := p.createApiSpecAnalyser()
	if err != nil {
		log.Fatalf("error occurred while retrieving api specification. Error=%s", err)
	}
	provider, err := p.generateProviderFromApiSpec(apiSpecAnalyser)
	if err != nil {
		log.Fatalf("error occurred while creating schema provider. Error=%s", err)
	}
	return provider
}
// createApiSpecAnalyser downloads the swagger document referenced by
// DiscoveryApiUrl and wraps it in an ApiSpecAnalyser.
func (p ProviderFactory) createApiSpecAnalyser() (*ApiSpecAnalyser, error) {
	if p.DiscoveryApiUrl == "" {
		return nil, errors.New("required param 'apiUrl' missing")
	}
	apiSpec, err := loads.JSONSpec(p.DiscoveryApiUrl)
	if err != nil {
		return nil, fmt.Errorf("error occurred when retrieving api spec from %s. Error=%s", p.DiscoveryApiUrl, err)
	}
	return &ApiSpecAnalyser{apiSpec}, nil
}
// generateProviderFromApiSpec builds a terraform *schema.Provider whose
// resources are derived from the CRUD-capable paths of the swagger spec.
func (p ProviderFactory) generateProviderFromApiSpec(apiSpecAnalyser *ApiSpecAnalyser) (*schema.Provider, error) {
	resourceMap := map[string]*schema.Resource{}
	for name, resourceInfo := range apiSpecAnalyser.getCrudResources() {
		factory := ResourceFactory{
			http_goclient.HttpClient{HttpClient: &http.Client{}},
			resourceInfo,
		}
		// Register under the provider-prefixed name (e.g. "<provider>_<name>").
		resourceMap[p.getProviderResourceName(name)] = factory.createSchemaResource()
	}
	securityDefinitions := apiSpecAnalyser.d.Spec().SecurityDefinitions
	return &schema.Provider{
		Schema:        p.createTerraformProviderSchema(securityDefinitions),
		ResourcesMap:  resourceMap,
		ConfigureFunc: p.configureProvider(securityDefinitions),
	}, nil
}
// createTerraformProviderSchema adds support for specific provider configuration such as api key which will
// be used as the authentication mechanism when making http requests to the service provider.
// Only "apiKey" security definitions are handled; the schema property name
// depends on whether the key travels in a header or in the query string.
func (p ProviderFactory) createTerraformProviderSchema(securityDefinitions spec.SecurityDefinitions) map[string]*schema.Schema {
	s := map[string]*schema.Schema{}
	for _, secDef := range securityDefinitions {
		if secDef.Type == "apiKey" {
			var key string
			switch secDef.In {
			case "header":
				key = API_KEY_HEADER_NAME
			case "query":
				key = API_KEY_QUERY_NAME
			default:
				// Bug fix: an unrecognized "in" location previously fell
				// through with key == "", registering a bogus s[""] entry.
				continue
			}
			s[key] = &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			}
		}
	}
	return s
}
// configureProvider returns the ConfigureFunc that reads the api-key values
// supplied in the provider block and packages them as a ProviderConfig.
func (p ProviderFactory) configureProvider(securityDefinitions spec.SecurityDefinitions) schema.ConfigureFunc {
	return func(data *schema.ResourceData) (interface{}, error) {
		config := ProviderConfig{
			SecuritySchemaDefinitions: map[string]SecuritySchemaDefinition{},
		}
		for secDefName, secDef := range securityDefinitions {
			if secDef.Type != "apiKey" {
				continue
			}
			var def SecuritySchemaDefinition
			switch secDef.In {
			case "header":
				def.ApiKeyHeader = ApiKey{secDef.Name, data.Get(API_KEY_HEADER_NAME).(string)}
			case "query":
				def.ApiKeyQuery = ApiKey{secDef.Name, data.Get(API_KEY_QUERY_NAME).(string)}
			}
			config.SecuritySchemaDefinitions[secDefName] = def
		}
		return config, nil
	}
}
// getProviderResourceName prefixes the resource name with the provider
// name, yielding the terraform identifier "<provider>_<resource>".
func (p ProviderFactory) getProviderResourceName(resourceName string) string {
	return p.Name + "_" + resourceName
}
|
package protocol
// Op is a protocol operation carrying the common message header.
type Op struct {
	OpHeader *Header // header shared by all operations
}
// Header returns the operation's header.
func (p *Op) Header() *Header {
	return p.OpHeader
}
|
package sstable
import (
"encoding/binary"
"io"
"sort"
"github.com/cloud9-tools/go-sstable/internal"
)
// Build writes data to w in sstable format and returns the first error
// encountered, if any. The layout is: header (magic bytes, big-endian
// uint32 record count, uint32 CRC of the preceding bytes), then one index
// entry per record (16 fixed bytes plus the key), then all values
// concatenated in the same order. Note: data is sorted in place by key.
func Build(w io.Writer, data []Pair) error {
	sort.Sort(byKey(data))
	if len(data) > kmaxuint32 {
		return ErrTooManyRecords
	}
	// Offset where the first value will land: the header is len(magic)
	// plus 4 bytes of count plus 4 bytes of CRC, and each index entry adds
	// 16 fixed bytes plus its key. Validate per-record limits on the way.
	var offset = 8 + uint64(len(magic))
	for _, item := range data {
		if len(item.Key) > kmaxkeylen {
			return ErrKeyTooLong
		}
		if len(item.Value) > kmaxvaluelen {
			return ErrValueTooLong
		}
		offset += 16 + uint64(len(item.Key))
	}
	// Final file size (index area plus every value) must fit in 32 bits,
	// because value offsets are stored as uint32 in the index entries.
	maxoffset := offset
	for _, item := range data {
		maxoffset += uint64(len(item.Value))
	}
	if maxoffset > kmaxuint32 {
		return ErrTooMuchData
	}
	// Header: magic, record count, then a CRC over magic+count.
	n := len(magic)
	var tmp [16]byte
	copy(tmp[0:n], []byte(magic))
	binary.BigEndian.PutUint32(tmp[n:n+4], uint32(len(data)))
	n += 4
	binary.BigEndian.PutUint32(tmp[n:n+4], internal.NewCRC(tmp[0:n]).Value())
	n += 4
	if _, err := w.Write(tmp[0:n]); err != nil {
		return err
	}
	// Index entries: key length (1 byte), value length (3 bytes, big-endian
	// 24-bit), value offset (uint32), value CRC (uint32), entry CRC
	// (uint32), followed by the key bytes themselves.
	for _, item := range data {
		lenKey := uint32(len(item.Key))
		lenValue := uint32(len(item.Value))
		tmp[0] = uint8(lenKey)
		tmp[1] = uint8(lenValue >> 16)
		tmp[2] = uint8(lenValue >> 8)
		tmp[3] = uint8(lenValue)
		binary.BigEndian.PutUint32(tmp[4:8], uint32(offset))
		binary.BigEndian.PutUint32(tmp[8:12], internal.NewCRC(item.Value).Value())
		// The entry CRC covers the 12 fixed bytes above plus the key.
		cksum := internal.NewCRC(tmp[0:12])
		cksum = cksum.Update([]byte(item.Key))
		binary.BigEndian.PutUint32(tmp[12:16], cksum.Value())
		if _, err := w.Write(tmp[0:16]); err != nil {
			return err
		}
		if _, err := w.Write([]byte(item.Key)); err != nil {
			return err
		}
		// Advance past this record's value in the value area.
		offset += uint64(len(item.Value))
	}
	// Value area: raw values, back to back, in sorted-key order.
	for _, item := range data {
		if _, err := w.Write(item.Value); err != nil {
			return err
		}
	}
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.