text stringlengths 11 4.05M |
|---|
/* Copyright © 2021
Author : mehtaarn000
Email : arnavm834@gmail.com
*/
package core
import (
"os/exec"
"runtime"
"io/ioutil"
"ssc/utils"
)
// bash_script is the POSIX shell updater executed by Update. It:
//  1. queries the GitHub API for the SuperSourceControl releases,
//  2. downloads and unzips the newest release zipball into /tmp,
//  3. builds ssc.go with `go build`, and
//  4. moves the freshly built binary over the installed one (found via `which ssc`).
// NOTE(review): the script requires curl, jq, unzip and the Go toolchain on
// PATH; none of these prerequisites are checked before it runs.
const bash_script = `
JSON=$(curl -sL https://api.github.com/repos/mehtaarn000/SuperSourceControl/releases)
SSCPATH=$(which ssc)
ZIPBALL=$(echo "$JSON" | tr '\r\n' ' ' | jq -r '.[0] | .zipball_url')
curl -L "$ZIPBALL" > /tmp/ssc.zip
unzip /tmp/ssc.zip -d /tmp/SuperSourceControl
ZIPDIR=$(ls /tmp/SuperSourceControl)
FULLPATH="/tmp/SuperSourceControl/$ZIPDIR/ssc.go"
go build -o "/tmp/SuperSourceControl/$ZIPDIR/ssc" "$FULLPATH"
mv "/tmp/SuperSourceControl/$ZIPDIR/ssc" "$SSCPATH"
rm -rf "/tmp/SuperSourceControl"
`
// Update ssc on a Unix-like system
func Update() {
switch runtime.GOOS {
case "darwin", "freebsd", "openbsd", "linux", "netbsd":
err := ioutil.WriteFile("./.ssc/tmp/update.sh", []byte(bash_script), 0777)
cmd := exec.Command("sh", "./.ssc/tmp/update.sh")
_, err = cmd.Output()
if err != nil {
utils.Exit(err)
}
default:
utils.Exit("Updates are only supported on Unix based systems.")
}
} |
package main
import (
"fmt"
)
// fn returns its argument increased by the fixed offset 42.
func fn(arg int) int {
	const offset = 42
	return offset + arg
}
// main prints a greeting that includes the result of fn(54).
func main() {
	fmt.Printf("Hello world %d", fn(54))
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/pci"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/checked"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/restriction"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the SystemTimezoneAutomaticDetection test with the tast
// framework: its entry function, fixture (enrolled + logged-in Chrome),
// attributes and the policy search flag used for coverage tracking.
func init() {
testing.AddTest(&testing.Test{
Func: SystemTimezoneAutomaticDetection,
LacrosStatus: testing.LacrosVariantUnneeded,
Desc: "Check of SystemTimezoneAutomaticDetection policy by checking the settings page",
Contacts: []string{
"vsavu@google.com", // Test author
},
// "informational" means failures do not block the mainline suite.
Attr: []string{"group:mainline", "informational"},
SoftwareDeps: []string{"chrome"},
Fixture: fixture.ChromeEnrolledLoggedIn,
SearchFlags: []*testing.StringPair{
pci.SearchFlag(&policy.SystemTimezoneAutomaticDetection{}, pci.VerifiedFunctionalityUI),
},
})
}
// SystemTimezoneAutomaticDetection verifies each value of the
// SystemTimezoneAutomaticDetection policy against the OS settings time-zone
// page: for every subtest the expected radio button must be selected with the
// expected restriction state, and the displayed detection-method text must
// match. Policies are reset and re-served before each subtest.
func SystemTimezoneAutomaticDetection(ctx context.Context, s *testing.State) {
cr := s.FixtValue().(chrome.HasChrome).Chrome()
fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
// Connect to Test API to use it with the UI library.
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
s.Fatal("Failed to create Test API connection: ", err)
}
for _, param := range []struct {
// name is the subtest name.
name string
// value is the policy value.
value *policy.SystemTimezoneAutomaticDetection
// wantRestriction indicates whether the setting can be changed by the user.
wantRestriction restriction.Restriction
// selectedOption is the selected state for timezone detection.
selectedOption string
// selectedDetection is the state for timezone detection.
selectedDetection string
}{
{
name: "all",
value: &policy.SystemTimezoneAutomaticDetection{Val: 4},
wantRestriction: restriction.Disabled,
selectedOption: "Set automatically",
selectedDetection: "Use Wi-Fi or mobile networks to determine location",
},
{
name: "wifi",
value: &policy.SystemTimezoneAutomaticDetection{Val: 3},
wantRestriction: restriction.Disabled,
selectedOption: "Set automatically",
selectedDetection: "Use only Wi-Fi to determine location",
},
{
name: "ip",
value: &policy.SystemTimezoneAutomaticDetection{Val: 2},
wantRestriction: restriction.Disabled,
selectedOption: "Set automatically",
selectedDetection: "Use your IP address to determine location (default)",
},
{
name: "never",
value: &policy.SystemTimezoneAutomaticDetection{Val: 1},
wantRestriction: restriction.Disabled,
selectedOption: "Choose from list",
selectedDetection: "Automatic time zone detection is disabled",
},
{
// Val 0 leaves the choice to the user, hence no restriction.
name: "user",
value: &policy.SystemTimezoneAutomaticDetection{Val: 0},
wantRestriction: restriction.None,
selectedOption: "Set automatically",
selectedDetection: "Use your IP address to determine location (default)",
},
} {
s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
// Capture the UI tree and a screenshot if the subtest fails.
defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)
// Perform cleanup.
if err := policyutil.ResetChrome(ctx, fdms, cr); err != nil {
s.Fatal("Failed to clean up: ", err)
}
// Update policies.
// Retry to make sure they are applied. This is just an experiment for applying device policies.
if err := testing.Poll(ctx, func(ctx context.Context) error {
return policyutil.ServeAndVerify(ctx, fdms, cr, []policy.Policy{param.value})
}, &testing.PollOptions{Timeout: 30 * time.Second}); err != nil {
s.Fatal("Failed to update policies: ", err)
}
// Open the time zone settings page.
if err := policyutil.OSSettingsPage(ctx, cr, "dateTime/timeZone").
SelectNode(ctx, nodewith.
Role(role.RadioButton).
Name(param.selectedOption)).
Checked(checked.True).
Restriction(param.wantRestriction).
Verify(); err != nil {
s.Error("Unexpected OS settings state: ", err)
}
// Check the currently selected detection mode.
uia := uiauto.New(tconn)
text, err := uia.Info(ctx, nodewith.Name("Time zone detection method"))
if err != nil {
s.Fatal("Could not find current detection mode: ", err)
}
if text.Value != param.selectedDetection {
s.Errorf("Invalid detection mode: got %q; want %q", text.Value, param.selectedDetection)
}
})
}
}
|
package a
import (
"a/schema"
)
// foutside constructs two schema.Schema values — one list-typed, one
// set-typed — and immediately discards both.
func foutside() {
	listSchema := schema.Schema{Type: schema.TypeList}
	_ = listSchema

	setSchema := schema.Schema{Type: schema.TypeSet}
	_ = setSchema
}
|
package itest
import (
"context"
"encoding/hex"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/stretchr/testify/require"
)
// testTrackPayments tests whether a client that calls the TrackPayments api
// receives payment updates.
//
// Flow: open an Alice->Bob channel, subscribe to TrackPayments on Alice,
// have Bob create an invoice, pay it from Alice, then assert the tracker
// streams an IN_FLIGHT update followed by a SUCCEEDED update.
func testTrackPayments(net *lntest.NetworkHarness, t *harnessTest) {
// Open a channel between alice and bob.
net.EnsureConnected(t.t, net.Alice, net.Bob)
channel := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: btcutil.Amount(300000),
},
)
defer closeChannelAndAssert(t, net, net.Alice, channel, true)
err := net.Alice.WaitForNetworkChannelOpen(channel)
require.NoError(t.t, err, "unable to wait for channel to open")
ctxb := context.Background()
// Separate cancellable context so the tracker stream is torn down when
// the test returns.
ctxt, cancelTracker := context.WithCancel(ctxb)
defer cancelTracker()
// Call the TrackPayments api to listen for payment updates.
// NoInflightUpdates is false, so in-flight updates are streamed too —
// that is why the first update asserted below has status IN_FLIGHT.
tracker, err := net.Alice.RouterClient.TrackPayments(
ctxt,
&routerrpc.TrackPaymentsRequest{
NoInflightUpdates: false,
},
)
require.NoError(t.t, err, "failed to call TrackPayments successfully.")
// Create an invoice from bob.
var amountMsat int64 = 1000
invoiceResp, err := net.Bob.AddInvoice(
ctxb,
&lnrpc.Invoice{
ValueMsat: amountMsat,
},
)
require.NoError(t.t, err, "unable to add invoice.")
invoice, err := net.Bob.LookupInvoice(
ctxb,
&lnrpc.PaymentHash{
RHashStr: hex.EncodeToString(invoiceResp.RHash),
},
)
require.NoError(t.t, err, "unable to find invoice.")
// Send payment from alice to bob.
paymentClient, err := net.Alice.RouterClient.SendPaymentV2(
ctxb,
&routerrpc.SendPaymentRequest{
PaymentRequest: invoice.PaymentRequest,
TimeoutSeconds: 60,
},
)
require.NoError(t.t, err, "unable to send payment.")
// Make sure the payment doesn't error due to invalid parameters or so.
_, err = paymentClient.Recv()
require.NoError(t.t, err, "unable to get payment update.")
// Assert the first payment update is an inflight update.
update1, err := tracker.Recv()
require.NoError(t.t, err, "unable to receive payment update 1.")
require.Equal(
t.t, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
update1.FailureReason,
)
require.Equal(t.t, lnrpc.Payment_IN_FLIGHT, update1.Status)
require.Equal(t.t, invoice.PaymentRequest, update1.PaymentRequest)
require.Equal(t.t, amountMsat, update1.ValueMsat)
// Assert the second payment update is a payment success update.
update2, err := tracker.Recv()
require.NoError(t.t, err, "unable to receive payment update 2.")
require.Equal(
t.t, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
update2.FailureReason,
)
require.Equal(t.t, lnrpc.Payment_SUCCEEDED, update2.Status)
require.Equal(t.t, invoice.PaymentRequest, update2.PaymentRequest)
require.Equal(t.t, amountMsat, update2.ValueMsat)
require.Equal(
t.t, hex.EncodeToString(invoice.RPreimage),
update2.PaymentPreimage,
)
}
|
package v1
import (
"fmt"
"path"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/storageos/cluster-operator/internal/pkg/image"
"github.com/storageos/cluster-operator/internal/pkg/toleration"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// ClusterPhase is the phase of the storageos cluster at a given point in time.
// Valid values are the ClusterPhase* constants declared below (Initial,
// Running, Creating, Pending, Terminating).
type ClusterPhase string
// Constants for operator defaults values and different phases.
const (
// ClusterPhaseInitial is the zero value of ClusterPhase, before any
// phase has been reported.
ClusterPhaseInitial ClusterPhase = ""
// A cluster is in running phase when the cluster health is reported
// healthy, all the StorageOS nodes are ready.
ClusterPhaseRunning ClusterPhase = "Running"
// A cluster is in creating phase when the cluster resource provisioning as
// started
ClusterPhaseCreating ClusterPhase = "Creating"
// A cluster is in pending phase when the creation hasn't started. This can
// happen if there's an existing cluster and the new cluster provisioning is
// not allowed by the operator.
ClusterPhasePending ClusterPhase = "Pending"
// A cluster is in terminating phase when the cluster delete is initiated.
// The cluster object is waiting for the finalizers to be executed.
ClusterPhaseTerminating ClusterPhase = "Terminating"
// Use kube-system as the default namespace as so that our containers won't
// get evicted when there is resource contention. Works with
// "system-node-critical" priority class.
DefaultNamespace = "kube-system"
// Defaults for the StorageClass, node Service and Ingress created by the
// operator. The Get* accessors below fall back to these when the spec
// leaves the corresponding field empty.
DefaultStorageClassName = "fast"
DefaultServiceName = "storageos"
DefaultServiceType = "ClusterIP"
DefaultServiceExternalPort = 5705
DefaultServiceInternalPort = 5705
DefaultIngressHostname = "storageos.local"
// CSI path defaults. DefaultPluginRegistrationPath is used for CSI v1;
// OldPluginRegistrationPath for CSI v0 (see the csiv1-aware getters).
DefaultPluginRegistrationPath = "/var/lib/kubelet/plugins_registry"
OldPluginRegistrationPath = "/var/lib/kubelet/plugins"
DefaultCSIEndpoint = "/storageos/csi.sock"
DefaultCSIRegistrarSocketDir = "/var/lib/kubelet/device-plugins/"
DefaultCSIKubeletDir = "/var/lib/kubelet"
DefaultCSIPluginDir = "/storageos/"
DefaultCSIDeviceDir = "/dev"
DefaultCSIRegistrationDir = DefaultPluginRegistrationPath
DefaultCSIKubeletRegistrationPath = "/storageos/csi.sock"
DefaultCSIDriverRegistrationMode = "node-register"
DefaultCSIDriverRequiresAttachment = "true"
DefaultCSIDeploymentStrategy = "deployment"
)
func getDefaultCSIEndpoint(pluginRegistrationPath string) string {
return fmt.Sprintf("%s%s%s", "unix://", pluginRegistrationPath, DefaultCSIEndpoint)
}
// getDefaultCSIPluginDir returns the default CSI plugin directory located
// beneath the given plugin registration path.
func getDefaultCSIPluginDir(pluginRegistrationPath string) string {
	dir := path.Join(pluginRegistrationPath, DefaultCSIPluginDir)
	return dir
}
// getDefaultCSIKubeletRegistrationPath returns the default kubelet
// registration socket path beneath the given plugin registration path.
func getDefaultCSIKubeletRegistrationPath(pluginRegistrationPath string) string {
	regPath := path.Join(pluginRegistrationPath, DefaultCSIKubeletRegistrationPath)
	return regPath
}
// StorageOSClusterSpec defines the desired state of StorageOSCluster
// +k8s:openapi-gen=true
type StorageOSClusterSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
// Join is the join token used for service discovery.
Join string `json:"join,omitempty"`
// CSI defines the configurations for CSI.
CSI StorageOSClusterCSI `json:"csi,omitempty"`
// Namespace is the kubernetes Namespace where storageos resources are
// provisioned.
Namespace string `json:"namespace,omitempty"`
// StorageClassName is the name of default StorageClass created for
// StorageOS volumes.
StorageClassName string `json:"storageClassName,omitempty"`
// Service is the Service configuration for the cluster nodes.
Service StorageOSClusterService `json:"service,omitempty"`
// SecretRefName is the name of the secret object that contains all the
// sensitive cluster configurations.
// Note: no omitempty tag — this field is always serialized and is
// effectively required.
SecretRefName string `json:"secretRefName"`
// SecretRefNamespace is the namespace of the secret reference.
// Note: no omitempty tag — always serialized, effectively required.
SecretRefNamespace string `json:"secretRefNamespace"`
// SharedDir is the shared directory to be used when the kubelet is running
// in a container.
// Typically: "/var/lib/kubelet/plugins/kubernetes.io~storageos".
// If not set, defaults will be used.
SharedDir string `json:"sharedDir,omitempty"`
// Ingress defines the ingress configurations used in the cluster.
Ingress StorageOSClusterIngress `json:"ingress,omitempty"`
// Images defines the various container images used in the cluster.
Images ContainerImages `json:"images,omitempty"`
// KVBackend defines the key-value store backend used in the cluster.
KVBackend StorageOSClusterKVBackend `json:"kvBackend,omitempty"`
// Pause is to pause the operator for the cluster.
Pause bool `json:"pause,omitempty"`
// Debug is to set debug mode of the cluster.
Debug bool `json:"debug,omitempty"`
// NodeSelectorTerms is to set the placement of storageos pods using
// node affinity requiredDuringSchedulingIgnoredDuringExecution.
NodeSelectorTerms []corev1.NodeSelectorTerm `json:"nodeSelectorTerms,omitempty"`
// Tolerations is to set the placement of storageos pods using
// pod toleration.
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// Resources is to set the resource requirements of the storageos containers.
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// Disable Pod Fencing. With StatefulSets, Pods are only re-scheduled if
// the Pod has been marked as killed. In practice this means that failover
// of a StatefulSet pod is a manual operation.
//
// By enabling Pod Fencing and setting the `storageos.com/fenced=true` label
// on a Pod, StorageOS will enable automated Pod failover (by killing the
// application Pod on the failed node) if the following conditions exist:
//
// - Pod fencing has not been explicitly disabled.
// - StorageOS has determined that the node the Pod is running on is
// offline. StorageOS uses Gossip and TCP checks and will retry for 30
// seconds. At this point all volumes on the failed node are marked
// offline (irrespective of whether fencing is enabled) and volume
// failover starts.
// - The Pod has the label `storageos.com/fenced=true` set.
// - The Pod has at least one StorageOS volume attached.
// - Each StorageOS volume has at least 1 healthy replica.
//
// When Pod Fencing is disabled, StorageOS will not perform any interaction
// with Kubernetes when it detects that a node has gone offline.
// Additionally, the Kubernetes permissions required for Fencing will not be
// added to the StorageOS role.
DisableFencing bool `json:"disableFencing,omitempty"`
// Disable Telemetry.
DisableTelemetry bool `json:"disableTelemetry,omitempty"`
// Disable TCMU can be set to true to disable the TCMU storage driver. This
// is required when there are multiple storage systems running on the same
// node and you wish to avoid conflicts. Only one TCMU-based storage system
// can run on a node at a time.
//
// Disabling TCMU will degrade performance.
DisableTCMU bool `json:"disableTCMU,omitempty"`
// Force TCMU can be set to true to ensure that TCMU is enabled or
// cause StorageOS to abort startup.
//
// At startup, StorageOS will automatically fallback to non-TCMU mode if
// another TCMU-based storage system is running on the node. Since non-TCMU
// will degrade performance, this may not always be desired.
ForceTCMU bool `json:"forceTCMU,omitempty"`
// TLSEtcdSecretRefName is the name of the secret object that contains the
// etcd TLS certs. This secret is shared with etcd, therefore it's not part
// of the main storageos secret.
TLSEtcdSecretRefName string `json:"tlsEtcdSecretRefName,omitempty"`
// TLSEtcdSecretRefNamespace is the namespace of the etcd TLS secret object.
TLSEtcdSecretRefNamespace string `json:"tlsEtcdSecretRefNamespace,omitempty"`
// K8sDistro is the name of the Kubernetes distribution where the operator
// is being deployed. It should be in the format: `name[-1.0]`, where the
// version is optional and should only be appended if known. Suitable names
// include: `openshift`, `rancher`, `aks`, `gke`, `eks`, or the deployment
// method if using upstream directly, e.g `minishift` or `kubeadm`.
//
// Setting k8sDistro is optional, and will be used to simplify cluster
// configuration by setting appropriate defaults for the distribution. The
// distribution information will also be included in the product telemetry
// (if enabled), to help focus development efforts.
K8sDistro string `json:"k8sDistro,omitempty"`
// Disable StorageOS scheduler extender.
DisableScheduler bool `json:"disableScheduler,omitempty"`
}
// StorageOSClusterStatus defines the observed state of StorageOSCluster
// +k8s:openapi-gen=true
type StorageOSClusterStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
// Phase is the current lifecycle phase of the cluster (see ClusterPhase).
Phase ClusterPhase `json:"phase,omitempty"`
// NodeHealthStatus holds per-node health details. Presumably keyed by
// node name/IP (MembersStatus documents member names as node IPs) —
// TODO(review): confirm against the status writer.
NodeHealthStatus map[string]NodeHealth `json:"nodeHealthStatus,omitempty"`
// Nodes lists the StorageOS nodes known to the cluster.
Nodes []string `json:"nodes,omitempty"`
// Ready is a human-readable readiness summary (surfaced in the "ready"
// kubectl printer column declared on StorageOSCluster).
Ready string `json:"ready,omitempty"`
// Members splits cluster members into ready and unready sets.
Members MembersStatus `json:"members,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StorageOSCluster is the Schema for the storageosclusters API
// +k8s:openapi-gen=true
// +kubebuilder:printcolumn:name="ready",type="string",JSONPath=".status.ready",description="Ready status of the storageos nodes."
// +kubebuilder:printcolumn:name="status",type="string",JSONPath=".status.phase",description="Status of the whole cluster."
// +kubebuilder:printcolumn:name="age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:path=storageosclusters,shortName=stos
// +kubebuilder:singular=storageoscluster
// +kubebuilder:subresource:status
type StorageOSCluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec is the desired cluster state supplied by the user.
Spec StorageOSClusterSpec `json:"spec,omitempty"`
// Status is the observed cluster state maintained by the operator
// (exposed via the status subresource, per the kubebuilder marker above).
Status StorageOSClusterStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StorageOSClusterList contains a list of StorageOSCluster
type StorageOSClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
// Items holds the StorageOSCluster objects in this list response.
Items []StorageOSCluster `json:"items"`
}
// init registers the StorageOSCluster types with the scheme builder so the
// Kubernetes API machinery can encode/decode them.
func init() {
SchemeBuilder.Register(&StorageOSCluster{}, &StorageOSClusterList{})
}
// MembersStatus stores the status details of cluster member nodes.
// It is surfaced through StorageOSClusterStatus.Members.
type MembersStatus struct {
// Ready are the storageos cluster members that are ready to serve requests.
// The member names are the same as the node IPs.
Ready []string `json:"ready,omitempty"`
// Unready are the storageos cluster nodes not ready to serve requests.
Unready []string `json:"unready,omitempty"`
}
// GetResourceNS reports the namespace in which all StorageOS resources are
// provisioned, falling back to DefaultNamespace when none is configured.
func (s StorageOSClusterSpec) GetResourceNS() string {
	if s.Namespace == "" {
		return DefaultNamespace
	}
	return s.Namespace
}
// GetStorageClassName reports the name of the default StorageClass created
// with the StorageOS cluster, falling back to DefaultStorageClassName.
func (s StorageOSClusterSpec) GetStorageClassName() string {
	if s.StorageClassName == "" {
		return DefaultStorageClassName
	}
	return s.StorageClassName
}
// GetNodeContainerImage reports the node container image, preferring an
// explicitly configured image over the environment/default fallback.
func (s StorageOSClusterSpec) GetNodeContainerImage() string {
	if img := s.Images.NodeContainer; img != "" {
		return img
	}
	return image.GetDefaultImage(image.StorageOSNodeImageEnvVar, image.DefaultNodeContainerImage)
}
// GetInitContainerImage reports the init container image, preferring an
// explicitly configured image over the environment/default fallback.
func (s StorageOSClusterSpec) GetInitContainerImage() string {
	if img := s.Images.InitContainer; img != "" {
		return img
	}
	return image.GetDefaultImage(image.StorageOSInitImageEnvVar, image.DefaultInitContainerImage)
}
// GetCSINodeDriverRegistrarImage reports the CSI node driver registrar
// image: a configured image wins; otherwise the default depends on whether
// CSI v1 or v0 is in use.
func (s StorageOSClusterSpec) GetCSINodeDriverRegistrarImage(csiv1 bool) string {
	if img := s.Images.CSINodeDriverRegistrarContainer; img != "" {
		return img
	}
	if !csiv1 {
		return image.GetDefaultImage(image.CSIv0DriverRegistrarImageEnvVar, image.CSIv0DriverRegistrarContainerImage)
	}
	return image.GetDefaultImage(image.CSIv1NodeDriverRegistrarImageEnvVar, image.CSIv1NodeDriverRegistrarContainerImage)
}
// GetCSIClusterDriverRegistrarImage reports the CSI cluster driver registrar
// image, falling back to the environment/default when unset.
func (s StorageOSClusterSpec) GetCSIClusterDriverRegistrarImage() string {
	if img := s.Images.CSIClusterDriverRegistrarContainer; img != "" {
		return img
	}
	return image.GetDefaultImage(image.CSIv1ClusterDriverRegistrarImageEnvVar, image.CSIv1ClusterDriverRegistrarContainerImage)
}
// GetCSIExternalProvisionerImage reports the CSI external provisioner image:
// a configured image wins; otherwise the default depends on the CSI version.
func (s StorageOSClusterSpec) GetCSIExternalProvisionerImage(csiv1 bool) string {
	if img := s.Images.CSIExternalProvisionerContainer; img != "" {
		return img
	}
	if !csiv1 {
		return image.GetDefaultImage(image.CSIv0ExternalProvisionerImageEnvVar, image.CSIv0ExternalProvisionerContainerImage)
	}
	return image.GetDefaultImage(image.CSIv1ExternalProvisionerImageEnvVar, image.CSIv1ExternalProvisionerContainerImageV2)
}
// GetCSIExternalAttacherImage reports the CSI external attacher image.
// A configured image always wins. Otherwise the default depends on the CSI
// version and which attacher generations the cluster supports: CSI v0,
// CSI v1 on k8s 1.13, and CSI v1 on k8s 1.14+ each need a different attacher;
// k8s 1.17+ should use the v3 attacher. Support for k8s 1.14 and earlier is
// expected to be removed soon.
func (s StorageOSClusterSpec) GetCSIExternalAttacherImage(csiv1 bool, attacherv2Supported bool, attacherV3Supported bool) string {
	if img := s.Images.CSIExternalAttacherContainer; img != "" {
		return img
	}
	switch {
	case csiv1 && attacherV3Supported:
		return image.GetDefaultImage(image.CSIv1ExternalAttacherv3ImageEnvVar, image.CSIv1ExternalAttacherContainerImageV3)
	case csiv1 && attacherv2Supported:
		return image.GetDefaultImage(image.CSIv1ExternalAttacherv2ImageEnvVar, image.CSIv1ExternalAttacherContainerImageV2)
	case csiv1:
		return image.GetDefaultImage(image.CSIv1ExternalAttacherImageEnvVar, image.CSIv1ExternalAttacherContainerImage)
	default:
		return image.GetDefaultImage(image.CSIv0ExternalAttacherImageEnvVar, image.CSIv0ExternalAttacherContainerImage)
	}
}
// GetCSIExternalResizerImage reports the CSI external resizer image,
// falling back to the environment/default when unset.
func (s StorageOSClusterSpec) GetCSIExternalResizerImage() string {
	if img := s.Images.CSIExternalResizerContainer; img != "" {
		return img
	}
	return image.GetDefaultImage(image.CSIv1ExternalResizerContainerImageEnvVar, image.CSIv1ExternalResizerContainerImage)
}
// GetCSILivenessProbeImage reports the CSI liveness probe image,
// falling back to the environment/default when unset.
func (s StorageOSClusterSpec) GetCSILivenessProbeImage() string {
	if img := s.Images.CSILivenessProbeContainer; img != "" {
		return img
	}
	return image.GetDefaultImage(image.CSIv1LivenessProbeImageEnvVar, image.CSIv1LivenessProbeContainerImage)
}
// GetHyperkubeImage reports the hyperkube container image for the given k8s
// version. An explicitly configured image wins; otherwise the default
// registry is tagged with the version prefixed by "v".
func (s StorageOSClusterSpec) GetHyperkubeImage(k8sVersion string) string {
	if img := s.Images.HyperkubeContainer; img != "" {
		return img
	}
	// NOTE: Hyperkube is not being used anywhere for now, and there is no
	// environment-variable override for it.
	return image.DefaultHyperkubeContainerRegistry + ":v" + k8sVersion
}
// GetKubeSchedulerImage reports the kube-scheduler container image for the
// given k8s version. An explicitly configured image wins; otherwise an
// environment override is consulted, with a version-tagged fallback image
// derived from the k8s version (prefixed with "v").
func (s StorageOSClusterSpec) GetKubeSchedulerImage(k8sVersion string) string {
	if img := s.Images.KubeSchedulerContainer; img != "" {
		return img
	}
	fallback := image.DefaultKubeSchedulerContainerRegistry + ":v" + k8sVersion
	return image.GetDefaultImage(image.KubeSchedulerImageEnvVar, fallback)
}
// GetNFSServerImage reports the NFS server container image,
// falling back to the environment/default when unset.
func (s StorageOSClusterSpec) GetNFSServerImage() string {
	if img := s.Images.NFSContainer; img != "" {
		return img
	}
	return image.GetDefaultImage(image.NFSImageEnvVar, image.DefaultNFSContainerImage)
}
// GetAPIManagerImage reports the API Manager container image,
// falling back to the environment/default when unset.
func (s StorageOSClusterSpec) GetAPIManagerImage() string {
	if img := s.Images.APIManagerContainer; img != "" {
		return img
	}
	return image.GetDefaultImage(image.APIManagerEnvVar, image.DefaultAPIManagerImage)
}
// GetServiceName reports the cluster service name, falling back to
// DefaultServiceName when none is configured.
func (s StorageOSClusterSpec) GetServiceName() string {
	if s.Service.Name == "" {
		return DefaultServiceName
	}
	return s.Service.Name
}
// APIServiceEndpoint builds the in-cluster API endpoint URL from the
// effective service name, namespace and external port.
func (s StorageOSClusterSpec) APIServiceEndpoint() string {
	name := s.GetServiceName()
	ns := s.GetResourceNS()
	port := s.GetServiceExternalPort()
	return fmt.Sprintf("http://%s.%s.svc:%d", name, ns, port)
}
// GetServiceType reports the cluster service type, falling back to
// DefaultServiceType when none is configured.
func (s StorageOSClusterSpec) GetServiceType() string {
	if s.Service.Type == "" {
		return DefaultServiceType
	}
	return s.Service.Type
}
// GetServiceExternalPort reports the service external port, falling back to
// DefaultServiceExternalPort when the configured value is zero.
func (s StorageOSClusterSpec) GetServiceExternalPort() int {
	if s.Service.ExternalPort == 0 {
		return DefaultServiceExternalPort
	}
	return s.Service.ExternalPort
}
// GetServiceInternalPort reports the service internal port, falling back to
// DefaultServiceInternalPort when the configured value is zero.
func (s StorageOSClusterSpec) GetServiceInternalPort() int {
	if s.Service.InternalPort == 0 {
		return DefaultServiceInternalPort
	}
	return s.Service.InternalPort
}
// GetIngressHostname reports the ingress host name, falling back to
// DefaultIngressHostname when none is configured.
func (s StorageOSClusterSpec) GetIngressHostname() string {
	if s.Ingress.Hostname == "" {
		return DefaultIngressHostname
	}
	return s.Ingress.Hostname
}
// GetCSIEndpoint reports the CSI unix socket endpoint path. A configured
// endpoint wins; otherwise the default is derived from the CSI v1 or v0
// plugin registration path.
func (s StorageOSClusterSpec) GetCSIEndpoint(csiv1 bool) string {
	if s.CSI.Endpoint != "" {
		return s.CSI.Endpoint
	}
	regPath := OldPluginRegistrationPath
	if csiv1 {
		regPath = DefaultPluginRegistrationPath
	}
	return getDefaultCSIEndpoint(regPath)
}
// GetCSIRegistrarSocketDir reports the CSI registrar socket directory,
// falling back to DefaultCSIRegistrarSocketDir when unset.
func (s StorageOSClusterSpec) GetCSIRegistrarSocketDir() string {
	if s.CSI.RegistrarSocketDir == "" {
		return DefaultCSIRegistrarSocketDir
	}
	return s.CSI.RegistrarSocketDir
}
// GetCSIKubeletDir reports the kubelet directory, falling back to
// DefaultCSIKubeletDir when unset.
func (s StorageOSClusterSpec) GetCSIKubeletDir() string {
	if s.CSI.KubeletDir == "" {
		return DefaultCSIKubeletDir
	}
	return s.CSI.KubeletDir
}
// GetCSIPluginDir reports the CSI plugin directory. A configured directory
// wins; otherwise the default is derived from the CSI v1 or v0 plugin
// registration path.
func (s StorageOSClusterSpec) GetCSIPluginDir(csiv1 bool) string {
	if s.CSI.PluginDir != "" {
		return s.CSI.PluginDir
	}
	regPath := OldPluginRegistrationPath
	if csiv1 {
		regPath = DefaultPluginRegistrationPath
	}
	return getDefaultCSIPluginDir(regPath)
}
// GetCSIDeviceDir reports the CSI device directory, falling back to
// DefaultCSIDeviceDir when unset.
func (s StorageOSClusterSpec) GetCSIDeviceDir() string {
	if s.CSI.DeviceDir == "" {
		return DefaultCSIDeviceDir
	}
	return s.CSI.DeviceDir
}
// GetCSIRegistrationDir reports the CSI registration directory. A configured
// directory wins; otherwise the default depends on the CSI version.
func (s StorageOSClusterSpec) GetCSIRegistrationDir(csiv1 bool) string {
	if s.CSI.RegistrationDir != "" {
		return s.CSI.RegistrationDir
	}
	if !csiv1 {
		// For CSI v0 the registration dir and the plugin registration
		// path are the same.
		return OldPluginRegistrationPath
	}
	return DefaultCSIRegistrationDir
}
// GetCSIKubeletRegistrationPath reports the CSI kubelet registration path.
// A configured path wins; otherwise the default is derived from the CSI v1
// or v0 plugin registration path.
func (s StorageOSClusterSpec) GetCSIKubeletRegistrationPath(csiv1 bool) string {
	if s.CSI.KubeletRegistrationPath != "" {
		return s.CSI.KubeletRegistrationPath
	}
	regPath := OldPluginRegistrationPath
	if csiv1 {
		regPath = DefaultPluginRegistrationPath
	}
	return getDefaultCSIKubeletRegistrationPath(regPath)
}
// GetCSIDriverRegistrationMode reports the CSI driver registration mode,
// falling back to DefaultCSIDriverRegistrationMode when unset.
func (s StorageOSClusterSpec) GetCSIDriverRegistrationMode() string {
	if s.CSI.DriverRegistrationMode == "" {
		return DefaultCSIDriverRegistrationMode
	}
	return s.CSI.DriverRegistrationMode
}
// GetCSIDriverRequiresAttachment reports the CSI "driver requires
// attachment" setting (a string flag), falling back to
// DefaultCSIDriverRequiresAttachment when unset.
func (s StorageOSClusterSpec) GetCSIDriverRequiresAttachment() string {
	if s.CSI.DriverRequiresAttachment == "" {
		return DefaultCSIDriverRequiresAttachment
	}
	return s.CSI.DriverRequiresAttachment
}
// GetCSIVersion reports the CSI driver version string: an explicitly
// configured version wins, otherwise "v1" or "v0" depending on csiv1.
func (s StorageOSClusterSpec) GetCSIVersion(csiv1 bool) string {
	if v := s.CSI.Version; v != "" {
		return v
	}
	if !csiv1 {
		return "v0"
	}
	return "v1"
}
// GetCSIDeploymentStrategy reports the CSI helper deployment strategy,
// falling back to DefaultCSIDeploymentStrategy when unset.
func (s StorageOSClusterSpec) GetCSIDeploymentStrategy() string {
	if s.CSI.DeploymentStrategy == "" {
		return DefaultCSIDeploymentStrategy
	}
	return s.CSI.DeploymentStrategy
}
// GetNodeTolerations reports the tolerations applied to the StorageOS node
// container pods: user tolerations merged over the defaults, then sorted by
// key to keep the result idempotent across reconciles.
func (s StorageOSClusterSpec) GetNodeTolerations() []corev1.Toleration {
	merged := mergeTolerations(toleration.GetDefaultNodeTolerations(), s.Tolerations)
	toleration.Sort(merged)
	return merged
}
// GetHelperTolerations reports the tolerations applied to the StorageOS
// helper pods: user tolerations merged over the defaults (parameterized by
// tolerationSeconds), then sorted by key to keep the result idempotent.
func (s StorageOSClusterSpec) GetHelperTolerations(tolerationSeconds int64) []corev1.Toleration {
	merged := mergeTolerations(toleration.GetDefaultHelperTolerations(tolerationSeconds), s.Tolerations)
	toleration.Sort(merged)
	return merged
}
// mergeTolerations merges a slice of tolerations on top of a base slice,
// with overlay entries overwriting base entries that share the same Key.
// The result order is unspecified (map iteration); callers sort afterwards.
func mergeTolerations(base []corev1.Toleration, overlay []corev1.Toleration) []corev1.Toleration {
	// Bug fix: the previous append(base, overlay...) could write overlay
	// elements into base's backing array when it had spare capacity,
	// mutating the caller's slice. Insert from each slice independently
	// instead; overlay is applied second so it wins on duplicate keys.
	tolerations := make(map[string]corev1.Toleration, len(base)+len(overlay))
	for _, t := range base {
		tolerations[t.Key] = t
	}
	for _, t := range overlay {
		tolerations[t.Key] = t
	}
	var ret []corev1.Toleration
	for _, t := range tolerations {
		ret = append(ret, t)
	}
	return ret
}
// ContainerImages contains image names of all the containers used by the operator.
// Any field left empty falls back to an environment/default image via the
// corresponding Get*Image accessor on StorageOSClusterSpec.
type ContainerImages struct {
NodeContainer string `json:"nodeContainer,omitempty"`
InitContainer string `json:"initContainer,omitempty"`
CSINodeDriverRegistrarContainer string `json:"csiNodeDriverRegistrarContainer,omitempty"`
CSIClusterDriverRegistrarContainer string `json:"csiClusterDriverRegistrarContainer,omitempty"`
CSIExternalProvisionerContainer string `json:"csiExternalProvisionerContainer,omitempty"`
CSIExternalAttacherContainer string `json:"csiExternalAttacherContainer,omitempty"`
CSIExternalResizerContainer string `json:"csiExternalResizerContainer,omitempty"`
CSILivenessProbeContainer string `json:"csiLivenessProbeContainer,omitempty"`
HyperkubeContainer string `json:"hyperkubeContainer,omitempty"`
KubeSchedulerContainer string `json:"kubeSchedulerContainer,omitempty"`
NFSContainer string `json:"nfsContainer,omitempty"`
APIManagerContainer string `json:"apiManagerContainer,omitempty"`
}
// StorageOSClusterCSI contains CSI configurations. Empty path/mode fields
// fall back to the DefaultCSI* constants via the GetCSI* accessors on
// StorageOSClusterSpec.
type StorageOSClusterCSI struct {
Enable bool `json:"enable,omitempty"`
Version string `json:"version,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
EnableProvisionCreds bool `json:"enableProvisionCreds,omitempty"`
EnableControllerPublishCreds bool `json:"enableControllerPublishCreds,omitempty"`
EnableNodePublishCreds bool `json:"enableNodePublishCreds,omitempty"`
EnableControllerExpandCreds bool `json:"enableControllerExpandCreds,omitempty"`
RegistrarSocketDir string `json:"registrarSocketDir,omitempty"`
KubeletDir string `json:"kubeletDir,omitempty"`
PluginDir string `json:"pluginDir,omitempty"`
DeviceDir string `json:"deviceDir,omitempty"`
RegistrationDir string `json:"registrationDir,omitempty"`
KubeletRegistrationPath string `json:"kubeletRegistrationPath,omitempty"`
// NOTE: the json tag below is misspelled ("Registeration") but is part of
// the published API; do not correct it without a migration path.
DriverRegistrationMode string `json:"driverRegisterationMode,omitempty"`
DriverRequiresAttachment string `json:"driverRequiresAttachment,omitempty"`
DeploymentStrategy string `json:"deploymentStrategy,omitempty"`
}
// StorageOSClusterService contains Service configurations.
// Name and Type are required; the ports and annotations are optional.
type StorageOSClusterService struct {
	Name string `json:"name"`
	Type string `json:"type"`
	ExternalPort int `json:"externalPort,omitempty"`
	InternalPort int `json:"internalPort,omitempty"`
	Annotations map[string]string `json:"annotations,omitempty"`
}
// StorageOSClusterIngress contains Ingress configurations.
type StorageOSClusterIngress struct {
	// Enable turns the ingress on or off.
	Enable bool `json:"enable,omitempty"`
	Hostname string `json:"hostname,omitempty"`
	// TLS enables TLS termination for the ingress.
	TLS bool `json:"tls,omitempty"`
	Annotations map[string]string `json:"annotations,omitempty"`
}
// NodeHealth contains health status of a node. Each field holds the reported
// status string of one node subsystem.
type NodeHealth struct {
	DirectfsInitiator string `json:"directfsInitiator,omitempty"`
	Director string `json:"director,omitempty"`
	KV string `json:"kv,omitempty"`
	KVWrite string `json:"kvWrite,omitempty"`
	Nats string `json:"nats,omitempty"`
	Presentation string `json:"presentation,omitempty"`
	Rdb string `json:"rdb,omitempty"`
}
// StorageOSClusterKVBackend stores key-value store backend configurations.
type StorageOSClusterKVBackend struct {
	// Address of the key-value store.
	Address string `json:"address,omitempty"`
	// Backend names the key-value store implementation to use.
	Backend string `json:"backend,omitempty"`
}
|
package main
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
vine "github.com/lack-io/vine/service"
log "github.com/lack-io/vine/service/logger"
proto "github.com/lack-io/vine-example/pubsub/proto"
)
// sendEv publishes a fresh event to the given topic once per second, forever.
// Publish failures are logged and the loop continues.
func sendEv(topic string, p vine.Event) {
	t := time.NewTicker(time.Second)
	// The loop never exits in practice, but releasing the ticker on return is
	// still correct should that ever change.
	defer t.Stop()
	// Idiomatic Go: "for range" instead of "for _ = range".
	for range t.C {
		// create new event
		ev := &proto.Event{
			Id:        uuid.New().String(),
			Timestamp: time.Now().Unix(),
			Message:   fmt.Sprintf("Messaging you all day on %s", topic),
		}
		log.Infof("publishing %+v\n", ev)
		// publish an event
		if err := p.Publish(context.Background(), ev); err != nil {
			log.Infof("error publishing: %v", err)
		}
	}
}
// main starts the pubsub example service and spawns one publisher goroutine
// per topic, each sending an event every second, then blocks forever.
func main() {
	// create and initialize the service (parses command line flags)
	svc := vine.NewService(
		vine.Name("go.vine.cli.pubsub"),
	)
	svc.Init()
	// one publisher per topic, each running in its own goroutine
	for _, topic := range []string{"example.topic.pubsub.1", "example.topic.pubsub.2"} {
		go sendEv(topic, vine.NewEvent(topic, svc.Client()))
	}
	// block forever
	select {}
}
|
package v3
import (
"bytes"
"fmt"
"github.com/cockroachdb/cockroach/pkg/util/treeprinter"
)
// init registers the order-by operator with the operator registry under the
// name "order-by", backed by orderByClass.
func init() {
	registerOperator(orderByOp, "order-by", orderByClass{})
}
// newOrderByExpr wraps the given input expression in a new order-by node.
func newOrderByExpr(input *expr) *expr {
	e := &expr{op: orderByOp}
	e.children = []*expr{input}
	return e
}
// orderByClass implements operatorClass for the order-by operator.
type orderByClass struct{}

// Compile-time assertion that orderByClass satisfies operatorClass.
var _ operatorClass = orderByClass{}
// kind reports order-by as a logical, relational operator.
func (orderByClass) kind() operatorKind {
	return logicalKind | relationalKind
}
// layout returns the zero-value expression layout; order-by uses no special
// child slots.
func (orderByClass) layout() exprLayout {
	return exprLayout{}
}
// format pretty-prints the order-by expression and its inputs under tp.
func (orderByClass) format(e *expr, tp treeprinter.Node) {
	n := formatRelational(e, tp)
	formatExprs(n, "inputs", e.inputs())
}
// initKeys is intentionally a no-op: order-by derives no keys of its own.
func (orderByClass) initKeys(e *expr, state *queryState) {
}
// updateProps is intentionally a no-op for order-by.
func (orderByClass) updateProps(e *expr) {
}
// requiredProps returns nil: order-by imposes no physical property
// requirements on its children.
func (orderByClass) requiredProps(required *physicalProps, child int) *physicalProps {
	return nil
}
// ordering defines the ordering of columns provided or required by a
// relation. A negative value indicates descending ordering on the column index
// "-(value+1)".
type ordering []bitmapIndex
// String renders the ordering in its textual form, e.g. "+1,-2".
func (o ordering) String() string {
	var b bytes.Buffer
	o.format(&b)
	return b.String()
}
// format writes the ordering to buf as a comma-separated list of signed
// column indexes: "+<col>" for ascending, "-<idx>" (decoded from the
// -(col+1) encoding) for descending.
func (o ordering) format(buf *bytes.Buffer) {
	for i, col := range o {
		if i > 0 {
			buf.WriteByte(',')
		}
		if col < 0 {
			fmt.Fprintf(buf, "-%d", -(col + 1))
		} else {
			fmt.Fprintf(buf, "+%d", col)
		}
	}
}
// provides returns true iff the required ordering is a prefix of the
// receiver's ordering, i.e. the receiver satisfies the requirement.
// (The receiver must be at least as long as required and match it
// element-by-element over required's length.)
func (o ordering) provides(required ordering) bool {
	if len(o) < len(required) {
		return false
	}
	for i := range required {
		if o[i] != required[i] {
			return false
		}
	}
	return true
}
|
package ccmd
import (
"time"
"github.com/izumin5210/clicontrib/pkg/cbuild"
"github.com/spf13/cobra"
)
// NewVersionCommand returns a new command object for printing version
// information: name, version, release type ("stable" when built exactly at a
// tag, otherwise "canary"), build time and abbreviated commit hash, with a
// "dirty" marker when the git tree was not clean.
func NewVersionCommand() *cobra.Command {
	return &cobra.Command{
		Use:           "version",
		Short:         "Print version information",
		Long:          "Print version information",
		SilenceErrors: true,
		SilenceUsage:  true,
		Run: func(cmd *cobra.Command, _ []string) {
			cfg := cbuild.Default
			releaseType := "stable"
			if cfg.GitTag != cfg.GitNearestTag || cfg.GitNearestTag == "" {
				releaseType = "canary"
			}
			fmtStr := "%s %s %s (%s %s)\n"
			if cfg.GitTreeState != cbuild.TreeStateClean {
				fmtStr = "%s %s %s (%s %s dirty)\n"
			}
			// Abbreviate the commit hash defensively: the previous
			// unconditional cfg.GitCommit[:7] panicked when the build was made
			// without git metadata (empty or short GitCommit).
			commit := cfg.GitCommit
			if len(commit) > 7 {
				commit = commit[:7]
			}
			cmd.Printf(
				fmtStr,
				cfg.Name,
				cfg.Version,
				releaseType,
				cfg.BuildTime.Format(time.RFC3339),
				commit,
			)
		},
	}
}
|
package webglglobe
import (
"github.com/go4orward/gowebgl/wcommon"
"github.com/go4orward/gowebgl/webgl2d"
)
// GlobeMarkLayer is a layer of marks rendered on top of the globe.
type GlobeMarkLayer struct {
	ScnObjs []*webgl2d.SceneObject // 2D SceneObjects to be rendered (in CAMERA space)
}
// NewGlobeMarkLayer returns an empty mark layer ready to accept marks.
func NewGlobeMarkLayer() *GlobeMarkLayer {
	return &GlobeMarkLayer{ScnObjs: make([]*webgl2d.SceneObject, 0)}
}
// ----------------------------------------------------------------------------
// Mark (single instance with its own geometry)
// ----------------------------------------------------------------------------

// AddMark adds a single mark with its own geometry and color, returning the
// receiver for chaining.
// NOTE(review): currently unimplemented — the parameters are ignored.
func (self *GlobeMarkLayer) AddMark(geometry *webgl2d.Geometry, color string) *GlobeMarkLayer {
	return self
}
// ----------------------------------------------------------------------------
// Mark with Instance Poses (multiple instances sharing the same geometry)
// ----------------------------------------------------------------------------

// AddMarkWithPoses adds instanced marks sharing one geometry, returning the
// receiver for chaining.
// NOTE(review): currently unimplemented — the parameters are ignored.
func (self *GlobeMarkLayer) AddMarkWithPoses(geometry *webgl2d.Geometry, poses *wcommon.SceneObjectPoses) *GlobeMarkLayer {
	return self
}
|
package commands
import (
"os/exec"
)
/*
EXEC receives the path to the file the user wants to open and launches it
via exec.Command and cmd.Run, returning any error from starting or running
the program.
*/
func EXEC(pathToExeFile string) (err error) {
	cmd := exec.Command(pathToExeFile)
	// Propagate the run error instead of silently discarding it: the previous
	// version ignored cmd.Run()'s result and always returned nil.
	return cmd.Run()
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/span"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
)
// requestedStat describes one statistic to collect for a table.
type requestedStat struct {
	// columns the statistic covers.
	columns []descpb.ColumnID
	// histogram indicates whether to generate a histogram.
	histogram bool
	// histogramMaxBuckets caps the number of histogram buckets.
	histogramMaxBuckets int
	// name of the resulting statistic.
	name string
	// inverted indicates the stat is collected over inverted index entries.
	inverted bool
}
// histogramSamples is the number of rows the sampler retains for histogram
// construction.
const histogramSamples = 10000

// maxTimestampAge is the maximum allowed age of a scan timestamp during table
// stats collection, used when creating statistics AS OF SYSTEM TIME. The
// timestamp is advanced during long operations as needed. See TableReaderSpec.
//
// The lowest TTL we recommend is 10 minutes. This value must be lower than
// that.
var maxTimestampAge = settings.RegisterDurationSetting(
	"sql.stats.max_timestamp_age",
	"maximum age of timestamp during table statistics collection",
	5*time.Minute,
)
// createStatsPlan builds the physical plan for collecting the requested
// statistics on a table: table readers scan every row, feed per-node Sampler
// processors, and a single SampleAggregator stage combines the samples and
// writes the resulting statistics. Returns an error if reqStats is empty.
func (dsp *DistSQLPlanner) createStatsPlan(
	planCtx *PlanningCtx, desc catalog.TableDescriptor, reqStats []requestedStat, job *jobs.Job,
) (*PhysicalPlan, error) {
	if len(reqStats) == 0 {
		return nil, errors.New("no stats requested")
	}

	details := job.Details().(jobspb.CreateStatsDetails)

	// Calculate the set of columns we need to scan (deduplicated across all
	// requested stats).
	var colCfg scanColumnsConfig
	var tableColSet catalog.TableColSet
	for _, s := range reqStats {
		for _, c := range s.columns {
			if !tableColSet.Contains(c) {
				tableColSet.Add(c)
				colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(c))
			}
		}
	}

	// Create the table readers; for this we initialize a dummy scanNode.
	scan := scanNode{desc: desc}
	err := scan.initDescDefaults(colCfg)
	if err != nil {
		return nil, err
	}
	// Map each scanned column ID to its position in the scan output.
	var colIdxMap catalog.TableColMap
	for i, c := range scan.cols {
		colIdxMap.Set(c.GetID(), i)
	}
	sb := span.MakeBuilder(planCtx.EvalContext(), planCtx.ExtendedEvalCtx.Codec, desc, scan.index)
	// Full-table scan: statistics consider every row.
	scan.spans, err = sb.UnconstrainedSpans()
	if err != nil {
		return nil, err
	}
	scan.isFull = true

	p, err := dsp.createTableReaders(planCtx, &scan)
	if err != nil {
		return nil, err
	}

	if details.AsOf != nil {
		// If the read is historical, set the max timestamp age.
		val := maxTimestampAge.Get(&dsp.st.SV)
		for i := range p.Processors {
			spec := p.Processors[i].Spec.Core.TableReader
			spec.MaxTimestampAgeNanos = uint64(val)
		}
	}

	// Build one SketchSpec per requested stat, split into regular and inverted
	// sketches. sampledColumnIDs records, per output stream column, which table
	// column it samples.
	var sketchSpecs, invSketchSpecs []execinfrapb.SketchSpec
	sampledColumnIDs := make([]descpb.ColumnID, len(scan.cols))
	for _, s := range reqStats {
		spec := execinfrapb.SketchSpec{
			SketchType:          execinfrapb.SketchType_HLL_PLUS_PLUS_V1,
			GenerateHistogram:   s.histogram,
			HistogramMaxBuckets: uint32(s.histogramMaxBuckets),
			Columns:             make([]uint32, len(s.columns)),
			StatName:            s.name,
		}
		for i, colID := range s.columns {
			colIdx, ok := colIdxMap.Get(colID)
			if !ok {
				panic("necessary column not scanned")
			}
			streamColIdx := uint32(p.PlanToStreamColMap[colIdx])
			spec.Columns[i] = streamColIdx
			sampledColumnIDs[streamColIdx] = colID
		}
		if s.inverted {
			// Find the first inverted index on the first column. Although there may be
			// more, we don't currently have a way of using more than one or deciding which
			// one is better.
			// TODO(mjibson): allow multiple inverted indexes on the same column (i.e.,
			// with different configurations). See #50655.
			col := s.columns[0]
			for _, index := range desc.PublicNonPrimaryIndexes() {
				if index.GetType() == descpb.IndexDescriptor_INVERTED && index.InvertedColumnID() == col {
					spec.Index = index.IndexDesc()
					break
				}
			}
			// Even if spec.Index is nil because there isn't an inverted index on
			// the requested stats column, we can still proceed. We aren't generating
			// histograms in that case so we don't need an index descriptor to generate the
			// inverted index entries.
			invSketchSpecs = append(invSketchSpecs, spec)
		} else {
			sketchSpecs = append(sketchSpecs, spec)
		}
	}

	// Set up the samplers.
	sampler := &execinfrapb.SamplerSpec{
		Sketches:         sketchSpecs,
		InvertedSketches: invSketchSpecs,
	}
	// SampleSize is only needed when a histogram was requested.
	for _, s := range reqStats {
		sampler.MaxFractionIdle = details.MaxFractionIdle
		if s.histogram {
			sampler.SampleSize = histogramSamples
		}
	}

	// The sampler outputs the original columns plus a rank column, four
	// sketch columns, and two inverted histogram columns.
	outTypes := make([]*types.T, 0, len(p.GetResultTypes())+5)
	outTypes = append(outTypes, p.GetResultTypes()...)
	// An INT column for the rank of each row.
	outTypes = append(outTypes, types.Int)
	// An INT column indicating the sketch index.
	outTypes = append(outTypes, types.Int)
	// An INT column indicating the number of rows processed.
	outTypes = append(outTypes, types.Int)
	// An INT column indicating the number of rows that have a NULL in any sketch
	// column.
	outTypes = append(outTypes, types.Int)
	// A BYTES column with the sketch data.
	outTypes = append(outTypes, types.Bytes)
	// An INT column indicating the inverted sketch index.
	outTypes = append(outTypes, types.Int)
	// A BYTES column with the inverted index key datum.
	outTypes = append(outTypes, types.Bytes)

	p.AddNoGroupingStage(
		execinfrapb.ProcessorCoreUnion{Sampler: sampler},
		execinfrapb.PostProcessSpec{},
		outTypes,
		execinfrapb.Ordering{},
	)

	// Estimate the expected number of rows based on existing stats in the cache.
	tableStats, err := planCtx.ExtendedEvalCtx.ExecCfg.TableStatsCache.GetTableStats(planCtx.ctx, desc.GetID())
	if err != nil {
		return nil, err
	}
	var rowsExpected uint64
	if len(tableStats) > 0 {
		overhead := stats.AutomaticStatisticsFractionStaleRows.Get(&dsp.st.SV)
		// Convert to a signed integer first to make the linter happy.
		rowsExpected = uint64(int64(
			// The total expected number of rows is the same number that was measured
			// most recently, plus some overhead for possible insertions.
			float64(tableStats[0].RowCount) * (1 + overhead),
		))
	}

	// Set up the final SampleAggregator stage.
	agg := &execinfrapb.SampleAggregatorSpec{
		Sketches:         sketchSpecs,
		InvertedSketches: invSketchSpecs,
		SampleSize:       sampler.SampleSize,
		SampledColumnIDs: sampledColumnIDs,
		TableID:          desc.GetID(),
		JobID:            job.ID(),
		RowsExpected:     rowsExpected,
	}
	// Plan the SampleAggregator on the gateway, unless we have a single Sampler.
	node := dsp.gatewayNodeID
	if len(p.ResultRouters) == 1 {
		node = p.Processors[p.ResultRouters[0]].Node
	}
	p.AddSingleGroupStage(
		node,
		execinfrapb.ProcessorCoreUnion{SampleAggregator: agg},
		execinfrapb.PostProcessSpec{},
		[]*types.T{},
	)
	p.PlanToStreamColMap = []int{}
	return p, nil
}
// createPlanForCreateStats translates the column-stat details stored in a
// CREATE STATISTICS job into requestedStats and delegates to createStatsPlan.
func (dsp *DistSQLPlanner) createPlanForCreateStats(
	planCtx *PlanningCtx, job *jobs.Job,
) (*PhysicalPlan, error) {
	details := job.Details().(jobspb.CreateStatsDetails)
	histogramCollectionEnabled := stats.HistogramClusterMode.Get(&dsp.st.SV)
	reqStats := make([]requestedStat, len(details.ColumnStats))
	for i, colStat := range details.ColumnStats {
		// Per-stat bucket override; otherwise use the default bucket count.
		buckets := defaultHistogramBuckets
		if colStat.HistogramMaxBuckets > 0 {
			buckets = int(colStat.HistogramMaxBuckets)
		}
		reqStats[i] = requestedStat{
			columns:             colStat.ColumnIDs,
			histogram:           colStat.HasHistogram && histogramCollectionEnabled,
			histogramMaxBuckets: buckets,
			name:                details.Name,
			inverted:            colStat.Inverted,
		}
	}
	tableDesc := tabledesc.NewBuilder(&details.Table).BuildImmutableTable()
	return dsp.createStatsPlan(planCtx, tableDesc, reqStats, job)
}
// planAndRunCreateStats plans and executes the distributed flow for a CREATE
// STATISTICS job, streaming results into resultRows. It returns the first
// error recorded by the result writer (or nil on success).
func (dsp *DistSQLPlanner) planAndRunCreateStats(
	ctx context.Context,
	evalCtx *extendedEvalContext,
	planCtx *PlanningCtx,
	txn *kv.Txn,
	job *jobs.Job,
	resultRows *RowResultWriter,
) error {
	// Tag the context so log lines from this flow are attributable.
	ctx = logtags.AddTag(ctx, "create-stats-distsql", nil)

	physPlan, err := dsp.createPlanForCreateStats(planCtx, job)
	if err != nil {
		return err
	}

	dsp.FinalizePlan(planCtx, physPlan)

	recv := MakeDistSQLReceiver(
		ctx,
		resultRows,
		tree.DDL,
		evalCtx.ExecCfg.RangeDescriptorCache,
		txn,
		evalCtx.ExecCfg.Clock,
		evalCtx.Tracing,
		evalCtx.ExecCfg.ContentionRegistry,
		nil, /* testingPushCallback */
	)
	defer recv.Release()

	dsp.Run(planCtx, txn, physPlan, recv, evalCtx, nil /* finishedSetupFn */)()
	// Errors during execution surface through the result writer.
	return resultRows.Err()
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package azure
import (
"bytes"
"compress/gzip"
"encoding/base64"
"text/template"
)
// Startup script used to find/format/mount all local or attached disks.
// Each disk is mounted as /data<disknum>, and, in addition, a symlink
// created from /mnt/data<disknum> to the mount point.

// azureStartupArgs specifies template arguments for the setup template
// (azureStartupTemplate below).
type azureStartupArgs struct {
	RemoteUser string // The uname for /data* directories.
	AttachedDiskLun *int // Use attached disk, with specified LUN; Use local ssd if nil.
}
// azureStartupTemplate is the bash first-boot script, rendered with
// azureStartupArgs and delivered via cloud-init custom data (see
// evalStartupTemplate). The string content is runtime data — do not edit it
// for style; it must remain valid bash.
const azureStartupTemplate = `#!/bin/bash
# Script for setting up a Azure machine for roachprod use.
set -xe
mount_opts="defaults"
{{if .AttachedDiskLun}}
# Setup network attached storage
devices=("/dev/disk/azure/scsi1/lun{{.AttachedDiskLun}}")
{{else}}
# Setup local storage.
devices=($(realpath -qe /dev/disk/by-id/nvme-* | sort -u))
{{end}}
if (( ${#devices[@]} == 0 ));
then
# Use /mnt directly.
echo "No attached or NVME disks found, creating /mnt/data1"
mkdir -p /mnt/data1
chown {{.RemoteUser}} /mnt/data1
else
for d in "${!devices[@]}"; do
disk=${devices[$d]}
mount="/data$((d+1))"
sudo mkdir -p "${mount}"
sudo mkfs.ext4 -F "${disk}"
sudo mount -o "${mount_opts}" "${disk}" "${mount}"
echo "${disk} ${mount} ext4 ${mount_opts} 1 1" | sudo tee -a /etc/fstab
ln -s "${mount}" "/mnt/$(basename $mount)"
done
chown {{.RemoteUser}} /data*
fi
# increase the number of concurrent unauthenticated connections to the sshd
# daemon. See https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Load_Balancing.
# By default, only 10 unauthenticated connections are permitted before sshd
# starts randomly dropping connections.
sh -c 'echo "MaxStartups 64:30:128" >> /etc/ssh/sshd_config'
# Crank up the logging for issues such as:
# https://github.com/cockroachdb/cockroach/issues/36929
sed -i'' 's/LogLevel.*$/LogLevel DEBUG3/' /etc/ssh/sshd_config
service sshd restart
# increase the default maximum number of open file descriptors for
# root and non-root users. Load generators running a lot of concurrent
# workers bump into this often.
sh -c 'echo "root - nofile 1048576\n* - nofile 1048576" > /etc/security/limits.d/10-roachprod-nofiles.conf'
# Send TCP keepalives every minute since GCE will terminate idle connections
# after 10m. Note that keepalives still need to be requested by the application
# with the SO_KEEPALIVE socket option.
cat <<EOF > /etc/sysctl.d/99-roachprod-tcp-keepalive.conf
net.ipv4.tcp_keepalive_time=60
net.ipv4.tcp_keepalive_intvl=60
net.ipv4.tcp_keepalive_probes=5
EOF
# Enable core dumps
cat <<EOF > /etc/security/limits.d/core_unlimited.conf
* soft core unlimited
* hard core unlimited
root soft core unlimited
root hard core unlimited
EOF
mkdir -p /mnt/data1/cores
chmod a+w /mnt/data1/cores
CORE_PATTERN="/mnt/data1/cores/core.%e.%p.%h.%t"
echo "$CORE_PATTERN" > /proc/sys/kernel/core_pattern
sed -i'~' 's/enabled=1/enabled=0/' /etc/default/apport
sed -i'~' '/.*kernel\\.core_pattern.*/c\\' /etc/sysctl.conf
echo "kernel.core_pattern=$CORE_PATTERN" >> /etc/sysctl.conf
sysctl --system  # reload sysctl settings
touch /mnt/data1/.roachprod-initialized
`
// evalStartupTemplate renders the startup template with the given arguments
// and returns it gzip-compressed and base64-encoded — the cloud-init custom
// data format used to configure the VM.
//
// Errors in startup files are hard to debug. If roachprod create does not
// complete, CTRL-c while roachprod is waiting for initialization to complete
// (otherwise, roachprod tries to destroy the partially created cluster).
// Then, ssh to one of the machines:
//  1. /var/log/cloud-init-output.log contains the output of all the steps
//     performed by cloud-init, including the steps performed by above script.
//  2. You can extract the uploaded script and try executing/debugging it via:
//     sudo cloud-init query userdata > script.sh
func evalStartupTemplate(args azureStartupArgs) (string, error) {
	tmpl := template.Must(template.New("start").Parse(azureStartupTemplate))

	// Pipeline: template output -> gzip -> base64 -> in-memory buffer.
	var out bytes.Buffer
	enc := base64.NewEncoder(base64.StdEncoding, &out)
	zw := gzip.NewWriter(enc)
	if err := tmpl.Execute(zw, args); err != nil {
		return "", err
	}
	// Close the gzip stream before the base64 encoder so both flush fully.
	if err := zw.Close(); err != nil {
		return "", err
	}
	if err := enc.Close(); err != nil {
		return "", err
	}
	return out.String(), nil
}
|
package router
import (
"down-date-server/src/api"
"github.com/gin-gonic/gin"
)
// InitBaseRoute registers the unauthenticated user endpoints
// (register, login) on the given router group.
func InitBaseRoute(Router *gin.RouterGroup) {
	group := Router.Group("")
	group.POST("register", api.Register)
	group.POST("login", api.Login)
}
// IintAuthRoute registers the user-search endpoints on the given router group.
// NOTE(review): the name appears to be a typo for "InitAuthRoute"; it is kept
// unchanged because callers reference it by this name.
func IintAuthRoute(Router *gin.RouterGroup) {
	group := Router.Group("")
	group.GET("search_all_user", api.SearchUser)
	group.GET("search_user", api.SearchUserByNickname)
}
|
package main
import "fmt"
// main prints the display name of a favorite sport, falling back to a default
// message when the value matches none of the known sports.
func main() {
	favSport := "coding"
	if favSport == "football" {
		fmt.Println("Football")
	} else if favSport == "hockey" {
		fmt.Println("Hockey")
	} else {
		fmt.Println("You don't have favorite sport")
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package youtube
import (
"context"
"regexp"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/cuj"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/quicksettings"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
const (
	// mouseMoveDuration is the duration of simulated mouse movements.
	mouseMoveDuration = 500 * time.Millisecond
	// longUITimeout bounds slow UI operations such as waiting out ads.
	longUITimeout = time.Minute
	// shortUITimeout bounds quick UI existence checks.
	shortUITimeout = 5 * time.Second
)
var (
	// videoPlayer matches the YouTube player container node.
	videoPlayer = nodewith.NameStartingWith("YouTube Video Player").Role(role.GenericContainer)
	// video matches the video element inside the player.
	video = nodewith.Role(role.Video).Ancestor(videoPlayer)
	// videoButton matches the player's play/pause button.
	videoButton = nodewith.Role(role.Button).Ancestor(videoPlayer).NameRegex(regexp.MustCompile("^(Pause|Play).*"))
)
// YtWeb defines the struct related to youtube web.
type YtWeb struct {
	br    *chrome.Browser // browser hosting the YouTube tab -- NOTE(review): actual type is *browser.Browser
	tconn *chrome.TestConn
	kb    *input.KeyboardEventWriter
	ui    *uiauto.Context
	// ytConn is the DevTools connection to the YouTube tab (set in OpenAndPlayVideo).
	ytConn *chrome.Conn
	// ytWinID is the Ash window ID of the YouTube window (set in OpenAndPlayVideo).
	ytWinID int
	uiHdl   cuj.UIActionHandler
	// extendedDisplay indicates the video should be shown on the extended display.
	extendedDisplay bool
}
// NewYtWeb creates an instance of YtWeb.
// ytConn and ytWinID remain unset until OpenAndPlayVideo runs.
func NewYtWeb(br *browser.Browser, tconn *chrome.TestConn, kb *input.KeyboardEventWriter, extendedDisplay bool, ui *uiauto.Context, uiHdl cuj.UIActionHandler) *YtWeb {
	yt := &YtWeb{}
	yt.br = br
	yt.tconn = tconn
	yt.kb = kb
	yt.ui = ui
	yt.uiHdl = uiHdl
	yt.extendedDisplay = extendedDisplay
	return yt
}
// Install installs the Youtube app with apk.
// NOTE(review): the web flow requires no installation, so this is a no-op —
// presumably kept to satisfy a shared interface; confirm against callers.
func (y *YtWeb) Install(ctx context.Context) error {
	return nil
}
// OpenAndPlayVideo opens a youtube video on chrome: it navigates a new tab to
// video.URL, dismisses app-switch and notification prompts, switches to the
// requested display and quality, skips ads, and leaves the video playing.
// It also records the tab connection and window ID on the receiver.
// NOTE: the parameter shadows the package-level "video" finder inside this
// function.
func (y *YtWeb) OpenAndPlayVideo(video VideoSrc) uiauto.Action {
	return func(ctx context.Context) (err error) {
		testing.ContextLog(ctx, "Open Youtube web")

		y.ytConn, err = y.uiHdl.NewChromeTab(ctx, y.br, video.URL, true)
		if err != nil {
			return errors.Wrap(err, "failed to open youtube tab")
		}

		// Lacros will focus on the search bar after navigating, press Enter to make sure the focus is on the webarea.
		searchBar := nodewith.Role(role.TextField).Name("Address and search bar").Focused()
		if err := uiauto.IfSuccessThen(y.ui.Exists(searchBar), y.kb.AccelAction("Enter"))(ctx); err != nil {
			return err
		}

		if err := webutil.WaitForYoutubeVideo(ctx, y.ytConn, 0); err != nil {
			return errors.Wrap(err, "failed to wait for video element")
		}

		// If prompted to open in YouTube app, instruct device to stay in Chrome.
		stayInChrome := nodewith.Name("Stay in Chrome").Role(role.Button)
		if err := uiauto.IfSuccessThen(
			y.ui.WithTimeout(shortUITimeout).WaitUntilExists(stayInChrome),
			func(ctx context.Context) error {
				testing.ContextLog(ctx, "dialog popped up and asked whether to switch to YouTube app")
				rememberReg := regexp.MustCompile("Remember (my|this) choice")
				rememberChoice := nodewith.NameRegex(rememberReg).Role(role.CheckBox)
				if err := y.uiHdl.Click(rememberChoice)(ctx); err != nil {
					return err
				}
				if err := y.uiHdl.Click(stayInChrome)(ctx); err != nil {
					return err
				}
				testing.ContextLog(ctx, "instructed device to stay on YouTube web")
				return nil
			},
		)(ctx); err != nil {
			return errors.Wrap(err, "failed to instruct device to stay on YouTube web")
		}

		if err := clearNotificationPrompts(ctx, y.ui); err != nil {
			return errors.Wrap(err, "failed to clear notification prompts")
		}

		// Use keyboard to play/pause video and ensure PageLoad.PaintTiming.NavigationToLargestContentfulPaint2
		// can be generated correctly. See b/240998447.
		if err := uiauto.Combine("pause and play with keyboard",
			y.Pause(),
			y.Play(),
		)(ctx); err != nil {
			return errors.Wrap(err, "failed to pause and play before switching quality")
		}

		// Sometimes prompts to grant permission appears after opening a video for a while.
		if err := clearNotificationPrompts(ctx, y.ui); err != nil {
			return errors.Wrap(err, "failed to clear notification prompts")
		}

		// Default expected display is main display.
		if err := cuj.SwitchWindowToDisplay(ctx, y.tconn, y.kb, y.extendedDisplay)(ctx); err != nil {
			if y.extendedDisplay {
				return errors.Wrap(err, "failed to switch Youtube to the extended display")
			}
			return errors.Wrap(err, "failed to switch Youtube to the main display")
		}

		if err := y.SkipAd()(ctx); err != nil {
			return errors.Wrap(err, "failed to click 'Skip Ad' button")
		}

		if err := y.SwitchQuality(video.Quality)(ctx); err != nil {
			return errors.Wrapf(err, "failed to switch resolution to %s", video.Quality)
		}

		y.ytWinID, err = getFirstWindowID(ctx, y.tconn)
		if err != nil {
			return errors.Wrap(err, "failed to get window ID")
		}

		// Ensure the video is playing.
		return uiauto.IfFailThen(y.IsPlaying(), y.Play())(ctx)
	}
}
// SwitchQuality switches youtube quality to the named resolution via the
// player's Settings > Quality menu, then waits for the player to report a
// ready state and ensures the video keeps playing.
func (y *YtWeb) SwitchQuality(resolution string) uiauto.Action {
	return func(ctx context.Context) error {
		testing.ContextLog(ctx, "Switch video quality to ", resolution)

		settings := nodewith.Name("Settings").Role(role.PopUpButton).Ancestor(videoPlayer)
		quality := nodewith.NameStartingWith("Quality").Role(role.MenuItem).Ancestor(videoPlayer)

		if err := y.ui.WaitUntilExists(videoPlayer)(ctx); err != nil {
			return errors.Wrap(err, "failed to find 'YouTube Video Player'")
		}

		startTime := time.Now()
		// The setting panel will automatically disappear if it does not receive any event after a few seconds.
		// Due to the different response time of different DUTs, we need to combine these actions in Poll() to
		// make quality switch work reliably.
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			// If an ad is playing, skip it before proceeding.
			if err := y.SkipAd()(ctx); err != nil {
				return errors.Wrap(err, "failed to click 'Skip Ad' button")
			}
			// Use DoDefault to avoid failure on lacros (see bug b/229003599).
			if err := y.ui.DoDefault(settings)(ctx); err != nil {
				return errors.Wrap(err, "failed to call DoDefault on settings button")
			}
			if err := y.ui.WithTimeout(10 * time.Second).WaitUntilExists(quality)(ctx); err != nil {
				if y.extendedDisplay {
					return errors.Wrap(err, "failed to show the setting panel and click it on extended display")
				}
				return errors.Wrap(err, "failed to show the setting panel and click it on internal display")
			}
			testing.ContextLogf(ctx, "Elapsed time to click setting panel: %.3f s", time.Since(startTime).Seconds())
			return nil
		}, &testing.PollOptions{Interval: 3 * time.Second, Timeout: 30 * time.Second}); err != nil {
			return errors.Wrap(err, "failed to click setting panel")
		}

		// Use DoDefault to avoid failure on lacros (see bug b/229003599).
		if err := y.ui.DoDefault(quality)(ctx); err != nil {
			return errors.Wrap(err, "failed to click 'Quality'")
		}

		resolutionFinder := nodewith.NameStartingWith(resolution).Role(role.MenuItemRadio).Ancestor(videoPlayer)
		if err := y.ui.DoDefault(resolutionFinder)(ctx); err != nil {
			return errors.Wrapf(err, "failed to click %q", resolution)
		}

		if err := waitForYoutubeReadyState(ctx, y.ytConn); err != nil {
			return errors.Wrap(err, "failed to wait for Youtube ready state")
		}

		// Keep the video playing anyway when switch the quality is finished.
		return uiauto.IfFailThen(y.IsPlaying(), y.Play())(ctx)
	}
}
// EnterFullScreen switches youtube video to full screen. It is a no-op when
// the window is already fullscreen.
func (y *YtWeb) EnterFullScreen(ctx context.Context) error {
	testing.ContextLog(ctx, "Make Youtube video full screen")

	if ytWin, err := ash.GetWindow(ctx, y.tconn, y.ytWinID); err != nil {
		return errors.Wrap(err, "failed to get youtube window")
	} else if ytWin.State == ash.WindowStateFullscreen {
		// Already fullscreen; nothing to do.
		return nil
	}

	// Notification prompts are sometimes shown in fullscreen.
	if err := clearNotificationPrompts(ctx, y.ui); err != nil {
		return errors.Wrap(err, "failed to clear notification prompts")
	}

	fullscreenBtn := nodewith.Name("Full screen (f)").Role(role.Button)
	if err := y.ui.DoDefault(fullscreenBtn)(ctx); err != nil {
		return errors.Wrap(err, "failed to click full screen button")
	}

	if err := waitWindowStateFullscreen(ctx, y.tconn, YoutubeWindowTitle); err != nil {
		return errors.Wrap(err, "failed to tap full screen button")
	}

	if err := waitForYoutubeReadyState(ctx, y.ytConn); err != nil {
		return errors.Wrap(err, "failed to wait for Youtube ready state")
	}
	return nil
}
// ExitFullScreen exits Youtube video from full screen using the "f" keyboard
// shortcut. It is a no-op when the window is not fullscreen.
func (y *YtWeb) ExitFullScreen(ctx context.Context) error {
	testing.ContextLog(ctx, "Exit Youtube video from full screen")

	if ytWin, err := ash.GetWindow(ctx, y.tconn, y.ytWinID); err != nil {
		return errors.Wrap(err, "failed to get youtube window")
	} else if ytWin.State != ash.WindowStateFullscreen {
		// Not fullscreen; nothing to do.
		return nil
	}

	if err := clearNotificationPrompts(ctx, y.ui); err != nil {
		return errors.Wrap(err, "failed to clear notification prompts")
	}

	// Move the mouse to a specific location to ensure focus on the video now.
	// Sometimes the button name will be "Exit full screen" or remain "Full screen" even when entering full screen.
	// It's more stable to use keyboard shortcuts.
	if err := uiauto.NamedCombine("click exit full screen button",
		y.ui.MouseMoveTo(videoButton, mouseMoveDuration),
		y.kb.AccelAction("f"),
	)(ctx); err != nil {
		return errors.Wrap(err, "failed to click exit full screen button")
	}

	if err := waitWindowStateExitFullscreen(ctx, y.tconn, YoutubeWindowTitle); err != nil {
		return errors.Wrap(err, "failed to tap exit full screen button")
	}

	if err := waitForYoutubeReadyState(ctx, y.ytConn); err != nil {
		return errors.Wrap(err, "failed to wait for Youtube ready state")
	}
	return nil
}
// SkipAd skips the ad. It polls until no ad text is visible in the player:
// each iteration returns nil (success) when no ad is found, clicks "Skip Ad"
// when the button is available, and otherwise keeps polling until
// longUITimeout expires.
func (y *YtWeb) SkipAd() uiauto.Action {
	return func(ctx context.Context) error {
		testing.ContextLog(ctx, "Checking for YouTube ads")

		adText := nodewith.NameContaining("Ad").Role(role.StaticText).Ancestor(videoPlayer).First()
		skipAdButton := nodewith.NameStartingWith("Skip Ad").Role(role.Button)

		return testing.Poll(ctx, func(ctx context.Context) error {
			if err := y.ui.WithTimeout(shortUITimeout).WaitUntilExists(adText)(ctx); err != nil {
				testing.ContextLog(ctx, "No ads found")
				return nil
			}
			if err := y.ui.Exists(skipAdButton)(ctx); err != nil {
				return errors.Wrap(err, "'Skip Ads' button not available yet")
			}
			if err := y.uiHdl.Click(skipAdButton)(ctx); err != nil {
				return errors.Wrap(err, "failed to click 'Skip Ads'")
			}
			// Returning an error forces another poll iteration, which re-checks
			// that the ad actually disappeared after clicking.
			return errors.New("have not determined whether the ad has been skipped successfully")
		}, &testing.PollOptions{Timeout: longUITimeout})
	}
}
// MaximizeWindow maximizes the youtube video window and waits until the
// window reports the maximized state with no running animation. It is a
// no-op when the window is already maximized.
func (y *YtWeb) MaximizeWindow(ctx context.Context) error {
	testing.ContextLog(ctx, "Maximize Youtube video window")

	ytWin, err := ash.GetWindow(ctx, y.tconn, y.ytWinID)
	if err != nil {
		return errors.Wrap(err, "failed to get youtube window")
	}
	if ytWin.State == ash.WindowStateMaximized {
		return nil
	}

	btn := nodewith.Name("Maximize").HasClass("FrameCaptionButton").Role(role.Button)
	if err := y.uiHdl.Click(btn)(ctx); err != nil {
		return errors.Wrap(err, "failed to maximize the window")
	}

	maximized := func(w *ash.Window) bool {
		return w.ID == y.ytWinID && w.State == ash.WindowStateMaximized && !w.IsAnimating
	}
	if err := ash.WaitForCondition(ctx, y.tconn, maximized, &testing.PollOptions{Timeout: 5 * time.Second}); err != nil {
		return errors.Wrap(err, "failed to wait for window to become maximized")
	}
	return nil
}
// MinimizeWindow minimizes the youtube video window.
// It is a no-op when the window is already minimized; otherwise it clicks
// the caption's Minimize button and waits for the state change (and the
// animation) to finish.
func (y *YtWeb) MinimizeWindow(ctx context.Context) error {
	testing.ContextLog(ctx, "Minimize Youtube video window")
	win, err := ash.GetWindow(ctx, y.tconn, y.ytWinID)
	if err != nil {
		return errors.Wrap(err, "failed to get youtube window")
	}
	// Nothing to do if the window is already minimized.
	if win.State == ash.WindowStateMinimized {
		return nil
	}
	minimizeButton := nodewith.Name("Minimize").HasClass("FrameCaptionButton").Role(role.Button)
	if err := y.uiHdl.Click(minimizeButton)(ctx); err != nil {
		return errors.Wrap(err, "failed to minimize the window")
	}
	isMinimized := func(w *ash.Window) bool {
		return w.ID == y.ytWinID && w.State == ash.WindowStateMinimized && !w.IsAnimating
	}
	if err := ash.WaitForCondition(ctx, y.tconn, isMinimized, &testing.PollOptions{Timeout: 5 * time.Second}); err != nil {
		return errors.Wrap(err, "failed to wait for window to become minimized")
	}
	return nil
}
// RestoreWindow restores the youtube video to normal state.
// SetWindowState already waits for the state change; the extra
// WaitForCondition additionally waits for the animation to settle.
func (y *YtWeb) RestoreWindow(ctx context.Context) error {
	testing.ContextLog(ctx, "Restore Youtube video window")
	if _, err := ash.SetWindowState(ctx, y.tconn, y.ytWinID, ash.WMEventNormal, true /* waitForStateChange */); err != nil {
		return errors.Wrap(err, "failed to set the window state to normal")
	}
	if err := ash.WaitForCondition(ctx, y.tconn, func(w *ash.Window) bool {
		return w.ID == y.ytWinID && w.State == ash.WindowStateNormal && !w.IsAnimating
	}, &testing.PollOptions{Timeout: 5 * time.Second}); err != nil {
		return errors.Wrap(err, "failed to wait for window to become normal")
	}
	return nil
}
// PauseAndPlayVideo verifies video playback on youtube web.
// Sequence: skip any ad, ensure the video is actually playing, then pause
// and play once each.
func (y *YtWeb) PauseAndPlayVideo(ctx context.Context) error {
	return uiauto.NamedCombine("pause and play video",
		y.SkipAd(),
		// The video should be playing at this point. However, we'll double check to make sure
		// as we have seen a few cases where the video became paused automatically.
		uiauto.IfFailThen(y.IsPlaying(), y.Play()),
		y.Pause(),
		y.Play(),
	)(ctx)
}
// Play returns a function to play the video.
// It is a no-op unless the video is currently paused; otherwise it types
// "k" repeatedly until the player reports the playing state.
func (y *YtWeb) Play() uiauto.Action {
	return uiauto.IfSuccessThen(y.IsPaused(), uiauto.NamedCombine("play video",
		y.ui.WithTimeout(longUITimeout).RetryUntil(y.kb.TypeAction("k"), y.IsPlaying())))
}
// Pause returns a function to pause the video.
// It is a no-op unless the video is currently playing; otherwise it types
// "k" repeatedly until the player reports the paused state.
func (y *YtWeb) Pause() uiauto.Action {
	return uiauto.IfSuccessThen(y.IsPlaying(), uiauto.NamedCombine("pause video",
		y.ui.WithTimeout(longUITimeout).RetryUntil(y.kb.TypeAction("k"), y.IsPaused())))
}
// StartCast casts YouTube video to a specified screen connected to ADT-3.
// It opens the cast UI from quick settings, enters accessCode, retries the
// entry once if an "incorrect access code" message appears, and finally
// pauses and plays the video to confirm playback control still works.
func (y *YtWeb) StartCast(accessCode string) uiauto.Action {
	accessCodeTextField := nodewith.Name("Type the access code to start casting").Role(role.TextField).Editable()
	incorrectPasswordText := nodewith.Name("You've entered an incorrect access code. Try again.").Role(role.StaticText)
	enterCastCode := uiauto.NamedCombine("enter the access code",
		y.kb.TypeAction(accessCode),
		y.kb.AccelAction("Enter"),
	)
	return uiauto.NamedCombine("start casting the video",
		quicksettings.StartCast(y.tconn),
		y.ui.WaitUntilExists(accessCodeTextField),
		enterCastCode,
		// One retry: clear the field (Ctrl+A, Backspace) and re-enter the code.
		uiauto.IfSuccessThen(y.ui.WithTimeout(shortUITimeout).WaitUntilExists(incorrectPasswordText),
			uiauto.Combine("input access code again",
				y.kb.AccelAction("Ctrl+A"),
				y.kb.AccelAction("Backspace"),
				enterCastCode,
			),
		),
		y.Pause(),
		y.Play(),
	)
}
// StopCast stops casting YouTube video via the quick settings tray.
func (y *YtWeb) StopCast() uiauto.Action {
	return uiauto.NamedAction("stop casting the video", quicksettings.StopCast(y.tconn))
}
// ResetCastStatus resets the cast settings if the YouTube video is already casting.
// It opens the browser menu for the YouTube window and, when the cast
// dialog lists a device marked "Available", clicks that entry.
// NOTE(review): clicking the "Available" entry is assumed to toggle the
// device back to its idle state — confirm against the cast dialog behavior.
func (y *YtWeb) ResetCastStatus() uiauto.Action {
	youtubeWindow := nodewith.NameContaining("YouTube").Role(role.Window).HasClass("Widget")
	customizeChromeButton := nodewith.Name("Chrome").Role(role.PopUpButton).Ancestor(youtubeWindow)
	castDialog := nodewith.NameStartingWith("Cast").Role(role.AlertDialog).Ancestor(youtubeWindow)
	availableButton := nodewith.NameContaining("Available").Role(role.Button).Ancestor(castDialog).First()
	return uiauto.NamedCombine("reset to available",
		y.uiHdl.Click(customizeChromeButton),
		uiauto.IfSuccessThen(y.ui.WithTimeout(shortUITimeout).WaitUntilExists(availableButton), y.uiHdl.Click(availableButton)),
	)
}
// Titles of the YouTube player's play/pause toggle button. Which title is
// present tells the current playback state (see IsPlaying / IsPaused).
const (
	playButton = "Play (k)"   // shown while the video is paused
	pauseButton = "Pause (k)" // shown while the video is playing
)
// getPlayButtonTitle reads the title attribute of the player's play/pause
// toggle button via JavaScript. The title distinguishes playing from paused
// (see the playButton/pauseButton constants).
func (y *YtWeb) getPlayButtonTitle(ctx context.Context) (result string, err error) {
	script := `document.querySelector(".ytp-play-button").title`
	if err := y.ytConn.Eval(ctx, script, &result); err != nil {
		return result, errors.Wrap(err, "failed to get result")
	}
	return result, nil
}
// IsPlaying checks if the video is playing now.
// It polls the play/pause toggle's title for up to 10 seconds; the button
// reads "Pause (k)" (pauseButton) while the video is playing.
func (y *YtWeb) IsPlaying() uiauto.Action {
	return func(ctx context.Context) error {
		return testing.Poll(ctx, func(ctx context.Context) error {
			title, err := y.getPlayButtonTitle(ctx)
			if err != nil {
				return err
			}
			if title == pauseButton {
				testing.ContextLog(ctx, "Youtube is playing")
				return nil
			}
			return errors.Errorf("youtube is not playing; got (%s)", title)
		}, &testing.PollOptions{Timeout: 10 * time.Second})
	}
}
// IsPaused checks if the video is paused now.
// It polls the play/pause toggle's title for up to 10 seconds; the button
// reads "Play (k)" (playButton) while the video is paused.
func (y *YtWeb) IsPaused() uiauto.Action {
	return func(ctx context.Context) error {
		return testing.Poll(ctx, func(ctx context.Context) error {
			title, err := y.getPlayButtonTitle(ctx)
			if err != nil {
				return err
			}
			if title == playButton {
				testing.ContextLog(ctx, "Youtube is paused")
				return nil
			}
			return errors.Errorf("youtube is not paused; got (%s)", title)
		}, &testing.PollOptions{Timeout: 10 * time.Second})
	}
}
// waitForYoutubeReadyState waits until the main youtube <video> element
// reports readyState === 4 (enough data to play through) and has at least
// one buffered range, polling once per second up to longUITimeout.
func waitForYoutubeReadyState(ctx context.Context, conn *chrome.Conn) error {
	startTime := time.Now()
	// Wait for element to appear.
	return testing.Poll(ctx, func(ctx context.Context) error {
		// Querying the main <video> node in youtube page.
		var state bool
		if err := conn.Call(ctx, &state, `() => {
			let video = document.querySelector("#movie_player > div.html5-video-container > video");
			return video.readyState === 4 && video.buffered.length > 0;
		}`); err != nil {
			return err
		}
		if !state {
			return errors.New("failed to wait for youtube on ready state")
		}
		testing.ContextLogf(ctx, "Elapsed time when waiting for youtube ready state: %.3f s", time.Since(startTime).Seconds())
		return nil
	}, &testing.PollOptions{Interval: time.Second, Timeout: longUITimeout})
}
// Close closes the resources related to video.
// It closes the YouTube tab (CloseTarget) and releases the devtools
// connection; the nil reset makes repeated calls safe no-ops.
func (y *YtWeb) Close(ctx context.Context) {
	if y.ytConn != nil {
		y.ytConn.CloseTarget(ctx)
		y.ytConn.Close()
		y.ytConn = nil
	}
}
// clearNotificationPrompts finds and clears some youtube web prompts.
// It returns immediately when none of the known prompt buttons ("Allow",
// "Never", "NO THANKS") are present; otherwise it dismisses each one that
// appears, waiting for it to disappear before moving on.
func clearNotificationPrompts(ctx context.Context, ui *uiauto.Context) error {
	// Fix: locals were misspelled "tartgetPrompt(s)"; renamed to "target".
	targetPrompts := nodewith.NameRegex(regexp.MustCompile("(Allow|Never|NO THANKS)")).Role(role.Button)
	nodes, err := ui.NodesInfo(ctx, targetPrompts)
	if err != nil {
		return err
	}
	// Nothing to dismiss.
	if len(nodes) == 0 {
		return nil
	}
	testing.ContextLog(ctx, "Start to clear notification prompts")
	prompts := []string{"Allow", "Never", "NO THANKS"}
	for _, name := range prompts {
		targetPrompt := nodewith.Name(name).Role(role.Button)
		if err := uiauto.IfSuccessThen(
			ui.WithTimeout(shortUITimeout).WaitUntilExists(targetPrompt),
			ui.DoDefaultUntil(targetPrompt, ui.WithTimeout(shortUITimeout).WaitUntilGone(targetPrompt)),
		)(ctx); err != nil {
			testing.ContextLogf(ctx, "Failed to clear prompt %q", name)
			return err
		}
	}
	return nil
}
// PerformFrameDropsTest checks for dropped frames percent and checks if it is below the threshold.
// It reads total and dropped frame counts from the player's
// getVideoPlaybackQuality() and fails when the drop percentage exceeds the
// threshold.
func (y *YtWeb) PerformFrameDropsTest(ctx context.Context) error {
	// If we see more than 10% video frame drops it will be visible to the user and will impact the viewing experience.
	const frameDropThreshold float64 = 10.0
	var decodedFrameCount, droppedFrameCount int
	videoElement := "document.querySelector('#movie_player video')"
	if err := y.ytConn.Eval(ctx, videoElement+".getVideoPlaybackQuality().totalVideoFrames", &decodedFrameCount); err != nil {
		return errors.Wrap(err, "failed to get decoded framecount")
	}
	if err := y.ytConn.Eval(ctx, videoElement+".getVideoPlaybackQuality().droppedVideoFrames", &droppedFrameCount); err != nil {
		return errors.Wrap(err, "failed to get dropped framecount")
	}
	// Treat "no frames decoded" as a 100% drop rate so the check fails loudly.
	droppedFramePercent := 100.0
	if decodedFrameCount != 0 {
		// Bug fix: convert each count to float64 BEFORE dividing. The previous
		// float64(droppedFrameCount/decodedFrameCount) performed integer
		// division first, truncating any drop rate below 100% to 0.
		droppedFramePercent = 100.0 * float64(droppedFrameCount) / float64(decodedFrameCount)
	}
	if droppedFramePercent > frameDropThreshold {
		return errors.Errorf("frame drops rate %.2f (dropped %d, decoded %d) higher than allowed threshold %.2f", droppedFramePercent, droppedFrameCount, decodedFrameCount, frameDropThreshold)
	}
	return nil
}
// YtWebConn returns connection of youtube web.
// It exposes the underlying devtools connection for callers that need to
// evaluate their own JavaScript against the YouTube tab.
func (y *YtWeb) YtWebConn() *chrome.Conn {
	return y.ytConn
}
|
package main
import "fmt"
// summaryRanges collapses a sorted integer slice into range strings:
// a run of consecutive values becomes "start->end" and a lone value
// becomes just "value". An empty input yields an empty (non-nil) slice.
func summaryRanges(nums []int) []string {
	if len(nums) == 0 {
		return []string{}
	}
	format := func(lo, hi int) string {
		if lo == hi {
			return fmt.Sprint(lo)
		}
		return fmt.Sprintf("%d->%d", lo, hi)
	}
	var ret []string
	runStart := nums[0]
	// Walk one past the end so the final run is flushed by the same code path.
	for i := 1; i <= len(nums); i++ {
		if i < len(nums) && nums[i] == nums[i-1]+1 {
			continue // still inside the current consecutive run
		}
		ret = append(ret, format(runStart, nums[i-1]))
		if i < len(nums) {
			runStart = nums[i]
		}
	}
	return ret
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lcd20x4
import (
"github.com/dirkjabl/bricker"
"github.com/dirkjabl/bricker/device"
)
// ClearDisplay is a subscriber to clear the LCD display.
// id is a subscriber identifier (falling back to "ClearDisplay" when empty,
// per device.FallbackId), uid addresses the bricklet, and handler receives
// the result or error once the call completes.
func ClearDisplay(id string, uid uint32, handler func(device.Resulter, error)) *device.Device {
	return device.Generator{
		Id: device.FallbackId(id, "ClearDisplay"),
		Fid: function_clear_display,
		Uid: uid,
		Handler: handler,
		WithPacket: true}.CreateDevice()
}
// ClearDisplayFuture is the future version of the ClearDisplay subscriber.
// It subscribes synchronously, blocks until the handler delivers a result,
// and returns true only for an empty-OK result. A failed Subscribe returns
// false immediately.
// NOTE(review): if the handler is never invoked after a successful
// Subscribe, the receive from future blocks forever — confirm the bricker
// layer guarantees a callback.
func ClearDisplayFuture(brick *bricker.Bricker, connectorname string, uid uint32) bool {
	future := make(chan bool)
	sub := ClearDisplay("cleardisplayfuture"+device.GenId(), uid,
		func(r device.Resulter, err error) {
			future <- device.IsEmptyResultOk(r, err)
		})
	err := brick.Subscribe(sub, connectorname)
	if err != nil {
		return false
	}
	b := <-future
	close(future)
	return b
}
|
package config
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestLoadConfig verifies that a config loaded from an explicit YAML path
// carries the address declared in that file.
func TestLoadConfig(t *testing.T) {
	config := LoadConfig("../testdata/config/example.yml")
	assert.Equal(t, "127.0.0.1:9999", config.Address)
}
// TestLoadDefaultConfig verifies that LoadConfig with an empty path picks up
// the default config from the current working directory.
func TestLoadDefaultConfig(t *testing.T) {
	wd, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	// Restore the original working directory even if an assertion fails;
	// the previous version only restored it on full success and ignored
	// both Chdir errors.
	defer os.Chdir(wd)
	if err := os.Chdir("../testdata/config"); err != nil {
		t.Fatalf("chdir: %v", err)
	}
	config := LoadConfig("")
	assert.Equal(t, "127.0.0.1:7777", config.Address)
}
|
package database
import (
	"database/sql"
	"fmt"
	"strconv"
	"strings"
)
// execQuery is a simple wrapper for the "Exec" sql method.
// The query and its arguments are printed to stdout first (debug logging).
func (gbs *goBanksSql) execQuery(query string, args ...interface{},
) (sql.Result, error) {
	fmt.Printf("%s : %+v\n", query, args)
	return gbs.db.Exec(query, args...)
}
// getRows is a simple wrapper for the "Query" sql method.
// The query and its arguments are printed to stdout first (debug logging);
// the caller owns closing the returned rows.
func (gbs *goBanksSql) getRows(query string,
	args ...interface{}) (*sql.Rows, error) {
	fmt.Printf("%s : %+v\n", query, args)
	return gbs.db.Query(query, args...)
}
// joinStringsWithSpace concatenates the given fragments with single spaces.
// example: joinStringsWithSpace("foo", "bar", "baz") => "foo bar baz"
func joinStringsWithSpace(parts ...string) string {
	return strings.Join(parts, " ")
}
// constructSelectString constructs the beginning of a SELECT sql request
// based on the wanted fields.
// example: constructSelectString("foo", []string{"aa", "bb"}) =>
//          "SELECT aa, bb FROM foo"
func constructSelectString(tablename string, fields []string) string {
	// strings.Join renders "aa, bb". The previous hand-rolled loop emitted a
	// stray space before each comma ("aa , bb"), contradicting the documented
	// format (harmless to SQL, but inconsistent).
	return "SELECT " + strings.Join(fields, ", ") + " FROM " + tablename
}
// constructDeleteString constructs the simple beginning of a DELETE sql
// request.
// example: constructDeleteString("foo") => "DELETE FROM foo"
func constructDeleteString(tablename string) string {
	const prefix = "DELETE FROM "
	return prefix + tablename
}
// constructLimitString constructs a simple SQL LIMIT instruction.
// example: constructLimitString(100) => "LIMIT 100"
func constructLimitString(limit uint) string {
	// Bug fix: string(limit) converted the number to the rune with that code
	// point (100 => "d"), not to its decimal representation.
	return "LIMIT " + strconv.FormatUint(uint64(limit), 10)
}
// updateTable performs a UPDATE request on any database table.
// Here are the arguments it needs:
// - tablename: the name of the database table
// - conditions: the "where string", optional
// - conditionsArgs: arguments for the where string, optional
// - fields: the wanted fields to update
// - args: the new values for the wanted fields
// The placeholder arguments are the field values followed by the condition
// values, matching their order in the generated query.
func (gbs *goBanksSql) updateTable(tablename string,
	conditions string, conditionsArgs []interface{}, fields []string,
	args []interface{}) (err error) {
	var sqlQuery string
	var requestStr string = "UPDATE " + tablename + " SET "
	var valuesStr string = ""
	var fieldsLength = len(fields)
	// Build "f1=?, f2=?, ..." for every field to update.
	for i, field := range fields {
		valuesStr += field + "=?"
		if i < fieldsLength-1 {
			valuesStr += ", "
		}
	}
	valuesStr += " " + conditions
	args = append(args, conditionsArgs...)
	// Note: requestStr already ends with a space, so the query contains a
	// doubled space after "SET" — harmless to SQL.
	sqlQuery = requestStr + " " + valuesStr
	_, err = gbs.execQuery(sqlQuery, args...)
	return
}
// removeElemFromTable remove sql row(s) based on the table name, a single
// field and its value.
// example: removeElemFromTable("my_table", "id", 5)
func (gbs *goBanksSql) removeElemFromTable(tablename string, field string,
	value interface{}) error {
	query := fmt.Sprintf("DELETE FROM %s WHERE %s=?", tablename, field)
	_, err := gbs.execQuery(query, value)
	return err
}
// insertInTable performs an INSERT INTO SQL call based on the given
// table name, fields and values. It returns the Id of the new given row
// and a possible sql error.
// example: insertInTable("my_table", []string{"foo", "biz"},
//          []int{55, 31) => 2, nil
func (gbs *goBanksSql) insertInTable(tablename string, fields []string,
	values []interface{}) (int, error) {
	// One "?" placeholder per field, joined the same way as the field list.
	placeholders := make([]string, len(fields))
	for i := range placeholders {
		placeholders[i] = "?"
	}
	query := "INSERT INTO " + tablename + "(" + strings.Join(fields, ", ") +
		")" + " " + "values (" + strings.Join(placeholders, ", ") + ")"
	res, err := gbs.execQuery(query, values...)
	if err != nil {
		return 0, err
	}
	id64, err := res.LastInsertId()
	return int(id64), err
}
// stripIdField returns the values of the given map, skipping every entry
// whose value is exactly "id". Iteration order of the result follows Go's
// (randomized) map order.
// example: stripIdField(map[string]string{"toto":"foo", "tutu":"id",
//          "titi": "bar"}) => []string{"foo", "bar"} (in some order)
func stripIdField(fields map[string]string) []string {
	res := make([]string, 0)
	for _, v := range fields {
		if v == "id" {
			continue
		}
		res = append(res, v)
	}
	return res
}
// filterFields returns the fieldsMap values for the given keys, preserving
// the order of the fields slice and skipping keys that are absent.
func filterFields(fields []string, fieldsMap map[string]string) []string {
	res := make([]string, 0)
	for _, key := range fields {
		v, ok := fieldsMap[key]
		if !ok {
			continue
		}
		res = append(res, v)
	}
	return res
}
// addConditionEq appends an equality ("=") condition for field.
func addConditionEq(cond *string, args *[]interface{}, field string,
	x interface{}) {
	addConditionOperator(cond, args, field, x, "=")
}

// addConditionGEq appends a greater-or-equal (">=") condition for field.
func addConditionGEq(cond *string, args *[]interface{}, field string,
	x interface{}) {
	addConditionOperator(cond, args, field, x, ">=")
}

// addConditionLEq appends a less-or-equal ("<=") condition for field.
func addConditionLEq(cond *string, args *[]interface{}, field string,
	x interface{}) {
	addConditionOperator(cond, args, field, x, "<=")
}

// addConditionOperator appends "<field> <operator> ? " to the condition
// string — prefixed with "AND " when a condition is already present — and
// records x as the corresponding placeholder argument.
func addConditionOperator(cond *string, args *[]interface{}, field string,
	x interface{}, operator string) {
	if len(*cond) > 0 {
		*cond += "AND "
	}
	*cond += field + " " + operator + " ? "
	*args = append(*args, x)
}
// addConditionOneOf appends a "( field = ? OR field = ? ... )" condition for
// every element of x, which must be a []int or a []string.
// It returns false — leaving the condition untouched — for an empty slice
// or an unsupported type.
func addConditionOneOf(cString *string, args *[]interface{}, field string,
	x interface{}) bool {
	// Renamed temp_str/temp_args (underscores are non-idiomatic Go).
	var filterStr string
	var filterArgs []interface{}
	switch val := x.(type) {
	case []int:
		if len(val) == 0 {
			return false
		}
		filterStr, filterArgs = addSqlFilterIntArray(field, val...)
	case []string:
		if len(val) == 0 {
			return false
		}
		filterStr, filterArgs = addSqlFilterStringArray(field, val...)
	default:
		// Fix: an unsupported type previously fell through and appended a
		// dangling "AND " plus a lone space to the condition string.
		return false
	}
	if len(*cString) > 0 {
		*cString += "AND "
	}
	*cString += filterStr + " "
	*args = append(*args, filterArgs...)
	return true
}
// addSqlFilterArray construct both the "condition string" and the sql
// arguments for the db.Query method, from the wanted array.
// Example of a returned "condition string":
//  ( myField = ? OR myField = ? )
// Example of a returned sql arguments array:
//  []{myValue, myValue}
//
// Warning: Does not work with array of strings, you need to call
// addSqlFilterStringArray for that.
//
// "Repeated" for lack of generic. Surely not the best solution ever.
// Will see about that later.
func addSqlFilterArray(fieldName string,
	elems ...interface{}) (fstring string, farg []interface{}) {
	count := len(elems) // previously declared as "len", shadowing the builtin
	fstring += "( "
	for i, elem := range elems {
		farg = append(farg, elem)
		fstring += fieldName + " = ? "
		if i < count-1 {
			fstring += "OR "
		}
	}
	fstring += ")"
	return
}
// addSqlFilterIntArray builds "( field = ? OR ... )" plus the argument list
// for a []int filter; see addSqlFilterArray.
func addSqlFilterIntArray(fieldName string,
	elems ...int) (fstring string, farg []interface{}) {
	count := len(elems) // previously declared as "len", shadowing the builtin
	fstring += "( "
	for i, elem := range elems {
		farg = append(farg, elem)
		fstring += fieldName + " = ? "
		if i < count-1 {
			fstring += "OR "
		}
	}
	fstring += ")"
	return
}
// addSqlFilterStringArray builds "( field = ? OR ... )" plus the argument
// list for a []string filter; see addSqlFilterArray.
func addSqlFilterStringArray(fieldName string,
	elems ...string) (fstring string, farg []interface{}) {
	count := len(elems) // previously declared as "len", shadowing the builtin
	fstring += "( "
	for i, elem := range elems {
		farg = append(farg, elem)
		fstring += fieldName + " = ? "
		if i < count-1 {
			fstring += "OR "
		}
	}
	fstring += ")"
	return fstring, farg
}
// processFilterQuery finalizes a filter: on !ok it propagates the failure,
// on an empty condition it yields no WHERE clause, and otherwise it
// prefixes the condition with "WHERE ".
func processFilterQuery(conditionString string,
	args []interface{}, ok bool) (string, []interface{}, bool) {
	if !ok {
		return "", nil, false
	}
	// Bug fix: the original tested len(conditionString) < 0, which is never
	// true, so an empty condition produced a dangling "WHERE ".
	if len(conditionString) == 0 {
		return "", nil, true
	}
	return "WHERE " + conditionString, args, true
}
// addFilterOneOf appends a one-of condition for field when the filter is
// activated; it reports false only when the condition could not be built.
func addFilterOneOf(cString *string, args *[]interface{}, field string,
	filter dbFilterInterface) bool {
	if !filter.isFilterActivated() {
		return true
	}
	return addConditionOneOf(cString, args, field, filter.getFilterValue())
}
// addFiltersOneOf appends a one-of condition for each activated filter,
// pairing filters[i] with fields[i]. It stops and reports false on the
// first condition that cannot be built.
func addFiltersOneOf(cString *string, args *[]interface{}, fields []string,
	filters ...dbFilterInterface) bool {
	for i, filter := range filters {
		if !filter.isFilterActivated() {
			continue
		}
		if !addConditionOneOf(cString, args, fields[i], filter.getFilterValue()) {
			return false
		}
	}
	return true
}
// addFilterGEq appends a ">=" condition for field when filter is activated.
func addFilterGEq(cString *string, args *[]interface{}, field string,
	filter dbFilterInterface) {
	if !filter.isFilterActivated() {
		return
	}
	addConditionGEq(cString, args, field, filter.getFilterValue())
}

// addFilterLEq appends a "<=" condition for field when filter is activated.
func addFilterLEq(cString *string, args *[]interface{}, field string,
	filter dbFilterInterface) {
	if !filter.isFilterActivated() {
		return
	}
	addConditionLEq(cString, args, field, filter.getFilterValue())
}

// addFilterEq appends an "=" condition for field when filter is activated.
func addFilterEq(cString *string, args *[]interface{}, field string,
	filter dbFilterInterface) {
	if !filter.isFilterActivated() {
		return
	}
	addConditionEq(cString, args, field, filter.getFilterValue())
}
|
package controllers
import (
"net/http"
"rnl360-api/entity"
"rnl360-api/models"
u "rnl360-api/utils"
)
// GetAllResponseType handles the HTTP request listing all response types.
// It responds with an error message when the lookup fails or yields no
// rows, and otherwise returns the list under the "results" key.
var GetAllResponseType = func(w http.ResponseWriter, r *http.Request) {
	// Renamed rsponseTypeList/err_list: misspelled and underscore-style
	// locals are non-idiomatic Go. The redundant else after return is gone.
	// NOTE: the "Rsponse" typos in the messages below are kept byte-for-byte
	// since clients may match on them.
	var responseTypeList []models.ResponseTypeModel
	if err := entity.GetAllResponseType(&responseTypeList); err != nil {
		u.Respond(w, u.Message(false, "Rsponse type list error !", err.Error()))
		return
	}
	if len(responseTypeList) == 0 {
		u.Respond(w, u.Message(false, "Rsponse type list empty !", ""))
		return
	}
	resp := u.Message(true, "Rsponse type list found!", "")
	resp["results"] = responseTypeList
	u.Respond(w, resp)
}
|
package cli
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestDisable exercises the disable command against a fixture that starts
// with resources enabled_a, enabled_b, enabled_c plus the Tiltfile.
// Each case lists the CLI args, the resources expected to remain enabled
// afterwards, and (for failure cases) a substring of the expected error.
func TestDisable(t *testing.T) {
	for _, tc := range []struct {
		name string
		args []string
		// expectedEnabled is left nil on error cases; the run loop below
		// overwrites it with the full initial set, since a failing command
		// must not change any enable state.
		expectedEnabled []string
		expectedError string
	}{
		{
			"normal",
			[]string{"enabled_a", "enabled_b"},
			[]string{"enabled_c", "(Tiltfile)"},
			"",
		},
		{
			"all",
			[]string{"--all"},
			[]string{"(Tiltfile)"},
			"",
		},
		{
			"all+names",
			[]string{"--all", "enabled_b"},
			nil,
			"cannot use --all with resource names",
		},
		{
			"no names",
			nil,
			nil,
			"must specify at least one resource",
		},
		{
			"nonexistent resource",
			[]string{"foo"},
			nil,
			"no such resource \"foo\"",
		},
		{
			"Tiltfile",
			[]string{"(Tiltfile)"},
			nil,
			"(Tiltfile) cannot be enabled or disabled",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			f := newEnableFixture(t)
			f.createResources()
			cmd := disableCmd{}
			c := cmd.register()
			err := c.Flags().Parse(tc.args)
			require.NoError(t, err)
			err = cmd.run(f.ctx, c.Flags().Args())
			if tc.expectedError != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.expectedError)
				// if there's an error, expect enabled states to remain the same
				tc.expectedEnabled = []string{"enabled_a", "enabled_b", "enabled_c", "(Tiltfile)"}
			} else {
				require.NoError(t, err)
			}
			require.ElementsMatch(t, tc.expectedEnabled, f.enabledResources())
		})
	}
}
|
package main
import "fmt"
func main() {
//format printing example
fmt.Printf("true && true\t %v\n", true && true)
fmt.Printf("true && false\t %v\n",true && false)
fmt.Printf("true ||true\t %v\n",true || true)
fmt.Printf("true || false\t %v\n",true || false)
fmt.Printf("!true\t %v\n",!true)
fmt.Printf("!false\t %v\n",!false)
}
//fmt.Println(true && true) //true AND true becomes true
//fmt.Println(true && false) //true AND false becomes false
//fmt.Println(true || true) //true OR true is true -- one or the other
//fmt.Println(true || false) //true OR false becomes true
//fmt.Println(!true) //not true becomes false
|
// Copyright The Shipwright Contributors
//
// SPDX-License-Identifier: Apache-2.0
package resources
import (
buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
"github.com/shipwright-io/build/pkg/config"
"github.com/shipwright-io/build/pkg/reconciler/buildrun/resources/sources"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)
// AmendTaskSpecWithSources adds steps, results and volumes for spec.source and spec.sources.
// taskSpec is mutated in place; cfg is threaded through to the step
// builders. spec.source yields at most one step (Bundle takes precedence
// over Git when both are set), then every entry of spec.sources adds a step.
func AmendTaskSpecWithSources(
	cfg *config.Config,
	taskSpec *v1beta1.TaskSpec,
	build *buildv1alpha1.Build,
) {
	// create the step for spec.source, either Git or Bundle
	switch {
	case build.Spec.Source.BundleContainer != nil:
		sources.AppendBundleStep(cfg, taskSpec, build.Spec.Source, "default")
	case build.Spec.Source.URL != "":
		sources.AppendGitStep(cfg, taskSpec, build.Spec.Source, "default")
	}
	// create the step for spec.sources, this will eventually change into different steps depending on the type of the source
	if build.Spec.Sources != nil {
		for _, source := range *build.Spec.Sources {
			// today, we only have HTTP sources
			sources.AppendHTTPStep(cfg, taskSpec, source)
		}
	}
}
|
package main
import "fmt"
// res accumulates the per-level traversal built by levelTree:
// res[d] holds the node values found at depth d, left to right.
var res [][]int

func main() {
	res = [][]int{}
	// A single zero-valued root node; levelTree fills res from it.
	head := &TreeNode{}
	levelTree(head, 0)
	fmt.Println(res)
}
// TreeNode is a binary tree node with integer payload.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
/*
3
/ \
9 20
/ \
15 7
*/
// levelTree appends each node's value to the bucket for its depth in the
// package-level res, visiting nodes pre-order so values within a level end
// up left-to-right. level is the zero-based depth of head.
func levelTree(head *TreeNode, level int) {
	if head != nil {
		// Initialize this level's bucket on first visit.
		// (translated from the original Chinese comment)
		if len(res) == level {
			res = append(res, []int{})
		}
		res[level] = append(res[level], head.Val)
		levelTree(head.Left, level+1)
		levelTree(head.Right, level+1)
	}
}
|
package main
import "fmt"
import "os"
// swap exchanges the elements of s at index1 and index2 in place.
func swap(s []int, index1 int, index2 int) {
	tmp := s[index1]
	s[index1] = s[index2]
	s[index2] = tmp
}
// heap is a slice-backed max-heap (parent >= children, per heapify/insert).
type heap struct {
	data []int // backing array; only data[:size] holds live elements
	size int // number of elements currently in the heap
}
// left returns the array index of index's left child (0-based layout).
func (h *heap) left(index int) int { return (index+1)*2 - 1 }

// right returns the array index of index's right child.
func (h *heap) right(index int) int { return (index + 1) * 2 }

// parent returns the array index of index's parent.
func (h *heap) parent(index int) int { return (index+1)/2 - 1 }
// printSubTree prints the subtree rooted at index via an in-order walk:
// the left subtree's lines first, then this node indented by one tab per
// depth level, then the right subtree. Out-of-range indices print nothing.
func (h *heap) printSubTree(index int, depth int) {
	if index >= h.size {
		return
	}
	h.printSubTree(h.left(index), depth+1)
	for i := 0; i < depth; i++ {
		fmt.Printf("\t")
	}
	fmt.Printf("%d \n", h.data[index])
	h.printSubTree(h.right(index), depth+1)
}
// grow doubles the heap's backing storage, preserving existing elements.
func (h *heap) grow() {
	grown := make([]int, 2*len(h.data))
	copy(grown, h.data)
	h.data = grown
}
// heapify restores the max-heap property for the subtree rooted at index,
// sifting the value down while a larger child exists (both child subtrees
// are assumed to already be valid heaps).
func (h *heap) heapify(index int) {
	for {
		left := h.left(index)
		right := h.right(index)
		// Pick the largest among index and its in-range children.
		target := index
		if left < h.size && h.data[left] > h.data[target] {
			target = left
		}
		if right < h.size && h.data[right] > h.data[target] {
			target = right
		}
		// Node is already >= both children: done.
		if index == target {
			break
		}
		swap(h.data, index, target)
		index = target
	}
}
// insert adds e to the max-heap, sifting it up to its proper position.
func (h *heap) insert(e int) {
	// Bug fix: grow only when the backing array is actually full. The
	// previous condition (h.size <= len(h.data)) held on every insert, so
	// the array doubled each time an element was added.
	if h.size == len(h.data) {
		h.grow()
	}
	// Sift up: shift smaller ancestors down until e's slot is found.
	i := h.size
	for i > 0 && h.data[h.parent(i)] < e {
		h.data[i] = h.data[h.parent(i)]
		i = h.parent(i)
	}
	h.data[i] = e
	h.size++
}
// pop removes and returns the maximum element (the root), moving the last
// element to the root and sifting it down.
// On an empty heap it prints a message and terminates the whole process;
// note os.Exit(0) reports success even though this is the error path.
func (h *heap) pop() int {
	if h.size == 0 {
		fmt.Printf("CANNOT POP: HEAP ALREADY EMPTY\n")
		os.Exit(0)
	}
	h.size--
	max := h.data[0]
	h.data[0] = h.data[h.size]
	h.heapify(0)
	return max
}
// main inserts a fixed set of numbers into a max-heap, prints the tree,
// then pops in an endless loop; the program terminates inside pop()
// (via os.Exit) once the heap is empty, which emits the values in
// descending order.
func main() {
	h := heap{make([]int, 4), 0}
	unordered := []int{8, 1337, 5, 100, 66, 2, 32, 19, 89, 10, 11, 17, 27, 44, 88}
	fmt.Printf("Inserting numbers...\n")
	for _, value := range unordered {
		fmt.Printf("%d, ", value)
		h.insert(value)
	}
	h.printSubTree(0, 0)
	fmt.Printf("\nPopping numbers..\n")
	for {
		value := h.pop()
		fmt.Printf("%d, ", value)
	}
}
|
package session
// var (
// SessManager session.Manager = session.NewManager(&Builder{}, &session.ManagerConfig{
// Expire: 15 * time.Minute,
// Capacity: 100000,
// })
// )
// func GetSession(uid int64) (*Session, bool) {
// lock.RLock()
// defer lock.RUnlock()
//
// s, ok := active[uid]
// return s, ok
// }
//
// func GetActiveSession() map[int64]*Session {
// return active
// }
//
// func NewSessionIfNotExist(uid int64) (*Session, error) {
// lock.Lock()
// defer lock.Unlock()
//
// _, ok := active[uid]
// if !ok {
// err := manager.RPCGameServer.SetRecord(uid)
// if err != nil {
// log.Printf("GetSession: record error: %v", err)
// return nil, common.ErrConnectUserInOtherGameSvr
// }
//
// newS, err := NewSession(uid)
// if err != nil {
// log.Printf("ERROR: new session error: %v", err)
// return nil, err
// }
//
// active[uid] = newS
//
// // err = manager.RPCGameServer.SetBalancerCount(len(active))
// // if err != nil {
// // log.Printf("set active count error: %v", err)
// // }
// }
//
// return active[uid], nil
// }
|
package servgo
import (
"fmt"
"net"
)
// Server is a minimal TCP server that hands accepted connections to a fixed
// pool of workers through a bounded queue (see Run).
type Server struct {
	addr string // TCP address to listen on
	numWorkers, maxQueued int // worker pool size and queue capacity
	// getHandler is set via SetGetHandler; presumably invoked by workers
	// for GET requests — confirm against the worker implementation.
	getHandler func(Request) Response
}
// NewServer builds a Server for addr with the given worker pool size and
// request-queue capacity. Set the GET handler via SetGetHandler before Run.
func NewServer(addr string, numWorkers, maxQueued int) Server {
	return Server{
		addr:       addr,
		numWorkers: numWorkers,
		maxQueued:  maxQueued,
	}
}
// SetGetHandler installs the handler function used for GET requests.
func (s *Server) SetGetHandler(f func(Request) Response) {
	s.getHandler = f
}
// GetHandler returns the currently installed GET handler (nil when unset).
func (s *Server) GetHandler() func(Request) Response {
	return s.getHandler
}
// Run listens on the server address and dispatches accepted connections to
// s.numWorkers workers through a queue bounded at s.maxQueued. It blocks
// forever; an error is returned only when the initial Listen fails.
func (s *Server) Run() error {
	ln, err := net.Listen("tcp", s.addr)
	if err != nil {
		return err
	}
	// Close the listener should the function ever unwind (e.g. a panic).
	defer ln.Close()
	requestQueue := make(chan net.Conn, s.maxQueued)
	for i := 0; i < s.numWorkers; i++ {
		go worker(i, s, requestQueue)
	}
	for {
		cl, err := ln.Accept()
		if err != nil {
			fmt.Println(err)
			continue
		}
		requestQueue <- cl
	}
	// No trailing return: the accept loop never exits, and the previous
	// "return nil" after it was unreachable (flagged by go vet).
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package builtins
import (
"context"
"fmt"
"strings"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/ring"
"github.com/cockroachdb/errors"
)
// indexedValue combines a value from the row with the index of that row.
type indexedValue struct {
	value tree.Datum // the row's argument value
	idx int // index of the row the value came from
}
// slidingWindow maintains a deque of values along with corresponding indices
// based on cmp function:
// for Min behavior, cmp = -a.Compare(b)
// for Max behavior, cmp = a.Compare(b)
//
// It assumes that the frame bounds will never go back, i.e. non-decreasing
// sequences of frame start and frame end indices.
type slidingWindow struct {
	values ring.Buffer // deque of *indexedValue, highest priority at the front
	evalCtx *tree.EvalContext // evaluation context passed to cmp
	cmp func(*tree.EvalContext, tree.Datum, tree.Datum) int
}
// makeSlidingWindow returns an empty sliding window that orders values with
// the given comparison function (see slidingWindow for cmp semantics).
func makeSlidingWindow(
	evalCtx *tree.EvalContext, cmp func(*tree.EvalContext, tree.Datum, tree.Datum) int,
) *slidingWindow {
	return &slidingWindow{
		evalCtx: evalCtx,
		cmp: cmp,
	}
}
// add first removes all values that are "smaller or equal" (depending on cmp)
// from the end of the deque and then appends 'iv' to the end. This way, the
// deque always contains unique values sorted in descending order of their
// "priority" (when we encounter duplicates, we always keep the one with the
// largest idx).
func (sw *slidingWindow) add(iv *indexedValue) {
	// Scan from the back, dropping entries whose priority does not exceed iv's.
	for i := sw.values.Len() - 1; i >= 0; i-- {
		if sw.cmp(sw.evalCtx, sw.values.Get(i).(*indexedValue).value, iv.value) > 0 {
			break
		}
		sw.values.RemoveLast()
	}
	sw.values.AddLast(iv)
}
// removeAllBefore removes all values from the beginning of the deque that have
// indices smaller than given 'idx'. This operation corresponds to shifting the
// start of the frame up to 'idx'.
//
// NOTE(review): the loop increments i while RemoveFirst also shifts the
// remaining entries toward the front, so consecutive removals appear to be
// able to leave entries with idx < the target in the deque — confirm
// against the frame-advance pattern of the callers.
func (sw *slidingWindow) removeAllBefore(idx int) {
	for i := 0; i < sw.values.Len() && i < idx; i++ {
		if sw.values.Get(i).(*indexedValue).idx >= idx {
			break
		}
		sw.values.RemoveFirst()
	}
}
// string renders the deque as tab-separated "(value, idx)" pairs, front to
// back; used for debugging.
func (sw *slidingWindow) string() string {
	var sb strings.Builder
	for i := 0; i < sw.values.Len(); i++ {
		iv := sw.values.Get(i).(*indexedValue)
		fmt.Fprintf(&sb, "(%v, %v)\t", iv.value, iv.idx)
	}
	return sb.String()
}
// reset empties the deque so the window can be reused from scratch.
func (sw *slidingWindow) reset() {
	sw.values.Reset()
}
// slidingWindowFunc computes a min/max-style window function (per sw.cmp)
// using the sliding window deque above.
type slidingWindowFunc struct {
	sw *slidingWindow
	prevEnd int // frame end index seen on the previous Compute call
}
// Compute implements WindowFunc interface.
// It returns the extremum (per w.sw.cmp) of the first window-function
// argument over the current frame, or NULL for an empty frame. With the
// default frame exclusion it advances the sliding window incrementally;
// otherwise it falls back to scanning the whole frame.
func (w *slidingWindowFunc) Compute(
	ctx context.Context, evalCtx *tree.EvalContext, wfr *tree.WindowFrameRun,
) (tree.Datum, error) {
	frameStartIdx, err := wfr.FrameStartIdx(ctx, evalCtx)
	if err != nil {
		return nil, err
	}
	frameEndIdx, err := wfr.FrameEndIdx(ctx, evalCtx)
	if err != nil {
		return nil, err
	}
	if !wfr.Frame.DefaultFrameExclusion() {
		// We cannot use a sliding window approach because we have a frame
		// exclusion clause - some rows will be in and out of the frame which
		// breaks the necessary assumption, so we fallback to a naive quadratic
		// approach.
		var res tree.Datum
		for idx := frameStartIdx; idx < frameEndIdx; idx++ {
			if skipped, err := wfr.IsRowSkipped(ctx, idx); err != nil {
				return nil, err
			} else if skipped {
				continue
			}
			args, err := wfr.ArgsByRowIdx(ctx, idx)
			if err != nil {
				return nil, err
			}
			if res == nil {
				res = args[0]
			} else {
				// Keep the higher-priority datum per the window's cmp.
				if w.sw.cmp(evalCtx, args[0], res) > 0 {
					res = args[0]
				}
			}
		}
		if res == nil {
			// Spec: the frame is empty, so we return NULL.
			return tree.DNull, nil
		}
		return res, nil
	}
	// We need to discard all values that are no longer in the frame.
	w.sw.removeAllBefore(frameStartIdx)
	// We need to add all values that just entered the frame and have not been
	// added yet.
	for idx := max(w.prevEnd, frameStartIdx); idx < frameEndIdx; idx++ {
		if skipped, err := wfr.IsRowSkipped(ctx, idx); err != nil {
			return nil, err
		} else if skipped {
			continue
		}
		args, err := wfr.ArgsByRowIdx(ctx, idx)
		if err != nil {
			return nil, err
		}
		value := args[0]
		if value == tree.DNull {
			// Null value can neither be minimum nor maximum over a window frame with
			// non-null values, so we're not adding them to the sliding window. The
			// case of a window frame with no non-null values is handled below.
			continue
		}
		w.sw.add(&indexedValue{value: value, idx: idx})
	}
	w.prevEnd = frameEndIdx
	if w.sw.values.Len() == 0 {
		// Spec: the frame is empty, so we return NULL.
		return tree.DNull, nil
	}
	// The datum with "highest priority" within the frame is at the very front
	// of the deque.
	return w.sw.values.GetFirst().(*indexedValue).value, nil
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
// Reset implements tree.WindowFunc interface.
// It clears the deque and the remembered frame end so computation can start
// over from the first row.
func (w *slidingWindowFunc) Reset(context.Context) {
	w.prevEnd = 0
	w.sw.reset()
}
// Close implements WindowFunc interface.
// It drops the reference to the sliding window, releasing it for garbage
// collection.
func (w *slidingWindowFunc) Close(context.Context, *tree.EvalContext) {
	w.sw = nil
}
// slidingWindowSumFunc applies sliding window approach to summation over
// a frame. It assumes that the frame bounds will never go back, i.e.
// non-decreasing sequences of frame start and frame end indices.
type slidingWindowSumFunc struct {
	agg tree.AggregateFunc // one of the four SumAggregates
	prevStart, prevEnd int // frame bounds seen on the previous Compute call
	// lastNonNullIdx is the index of the latest non-null value seen in the
	// sliding window so far. noNonNullSeen indicates non-null values are yet to
	// be seen.
	lastNonNullIdx int
}
const noNonNullSeen = -1
// newSlidingWindowSumFunc returns a sliding-window summation function backed
// by the given aggregate (one of the four SumAggregates).
func newSlidingWindowSumFunc(agg tree.AggregateFunc) *slidingWindowSumFunc {
	f := &slidingWindowSumFunc{agg: agg}
	f.lastNonNullIdx = noNonNullSeen
	return f
}
// removeAllBefore subtracts the values from all the rows that are no longer in
// the frame.
//
// It walks rows in [w.prevStart, min(frameStartIdx, w.prevEnd)) — rows that
// were accumulated for the previous frame but precede the new frame start —
// and adds the arithmetic negation of each row's argument to the running
// aggregate, which cancels the original contribution.
func (w *slidingWindowSumFunc) removeAllBefore(
	ctx context.Context, evalCtx *tree.EvalContext, wfr *tree.WindowFrameRun,
) error {
	frameStartIdx, err := wfr.FrameStartIdx(ctx, evalCtx)
	if err != nil {
		return err
	}
	// idx < w.prevEnd bounds the walk to rows that were actually added on a
	// previous Compute call; skipped rows were never added, so they are
	// skipped here too.
	for idx := w.prevStart; idx < frameStartIdx && idx < w.prevEnd; idx++ {
		if skipped, err := wfr.IsRowSkipped(ctx, idx); err != nil {
			return err
		} else if skipped {
			continue
		}
		args, err := wfr.ArgsByRowIdx(ctx, idx)
		if err != nil {
			return err
		}
		value := args[0]
		if value == tree.DNull {
			// Null values do not contribute to the running sum, so there is nothing
			// to subtract once they leave the window frame.
			continue
		}
		// The aggregate only exposes Add, so "subtract" by adding the negated
		// datum; each supported type is negated explicitly.
		switch v := value.(type) {
		case *tree.DInt:
			err = w.agg.Add(ctx, tree.NewDInt(-*v))
		case *tree.DDecimal:
			d := tree.DDecimal{}
			d.Neg(&v.Decimal)
			err = w.agg.Add(ctx, &d)
		case *tree.DFloat:
			err = w.agg.Add(ctx, tree.NewDFloat(-*v))
		case *tree.DInterval:
			// Negate an interval by subtracting it from the zero duration.
			err = w.agg.Add(ctx, &tree.DInterval{Duration: duration.Duration{}.Sub(v.Duration)})
		default:
			err = errors.AssertionFailedf("unexpected value %v", v)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
// Compute implements WindowFunc interface. It returns the sum over the
// current window frame, incrementally: values that left the frame since the
// previous call are subtracted and values that entered it are added. When
// the frame has an exclusion clause the incremental assumption breaks, so it
// recomputes the sum from scratch instead.
func (w *slidingWindowSumFunc) Compute(
	ctx context.Context, evalCtx *tree.EvalContext, wfr *tree.WindowFrameRun,
) (tree.Datum, error) {
	frameStartIdx, err := wfr.FrameStartIdx(ctx, evalCtx)
	if err != nil {
		return nil, err
	}
	frameEndIdx, err := wfr.FrameEndIdx(ctx, evalCtx)
	if err != nil {
		return nil, err
	}
	if !wfr.Frame.DefaultFrameExclusion() {
		// We cannot use a sliding window approach because we have a frame
		// exclusion clause - some rows will be in and out of the frame which
		// breaks the necessary assumption, so we fallback to a naive quadratic
		// approach.
		w.agg.Reset(ctx)
		for idx := frameStartIdx; idx < frameEndIdx; idx++ {
			if skipped, err := wfr.IsRowSkipped(ctx, idx); err != nil {
				return nil, err
			} else if skipped {
				continue
			}
			args, err := wfr.ArgsByRowIdx(ctx, idx)
			if err != nil {
				return nil, err
			}
			if err = w.agg.Add(ctx, args[0]); err != nil {
				return nil, err
			}
		}
		return w.agg.Result()
	}
	// We need to discard all values that are no longer in the frame.
	if err = w.removeAllBefore(ctx, evalCtx, wfr); err != nil {
		return nil, err
	}
	// We need to sum all values that just entered the frame and have not been
	// added yet.
	for idx := max(w.prevEnd, frameStartIdx); idx < frameEndIdx; idx++ {
		if skipped, err := wfr.IsRowSkipped(ctx, idx); err != nil {
			return nil, err
		} else if skipped {
			continue
		}
		args, err := wfr.ArgsByRowIdx(ctx, idx)
		if err != nil {
			return nil, err
		}
		if args[0] != tree.DNull {
			// Track the latest non-null row so the all-nulls case below can be
			// detected without rescanning the frame.
			w.lastNonNullIdx = idx
			err = w.agg.Add(ctx, args[0])
			if err != nil {
				return nil, err
			}
		}
	}
	w.prevStart = frameStartIdx
	w.prevEnd = frameEndIdx
	// If last non-null value has index smaller than the start of the window
	// frame, then only nulls can be in the frame. This holds true as well for
	// the special noNonNullsSeen index.
	onlyNulls := w.lastNonNullIdx < frameStartIdx
	if frameStartIdx == frameEndIdx || onlyNulls {
		// Either the window frame is empty or only null values are in the frame,
		// so we return NULL as per spec.
		return tree.DNull, nil
	}
	return w.agg.Result()
}
// Reset implements tree.WindowFunc interface. It clears the running
// aggregate and all bookkeeping so the next Compute begins a fresh frame.
func (w *slidingWindowSumFunc) Reset(ctx context.Context) {
	w.agg.Reset(ctx)
	w.lastNonNullIdx = noNonNullSeen
	w.prevStart, w.prevEnd = 0, 0
}
// Close implements WindowFunc interface. It releases the resources held by
// the underlying aggregate.
func (w *slidingWindowSumFunc) Close(ctx context.Context, _ *tree.EvalContext) {
	w.agg.Close(ctx)
}
// avgWindowFunc uses slidingWindowSumFunc to compute average over a frame.
type avgWindowFunc struct {
	// sum maintains the incrementally-updated frame sum; the average divides
	// its result by the count of non-null rows in the frame.
	sum *slidingWindowSumFunc
}
// Compute implements WindowFunc interface. It delegates the frame sum to the
// sliding-window summation and divides by the number of non-skipped,
// non-null rows in the frame. Returns NULL for an empty (or all-null) frame.
func (w *avgWindowFunc) Compute(
	ctx context.Context, evalCtx *tree.EvalContext, wfr *tree.WindowFrameRun,
) (tree.Datum, error) {
	sum, err := w.sum.Compute(ctx, evalCtx, wfr)
	if err != nil {
		return nil, err
	}
	if sum == tree.DNull {
		// Spec: the frame is empty, so we return NULL.
		return tree.DNull, nil
	}
	// Count the rows that actually contributed to the sum. sum != DNull above
	// guarantees at least one such row, so frameSize cannot be zero at the
	// division below.
	frameSize := 0
	frameStartIdx, err := wfr.FrameStartIdx(ctx, evalCtx)
	if err != nil {
		return nil, err
	}
	frameEndIdx, err := wfr.FrameEndIdx(ctx, evalCtx)
	if err != nil {
		return nil, err
	}
	for idx := frameStartIdx; idx < frameEndIdx; idx++ {
		if skipped, err := wfr.IsRowSkipped(ctx, idx); err != nil {
			return nil, err
		} else if skipped {
			continue
		}
		args, err := wfr.ArgsByRowIdx(ctx, idx)
		if err != nil {
			return nil, err
		}
		if args[0] == tree.DNull {
			// Null values do not count towards the number of rows that contribute
			// to the sum, so we're omitting them from the frame.
			continue
		}
		frameSize++
	}
	// Divide according to the sum's concrete type; int sums promote to
	// decimal so that integer averages keep fractional precision.
	switch t := sum.(type) {
	case *tree.DFloat:
		return tree.NewDFloat(*t / tree.DFloat(frameSize)), nil
	case *tree.DDecimal:
		var avg tree.DDecimal
		count := apd.New(int64(frameSize), 0)
		_, err := tree.DecimalCtx.Quo(&avg.Decimal, &t.Decimal, count)
		return &avg, err
	case *tree.DInt:
		dd := tree.DDecimal{}
		dd.SetInt64(int64(*t))
		var avg tree.DDecimal
		count := apd.New(int64(frameSize), 0)
		_, err := tree.DecimalCtx.Quo(&avg.Decimal, &dd.Decimal, count)
		return &avg, err
	case *tree.DInterval:
		return &tree.DInterval{Duration: t.Duration.Div(int64(frameSize))}, nil
	default:
		return nil, errors.AssertionFailedf("unexpected SUM result type: %s", t)
	}
}
// Reset implements tree.WindowFunc interface. It resets the underlying
// summation state.
func (w *avgWindowFunc) Reset(ctx context.Context) {
	w.sum.Reset(ctx)
}
// Close implements WindowFunc interface. It closes the underlying summation
// aggregate.
func (w *avgWindowFunc) Close(ctx context.Context, evalCtx *tree.EvalContext) {
	w.sum.Close(ctx, evalCtx)
}
|
package server
import (
"github.com/ericelsken/golflog"
"net/http"
)
const (
	// Filesystem roots, relative to the process working directory, used by
	// createMux below. NOTE(review): Go convention is MixedCaps (StaticDir)
	// rather than ALL_CAPS; the names are exported so renaming could break
	// other files — left unchanged.
	STATIC_DIR = "/bower_components/"
	TEMPL_DIR = "/app/templates/"
	IMG_DIR = "/images/"
	JS_DIR = "/app-ui/js/"
	CSS_DIR = "/app-ui/css/"
	TEMPLATE_DIR = "/app-ui/js/templates/"
)
// Server is the application's root http.Handler; it delegates every request
// to its internal mux (the middleware-wrapped route multiplexer).
type Server struct {
	mux http.Handler
}
// ServeHTTP implements http.Handler by forwarding the request to the mux.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.mux.ServeHTTP(w, r)
}
// Create builds the application's Server: the route multiplexer wrapped in
// the middleware chain.
func Create() *Server {
	handler := addMiddleware(createMux())
	return &Server{mux: handler}
}
// createMux builds the route multiplexer. The root path serves the template
// directory directly; every other route strips its URL prefix and serves
// files from the corresponding directory under the working directory.
func createMux() *http.ServeMux {
	mux := http.NewServeMux()
	mux.Handle("/", http.FileServer(http.Dir("."+TEMPL_DIR)))
	// Prefix-stripped static routes, registered uniformly from a table.
	routes := []struct {
		prefix string
		dir    string
	}{
		{"/static/", STATIC_DIR},
		{"/images/", IMG_DIR},
		{"/js/", JS_DIR},
		{"/css/", CSS_DIR},
		{"/templates/", TEMPLATE_DIR},
	}
	for _, rt := range routes {
		mux.Handle(rt.prefix, http.StripPrefix(rt.prefix, http.FileServer(http.Dir("."+rt.dir))))
	}
	return mux
}
// addMiddleware wraps h with the golflog request-logging middleware.
func addMiddleware(h http.Handler) http.Handler {
	return golflog.Middleware(h)
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package jobs_test
import (
"bytes"
"context"
"encoding/hex"
"math"
"reflect"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/optionalnodeliveness"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slinstance"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slstorage"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// TestRoundtripJob checks that a job record created through the registry can
// be loaded back and compares deeply equal to what was stored.
func TestRoundtripJob(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	registry := s.JobRegistry().(*jobs.Registry)
	defer s.Stopper().Stop(ctx)
	jobID := registry.MakeJobID()
	storedJob := registry.NewJob(jobs.Record{
		Description:   "beep boop",
		Username:      security.MakeSQLUsernameFromPreNormalizedString("robot"),
		DescriptorIDs: descpb.IDs{42},
		Details:       jobspb.RestoreDetails{},
		Progress:      jobspb.RestoreProgress{},
	}, jobID)
	// Created persists the record; LoadJob below reads it back by ID.
	if err := storedJob.Created(ctx); err != nil {
		t.Fatal(err)
	}
	retrievedJob, err := registry.LoadJob(ctx, jobID)
	if err != nil {
		t.Fatal(err)
	}
	if e, a := storedJob, retrievedJob; !reflect.DeepEqual(e, a) {
		//diff := strings.Join(pretty.Diff(e, a), "\n")
		t.Fatalf("stored job did not match retrieved job:\n%+v\n%+v", e, a)
	}
}
// TestRegistryResumeExpiredLease verifies that a job whose claiming node's
// liveness lease expires is re-adopted and resumed by another registry, and
// that merely incrementing a node's epoch does NOT cause re-adoption.
func TestRegistryResumeExpiredLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	defer jobs.ResetConstructors()()
	ctx := context.Background()
	ver201 := cluster.MakeTestingClusterSettingsWithVersions(
		roachpb.Version{Major: 20, Minor: 1},
		roachpb.Version{Major: 20, Minor: 1},
		true)
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{Settings: ver201})
	defer s.Stopper().Stop(ctx)
	// Disable leniency for instant expiration
	jobs.LeniencySetting.Override(&s.ClusterSettings().SV, 0)
	const cancelInterval = time.Duration(math.MaxInt64)
	const adoptInterval = time.Microsecond
	slinstance.DefaultTTL.Override(&s.ClusterSettings().SV, 2*adoptInterval)
	slinstance.DefaultHeartBeat.Override(&s.ClusterSettings().SV, adoptInterval)
	db := s.DB()
	clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
	nodeLiveness := jobs.NewFakeNodeLiveness(4)
	// newRegistry constructs and starts a registry claiming jobs as the given
	// node ID; all registries share the fake node liveness so the test can
	// expire leases at will.
	newRegistry := func(id roachpb.NodeID) *jobs.Registry {
		var c base.NodeIDContainer
		c.Set(ctx, id)
		idContainer := base.NewSQLIDContainer(0, &c)
		ac := log.AmbientContext{Tracer: tracing.NewTracer()}
		sqlStorage := slstorage.NewStorage(
			s.Stopper(), clock, db, s.InternalExecutor().(sqlutil.InternalExecutor), s.ClusterSettings(),
		)
		sqlInstance := slinstance.NewSQLInstance(s.Stopper(), clock, sqlStorage, s.ClusterSettings())
		r := jobs.MakeRegistry(
			ac, s.Stopper(), clock, optionalnodeliveness.MakeContainer(nodeLiveness), db,
			s.InternalExecutor().(sqlutil.InternalExecutor), idContainer, sqlInstance,
			s.ClusterSettings(), base.DefaultHistogramWindowInterval(), jobs.FakePHS, "",
			nil, /* knobs */
		)
		if err := r.Start(ctx, s.Stopper(), cancelInterval, adoptInterval); err != nil {
			t.Fatal(err)
		}
		return r
	}
	const jobCount = 3
	drainAdoptionLoop := func() {
		// Every turn of the registry's adoption loop will generate exactly one call
		// to nodeLiveness.GetLivenesses. Only after we've witnessed one call for
		// each job, plus one more call, can we be sure that all work has been
		// completed.
		//
		// Waiting for only jobCount calls to nodeLiveness.GetLivenesses is racy, as
		// we might perform our assertions just as the last turn of registry loop
		// observes our injected liveness failure, if any.
		for i := 0; i < jobCount+1; i++ {
			<-nodeLiveness.GetLivenessesCalledCh
		}
	}
	// jobMap maps node IDs to job IDs.
	jobMap := make(map[roachpb.NodeID]jobspb.JobID)
	hookCallCount := 0
	// resumeCounts maps jobs IDs to number of start/resumes.
	resumeCounts := make(map[jobspb.JobID]int)
	// done prevents jobs from finishing.
	done := make(chan struct{})
	// resumeCalled does a locked, blocking send when a job is started/resumed. A
	// receive on it will block until a job is running.
	resumeCalled := make(chan struct{})
	var lock syncutil.Mutex
	jobs.RegisterConstructor(jobspb.TypeBackup, func(job *jobs.Job, _ *cluster.Settings) jobs.Resumer {
		lock.Lock()
		hookCallCount++
		lock.Unlock()
		return jobs.FakeResumer{
			OnResume: func(ctx context.Context) error {
				select {
				case <-ctx.Done():
					return ctx.Err()
				case resumeCalled <- struct{}{}:
				case <-done:
				}
				lock.Lock()
				resumeCounts[job.ID()]++
				lock.Unlock()
				// Block until the test closes done (or the job is canceled) so
				// jobs stay "running" while leases are manipulated.
				select {
				case <-ctx.Done():
					return ctx.Err()
				case <-done:
					return nil
				}
			},
		}
	})
	// Start one job per node, each through its own registry.
	for i := 0; i < jobCount; i++ {
		nodeid := roachpb.NodeID(i + 1)
		rec := jobs.Record{
			Details:  jobspb.BackupDetails{},
			Progress: jobspb.BackupProgress{},
		}
		job, err := jobs.TestingCreateAndStartJob(ctx, newRegistry(nodeid), db, rec)
		if err != nil {
			t.Fatal(err)
		}
		// Wait until the job is running.
		<-resumeCalled
		lock.Lock()
		jobMap[nodeid] = job.ID()
		lock.Unlock()
	}
	drainAdoptionLoop()
	if e, a := jobCount, hookCallCount; e != a {
		t.Fatalf("expected hookCallCount to be %d, but got %d", e, a)
	}
	drainAdoptionLoop()
	if e, a := jobCount, hookCallCount; e != a {
		t.Fatalf("expected hookCallCount to be %d, but got %d", e, a)
	}
	// Expire node 1's lease: its job should be re-adopted and resumed.
	nodeLiveness.FakeSetExpiration(1, hlc.MinTimestamp)
	drainAdoptionLoop()
	<-resumeCalled
	testutils.SucceedsSoon(t, func() error {
		lock.Lock()
		defer lock.Unlock()
		if hookCallCount <= jobCount {
			return errors.Errorf("expected hookCallCount to be > %d, but got %d", jobCount, hookCallCount)
		}
		return nil
	})
	testutils.SucceedsSoon(t, func() error {
		lock.Lock()
		defer lock.Unlock()
		if e, a := 2, resumeCounts[jobMap[1]]; e != a {
			return errors.Errorf("expected resumeCount to be %d, but got %d", e, a)
		}
		return nil
	})
	// We want to verify that simply incrementing the epoch does not
	// result in the job being rescheduled.
	nodeLiveness.FakeIncrementEpoch(3)
	drainAdoptionLoop()
	select {
	case <-resumeCalled:
		t.Fatal("Incrementing an epoch should not reschedule a job")
	default:
	}
	// When we reset the liveness of the node, though, we should get
	// a reschedule.
	nodeLiveness.FakeSetExpiration(3, hlc.MinTimestamp)
	drainAdoptionLoop()
	<-resumeCalled
	close(done)
	testutils.SucceedsSoon(t, func() error {
		lock.Lock()
		defer lock.Unlock()
		if e, a := 1, resumeCounts[jobMap[3]]; e > a {
			return errors.Errorf("expected resumeCount to be > %d, but got %d", e, a)
		}
		if e, a := 1, resumeCounts[jobMap[2]]; e > a {
			return errors.Errorf("expected resumeCount to be > %d, but got %d", e, a)
		}
		count := 0
		for _, ct := range resumeCounts {
			count += ct
		}
		if e, a := 4, count; e > a {
			return errors.Errorf("expected total jobs to be > %d, but got %d", e, a)
		}
		return nil
	})
}
// TestRegistryResumeActiveLease verifies that a job inserted directly into
// system.jobs with a legacy epoch-based lease is picked up and resumed by
// the registry's adoption loop.
func TestRegistryResumeActiveLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	defer jobs.TestingSetAdoptAndCancelIntervals(10*time.Millisecond, 10*time.Millisecond)()
	resumeCh := make(chan jobspb.JobID)
	defer jobs.ResetConstructors()()
	// The fake resumer just reports which job ID was resumed.
	jobs.RegisterConstructor(jobspb.TypeBackup, func(job *jobs.Job, _ *cluster.Settings) jobs.Resumer {
		return jobs.FakeResumer{
			OnResume: func(ctx context.Context) error {
				select {
				case <-ctx.Done():
					return ctx.Err()
				case resumeCh <- job.ID():
					return nil
				}
			},
		}
	})
	ctx := context.Background()
	s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(ctx)
	payload, err := protoutil.Marshal(&jobspb.Payload{
		Lease:   &jobspb.Lease{NodeID: 1, Epoch: 1},
		Details: jobspb.WrapPayloadDetails(jobspb.BackupDetails{}),
	})
	if err != nil {
		t.Fatal(err)
	}
	progress, err := protoutil.Marshal(&jobspb.Progress{
		Details: jobspb.WrapProgressDetails(jobspb.BackupProgress{}),
	})
	if err != nil {
		t.Fatal(err)
	}
	// Insert the job row directly, bypassing the registry, then wait for the
	// adoption loop to resume it.
	var id jobspb.JobID
	sqlutils.MakeSQLRunner(sqlDB).QueryRow(t,
		`INSERT INTO system.jobs (status, payload, progress) VALUES ($1, $2, $3) RETURNING id`,
		jobs.StatusRunning, payload, progress).Scan(&id)
	if e, a := id, <-resumeCh; e != a {
		t.Fatalf("expected job %d to be resumed, but got %d", e, a)
	}
}
// TestExpiringSessionsAndClaimJobsDoesNotTouchTerminalJobs will ensure that
// we do not update the claim_session_id field of jobs when expiring sessions
// or claiming jobs: only non-terminal jobs may have their claim cleared by
// session expiration or set by adoption.
func TestExpiringSessionsAndClaimJobsDoesNotTouchTerminalJobs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// Don't adopt, cancel rapidly.
	defer jobs.TestingSetAdoptAndCancelIntervals(10*time.Hour, 10*time.Millisecond)()
	ctx := context.Background()
	s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(ctx)
	payload, err := protoutil.Marshal(&jobspb.Payload{
		Details: jobspb.WrapPayloadDetails(jobspb.BackupDetails{}),
	})
	if err != nil {
		t.Fatal(err)
	}
	progress, err := protoutil.Marshal(&jobspb.Progress{
		Details: jobspb.WrapProgressDetails(jobspb.BackupProgress{}),
	})
	if err != nil {
		t.Fatal(err)
	}
	tdb := sqlutils.MakeSQLRunner(sqlDB)
	const insertQuery = `
   INSERT
     INTO system.jobs (
                        status,
                        payload,
                        progress,
                        claim_session_id,
                        claim_instance_id
                      )
   VALUES ($1, $2, $3, $4, $5)
RETURNING id;
`
	// One job per terminal status, each with a bogus claim that must survive
	// untouched, plus one running job whose claim should be cleared.
	terminalStatuses := []jobs.Status{jobs.StatusSucceeded, jobs.StatusCanceled, jobs.StatusFailed}
	terminalIDs := make([]jobspb.JobID, len(terminalStatuses))
	terminalClaims := make([][]byte, len(terminalStatuses))
	for i, s := range terminalStatuses {
		terminalClaims[i] = uuid.MakeV4().GetBytes() // bogus claim
		tdb.QueryRow(t, insertQuery, s, payload, progress, terminalClaims[i], 42).
			Scan(&terminalIDs[i])
	}
	var nonTerminalID jobspb.JobID
	tdb.QueryRow(t, insertQuery, jobs.StatusRunning, payload, progress, uuid.MakeV4().GetBytes(), 42).
		Scan(&nonTerminalID)
	// checkClaimEqual returns nil iff the job's claim_session_id equals exp.
	checkClaimEqual := func(id jobspb.JobID, exp []byte) error {
		const getClaimQuery = `SELECT claim_session_id FROM system.jobs WHERE id = $1`
		var claim []byte
		tdb.QueryRow(t, getClaimQuery, id).Scan(&claim)
		if !bytes.Equal(claim, exp) {
			// Bug fix: the previous message always said "expected nil" and
			// printed the expected bytes as the observed value; report both
			// sides accurately instead.
			return errors.Errorf("expected claim %s, got %s",
				hex.EncodeToString(exp), hex.EncodeToString(claim))
		}
		return nil
	}
	// The session-expiry loop should clear only the running job's claim.
	testutils.SucceedsSoon(t, func() error {
		return checkClaimEqual(nonTerminalID, nil)
	})
	for i, id := range terminalIDs {
		require.NoError(t, checkClaimEqual(id, terminalClaims[i]))
	}
	// Update the terminal jobs to set them to have a NULL claim.
	for _, id := range terminalIDs {
		tdb.Exec(t, `UPDATE system.jobs SET claim_session_id = NULL WHERE id = $1`, id)
	}
	// At this point, all of the jobs should have a NULL claim.
	// Assert that.
	for _, id := range append(terminalIDs, nonTerminalID) {
		require.NoError(t, checkClaimEqual(id, nil))
	}
	// Nudge the adoption queue and ensure that only the non-terminal job gets
	// claimed.
	s.JobRegistry().(*jobs.Registry).TestingNudgeAdoptionQueue()
	sess, err := s.SQLLivenessProvider().(sqlliveness.Provider).Session(ctx)
	require.NoError(t, err)
	testutils.SucceedsSoon(t, func() error {
		return checkClaimEqual(nonTerminalID, sess.ID().UnsafeBytes())
	})
	// Ensure that the terminal jobs still have a nil claim.
	for _, id := range terminalIDs {
		require.NoError(t, checkClaimEqual(id, nil))
	}
}
|
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/matryer/filedb"
"log"
"strings"
)
const (
	// Command verbs accepted as the first non-flag CLI argument.
	list = "list"
	add = "add"
	remove = "remove"
)
// path is one backed-up filesystem path together with the hash of its last
// archived state, as stored in the filedb collection.
type path struct {
	Path string
	Hash string
}
// String renders the record as "<path> [<hash>]".
func (p path) String() string {
	return p.Path + " [" + p.Hash + "]"
}
// main parses the -db flag and dispatches the first positional argument to
// the list/add/remove subcommand. Any fatal error is printed (with usage)
// via the deferred handler so that deferred cleanups still run.
func main() {
	var fatalErr error
	defer func() {
		if fatalErr != nil {
			flag.PrintDefaults()
			log.Fatalln(fatalErr)
		}
	}()
	var dbpath = flag.String("db", "./backupdata", "path to database directory")
	flag.Parse()
	args := flag.Args()
	if len(args) < 1 {
		fatalErr = errors.New("invalid usage; must specify command")
		return
	}
	db, err := filedb.Dial(*dbpath)
	if err != nil {
		fatalErr = err
		return
	}
	defer db.Close()
	// All records live in the "paths" collection.
	col, err := db.C("paths")
	if err != nil {
		fatalErr = err
		return
	}
	firstNonFlagArgument := args[0]
	// Unknown commands fall through silently and the program exits cleanly.
	switch strings.ToLower(firstNonFlagArgument) {
	case list:
		fatalErr = listPaths(col)
		return
	case add:
		paths := args[1:]
		if len(paths) == 0 {
			fatalErr = errors.New("must specify path to add")
			return
		}
		fatalErr = addingPaths(col, paths)
		return
	case remove:
		paths := args[1:]
		if len(paths) == 0 {
			fatalErr = errors.New("must specify path to remove")
			return
		}
		fatalErr = removingPaths(col, paths)
		return
	}
}
// removingPaths deletes from col every record whose Path matches one of
// paths, printing a "- " line per removal. A record that fails to unmarshal
// stops the scan and its error is returned (unless the iteration itself
// errored, which takes precedence).
func removingPaths(col *filedb.C, paths []string) error {
	var unmarshalErr error
	var rec path
	iterErr := col.RemoveEach(func(i int, data []byte) (removed bool, stop bool) {
		if err := json.Unmarshal(data, &rec); err != nil {
			unmarshalErr = err
			return false, true
		}
		for _, p := range paths {
			if rec.Path != p {
				continue
			}
			fmt.Printf("- %s\n", rec)
			return true, false
		}
		return false, false
	})
	if iterErr != nil {
		return iterErr
	}
	return unmarshalErr
}
// addingPaths inserts one "Not yet archived" record per path into col,
// printing a "+ " line for each success. The first insert error aborts the
// loop and is returned.
func addingPaths(col *filedb.C, paths []string) error {
	for _, p := range paths {
		rec := &path{Path: p, Hash: "Not yet archived"}
		if err := col.InsertJSON(rec); err != nil {
			return err
		}
		fmt.Printf("+ %s\n", rec)
	}
	return nil
}
// listPaths prints every record in col as an "= " line. A record that fails
// to unmarshal stops the scan and its error is returned (unless the
// iteration itself errored, which takes precedence).
func listPaths(col *filedb.C) error {
	var unmarshalErr error
	var rec path
	iterErr := col.ForEach(func(i int, data []byte) bool {
		if err := json.Unmarshal(data, &rec); err != nil {
			unmarshalErr = err
			return true
		}
		fmt.Printf("= %s\n", rec)
		return false
	})
	if iterErr != nil {
		return iterErr
	}
	return unmarshalErr
}
|
package main
import(
"fmt"
"strings"
)
// Challenge001 decodes the challenge-001 message by applying the +2 Caesar
// shift (translate) to the text read from the data file, and also decodes
// the literal "map" (presumably to obtain the next URL path segment —
// TODO confirm against the challenge site).
func (c *Challenge) Challenge001() {
	// Windows-style path; ReadFile is defined elsewhere in this package.
	msg:=ReadFile(".\\Data\\001\\msg.txt")
	url:="map"
	fmt.Println(translate(msg))
	fmt.Println(translate(url))
}
// getChar applies a Caesar shift of +2 to lowercase ASCII letters, wrapping
// around the alphabet ('y' -> 'a', 'z' -> 'b'); every other rune is returned
// unchanged.
func getChar(r rune) rune {
	// Cleanup: removed the dead commented-out switch and the redundant
	// parentheses around the condition (non-idiomatic; gofmt strips them).
	if r >= 'a' && r <= 'z' {
		return 'a' + (r-'a'+2)%26
	}
	return r
}
func translate(instr string) string {
return strings.Map(getChar, instr)
} |
package aws
import (
"context"
"encoding/json"
"fmt"
"log"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
)
// Credential is a basic AWS credential: a static access-key pair plus the
// region to use for API calls.
type Credential struct {
	// AWS Region
	Region string
	// AWS Access key ID
	AccessKeyID string
	// AWS Secret Access Key
	SecretAccessKey string
}
// GetSecret fetches the secret with the given ID from AWS Secrets Manager
// using the default SDK configuration and returns it decoded from its JSON
// string form. It panics if the secret cannot be retrieved or decoded.
// NOTE(review): assumes the secret is a string secret — *result.SecretString
// would be a nil dereference for binary-only secrets; confirm with callers.
func GetSecret(secretID string) interface{} {
	var secret interface{}
	input := &secretsmanager.GetSecretValueInput{
		SecretId: aws.String(secretID),
	}
	svc := getSecretClient()
	result, err := svc.GetSecretValue(context.TODO(), input)
	if err != nil {
		panic(err.Error())
	}
	// Bug fix: the unmarshal error was previously dropped, silently returning
	// a nil secret on malformed JSON; surface it like the retrieval error.
	if err := json.Unmarshal([]byte(*result.SecretString), &secret); err != nil {
		panic(err.Error())
	}
	return secret
}
// GetSecretWithRegion fetches the secret with the given ID from AWS Secrets
// Manager in the given region and returns it decoded from its JSON string
// form. It panics if the secret cannot be retrieved or decoded.
func GetSecretWithRegion(secretID string, region string) interface{} {
	var secret interface{}
	input := &secretsmanager.GetSecretValueInput{
		SecretId: aws.String(secretID),
	}
	svc := getSecretClientWithRegion(region)
	result, err := svc.GetSecretValue(context.TODO(), input)
	if err != nil {
		panic(err.Error())
	}
	// Bug fix: the unmarshal error was previously dropped, silently returning
	// a nil secret on malformed JSON; surface it like the retrieval error.
	if err := json.Unmarshal([]byte(*result.SecretString), &secret); err != nil {
		panic(err.Error())
	}
	return secret
}
// GetSecretLocally fetches the secret with the given ID from a local Secrets
// Manager endpoint (e.g. localstack at localURL) and returns it decoded from
// its JSON string form. It panics if the secret cannot be retrieved or
// decoded.
func GetSecretLocally(secretID string, region string, localURL string) interface{} {
	var secret interface{}
	input := &secretsmanager.GetSecretValueInput{
		SecretId: aws.String(secretID),
	}
	svc := getSecretClientLocally(localURL, region)
	result, err := svc.GetSecretValue(context.TODO(), input)
	if err != nil {
		panic(err.Error())
	}
	// Bug fix: the unmarshal error was previously dropped, silently returning
	// a nil secret on malformed JSON; surface it like the retrieval error.
	if err := json.Unmarshal([]byte(*result.SecretString), &secret); err != nil {
		panic(err.Error())
	}
	return secret
}
// GetSecretWithCredential fetches the secret with the given ID from AWS
// Secrets Manager using an explicit static credential and returns it decoded
// from its JSON string form. It panics if the secret cannot be retrieved or
// decoded.
func GetSecretWithCredential(credential Credential, secretID string) interface{} {
	var secret interface{}
	input := &secretsmanager.GetSecretValueInput{
		SecretId: aws.String(secretID),
	}
	svc := getSecretClientWithCredential(credential)
	result, err := svc.GetSecretValue(context.TODO(), input)
	if err != nil {
		panic(err.Error())
	}
	// Bug fix: the unmarshal error was previously dropped, silently returning
	// a nil secret on malformed JSON; surface it like the retrieval error.
	if err := json.Unmarshal([]byte(*result.SecretString), &secret); err != nil {
		panic(err.Error())
	}
	return secret
}
// getSecretClient builds a Secrets Manager client from the default SDK
// configuration chain (env vars, shared config, instance metadata).
func getSecretClient() *secretsmanager.Client {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatalf("unable to load SDK config, %v", err)
	}
	// Cleanup: the previous `if &cfg != nil` check compared the address of a
	// local variable, which is never nil (go vet flags it), so the nil-return
	// branch was unreachable; construct the client unconditionally.
	return secretsmanager.NewFromConfig(cfg)
}
// getSecretClientWithRegion builds a Secrets Manager client from the default
// SDK configuration chain, pinned to the given region.
func getSecretClientWithRegion(region string) *secretsmanager.Client {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion(region),
	)
	if err != nil {
		log.Fatalf("unable to load SDK config, %v", err)
	}
	// Cleanup: removed the always-true `&cfg != nil` check (address of a
	// local is never nil); the nil-return branch was unreachable.
	return secretsmanager.NewFromConfig(cfg)
}
// getSecretClientWithCredential builds a Secrets Manager client using the
// given static credential and its region.
func getSecretClientWithCredential(credential Credential) *secretsmanager.Client {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(credential.AccessKeyID, credential.SecretAccessKey, "")),
		config.WithRegion(credential.Region),
	)
	if err != nil {
		log.Fatalf("unable to load SDK config, %v", err)
	}
	// Cleanup: removed the always-true `&cfg != nil` check (address of a
	// local is never nil); the nil-return branch was unreachable.
	return secretsmanager.NewFromConfig(cfg)
}
// getSecretClientLocally builds a Secrets Manager client that routes all
// Secrets Manager requests to localURL (e.g. a localstack endpoint), signing
// with whatever region the SDK requests.
func getSecretClientLocally(localURL, region string) *secretsmanager.Client {
	// Bug cleanup: the resolver's `region` parameter shadowed the outer
	// `region` argument, so the original `region == region` compared the
	// parameter with itself and was always true — every Secrets Manager
	// request was routed to localURL regardless of region. That behavior is
	// preserved, now without the tautological comparison. The outer `region`
	// argument is consequently unused; TODO consider config.WithRegion(region)
	// if callers expect it to take effect.
	customResolver := aws.EndpointResolverFunc(func(service, reqRegion string) (aws.Endpoint, error) {
		if service == secretsmanager.ServiceID {
			return aws.Endpoint{
				PartitionID:   "aws",
				URL:           localURL,
				SigningRegion: reqRegion,
			}, nil
		}
		return aws.Endpoint{}, fmt.Errorf("unknown endpoint requested")
	})
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithEndpointResolver(customResolver))
	if err != nil {
		log.Fatalf("unable to load SDK config, %v", err)
	}
	// Removed the always-true `&cfg != nil` check (address of a local is
	// never nil).
	return secretsmanager.NewFromConfig(cfg)
}
|
package main
import "math"
// sphere is a scene object: a center/radius pair with material properties
// (reflectiveness and RGB color), deserializable from JSON scene files.
type sphere struct {
	Center vector `json:"center"`
	Radius float64 `json:"radius"`
	Reflectiveness float64 `json:"reflectiveness"`
	Red float64 `json:"red"`
	Green float64 `json:"green"`
	Blue float64 `json:"blue"`
}
// getReflectiveness returns the material's reflectiveness coefficient.
func (s *sphere) getReflectiveness() float64 {
	return s.Reflectiveness
}
// getColorRaw returns the material color as raw (red, green, blue) values.
func (s *sphere) getColorRaw() (float64, float64, float64) {
	return s.Red, s.Green, s.Blue
}
// getUnitNormal returns the outward unit surface normal at point, i.e. the
// normalized vector from the sphere's center to the point. Assumes point
// lies on (or near) the surface; for other points it is simply the unit
// direction away from the center.
func (s *sphere) getUnitNormal(point *vector) *vector {
	normal := point.sub(&s.Center)
	unormal := normal.unit()
	return &unormal
}
// intersected tests c_ray against the sphere by solving the quadratic
// |start + t*direction - center|^2 = radius^2 for the ray parameter t.
// It returns the smallest positive root (the closest hit in front of the
// ray origin) and whether a hit occurred; hits at or behind the origin do
// not count.
func (s *sphere) intersected(c_ray *ray) (float64, bool) {
	// Quadratic coefficients a*t^2 + b*t + c = 0, expanded component-wise.
	a := c_ray.direction.X*c_ray.direction.X +
		c_ray.direction.Y*c_ray.direction.Y +
		c_ray.direction.Z*c_ray.direction.Z
	b := 2.0 * ((c_ray.start.X-s.Center.X)*c_ray.direction.X +
		(c_ray.start.Y-s.Center.Y)*c_ray.direction.Y +
		(c_ray.start.Z-s.Center.Z)*c_ray.direction.Z)
	c := (c_ray.start.X-s.Center.X)*(c_ray.start.X-s.Center.X) +
		(c_ray.start.Y-s.Center.Y)*(c_ray.start.Y-s.Center.Y) +
		(c_ray.start.Z-s.Center.Z)*(c_ray.start.Z-s.Center.Z) -
		s.Radius*s.Radius
	is_hit := false
	// Discriminant: negative means the ray misses the sphere entirely.
	i_test := b*b - 4.0*a*c
	t1, t2, t_closest := 0.0, 0.0, 0.0
	if i_test >= 0.0 {
		is_hit = true
		t1 = (-b + math.Sqrt(i_test)) / (2.0 * a)
		t2 = (-b - math.Sqrt(i_test)) / (2.0 * a)
		// in_buffer (defined elsewhere) presumably clamps near-zero t values
		// to avoid self-intersection acne — TODO confirm its semantics.
		t1 = in_buffer(t1)
		t2 = in_buffer(t2)
		if t1 <= 0.0 && t2 <= 0.0 {
			is_hit = false // it hit behind or on the viewer
		} else if t1 > 0.0 && t2 > 0.0 {
			// Both roots in front: take the nearer surface.
			if t1 < t2 {
				t_closest = t1
			} else {
				t_closest = t2
			}
		} else if t1 > 0.0 {
			t_closest = t1
		} else if t2 > 0.0 {
			t_closest = t2
		}
	}
	return t_closest, is_hit
}
|
package main
import "fmt"
// main computes and prints the total and average of a fixed set of scores.
func main() {
	scores := [5]float64{98, 93, 77, 82, 73}
	var total float64
	for i := range scores {
		total += scores[i]
	}
	fmt.Println("Total:", total)
	fmt.Println("Avg:", total/float64(len(scores)))
}
|
package main
import (
"image/color"
"engo.io/ecs"
"engo.io/engo"
"engo.io/engo/common"
)
// buttonOpenMenu is the logical input-button name bound to Escape in Setup.
const buttonOpenMenu = "OpenMenu"
// game is the in-game scene.
type game struct{}
// Type returns the scene identifier used for scene switching.
func (g *game) Type() string { return sceneGame }
// Preload is a no-op: this scene loads no assets.
func (g *game) Preload() {}
// Setup registers the Escape key as the menu button, paints a white
// background, and installs the render and input systems into the world.
func (g *game) Setup(world *ecs.World) {
	engo.Input.RegisterButton(buttonOpenMenu, engo.Escape)
	common.SetBackground(color.White)
	world.AddSystem(&common.RenderSystem{})
	world.AddSystem(&inputSystem{})
}
// inputSystem polls the menu button each frame and switches to the main
// menu scene when it is pressed.
type inputSystem struct{}
// Update is ran every frame, with `dt` being the time
// in seconds since the last frame
func (is *inputSystem) Update(float32) {
	// JustPressed fires only on the frame the key transitions to pressed, so
	// holding Escape does not repeatedly switch scenes.
	if engo.Input.Button(buttonOpenMenu).JustPressed() {
		engo.SetSceneByName(sceneMainMenu, true)
	}
}
// Remove is called whenever an Entity is removed from the World, in order to
// remove it from this system as well. This system tracks no entities, so it
// is a no-op.
func (is *inputSystem) Remove(ecs.BasicEntity) {}
|
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package typeutil
import (
"errors"
"fmt"
"strconv"
"github.com/milvus-io/milvus/internal/proto/schemapb"
)
// EstimateSizePerRecord returns an estimate, in bytes, of a single row of
// the given collection schema. Fixed-width scalar types contribute their
// exact widths, strings a rough constant, and vector types a size derived
// from their "dim" type parameter. It returns an error only when a vector
// field carries a non-numeric "dim" value; unknown data types and vector
// fields without a "dim" parameter contribute nothing (matching the
// original behavior).
func EstimateSizePerRecord(schema *schemapb.CollectionSchema) (int, error) {
	res := 0
	for _, fs := range schema.Fields {
		switch fs.DataType {
		case schemapb.DataType_Bool, schemapb.DataType_Int8:
			res++
		case schemapb.DataType_Int16:
			res += 2
		case schemapb.DataType_Int32, schemapb.DataType_Float:
			res += 4
		case schemapb.DataType_Int64, schemapb.DataType_Double:
			res += 8
		case schemapb.DataType_String:
			res += 125 // todo find a better way to estimate string type
		case schemapb.DataType_BinaryVector:
			dim, ok, err := fieldDim(fs)
			if err != nil {
				return -1, err
			}
			if ok {
				res += dim / 8 // binary vectors pack 8 dimensions per byte
			}
		case schemapb.DataType_FloatVector:
			dim, ok, err := fieldDim(fs)
			if err != nil {
				return -1, err
			}
			if ok {
				res += dim * 4 // 4 bytes per float32 component
			}
		}
	}
	return res, nil
}

// fieldDim extracts the integer "dim" type parameter of a field. ok is false
// when no "dim" key is present; err is non-nil when the value is present but
// not an integer.
func fieldDim(fs *schemapb.FieldSchema) (dim int, ok bool, err error) {
	for _, kv := range fs.TypeParams {
		if kv.Key == "dim" {
			v, convErr := strconv.Atoi(kv.Value)
			if convErr != nil {
				return 0, false, convErr
			}
			return v, true, nil
		}
	}
	return 0, false, nil
}
// SchemaHelper wraps a collection schema with precomputed lookup tables for
// O(1) field access by name or ID, plus the primary-key field's offset
// (-1 when the schema declares no primary key).
type SchemaHelper struct {
	schema *schemapb.CollectionSchema
	// nameOffset maps field name -> index into schema.Fields.
	nameOffset map[string]int
	// idOffset maps field ID -> index into schema.Fields.
	idOffset map[int64]int
	// primaryKeyOffset is the index of the primary-key field, or -1.
	primaryKeyOffset int
}
// CreateSchemaHelper validates schema and builds the lookup tables used by
// the accessors. It rejects a nil schema, duplicated field names, duplicated
// field IDs, and more than one primary key.
func CreateSchemaHelper(schema *schemapb.CollectionSchema) (*SchemaHelper, error) {
	if schema == nil {
		return nil, errors.New("schema is nil")
	}
	h := SchemaHelper{
		schema:           schema,
		nameOffset:       make(map[string]int),
		idOffset:         make(map[int64]int),
		primaryKeyOffset: -1,
	}
	for offset, field := range schema.Fields {
		if _, dup := h.nameOffset[field.Name]; dup {
			return nil, errors.New("duplicated fieldName: " + field.Name)
		}
		if _, dup := h.idOffset[field.FieldID]; dup {
			return nil, errors.New("duplicated fieldID: " + strconv.FormatInt(field.FieldID, 10))
		}
		h.nameOffset[field.Name] = offset
		h.idOffset[field.FieldID] = offset
		if !field.IsPrimaryKey {
			continue
		}
		if h.primaryKeyOffset >= 0 {
			return nil, errors.New("primary key is not unique")
		}
		h.primaryKeyOffset = offset
	}
	return &h, nil
}
// GetPrimaryKeyField returns the schema's primary-key field, or an error
// when the schema declares none.
func (helper *SchemaHelper) GetPrimaryKeyField() (*schemapb.FieldSchema, error) {
	if off := helper.primaryKeyOffset; off >= 0 {
		return helper.schema.Fields[off], nil
	}
	return nil, fmt.Errorf("no primary in schema")
}
// GetFieldFromName looks up a field schema by its name.
func (helper *SchemaHelper) GetFieldFromName(fieldName string) (*schemapb.FieldSchema, error) {
	if offset, ok := helper.nameOffset[fieldName]; ok {
		return helper.schema.Fields[offset], nil
	}
	return nil, fmt.Errorf("fieldName(%s) not found", fieldName)
}
// GetFieldFromID looks up a field schema by its field ID.
func (helper *SchemaHelper) GetFieldFromID(fieldID int64) (*schemapb.FieldSchema, error) {
	if offset, ok := helper.idOffset[fieldID]; ok {
		return helper.schema.Fields[offset], nil
	}
	return nil, fmt.Errorf("fieldID(%d) not found", fieldID)
}
// GetVectorDimFromID returns the "dim" type parameter of the vector field
// with the given field ID. It errors when the field does not exist, is not a
// vector type, or has no parsable "dim" parameter.
// (Fixed the misspelled parameter name "filedID" -> "fieldID"; Go parameter
// names are not part of the call interface, so callers are unaffected.)
func (helper *SchemaHelper) GetVectorDimFromID(fieldID int64) (int, error) {
	sch, err := helper.GetFieldFromID(fieldID)
	if err != nil {
		return 0, err
	}
	if !IsVectorType(sch.DataType) {
		return 0, fmt.Errorf("field type = %s not has dim", schemapb.DataType_name[int32(sch.DataType)])
	}
	for _, kv := range sch.TypeParams {
		if kv.Key == "dim" {
			dim, err := strconv.Atoi(kv.Value)
			if err != nil {
				return 0, err
			}
			return dim, nil
		}
	}
	return 0, fmt.Errorf("fieldID(%d) not has dim", fieldID)
}
// IsVectorType reports whether dataType is one of the vector column types.
func IsVectorType(dataType schemapb.DataType) bool {
	return dataType == schemapb.DataType_FloatVector ||
		dataType == schemapb.DataType_BinaryVector
}
// IsIntergerType reports whether dataType is a signed integer type
// (Int8, Int16, Int32 or Int64).
// NOTE(review): the name misspells "Integer"; it is kept as-is for
// backward compatibility with existing callers.
func IsIntergerType(dataType schemapb.DataType) bool {
	switch dataType {
	case schemapb.DataType_Int8,
		schemapb.DataType_Int16,
		schemapb.DataType_Int32,
		schemapb.DataType_Int64:
		return true
	}
	return false
}
// IsFloatingType reports whether dataType is a floating-point type
// (Float or Double).
func IsFloatingType(dataType schemapb.DataType) bool {
	return dataType == schemapb.DataType_Float ||
		dataType == schemapb.DataType_Double
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package signappx
import (
"archive/zip"
"crypto"
"crypto/hmac"
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
"strings"
"github.com/sassoftware/relic/v7/lib/zipslicer"
)
// blockMapSize is the chunk size (64 KiB) over which appx block-map
// digests are computed.
const blockMapSize = 64 * 1024
// hashAlgs maps each supported digest algorithm to the XML algorithm URI
// written in the block map's HashMethod attribute.
var hashAlgs = map[crypto.Hash]string{
	crypto.SHA256: "http://www.w3.org/2001/04/xmlenc#sha256",
	crypto.SHA384: "http://www.w3.org/2001/04/xmldsig-more#sha384",
	crypto.SHA512: "http://www.w3.org/2001/04/xmlenc#sha512",
}
// noHashFiles lists the appx metadata members that are excluded from
// block-map hashing.
var noHashFiles = map[string]bool{
	appxSignature: true,
	appxCodeIntegrity: true,
	appxContentTypes: true,
	appxBlockMap: true,
}
// blockMap models the AppxBlockMap.xml document: the digest algorithm in
// use plus one entry per hashed file.
type blockMap struct {
	XMLName xml.Name `xml:"http://schemas.microsoft.com/appx/2010/blockmap BlockMap"`
	HashMethod string `xml:",attr"`
	File []blockFile
	// Hash is the digest algorithm resolved from HashMethod; not serialized.
	Hash crypto.Hash `xml:"-"`
	// unverifiedSizes marks that compressed files were added whose
	// per-block sizes were not copied from an original map; Marshal
	// refuses to serialize while it is set (see CopySizes).
	unverifiedSizes bool
}
// blockFile is one file entry in the block map.
type blockFile struct {
	Name string `xml:",attr"`
	// Size is the file's uncompressed size in bytes.
	Size uint64 `xml:",attr"`
	// LfhSize is the length of the zip local file header for this member.
	LfhSize int `xml:",attr"`
	Block []block
}
// block is the digest (and, for compressed files, compressed size) of one
// 64 KiB chunk of a file.
type block struct {
	Hash string `xml:",attr"`
	Size uint64 `xml:",attr,omitempty"`
}
// verifyBlockMap parses AppxBlockMap.xml from the archive and verifies
// that every payload member of inz matches it in order, name,
// uncompressed size and block count, and — unless skipDigests is set —
// that every 64 KiB block digest matches. Metadata members (noHashFiles)
// and, for bundles, nested .appx payloads are exempt from hashing.
func verifyBlockMap(inz *zip.Reader, files zipFiles, skipDigests bool) error {
	// Presence of a bundle manifest marks this package as a bundle.
	isBundle := files[bundleManifestFile] != nil
	zf := files[appxBlockMap]
	if zf == nil {
		return errors.New("missing block map")
	}
	blob, err := readZipFile(zf)
	if err != nil {
		return err
	}
	var bm blockMap
	if err := xml.Unmarshal(blob, &bm); err != nil {
		return fmt.Errorf("error parsing block map: %w", err)
	}
	// Resolve the HashMethod URI back to a crypto.Hash.
	var hash crypto.Hash
	for hash2, alg := range hashAlgs {
		if alg == bm.HashMethod {
			hash = hash2
			break
		}
	}
	if hash == 0 {
		return errors.New("unsupported hash in block map")
	}
	bm.Hash = hash
	// Walk zip members and block-map entries in lockstep; order must match.
	bmfiles := bm.File
	for _, zf := range inz.File {
		if noHashFiles[zf.Name] || (isBundle && strings.HasSuffix(zf.Name, ".appx")) {
			continue
		}
		if len(bmfiles) == 0 {
			return fmt.Errorf("blockmap: unhashed zip file %s", zf.Name)
		}
		bmf := bmfiles[0]
		bmfiles = bmfiles[1:]
		name := zipToDos(zf.Name)
		if bmf.Name != name {
			return fmt.Errorf("blockmap: file mismatch: %s != %s", bmf.Name, name)
		} else if bmf.Size != zf.UncompressedSize64 {
			return fmt.Errorf("blockmap: file mismatch: %s: size %d != %d", name, bmf.Size, zf.UncompressedSize64)
		}
		// Expect one block per 64 KiB of uncompressed data, rounded up.
		if len(bmf.Block) != int((zf.UncompressedSize64+blockMapSize-1)/blockMapSize) {
			return errors.New("blockmap: file mismatch")
		}
		if skipDigests {
			continue
		}
		r, err := zf.Open()
		if err != nil {
			return err
		}
		remaining := zf.UncompressedSize64
		for i, block := range bmf.Block {
			// The final block may be shorter than blockMapSize.
			count := remaining
			if count > blockMapSize {
				count = blockMapSize
			}
			remaining -= count
			d := hash.New()
			if _, err := io.CopyN(d, r, int64(count)); err != nil {
				return err
			}
			calc := d.Sum(nil)
			expected, err := base64.StdEncoding.DecodeString(block.Hash)
			if err != nil {
				return fmt.Errorf("blockmap: %w", err)
			}
			if !hmac.Equal(calc, expected) {
				return fmt.Errorf("blockmap: digest mismatch for %s block %d: calculated %x != found %x", name, i, calc, expected)
			}
		}
		if err := r.Close(); err != nil {
			return err
		}
		// Leftover bytes mean the block list was too short for the file.
		if remaining > 0 {
			return errors.New("blockmap: file mismatch")
		}
	}
	return nil
}
// SetHash selects the digest algorithm used for the block map, rejecting
// algorithms with no registered XML algorithm URI.
func (b *blockMap) SetHash(hash crypto.Hash) error {
	alg, ok := hashAlgs[hash]
	if !ok || alg == "" {
		return errors.New("unsupported hash algorithm")
	}
	b.HashMethod = alg
	b.Hash = hash
	return nil
}
// Copy compressed sizes from the old blockmap since I can't figure out how
// they come up with the numbers and the thing won't install if they're
// wrong...
//
// CopySizes parses the original block map in blob and copies each file's
// per-block compressed Size into the matching entry of b. Entries are
// matched positionally (same index i), so b.File must list the same files
// in the same order as the original; only the manifest entry is skipped.
// Clears unverifiedSizes on success so Marshal will serialize.
func (b *blockMap) CopySizes(blob []byte) error {
	var orig blockMap
	if err := xml.Unmarshal(blob, &orig); err != nil {
		return fmt.Errorf("error parsing block map: %w", err)
	}
	for i, oldf := range orig.File {
		zipName := dosToZip(oldf.Name)
		if zipName == appxManifest || zipName == bundleManifestFile {
			// The only file that gets changed by us. It's stored with no
			// compression to avoid screwing up the sizes.
			continue
		} else if i >= len(b.File) {
			return errors.New("old block map has too many files")
		}
		newf := &b.File[i]
		if newf.Name != oldf.Name {
			return fmt.Errorf("old block map doesn't match new: %s", oldf.Name)
		}
		for j, oldblock := range oldf.Block {
			newf.Block[j].Size = oldblock.Size
		}
	}
	b.unverifiedSizes = false
	return nil
}
// AddFile hashes one zip member into the block map, appending a blockFile
// entry with one digest per 64 KiB chunk of uncompressed data.
// If raw is non-nil, the member's raw bytes (local header, compressed
// data, data descriptor) are copied to it; if cooked is non-nil, the
// uncompressed data is copied there. Metadata members (noHashFiles) and
// nested .appx payloads are streamed but not recorded in the map.
// A recorded file stored with compression marks the map as having
// unverified block sizes (see CopySizes / Marshal).
func (b *blockMap) AddFile(f *zipslicer.File, raw, cooked io.Writer) error {
	bmf := blockFile{Name: zipToDos(f.Name)}
	lfh, err := f.GetLocalHeader()
	if err != nil {
		return fmt.Errorf("hashing zip metadata: %w", err)
	}
	bmf.LfhSize = len(lfh)
	if raw != nil {
		if _, err := raw.Write(lfh); err != nil {
			return err
		}
	}
	// Reading decompressed data while tee-ing the raw bytes to `raw`.
	rc, err := f.OpenAndTeeRaw(raw)
	if err != nil {
		return fmt.Errorf("hashing zip metadata: %w", err)
	}
	// Copy 64K of uncompressed data at a time, adding block elements as we go
	for {
		d := b.Hash.New()
		w := io.Writer(d)
		if cooked != nil {
			w = io.MultiWriter(d, cooked)
		}
		n, err := io.CopyN(w, rc, blockMapSize)
		if n > 0 {
			bmf.Size += uint64(n)
			hash := base64.StdEncoding.EncodeToString(d.Sum(nil))
			bmf.Block = append(bmf.Block, block{Hash: hash})
		}
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
	}
	if err := rc.Close(); err != nil {
		return err
	}
	dd, err := f.GetDataDescriptor()
	if err != nil {
		return fmt.Errorf("hashing zip metadata: %w", err)
	}
	if raw != nil {
		if _, err := raw.Write(dd); err != nil {
			return err
		}
	}
	if !(noHashFiles[f.Name] || strings.HasSuffix(f.Name, ".appx")) {
		// Compressed payload blocks need Size values from an original
		// block map before this one can be marshalled.
		if f.Method != zip.Store {
			b.unverifiedSizes = true
		}
		b.File = append(b.File, bmf)
	}
	return nil
}
// Marshal serializes the block map to XML. It refuses to serialize while
// compressed files remain whose block sizes were never populated from an
// original block map (see CopySizes).
func (b *blockMap) Marshal() ([]byte, error) {
	if !b.unverifiedSizes {
		return marshalXML(b, false)
	}
	return nil, errors.New("found compressed files not already in blockmap")
}
// zipToDos converts a zip-style ("/"-separated) path to the DOS form
// ("\"-separated) used inside appx block maps.
func zipToDos(name string) string {
	return strings.Map(func(r rune) rune {
		if r == '/' {
			return '\\'
		}
		return r
	}, name)
}
// dosToZip converts a DOS-style ("\"-separated) path back to the
// zip-style "/"-separated form.
func dosToZip(name string) string {
	return strings.Map(func(r rune) rune {
		if r == '\\' {
			return '/'
		}
		return r
	}, name)
}
|
package models
import (
"strconv"
"time"
"github.com/jinzhu/gorm"
)
/******************************************************************************************************
说明:系列赛模型
作者:w-zengtao
belongs_to Game
official id : steam_id & hltv_id
User Story
1. 每场系列赛由一到多场(Format)比赛组成
2. 每场系列赛只有左右两支队伍进行比赛
3. Live 表示比赛是否是进行中的状态
4. 系列赛需要有大比分
5. 系列赛需要有当前正在进行中的比赛场数(game_no)(未开始 0 , 打完 == Format + 1)
6. Status 表示系列赛的状态、取消、未开始、进行中、已结束(没有特别直观的意义)
******************************************************************************************************/
// Battle is a best-of-N series between two teams in a game/league; see
// the header comment above for the full domain description.
type Battle struct {
	ID uint `gorm:"primary_key;column:id"`
	GameID uint `gorm:"column:game_id"`
	LeftTeamID uint `gorm:"column:left_team_id"`
	RightTeamID uint `gorm:"column:right_team_id"`
	Format uint `gorm:"column:format;default:1"` // BO1 As Default
	OfficialID uint `gorm:"column:official_id"` // external id (steam_id / hltv_id)
	LeagueID uint `gorm:"column:league_id"`
	Live bool `gorm:"default:false"` // whether the series is currently in progress
	StartTime time.Time
	Status int // series state: cancelled / not started / in progress / finished
	LeftScore int // series (big) score for the left team
	RightScore int // series (big) score for the right team
	GameNO int // current game number: 0 = not started, Format+1 = finished
	LeftTeam Team
	RightTeam Team
	League League
	Matches []Match
}
// FilterParams - for scopes: builds a query restricted by the optional
// request parameters game_id, league_id and page (fixed page size 10).
// NOTE(review): strconv.Atoi errors are silently ignored, so non-numeric
// values behave as 0 — confirm this matches the API contract.
func (v *Battle) FilterParams(params map[string]string) *gorm.DB {
	records := db.Model(v)
	if raw, ok := params["game_id"]; ok {
		gameID, _ := strconv.Atoi(raw)
		records = records.Where(&Battle{GameID: uint(gameID)})
	}
	if raw, ok := params["league_id"]; ok {
		leagueID, _ := strconv.Atoi(raw)
		records = records.Where(&Battle{LeagueID: uint(leagueID)})
	}
	if raw, ok := params["page"]; ok {
		page, _ := strconv.Atoi(raw)
		records = records.Offset((page - 1) * 10).Limit(10)
	}
	return records
}
|
package command
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/coreos/etcdctl/third_party/github.com/codegangsta/cli"
"github.com/coreos/etcdctl/third_party/github.com/coreos/go-etcd/etcd"
)
// handlerFunc executes one etcd operation for a CLI invocation and
// returns the raw etcd response.
type handlerFunc func(*cli.Context, *etcd.Client) (*etcd.Response, error)
// printFunc renders an etcd response in the requested output format.
type printFunc func(*etcd.Response, string)
// dumpCURL blindly dumps all curl output to os.Stderr.
// It loops forever reading from the client's curl channel, so it is meant
// to run in its own goroutine for the life of the process.
func dumpCURL(client *etcd.Client) {
	client.OpenCURL()
	for {
		fmt.Fprintf(os.Stderr, "Curl-Example: %s\n", client.RecvCURL())
	}
}
// rawhandle wraps the command function handlers and sets up the
// environment (peer list, optional curl debugging, cluster sync) but
// performs no output formatting. It exits the process when the cluster
// cannot be synced.
func rawhandle(c *cli.Context, fn handlerFunc) (*etcd.Response, error) {
	client := etcd.NewClient(trimsplit(c.GlobalString("peers"), ","))
	debug := c.GlobalBool("debug")
	if debug {
		go dumpCURL(client)
	}
	// Sync the cluster member list before issuing the request.
	synced := client.SyncCluster()
	if debug {
		fmt.Fprintf(os.Stderr, "Cluster-Peers: %s\n",
			strings.Join(client.GetCluster(), " "))
	}
	if !synced {
		fmt.Println("Cannot sync with the cluster")
		os.Exit(FailedToConnectToHost)
	}
	// Execute handler function.
	return fn(c, client)
}
// handlePrint wraps the command function handlers to parse global flags
// into a client and to properly format the response objects.
// On error it prints the error and exits with ErrorFromEtcd.
func handlePrint(c *cli.Context, fn handlerFunc, pFn printFunc) {
	resp, err := rawhandle(c, fn)
	if err != nil {
		fmt.Println("Error:", err)
		os.Exit(ErrorFromEtcd)
	}
	if resp == nil {
		return
	}
	pFn(resp, c.GlobalString("output"))
}
// handleKey handles a request that wants to do operations on a single key.
// It is handlePrint specialized to the key printer.
func handleKey(c *cli.Context, fn handlerFunc) {
	handlePrint(c, fn, printKey)
}
// printKey writes the etcd response to STDOUT in the given format
// ("simple", "extended" or "json"). Directories cannot be printed as
// keys; the process exits with status 1 in that case.
func printKey(resp *etcd.Response, format string) {
	// printKey is only for keys, error on directories.
	if resp.Node.Dir {
		// Was: Fprintln(os.Stderr, fmt.Sprintf(...)) — use Fprintf directly.
		fmt.Fprintf(os.Stderr, "Cannot print key [%s: Is a directory]\n", resp.Node.Key)
		os.Exit(1)
	}
	// Format the result.
	switch format {
	case "simple":
		fmt.Println(resp.Node.Value)
	case "extended":
		// Extended prints in a rfc2822 style format.
		fmt.Println("Key:", resp.Node.Key)
		fmt.Println("Created-Index:", resp.Node.CreatedIndex)
		fmt.Println("Modified-Index:", resp.Node.ModifiedIndex)
		if resp.PrevNode != nil {
			fmt.Println("PrevNode.Value:", resp.PrevNode.Value)
		}
		fmt.Println("TTL:", resp.Node.TTL)
		fmt.Println("Etcd-Index:", resp.EtcdIndex)
		fmt.Println("Raft-Index:", resp.RaftIndex)
		fmt.Println("Raft-Term:", resp.RaftTerm)
		fmt.Println("")
		fmt.Println(resp.Node.Value)
	case "json":
		b, err := json.Marshal(resp)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b))
	default:
		fmt.Fprintln(os.Stderr, "Unsupported output format:", format)
	}
}
|
package filelock
import (
"os"
"k8s.io/client-go/tools/clientcmd"
)
func init() {
	// We're using our own file locking mechanism (see withLock), so
	// disable client-go's built-in kubeconfig modify lock.
	clientcmd.UseModifyConfigLock = false
}
// WithLock runs action while holding an exclusive (write) lock on the
// config file's companion ".lock" file.
func WithLock(configAccess clientcmd.ConfigAccess, action func() error) error {
	lockName := configAccess.GetDefaultFilename() + ".lock"
	return withLock(lockName, writeLock, action)
}
// WithRLock runs action while holding a shared (read) lock on the config
// file's companion ".lock" file.
func WithRLock(configAccess clientcmd.ConfigAccess, action func() error) error {
	lockName := configAccess.GetDefaultFilename() + ".lock"
	return withLock(lockName, readLock, action)
}
// withLock creates (or truncates) the lock file, acquires a lock of the
// given type on it, runs action, then releases the lock.
func withLock(filename string, lt lockType, action func() error) error {
	lockfile, err := os.Create(filename)
	if err != nil {
		return err
	}
	// Close the descriptor on every path; the original leaked it.
	defer func() { _ = lockfile.Close() }()
	err = lock(lockfile, lt)
	if err != nil {
		return err
	}
	defer func() { _ = Unlock(lockfile) }()
	return action()
}
|
// Copyright © 2020 Weald Technology Trading
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
import (
"context"
"sync"
"github.com/wealdtech/walletd/core"
)
// Store holds key/value pairs in-memory.
// This storage is ephemeral; it should not be used for production.
type Store struct {
data map[string][]byte
statesMx sync.RWMutex
}
// New creates a new, empty memory storage. The returned error is always
// nil; it exists to satisfy the common constructor signature.
func New() (*Store, error) {
	store := &Store{data: map[string][]byte{}}
	return store, nil
}
// Fetch fetches a value for a given key, returning core.ErrNotFound when
// the key is absent.
func (s *Store) Fetch(ctx context.Context, key []byte) ([]byte, error) {
	s.statesMx.RLock()
	defer s.statesMx.RUnlock()
	if data, ok := s.data[string(key)]; ok {
		return data, nil
	}
	return nil, core.ErrNotFound
}
// Store stores a value for a given key, overwriting any previous value.
func (s *Store) Store(ctx context.Context, key []byte, value []byte) error {
	s.statesMx.Lock()
	defer s.statesMx.Unlock()
	s.data[string(key)] = value
	return nil
}
|
package main
import (
"bufio"
"context"
"fmt"
"net"
"os"
"sync"
flags "github.com/jessevdk/go-flags"
)
// opts holds the command-line options, populated by go-flags in main;
// the per-flag help text lives in the struct tags.
var opts struct {
	ResolverIP string `short:"r" long:"resolver" description:"IP of the DNS resolver to use for lookups"`
	Protocol string `short:"P" long:"protocol" choice:"tcp" choice:"udp" default:"udp" description:"Protocol to use for lookups"`
	Port uint16 `short:"p" long:"port" default:"53" description:"Port to bother the specified DNS resolver on"`
}
// worker resolves one IP address to hostnames via reverse DNS and sends
// one "ip \t name" line per result on res. Lookup errors are silently
// dropped. Signals completion via wg.
func worker(ip string, wg *sync.WaitGroup, res chan string) {
	defer wg.Done()
	var r *net.Resolver
	if opts.ResolverIP != "" {
		r = &net.Resolver{
			PreferGo: true,
			// Always dial the user-supplied resolver, whatever address
			// the net package asks for.
			Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
				d := net.Dialer{}
				return d.DialContext(ctx, opts.Protocol, fmt.Sprintf("%s:%d", opts.ResolverIP, opts.Port))
			},
		}
	}
	// When no resolver IP is given, r stays nil; per the net package
	// docs a nil *Resolver is equivalent to a zero Resolver (default
	// system resolution).
	addr, err := r.LookupAddr(context.Background(), ip)
	if err != nil {
		return
	}
	for _, a := range addr {
		res <- fmt.Sprintf("%s \t %s", ip, a)
	}
}
// main reads IP addresses from stdin (one per line), resolves each one
// concurrently via reverse DNS, and prints results as they arrive.
func main() {
	if _, err := flags.ParseArgs(&opts, os.Args); err != nil {
		os.Exit(1)
	}
	var wg sync.WaitGroup
	res := make(chan string)
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		wg.Add(1)
		go worker(sc.Text(), &wg, res)
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	// Close the results channel once every worker has finished.
	go func() {
		wg.Wait()
		close(res)
	}()
	for line := range res {
		fmt.Println(line)
	}
}
|
package file
import (
"os"
"io"
"fmt"
"regexp"
"encoding/json"
"io/ioutil"
"../ds"
"bytes"
)
// Write_Log serializes all entries for the given date to
// <LogPath>/<year>/<month>/<day>.json, creating the year/month directory
// hierarchy and the day file as needed. When the date has no entries the
// day file is removed instead of being written empty.
func Write_Log(year, month, day string) {
	var subEntries []ds.Entry
	path := (ds.KConfig.LogPath + "/" + year)
	// Make sure that the log dir exists
	if ! exists(path) {
		create_log_dir(path)
	}
	path = path+"/"+month
	// Make sure that the log dir exists
	if ! exists(path) {
		create_log_dir(path)
	}
	path = path+"/"+ day + ".json"
	// Make sure that the log file exists
	if ! exists(path) {
		create_log_file(path)
	}
	subEntries = ds.Get_AllEntries_On_Date(year, month, day)
	if subEntries == nil {
		// No entries for this date: drop the (possibly just created) file.
		removeFile(path)
	} else {
		write_JSON_File(path, subEntries)
	}
}
func write_JSON_File(path string, i interface{}) {
text, err := json.Marshal(i)
var out bytes.Buffer
json.Indent(&out, text, "", "\t")
if err != nil {
fmt.Println("error:", err)
os.Exit(1)
}
err2 := ioutil.WriteFile(path, out.Bytes(), 0644)
if err2 != nil {
panic(err2)
}
}
// Write_Keyword_File serializes the global keyword list to path as
// indented JSON.
func Write_Keyword_File(path string) {
	write_JSON_File(path, &ds.KeywordList)
}
// exists reports whether the file or directory at path is present.
// Note: any Stat error other than "does not exist" (e.g. permission
// denied) still reports true, matching the original behavior.
func exists(path string) (bool) {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}
// Copy_Attachments copies the file at path into the month's
// "attachments" directory under the log tree and returns the bare file
// name. The name is extracted with the regex `\w+\.\w+`, i.e. the first
// "name.ext" token found in path.
// NOTE(review): paths containing dashes, spaces, or multiple dots may
// yield a partial name — confirm inputs are simple file names.
func Copy_Attachments(path, year, month string) string {
	var fileName string
	regxName := regexp.MustCompile(`\w+\.\w+`)
	fileName = regxName.FindString(path)
	attachmentsPath := (ds.KConfig.LogPath + "/" + year + "/" + month + "/attachments")
	// Make sure attachments directory exists
	if ! exists(attachmentsPath) {
		create_log_dir(attachmentsPath)
	}
	//Copy file?
	copy_File_Contents(path, (attachmentsPath+"/"+fileName))
	return fileName
}
// copy_File_Contents copies the contents of the file at src into a new
// file at dst, syncing dst to disk before returning. The error from
// closing dst is reported when no earlier error occurred.
func copy_File_Contents(src, dst string) (err error) {
	var in, out *os.File
	if in, err = os.Open(src); err != nil {
		return
	}
	defer in.Close()
	if out, err = os.Create(dst); err != nil {
		return
	}
	defer func() {
		if cerr := out.Close(); err == nil {
			err = cerr
		}
	}()
	if _, err = io.Copy(out, in); err != nil {
		return
	}
	err = out.Sync()
	return
}
// create_log_dir creates a single directory at path (mode 0777),
// panicking on failure.
func create_log_dir(path string) {
	if err := os.Mkdir(path, 0777); err != nil {
		panic(err)
	}
}
// create_log_file creates (or truncates) an empty file at path,
// panicking on failure.
func create_log_file(path string) {
	f, err := os.Create(path)
	if err != nil {
		panic(err)
	}
	f.Close()
}
// removeFile deletes the file at path, ignoring any error (best-effort).
func removeFile(path string) {
	os.Remove(path)
}
|
package parser
import (
"fmt"
"strings"
"github.com/bytesparadise/libasciidoc/pkg/types"
"github.com/davecgh/go-spew/spew"
log "github.com/sirupsen/logrus"
)
// Names of the individual substitution steps that can appear in a block's
// `subs` attribute and in a pipeline's sequence.
const (
	// AttributeRefs the "attribute_refs" substitution
	AttributeRefs string = "attributes" // TODO: no need to export
	// Callouts the "callouts" substitution
	Callouts string = "callouts"
	// InlinePassthroughs the "inline_passthrough" substitution
	InlinePassthroughs string = "inline_passthrough" //nolint:gosec
	// Macros the "macros" substitution
	Macros string = "macros"
	// None the "none" substitution
	None string = "none"
	// PostReplacements the "post_replacements" substitution
	PostReplacements string = "post_replacements"
	// Quotes the "quotes" substitution
	Quotes string = "quotes"
	// Replacements the "replacements" substitution
	Replacements string = "replacements"
	// SpecialCharacters the "specialchars" substitution
	SpecialCharacters string = "specialchars"
)
// normalSubstitutions returns the default pipeline applied to normal
// (prose) blocks, in application order.
func normalSubstitutions() *substitutions {
	seq := []string{
		InlinePassthroughs,
		AttributeRefs,
		SpecialCharacters,
		Quotes,
		Replacements,
		Macros,
		PostReplacements,
	}
	return &substitutions{sequence: seq}
}
// headerSubstitutions returns the pipeline applied to document headers;
// unlike the normal pipeline, macros run before replacements and there
// are no post-replacements.
func headerSubstitutions() *substitutions {
	seq := []string{
		InlinePassthroughs,
		AttributeRefs,
		SpecialCharacters,
		Quotes,
		Macros,
		Replacements,
	}
	return &substitutions{sequence: seq}
}
// attributeSubstitutions returns the pipeline applied to attribute
// values; it matches the header pipeline minus macros.
func attributeSubstitutions() *substitutions {
	seq := []string{
		InlinePassthroughs,
		AttributeRefs,
		SpecialCharacters,
		Quotes,
		// Macros deliberately excluded.
		Replacements,
	}
	return &substitutions{sequence: seq}
}
// noneSubstitutions returns an empty pipeline: no substitution is applied.
func noneSubstitutions() *substitutions {
	return &substitutions{sequence: nil}
}
// verbatimSubstitutions returns the pipeline for verbatim blocks
// (listing/fenced/literal): callouts plus special characters only.
func verbatimSubstitutions() *substitutions {
	seq := []string{
		Callouts,
		SpecialCharacters,
	}
	return &substitutions{sequence: seq}
}
// substitutions is an ordered pipeline of substitution step names (see
// the constants above) to apply to a block's content.
type substitutions struct {
	sequence []string
}
// newSubstitutions computes the substitution pipeline for block b from
// its optional `subs` attribute. Without the attribute, the block-type
// default applies. If every listed entry is incremental ("+x", "-x" or
// "x+"), the defaults serve as a baseline that is appended to, prepended
// to, or reduced; otherwise the listed substitutions replace the
// defaults outright.
func newSubstitutions(b types.WithElements) (*substitutions, error) {
	// look-up the optional `subs` attribute in the element
	attrSub, found := b.GetAttributes().GetAsString(types.AttrSubstitutions)
	if !found {
		return defaultSubstitutions(b), nil
	}
	subs := strings.Split(attrSub, ",")
	var result *substitutions
	// when dealing with incremental substitutions, use default sub as a baseline and append or prepend the incremental subs
	if allIncremental(subs) {
		result = defaultSubstitutions(b)
	} else {
		result = &substitutions{
			sequence: make([]string, 0, len(subs)),
		}
	}
	for _, sub := range subs {
		// log.Debugf("checking subs '%s'", sub)
		switch {
		case strings.HasSuffix(sub, "+"): // prepend
			if err := result.prepend(strings.TrimSuffix(sub, "+")); err != nil {
				return nil, err
			}
		case strings.HasPrefix(sub, "+"): // append
			if err := result.append(strings.TrimPrefix(sub, "+")); err != nil {
				return nil, err
			}
		case strings.HasPrefix(sub, "-"): // remove from all substitutions
			if err := result.remove(strings.TrimPrefix(sub, "-")); err != nil {
				return nil, err
			}
		default:
			if err := result.append(sub); err != nil {
				return nil, err
			}
		}
	}
	if log.IsLevelEnabled(log.DebugLevel) {
		log.Debugf("substitutions to apply on block of type '%T': %s", b, spew.Sdump(result))
	}
	return result, nil
}
// defaultSubstitutions returns the pipeline used when a block declares no
// `subs` attribute, chosen by block type: verbatim for listings, none for
// comments/passthroughs, normal for everything else.
func defaultSubstitutions(b types.WithElements) *substitutions {
	// log.Debugf("looking-up default substitution for block of type '%T'", b)
	switch b := b.(type) {
	case *types.DelimitedBlock:
		switch b.Kind {
		case types.Example, types.Quote, types.Verse, types.Sidebar, types.MarkdownQuote, types.Open:
			return normalSubstitutions()
		case types.Comment, types.Passthrough:
			return noneSubstitutions()
		default: // includes `types.Listing`, `types.Fenced`, `types.Literal`
			return verbatimSubstitutions()
		}
	case *types.Paragraph:
		// if listing paragraph:
		switch b.GetAttributes().GetAsStringWithDefault(types.AttrStyle, "") {
		case types.Listing:
			return verbatimSubstitutions()
		case types.Passthrough:
			return noneSubstitutions()
		default:
			return normalSubstitutions()
		}
	default:
		return normalSubstitutions()
	}
}
// allIncremental reports whether every given sub is incremental, i.e.
// written with a "+" prefix, a "-" prefix, or a "+" suffix.
// (The previous comment incorrectly said "suffixed with `-`".)
func allIncremental(subs []string) bool {
	for _, s := range subs {
		incremental := strings.HasPrefix(s, "+") ||
			strings.HasPrefix(s, "-") ||
			strings.HasSuffix(s, "+")
		if !incremental {
			return false
		}
	}
	return true
}
// toString renders the pipeline as a single comma-separated string, e.g.
// "macros,quotes".
func (s *substitutions) toString() string {
	return strings.Join(s.sequence, ",")
}
// split the actual substitutions in 2 parts, the first one containing
// all substitutions, the second part all substitutions except `inline_passthrough` and `attributes`
// (or nil if there were no other substitutions)
func (s *substitutions) split() (*substitutions, *substitutions) {
	phase1 := &substitutions{
		sequence: s.sequence, // all by default (in case not split needed)
	}
	var phase2 *substitutions
	// NOTE(review): the loop does not break, so if AttributeRefs appears
	// more than once the split happens after the LAST occurrence —
	// confirm this is the intended behavior.
	for i, sub := range s.sequence {
		if sub == AttributeRefs && i < len(s.sequence)-1 {
			phase2 = &substitutions{
				sequence: s.sequence[i+1:],
			}
		}
	}
	return phase1, phase2
}
// contains reports whether the pipeline includes the expected step.
func (s *substitutions) contains(expected string) bool {
	for i := range s.sequence {
		if s.sequence[i] == expected {
			return true
		}
	}
	return false
}
// append resolves v into its substitution sequence and adds it at the end
// of the current pipeline.
func (s *substitutions) append(v string) error {
	other, err := substitutionsFor(v)
	if err != nil {
		return err
	}
	s.sequence = append(s.sequence, other.sequence...)
	return nil
}
// prepend resolves v into its substitution sequence and inserts it at the
// front of the current pipeline.
func (s *substitutions) prepend(v string) error {
	other, err := substitutionsFor(v)
	if err != nil {
		return err
	}
	s.sequence = append(other.sequence, s.sequence...)
	return nil
}
// remove resolves v into its substitution sequence and deletes every
// matching step from the current pipeline.
func (s *substitutions) remove(v string) error {
	other, err := substitutionsFor(v)
	if err != nil {
		return err
	}
	for _, unwanted := range other.sequence {
		kept := make([]string, 0, len(s.sequence))
		for _, sub := range s.sequence {
			if sub != unwanted {
				kept = append(kept, sub)
			}
		}
		s.sequence = kept
	}
	return nil
}
// substitutionsFor maps a substitution name to its sequence: the group
// names "normal", "none" and "verbatim" expand to their predefined
// pipelines, while individual step names yield a one-entry sequence.
func substitutionsFor(s string) (*substitutions, error) {
	switch s {
	case "normal":
		return normalSubstitutions(), nil
	case "none":
		return noneSubstitutions(), nil
	case "verbatim":
		return verbatimSubstitutions(), nil
	case "attributes", "macros", "quotes", "replacements", "post_replacements", "callouts", "specialchars":
		return &substitutions{sequence: []string{s}}, nil
	}
	// TODO: return `none` instead of `err` and log an error with the fragment position (use logger with fields?)
	return nil, fmt.Errorf("unsupported substitution: '%v'", s)
}
|
package main
import (
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
	// envVarPrefix prefixes the environment variables that can override
	// command-line flags (e.g. AUTH_<FLAG-NAME>); see newRootCmd.
	envVarPrefix = "AUTH_"
)
// version and commit default to "n/a" and are intended to be set at
// build time (e.g. via -ldflags).
var (
	version = "n/a"
	commit = "n/a"
)
// main runs the root command, printing any error to stderr and exiting
// non-zero on failure.
func main() {
	if err := newRootCmd().Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// newRootCmd builds the root "authctl" command. Before any subcommand
// runs, flags that were not set on the command line may be overridden by
// environment variables named AUTH_<FLAG> (upper-cased, dashes replaced
// by underscores).
func newRootCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use: "authctl",
		TraverseChildren: true,
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			var err error
			cmd.Flags().VisitAll(func(f *pflag.Flag) {
				// AUTH_FOO_BAR overrides --foo-bar only when the flag
				// was not given explicitly on the command line.
				optName := strings.ToUpper(f.Name)
				optName = strings.ReplaceAll(optName, "-", "_")
				varName := envVarPrefix + optName
				if val, ok := os.LookupEnv(varName); ok && !f.Changed {
					err2 := f.Value.Set(val)
					if err2 != nil {
						err = fmt.Errorf("invalid environment variable %s: %w", varName, err2)
					}
				}
			})
			if err != nil {
				return err
			}
			// Errors reaching main are printed there; suppress cobra's
			// own usage/error output.
			cmd.SilenceUsage = true
			cmd.SilenceErrors = true
			return nil
		},
	}
	cmd.AddCommand(
		newJWTCmd(),
		newPasswdCmd(),
		newTokenCmd(),
		newCompletionCmd(),
		newVersionCmd(),
	)
	return cmd
}
// newVersionCmd builds the "version" subcommand, which prints the binary
// name together with the build-time version and commit.
func newVersionCmd() *cobra.Command {
	return &cobra.Command{
		Use: "version",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("authctl", version, commit)
			return nil
		},
	}
}
// newCompletionCmd builds the hidden "completion" subcommand, which
// writes a bash or zsh completion script to stdout.
func newCompletionCmd() *cobra.Command {
	return &cobra.Command{
		Use: "completion <shell>",
		ValidArgs: []string{"bash", "zsh"},
		Args: cobra.ExactArgs(1),
		Hidden: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			switch shell := args[0]; shell {
			case "bash":
				return newRootCmd().GenBashCompletion(os.Stdout)
			case "zsh":
				return newRootCmd().GenZshCompletion(os.Stdout)
			default:
				return fmt.Errorf("unknown shell: %s", shell)
			}
		},
	}
}
|
package bytedance
import (
"fmt"
"strconv"
)
// Code1015 demonstrates multiply on a very long (85-digit) operand.
func Code1015() {
	s1 := "1231312312313214314242424324234131231312312312312312312312312414324234234234234234324"
	s2 := "456"
	fmt.Println(multiply(s1, s2))
}
/**
给定两个以字符串形式表示的非负整数 num1 和 num2,返回 num1 和 num2 的乘积,它们的乘积也表示为字符串形式。
示例 1:
输入: num1 = "2", num2 = "3"
输出: "6"
示例 2:
输入: num1 = "123", num2 = "456"
输出: "56088"
说明:
num1 和 num2 的长度小于110。
num1 和 num2 只包含数字 0-9。
num1 和 num2 均不以零开头,除非是数字 0 本身。
不能使用任何标准库的大数类型(比如 BigInteger)或直接将输入转换为整数来处理。
*/
// multiply returns the product of two non-negative decimal integers given
// as strings, as a string. Operands may be up to 110 digits long.
//
// The previous implementation violated the problem constraints: it parsed
// the operands with strconv.ParseUint (silently overflowing on long
// inputs) and always returned "". This version does grade-school
// multiplication on digit arrays, so arbitrary lengths are handled.
func multiply(num1 string, num2 string) string {
	if num1 == "0" || num2 == "0" {
		return "0"
	}
	// res[i+j+1] accumulates num1[i]*num2[j]; the product of an m-digit
	// and an n-digit number has at most m+n digits.
	res := make([]int, len(num1)+len(num2))
	for i := len(num1) - 1; i >= 0; i-- {
		for j := len(num2) - 1; j >= 0; j-- {
			d := int(num1[i]-'0') * int(num2[j]-'0')
			p1, p2 := i+j, i+j+1
			sum := d + res[p2]
			res[p2] = sum % 10
			res[p1] += sum / 10
		}
	}
	// Skip at most one leading zero (inputs have no leading zeros).
	i := 0
	for i < len(res)-1 && res[i] == 0 {
		i++
	}
	out := make([]byte, 0, len(res)-i)
	for ; i < len(res); i++ {
		out = append(out, byte(res[i])+'0')
	}
	return string(out)
}
|
package pickle
// Rick is a minimal demo type.
type Rick struct {
	// Portal holds an arbitrary portal identifier string.
	Portal string
}
|
package main
import (
"crypto/aes"
"crypto/cipher"
"sync"
)
// block is 128 bit , 16 byte
// key is 256 bit , 32 byte : fixed use 256 aes , not 128 , 196
//type _Taes struct {
// name string
// key []byte
//} // _Taes
// https://golang.org/pkg/crypto/cipher/#example_NewCBCEncrypter
// ___VencAesCbc__only__mux serializes all calls to _FencAesCbc__only___.
var ___VencAesCbc__only__mux sync.Mutex
// _FencAesCbc__only___ AES-256-CBC-encrypts ___VbyteIn with ___Vkey
// (must be 32 bytes) and ___Viv (must be 16 bytes), returning
// iv || ciphertext. Input whose length is not a multiple of 16 is padded
// with random bytes; there is no self-describing padding scheme, so the
// decryptor cannot strip it (see _FdecAesCbc__only___).
// NOTE(review): Unlock is deferred before Lock is called — a panic in
// between would unlock an unlocked mutex. Also, the trace log near the
// end prints plaintext and key-derived bytes, which is a security risk.
func _FencAesCbc__only___(___Vkey []byte, ___Viv []byte, ___VbyteIn []byte, ___VtraceInt int) ([]byte, error) {
	defer ___VencAesCbc__only__mux.Unlock()
	___VencAesCbc__only__mux.Lock()
	var __VoLen int
	__VtLen := 0
	//_FpfN(" 132811 _FencAesCbc__only___ : len In (%d) , key %x , iv %x", len(*___VbyteIn), *___Vkey, *___Viv)
	__VlenInTmp2 := len(___VbyteIn)
	// Hard requirements: AES-256 key and one AES block of IV.
	_FnotEqExit(" 132819 01 ", 32, len(___Vkey))
	_FnotEqExit(" 132819 02 ", 16, len(___Viv))
	__Vkey2 := ___Vkey
	__Viv2 := ___Viv
	// Pad to a 16-byte boundary with random bytes when needed.
	__Vremain := __VlenInTmp2 & 0xF
	__VneedPat := (0 != __Vremain)
	__VtBufEN := []byte{}
	if __VneedPat {
		__VlenAdd := 16 - __Vremain
		__VtLen = __VlenInTmp2 + __VlenAdd
		if 0 != (__VtLen & 0xF) {
			_Fex(_Spf(" 132819 04 : why not ZERO : __VlenInTmp2 %d , __Vremain %d, __VneedPat %T, __VlenAdd %d , __VtLen %d?",
				__VlenInTmp2, __Vremain, __VneedPat, __VlenAdd, __VtLen))
		}
		__VtBufEN = make([]byte, __VtLen)
		__Vpat := _FgenRand_nByte__(uint16(__VlenAdd))
		copy(__VtBufEN[__VlenInTmp2:], __Vpat)
		copy(__VtBufEN, ___VbyteIn)
		__VoLen = __VtLen + 16
		//_FpfN(" 132815 : add pat %d", __VlenAdd)
	} else {
		__VtBufEN = ___VbyteIn
		__VoLen = __VlenInTmp2 + 16 // origin Len + iv(16)
	}
	// Output layout: [iv (16 bytes)][CBC ciphertext].
	__Vout4 := make([]byte, __VoLen-16)
	__Vout5 := make([]byte, __VoLen)
	__VblockEN, __Verr := aes.NewCipher(__Vkey2)
	_FerrExit(" 132819 03 ", __Verr)
	__VmodeEN := cipher.NewCBCEncrypter(__VblockEN, __Viv2)
	__VmodeEN.CryptBlocks(__Vout4, __VtBufEN)
	copy(__Vout5[0:16], __Viv2)
	copy(__Vout5[16:], __Vout4)
	__VblockEN = nil
	__VmodeEN = nil
	//_FpfNhex(&__Vout5, 32, " 132819 08 _FencAesCbc__only___ : lenIn %d , dataOut: ", __VlenInTmp2)
	// SECURITY(review): this trace logs input bytes and a 32-byte slice
	// of the (padded) plaintext labelled "insideKey" — remove or gate in
	// production builds.
	__VinSideKey := __VtBufEN[7:39]
	___CpfN(" 132819 09 Ti:%d , aesENC(noRandPat) inM5{%x} outM5{%x} INfirst20<%x> in<%x> out:<%x> insideKey is <%x> ",
		___VtraceInt,
		_Fmd5__5x(&___VbyteIn),
		_Fmd5__5x(&__Vout5),
		__VtBufEN[:20],
		__VtBufEN,
		__Vout5,
		__VinSideKey,
	)
	return __Vout5, nil
} // _FencAesCbc__only___
// _FencAesCbcExit is the exit-on-error wrapper around
// _FencAesCbc__only___: any encryption error terminates the process via
// _FerrExit.
func _FencAesCbcExit(___Vkey []byte, ___Viv []byte, ___VbyteIn []byte) []byte {
	__Vbyte, __Verr := _FencAesCbc__only___(___Vkey, ___Viv, ___VbyteIn, 0)
	_FerrExit(" 182811 ", __Verr)
	return __Vbyte
} // _FencAesCbcExit
// ___V_FdecAesCbc__only___mux serializes all calls to _FdecAesCbc__only___.
var ___V_FdecAesCbc__only___mux sync.Mutex
// _FdecAesCbc__only___ decrypts data produced by _FencAesCbc__only___:
// the input is iv (16 bytes) || ciphertext. It returns an empty slice
// with a nil error when the key is not 32 bytes or the input is shorter
// than 32 bytes; trailing bytes beyond the last full 16-byte block are
// ignored.
// NOTE(review): random padding added by the encryptor is NOT stripped
// here — callers must know the original plaintext length.
func _FdecAesCbc__only___(___Vkey []byte, ___VbyteIn []byte, ___VtraceIntDE int) ([]byte, error) {
	defer ___V_FdecAesCbc__only___mux.Unlock()
	___V_FdecAesCbc__only___mux.Lock()
	__VoutNull := []byte{}
	if 32 != len(___Vkey) {
		_FpfNonce(" 838181 key len error (not equals to 32): %d:%s", len(___Vkey), String5s(&___Vkey))
		return __VoutNull, nil
	}
	__VlenIn := len(___VbyteIn)
	if __VlenIn < 32 {
		return __VoutNull, nil
	}
	// Round the usable length down to a whole number of AES blocks.
	__VdataEnd := (__VlenIn & 0xFFFFFFF0)
	//_FpfN(" 838180 __VlenIn %d , __VdataEnd %d ", __VlenIn, __VdataEnd)
	__Vout2 := make([]byte, __VdataEnd-16)
	//_FpfNhex(___VbyteIn, 82, " 838181 dataIn ")
	// Input layout: first 16 bytes are the IV, the rest is ciphertext.
	__Viv := (___VbyteIn)[:16]
	__VcipherText := (___VbyteIn)[16:__VdataEnd]
	__VblockDE, __Verr := aes.NewCipher(___Vkey) // func NewCipher(key []byte) (cipher.Block, error) // import "crypto/aes"
	_FerrExit(" 838182 ", __Verr)
	if 2 == 3 {
		_FpfNhex(&__Viv, 82, " 838183 iv ")
		_FpfNhex(&__VcipherText, 82, " 838184 cipherText ")
	}
	__VmodeDE := cipher.NewCBCDecrypter(__VblockDE, __Viv)
	_FnullExit(" 838186 ", __VmodeDE)
	// CryptBlocks(dst, src []byte)
	__VmodeDE.CryptBlocks(__Vout2, __VcipherText)
	//_FpfNhex(&__Vout2, 82, " 838189 out ")
	__VblockDE = nil
	__VmodeDE = nil
	return __Vout2, nil
} // _FdecAesCbc__only___
|
package network
import (
"fmt"
"net"
"strings"
)
// GetMyIP returns the last local interface address whose string form
// starts with "129" (a site-specific heuristic), or "localhost" when
// interface addresses cannot be read. Returns the empty string when no
// matching address exists.
func GetMyIP() string {
	allIPs, err := net.InterfaceAddrs()
	if err != nil {
		fmt.Println("network.GetMyIP()--> Error receiving IPs. IP set to localhost. Consider setting IP manually")
		return "localhost"
	}
	var myIP string
	for _, addr := range allIPs {
		// Addresses are CIDR-like ("a.b.c.d/nn"); keep only the IP part.
		ip := strings.Split(addr.String(), "/")[0]
		// HasPrefix avoids the original's `ip[0:3]` slice, which panicked
		// on any address string shorter than three bytes.
		if strings.HasPrefix(ip, "129") {
			myIP = ip
		}
	}
	return myIP
}
|
package httpclient
import (
"context"
"io"
)
// RequestPayload bundles everything needed to issue one HTTP request.
type RequestPayload struct {
	// Context controls cancellation and deadlines for the request.
	Context context.Context
	// Method is the HTTP verb, e.g. "GET".
	Method string
	// Path is the request path.
	Path string
	// Query holds query-string parameters as key/value pairs.
	Query map[string]string
	// Body is the request body; nil for bodyless requests.
	Body io.Reader
}
|
package inject
import (
	"bytes"
	"fmt"
	"html/template"

	"github.com/richardcase/vault-initializer/pkg/model"
	"k8s.io/api/apps/v1beta1"
)
// ResolveTemplate renders pathTemplate using the deployment's namespace,
// name and first container name as template data.
// It now returns an error — instead of panicking on the [0] index as the
// original did — when the deployment spec declares no containers.
func ResolveTemplate(deployment *v1beta1.Deployment, pathTemplate string) (string, error) {
	containers := deployment.Spec.Template.Spec.Containers
	if len(containers) == 0 {
		return "", fmt.Errorf("deployment %s/%s has no containers", deployment.Namespace, deployment.Name)
	}
	pc := model.PathConfig{Namespace: deployment.Namespace, DeploymentName: deployment.Name, ContainerName: containers[0].Name}
	tmpl, err := template.New("pathTemplate").Parse(pathTemplate)
	if err != nil {
		return "", err
	}
	buf := new(bytes.Buffer)
	err = tmpl.Execute(buf, pc)
	if err != nil {
		return "", err
	}
	return buf.String(), nil
}
|
package leetcode
// checkWays (LeetCode 1719, "Number Of Ways To Reconstruct A Tree"):
// given pairs [x, y] meaning one of x, y is an ancestor of the other,
// returns 0 when no rooted tree matches the pairs, 1 when exactly one
// does, and 2 when more than one does.
// NOTE(review): node value 0 is used as a "none" sentinel for root and
// parent below, so node labels are assumed to be non-zero.
func checkWays(pairs [][]int) int {
	// adj[x] is the set of nodes that appear in a pair with x.
	adj := map[int]map[int]bool{}
	for _, pair := range pairs {
		if adj[pair[0]] == nil {
			adj[pair[0]] = map[int]bool{}
		}
		if adj[pair[1]] == nil {
			adj[pair[1]] = map[int]bool{}
		}
		adj[pair[0]][pair[1]] = true
		adj[pair[1]][pair[0]] = true
	}
	n := len(adj)
	// The root must be paired with every other node.
	root := 0
	for node, neighbors := range adj {
		if len(neighbors) == n-1 {
			root = node
			break
		}
	}
	if root == 0 {
		return 0
	}
	res := 1
	for node, neighbors := range adj {
		if node == root {
			continue
		}
		// The parent candidate is the paired node of smallest degree
		// whose degree is still >= this node's degree.
		nodeDegree := len(neighbors)
		nodeParent := 0
		parentDegree := n
		for neighbor := range neighbors {
			if len(adj[neighbor]) >= nodeDegree && len(adj[neighbor]) < parentDegree {
				nodeParent = neighbor
				parentDegree = len(adj[nodeParent])
			}
		}
		if nodeParent == 0 {
			return 0
		}
		// Every node paired with this node (except the parent itself)
		// must also be paired with the parent.
		for neighbor := range neighbors {
			if neighbor == nodeParent {
				continue
			}
			if !adj[nodeParent][neighbor] {
				return 0
			}
		}
		// Equal degrees mean node and parent are interchangeable, so
		// more than one valid tree exists.
		if nodeDegree == parentDegree {
			res = 2
		}
	}
	return res
}
|
package main
import (
"fmt"
"github.com/microcosm-cc/bluemonday"
"github.com/russross/blackfriday"
"io/ioutil"
"os"
)
// main reads Markdown from stdin, renders it to HTML, sanitizes the
// result with the UGC policy, and prints the safe HTML to stdout.
func main() {
	input, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		fmt.Println("error:", err)
		os.Exit(1)
	}
	rendered := blackfriday.MarkdownCommon(input)
	safe := bluemonday.UGCPolicy().SanitizeBytes(rendered)
	fmt.Println(string(safe))
}
|
package ymdRedisServer
import (
"testing"
"github.com/orestonce/ymd/ymdAssert"
"net"
"github.com/orestonce/ymd/ymdError"
)
// TestNewRedisServer verifies that a fresh server starts its local
// listener without error.
// NOTE(review): "RunLoal" looks like a typo for "RunLocal" in the package
// under test; kept as-is since it is the actual method name.
func TestNewRedisServer(t *testing.T) {
	server := NewRedisServer()
	err := server.RunLoal()
	ymdAssert.True(err == nil, err)
}
// TestRedisServer_Addr verifies that a started server reports a non-empty
// listen address.
func TestRedisServer_Addr(t *testing.T) {
	server := NewRedisServer()
	err := server.RunLoal()
	ymdError.PanicIfError(err)
	ymdAssert.True(server.Addr() != ``)
}
// TestRedisServer_RunLoal is an intentionally empty placeholder; RunLoal
// is already exercised by the tests above.
func TestRedisServer_RunLoal(t *testing.T) {
}
// TestRedisServer_RunOnListener checks that closing the listener makes
// RunOnListener return a non-nil error.
func TestRedisServer_RunOnListener(t *testing.T) {
ln, err := net.Listen(`tcp`, `127.0.0.1:0`)
ymdError.PanicIfError(err)
server := NewRedisServer()
done := make(chan error)
go func() {
err := server.RunOnListener(ln)
done <- err
}()
// Close the listener out from under the server, then wait for the
// accept loop to fail and report its error.
ln.Close()
err = <-done
ymdAssert.True(err != nil)
}
|
package spec_test
/*
// Tests from original Component, should be transferred to new Spec system.
func TestComposable(t *testing.T) {
t.Run("ID can be empty", func(t *testing.T) {
root := control.New()
assert.Equal(root.ID(), "")
})
t.Run("Scheduler() assigns Context", func(t *testing.T) {
box := Box(context.New())
assert.NotNil(box.Context())
})
t.Run("Applied Key", func(t *testing.T) {
root := Box(context.New(), Key("abcd"))
assert.Equal(root.Key(), "abcd")
})
t.Run("Key can be empty", func(t *testing.T) {
root := control.New()
assert.Equal(root.Key(), "")
})
t.Run("Empty key will defer to ID if present", func(t *testing.T) {
root := Box(context.New(), ID("abcd"))
assert.Equal(root.Key(), "abcd")
})
t.Run("Provided ID", func(t *testing.T) {
root := Box(context.New(), ID("root"))
assert.Equal(root.ID(), "root")
})
t.Run("AddChild", func(t *testing.T) {
root := control.New()
one := control.New()
two := control.New()
root.SetWidth(200)
assert.Equal(root.AddChild(one), 1)
assert.Equal(root.AddChild(two), 2)
assert.Equal(one.Parent().ID(), root.ID())
assert.Equal(two.Parent().ID(), root.ID())
if root.Parent() != nil {
t.Error("Expected root.Parent() to be nil")
}
})
t.Run("ChildCount", func(t *testing.T) {
var one, two, three Displayable
root := Box(context.New(), Children(func(c Context) {
one = Box(c, Children(func() {
two = Box(c)
three = Box(c)
}))
}))
assert.Equal(root.ChildCount(), 1)
assert.Equal(root.ChildAt(0), one)
assert.Equal(one.ChildCount(), 2)
assert.Equal(one.ChildAt(0), two)
assert.Equal(one.ChildAt(1), three)
})
t.Run("GetFilteredChildren", func(t *testing.T) {
createTree := func() (Displayable, []Displayable) {
var root, one, two, three, four Displayable
root = Box(context.New(), Children(func(c Context) {
one = Box(c, ID("a-t-one"))
two = Box(c, ID("a-t-two"))
three = Box(c, ID("b-t-three"))
four = Box(c, ID("b-t-four"))
}))
return root, []Displayable{one, two, three, four}
}
allKids := func(d Displayable) bool {
return strings.Index(d.ID(), "-t-") > -1
}
bKids := func(d Displayable) bool {
return strings.Index(d.ID(), "b-") > -1
}
t.Run("returns Empty slice", func(t *testing.T) {
root := control.New()
filtered := root.GetFilteredChildren(allKids)
assert.Equal(len(filtered), 0)
})
t.Run("returns all matched children in simple match", func(t *testing.T) {
root, _ := createTree()
filtered := root.GetFilteredChildren(allKids)
assert.Equal(len(filtered), 4)
})
t.Run("returns all matched children in harder match", func(t *testing.T) {
root, _ := createTree()
filtered := root.GetFilteredChildren(bKids)
assert.Equal(len(filtered), 2)
assert.Equal(filtered[0].ID(), "b-t-three")
assert.Equal(filtered[1].ID(), "b-t-four")
})
})
t.Run("GetChildren returns empty list", func(t *testing.T) {
root := control.New()
children := root.Children()
if children == nil {
t.Error("GetChildren should not return nil")
}
assert.Equal(len(children), 0)
})
t.Run("GetChildren returns new list", func(t *testing.T) {
root := Box(context.New(), Children(func(c Context) {
Box(c)
Box(c)
Box(c)
}))
children := root.Children()
assert.Equal(len(children), 3)
})
t.Run("Empty", func(t *testing.T) {
one := control.New()
two := control.New()
if one.IsContainedBy(two) {
t.Error("Unrelated nodes are not ancestors")
}
})
t.Run("False for same control", func(t *testing.T) {
one := control.New()
if one.IsContainedBy(one) {
t.Error("A control should not be contained by itself")
}
})
t.Run("Child is true", func(t *testing.T) {
one := control.New()
two := control.New()
one.AddChild(two)
if !two.IsContainedBy(one) {
t.Error("One should be an ancestor of two")
}
if one.IsContainedBy(two) {
t.Error("Two is not an ancestor of one")
}
})
t.Run("Deep descendants too", func(t *testing.T) {
one := control.New()
two := control.New()
three := control.New()
four := control.New()
five := control.New()
one.AddChild(two)
two.AddChild(three)
three.AddChild(four)
four.AddChild(five)
if !five.IsContainedBy(one) {
t.Error("Five should be contained by one")
}
if !five.IsContainedBy(two) {
t.Error("Five should be contained by two")
}
if !five.IsContainedBy(three) {
t.Error("Five should be contained by three")
}
if !five.IsContainedBy(four) {
t.Error("Five should be contained by four")
}
})
t.Run("Prunes nested invalidations", func(t *testing.T) {
var one, two, three Displayable
root := Box(context.New(), ID("root"), Children(func(c Context) {
one = Box(c, ID("one"), Children(func() {
two = Box(c, ID("two"), Children(func() {
three = Box(c, ID("three"))
}))
}))
}))
three.InvalidateChildren()
two.InvalidateChildren()
one.InvalidateChildren()
invalidNodes := root.InvalidNodes()
assert.Equal(len(invalidNodes), 1)
assert.Equal(invalidNodes[0].ID(), "one")
})
t.Run("InvalidateChildrenFor always goes to root", func(t *testing.T) {
root := Box(context.New(), Children(func(c Context) {
Box(c, Children(func() {
Box(c, Children(func() {
Box(c, ID("abcd"))
}))
}))
}))
child := root.FindControlById("abcd")
child.InvalidateChildrenFor(child.Parent())
assert.Equal(len(root.InvalidNodes()), 1)
})
t.Run("RemoveChild", func(t *testing.T) {
var one, two, three Displayable
root := Box(context.New(), Children(func(c Context) {
one = Box(c)
two = Box(c)
three = Box(c)
}))
removedFromIndex := root.RemoveChild(two)
assert.Equal(removedFromIndex, 1)
removedFromIndex = root.RemoveChild(two)
assert.Equal(removedFromIndex, -1, "Already removed, not found")
})
t.Run("RemoveAllChildren", func(t *testing.T) {
var one, two, three Displayable
root := Box(context.New(), Children(func(c Context) {
one = Box(c)
two = Box(c)
three = Box(c)
}))
assert.Equal(root.ChildCount(), 3)
root.RemoveAllChildren()
assert.Equal(root.ChildCount(), 0)
assert.Nil(one.Parent())
assert.Nil(two.Parent())
assert.Nil(three.Parent())
})
t.Run("Invalidated siblings are sorted fifo", func(t *testing.T) {
var one, two, three Displayable
root := Box(context.New(), ID("root"), Children(func(c Context) {
one = Box(c, ID("one"), Children(func() {
three = Box(c, ID("three"))
}))
two = Box(c, ID("two"))
}))
three.InvalidateChildren()
two.InvalidateChildren()
one.InvalidateChildren()
nodes := root.InvalidNodes()
assert.Equal(len(nodes), 2, "Expected two")
assert.Equal(nodes[0].ID(), "two")
assert.Equal(nodes[1].ID(), "one")
})
t.Run("GetControlByID", func(t *testing.T) {
var aye, bee, cee, dee, eee Displayable
var setUp = func() {
aye = Box(context.New(), ID("aye"), Children(func(c Context) {
bee = Box(c, ID("bee"), Children(func() {
dee = Box(c, ID("dee"))
eee = Box(c, ID("eee"))
}))
cee = Box(c, ID("cee"))
}))
}
t.Run("Matching returned", func(t *testing.T) {
setUp()
result := aye.FindControlById("aye")
assert.NotNil(result)
assert.Equal(result.ID(), "aye")
})
t.Run("First child returned", func(t *testing.T) {
setUp()
result := aye.FindControlById("bee")
assert.NotNil(result)
assert.Equal(result.ID(), "bee")
})
t.Run("Deep child returned", func(t *testing.T) {
setUp()
result := aye.FindControlById("eee")
assert.NotNil(result)
assert.Equal(result.ID(), "eee")
})
})
t.Run("SelectControls", func(t *testing.T) {
t.Run("By Type", func(t *testing.T) {
root := Box(context.New(), Children(func(c Context) {
HBox(c)
}))
assert.NotNil(root.QuerySelector("HBox"))
})
t.Run("By TraitName", func(t *testing.T) {
root := Box(context.New(), Children(func(c Context) {
Box(c, TraitNames("abcd"))
Box(c, TraitNames("efgh"))
}))
assert.NotNil(root.QuerySelector(".efgh"))
})
})
t.Run("Root returns deeply nested root control", func(t *testing.T) {
var descendant Displayable
root := Box(context.New(), ID("root"), Children(func(c Context) {
Box(c, ID("one"), Children((func() {
Box(c, ID("two"), Children(func() {
Box(c, ID("three"), Children(func() {
Box(c, ID("four"), Children(func() {
Box(c, ID("five"), Children(func() {
descendant = Box(c, ID("child"))
}))
}))
}))
}))
})))
}))
assert.Equal(root.ID(), descendant.Root().ID())
})
t.Run("Root gets Scheduler reference", func(t *testing.T) {
var root, child Displayable
root = Box(context.New(), Children(func(c Context) {
Box(c, Children(func() {
child = Box(c)
}))
}))
assert.NotNil(root.Context())
assert.NotNil(child.Context())
})
t.Run("Path", func(t *testing.T) {
t.Run("root", func(t *testing.T) {
root := Box(context.New(), ID("root"))
assert.Equal(root.Path(), "/root")
})
t.Run("uses Key if ID is empty", func(t *testing.T) {
root := Box(context.New(), Key("abcd"))
assert.Equal(root.Path(), "/abcd")
})
t.Run("uses type if neither Key nor Id are present", func(t *testing.T) {
root := Box(context.New())
assert.Equal(root.Path(), "/Box")
})
t.Run("defaults to TypeName and parent index", func(t *testing.T) {
root := VBox(context.New(), Children(func(c Context) {
Box(c)
Box(c)
HBox(c)
}))
kids := root.Children()
assert.Equal(kids[0].Path(), "/VBox/Box0")
assert.Equal(kids[1].Path(), "/VBox/Box1")
assert.Equal(kids[2].Path(), "/VBox/HBox2")
})
t.Run("with depth", func(t *testing.T) {
var one, two, three, four Displayable
Box(context.New(), ID("root"), Children(func(c Context) {
one = Box(c, ID("one"), Children(func() {
two = Box(c, ID("two"), Children(func() {
three = Box(c, ID("three"))
}))
four = Box(c, ID("four"))
}))
}))
assert.Equal(one.Path(), "/root/one")
assert.Equal(two.Path(), "/root/one/two")
assert.Equal(three.Path(), "/root/one/two/three")
assert.Equal(four.Path(), "/root/one/four")
})
})
}
*/
|
package domain
import "time"
// User holds the account data recorded for a registered user.
type User struct {
	Name string     // full name
	Mail string     // e-mail address
	Nick string     // nickname / handle
	Pass string     // password, stored exactly as given
	Date *time.Time // creation timestamp
}

// NewUser builds a User from the supplied fields and stamps it with the
// current time.
func NewUser(name string, mail string, nick string, pass string) *User {
	now := time.Now()
	return &User{Name: name, Mail: mail, Nick: nick, Pass: pass, Date: &now}
}
|
package parser
// ColumnType is enum of column type
type ColumnType int

// Definitions of ColumnType
const (
	Char ColumnType = iota
	Varchar
	Binary
	VarBinary
	Text
	Bool
	Int8
	Int16
	Int32
	Int64
	Uint8
	Uint16
	Uint32
	Uint64
	Float
	Reference
)

// columnTypeNames resolves a lowercase schema keyword to its ColumnType.
// Reference intentionally has no keyword here; it is produced elsewhere.
var columnTypeNames = map[string]ColumnType{
	"char":      Char,
	"varchar":   Varchar,
	"binary":    Binary,
	"varbinary": VarBinary,
	"text":      Text,
	"bool":      Bool,
	"int8":      Int8,
	"int16":     Int16,
	"int32":     Int32,
	"int64":     Int64,
	"uint8":     Uint8,
	"uint16":    Uint16,
	"uint32":    Uint32,
	"uint64":    Uint64,
	"float":     Float,
	// Alias
	"string": Varchar,
	"int":    Int32,
	"uint":   Uint32,
}

// columnTypeAttr describes how a ColumnType renders as a SQL type:
// its SQL keyword, whether it takes a length, and whether it is unsigned.
type columnTypeAttr struct {
	TypeName string
	Length   bool
	Unsigned bool
}

// columnTypes maps each ColumnType to its SQL rendering attributes.
var columnTypes = map[ColumnType]columnTypeAttr{
	Char:      {TypeName: "CHAR", Length: true},
	Varchar:   {TypeName: "VARCHAR", Length: true},
	Binary:    {TypeName: "BINARY", Length: true},
	VarBinary: {TypeName: "VARBINARY", Length: true},
	Text:      {TypeName: "TEXT"},
	Bool:      {TypeName: "TINYINT"},
	Int8:      {TypeName: "TINYINT"},
	Int16:     {TypeName: "SMALLINT"},
	Int32:     {TypeName: "INT"},
	Int64:     {TypeName: "BIGINT"},
	Uint8:     {TypeName: "TINYINT", Unsigned: true},
	Uint16:    {TypeName: "SMALLINT", Unsigned: true},
	Uint32:    {TypeName: "INT", Unsigned: true},
	Uint64:    {TypeName: "BIGINT", Unsigned: true},
	Float:     {TypeName: "FLOAT"},
	Reference: {},
}
|
// by Daniel Rodriguez @sadasant
// Original code written in C# by [Strilanc](https://github.com/Strilanc)
// Original repo: [NotAMonad](https://github.com/Strilanc/NotAMonad)
// This is the fixed code for an Issue I reported here: https://code.google.com/p/go/issues/detail?can=2&start=0&num=100&q=&colspec=ID%20Status%20Stars%20Priority%20Owner%20Reporter%20Summary&groupby=&sort=&id=6247
// remy_o fixed it with this patch: https://codereview.appspot.com/13216043
//
// Issue:
// I'm trying to run this script in go1.1.2 linux/arm:
//
// http://play.golang.org/p/V-76cJjU3D
//
// It says "reg R13 left allocated" on line 43.
//
// It compiles in play.golang.org, but the calculations doesn't match with the expected value (check lines 85..88), the last element "there" appears with value: complex128=(-0-0.6400000000000001i).
//
// The go env output is:
//
// go env
// GOARCH="arm"
// GOBIN=""
// GOCHAR="5"
// GOEXE=""
// GOHOSTARCH="arm"
// GOHOSTOS="linux"
// GOOS="linux"
// GOPATH="/home/sadasant/code/go"
// GORACE=""
// GOROOT="/usr/lib/go"
// GOTOOLDIR="/usr/lib/go/pkg/tool/linux_arm"
// CC="gcc"
// GOGCCFLAGS="-g -O2 -fPIC -marm -pthread"
// CGO_ENABLED="1"
package main
import (
"fmt"
"math"
"reflect"
)
// Val is an arbitrary value: either a complex128 amplitude or a
// []Val pair of (inner QuantumSuperposition, amplitude).
type Val interface{}
// Superposition maps a state label to its amplitude (complex128), or —
// before flattening — to a []Val{inner superposition, amplitude} pair.
type Superposition map[string]Val
// QuantumSuperposition wraps a Superposition dictionary.
type QuantumSuperposition struct {
Dict Superposition
}
// From installs dict as the receiver's dictionary and returns the
// receiver by value, enabling new(QuantumSuperposition).From(...) chains.
func (p *QuantumSuperposition) From(dict map[string]Val) QuantumSuperposition {
p.Dict = dict
return *p
}
// Transform maps every state label through t, producing a new
// superposition. When two labels collide after mapping, their
// amplitudes are summed (both are assumed to be complex128).
func (p QuantumSuperposition) Transform(t func(string) string) QuantumSuperposition {
dict := Superposition{}
for k, v := range p.Dict {
trans := t(k)
if _, ok := dict[trans]; ok {
// Collision: amplitudes add.
dict[trans] = dict[trans].(complex128) + v.(complex128)
} else {
if _v, ok := v.([]Val); ok {
// Pair values carry the amplitude in slot 1.
dict[trans] = _v[1]
} else {
dict[trans] = v
}
}
}
return new(QuantumSuperposition).From(dict)
}
// FIXME:
// In go1.1.2 linux/arm it breaks with: reg R13 left allocated
// Flatten collapses one level of nesting. Each entry is expected to be
// a []Val{inner QuantumSuperposition, outer amplitude}; inner
// amplitudes are scaled by the outer amplitude (via mult) and merged
// by label. If any entry is not such a pair, the receiver is returned
// unchanged.
func (p QuantumSuperposition) Flatten() QuantumSuperposition {
dict := Superposition{}
for _, l := range p.Dict {
if _l, ok := l.([]Val); ok {
for k, v := range _l[0].(QuantumSuperposition).Dict {
if _, ok := dict[k]; ok {
// Same label seen twice: amplitudes add.
dict[k] = dict[k].(complex128) + mult(v.(complex128), _l[1].(complex128))
} else {
if _v, ok := v.([]Val); ok {
// Nested pair: take its amplitude from slot 1.
dict[k] = mult(_v[1].(complex128), _l[1].(complex128))
} else {
dict[k] = mult(v.(complex128), _l[1].(complex128))
}
}
}
} else {
// Not a nested pair: nothing to flatten.
return p
}
}
return new(QuantumSuperposition).From(dict)
}
// mult multiplies two complex numbers with the textbook formula, as
// appears here: http://www.clarku.edu/~djoyce/complex/mult.html
// (hand-rolled to sidestep a historical go1.1.2 linux/arm issue; see
// the file header).
func mult(a, b complex128) complex128 {
	realPart := real(a)*real(b) - imag(a)*imag(b)
	imagPart := real(a)*imag(b) + real(b)*imag(a)
	// Because we can have negative zeroes: reassigning 0 turns a -0
	// real part into +0 (only the real part is normalized).
	if realPart == 0 {
		realPart = 0
	}
	return complex(realPart, imagPart)
}
// main reproduces the original arm codegen bug report: it builds two
// nested superpositions, flattens them, and compares the amplitudes
// against the expected values.
func main() {
	a1 := new(QuantumSuperposition).From(Superposition{
		"hey":    complex(1.0/5*3, 0),
		"listen": complex(-1.0/5*4, 0),
	})
	a2 := new(QuantumSuperposition).From(Superposition{
		"over":  complex(1.0/5*3, 0),
		"there": complex(-1.0/5*4, 0),
	})
	s2 := new(QuantumSuperposition).From(Superposition{
		"a1": []Val{a1, complex(1.0/5*3, 0)},
		"a2": []Val{a2, complex(0, 1.0/5*4)},
	})
	s2 = s2.Flatten()
	expected := Superposition{
		"hey":    complex(1.0/25*9, 0),
		"listen": complex(-1.0/25*12, 0),
		"over":   complex(0, 1.0/25*12),
		"there":  complex(0, -1.0/25*16),
	}
	// Fix rounding on the "there" amplitude before comparing.
	c := s2.Dict["there"].(complex128)
	i := (math.Floor(imag(c)*100) / 100) + 0.01
	s2.Dict["there"] = complex(real(c), i)
	if reflect.DeepEqual(s2.Dict, expected) != true {
		// BUG FIX: was %s with a typo'd "expeted" label; complex128 has
		// no String method, so %s printed %!s(...) noise — use %v.
		fmt.Printf("DeepEqual\nexpected: %v\nreceived: %v\n", expected, s2.Dict)
	}
}
|
package validator
// Validator accumulates validation failures keyed by field name.
type Validator struct {
	Errors map[string]string
}

// New returns a Validator with an empty error map.
func New() *Validator {
	return &Validator{Errors: map[string]string{}}
}

// AddError records message under key, keeping only the first message
// reported for any given key.
func (v *Validator) AddError(key, message string) {
	if _, exists := v.Errors[key]; exists {
		return
	}
	v.Errors[key] = message
}

// Check records message under key when condition is false.
func (v *Validator) Check(condition bool, key, message string) {
	if condition {
		return
	}
	v.AddError(key, message)
}

// Valid reports whether no errors have been recorded.
func (v *Validator) Valid() bool {
	return len(v.Errors) == 0
}

// In reports whether s is an element of list.
// NOTE helper functions: https://github.com/asaskevich/govalidator
func In(list []string, s string) bool {
	for i := range list {
		if list[i] == s {
			return true
		}
	}
	return false
}
|
package mysql
import (
"context"
sq "github.com/Masterminds/squirrel"
"github.com/ddouglas/ledger"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// merchantsTable and merchantsColumns describe the merchants table.
var merchantsTable = "merchants"
var merchantsColumns = []string{
"id",
"name",
"created_at",
"updated_at",
}
// merchantAliasesTable and merchantAliasesColumns describe the
// merchant_aliases table (alternate names that map to a merchant).
var merchantAliasesTable = "merchant_aliases"
var merchantAliasesColumns = []string{
"alias_id",
"merchant_id",
"alias",
"created_at",
"updated_at",
}
// merchantRepository is the MySQL-backed ledger.MerchantRepository.
type merchantRepository struct {
db *sqlx.DB
}
// NewMerchantRepository wraps db in a ledger.MerchantRepository.
func NewMerchantRepository(db *sqlx.DB) ledger.MerchantRepository {
return &merchantRepository{
db: db,
}
}
// Merchant fetches a single merchant by primary key. The error from
// GetContext is returned as-is (sql.ErrNoRows when nothing matches).
func (r *merchantRepository) Merchant(ctx context.Context, id string) (*ledger.Merchant, error) {
query, args, err := sq.Select(merchantsColumns...).From(merchantsTable).Where(sq.Eq{
"id": id,
}).ToSql()
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
var merchant = new(ledger.Merchant)
err = r.db.GetContext(ctx, merchant, query, args...)
return merchant, err
}
// MerchantByAlias resolves a merchant through one of its aliases.
// The error from GetContext is returned as-is (sql.ErrNoRows when no
// alias matches).
func (r *merchantRepository) MerchantByAlias(ctx context.Context, alias string) (*ledger.Merchant, error) {
	// CONSISTENCY FIX: the WHERE clause used "ma.Alias"; the column is
	// lowercase everywhere else in this file (MySQL column names are
	// case-insensitive, so behavior is unchanged).
	query := `
SELECT
m.id,
m.name,
m.created_at,
m.updated_at
FROM merchant_aliases ma
LEFT JOIN merchants m ON (m.id = ma.merchant_id)
WHERE ma.alias = ?
`
	var merchant = new(ledger.Merchant)
	err := r.db.GetContext(ctx, merchant, query, alias)
	return merchant, err
}
// Merchants returns all merchants ordered by name ascending.
func (r *merchantRepository) Merchants(ctx context.Context) ([]*ledger.Merchant, error) {
query, args, err := sq.Select(merchantsColumns...).From(merchantsTable).OrderBy("name asc").ToSql()
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
var merchants = make([]*ledger.Merchant, 0)
err = r.db.SelectContext(ctx, &merchants, query, args...)
return merchants, err
}
// createMerchantQuery builds the INSERT for a merchant; timestamps are
// set by the database via NOW().
func (r *merchantRepository) createMerchantQuery(merchant *ledger.Merchant) (string, []interface{}, error) {
return sq.Insert(merchantsTable).Columns(merchantsColumns...).Values(
merchant.ID,
merchant.Name,
sq.Expr(`NOW()`),
sq.Expr(`NOW()`),
).ToSql()
}
// CreateMerchant inserts merchant and echoes it back alongside the
// execution error, if any.
func (r *merchantRepository) CreateMerchant(ctx context.Context, merchant *ledger.Merchant) (*ledger.Merchant, error) {
query, args, err := r.createMerchantQuery(merchant)
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
_, err = r.db.ExecContext(ctx, query, args...)
return merchant, err
}
// CreateMerchantTx is CreateMerchant executed inside the supplied
// transaction; tx must be a *transaction produced by this package.
func (r *merchantRepository) CreateMerchantTx(ctx context.Context, tx ledger.Transactioner, merchant *ledger.Merchant) (*ledger.Merchant, error) {
txn, ok := tx.(*transaction)
if !ok {
return nil, ErrInvalidTransaction
}
query, args, err := r.createMerchantQuery(merchant)
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
_, err = txn.ExecContext(ctx, query, args...)
return merchant, err
}
// updateMerchantQuery builds the UPDATE for a merchant's name, bumping
// updated_at via NOW().
func (r *merchantRepository) updateMerchantQuery(id string, merchant *ledger.Merchant) (string, []interface{}, error) {
return sq.Update(merchantsTable).SetMap(map[string]interface{}{
"name": merchant.Name,
"updated_at": sq.Expr(`NOW()`),
}).Where(sq.Eq{"id": id}).ToSql()
}
// UpdateMerchant applies merchant's name to the row identified by id.
func (r *merchantRepository) UpdateMerchant(ctx context.Context, id string, merchant *ledger.Merchant) (*ledger.Merchant, error) {
query, args, err := r.updateMerchantQuery(id, merchant)
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
_, err = r.db.ExecContext(ctx, query, args...)
return merchant, err
}
// UpdateMerchantTx is UpdateMerchant executed inside the supplied
// transaction; tx must be a *transaction produced by this package.
func (r *merchantRepository) UpdateMerchantTx(ctx context.Context, tx ledger.Transactioner, id string, merchant *ledger.Merchant) (*ledger.Merchant, error) {
txn, ok := tx.(*transaction)
if !ok {
return nil, ErrInvalidTransaction
}
query, args, err := r.updateMerchantQuery(id, merchant)
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
_, err = txn.ExecContext(ctx, query, args...)
return merchant, err
}
// deleteMerchantQuery builds the DELETE for a merchant by primary key.
func (r *merchantRepository) deleteMerchantQuery(id string) (string, []interface{}, error) {
return sq.Delete(merchantsTable).Where(sq.Eq{"id": id}).ToSql()
}
// DeleteMerchant removes the merchant identified by id.
func (r *merchantRepository) DeleteMerchant(ctx context.Context, id string) error {
query, args, err := r.deleteMerchantQuery(id)
if err != nil {
return errors.Errorf("failed to generate sql stmt: %s", err)
}
_, err = r.db.ExecContext(ctx, query, args...)
return err
}
// DeleteMerchantTx is DeleteMerchant executed inside the supplied
// transaction; tx must be a *transaction produced by this package.
func (r *merchantRepository) DeleteMerchantTx(ctx context.Context, tx ledger.Transactioner, id string) error {
txn, ok := tx.(*transaction)
if !ok {
return ErrInvalidTransaction
}
query, args, err := r.deleteMerchantQuery(id)
if err != nil {
return errors.Errorf("failed to generate sql stmt: %s", err)
}
_, err = txn.ExecContext(ctx, query, args...)
return err
}
// MerchantAliasesByMerchantID lists every alias belonging to the given
// merchant; returns an empty slice when there are none.
func (r *merchantRepository) MerchantAliasesByMerchantID(ctx context.Context, merchantID string) ([]*ledger.MerchantAlias, error) {
query, args, err := sq.Select(merchantAliasesColumns...).From(merchantAliasesTable).Where(sq.Eq{
"merchant_id": merchantID,
}).ToSql()
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
var aliases = make([]*ledger.MerchantAlias, 0)
err = r.db.SelectContext(ctx, &aliases, query, args...)
return aliases, err
}
// createMerchantAliasQuery builds the INSERT for an alias; timestamps
// are set by the database via NOW().
func (r *merchantRepository) createMerchantAliasQuery(alias *ledger.MerchantAlias) (string, []interface{}, error) {
return sq.Insert(merchantAliasesTable).SetMap(map[string]interface{}{
"alias_id": alias.AliasID,
"merchant_id": alias.MerchantID,
"alias": alias.Alias,
"created_at": sq.Expr(`NOW()`),
"updated_at": sq.Expr(`NOW()`),
}).ToSql()
}
// CreateMerchantAlias inserts alias and echoes it back alongside the
// execution error, if any.
func (r *merchantRepository) CreateMerchantAlias(ctx context.Context, alias *ledger.MerchantAlias) (*ledger.MerchantAlias, error) {
query, args, err := r.createMerchantAliasQuery(alias)
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
_, err = r.db.ExecContext(ctx, query, args...)
return alias, err
}
// CreateMerchantAliasTx is CreateMerchantAlias executed inside the
// supplied transaction; tx must be a *transaction produced by this
// package. (A duplicated err-check block was removed.)
func (r *merchantRepository) CreateMerchantAliasTx(ctx context.Context, tx ledger.Transactioner, alias *ledger.MerchantAlias) (*ledger.MerchantAlias, error) {
	txn, ok := tx.(*transaction)
	if !ok {
		return nil, ErrInvalidTransaction
	}
	query, args, err := r.createMerchantAliasQuery(alias)
	if err != nil {
		return nil, errors.Errorf("failed to generate sql stmt: %s", err)
	}
	_, err = txn.ExecContext(ctx, query, args...)
	return alias, err
}
// updateMerchantAliasQuery builds the UPDATE for an alias, bumping
// updated_at via NOW().
// BUG FIX: this previously updated merchantsTable — a copy/paste error;
// aliases live in merchant_aliases (see merchantAliasesColumns and the
// alias_id predicate, which only exist on that table).
func (r *merchantRepository) updateMerchantAliasQuery(aliasID string, alias *ledger.MerchantAlias) (string, []interface{}, error) {
	return sq.Update(merchantAliasesTable).SetMap(map[string]interface{}{
		"merchant_id": alias.MerchantID,
		"alias":       alias.Alias,
		"updated_at":  sq.Expr(`NOW()`),
	}).Where(sq.Eq{"alias_id": aliasID}).ToSql()
}
// UpdateMerchantAlias applies alias's fields to the row identified by
// aliasID.
func (r *merchantRepository) UpdateMerchantAlias(ctx context.Context, aliasID string, alias *ledger.MerchantAlias) (*ledger.MerchantAlias, error) {
query, args, err := r.updateMerchantAliasQuery(aliasID, alias)
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
_, err = r.db.ExecContext(ctx, query, args...)
return alias, err
}
// UpdateMerchantAliasTx is UpdateMerchantAlias executed inside the
// supplied transaction; tx must be a *transaction produced by this
// package.
func (r *merchantRepository) UpdateMerchantAliasTx(ctx context.Context, tx ledger.Transactioner, aliasID string, alias *ledger.MerchantAlias) (*ledger.MerchantAlias, error) {
txn, ok := tx.(*transaction)
if !ok {
return nil, ErrInvalidTransaction
}
query, args, err := r.updateMerchantAliasQuery(aliasID, alias)
if err != nil {
return nil, errors.Errorf("failed to generate sql stmt: %s", err)
}
_, err = txn.ExecContext(ctx, query, args...)
return alias, err
}
|
// A Pulumi package for creating and managing Kubernetes resources.
//
package kubernetes
|
package v0
import (
"bytes"
"math/rand"
"testing"
"github.com/golang/protobuf/proto"
"github.com/grafana/tempo/pkg/tempopb"
"github.com/grafana/tempo/pkg/util/test"
"github.com/stretchr/testify/assert"
)
// TestMarshalUnmarshal round-trips one request through the v0 object
// encoding and checks that both the ID and payload survive.
func TestMarshalUnmarshal(t *testing.T) {
	var buffer bytes.Buffer
	id := []byte{0x00, 0x01}

	req := test.MakeRequest(10, id)
	bReq, err := proto.Marshal(req)
	assert.NoError(t, err)

	o := object{}
	_, err = o.MarshalObjectToWriter(id, bReq, &buffer)
	assert.NoError(t, err)

	outID, outObject, err := o.UnmarshalObjectFromReader(&buffer)
	assert.NoError(t, err)
	assert.True(t, bytes.Equal(id, outID))

	outReq := &tempopb.PushRequest{}
	assert.NoError(t, proto.Unmarshal(outObject, outReq))
	assert.True(t, proto.Equal(req, outReq))
}
// TestMarshalUnmarshalFromBuffer writes ten requests into one buffer and
// then decodes them back in order with UnmarshalAndAdvanceBuffer.
func TestMarshalUnmarshalFromBuffer(t *testing.T) {
	var buffer bytes.Buffer
	// 22-byte trace ID; contents are randomized below.
	id := make([]byte, 22)
	rand.Read(id)

	o := object{}
	reqs := make([]*tempopb.PushRequest, 0, 10)
	for i := 0; i < 10; i++ {
		req := test.MakeRequest(10, id)
		reqs = append(reqs, req)

		bReq, err := proto.Marshal(req)
		assert.NoError(t, err)

		_, err = o.MarshalObjectToWriter(id, bReq, &buffer)
		assert.NoError(t, err)
	}

	remaining := buffer.Bytes()
	for i := 0; i < 10; i++ {
		var (
			outID, outObject []byte
			err              error
		)
		remaining, outID, outObject, err = o.UnmarshalAndAdvanceBuffer(remaining)
		assert.NoError(t, err)

		outReq := &tempopb.PushRequest{}
		assert.NoError(t, proto.Unmarshal(outObject, outReq))
		assert.True(t, bytes.Equal(id, outID))
		assert.True(t, proto.Equal(reqs[i], outReq))
	}
}
|
package main
import (
"fmt"
"os"
)
const MAXW = 40
const MAXH = 20
// lraudm
// main interprets a tiny turtle-graphics program passed as os.Args[1]
// and renders the MAXW x MAXH canvas to stdout.
//
// Commands: l = turn left, r = turn right, a = turn around,
// u = pen up, d = pen down, m = move one cell.
func main() {
	// ROBUSTNESS FIX: indexing os.Args[1] without checking panicked
	// when the program argument was missing.
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: lraudm <program>")
		os.Exit(1)
	}
	var canvas [MAXH][MAXW]int
	pen := ArtistPen{0, 0, DirUp, PenUp}
	for _, cmd := range os.Args[1] {
		// IDIOM FIX: switch on the rune directly instead of allocating
		// a string per command via string(cmd).
		switch cmd {
		case 'l':
			pen.TurnLeft()
		case 'r':
			pen.TurnRight()
		case 'a':
			pen.TurnAround()
		case 'u':
			pen.PenUp()
		case 'd':
			pen.PenDown()
		case 'm':
			pen.Move()
		}
		// Mark the cell under the pen after every command while down.
		if pen.penState == PenDown {
			canvas[pen.Y][pen.X] = 1
		}
	}
	for _, row := range canvas {
		for _, cell := range row {
			if cell == 1 {
				fmt.Print("x")
			} else {
				fmt.Print(" ")
			}
		}
		fmt.Print("\n")
	}
	fmt.Print("\n")
}
// ArtistPen tracks the cursor position, heading, and pen state.
type ArtistPen struct {
	X         int
	Y         int
	direction Direction
	penState  PenState
}

// Direction is the pen's heading on the canvas.
type Direction int

const (
	DirUp    Direction = 0
	DirDown  Direction = 1
	DirLeft  Direction = 2
	DirRight Direction = 3
)

// PenState says whether moving the pen draws on the canvas.
type PenState int

const (
	PenUp   PenState = 0
	PenDown PenState = 1
)

// rightOf maps each heading to the heading after a clockwise turn;
// leftOf and oppositeOf do the same for counter-clockwise and 180°.
var (
	rightOf    = map[Direction]Direction{DirUp: DirRight, DirRight: DirDown, DirDown: DirLeft, DirLeft: DirUp}
	leftOf     = map[Direction]Direction{DirUp: DirLeft, DirLeft: DirDown, DirDown: DirRight, DirRight: DirUp}
	oppositeOf = map[Direction]Direction{DirUp: DirDown, DirDown: DirUp, DirLeft: DirRight, DirRight: DirLeft}
)

// TurnRight rotates the heading 90° clockwise.
func (p *ArtistPen) TurnRight() {
	if d, ok := rightOf[p.direction]; ok {
		p.direction = d
	}
}

// TurnLeft rotates the heading 90° counter-clockwise.
func (p *ArtistPen) TurnLeft() {
	if d, ok := leftOf[p.direction]; ok {
		p.direction = d
	}
}

// TurnAround rotates the heading 180°.
func (p *ArtistPen) TurnAround() {
	if d, ok := oppositeOf[p.direction]; ok {
		p.direction = d
	}
}

// PenUp lifts the pen so moves stop drawing.
func (p *ArtistPen) PenUp() {
	p.penState = PenUp
}

// PenDown lowers the pen so moves draw.
func (p *ArtistPen) PenDown() {
	p.penState = PenDown
}

// Move advances one cell in the current heading, clamped to the canvas
// bounds [0, MAXW) x [0, MAXH).
func (p *ArtistPen) Move() {
	switch p.direction {
	case DirUp:
		if next := p.Y - 1; next >= 0 {
			p.Y = next
		}
	case DirDown:
		if next := p.Y + 1; next < MAXH {
			p.Y = next
		}
	case DirLeft:
		if next := p.X - 1; next >= 0 {
			p.X = next
		}
	case DirRight:
		if next := p.X + 1; next < MAXW {
			p.X = next
		}
	}
}
|
package umeng_sdk_push
import (
"encoding/json"
"net/http"
"bytes"
"io/ioutil"
"log"
)
// UmengNotificationData is the JSON request body sent to the Umeng push
// API; field names mirror the API's expected keys.
type UmengNotificationData struct {
AppKey string `json:"appkey"`
Timestamp string `json:"timestamp"`
Type string `json:"type"`
DeviceTokens string `json:"device_tokens"`
Alias string `json:"alias"`
AliasType string `json:"alias_type"`
FileId string `json:"file_id"`
Filter string `json:"filter"`
Feedback string `json:"feedback"`
Description string `json:"description"`
ThirdpartyId string `json:"thirdparty_id"`
ProductionMode bool `json:"production_mode"`
Payload interface{} `json:"payload"`
}
// UmengNotification bundles the endpoint configuration, the signing
// secret, and the notification payload to send.
type UmengNotification struct {
host string
uploadPath string
postPath string
appMasterSecret string
data *UmengNotificationData
}
// send POSTs the notification payload to Umeng, signing the request
// with MD5("POST" + url + body + appMasterSecret), and decodes the JSON
// response into an UmengResult.
func (this *UmengNotification) send() (UmengResult, error) {
	var result UmengResult
	url := this.host + this.postPath
	postBody, err := json.Marshal(this.data)
	if err != nil {
		log.Println(err.Error())
		return result, err
	}
	sign := MD5("POST" + url + string(postBody) + this.appMasterSecret)
	url = url + "?sign=" + sign
	bufReader := bytes.NewReader(postBody)
	resp, err := http.Post(url, "application/json", bufReader)
	if err != nil {
		// BUG FIX: the error must be checked BEFORE touching resp; the
		// old code registered defer resp.Body.Close() first, which
		// panics on a nil resp when http.Post fails.
		log.Println(err.Error())
		return result, err
	}
	defer resp.Body.Close()
	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println(err.Error())
		return result, err
	}
	// Surface JSON decode failures instead of silently ignoring them.
	if err := json.Unmarshal(content, &result); err != nil {
		log.Println(err.Error())
		return result, err
	}
	return result, nil
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import (
"encoding/json"
"fmt"
"strings"
)
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// OperationParameterUse is documented here http://hl7.org/fhir/ValueSet/operation-parameter-use
type OperationParameterUse int

const (
	OperationParameterUseIn OperationParameterUse = iota
	OperationParameterUseOut
)

// MarshalJSON encodes the code as its JSON string form ("in"/"out").
func (code OperationParameterUse) MarshalJSON() ([]byte, error) {
	return json.Marshal(code.Code())
}

// UnmarshalJSON decodes a (possibly quoted) "in" or "out" into the
// code, and rejects any other value with an error.
func (code *OperationParameterUse) UnmarshalJSON(json []byte) error {
	switch s := strings.Trim(string(json), "\""); s {
	case "in":
		*code = OperationParameterUseIn
	case "out":
		*code = OperationParameterUseOut
	default:
		return fmt.Errorf("unknown OperationParameterUse code `%s`", s)
	}
	return nil
}

// String returns the canonical code string.
func (code OperationParameterUse) String() string {
	return code.Code()
}

// Code returns the FHIR code value, or "<unknown>" for invalid values.
func (code OperationParameterUse) Code() string {
	if code == OperationParameterUseIn {
		return "in"
	}
	if code == OperationParameterUseOut {
		return "out"
	}
	return "<unknown>"
}

// Display returns the human-readable label for the code.
func (code OperationParameterUse) Display() string {
	if code == OperationParameterUseIn {
		return "In"
	}
	if code == OperationParameterUseOut {
		return "Out"
	}
	return "<unknown>"
}

// Definition returns the value-set definition text for the code.
func (code OperationParameterUse) Definition() string {
	if code == OperationParameterUseIn {
		return "This is an input parameter."
	}
	if code == OperationParameterUseOut {
		return "This is an output parameter."
	}
	return "<unknown>"
}
|
package lexer
import (
"github.com/leonhfr/nand2tetris/src/hasm/lexer/errors"
"github.com/leonhfr/nand2tetris/src/hasm/lexer/lexertoken"
)
// LexDestOrComp scans forward until it can tell whether the current
// token is a C-instruction dest (terminated by '=') or a comp
// (terminated by ';'), emits it, and hands off to the matching lexer.
func LexDestOrComp(lexer *Lexer) LexFn {
for {
if lexer.IsNext(lexertoken.EQUAL_SIGN) {
// "dest=" form: what we scanned so far is the dest field.
lexer.Emit(lexertoken.TOKEN_C_DEST)
return LexEqualSign
}
if lexer.IsNext(lexertoken.SEMI_COLON) {
// "comp;" form: what we scanned so far is the comp field.
lexer.Emit(lexertoken.TOKEN_C_COMP)
return LexSemiColon
}
lexer.Inc()
if lexer.IsEOF() {
return lexer.Errorf(errors.LEXER_ERROR_UNEXPECTED_EOF)
}
}
}
// LexEqualSign consumes and emits the '=' sign, then lexes the comp.
func LexEqualSign(lexer *Lexer) LexFn {
lexer.Inc()
lexer.Emit(lexertoken.TOKEN_EQUAL_SIGN)
return LexComp
}
// LexSemiColon consumes and emits the ';', then lexes the jump field.
func LexSemiColon(lexer *Lexer) LexFn {
lexer.Inc()
lexer.Emit(lexertoken.TOKEN_SEMICOLON)
return LexJump
}
// LexComp scans the comp field of a C-instruction. It ends on a space,
// newline, or carriage return (instruction done) or on ';' (a jump
// field follows).
func LexComp(lexer *Lexer) LexFn {
for {
if lexer.Peek() == ' ' {
lexer.Emit(lexertoken.TOKEN_C_COMP)
return LexStart
}
if lexer.IsNext(lexertoken.SEMI_COLON) {
lexer.Emit(lexertoken.TOKEN_C_COMP)
return LexSemiColon
}
if lexer.IsNext(lexertoken.NEWLINE) || lexer.IsNext(lexertoken.CARRIAGE_RETURN) {
lexer.Emit(lexertoken.TOKEN_C_COMP)
return LexStart
}
lexer.Inc()
if lexer.IsEOF() {
return lexer.Errorf(errors.LEXER_ERROR_UNEXPECTED_EOF)
}
}
}
// LexJump scans the jump field of a C-instruction, ending on a space,
// newline, or carriage return.
func LexJump(lexer *Lexer) LexFn {
for {
if lexer.Peek() == ' ' {
lexer.Emit(lexertoken.TOKEN_C_JUMP)
return LexStart
}
if lexer.IsNext(lexertoken.NEWLINE) || lexer.IsNext(lexertoken.CARRIAGE_RETURN) {
lexer.Emit(lexertoken.TOKEN_C_JUMP)
return LexStart
}
lexer.Inc()
if lexer.IsEOF() {
return lexer.Errorf(errors.LEXER_ERROR_UNEXPECTED_EOF)
}
}
}
|
package categoryTest
import (
"result"
"net/url"
"strconv"
"userDate"
"commonFun"
"log"
"datePrepare"
"database/sql"
)
var conn *sql.DB
// AddCategoryTest posts a "create category" request to the API and
// verifies the response code is 0. Returns false on any failure.
func AddCategoryTest() bool {
	// Record one test-detail result entry per test.
	result.TesDetailResult("AddCategoryTest")
	// case 1
	values := url.Values{
		"Tid":    {""},
		"Name":   {"商品测试类型" + strconv.Itoa(userDate.UserNum)},
		"Pid":    {"0"},
		"Action": {"create"},
		"Sort":   {""},
	}
	callbackDate := &commonFun.ResponseStruct{}
	err, _ := commonFun.HttpUrlFunc("POST", "/createUpdateCategory", values, callbackDate)
	if err != nil {
		log.Println(err)
		return false
	}
	// Named "ok" to avoid shadowing the imported "result" package.
	ok := commonFun.Expected(callbackDate.Code, 0)
	return ok
}
// GetCategoryId looks up the tid of the fixed test category ("商品测试类型1")
// in rcp_category and returns (nil, tid) on success or (err, -1) on failure.
//
// Fixes over the original: log.Fatal exited the whole process, making the
// error returns unreachable; the rows were never closed; rows.Err() was
// never checked.
func GetCategoryId() (error, int) {
	// NOTE(review): the error from NewTestConn is still discarded, matching
	// the original behavior — confirm whether it should be propagated.
	conn, _ = datePrepare.NewTestConn()
	var categoryId int
	getCategoryIdSql := "select tid from rcp_category where name=?"
	rows, err := conn.Query(getCategoryIdSql, "商品测试类型1")
	if err != nil {
		log.Println("getCategoryId query error", err.Error())
		return err, -1
	}
	defer rows.Close()
	for rows.Next() {
		if err = rows.Scan(&categoryId); err != nil {
			log.Println("getCategoryId scan error", err.Error())
			return err, -1
		}
	}
	// Surface any error that terminated the iteration early.
	if err = rows.Err(); err != nil {
		log.Println("getCategoryId rows error", err.Error())
		return err, -1
	}
	return nil, categoryId
}
|
package util
import (
"github.com/gin-gonic/gin"
)
// StreamHandleResponse handles the Stream API response: when err is nil the
// body is written to the client as a server-sent event named messageName and
// true is returned; otherwise nothing is written and false is returned.
func StreamHandleResponse(context *gin.Context, body interface{}, err error, messageName string) bool {
	ok := err == nil
	if ok {
		context.SSEvent(messageName, body)
	}
	return ok
}
|
package meta
// Team carries team metadata as exchanged in JSON payloads.
type Team struct {
	TeamID     string `json:"teamID"`     // unique team identifier
	TeamNameCH string `json:"teamNameCH"` // team name, CH variant (presumably Chinese — confirm)
	TeamNameEN string `json:"teamNameEN"` // team name, English variant
}
|
package game
import (
"log"
"github.com/alienteam/omok-server/core"
)
// Game is a main server logic.
type Game struct {
	//core.JsonMessageHandler
	core.StringMessageHandler
	// NOTE(review): users and rooms are declared but never initialized or
	// accessed in this file — presumably populated elsewhere; confirm.
	users map[int]*User
	rooms map[int]*Room
}
// OnEvent dispatches connection lifecycle events delivered by the core
// server: it logs every event, greets newly connected clients, and closes
// the connection after a send completes.
func (s *Game) OnEvent(e core.Event, c *core.Connection, m core.Message) {
	switch e {
	case core.EventConnected:
		log.Println("EVENT_CONN")
		// Greet the client immediately upon connection.
		c.Send("hello world~")
	case core.EventRecv:
		log.Printf("EVENT_RECV: %v", m)
	case core.EventSend:
		log.Printf("EVENT_SEND: %v", m)
		// NOTE(review): the connection is closed after every completed send,
		// so a client is dropped right after the greeting is delivered —
		// confirm this is intentional and not leftover test code.
		c.Close()
	case core.EventClosed:
		log.Println("EVENT_CLOSED")
	}
}
|
/*
* @lc app=leetcode.cn id=89 lang=golang
*
* [89] 格雷编码
*/
package leetcode
import "math"
// @lc code=start
// @lc code=start
// grayCode returns the n-bit binary-reflected Gray code sequence of length
// 2^n: consecutive values differ in exactly one bit and the sequence starts
// at 0.
//
// The i-th code word is i ^ (i >> 1), which is exactly the value produced by
// the recursive mirror construction in gray(). Using the closed form also
// fixes the n == 0 case, where the previous gray(n, &res) call recursed
// forever (n never reached the n == 1 base case) and overflowed the stack;
// now grayCode(0) correctly returns [0]. The dead commented-out recursive
// variant has been removed.
func grayCode(n int) []int {
	res := make([]int, 1<<n)
	for i := range res {
		res[i] = i ^ (i >> 1)
	}
	return res
}
// gray fills the first 2^n entries of res with the n-bit reflected Gray
// code. It recursively builds the (n-1)-bit sequence in the lower half, then
// mirrors it into the upper half with the top bit set.
// Precondition: n >= 1 and len(*res) >= 1<<n.
func gray(n int, res *[]int) {
	if n == 1 {
		(*res)[0], (*res)[1] = 0, 1
		return
	}
	gray(n-1, res)
	half := 1 << (n - 1)
	// Mirror: entry i of the lower half maps to entry 2*half-1-i of the
	// upper half, with bit (n-1) ORed in via XOR (the bit is always clear
	// in the lower half).
	for i := half - 1; i >= 0; i-- {
		(*res)[2*half-i-1] = (*res)[i] ^ half
	}
}
// @lc code=end
|
package main
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
)
// TestSoal1 posts the sample book-code input to the /soal-1 handler and
// checks both the HTTP status and the sorted output body.
func TestSoal1(t *testing.T) {
	// Input and expected output taken from the original problem statement.
	const payload = "input=3A13 5X19 9Y20 2C18 1N20 3N20 1M21 1F14 9A21 3N21 0E13 5G14 8A23 9E22 3N14"
	const want = "0E13 9E22 9A21 9Y20 8A23 1M21 1N20 1F14 2C18 5X19 5G14 3N21 3N20 3A13"

	request, err := http.NewRequest("POST", "/soal-1", strings.NewReader(payload))
	if err != nil {
		t.Fatal(err)
	}
	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	recorder := httptest.NewRecorder()
	http.HandlerFunc(pustakawan).ServeHTTP(recorder, request)

	if got := recorder.Code; got != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			got, http.StatusOK)
	}
	if body := recorder.Body.String(); body != want {
		t.Errorf("handler returned unexpected body: got %v want %v",
			body, want)
	}
}
|
package re
import (
"regexp"
)
// Date-format patterns recognized by this package. Each has a matching
// precompiled *regexp.Regexp below.
const PATTERN_MY string = `^(?i)(c\.?\s*)?(\d+)/(\-?\d{4}).*`
const PATTERN_MY_LONGFORM string = `^(January|February|March|April|May|June|July|August|September|October|November|December)\s+(\d{4}).*`
const PATTERN_MDY string = `^(?i)(c\.?\s*)?(\d+)/(\d+)/(\-?\d{4}).*`
const PATTERN_EARLY string = `^(?i)(c\.?\s*)?early\s+(\-?\d{4})s.*`
const PATTERN_MID string = `^(?i)(c\.?\s*)?mid\s+(\-?\d{4})s.*`
const PATTERN_LATE string = `^(?i)(c\.?\s*)?late\s+(\-?\d{4})s.*`
const PATTERN_DECADE string = `^(?i)(c\.?\s*)?(\d{3})0s.*$`
const PATTERN_RANGE string = `^(?i)(c\.?\s*)?(\-?\d{4})(?:\s+)?\-(?:\s+)?(\-?\d{4}).*`
const PATTERN_YYYY string = `^(?i)(c\.?\s*)?(\-?)(\d{4}).*`
const PATTERN_MDYHUMAN string = `(?i)(\w+)\s+(\d{1,2})\,?\s+(\-?\d{4}).*`

// The compiled patterns are initialized at declaration time — the idiomatic
// replacement for compiling them in an init() function. MustCompile panics
// on an invalid pattern, so a bad regex fails at program start exactly as it
// did before.
var MY = regexp.MustCompile(PATTERN_MY)
var MY_LONGFORM = regexp.MustCompile(PATTERN_MY_LONGFORM)
var MDY = regexp.MustCompile(PATTERN_MDY)
var EARLY = regexp.MustCompile(PATTERN_EARLY)
var MID = regexp.MustCompile(PATTERN_MID)
var LATE = regexp.MustCompile(PATTERN_LATE)
var DECADE = regexp.MustCompile(PATTERN_DECADE)
var RANGE = regexp.MustCompile(PATTERN_RANGE)
var YYYY = regexp.MustCompile(PATTERN_YYYY)
var MDYHUMAN = regexp.MustCompile(PATTERN_MDYHUMAN)
|
package Next_Greater_Element_I
import "container/list"
// nextGreaterElement replaces, in place, each value of nums1 (a subset of
// nums2) with the first strictly greater value that appears to its right in
// nums2, or -1 if no such value exists, and returns nums1.
func nextGreaterElement(nums1 []int, nums2 []int) []int {
	// Monotonically decreasing stack of nums2 values still waiting for
	// their next greater element.
	pending := list.New()
	nextGreater := make(map[int]int, len(nums2))
	for _, cur := range nums2 {
		for pending.Len() > 0 {
			top := pending.Back()
			if top.Value.(int) >= cur {
				break
			}
			// cur is the first greater value to the right of top.
			pending.Remove(top)
			nextGreater[top.Value.(int)] = cur
		}
		pending.PushBack(cur)
	}
	for i, v := range nums1 {
		if g, ok := nextGreater[v]; ok {
			nums1[i] = g
		} else {
			nums1[i] = -1
		}
	}
	return nums1
}
|
package main
import (
"net/http"
"time"
"github.com/garyburd/redigo/redis"
"log"
"strconv"
"flag"
)
var (
	// httpServerPort is the TCP port the HTTP server listens on,
	// configurable via the -port flag.
	httpServerPort int
)

// init registers command-line flags and constructs the shared Redis
// connection pool before main runs.
func init() {
	const (
		defaultHttpServerPort = 8080
		usage                 = "http server port"
	)
	flag.IntVar(&httpServerPort, "port", defaultHttpServerPort, usage)
	// NOTE(review): redisPool is not declared in this file — presumably a
	// package-level variable defined elsewhere in the package; confirm.
	redisPool = &redis.Pool{
		MaxIdle:     5,
		IdleTimeout: 240 * time.Second,
		// Dial opens a new connection to Redis on localhost:6379.
		Dial: func() (redis.Conn, error) {
			c, err := redis.Dial("tcp", ":6379")
			if err != nil {
				return nil, err
			}
			return c, err
		},
	}
}
// main wires the API routes and serves HTTP on the configured port.
func main() {
	// Parse the flags registered in init (e.g. -port). Without this call
	// the flag values always kept their defaults — the -port flag was
	// silently ignored.
	flag.Parse()
	log.Println("api.qpcrbox.com")
	http.HandleFunc("/v1/qpcr/", qpcrHandler)
	http.HandleFunc("/v1/experiment/", experimentHandler)
	http.HandleFunc("/v1/rate-limit", rateLimitHandler)
	http.HandleFunc("/v1/status", statusHandler)
	// ListenAndServe only returns on failure; surface the error instead of
	// exiting silently.
	log.Fatal(http.ListenAndServe(":"+strconv.Itoa(httpServerPort), nil))
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package meta contains support code for Tast meta tests.
package meta
import (
"context"
"path/filepath"
"reflect"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"chromiumos/tast/framework/protocol"
"chromiumos/tast/fsutil"
"chromiumos/tast/testing"
)
// init registers the meta test fixtures with the Tast framework.
func init() {
	testing.AddFixture(&testing.Fixture{
		Name:     "metaLocalDataFilesFixture",
		Desc:     "Demonstrate how to use data files in fixtures",
		Contacts: []string{"oka@chromium.org", "tast-owner@google.com"},
		Data: []string{
			"fixture_data_internal.txt",
			"fixture_data_external.txt",
		},
		Impl: dataFileFixture{},
	})
	testing.AddFixture(&testing.Fixture{
		Name:     "metaLocalFixtureDUTFeature",
		Desc:     "Demonstrate how to access DUT Features in fixtures",
		Contacts: []string{"seewaifu@chromium.org", "tast-owner@google.com"},
		Data:     []string{},
		Impl:     &dutFeatureFixture{},
	})
}
// dataFileFixture demonstrates data-file usage: SetUp copies the fixture's
// data files into the output directory so they appear in test results.
type dataFileFixture struct{}

// SetUp copies each declared data file into the fixture's output directory,
// reporting (but not aborting on) individual copy failures.
func (dataFileFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	for _, fn := range []string{
		"fixture_data_internal.txt",
		"fixture_data_external.txt",
	} {
		s.Log("Copying ", fn)
		if err := fsutil.CopyFile(s.DataPath(fn), filepath.Join(s.OutDir(), fn)); err != nil {
			s.Errorf("Failed copying %s: %s", fn, err)
		}
	}
	return nil
}

// The remaining lifecycle hooks are intentionally no-ops.
func (dataFileFixture) Reset(ctx context.Context) error {
	return nil
}
func (dataFileFixture) PreTest(ctx context.Context, s *testing.FixtTestState)  {}
func (dataFileFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {}
func (dataFileFixture) TearDown(ctx context.Context, s *testing.FixtState)     {}
// dutFeatureFixture verifies that the DUT features visible to a fixture stay
// constant across its lifecycle: the value captured in SetUp must be
// identical in PreTest, PostTest and TearDown.
type dutFeatureFixture struct {
	feature *protocol.DUTFeatures // features captured at SetUp time
}

// SetUp captures the DUT features and exposes them as the fixture value.
func (dff *dutFeatureFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	dff.feature = s.Features("")
	return dff.feature
}

func (dff *dutFeatureFixture) Reset(ctx context.Context) error {
	return nil
}

// featureDiff returns a human-readable diff between the given features and
// the ones captured in SetUp, or "" when they are equal. Extracted so the
// identical comparison is not triplicated across PreTest/PostTest/TearDown.
func (dff *dutFeatureFixture) featureDiff(feature *protocol.DUTFeatures) string {
	allowUnexported := func(reflect.Type) bool { return true }
	return cmp.Diff(feature, dff.feature, cmpopts.EquateEmpty(), cmp.Exporter(allowUnexported))
}

func (dff *dutFeatureFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	if diff := dff.featureDiff(s.Features("")); diff != "" {
		s.Logf("Got unexpected feature in PreTest (-got +want): %s", diff)
		s.Fatal("Got unexpected feature in PreTest")
	}
}

func (dff *dutFeatureFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
	if diff := dff.featureDiff(s.Features("")); diff != "" {
		s.Logf("Got unexpected feature in PostTest (-got +want): %s", diff)
		s.Fatal("Got unexpected feature in PostTest")
	}
}

func (dff *dutFeatureFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	if diff := dff.featureDiff(s.Features("")); diff != "" {
		s.Logf("Got unexpected feature in TearDown (-got +want): %s", diff)
		s.Fatal("Got unexpected feature in TearDown")
	}
}
|
package conf_test
import (
"testing"
"github.com/BedeWong/iStock/conf"
)
// TestFnInit checks that conf.GetConfig behaves as a singleton: two calls
// must return the same object, otherwise the mismatch is logged.
//
// Renamed from TestfnInit: a lowercase letter directly after "Test" makes
// the function name malformed, so `go test` never executed it.
func TestFnInit(t *testing.T) {
	//conf.init()
	if conf.GetConfig() != conf.GetConfig() {
		t.Logf("两次获取的对象不唯一:\n")
		t.Logf("obj1:%v\n", conf.GetConfig())
		t.Logf("obj2:%v\n", conf.GetConfig())
	}
}
//func Test2(t *testing.T) {
// t.Error("就是通不过。")
//} |
package db
import (
"context"
"fmt"
"os"
"github.com/jackc/pgx/v4"
)
// DB is the package-wide PostgreSQL connection, set once by Connect.
var DB *pgx.Conn

// Connect opens a PostgreSQL connection using the DATABASE_URL environment
// variable and stores it in the package-level DB. It panics if the
// connection cannot be established.
func Connect() {
	db, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
	if err != nil {
		panic(err)
	}
	// Intentionally left open: the connection must outlive Connect so other
	// packages can use DB (the commented defer would close it immediately).
	//defer db.Close(context.Background())
	DB = db
	fmt.Println("DATABASE::CONNECTED")
}
|
package main
import (
"bufio"
"flag"
"fmt"
"os"
"runtime"
"time"
"github.com/otoolep/bleve-bench"
)
// Benchmark knobs, all overridable on the command line.
var batchSize = flag.Int("batchSize", 100, "batch size for indexing")
var nShards = flag.Int("shards", 1, "number of indexing shards")
var maxprocs = flag.Int("maxprocs", 1, "GOMAXPROCS")
var indexPath = flag.String("index", "indexes", "index storage path")
var docsPath = flag.String("docs", "docs", "path to docs file")
var csv = flag.Bool("csv", false, "summary CSV output")
// main benchmarks document indexing: it loads line-delimited docs into
// memory, indexes them across the configured shards, and reports the rate.
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*maxprocs)

	// Remove any existing indexes.
	if err := os.RemoveAll(*indexPath); err != nil {
		// Was fmt.Println with a %s verb, which printed the verb literally;
		// Printf formats it properly.
		fmt.Printf("failed to remove %s.\n", *indexPath)
		os.Exit(1)
	}

	// Attempt to open the file.
	fmt.Printf("Opening docs file %s\n", *docsPath)
	f, err := os.Open(*docsPath)
	if err != nil {
		fmt.Printf("failed to open docs file: %s\n", err.Error())
		os.Exit(1)
	}
	defer f.Close()

	// Read the lines into memory. A final line without a trailing newline is
	// kept as well (the original loop dropped it).
	docs := make([][]byte, 0, 100000)
	reader := bufio.NewReader(f)
	for {
		l, rerr := reader.ReadBytes('\n')
		if len(l) > 0 {
			docs = append(docs, l)
		}
		if rerr != nil {
			break
		}
	}
	fmt.Printf("%d documents read for indexing.\n", len(docs))
	if len(docs)%(*nShards) != 0 {
		fmt.Println("Document count must be evenly divisible by shard count")
		os.Exit(1)
	}

	i := indexer.New(*indexPath, *nShards, *batchSize)
	if err := i.Open(); err != nil {
		fmt.Println("failed to open indexer:", err)
		os.Exit(1)
	}

	// Announce the run parameters before indexing starts (the original
	// printed this after indexing had already finished).
	fmt.Printf("Commencing indexing. GOMAXPROCS: %d, batch size: %d, shards: %d.\n",
		runtime.GOMAXPROCS(-1), *batchSize, *nShards)
	startTime := time.Now()
	if err := i.Index(docs); err != nil {
		fmt.Println("failed to index documents:", err)
		os.Exit(1)
	}
	duration := time.Since(startTime)

	count, err := i.Count()
	if err != nil {
		fmt.Println("failed to determine total document count")
		os.Exit(1)
	}
	rate := int(float64(count) / duration.Seconds())
	fmt.Println("Indexing operation took", duration)
	fmt.Printf("%d documents indexed.\n", count)
	fmt.Printf("Indexing rate: %d docs/sec.\n", rate)
	if *csv {
		fmt.Printf("csv,%d,%d,%d,%d,%d,%d\n", len(docs), count, runtime.GOMAXPROCS(-1), *batchSize, *nShards, rate)
	}
}
|
package v3
import (
"bytes"
"fmt"
"math"
"sort"
)
// groupID identifies a memo group. Groups have numbers greater than 0; a
// groupID of 0 indicates an empty expression or an unknown group.
type groupID int32

// exprID identifies an expression within its memo group.
type exprID int32

// memoLoc describes the location of an expression in the memo.
type memoLoc struct {
	group groupID
	expr  exprID
}

// String renders the location as "<group>.<expr>" for debug output.
func (l memoLoc) String() string {
	return fmt.Sprintf("%d.%d", l.group, l.expr)
}
// numInlineChildren is the number of child groups stored directly inside a
// memoExpr before spilling to memo.children.
const numInlineChildren = 3

// memoExprFingerprint contains the fingerprint of memoExpr. Two memo
// expressions are considered equal if their fingerprints are equal. The
// fast-path case for expressions with 3 or fewer children and which do not
// contain physical properties or private data is for memoExprFingerprint.extra
// to be empty. In the slow-path case, that extra is initialized to distinguish
// such expressions.
type memoExprFingerprint struct {
	op       operator
	children [numInlineChildren]groupID
	extra    string
}

// memoExpr is a memoized representation of an expression. Unlike expr which
// represents a single expression, a memoExpr roots a forest of
// expressions. This is accomplished by recursively memoizing children and
// storing them in the memo structure. memoExpr.children refers to child groups
// of logically equivalent expressions. Because memoExpr refers to a forest of
// expressions, it is challenging to perform transformations directly upon
// it. Instead, transformations are performed by extracting an expr fragment
// matching a pattern from the memo, performing the transformation and then
// inserting the transformed result back into the memo.
//
// For relational expressions, logical equivalency is defined as equivalent
// group fingerprints (see memoExpr.groupFingerprint()). For scalar
// expressions, logical equivalency is defined as equivalent memoExpr (see
// memoExpr.fingerprint()). While scalar expressions are stored in the memo,
// each scalar expression group contains only a single entry.
type memoExpr struct {
	op operator // expr.op
	// numChildren and childrenBuf combine to represent expr.children. If an
	// expression contains 3 or fewer children they are stored in childrenBuf and
	// numChildren indicates the number of children. If the expression contains
	// more than 3 children, they are stored at memo.children[numChildren-3-1].
	numChildren int32
	childrenBuf [numInlineChildren]groupID

	physicalProps int32 // expr.physicalProps
	private       int32 // memo.private[expr.private]

	// NB: expr.{props,scalarProps} are the same for all expressions in the group
	// and stored in memoGroup.
}
// matchOp reports whether this expression can match the given pattern node:
// sentinel patterns match any operator, otherwise the operators must agree.
func (e *memoExpr) matchOp(pattern *expr) bool {
	if isPatternSentinel(pattern) {
		return true
	}
	return e.op == pattern.op
}

// children returns the expression's child groups, reading either the inline
// buffer or the memo's overflow storage (see numChildren/childrenBuf).
func (e *memoExpr) children(m *memo) []groupID {
	if e.numChildren > numInlineChildren {
		return m.children[e.numChildren-numInlineChildren-1]
	}
	return e.childrenBuf[:e.numChildren]
}
// DebugString renders the expression for debugging, roughly as
// "[op <private> <props-fingerprint> [child-group-ids]]".
func (e *memoExpr) DebugString(m *memo) string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "[%s", e.op)
	if e.private > 0 {
		p := m.private[e.private]
		switch t := p.(type) {
		case nil:
		case *table:
			// Tables print by name rather than their full definition.
			fmt.Fprintf(&buf, " %s", t.name)
		default:
			fmt.Fprintf(&buf, " %s", p)
		}
	}
	if props := m.physicalProps[e.physicalProps]; props != nil {
		if f := props.fingerprint(); f != "" {
			fmt.Fprintf(&buf, " %s", f)
		}
	}
	if len(e.children(m)) > 0 {
		fmt.Fprintf(&buf, " [")
		for i, c := range e.children(m) {
			if i > 0 {
				buf.WriteString(" ")
			}
			if c <= 0 {
				// Group 0 means "no child"; print a placeholder.
				buf.WriteString("-")
			} else {
				fmt.Fprintf(&buf, "%d", c)
			}
		}
		fmt.Fprintf(&buf, "]")
	}
	buf.WriteString("]")
	return buf.String()
}
// fingerprint returns a string which uniquely identifies the expression within
// the context of the memo.
func (e memoExpr) fingerprint(m *memo) memoExprFingerprint {
	return e.commonFingerprint(m, e.physicalProps)
}

// groupFingerprint is like fingerprint but excludes physical properties, so
// it identifies the logical group the expression belongs to.
func (e memoExpr) groupFingerprint(m *memo) memoExprFingerprint {
	f := e.commonFingerprint(m, 0)
	// TODO(peter): Generalize this normalization. It should probably be operator
	// specific.
	if e.op == innerJoinOp {
		// Order the two join inputs canonically so both orderings land in
		// the same group.
		if f.children[0] > f.children[1] {
			f.children[0], f.children[1] = f.children[1], f.children[0]
		}
	}
	return f
}

// commonFingerprint builds the fingerprint shared by fingerprint and
// groupFingerprint; the physicalProps argument selects whether physical
// properties participate in it.
func (e *memoExpr) commonFingerprint(m *memo, physicalProps int32) memoExprFingerprint {
	var f memoExprFingerprint
	f.op = e.op
	children := e.children(m)
	if len(children) <= numInlineChildren {
		for i := range children {
			f.children[i] = children[i]
		}
	}
	// Slow path: fold private data, physical properties and overflow
	// children into the textual extra field.
	if e.private > 0 || physicalProps > 0 || len(children) > numInlineChildren {
		var buf bytes.Buffer
		p := m.private[e.private]
		switch t := p.(type) {
		case nil:
		case *table:
			fmt.Fprintf(&buf, " %s", t.name)
		default:
			fmt.Fprintf(&buf, " %s", p)
		}
		if props := m.physicalProps[physicalProps]; props != nil {
			if f := props.fingerprint(); f != "" {
				fmt.Fprintf(&buf, " %s", f)
			}
		}
		if len(children) > 0 {
			fmt.Fprintf(&buf, " [")
			for i, c := range e.children(m) {
				if i > 0 {
					buf.WriteString(" ")
				}
				if c <= 0 {
					buf.WriteString("-")
				} else {
					fmt.Fprintf(&buf, "%d", c)
				}
			}
			fmt.Fprintf(&buf, "]")
		}
		f.extra = buf.String()
	}
	return f
}

// opClass returns the class registered for this operator in operatorTab.
func (e *memoExpr) opClass() operatorClass {
	return operatorTab[e.op].class
}
// memoOptState maintains the optimization state for a group for a particular
// optimization context.
type memoOptState struct {
	// The index of the last optimized expression.
	optimized exprID
	// The location of the lowest cost expression.
	loc memoLoc
	// The cost of the lowest cost expression.
	cost float32
	// The opt state of children of the lowest expression.
	children []*memoOptState
}

// memoGroup stores a set of logically equivalent expressions. See the comments
// on memoExpr for the definition of logical equivalency.
type memoGroup struct {
	// The ID (a.k.a. index, memoLoc.group) of the group within the memo.
	id groupID

	// The index of the last explored expression. Used by search.
	explored exprID
	// The index of the last implemented expression. Used by search.
	implemented exprID

	// A map from memo expression fingerprint to the index of the memo expression
	// in the exprs slice. Used to determine if a memoExpr already exists in the
	// group.
	exprMap map[memoExprFingerprint]exprID
	exprs   []memoExpr

	// The relational properties for the group. Nil if the group contains scalar
	// expressions.
	props *relationalProps
	// The scalar properties for the group. Nil if the group contains relational
	// expressions.
	scalarProps *scalarProps

	// Map from optimization context (i.e. required physicalProperties)
	// fingerprint to optimization state (the best plan and cost, the children
	// locations associated with that plan, etc).
	//
	// TODO(peter): We intern the physicalProps in memo, which should allow this
	// to be a map[*physicalProps]exprID.
	//
	// TODO(peter): This doesn't support wildcard matches. For example, we might
	// want to find the best plan that provides a particular ordering but we
	// don't care about a distribution requirement. This is speculative. We need
	// some concrete examples before worrying about wildcard matches.
	optMap map[string]*memoOptState
}

// getOptState returns the optimization state for the required physical
// properties, lazily creating it on first use.
func (g *memoGroup) getOptState(required *physicalProps) *memoOptState {
	if g.optMap == nil {
		g.optMap = make(map[string]*memoOptState)
	}
	rf := required.fingerprint()
	opt, ok := g.optMap[rf]
	if !ok {
		// math.MaxFloat32 acts as "no plan found yet": any real plan costs less.
		opt = &memoOptState{cost: math.MaxFloat32}
		g.optMap[rf] = opt
	}
	return opt
}
// memo is the central data structure of the optimizer: a space-efficient
// store of groups of logically equivalent expressions.
type memo struct {
	// A map from expression fingerprint (memoExpr.fingerprint()) to the index of
	// the group the expression resides in.
	exprMap map[memoExprFingerprint]groupID

	// A map from group fingerprint to the index of the group in the groups
	// slice. For relational groups, the fingerprint for a group is the
	// fingerprint of the relational properties
	// (relationalProps.fingerprint()). For scalar groups, the fingerprint for a
	// group is the fingerprint of the memo expression (memoExpr.fingerprint()).
	groupMap map[memoExprFingerprint]groupID

	// The slice of groups, indexed by group ID (i.e. memoLoc.group). Note the
	// group ID 0 is invalid in order to allow zero initialization of expr to
	// indicate an expression that did not originate from the memo.
	groups []memoGroup

	// External storage of child groups for memo expressions which contain more
	// than 3 children. See memoExpr.{numChildren,childrenBuf}.
	children [][]groupID

	// Physical properties attached to memo expressions.
	physicalPropsMap map[*physicalProps]int32
	physicalProps    []*physicalProps

	// Private data attached to a memoExpr (indexed by memoExpr.private). Most
	// memo expressions do not contain private data allowing a modest savings of
	// 12 bytes per memoExpr.
	private []interface{}

	// The root group in the memo. This is the group for the expression added by
	// addRoot (i.e. the expression that we're optimizing).
	root groupID
}

// newMemo returns an empty memo with the reserved 0 slots pre-allocated.
func newMemo() *memo {
	// NB: group 0 is reserved and intentionally nil so that the 0 group index
	// can indicate that we don't know the group for an expression. Similarly,
	// index 0 for the private data is reserved.
	return &memo{
		exprMap:          make(map[memoExprFingerprint]groupID),
		groupMap:         make(map[memoExprFingerprint]groupID),
		groups:           make([]memoGroup, 1),
		physicalPropsMap: make(map[*physicalProps]int32),
		physicalProps:    make([]*physicalProps, 1),
		private:          make([]interface{}, 1),
	}
}

// String renders every group (root first) with its expressions, for
// debugging.
func (m *memo) String() string {
	var buf bytes.Buffer
	for _, id := range m.topologicalSort() {
		g := &m.groups[id]
		fmt.Fprintf(&buf, "%d:", id)
		for _, e := range g.exprs {
			fmt.Fprintf(&buf, " %s", e.DebugString(m))
		}
		// Disabled debug dump of per-context optimization state; flip the
		// condition to include it in the output.
		if false {
			for f, opt := range g.optMap {
				fmt.Fprintf(&buf, " {%s:%d:%0.1f}", f, opt.loc.expr, opt.cost)
			}
		}
		fmt.Fprintf(&buf, "\n")
	}
	return buf.String()
}
// topologicalSort returns an ordering of memo groups such that if an expression
// in group i points to group j, i comes before j in the ordering.
func (m *memo) topologicalSort() []groupID {
	seen := make([]bool, len(m.groups))
	order := make([]groupID, 0, len(m.groups))
	// Group 0 is reserved/invalid, so the traversal starts at group 1.
	for i := 1; i < len(m.groups); i++ {
		order = m.dfsVisit(groupID(i), seen, order)
	}
	// The DFS appends groups leaves-first; reverse so the root comes first.
	for lo, hi := 0, len(order)-1; lo < hi; lo, hi = lo+1, hi-1 {
		order[lo], order[hi] = order[hi], order[lo]
	}
	return order
}

// dfsVisit performs a depth-first search starting from the group, avoiding
// already visited nodes. Returns the visited nodes in depth-first order.
func (m *memo) dfsVisit(id groupID, visited []bool, res []groupID) []groupID {
	if id <= 0 || visited[id] {
		return res
	}
	visited[id] = true
	for _, e := range m.groups[id].exprs {
		for _, child := range e.children(m) {
			res = m.dfsVisit(child, visited, res)
		}
	}
	return append(res, id)
}
// addRoot registers the expression being optimized as the memo's root. It may
// only be called once per memo.
func (m *memo) addRoot(e *expr) {
	if m.root != 0 {
		fatalf("root has already been set")
	}
	m.root = m.addExpr(e)
}

// addExpr adds an expression to the memo and returns the group it was added to.
func (m *memo) addExpr(e *expr) groupID {
	if e.loc.group > 0 && e.loc.expr >= 0 {
		// The expression has already been added to the memo.
		return e.loc.group
	}

	// Build a memoExpr and check to see if it already exists in the memo.
	me := memoExpr{
		op: e.op,
	}
	if len(e.children) <= numInlineChildren {
		me.numChildren = int32(len(e.children))
	} else {
		// Overflow: children live in m.children; numChildren encodes the
		// overflow slot index (see memoExpr.numChildren).
		idx := int32(len(m.children))
		me.numChildren = numInlineChildren + idx + 1
		m.children = append(m.children, make([]groupID, len(e.children)))
	}
	// Recursively memoize the children into their groups.
	children := me.children(m)
	for i, g := range e.children {
		if g != nil {
			children[i] = m.addExpr(g)
		}
	}
	// Intern physical properties so identical props share one index.
	if e.physicalProps != nil {
		i, ok := m.physicalPropsMap[e.physicalProps]
		if !ok {
			i = int32(len(m.physicalProps))
			m.physicalPropsMap[e.physicalProps] = i
			m.physicalProps = append(m.physicalProps, e.physicalProps)
		}
		me.physicalProps = i
	}
	if e.private != nil {
		me.private = int32(len(m.private))
		m.private = append(m.private, e.private)
	}

	// Normalize the child order for operators which are not sensitive to the input
	// order.
	//
	// TODO(peter): this should likely be a method on the operator.
	switch me.op {
	case listOp, andOp, orOp:
		children := me.children(m)
		sort.Slice(children, func(i, j int) bool {
			return children[i] < children[j]
		})
	}

	ef := me.fingerprint(m)
	if group, ok := m.exprMap[ef]; ok {
		// The expression already exists in the memo.
		if me.numChildren > numInlineChildren {
			if me.numChildren-numInlineChildren == int32(len(m.children)) {
				// Remove the child slice we added. This is a space optimization and
				// not strictly necessary.
				m.children = m.children[:len(m.children)-1]
			}
		}
		if me.private == int32(len(m.private))-1 {
			// Remove the private data we added.
			m.private = m.private[:me.private]
		}
		return group
	}

	group := e.loc.group
	if group == 0 {
		// Determine which group the expression belongs in, creating it if
		// necessary.
		var ok bool
		gf := me.groupFingerprint(m)
		group, ok = m.groupMap[gf]
		if !ok {
			group = groupID(len(m.groups))
			m.groups = append(m.groups, memoGroup{
				id:          group,
				exprMap:     make(map[memoExprFingerprint]exprID, 1),
				props:       e.props,
				scalarProps: e.scalarProps,
			})
			m.groupMap[gf] = group
		}
	}

	g := &m.groups[group]
	if _, ok := g.exprMap[ef]; !ok {
		g.exprMap[ef] = exprID(len(g.exprs))
		g.exprs = append(g.exprs, me)
	}
	m.exprMap[ef] = group
	return group
}
// bind creates a cursor expression rooted at the specified location that
// matches the pattern. The cursor can be advanced with calls to advance().
//
// Returns nil if the pattern does not match any expression rooted at the
// specified location.
func (m *memo) bind(loc memoLoc, pattern *expr) *expr {
	g := &m.groups[loc.group]
	e := &g.exprs[loc.expr]
	if !e.matchOp(pattern) {
		return nil
	}

	// Materialize a detached expr node for the matched memo expression.
	cursor := &expr{
		op:            e.op,
		loc:           loc,
		children:      make([]*expr, len(e.children(m))),
		props:         g.props,
		scalarProps:   g.scalarProps,
		physicalProps: m.physicalProps[e.physicalProps],
		private:       m.private[e.private],
	}
	if isPatternLeaf(pattern) {
		return cursor
	}

	// Initialize the child cursors.
	for i, g := range e.children(m) {
		childPattern := childPattern(pattern, i)
		if g == 0 {
			// No child present.
			if !isPatternSentinel(childPattern) {
				return nil
			}
			// Leave the nil cursor, it will be skipped by advance.
			continue
		}
		cursor.children[i] = m.bindGroup(&m.groups[g], childPattern)
		if cursor.children[i] == nil {
			// Pattern failed to match.
			return nil
		}
	}
	return cursor
}
// advance returns the next cursor expression that matches the pattern.
// The cursor must have been obtained from a previous call to bind() or
// advance().
//
// Returns nil if there are no more expressions that match.
func (m *memo) advance(loc memoLoc, pattern, cursor *expr) *expr {
	e := &m.groups[loc.group].exprs[loc.expr]
	if loc != cursor.loc || !e.matchOp(pattern) {
		fatalf("cursor mismatch: e: %s %s cursor: %s %s", e.op, loc, cursor.op, cursor.loc)
	}
	if isPatternLeaf(pattern) {
		// For a leaf pattern we have only the initial binding.
		return nil
	}

	// We first advance the first child cursor; when that is exhausted, we reset
	// it and advance the second cursor. Next time we will start over with
	// advancing the first child cursor until it is exhausted.
	//
	// For example, say we have three children with 2 bindings each:
	//           child 0   child 1   child 2
	// bind:        0         0         0
	// advance:     1         0         0
	// advance:     0         1         0
	// advance:     1         1         0
	// advance:     0         0         1
	// advance:     1         0         1
	// advance:     0         1         1
	// advance:     1         1         1
	// advance:     done
	//
	// This is somewhat analogous to incrementing an integer (children are digits,
	// in reverse order).
	for i, childCursor := range cursor.children {
		if childCursor == nil {
			// Skip the missing child (it must be a pattern leaf).
			continue
		}
		childPattern := childPattern(pattern, i)
		g := &m.groups[childCursor.loc.group]
		cursor.children[i] = m.advanceGroup(g, childPattern, childCursor)
		if cursor.children[i] != nil {
			// We successfully advanced a child.
			return cursor
		}
		// Reset the child cursor and advance to the next child.
		cursor.children[i] = m.bindGroup(g, childPattern)
	}
	// We exhausted all child cursors. Nothing more for us to do.
	return nil
}
// bindGroup is similar to bind, except that it can bind any expression
// rooted in the given group.
//
// Returns a cursor expression that can be advanced with advanceGroup().
//
// Returns nil if the pattern does not match any expression rooted at the
// specified location.
func (m *memo) bindGroup(g *memoGroup, pattern *expr) *expr {
	for i, e := range g.exprs {
		if !e.matchOp(pattern) {
			continue
		}
		if cursor := m.bind(memoLoc{g.id, exprID(i)}, pattern); cursor != nil {
			return cursor
		}
	}
	// The group has no expressions that match the pattern.
	return nil
}

// advanceGroup advances a cursor expression obtained from a previous call to
// bindGroup() or advanceGroup().
//
// Returns nil if there are no more expressions in the group that match the
// pattern.
func (m *memo) advanceGroup(g *memoGroup, pattern, cursor *expr) *expr {
	if isPatternLeaf(pattern) {
		// For leaf patterns we do not iterate on groups.
		return nil
	}
	// Try to advance the binding for the current expression.
	if c := m.advance(cursor.loc, pattern, cursor); c != nil {
		return c
	}
	// Find another expression to bind, resuming right after the cursor's
	// current position in the group.
	for i, e := range g.exprs[(cursor.loc.expr + 1):] {
		if !e.matchOp(pattern) {
			continue
		}
		loc := memoLoc{g.id, exprID(i) + cursor.loc.expr + 1}
		if c := m.bind(loc, pattern); c != nil {
			return c
		}
	}
	// We've exhausted the group.
	return nil
}
// extract recursively extracts the lowest cost expression that provides the
// specified properties from the specified group. It returns nil if the
// group has not been optimized for those properties.
func (m *memo) extract(required *physicalProps, group groupID) *expr {
	if opt, ok := m.groups[group].optMap[required.fingerprint()]; ok {
		return m.extractBest(opt)
	}
	return nil
}

// extractBest materializes the lowest-cost expression recorded in the given
// optimization state, recursing into the best child states.
func (m *memo) extractBest(opt *memoOptState) *expr {
	if opt == nil {
		return nil
	}
	group := &m.groups[opt.loc.group]
	best := &group.exprs[opt.loc.expr]
	result := &expr{
		op:            best.op,
		loc:           opt.loc,
		children:      make([]*expr, len(opt.children)),
		props:         group.props,
		scalarProps:   group.scalarProps,
		physicalProps: m.physicalProps[best.physicalProps],
		private:       m.private[best.private],
	}
	for i, child := range opt.children {
		result.children[i] = m.extractBest(child)
	}
	return result
}
|
package main
import (
"github.com/maestre3d/bob/bin"
)
// main boots the bob command-line interface.
func main() {
	// Build the bootstrapper and hand control to the CLI.
	bootstrapper := &bin.Bootstrap{}
	bootstrapper.InitCLI()
}
|
// Copyright (C) 2019 rameshvk. All rights reserved.
// Use of this source code is governed by a MIT-style license
// that can be found in the LICENSE file.
// Package partition provides utilities to partition requests to a cluster.
//
// Every server in the cluster receives requests meant for any
// partition but these requests are then routed to the right partition
// where it is handled.
//
// Each server in the cluster also serves traffic from other servers
// destined for its own partition.
//
// Usage:
//
// Every server in the cluster creates a router at initialization
// time:
//
// endpointRegistry = partition.WithEndpointRegistry(...)
// router, err := partition.New(ctx, "ownIP:2222", handler, endpointRegistry)
//
// The endpoint registry keeps track of all the servers in the cluster
// and their local addresses for inter-server communication
//
// The handler is the server's own implementation of requests routed to
// it by other servers.
//
// When an external request comes in, the server would hash the
// request and then use the router to execute it on the right server
// in its cluster:
//
// resp, err := router.Run(ctx, hash, request)
//
//
// Note that this call would end up being executed on another server
// in the cluster (or on the local server itself if this hash is
// mapped to the local server).
//
// The effectiveness of this strategy depends on how uniform the hash
// is and the mapping strategy. The default mechanism to map hashes
// to specific servers in the cluster is to use a
// highest-random-weight algorithm which can be overridden using the
// WithPicker option.
//
// The inter-server communication is via RPC and this also can be
// configured to alternate mechanisms using the WithNetwork option.
//
//
// The requests and responses are expected to be byte slices. For
// stronger types, protobufs can be used to serialize structures or
// the runner package (see
// https://godoc.org/github.com/tvastar/cluster/pkg/partition/runner)
// for solution using gob-encoding and reflection.
package partition
import (
"context"
"io"
)
// EndpointRegistry manages the live list of endpoints in a cluster.
//
// The partition package does not cache the results, so any caching
// requirement must be handled by the registry implementation itself.
type EndpointRegistry interface {
	// RegisterEndpoint adds addr to the cluster; closing the returned Closer
	// removes it again.
	RegisterEndpoint(ctx context.Context, addr string) (io.Closer, error)
	// ListEndpoints returns the current endpoint addresses, optionally
	// forcing a refresh.
	ListEndpoints(ctx context.Context, refresh bool) ([]string, error)
}
// Network implements the communication network between servers in the cluster.
type Network interface {
	// DialClient connects to the server at addr for issuing requests.
	DialClient(ctx context.Context, addr string) (RunCloser, error)
	// RegisterServer serves inter-cluster requests for addr via handler;
	// closing the returned Closer stops serving.
	RegisterServer(ctx context.Context, addr string, handler Runner) (io.Closer, error)
}
// Runner executes a single request with the specified hash.
type Runner interface {
	Run(ctx context.Context, hash uint64, input []byte) ([]byte, error)
}
// RunCloser combines Runner and io.Closer.
type RunCloser interface {
	Runner
	io.Closer
}
// defaultConfig holds the built-in defaults applied by New before options
// run: an RPC-based network. The default picker is set in New (NewPicker)
// and there is deliberately no default EndpointRegistry.
var defaultConfig = config{
	Network: NewRPCNetwork(nil),
}
// New returns a RunCloser that routes each request to the cluster
// endpoint selected from the request's hash.
//
// The given addr is registered with the cluster, and handler serves the
// requests that map onto this local server.
//
// Picker and Network have defaults; an EndpointRegistry has none and
// must be supplied via WithEndpointRegistry.
func New(ctx context.Context, addr string, handler Runner, opts ...Option) (RunCloser, error) {
	cfg := defaultConfig
	cfg.pickEndpoint = NewPicker()
	for _, apply := range opts {
		apply(&cfg)
	}
	s := &state{config: cfg, addr: addr, handler: handler}
	return s.init(ctx)
}
// config carries the configurable dependencies of a partition router.
// EndpointRegistry and Network are embedded so Options can overwrite them
// directly; pickEndpoint maps a hash onto one of the listed endpoints.
type config struct {
	EndpointRegistry
	Network
	pickEndpoint func(ctx context.Context, list []string, hash uint64) string
}
// Option configures the partitioning algorithm. Options are applied by
// New in the order they are passed.
type Option func(c *config)
// WithEndpointRegistry specifies how the endpoints
// discovery/registration happens.
//
// There is no default endpoint registry.
//
// RedisRegistry implements a Redis-based endpoint registry.
func WithEndpointRegistry(r EndpointRegistry) Option {
	return func(c *config) {
		c.EndpointRegistry = r
	}
}
// WithPicker specifies how to pick an endpoint based on the hash.
//
// The default algorithm is to use the highest random weight
// algorithm (via NewPicker()).
func WithPicker(picker func(ctx context.Context, list []string, hash uint64) string) Option {
	return func(c *config) {
		c.pickEndpoint = picker
	}
}
// WithNetwork provides a network implementation for servers in the
// cluster to route requests to each other.
//
// The default mechanism is to use RPC (via NewRPCNetwork).
func WithNetwork(nw Network) Option {
	return func(c *config) {
		c.Network = nw
	}
}
|
package collector_mongod
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
	// storageEngine describes a constant-value info metric whose single
	// "engine" label carries the name of the storage engine in use.
	storageEngine = prometheus.NewDesc(
		prometheus.BuildFQName(Namespace, "", "storage_engine"),
		"The storage engine used by the MongoDB instance",
		[]string{"engine"}, nil)
)
// StorageEngineStats captures the storage-engine section of the server
// status document; only the engine name is decoded.
type StorageEngineStats struct {
	Name string `bson:"name"`
}
// Export exports the storage-engine information to prometheus as a
// constant metric of value 1 whose "engine" label holds the engine name.
// Info-style metrics like this never accumulate, so by Prometheus
// convention they are gauges — GaugeValue replaces the original
// CounterValue.
func (stats *StorageEngineStats) Export(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(storageEngine, prometheus.GaugeValue, 1, stats.Name)
}
// Describe sends the single metric descriptor to the prometheus
// registry.
func (stats *StorageEngineStats) Describe(ch chan<- *prometheus.Desc) {
	ch <- storageEngine
}
|
package Group_Anagrams
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestGroupAnagrams checks the grouping of a known input. Note the
// expectation is order-sensitive: it assumes groupAnagrams keeps
// first-seen order of groups and of the words within each group.
func TestGroupAnagrams(t *testing.T) {
	ast := assert.New(t)
	// Element types inside a [][]string literal are implied; the
	// redundant []string{...} spellings are dropped (gofmt -s).
	ast.Equal([][]string{
		{"eat", "tea", "ate"},
		{"tan", "nat"},
		{"bat"},
	},
		groupAnagrams([]string{"eat", "tea", "tan", "ate", "nat", "bat"}))
}
|
package controllers
import (
"net/http"
)
// base holds the shared controller state used by every handler in this
// package.
var base *Base

// init eagerly constructs the shared Base. NOTE(review): package init
// with side effects complicates testing and ordering — consider explicit
// initialization from main if initBase does I/O.
func init() {
	base = initBase()
}
// Admin renders the admin index page. The error page is rendered only
// when layout rendering actually failed; the original invoked the error
// renderer unconditionally, even on success.
func Admin(writer http.ResponseWriter, request *http.Request) {
	err := base.DisplayHtmlLayOut(writer, "index.html", nil, nil)
	if err != nil {
		base.DisplayErrorHtml(writer, err)
	}
}
// WelCome renders the welcome page with the shared public header
// partial. The error page is rendered only when layout rendering
// actually failed; the original invoked the error renderer
// unconditionally, even on success.
func WelCome(writer http.ResponseWriter, req *http.Request) {
	err := base.DisplayHtmlLayOut(writer, "welcome.html", nil, []string{
		"public/header.html",
	})
	if err != nil {
		base.DisplayErrorHtml(writer, err)
	}
}
|
package main
import (
"fmt"
"log"
"net/http"
"runtime/debug"
)
// clientError replies with the standard status text for the given
// client-error (4xx) status code.
func (app *application) clientError(w http.ResponseWriter, status int) {
	http.Error(w, http.StatusText(status), status)
}
// serverError logs the error plus a stack trace through the
// application's error logger, then replies 500 Internal Server Error.
func (app *application) serverError(w http.ResponseWriter, err error) {
	// NOTE(review): leftover debug print — remove it together with the
	// "log" import (this is that import's only use in the file).
	log.Println("IT here")
	// Include a stack trace so server-side failures can be debugged.
	stacktrace := fmt.Sprintf("%s \n %s", err.Error(), debug.Stack())
	// Frame depth 2 makes the logger report the caller of serverError
	// rather than this helper as the origin of the log line.
	app.errorLog.Output(2, stacktrace)
	http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
}
// NotFound is a convenience wrapper around clientError that sends a
// 404 Not Found response. The original comment already described it as
// a clientError wrapper but the body duplicated http.Error inline; it
// now actually delegates (behavior is identical).
func (app *application) NotFound(w http.ResponseWriter) {
	app.clientError(w, http.StatusNotFound)
}
|
package git
import (
"context"
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
// TestResolve exercises Resolver.Resolve with a fake git-clone
// implementation, covering branch resolution, SHA resolution, and the
// invalid case where neither is set.
func TestResolve(t *testing.T) {
	tests := []struct {
		name      string
		cloneFake func(context.Context, string, string, string, string) error
		branch    string
		sha       string
		expectErr bool
	}{
		{
			name: "resolve branch",
			// The fake succeeds only for the exact URL/branch combination
			// the resolver is expected to pass through.
			cloneFake: func(ctx context.Context, tempDir, url, branch, sha string) error {
				if url == "example.org" && branch == "test" && sha == "" {
					return nil
				}
				return errors.New("wrong URL and branch")
			},
			branch:    "test",
			sha:       "",
			expectErr: false,
		},
		{
			name: "resolve SHA",
			cloneFake: func(ctx context.Context, tempDir, url, branch, sha string) error {
				if url == "example.org" && branch == "" && sha == "abcdefg" {
					return nil
				}
				return errors.New("wrong URL and SHA")
			},
			branch:    "",
			sha:       "abcdefg",
			expectErr: false,
		},
		{
			// No clone fake needed: Resolve should fail before cloning.
			name:      "neither branch nor SHA set",
			cloneFake: nil,
			branch:    "",
			sha:       "",
			expectErr: true,
		},
	}
	for _, test := range tests {
		test := test // capture the range variable for the subtest closure
		t.Run(test.name, func(t *testing.T) {
			resolver := &Resolver{
				URL:               "example.org",
				Branch:            test.branch,
				SHA:               test.sha,
				OperatorDirectory: "operator",
				gitClone:          test.cloneFake,
			}
			_, remover, err := resolver.Resolve(context.Background())
			// Always clean up any temporary checkout the resolver made.
			if remover != nil {
				defer func() {
					assert.NoError(t, remover())
				}()
			}
			if test.expectErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
|
package gpn
import (
"io/ioutil"
"os"
"testing"
)
// TestGetNumPages verifies that GetNumPages extracts the expected page
// count from the pagination.xml fixture.
//
// Fixes over the original: the error is checked before deferring Close
// (the defer previously ran against a possibly-nil handle), Fatalf
// stops the test instead of continuing with no data, and the ReadAll
// error is no longer silently discarded.
func TestGetNumPages(t *testing.T) {
	file, err := os.Open("pagination.xml")
	if err != nil {
		t.Fatalf("cannot open testing file: %v", err)
	}
	defer file.Close()
	byts, err := ioutil.ReadAll(file)
	if err != nil {
		t.Fatalf("cannot read testing file: %v", err)
	}
	const expected = 519
	actual := GetNumPages(byts)
	if expected != actual {
		t.Errorf("expected %d, got %d", expected, actual)
	}
}
|
/*
Package benchmarks includes benchmarks for the components of the SSE
library.
*/
package benchmarks
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mm
import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
)
// InvalidateUnsavable invokes memmap.Mappable.InvalidateUnsavable on all
// Mappables mapped by mm.
//
// The first error encountered aborts the walk and is returned.
func (mm *MemoryManager) InvalidateUnsavable(ctx context.Context) error {
	mm.mappingMu.RLock()
	defer mm.mappingMu.RUnlock()
	// Walk every vma; only vmas backed by a Mappable need invalidating.
	for vseg := mm.vmas.FirstSegment(); vseg.Ok(); vseg = vseg.NextSegment() {
		if vma := vseg.ValuePtr(); vma.mappable != nil {
			if err := vma.mappable.InvalidateUnsavable(ctx); err != nil {
				return err
			}
		}
	}
	return nil
}
// beforeSave is invoked by stateify.
//
// It asserts that every pma is backed by the MemoryFile: a pma backed
// by any other file should already have been removed by
// InvalidateUnsavable, so finding one here is a fatal save-time bug.
func (mm *MemoryManager) beforeSave() {
	mf := mm.mfp.MemoryFile()
	for pseg := mm.pmas.FirstSegment(); pseg.Ok(); pseg = pseg.NextSegment() {
		if pma := pseg.ValuePtr(); pma.file != mf {
			// InvalidateUnsavable should have caused all such pmas to be
			// invalidated.
			panic(fmt.Sprintf("Can't save pma %#v with non-MemoryFile of type %T:\n%s", pseg.Range(), pma.file, mm))
		}
	}
}
// afterLoad is invoked by stateify.
//
// Every pma's file reference is re-pointed at the restored MemoryFile,
// and AddressSpaceIO support is re-queried from the platform.
func (mm *MemoryManager) afterLoad() {
	mm.haveASIO = mm.p.SupportsAddressSpaceIO()
	mf := mm.mfp.MemoryFile()
	for pseg := mm.pmas.FirstSegment(); pseg.Ok(); pseg = pseg.NextSegment() {
		pseg.ValuePtr().file = mf
	}
}
|
package hnapi
const (
	// TopStoriesURL is the URL used to retrieve the item numbers (IDs)
	// of the top hacker news stories.
	TopStoriesURL = "https://hacker-news.firebaseio.com/v0/topstories.json"
)
// TopStoriesItemNumbers fetches the item numbers of the current top
// stories (up to 500) from the Hacker News API at
// https://hacker-news.firebaseio.com/v0/topstories.
//
// Example: https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty
func TopStoriesItemNumbers() (StoryItemNumbers, error) {
	ids := StoryItemNumbers{}
	if err := getJSON(TopStoriesURL, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}
// RetrieveTopNStories returns up to n top stories from Hacker News as
// a pointer to a slice of items.
func RetrieveTopNStories(n int, logger *Logger) (*[]HNItem, error) {
	logger.VerbosePrintln("Retrieving Top Story Item Numbers")
	ids, err := TopStoriesItemNumbers()
	if err != nil {
		return nil, err
	}
	logger.VerbosePrintfln("Retrieving Top %d Stories", n)
	var stories []HNItem
	for i, id := range ids {
		if i >= n {
			break
		}
		stories = append(stories, *GetItem(id))
	}
	return &stories, nil
}
// StreamTopNStories streams the top n stories from Hacker News.
// Returns a channel over which to receive the stories; the channel is
// closed once n stories (or all available stories) have been sent.
func StreamTopNStories(n int, logger *Logger) (chan *HNItem, error) {
	c := make(chan *HNItem)
	logger.VerbosePrintln("Retrieving Top Story Item Numbers")
	topStoriesIDs, err := TopStoriesItemNumbers()
	if err != nil {
		logger.Printfln("Failed to retrieve Top Stories Item Numbers with error:\n\n%s.", err.Error())
		return nil, err
	}
	// Items are fetched on a separate goroutine; the unbuffered channel
	// paces the producer to the consumer. The goroutine ends (and the
	// channel is closed) after n items or when the ID list is exhausted.
	go func(topStoriesIDs StoryItemNumbers, n int, logger *Logger, c chan *HNItem) {
		logger.VerbosePrintfln("Retrieving Top %d Stories", n)
		for i, storyID := range topStoriesIDs {
			if i >= n {
				close(c)
				return
			}
			story := GetItem(storyID)
			c <- story
		}
		close(c)
	}(topStoriesIDs, n, logger, c)
	return c, nil
}
|
package db
// DbConfig holds the database configuration decoded from YAML; only
// the backend type is configurable here.
type DbConfig struct {
	Type string `yaml:"type,omitempty"`
}
// Config is the package-wide database configuration, initialized to
// the package defaults. NOTE(review): mutable package-level state —
// callers replace it via SetConfig.
var Config DbConfig = Defaults()

// SetConfig replaces the package-wide database configuration.
func SetConfig(config DbConfig) {
	Config = config
}
|
package controllers
import (
"net/http"
"strconv"
"github.com/julienschmidt/httprouter"
"github.com/raggaer/castro/app/database"
"github.com/raggaer/castro/app/models"
"github.com/raggaer/castro/app/util"
)
// Home is the main aac homepage. It renders the paginated article
// list; a missing page parameter means the first page (page 0).
//
// The original duplicated the fetch/render sequence for the
// empty-parameter branch; both paths are unified here. Semantics are
// preserved: only an explicitly requested page redirects home when it
// has no articles (the implicit first page may render empty).
func Home(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	// Articles per page.
	perPage := 4

	page := ps.ByName("page")
	pageNumber := 0
	if page != "" {
		// An explicit page must parse as an integer.
		n, err := strconv.Atoi(page)
		if err != nil {
			http.Redirect(w, r, "/", http.StatusFound)
			return
		}
		pageNumber = n
	}

	// Get the article list for the requested page.
	articles, max, err := getArticles(pageNumber, perPage)
	if err != nil {
		util.Logger.Error(err)
		return
	}

	// An explicitly requested page with no articles is out of range.
	if page != "" && len(articles) == 0 {
		http.Redirect(w, r, "/", http.StatusFound)
		return
	}

	// Render template.
	util.Template.RenderTemplate(w, r, "home.html", map[string]interface{}{
		"articles": articles,
		"page":     pageNumber,
		"max":      max / perPage,
	})
}
// getArticles loads one page of articles plus the total article count,
// to make pagination work.
//
// page is a zero-based page index and per is the page size, so the
// database offset is page*per. (The original passed the page index
// straight to Offset, which skipped `page` rows rather than `page`
// pages, breaking pagination for every page after the first.)
func getArticles(page, per int) ([]models.Article, int, error) {
	articles := []models.Article{}
	maxArticles := 0
	if err := database.DB.Table("articles").Count(&maxArticles).Offset(page * per).Limit(per).Order("id DESC").Scan(&articles).Error; err != nil {
		return nil, 0, err
	}
	return articles, maxArticles, nil
}
|
/*
Coin change:
the state space is an (amount+1)^2 matrix where
dimension one is the number of coins used and
dimension two is the accumulated amount;
rows are used starting from index 1.
*/
package dynamicProgramming
import "fmt"
// CoinChange describes one instance of the coin-change problem, solved
// with dynamic programming in getMin.
type CoinChange struct {
	arr    []int // coin denominations
	cnt    int   // number of denominations in arr
	amount int   // target amount to pay
}
// newCoinChange builds a CoinChange for the given denominations,
// denomination count, and target amount.
func newCoinChange(arr []int, cnt, amount int) *CoinChange {
	return &CoinChange{arr: arr, cnt: cnt, amount: amount}
}
// printStates dumps the reachable-state matrix for debugging, one row
// per line. NOTE(review): rows 0 and cc.amount are skipped (the loop
// runs 1..amount-1) even though states has amount+1 rows — confirm
// that is intentional.
func (cc *CoinChange) printStates(states [][]bool) {
	for i := 1; i < cc.amount; i++ {
		fmt.Printf("%v\n", states[i])
	}
}
// getMin computes the minimum number of coins needed to reach
// cc.amount by filling the state matrix level by level (row i holds
// the amounts reachable with exactly i coins), stopping at the first
// row that reaches the target. It returns the coin count, the state
// matrix, and the chosen coins reconstructed by walking the matrix
// backwards.
//
// NOTE(review): if the target is never reachable, the loop leaves
// i == cc.amount+1 and states[i] below would index out of range —
// confirm callers guarantee reachability.
func (cc *CoinChange) getMin() (int, [][]bool, []int) {
	var states = make([][]bool, cc.amount+1)
	for i := 0; i < cc.amount+1; i++ {
		states[i] = make([]bool, cc.amount+1)
	}
	// Row 1: amounts reachable with a single coin.
	for i := 0; i < cc.cnt; i++ {
		states[1][cc.arr[i]] = true
	}
	var i int // number of coins used so far
	for i = 2; i < cc.amount+1; i++ {
		for j := 1; j < cc.amount+1; j++ {
			if !states[i-1][j] {
				continue
			}
			for k := 0; k < cc.cnt; k++ {
				if j+cc.arr[k] <= cc.amount && !states[i][j+cc.arr[k]] {
					states[i][j+cc.arr[k]] = true
					if j+cc.arr[k] == cc.amount {
						goto LoopEnd
					}
				}
			}
		}
	}
LoopEnd:
	// fmt.Println(i)
	var coins = make([]int, i)
	var x int
	if states[i][cc.amount] {
		var remain = cc.amount
		var j int
		// Walk backwards from row i-1, peeling off one coin per row.
		for j = i - 1; j > 0; j-- {
			// fmt.Printf("j: %d, remain: %d \n", j, remain)
			for k := 0; k < cc.cnt; k++ {
				if remain == cc.arr[k] && j == 1 {
					coins[x] = cc.arr[k]
					remain = remain - cc.arr[k]
					break
				}
				if remain > cc.arr[k] && states[j][remain-cc.arr[k]] {
					coins[x] = cc.arr[k]
					x += 1
					remain = remain - cc.arr[k]
					break
				}
			}
		}
		if states[j+1][remain] && j == 0 {
			coins[x] = remain
		}
	}
	return i, states, coins
}
|
package models
// TaxLine models a single tax entry; Amount is the tax amount and
// ParentID links back to the owning resource. Empty fields are omitted
// from the JSON encoding.
type TaxLine struct {
	ConektaBase
	ID          string  `json:"id,omitempty"`
	Description string  `json:"description,omitempty"`
	Amount      float64 `json:"amount,omitempty"`
	ParentID    string  `json:"parent_id,omitempty"`
}
|
package udwCryptoEncryptV3
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"errors"
"github.com/tachyon-protocol/udw/udwBase64"
"github.com/tachyon-protocol/udw/udwRand"
)
// magicCode4 is a fixed 4-byte trailer encrypted along with the payload;
// DecryptV3 checks it to detect a wrong key or corrupted data.
var magicCode4 = [4]byte{0xa7, 0x97, 0x6d, 0x15}
// EncryptV3 encrypts data with AES-256 in CTR mode under a fresh
// random 16-byte IV. Output layout: IV (16 bytes) || CTR(plaintext ||
// magicCode4). Panics only if the cipher cannot be constructed.
func EncryptV3(key *[32]byte, data []byte) (output []byte) {
	iv := udwRand.MustCryptoRandBytes(16)
	block, err := aes.NewCipher(key[:])
	if err != nil {
		panic(err)
	}
	plainLen := len(data)
	output = make([]byte, 16+plainLen+4)
	copy(output, iv)
	copy(output[16:], data)
	copy(output[16+plainLen:], magicCode4[:])
	// Encrypt payload+trailer in place, leaving the IV in the clear.
	cipher.NewCTR(block, iv).XORKeyStream(output[16:], output[16:])
	return output
}
// DecryptV3 reverses EncryptV3: it strips the 16-byte IV, decrypts the
// remainder with AES-256-CTR, and verifies the 4-byte magic trailer
// before returning the plaintext.
func DecryptV3(key *[32]byte, data []byte) (output []byte, err error) {
	// Minimum size: 16-byte IV plus 4-byte magic trailer.
	if len(data) < 20 {
		return nil, errors.New("[udwCrypto.DecryptV3] input data too small")
	}
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return nil, err
	}
	iv := data[:16]
	body := data[16:]
	output = make([]byte, len(body))
	cipher.NewCTR(block, iv).XORKeyStream(output, body)
	// The trailer only matches when the key was right and the data intact.
	plainLen := len(output) - 4
	if !bytes.Equal(output[plainLen:], magicCode4[:]) {
		return nil, errors.New("[udwCrypto.DecryptV3] magicCode not match mtjqaqzgm9")
	}
	return output[:plainLen], nil
}
func EncryptV3WithBase64(key *[32]byte, input []byte) (output string) {
outputB := EncryptV3(key, input)
output = udwBase64.EncodeByteToStringV2(outputB)
return output
}
// DecryptV3WithBase64 base64-decodes s, then decrypts the result with
// DecryptV3.
func DecryptV3WithBase64(key *[32]byte, s string) (output []byte, err error) {
	raw, err := udwBase64.DecodeStringToByteV2(s)
	if err != nil {
		return nil, err
	}
	return DecryptV3(key, raw)
}
|
package main
import (
"database/sql"
"flag"
"fmt"
"html/template"
"log"
"net/http"
"strconv"
"time"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/paalka/ewok/pkg/config"
"github.com/paalka/ewok/pkg/db"
"github.com/paalka/ewok/pkg/feed"
)
const (
	// ITEMS_PER_PAGE is the number of feed items shown per page.
	// NOTE(review): Go naming convention would be ItemsPerPage.
	ITEMS_PER_PAGE = 30
)
// renderTemplate executes the named template (tmpl_name + ".html")
// into w, replying 500 when execution fails.
func renderTemplate(w http.ResponseWriter, templates *template.Template, tmpl_name string, data interface{}) {
	if err := templates.ExecuteTemplate(w, tmpl_name+".html", data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// getPageIndices returns the 1-based page numbers [1..nPages] used to
// render the pagination links.
func getPageIndices(db *sql.DB) ([]int, error) {
	nPages, err := feed.GetNumFeedPages(db, ITEMS_PER_PAGE)
	if err != nil {
		return nil, err
	}
	pageIndices := make([]int, *nPages)
	// `for i := range` replaces the redundant `for i, _ := range`
	// (gofmt -s simplification).
	for i := range pageIndices {
		pageIndices[i] = i + 1
	}
	return pageIndices, nil
}
// makeIndexHandler returns the handler for "/", which renders page 1
// of the feed overview.
func makeIndexHandler(config config.Config, templates *template.Template, db *sql.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		feeds, err := feed.GetFeeds(db)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		feedItems, err := feed.GetPaginatedFeeds(db, ITEMS_PER_PAGE, 0)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Reformat the update timestamps for display. Index the slice
		// directly: the original ranged by value over []feed.EwokFeed,
		// so each assignment only mutated a copy and was lost.
		for i := range feeds {
			feeds[i].Updated = feed.ParseTime(feed.TimeLayout, feeds[i].Updated).Format(time.RFC1123)
		}
		pageIndices, err := getPageIndices(db)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		renderTemplate(w, templates, "index", struct {
			FeedItems   []feed.EwokItem
			Feeds       []feed.EwokFeed
			PageIndices []int
			CurrentPage int
		}{feedItems, feeds, pageIndices, 1})
	}
}
// makePageHandler returns the handler for "/page/{paginationIndex}",
// rendering the requested 1-based page of the feed overview.
func makePageHandler(config config.Config, templates *template.Template, db *sql.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Parse the page number once. Any malformed or non-positive
		// value is a 404. (The original parsed twice — Atoi then
		// ParseInt — and inconsistently answered 500 for the first
		// failure and 404 for the second.)
		index, err := strconv.Atoi(chi.URLParam(r, "paginationIndex"))
		if err != nil || index < 1 {
			http.Error(w, "Page not found!", http.StatusNotFound)
			return
		}
		// Pages are 1-based externally, 0-based towards the store.
		offset := index - 1
		feedItems, err := feed.GetPaginatedFeeds(db, ITEMS_PER_PAGE, uint(offset))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		feeds, err := feed.GetFeeds(db)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Reformat the update timestamps for display. Index the slice
		// directly: the original ranged by value over []feed.EwokFeed,
		// so each assignment only mutated a copy and was lost.
		for i := range feeds {
			feeds[i].Updated = feed.ParseTime(feed.TimeLayout, feeds[i].Updated).Format(time.RFC1123)
		}
		pageIndices, err := getPageIndices(db)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		renderTemplate(w, templates, "index", struct {
			FeedItems   []feed.EwokItem
			Feeds       []feed.EwokFeed
			PageIndices []int
			CurrentPage int
		}{feedItems, feeds, pageIndices, index})
	}
}
// main wires configuration, templates, middleware, routes, and the
// database connection together, then serves HTTP on the chosen port.
func main() {
	portPtr := flag.Int("port", 8080, "The port to use when running the server")
	flag.Parse()
	config := config.LoadJsonConfig("config.json")
	// ParseFiles panics (via Must) if the template is missing — a
	// deliberate fail-fast at startup.
	templates := template.Must(template.ParseFiles("web/templates/index.html"))
	baseRouter := chi.NewRouter()
	// Standard chi middleware stack; Recoverer turns handler panics
	// into 500s and Timeout bounds each request to 60s.
	baseRouter.Use(middleware.RequestID)
	baseRouter.Use(middleware.RealIP)
	baseRouter.Use(middleware.Logger)
	baseRouter.Use(middleware.Recoverer)
	baseRouter.Use(middleware.CloseNotify)
	baseRouter.Use(middleware.Timeout(60 * time.Second))
	baseRouter.Use(middleware.DefaultCompress)
	db := db.GetDatabaseConnection(config.DB_NAME, config.DB_USER, config.DB_PASS)
	baseRouter.Get("/page/{paginationIndex}", makePageHandler(config, templates, db))
	baseRouter.Get("/", makeIndexHandler(config, templates, db))
	FileServer(baseRouter, "/static/", http.Dir("web/static"))
	port := *portPtr
	log.Printf("Listening to localhost:%d", port)
	err := http.ListenAndServe(fmt.Sprintf(":%d", port), baseRouter)
	if err != nil {
		panic(err)
	}
}
|
package courses
import (
"encoding/json"
"github.com/gin-gonic/gin"
"net/http"
)
// SetupRoutes registers the course endpoints on the router.
func SetupRoutes(router *gin.Engine) {
	router.GET("/courses/:code", getCourseByCode)
	router.GET("/courses/:code/posts", getCoursePosts)
	router.POST("/courses/", createCourse)
}
// getCourseByCode responds with the course identified by the :code
// path parameter, serialized as JSON.
func getCourseByCode(c *gin.Context) {
	c.JSON(http.StatusOK, RequestCourse(c.Param("code")))
}
// getCoursePosts responds with the posts of the course identified by
// the :code path parameter, serialized as JSON.
func getCoursePosts(c *gin.Context) {
	c.JSON(http.StatusOK, RequestCourse(c.Param("code")).Posts)
}
// createCourse decodes a course from the request body and stores it.
// A malformed body is rejected with 400 Bad Request instead of being
// silently ignored (the original dropped the Decode error and created
// a zero-valued course).
func createCourse(c *gin.Context) {
	var newCourse Course
	if err := json.NewDecoder(c.Request.Body).Decode(&newCourse); err != nil {
		c.Status(http.StatusBadRequest)
		return
	}
	requestCreateCourse(newCourse)
	c.Status(http.StatusOK)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.