text
stringlengths 11
4.05M
|
|---|
package firewall
import "github.com/stretchr/testify/mock"
// MockExecFactory is a testify-based mock of the factory that creates
// Execer commands. Expectations are registered through the embedded
// mock.Mock (e.g. _m.On("NewCmd", ...)).
type MockExecFactory struct {
	mock.Mock
}
// NewCmd records the call on the mock and returns the configured Execer.
// If the expectation registered a factory function it is invoked with the
// original arguments; otherwise the stored Execer value is returned as-is.
func (_m *MockExecFactory) NewCmd(name string, args ...string) Execer {
	ret := _m.Called(name, args)
	if fn, ok := ret.Get(0).(func(string, ...string) Execer); ok {
		return fn(name, args...)
	}
	return ret.Get(0).(Execer)
}
|
package pie
import (
"fmt"
"golang.org/x/exp/constraints"
)
// String transforms a value into a string, producing exactly the result of:
//
//	fmt.Sprintf("%v", s)
//
// If the concrete type implements fmt.Stringer, the %v verb uses it.
// Because T is constrained to constraints.Ordered, s is never nil.
func String[T constraints.Ordered](s T) string {
	return fmt.Sprintf("%v", s)
}
|
package main
import "fmt"
// ListNode is a node of a singly linked list of ints.
type ListNode struct {
	Val  int
	Next *ListNode
}

// main builds two sorted lists, merges them, and prints each value of the
// merged list on its own line.
func main() {
	l1 := &ListNode{Val: 1, Next: &ListNode{Val: 2, Next: &ListNode{Val: 5}}}
	l2 := &ListNode{Val: 1, Next: &ListNode{Val: 3, Next: &ListNode{Val: 4}}}
	for n := mergeTwoLists(l1, l2); n != nil; n = n.Next {
		fmt.Println(n.Val)
	}
}

// mergeTwoLists iteratively merges two sorted lists into one sorted list,
// reusing the existing nodes. On equal values the node from l1 is taken
// first, so the merge is stable.
func mergeTwoLists(l1 *ListNode, l2 *ListNode) *ListNode {
	var head ListNode // dummy head; the result starts at head.Next
	tail := &head
	for l1 != nil && l2 != nil {
		if l2.Val < l1.Val {
			tail.Next, l2 = l2, l2.Next
		} else {
			tail.Next, l1 = l1, l1.Next
		}
		tail = tail.Next
	}
	// At most one list is non-empty here; splice the remainder on.
	if l1 != nil {
		tail.Next = l1
	} else {
		tail.Next = l2
	}
	return head.Next
}
|
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/bigqueryreservation/alpha/bigqueryreservation_alpha_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/alpha"
)
// CapacityCommitmentServer implements the gRPC interface for CapacityCommitment.
// It is stateless: every RPC builds its own client from the request's
// service-account file.
type CapacityCommitmentServer struct{}
// ProtoToBigqueryreservationAlphaCapacityCommitmentPlanEnum converts a
// CapacityCommitmentPlanEnum enum from its proto representation. The proto
// zero value ("unspecified") and unknown values map to nil.
func ProtoToBigqueryreservationAlphaCapacityCommitmentPlanEnum(e alphapb.BigqueryreservationAlphaCapacityCommitmentPlanEnum) *alpha.CapacityCommitmentPlanEnum {
	if e == 0 {
		return nil
	}
	n, ok := alphapb.BigqueryreservationAlphaCapacityCommitmentPlanEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare DCL enum value.
	v := alpha.CapacityCommitmentPlanEnum(n[len("BigqueryreservationAlphaCapacityCommitmentPlanEnum"):])
	return &v
}
// ProtoToBigqueryreservationAlphaCapacityCommitmentStateEnum converts a
// CapacityCommitmentStateEnum enum from its proto representation. The proto
// zero value ("unspecified") and unknown values map to nil.
func ProtoToBigqueryreservationAlphaCapacityCommitmentStateEnum(e alphapb.BigqueryreservationAlphaCapacityCommitmentStateEnum) *alpha.CapacityCommitmentStateEnum {
	if e == 0 {
		return nil
	}
	n, ok := alphapb.BigqueryreservationAlphaCapacityCommitmentStateEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare DCL enum value.
	v := alpha.CapacityCommitmentStateEnum(n[len("BigqueryreservationAlphaCapacityCommitmentStateEnum"):])
	return &v
}
// ProtoToBigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum converts a
// CapacityCommitmentRenewalPlanEnum enum from its proto representation. The
// proto zero value ("unspecified") and unknown values map to nil.
func ProtoToBigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum(e alphapb.BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum) *alpha.CapacityCommitmentRenewalPlanEnum {
	if e == 0 {
		return nil
	}
	n, ok := alphapb.BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare DCL enum value.
	v := alpha.CapacityCommitmentRenewalPlanEnum(n[len("BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum"):])
	return &v
}
// ProtoToBigqueryreservationAlphaCapacityCommitmentFailureStatus converts a
// CapacityCommitmentFailureStatus object from its proto representation.
// A nil proto yields nil; nil entries inside the Details repeated field are
// skipped so the conversion cannot dereference a nil result.
func ProtoToBigqueryreservationAlphaCapacityCommitmentFailureStatus(p *alphapb.BigqueryreservationAlphaCapacityCommitmentFailureStatus) *alpha.CapacityCommitmentFailureStatus {
	if p == nil {
		return nil
	}
	obj := &alpha.CapacityCommitmentFailureStatus{
		Code:    dcl.Int64OrNil(p.GetCode()),
		Message: dcl.StringOrNil(p.GetMessage()),
	}
	for _, r := range p.GetDetails() {
		// The helper returns nil only for a nil entry; guard instead of
		// panicking on the dereference below.
		if d := ProtoToBigqueryreservationAlphaCapacityCommitmentFailureStatusDetails(r); d != nil {
			obj.Details = append(obj.Details, *d)
		}
	}
	return obj
}
// ProtoToBigqueryreservationAlphaCapacityCommitmentFailureStatusDetails
// converts a CapacityCommitmentFailureStatusDetails object from its proto
// representation. A nil proto yields nil.
func ProtoToBigqueryreservationAlphaCapacityCommitmentFailureStatusDetails(p *alphapb.BigqueryreservationAlphaCapacityCommitmentFailureStatusDetails) *alpha.CapacityCommitmentFailureStatusDetails {
	if p == nil {
		return nil
	}
	return &alpha.CapacityCommitmentFailureStatusDetails{
		TypeUrl: dcl.StringOrNil(p.GetTypeUrl()),
		Value:   dcl.StringOrNil(p.GetValue()),
	}
}
// ProtoToCapacityCommitment converts a CapacityCommitment resource from its
// proto representation. Scalar fields become pointers (nil when the proto
// carries the zero value); enums and the nested failure status are converted
// by their dedicated helpers.
func ProtoToCapacityCommitment(p *alphapb.BigqueryreservationAlphaCapacityCommitment) *alpha.CapacityCommitment {
	obj := &alpha.CapacityCommitment{
		Name:                dcl.StringOrNil(p.GetName()),
		SlotCount:           dcl.Int64OrNil(p.GetSlotCount()),
		Plan:                ProtoToBigqueryreservationAlphaCapacityCommitmentPlanEnum(p.GetPlan()),
		State:               ProtoToBigqueryreservationAlphaCapacityCommitmentStateEnum(p.GetState()),
		CommitmentStartTime: dcl.StringOrNil(p.GetCommitmentStartTime()),
		CommitmentEndTime:   dcl.StringOrNil(p.GetCommitmentEndTime()),
		FailureStatus:       ProtoToBigqueryreservationAlphaCapacityCommitmentFailureStatus(p.GetFailureStatus()),
		RenewalPlan:         ProtoToBigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum(p.GetRenewalPlan()),
		Project:             dcl.StringOrNil(p.GetProject()),
		Location:            dcl.StringOrNil(p.GetLocation()),
	}
	return obj
}
// BigqueryreservationAlphaCapacityCommitmentPlanEnumToProto converts a
// CapacityCommitmentPlanEnum enum to its proto representation. nil and
// unknown values map to the proto zero value ("unspecified").
func BigqueryreservationAlphaCapacityCommitmentPlanEnumToProto(e *alpha.CapacityCommitmentPlanEnum) alphapb.BigqueryreservationAlphaCapacityCommitmentPlanEnum {
	if e != nil {
		// The proto value table keys carry the enum type name as a prefix.
		if v, ok := alphapb.BigqueryreservationAlphaCapacityCommitmentPlanEnum_value["CapacityCommitmentPlanEnum"+string(*e)]; ok {
			return alphapb.BigqueryreservationAlphaCapacityCommitmentPlanEnum(v)
		}
	}
	return alphapb.BigqueryreservationAlphaCapacityCommitmentPlanEnum(0)
}
// BigqueryreservationAlphaCapacityCommitmentStateEnumToProto converts a
// CapacityCommitmentStateEnum enum to its proto representation. nil and
// unknown values map to the proto zero value ("unspecified").
func BigqueryreservationAlphaCapacityCommitmentStateEnumToProto(e *alpha.CapacityCommitmentStateEnum) alphapb.BigqueryreservationAlphaCapacityCommitmentStateEnum {
	if e != nil {
		// The proto value table keys carry the enum type name as a prefix.
		if v, ok := alphapb.BigqueryreservationAlphaCapacityCommitmentStateEnum_value["CapacityCommitmentStateEnum"+string(*e)]; ok {
			return alphapb.BigqueryreservationAlphaCapacityCommitmentStateEnum(v)
		}
	}
	return alphapb.BigqueryreservationAlphaCapacityCommitmentStateEnum(0)
}
// BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnumToProto converts a
// CapacityCommitmentRenewalPlanEnum enum to its proto representation. nil and
// unknown values map to the proto zero value ("unspecified").
func BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnumToProto(e *alpha.CapacityCommitmentRenewalPlanEnum) alphapb.BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum {
	if e != nil {
		// The proto value table keys carry the enum type name as a prefix.
		if v, ok := alphapb.BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum_value["CapacityCommitmentRenewalPlanEnum"+string(*e)]; ok {
			return alphapb.BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum(v)
		}
	}
	return alphapb.BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnum(0)
}
// BigqueryreservationAlphaCapacityCommitmentFailureStatusToProto converts a
// CapacityCommitmentFailureStatus object to its proto representation. A nil
// object yields nil.
func BigqueryreservationAlphaCapacityCommitmentFailureStatusToProto(o *alpha.CapacityCommitmentFailureStatus) *alphapb.BigqueryreservationAlphaCapacityCommitmentFailureStatus {
	if o == nil {
		return nil
	}
	p := &alphapb.BigqueryreservationAlphaCapacityCommitmentFailureStatus{}
	p.SetCode(dcl.ValueOrEmptyInt64(o.Code))
	p.SetMessage(dcl.ValueOrEmptyString(o.Message))
	// Index the slice directly so elements are not copied before their
	// address is taken.
	details := make([]*alphapb.BigqueryreservationAlphaCapacityCommitmentFailureStatusDetails, len(o.Details))
	for i := range o.Details {
		details[i] = BigqueryreservationAlphaCapacityCommitmentFailureStatusDetailsToProto(&o.Details[i])
	}
	p.SetDetails(details)
	return p
}
// BigqueryreservationAlphaCapacityCommitmentFailureStatusDetailsToProto
// converts a CapacityCommitmentFailureStatusDetails object to its proto
// representation. A nil object yields nil.
func BigqueryreservationAlphaCapacityCommitmentFailureStatusDetailsToProto(o *alpha.CapacityCommitmentFailureStatusDetails) *alphapb.BigqueryreservationAlphaCapacityCommitmentFailureStatusDetails {
	if o == nil {
		return nil
	}
	details := &alphapb.BigqueryreservationAlphaCapacityCommitmentFailureStatusDetails{}
	details.SetTypeUrl(dcl.ValueOrEmptyString(o.TypeUrl))
	details.SetValue(dcl.ValueOrEmptyString(o.Value))
	return details
}
// CapacityCommitmentToProto converts a CapacityCommitment resource to its
// proto representation. Pointer fields that are nil become the proto zero
// value via the dcl.ValueOrEmpty* helpers.
func CapacityCommitmentToProto(resource *alpha.CapacityCommitment) *alphapb.BigqueryreservationAlphaCapacityCommitment {
	p := &alphapb.BigqueryreservationAlphaCapacityCommitment{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetSlotCount(dcl.ValueOrEmptyInt64(resource.SlotCount))
	p.SetPlan(BigqueryreservationAlphaCapacityCommitmentPlanEnumToProto(resource.Plan))
	p.SetState(BigqueryreservationAlphaCapacityCommitmentStateEnumToProto(resource.State))
	p.SetCommitmentStartTime(dcl.ValueOrEmptyString(resource.CommitmentStartTime))
	p.SetCommitmentEndTime(dcl.ValueOrEmptyString(resource.CommitmentEndTime))
	p.SetFailureStatus(BigqueryreservationAlphaCapacityCommitmentFailureStatusToProto(resource.FailureStatus))
	p.SetRenewalPlan(BigqueryreservationAlphaCapacityCommitmentRenewalPlanEnumToProto(resource.RenewalPlan))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	return p
}
// applyCapacityCommitment handles the gRPC request by passing it to the
// underlying CapacityCommitment Apply() method and converting the result
// back to its proto form.
func (s *CapacityCommitmentServer) applyCapacityCommitment(ctx context.Context, c *alpha.Client, request *alphapb.ApplyBigqueryreservationAlphaCapacityCommitmentRequest) (*alphapb.BigqueryreservationAlphaCapacityCommitment, error) {
	res, err := c.ApplyCapacityCommitment(ctx, ProtoToCapacityCommitment(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return CapacityCommitmentToProto(res), nil
}
// ApplyBigqueryreservationAlphaCapacityCommitment handles the gRPC request by
// passing it to the underlying CapacityCommitment Apply() method. A fresh
// client is built from the request's service-account file for each call.
func (s *CapacityCommitmentServer) ApplyBigqueryreservationAlphaCapacityCommitment(ctx context.Context, request *alphapb.ApplyBigqueryreservationAlphaCapacityCommitmentRequest) (*alphapb.BigqueryreservationAlphaCapacityCommitment, error) {
	cl, err := createConfigCapacityCommitment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyCapacityCommitment(ctx, cl, request)
}
// DeleteBigqueryreservationAlphaCapacityCommitment handles the gRPC request by
// passing it to the underlying CapacityCommitment Delete() method. The empty
// proto is returned together with the Delete error (nil on success).
func (s *CapacityCommitmentServer) DeleteBigqueryreservationAlphaCapacityCommitment(ctx context.Context, request *alphapb.DeleteBigqueryreservationAlphaCapacityCommitmentRequest) (*emptypb.Empty, error) {
	cl, err := createConfigCapacityCommitment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteCapacityCommitment(ctx, ProtoToCapacityCommitment(request.GetResource()))
}
// ListBigqueryreservationAlphaCapacityCommitment handles the gRPC request by
// passing it to the underlying CapacityCommitmentList() method and converting
// every returned resource to its proto form.
func (s *CapacityCommitmentServer) ListBigqueryreservationAlphaCapacityCommitment(ctx context.Context, request *alphapb.ListBigqueryreservationAlphaCapacityCommitmentRequest) (*alphapb.ListBigqueryreservationAlphaCapacityCommitmentResponse, error) {
	cl, err := createConfigCapacityCommitment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListCapacityCommitment(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	// Pre-size the result to avoid repeated slice growth; a nil and an
	// empty repeated field serialize identically in proto.
	protos := make([]*alphapb.BigqueryreservationAlphaCapacityCommitment, 0, len(resources.Items))
	for _, r := range resources.Items {
		protos = append(protos, CapacityCommitmentToProto(r))
	}
	p := &alphapb.ListBigqueryreservationAlphaCapacityCommitmentResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigCapacityCommitment builds an alpha API client authenticated
// with the given service-account file. The error return is reserved for
// future validation and is currently always nil.
func createConfigCapacityCommitment(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	// Parameter renamed from service_account_file: Go uses MixedCaps, never
	// underscores, for identifiers.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
package tools
import (
_ "github.com/golangci/golangci-lint/cmd/golangci-lint"
_ "github.com/mercari/wrench"
_ "github.com/rakyll/statik"
_ "go.mercari.io/yo"
)
|
package service
import (
"culture/cloud/base/internal/support/api"
"github.com/goava/di"
)
// Error is a service-level error: an API status code plus the underlying
// error value.
// NOTE(review): the field named Error shadows the conventional error
// interface method name; renaming it would break existing callers.
type Error struct {
	Code  api.Code // service error code
	Error error    // wrapped error
}
// Container is the global dependency-injection service container.
// It must be assigned before Resolve is called.
var Container *di.Container

// Resolve fetches a service instance from Container into ptr.
// NOTE(review): this panics if Container has not been initialized yet —
// confirm initialization order with the application bootstrap.
func Resolve(ptr di.Pointer, options ...di.ResolveOption) error {
	return Container.Resolve(ptr, options...)
}
|
package ionic
import (
"testing"
"github.com/franela/goblin"
"github.com/ion-channel/tools-golang/spdx"
. "github.com/onsi/gomega"
)
// TestSPDX exercises ProjectsFromSPDX for SPDX v2.1 and v2.2 documents:
// top-level-only extraction, dependency extraction, NOASSERTION version
// handling, and creator e-mail parsing.
func TestSPDX(t *testing.T) {
	g := goblin.Goblin(t)
	// Route gomega failures into goblin so both frameworks report together.
	RegisterFailHandler(func(m string, _ ...int) { g.Fail(m) })
	g.Describe("SPDX v2.1", func() {
		g.It("should return the top-level project when no dependencies requested", func() {
			spdxDocumentRef := spdx.MakeDocElementID("", "DOCUMENT")
			spdxRef := spdx.MakeDocElementID("", "some-cool-pkg")
			spdxDependencyRef := spdx.MakeDocElementID("", "some-dep")
			// The "@main" suffix on the download location should surface as
			// the project's Branch.
			spdxPackage := spdx.Package2_1{
				PackageName:             "some-cool-pkg",
				PackageSPDXIdentifier:   spdxRef.ElementRefID,
				PackageVersion:          "1.2.3",
				PackageSupplier:         &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDownloadLocation: "https://github.com/some-org/some-cool-pkg.git@main",
				PackageDescription:      "Some description",
			}
			spdxDependencyPackage := spdx.Package2_1{
				PackageName:             "some-dep",
				PackageSPDXIdentifier:   spdxDependencyRef.ElementRefID,
				PackageVersion:          "3.2.1",
				PackageSupplier:         &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDownloadLocation: "https://github.com/some-org/some-dep.git",
				PackageDescription:     "Some dep description",
			}
			packages := []*spdx.Package2_1{&spdxPackage, &spdxDependencyPackage}
			// DOCUMENT DESCRIBES the top-level package, which DEPENDS_ON the dep.
			relationships := []*spdx.Relationship2_1{{
				RefA:         spdxDocumentRef,
				Relationship: "DESCRIBES",
				RefB:         spdxRef,
			}, {
				RefA:         spdxRef,
				Relationship: "DEPENDS_ON",
				RefB:         spdxDependencyRef,
			}}
			doc := spdx.Document2_1{
				DocumentName:      "SPDX SBOM",
				DocumentNamespace: "http://ionchannel.io",
				CreationInfo: &spdx.CreationInfo2_1{
					Creators:       []spdx.Creator{{Creator: "Monsieur Package Creator (mpc@mail.com)", CreatorType: "Person"}},
					CreatorComment: "some cool package SBOM",
				},
				Packages:      packages,
				Relationships: relationships,
			}
			p, err := ProjectsFromSPDX(&doc, false)
			Expect(err).To(BeNil())
			Expect(p).NotTo(BeNil())
			Expect(len(p)).To(Equal(1))
			Expect(*p[0].Name).To(Equal(spdxPackage.PackageName))
			Expect(*p[0].Type).To(Equal("git"))
			Expect(*p[0].Description).To(Equal(spdxPackage.PackageDescription))
			Expect(len(p[0].Aliases)).To(Equal(1))
			Expect(p[0].Aliases[0].Version).To(Equal(spdxPackage.PackageVersion))
			Expect(*p[0].Branch).To(Equal("main"))
			// Source is the download location with the @branch suffix stripped.
			Expect(*p[0].Source).To(Equal("https://github.com/some-org/some-cool-pkg.git"))
		})
		g.It("should return all projects when dependencies requested", func() {
			spdxDocumentRef := spdx.MakeDocElementID("", "DOCUMENT")
			spdxRef := spdx.MakeDocElementID("", "some-cool-pkg")
			spdxDependencyRef := spdx.MakeDocElementID("", "some-dep")
			spdxPackage := spdx.Package2_1{
				PackageName:             "some-cool-pkg",
				PackageSPDXIdentifier:   spdxRef.ElementRefID,
				PackageVersion:          "1.2.3",
				PackageSupplier:         &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDownloadLocation: "https://github.com/some-org/some-cool-pkg.git@my-cool-branch",
				PackageDescription:      "Some description",
			}
			// The dependency has no download location, so it should be typed
			// "source_unavailable" below.
			spdxDependencyPackage := spdx.Package2_1{
				PackageName:           "some-dep",
				PackageSPDXIdentifier: spdxDependencyRef.ElementRefID,
				PackageVersion:        "3.2.1",
				PackageSupplier:       &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDescription:    "Some dep description",
			}
			packages := []*spdx.Package2_1{&spdxPackage, &spdxDependencyPackage}
			relationships := []*spdx.Relationship2_1{{
				RefA:         spdxDocumentRef,
				Relationship: "DESCRIBES",
				RefB:         spdxRef,
			}, {
				RefA:         spdxRef,
				Relationship: "DEPENDS_ON",
				RefB:         spdxDependencyRef,
			}}
			doc := spdx.Document2_1{
				DocumentName:      "SPDX SBOM",
				DocumentNamespace: "http://ionchannel.io",
				CreationInfo: &spdx.CreationInfo2_1{
					Creators:       []spdx.Creator{{Creator: "Monsieur Package Creator (mpc@mail.com)", CreatorType: "Person"}},
					CreatorComment: "some cool package SBOM",
				},
				Packages:      packages,
				Relationships: relationships,
			}
			p, err := ProjectsFromSPDX(&doc, true)
			Expect(err).To(BeNil())
			Expect(p).NotTo(BeNil())
			Expect(len(p)).To(Equal(2))
			Expect(*p[0].Name).To(Equal(spdxPackage.PackageName))
			Expect(*p[0].Type).To(Equal("git"))
			Expect(*p[0].Description).To(Equal(spdxPackage.PackageDescription))
			Expect(len(p[0].Aliases)).To(Equal(1))
			Expect(p[0].Aliases[0].Version).To(Equal(spdxPackage.PackageVersion))
			Expect(p[0].Aliases[0].Org).To(Equal(spdxPackage.PackageSupplier.Supplier))
			Expect(*p[0].Branch).To(Equal("my-cool-branch"))
			Expect(*p[1].Name).To(Equal(spdxDependencyPackage.PackageName))
			Expect(*p[1].Type).To(Equal("source_unavailable"))
			Expect(*p[1].Description).To(Equal(spdxDependencyPackage.PackageDescription))
			Expect(len(p[1].Aliases)).To(Equal(1))
			Expect(p[1].Aliases[0].Version).To(Equal(spdxDependencyPackage.PackageVersion))
			Expect(p[1].Aliases[0].Org).To(Equal(spdxPackage.PackageSupplier.Supplier))
		})
		g.It("should convert NOASSERTION version to blank string", func() {
			spdxDocumentRef := spdx.MakeDocElementID("", "DOCUMENT")
			spdxRef := spdx.MakeDocElementID("", "some-cool-pkg")
			spdxDependencyRef := spdx.MakeDocElementID("", "some-dep")
			spdxPackage := spdx.Package2_1{
				PackageName:             "some-cool-pkg",
				PackageSPDXIdentifier:   spdxRef.ElementRefID,
				PackageVersion:          "NOASSERTION",
				PackageSupplier:         &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDownloadLocation: "https://github.com/some-org/some-cool-pkg.git@my-cool-branch",
				PackageDescription:      "Some description",
			}
			spdxDependencyPackage := spdx.Package2_1{
				PackageName:           "some-dep",
				PackageSPDXIdentifier: spdxDependencyRef.ElementRefID,
				PackageVersion:        "NOASSERTION",
				PackageSupplier:       &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDescription:    "Some dep description",
			}
			packages := []*spdx.Package2_1{&spdxPackage, &spdxDependencyPackage}
			relationships := []*spdx.Relationship2_1{{
				RefA:         spdxDocumentRef,
				Relationship: "DESCRIBES",
				RefB:         spdxRef,
			}, {
				RefA:         spdxRef,
				Relationship: "DEPENDS_ON",
				RefB:         spdxDependencyRef,
			}}
			doc := spdx.Document2_1{
				DocumentName:      "SPDX SBOM",
				DocumentNamespace: "http://ionchannel.io",
				CreationInfo: &spdx.CreationInfo2_1{
					Creators:       []spdx.Creator{{Creator: "Monsieur Package Creator (mpc@mail.com)", CreatorType: "Person"}},
					CreatorComment: "some cool package SBOM",
				},
				Packages:      packages,
				Relationships: relationships,
			}
			p, err := ProjectsFromSPDX(&doc, true)
			Expect(err).To(BeNil())
			Expect(p).NotTo(BeNil())
			Expect(p[0].Aliases[0].Version).To(Equal(""))
			Expect(p[1].Aliases[0].Version).To(Equal(""))
		})
	})
	g.Describe("SPDX v2.2", func() {
		g.It("should return the top-level project when no dependencies requested", func() {
			spdxDocumentRef := spdx.MakeDocElementID("", "DOCUMENT")
			spdxRef := spdx.MakeDocElementID("", "some-cool-pkg")
			spdxDependencyRef := spdx.MakeDocElementID("", "some-dep")
			// Branch names containing "/" must survive the @branch parsing.
			spdxPackage := spdx.Package2_2{
				PackageName:             "some-cool-pkg",
				PackageSPDXIdentifier:   spdxRef.ElementRefID,
				PackageVersion:          "1.2.3",
				PackageSupplier:         &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDownloadLocation: "https://github.com/some-org/some-cool-pkg.git@ian/some-branch",
				PackageDescription:      "Some description",
			}
			spdxDependencyPackage := spdx.Package2_2{
				PackageName:             "some-dep",
				PackageSPDXIdentifier:   spdxDependencyRef.ElementRefID,
				PackageVersion:          "3.2.1",
				PackageSupplier:         &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDownloadLocation: "https://github.com/some-org/some-dep.git",
				PackageDescription:      "Some dep description",
			}
			packages := []*spdx.Package2_2{&spdxPackage, &spdxDependencyPackage}
			relationships := []*spdx.Relationship2_2{{
				RefA:         spdxDocumentRef,
				Relationship: "DESCRIBES",
				RefB:         spdxRef,
			}, {
				RefA:         spdxRef,
				Relationship: "DEPENDS_ON",
				RefB:         spdxDependencyRef,
			}}
			doc := spdx.Document2_2{
				DocumentName:      "SPDX SBOM",
				DocumentNamespace: "http://ionchannel.io",
				CreationInfo: &spdx.CreationInfo2_2{
					Creators:       []spdx.Creator{{Creator: "Monsieur Package Creator (mpc@mail.com)", CreatorType: "Person"}},
					CreatorComment: "some cool package SBOM",
				},
				Packages:      packages,
				Relationships: relationships,
			}
			p, err := ProjectsFromSPDX(&doc, false)
			Expect(err).To(BeNil())
			Expect(p).NotTo(BeNil())
			Expect(len(p)).To(Equal(1))
			Expect(*p[0].Name).To(Equal(spdxPackage.PackageName))
			Expect(*p[0].Type).To(Equal("git"))
			Expect(*p[0].Description).To(Equal(spdxPackage.PackageDescription))
			Expect(len(p[0].Aliases)).To(Equal(1))
			Expect(p[0].Aliases[0].Version).To(Equal(spdxPackage.PackageVersion))
			Expect(*p[0].Branch).To(Equal("ian/some-branch"))
		})
		g.It("should return all projects when dependencies requested", func() {
			spdxDocumentRef := spdx.MakeDocElementID("", "DOCUMENT")
			spdxRef := spdx.MakeDocElementID("", "some-cool-pkg")
			spdxDependencyRef := spdx.MakeDocElementID("", "some-dep")
			// No @branch suffix: the branch should default to "HEAD" for a
			// git-typed project (asserted below).
			spdxPackage := spdx.Package2_2{
				PackageName:             "some-cool-pkg",
				PackageSPDXIdentifier:   spdxRef.ElementRefID,
				PackageVersion:          "1.2.3",
				PackageSupplier:         &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDownloadLocation: "https://github.com/some-org/some-cool-pkg.git",
				PackageDescription:      "Some description",
			}
			spdxDependencyPackage := spdx.Package2_2{
				PackageName:           "some-dep",
				PackageSPDXIdentifier: spdxDependencyRef.ElementRefID,
				PackageVersion:        "3.2.1",
				PackageSupplier:       &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDescription:    "Some dep description",
			}
			packages := []*spdx.Package2_2{&spdxPackage, &spdxDependencyPackage}
			relationships := []*spdx.Relationship2_2{{
				RefA:         spdxDocumentRef,
				Relationship: "DESCRIBES",
				RefB:         spdxRef,
			}, {
				RefA:         spdxRef,
				Relationship: "DEPENDS_ON",
				RefB:         spdxDependencyRef,
			}}
			doc := spdx.Document2_2{
				DocumentName:      "SPDX SBOM",
				DocumentNamespace: "http://ionchannel.io",
				CreationInfo: &spdx.CreationInfo2_2{
					Creators:       []spdx.Creator{{Creator: "Monsieur Package Creator (mpc@mail.com)", CreatorType: "Person"}},
					CreatorComment: "some cool package SBOM",
				},
				Packages:      packages,
				Relationships: relationships,
			}
			p, err := ProjectsFromSPDX(&doc, true)
			Expect(err).To(BeNil())
			Expect(p).NotTo(BeNil())
			Expect(len(p)).To(Equal(2))
			Expect(*p[0].Name).To(Equal(spdxPackage.PackageName))
			Expect(*p[0].Type).To(Equal("git"))
			Expect(*p[0].Description).To(Equal(spdxPackage.PackageDescription))
			Expect(len(p[0].Aliases)).To(Equal(1))
			Expect(p[0].Aliases[0].Version).To(Equal(spdxPackage.PackageVersion))
			Expect(p[0].Aliases[0].Org).To(Equal(spdxPackage.PackageSupplier.Supplier))
			Expect(*p[0].Branch).To(Equal("HEAD"))
			Expect(*p[1].Name).To(Equal(spdxDependencyPackage.PackageName))
			Expect(*p[1].Type).To(Equal("source_unavailable"))
			Expect(*p[1].Description).To(Equal(spdxDependencyPackage.PackageDescription))
			Expect(len(p[1].Aliases)).To(Equal(1))
			Expect(p[1].Aliases[0].Version).To(Equal(spdxDependencyPackage.PackageVersion))
			Expect(p[1].Aliases[0].Org).To(Equal(spdxPackage.PackageSupplier.Supplier))
			Expect(*p[1].Branch).To(Equal(""))
		})
		g.It("should convert NOASSERTION version to blank string", func() {
			spdxDocumentRef := spdx.MakeDocElementID("", "DOCUMENT")
			spdxRef := spdx.MakeDocElementID("", "some-cool-pkg")
			spdxDependencyRef := spdx.MakeDocElementID("", "some-dep")
			spdxPackage := spdx.Package2_2{
				PackageName:             "some-cool-pkg",
				PackageSPDXIdentifier:   spdxRef.ElementRefID,
				PackageVersion:          "NOASSERTION",
				PackageSupplier:         &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDownloadLocation: "https://github.com/some-org/some-cool-pkg.git",
				PackageDescription:      "Some description",
			}
			spdxDependencyPackage := spdx.Package2_2{
				PackageName:           "some-dep",
				PackageSPDXIdentifier: spdxDependencyRef.ElementRefID,
				PackageVersion:        "NOASSERTION",
				PackageSupplier:       &spdx.Supplier{Supplier: "The Org", SupplierType: "Organization"},
				PackageDescription:    "Some dep description",
			}
			packages := []*spdx.Package2_2{&spdxPackage, &spdxDependencyPackage}
			relationships := []*spdx.Relationship2_2{{
				RefA:         spdxDocumentRef,
				Relationship: "DESCRIBES",
				RefB:         spdxRef,
			}, {
				RefA:         spdxRef,
				Relationship: "DEPENDS_ON",
				RefB:         spdxDependencyRef,
			}}
			doc := spdx.Document2_2{
				DocumentName:      "SPDX SBOM",
				DocumentNamespace: "http://ionchannel.io",
				CreationInfo: &spdx.CreationInfo2_2{
					Creators:       []spdx.Creator{{Creator: "Monsieur Package Creator (mpc@mail.com)", CreatorType: "Person"}},
					CreatorComment: "some cool package SBOM",
				},
				Packages:      packages,
				Relationships: relationships,
			}
			p, err := ProjectsFromSPDX(&doc, true)
			Expect(err).To(BeNil())
			Expect(p).NotTo(BeNil())
			Expect(p[0].Aliases[0].Version).To(Equal(""))
			Expect(p[1].Aliases[0].Version).To(Equal(""))
		})
	})
	g.Describe("parse emails from SPDX creator information", func() {
		g.It("should return an email if present", func() {
			creatorInfo := "My Name (myemail@mail.net)"
			email := parseCreatorEmail([]string{creatorInfo})
			Expect(email).ToNot(BeNil())
			Expect(email).To(Equal("myemail@mail.net"))
		})
		g.It("should handle a missing email", func() {
			creatorInfo := "My Name"
			email := parseCreatorEmail([]string{creatorInfo})
			Expect(email).ToNot(BeNil())
			Expect(email).To(Equal(""))
			// An empty creator list must also yield an empty string.
			email = parseCreatorEmail([]string{})
			Expect(email).ToNot(BeNil())
			Expect(email).To(Equal(""))
		})
	})
}
|
package my_ldap
import (
"log"
"fmt"
"github.com/go-ldap/ldap/v3"
// "github.com/google/uuid"
"strings"
// "strconv"
"encoding/json"
"net/http"
)
// PkdClinics is the JSON payload describing a PKD administrator's group and
// the clinics that belong to it. ClinicIds and ClinicNames are parallel
// slices populated from the same LDAP entries.
type PkdClinics struct {
	PkdAdmId    string   `json:"pkdAdmId"`    // the administrator's staff id
	JknName     string   `json:"jknName"`     // state health department name
	PkdName     string   `json:"pkdName"`     // district health office name
	ClinicIds   []string `json:"clinicIds"`   // clinic "cn" attributes
	ClinicNames []string `json:"clinicNames"` // clinic "clinicName" attributes
}
// GetPkdClinics binds to the local LDAP server with the user's credentials,
// resolves the user's group, and returns the group's clinics as indented
// JSON. A non-nil error is returned when the bind or either search fails,
// or when the directory data is missing or malformed.
func GetPkdClinics(userId string, userPwd string) (output []byte, err error) {
	l, err := ldap.DialURL("ldap://127.0.0.1:389")
	if err != nil {
		log.Print(err)
		return
	}
	defer l.Close()
	// Authenticate as the user; a failure here means bad credentials.
	userDN := fmt.Sprintf(STAFF_DN, userId)
	if err = l.Bind(userDN, userPwd); err != nil {
		log.Print(err)
		return
	}
	// Escape the user-supplied id so it cannot inject LDAP filter syntax.
	searchFilter := fmt.Sprintf("(staffId=%s)", ldap.EscapeFilter(userId))
	searchReq := ldap.NewSearchRequest(
		USER_BASE_DN,
		ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false,
		searchFilter,
		[]string{"groupUid"},
		nil,
	)
	sr, err := l.Search(searchReq)
	if err != nil {
		log.Print(err)
		return
	}
	// Guard every lookup instead of panicking on missing directory data.
	if len(sr.Entries) == 0 {
		err = fmt.Errorf("no LDAP entry found for staffId %q", userId)
		log.Print(err)
		return
	}
	groupUid := sr.Entries[0].GetAttributeValues("groupUid")
	if len(groupUid) == 0 {
		err = fmt.Errorf("entry for staffId %q has no groupUid attribute", userId)
		log.Print(err)
		return
	}
	// groupUid is expected to look like "<pkdName>-<jknName>[-...]".
	groupUidSegs := strings.Split(groupUid[0], "-")
	if len(groupUidSegs) < 2 {
		err = fmt.Errorf("malformed groupUid %q: want at least two '-' separated segments", groupUid[0])
		log.Print(err)
		return
	}
	pkdName := groupUidSegs[0]
	jknName := groupUidSegs[1]
	clinicSearchDN := fmt.Sprintf("ou=%s,ou=%s,ou=kkm-clinic,ou=groups,dc=example,dc=com",
		pkdName, jknName)
	// "(&)" is the LDAP "absolute true" filter: list every clinic entry.
	searchReq = ldap.NewSearchRequest(
		clinicSearchDN,
		ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false,
		"(&)",
		[]string{"clinicName", "cn"},
		nil,
	)
	sr, err = l.Search(searchReq)
	if err != nil {
		log.Print(err)
		return
	}
	var clinicNames, clinicIds []string
	for _, entry := range sr.Entries {
		clinicNames = append(clinicNames, entry.GetAttributeValue("clinicName"))
		clinicIds = append(clinicIds, entry.GetAttributeValue("cn"))
	}
	pkdClinics := PkdClinics{
		PkdAdmId:    userId,
		JknName:     jknName,
		PkdName:     pkdName,
		ClinicIds:   clinicIds,
		ClinicNames: clinicNames,
	}
	fmt.Printf("PkdClinics: %+v \n", pkdClinics)
	pkdClinicsJson, err := json.MarshalIndent(pkdClinics, "", "\t")
	if err != nil {
		log.Print(err)
		return
	}
	return pkdClinicsJson, nil
}
// GetPkdClinicsHandler is an HTTP endpoint that returns the caller's PKD
// clinic list as JSON. It expects "userId" and "userPwd" form values and
// answers 400 on malformed/missing input, 500 on lookup failure.
func GetPkdClinicsHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Headers", "authorization")
	// ParseForm errors were previously ignored; reject bad bodies instead.
	if err := r.ParseForm(); err != nil {
		log.Print(err)
		http.Error(w, "bad request", http.StatusBadRequest)
		return
	}
	fmt.Println("[GetPkdClinicsHandler] Request Form Data Received!")
	fmt.Println(r.Form)
	// FormValue returns "" for an absent key, unlike r.Form["userId"][0]
	// which panics when the parameter is missing.
	userId := r.FormValue("userId")
	userPwd := r.FormValue("userPwd")
	if userId == "" || userPwd == "" {
		http.Error(w, "userId and userPwd are required", http.StatusBadRequest)
		return
	}
	pkdClinics, err := GetPkdClinics(userId, userPwd)
	if err != nil {
		log.Print(err)
		w.WriteHeader(500)
		fmt.Fprintf(w, "Internal server error!")
		return
	}
	fmt.Fprintf(w, "%s", pkdClinics)
}
|
package main
import "fmt"
// Circle describes a circle by its radius.
type Circle struct {
	Radius float64
}

// area returns the circle's area, using 3.14 as the approximation of pi.
// The pointer receiver avoids copying the struct on each call; area itself
// never mutates the receiver.
func (c *Circle) area() float64 {
	return c.Radius * c.Radius * 3.14
}

// main computes and prints the area of a circle with radius 3.
func main() {
	var c Circle
	c.Radius = 3.0
	fmt.Printf("r=%v ,S= %v \n", c, c.area())
}
|
package xlog
// MultiSink is a Sink that fans every received log entry out to zero or
// more registered child sinks.
type MultiSink struct {
	sinks []Sink // registered sinks, in insertion order, without duplicates
}
// Add registers sink with the multi-sink. Adding a sink that is already
// registered is a no-op, so each sink receives every entry exactly once.
func (ms *MultiSink) Add(sink Sink) {
	for i := range ms.sinks {
		if ms.sinks[i] == sink {
			return // already registered
		}
	}
	ms.sinks = append(ms.sinks, sink)
}
// Remove unregisters sink from the multi-sink. Removing a sink that was
// never added is a no-op.
func (ms *MultiSink) Remove(sink Sink) {
	var kept []Sink
	for i := range ms.sinks {
		if ms.sinks[i] == sink {
			continue
		}
		kept = append(kept, ms.sinks[i])
	}
	ms.sinks = kept
}
// ReceiveLocally forwards a locally generated log entry to every
// registered sink, in registration order.
func (ms *MultiSink) ReceiveLocally(sev Severity, format string, params ...interface{}) {
	for i := range ms.sinks {
		ms.sinks[i].ReceiveLocally(sev, format, params...)
	}
}
// ReceiveFromChild forwards a log entry that originated in a child to
// every registered sink, in registration order.
func (ms *MultiSink) ReceiveFromChild(sev Severity, format string, params ...interface{}) {
	for i := range ms.sinks {
		ms.sinks[i].ReceiveFromChild(sev, format, params...)
	}
}
|
package main
import (
"github.com/gin-gonic/gin"
)
// DefaultStatus is an Enum storing default JSON errors based on the status.
// Values start at 1 so the zero value is invalid; JSON() subtracts 1 to
// index jsonStatus, so the order here must match that array exactly.
type DefaultStatus int

const (
	// Status503 is for service unavailable.
	Status503 DefaultStatus = 1 + iota
	// Status400 is for a client error.
	Status400
	// Status401 is for an unauthorized access.
	Status401
	// Status403 is for a forbidden access (and auth'ing won't help).
	Status403
	// Status404 is for a not found link.
	Status404
)
// jsonStatus holds the default response bodies, indexed by DefaultStatus-1;
// it must stay in the same order as the DefaultStatus constants.
var jsonStatus = [...]interface{}{ // This is in the same order as the DefaultStatus const.
	gin.H{"error": "service unavailable"},
	gin.H{"error": "client error"},
	gin.H{"error": "unauthorized"},
	gin.H{"error": "forbidden"},
	gin.H{"error": "not found"},
}

// StatusMsg stores the correspondence between the HTTP status code and the
// default response enum.
var StatusMsg = map[int]DefaultStatus{503: Status503, 400: Status400, 401: Status401, 403: Status403, 404: Status404}
// JSON returns the default JSON error body for the provided status.
// A value outside the declared DefaultStatus constants yields nil instead
// of panicking with an out-of-range index.
func (status DefaultStatus) JSON() interface{} {
	if status < Status503 || int(status) > len(jsonStatus) {
		return nil
	}
	return jsonStatus[status-1]
}
|
package data
// LoginInfo carries the credentials submitted at sign-in.
type LoginInfo struct {
	Account  string // user name or mobile number
	Password string // password
}
// RegisterInfo carries the data submitted when registering a new account.
type RegisterInfo struct {
	Account  string // user name
	Mobile   string // mobile number
	Password string // password
}
|
package main
import (
"context"
"fmt"
"sort"
"strconv"
"github.com/Nerzal/gocloak/v13"
"github.com/pkg/errors"
"github.com/signaux-faibles/keycloakUpdater/v2/logger"
)
// UpdateKeycloak reconciles Keycloak with the configured state: realm
// settings, clients, client roles (including composite roles), and users.
// It refuses to run when the configured admin user is missing from the
// habilitations file or from Keycloak, or when the number of user changes
// exceeds maxChangesToAccept (see areYouSureTooApplyChanges).
// NOTE(review): later role/user failures end in logger.Panic/panic while the
// earlier steps return an error — confirm this mix is intentional.
func UpdateKeycloak(
	kc *KeycloakContext,
	clientId string,
	realm *gocloak.RealmRepresentation,
	clients []*gocloak.Client,
	users Users,
	compositeRoles CompositeRoles,
	configuredUsername Username,
	maxChangesToAccept int,
) error {
	fields := logger.DataForMethod("UpdateAll")
	// The configured user must exist in the habilitations file...
	if _, exists := users[configuredUsername]; !exists {
		return errors.Errorf(
			"l'utilisateur passé dans la configuration n'est pas présent dans le fichier d'habilitations: %s",
			configuredUsername,
		)
	}
	// ...and in Keycloak itself.
	if _, err := kc.GetUser(configuredUsername); err != nil {
		return errors.Wrap(
			err,
			fmt.Sprintf(
				"l'utilisateur passé dans la configuration n'existe pas dans Keycloak : %s",
				configuredUsername,
			),
		)
	}
	logger.Info("START", fields)
	logger.Info("accepte "+strconv.Itoa(maxChangesToAccept)+" changements pour les users", fields)
	// checking users: diff the configured users against Keycloak's state.
	logger.Info("checking users", fields)
	missing, obsolete, update, current := users.Compare(*kc)
	changes := len(missing) + len(obsolete) + len(update)
	keeps := len(current)
	// Safety valve: refuse to apply an unexpectedly large batch of changes.
	if sure := areYouSureTooApplyChanges(changes, keeps, maxChangesToAccept); !sure {
		return errors.New("Trop de modifications utilisateurs.")
	}
	// gather roles, newRoles are created before users, oldRoles are deleted after users
	logger.Info("checking roles", fields)
	neededRoles := neededRoles(compositeRoles, users)
	newRoles, oldRoles := neededRoles.compare(kc.GetClientRoles()[clientId])
	logger.Info("starting keycloak configuration", fields)
	// realm conf (only when a realm representation was provided)
	if realm != nil {
		kc.SaveMasterRealm(*realm)
	}
	// clients conf
	if err := kc.SaveClients(clients); err != nil {
		return errors.Wrap(err, "error when saving clients")
	}
	// Create missing roles first so users can be granted them below.
	i, err := kc.CreateClientRoles(clientId, newRoles)
	if err != nil {
		logger.ErrorE("failed creating new roles", fields, err)
	}
	logger.Infof("%d roles created", i)
	// check and adjust composite roles
	if err = kc.ComposeRoles(clientId, compositeRoles); err != nil {
		logger.Panic(err)
	}
	if err = kc.CreateUsers(missing, users, clientId); err != nil {
		logger.Panic(err)
	}
	// disable obsolete users
	if err = kc.DisableUsers(obsolete, clientId); err != nil {
		logger.Panic(err)
	}
	// enable existing but disabled users
	if err = kc.EnableUsers(update); err != nil {
		logger.Panic(err)
	}
	// make sure everyone has correct roles
	if err = kc.UpdateCurrentUsers(current, users, clientId); err != nil {
		logger.Panic(err)
	}
	// delete old roles, now that no user references them anymore
	if len(oldRoles) > 0 {
		sort.Strings(oldRoles)
		fields.AddArray("toDelete", oldRoles)
		logger.Info("removing unused roles", fields)
		fields.Remove("toDelete")
		internalID, err := kc.GetInternalIDFromClientID(clientId)
		if err != nil {
			panic(err)
		}
		for _, role := range kc.FindKeycloakRoles(clientId, oldRoles) {
			err = kc.API.DeleteClientRole(context.Background(), kc.JWT.AccessToken, kc.getRealmName(), internalID, *role.Name)
			if err != nil {
				panic(err)
			}
		}
		// Refresh the cached role list after the deletions.
		err = kc.refreshClientRoles()
		if err != nil {
			panic(err)
		}
	}
	logger.Info("DONE", fields)
	return nil
}
// areYouSureTooApplyChanges decides whether the computed user changes may be
// applied: it refuses when no user would be kept, accepts everything when no
// positive threshold is configured, and otherwise enforces acceptedChanges
// as an upper bound on the number of changes.
func areYouSureTooApplyChanges(changes, keeps, acceptedChanges int) bool {
	fields := logger.DataForMethod("areYouSureTooApplyChanges")
	logger.Info("nombre d'utilisateurs à rajouter/supprimer/activer : "+strconv.Itoa(changes), fields)
	logger.Info("nombre d'utilisateurs à conserver : "+strconv.Itoa(keeps), fields)
	if keeps < 1 {
		fmt.Println("Aucun utilisateur à conserver -> Refus de prendre en compte les changements.")
		return false
	}
	if acceptedChanges <= 0 {
		// Bug fix: print the configured threshold itself, not the number of
		// changes (the message labels the value "acceptedChanges").
		fmt.Println("Tous les changements sont acceptés (acceptedChanges: " + strconv.Itoa(acceptedChanges) + ")")
		return true
	}
	if changes > acceptedChanges {
		fmt.Println("Trop de changements à prendre en compte. (Max : " + strconv.Itoa(acceptedChanges) + ")")
		return false
	}
	// Not too many changes: accept.
	return true
}
|
package main
import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"reflect"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"

	"github.com/karen-irc/popuko/epic"
	"github.com/karen-irc/popuko/input"
	"github.com/karen-irc/popuko/queue"
	"github.com/karen-irc/popuko/setting"
)
// AppServer is this application's HTTP-facing server. It bundles the GitHub
// API client, the auto-merge queue repository, and the loaded settings that
// the webhook handlers consult.
type AppServer struct {
	githubClient  *github.Client
	autoMergeRepo *queue.AutoMergeQRepo
	setting       *setting.Settings
}
// handleGithubHook is the GitHub WebHook endpoint. It validates the payload
// signature (via the package-level `config` settings, declared elsewhere in
// this package), parses the event, and dispatches by event type.
func (srv *AppServer) handleGithubHook(rw http.ResponseWriter, req *http.Request) {
	log.Println("info: Start: handle GitHub WebHook")
	log.Printf("info: Path is %v\n", req.URL.Path)
	defer log.Println("info End: handle GitHub WebHook")
	// GitHub delivers hooks as POST only.
	if req.Method != "POST" {
		rw.WriteHeader(http.StatusMethodNotAllowed)
		return
	}
	// Verify the hook signature against the configured shared secret.
	payload, err := github.ValidatePayload(req, config.WebHookSecret())
	if err != nil {
		rw.WriteHeader(http.StatusPreconditionFailed)
		io.WriteString(rw, err.Error())
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(req), payload)
	if err != nil {
		rw.WriteHeader(http.StatusPreconditionFailed)
		io.WriteString(rw, err.Error())
		return
	}
	switch event := event.(type) {
	case *github.IssueCommentEvent:
		ok, err := srv.processIssueCommentEvent(event)
		// Always 200 from here on: the hook was received; the response body
		// reports the processing outcome.
		rw.WriteHeader(http.StatusOK)
		if ok {
			io.WriteString(rw, "result: \n")
		}
		if err != nil {
			log.Printf("info: %v\n", err)
			io.WriteString(rw, err.Error())
		}
		return
	case *github.PushEvent:
		srv.processPushEvent(event)
		rw.WriteHeader(http.StatusOK)
		return
	case *github.StatusEvent:
		srv.processStatusEvent(event)
		rw.WriteHeader(http.StatusOK)
		return
	default:
		rw.WriteHeader(http.StatusOK)
		log.Println("warn: Unsupported type events")
		log.Println(reflect.TypeOf(event))
		io.WriteString(rw, "This event type is not supported: "+github.WebHookType(req))
		return
	}
}
// processIssueCommentEvent parses a newly created issue comment as a bot
// command and dispatches to the matching epic operation. The bool result
// reports whether a command was executed.
func (srv *AppServer) processIssueCommentEvent(ev *github.IssueCommentEvent) (bool, error) {
	log.Printf("Start: processCommitCommentEvent by %v\n", *ev.Comment.ID)
	defer log.Printf("End: processCommitCommentEvent by %v\n", *ev.Comment.ID)
	// Only react to newly created comments (not edits or deletions).
	if action := ev.Action; (action == nil) || (*action != "created") {
		return false, fmt.Errorf("info: accept `action === \"created\"` only")
	}
	repoOwner := *ev.Repo.Owner.Login
	repo := *ev.Repo.Name
	if !srv.setting.AcceptRepo(repoOwner, repo) {
		n := repoOwner + "/" + repo
		log.Printf("======= error: =======\n This event is from an unaccepted repository: %v\n==============", n)
		return false, fmt.Errorf("%v is not accepted", n)
	}
	body := *ev.Comment.Body
	ok, cmd := input.ParseCommand(body)
	if !ok {
		return false, fmt.Errorf("No operations which this bot should handle.")
	}
	if cmd == nil {
		return false, fmt.Errorf("error: unexpected result of parsing comment body")
	}
	repoInfo := epic.GetRepositoryInfo(srv.githubClient.Repositories, repoOwner, repo)
	if repoInfo == nil {
		return false, fmt.Errorf("debug: cannot get repositoryInfo")
	}
	switch cmd := cmd.(type) {
	case *input.AssignReviewerCommand:
		return epic.AssignReviewer(srv.githubClient, ev, cmd.Reviewer)
	case *input.AcceptChangeByReviewerCommand:
		commander := epic.AcceptCommand{
			repoOwner,
			repo,
			srv.githubClient,
			config.BotNameForGithub(),
			cmd,
			repoInfo,
			srv.autoMergeRepo,
		}
		return commander.AcceptChangesetByReviewer(ev)
	// NOTE(review): this branch builds the same AcceptCommand and also calls
	// AcceptChangesetByReviewer — confirm the "by others" path really is
	// meant to share that implementation.
	case *input.AcceptChangeByOthersCommand:
		commander := epic.AcceptCommand{
			repoOwner,
			repo,
			srv.githubClient,
			config.BotNameForGithub(),
			cmd,
			repoInfo,
			srv.autoMergeRepo,
		}
		return commander.AcceptChangesetByReviewer(ev)
	case *input.CancelApprovedByReviewerCommand:
		commander := epic.CancelApprovedCommand{
			BotName:       config.BotNameForGithub(),
			Client:        srv.githubClient,
			Owner:         repoOwner,
			Name:          repo,
			Number:        *ev.Issue.Number,
			Cmd:           cmd,
			Info:          repoInfo,
			AutoMergeRepo: srv.autoMergeRepo,
		}
		return commander.CancelApprovedChangeSet(ev)
	default:
		return false, fmt.Errorf("error: unreachable")
	}
}
// processPushEvent feeds a push on an accepted repository into the
// unmergeable-PR detection.
func (srv *AppServer) processPushEvent(ev *github.PushEvent) {
	log.Println("info: Start: processPushEvent by push id")
	defer log.Println("info: End: processPushEvent by push id")
	owner := *ev.Repo.Owner.Name
	log.Printf("debug: repository owner is %v\n", owner)
	name := *ev.Repo.Name
	log.Printf("debug: repository name is %v\n", name)
	if !srv.setting.AcceptRepo(owner, name) {
		full := owner + "/" + name
		log.Printf("======= error: =======\n This event is from an unaccepted repository: %v\n==============", full)
		return
	}
	epic.DetectUnmergeablePR(srv.githubClient, ev)
}
// processStatusEvent feeds a commit-status update on an accepted repository
// into the auto-merge branch checker.
func (srv *AppServer) processStatusEvent(ev *github.StatusEvent) {
	log.Println("info: Start: processStatusEvent")
	defer log.Println("info: End: processStatusEvent")
	owner := *ev.Repo.Owner.Login
	log.Printf("debug: repository owner is %v\n", owner)
	name := *ev.Repo.Name
	log.Printf("debug: repository name is %v\n", name)
	if !srv.setting.AcceptRepo(owner, name) {
		full := owner + "/" + name
		log.Printf("======= error: =======\n This event is from an unaccepted repository: %v\n==============", full)
		return
	}
	epic.CheckAutoBranch(srv.githubClient, srv.autoMergeRepo, ev)
}
// createGithubClient builds a GitHub API client authenticated with the
// personal access token taken from the settings.
func createGithubClient(config *setting.Settings) *github.Client {
	ts := oauth2.StaticTokenSource(
		&oauth2.Token{
			AccessToken: config.GithubToken(),
		},
	)
	// oauth2.NoContext is deprecated; context.Background() is its documented
	// replacement.
	tc := oauth2.NewClient(context.Background(), ts)
	client := github.NewClient(tc)
	return client
}
|
package otpauth
import (
"fmt"
"strconv"
"testing"
"unsafe"
)
// TestZeroPadding checks ZeroPadding against the 6- and 8-digit fixtures
// (Fixture and the zeroPaddingDigits* fixtures are defined elsewhere in
// this package).
func TestZeroPadding(t *testing.T) {
	run := func(t *testing.T, fixt Fixture, d int) {
		for _, tc := range fixt.TestCases {
			tc := tc // capture the range variable for the parallel subtest
			val, _ := strconv.ParseInt(tc.Value, 10, 64)
			t.Run(fmt.Sprintf("%d to %s", val, tc.Result), func(t *testing.T) {
				t.Parallel()
				if got := ZeroPadding(val, d); got != tc.Result {
					t.Errorf("ZeroPadding(%d, %d) = %s; want %s", val, d, got, tc.Result)
				}
			})
		}
	}
	t.Run("Digits6", func(t *testing.T) {
		t.Parallel()
		run(t, zeroPaddingDigits6, 6)
	})
	t.Run("Digits8", func(t *testing.T) {
		t.Parallel()
		run(t, zeroPaddingDigits8, 8)
	})
}
// TestItob checks the byte encoding of int64 values against the intToBytes
// fixture.
// NOTE(review): the extra b[7-i] != 0x7f assertion encodes an assumption
// about the fixture's ordering — confirm against the fixture definition.
func TestItob(t *testing.T) {
	for i, tc := range intToBytes.TestCases {
		i, tc := i, tc // capture the range variables for the parallel subtest
		val, _ := strconv.ParseInt(tc.Value, 10, 64)
		t.Run(fmt.Sprintf("%d to %s", val, tc.Result), func(t *testing.T) {
			t.Parallel()
			if b := Itob(int64(val)); fmt.Sprintf("%x", b) != tc.Result || b[7-i] != byte(0x7f) {
				t.Errorf("Itob(%d) = %x; want %s", val, b, tc.Result)
			}
		})
	}
}
// TestBase32Secret checks DecodeSecret and EncodeSecret against the base32
// fixtures (defined elsewhere in this package).
func TestBase32Secret(t *testing.T) {
	run := func(t *testing.T, fixt Fixture, f func(*testing.T, string, string)) {
		for _, tc := range fixt.TestCases {
			tc := tc // capture the range variable for the parallel subtest
			t.Run(fmt.Sprintf("%s to %s", tc.Value, tc.Result), func(t *testing.T) {
				t.Parallel()
				f(t, tc.Value, tc.Result)
			})
		}
	}
	t.Run("TestDecodeSecret", func(t *testing.T) {
		t.Parallel()
		decodeSecret := func(t *testing.T, v string, r string) {
			got, err := DecodeSecret(v)
			// Check the error first: on failure `got` is meaningless, and
			// the old order reported a confusing mismatch before the error.
			if err != nil {
				t.Errorf("DecodeSecret(\"%s\") return error: %s", v, err)
				return
			}
			// Zero-copy []byte -> string view; safe because `got` is not
			// mutated afterwards.
			if *(*string)(unsafe.Pointer(&got)) != r {
				t.Errorf("DecodeSecret(\"%s\") = %s; want %s", v, got, r)
			}
		}
		t.Run("NoPadding", func(t *testing.T) {
			t.Parallel()
			run(t, decodeBase32, decodeSecret)
		})
		t.Run("WithPadding", func(t *testing.T) {
			t.Parallel()
			run(t, decodeBase32WithPadding, decodeSecret)
		})
	})
	t.Run("TestEncodeSecret", func(t *testing.T) {
		t.Parallel()
		run(t, encodeBase32, func(t *testing.T, v string, r string) {
			if got := EncodeSecret([]byte(v)); got != r {
				t.Errorf("EncodeSecret(\"%s\") = %s; want %s", v, got, r)
			}
		})
	})
}
|
package sort
// MergeSort sorts arr ascending with top-down merge sort.
// Runs in O(n log n) regardless of the input order (like SelectionSort, it
// is insensitive to the initial arrangement).
func MergeSort(arr []int) []int {
	if len(arr) < 2 {
		return arr
	}
	mid := len(arr) / 2
	return Merge(MergeSort(arr[:mid]), MergeSort(arr[mid:]))
}

// Merge combines two ascending slices into a single ascending slice.
func Merge(left, right []int) []int {
	var merged []int
	i, j := 0, 0
	for i < len(left) && j < len(right) {
		if left[i] <= right[j] {
			merged = append(merged, left[i])
			i++
		} else {
			merged = append(merged, right[j])
			j++
		}
	}
	// Append whichever side still has elements (at most one is non-empty).
	merged = append(merged, left[i:]...)
	return append(merged, right[j:]...)
}
|
package API_Responses
// Response is the standard JSON envelope returned by the API.
type Response struct {
	StatusCode int                    `json:"statusCode"`
	Message    string                 `json:"message"`
	Data       map[string]interface{} `json:"data"`
}

// DefineResponse assembles a Response from its parts.
func DefineResponse(statusCode int, message string, data map[string]interface{}) Response {
	return Response{StatusCode: statusCode, Message: message, Data: data}
}

// R_200 builds a 200 OK response.
func R_200(message string, data map[string]interface{}) Response {
	return DefineResponse(200, message, data)
}

// R_204 builds a 204 No Content response.
func R_204(message string, data map[string]interface{}) Response {
	return DefineResponse(204, message, data)
}

// R_400 builds a 400 Bad Request response.
func R_400(message string, data map[string]interface{}) Response {
	return DefineResponse(400, message, data)
}

// R_404 builds a 404 Not Found response.
func R_404(message string, data map[string]interface{}) Response {
	return DefineResponse(404, message, data)
}
|
// upload_plain
package cmd
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"github.com/spf13/cobra"
"gopkg.in/rana/ora.v4"
)
var (
	// script holds the SQL/PLSQL statement executed with the uploaded file
	// (set via the --script flag in init).
	script string
)
// uploadPlainCmd represents the upload_plain command.
// It requires the --dsn and --file flags (package-level dsn/filename vars,
// declared elsewhere in this package) and exits with code -1 on failure.
var uploadPlainCmd = &cobra.Command{
	Use:   "upload_plain",
	Short: "Upload file to Oracle DB and exec script",
	Long: `File to Oracle DB uploader (plain).
This application is a tool to upload files to Oracle DB and execute script.`,
	Run: func(cmd *cobra.Command, args []string) {
		// TODO: Work your own magic here
		if (dsn == "") || (filename == "") {
			fmt.Println("Error:", "The Upload command demands presence of parameters")
			os.Exit(-1)
		}
		if err := uploadPlain(); err != nil {
			fmt.Println("Error:", err)
			os.Exit(-1)
		}
		fmt.Println("upload completed")
	},
}
// init registers the upload_plain command and its flags on the root command.
// dsn and filename are package-level variables shared with other commands
// (declared elsewhere in this package).
func init() {
	RootCmd.AddCommand(uploadPlainCmd)
	uploadPlainCmd.PersistentFlags().StringVar(&dsn, "dsn", "", "Username/Password@ConnStr")
	uploadPlainCmd.PersistentFlags().StringVar(&filename, "file", "", "Name of file")
	// Bug fix: help text typo ("fo" -> "for").
	uploadPlainCmd.PersistentFlags().StringVar(&script, "script", "", "Script for uploading")
}
// uploadPlain reads the file named by the package-level `filename` flag and
// executes the configured `script` against Oracle, binding the file content
// as a LOB plus the file's base name.
func uploadPlain() error {
	// The previous IIFE shadowed the package-level `filename` with a local
	// of the same name; use distinct locals instead. Backslashes are
	// normalized so Windows-style paths split correctly.
	srcPath := filename
	_, baseName := path.Split(strings.Replace(srcPath, "\\", "/", -1))
	b, err := ioutil.ReadFile(srcPath)
	if err != nil {
		return err
	}
	env, srv, ses, err := ora.NewEnvSrvSes(dsn)
	// Close whatever was opened, even on a partial failure.
	defer func() {
		if ses != nil {
			ses.Close()
		}
		if srv != nil {
			srv.Close()
		}
		if env != nil {
			env.Close()
		}
	}()
	if err != nil {
		return err
	}
	body := ora.Lob{Reader: bytes.NewReader(b)}
	_, err = ses.PrepAndExe(script, body, baseName)
	return err
}
|
package controllers
import (
"fmt"
"net/http"
)
// PrivacyController is the Privacy screen: it writes a fixed placeholder
// line to every request.
func PrivacyController() http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "Privacy Controller")
	}
}
|
package mr
import (
"encoding/json"
"fmt"
"hash/fnv"
"io/ioutil"
"log"
"net/rpc"
"os"
"sort"
"sync/atomic"
"time"
)
//
// Map functions return a slice of KeyValue.
//
// KeyValue is a single intermediate key/value pair produced by a map
// function and consumed by a reduce function.
type KeyValue struct {
	Key   string
	Value string
}
// ihash hashes key with 32-bit FNV-1a and clears the sign bit, so callers
// can use ihash(key) % NReduce to choose the reduce task number for each
// KeyValue emitted by Map.
func ihash(key string) int {
	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(key)) // hash.Hash.Write never fails
	return int(hasher.Sum32() & 0x7fffffff)
}
// MapF is the user-supplied map function: (filename, contents) -> pairs.
type MapF func(string, string) []KeyValue

// ReduceF is the user-supplied reduce function: (key, values) -> output.
type ReduceF func(string, []string) string

// Tasker runs map/reduce tasks one at a time on a background goroutine.
type Tasker struct {
	// NOTE(review): nReduce appears unused here — tasks carry their own
	// nReduce in WorkerTask; confirm whether this field is needed.
	nReduce int
	mapf    MapF
	reducef ReduceF
	// tasks queues accepted work for the processing goroutine.
	tasks chan WorkerTask
	// numTasks counts outstanding tasks; updated with atomic operations.
	numTasks int32
}
// NewTasker creates a Tasker and starts its background processing loop.
// The loop goroutine runs for the life of the process (no shutdown signal).
func NewTasker(mapf MapF, reducef ReduceF) *Tasker {
	t := &Tasker{
		mapf:     mapf,
		reducef:  reducef,
		numTasks: 0,
		tasks:    make(chan WorkerTask, 10),
	}
	go t.process()
	return t
}
// process consumes the task queue forever, dispatching each task to the
// map or reduce handler.
func (t *Tasker) process() {
	for {
		// Block until a new task is enqueued via Work.
		task := <-t.tasks
		// Work until done
		if task.isMap {
			t.doMap(task)
		} else {
			t.doReduce(task)
		}
	}
}
// doMap runs the map function over every input file of the task, writes the
// partitioned intermediate files, and notifies the master per successful
// file. The outstanding-task counter is decremented when the task finishes.
func (t *Tasker) doMap(task WorkerTask) {
	for _, filename := range task.filenames {
		// Read file; a failed file is logged and skipped.
		content, err := readMapInput(filename)
		if err != nil {
			log.Printf("%+v", err)
			continue
		}
		// Do map
		log.Println("performing map of task", task.id)
		kvs := t.mapf(filename, string(content))
		// Write intermediate files, one per reduce bucket.
		out, err := writeMapOutput(task.id, task.nReduce, kvs)
		// Return result to master
		if err == nil {
			notifyMapComplete(task.id, out)
		}
	}
	atomic.AddInt32(&t.numTasks, -1)
}
// notifyMapComplete reports a finished map task and its intermediate output
// files (keyed by reduce task ID) back to the master.
func notifyMapComplete(id int, outfiles map[int]string) {
	log.Printf("sending map result: id = %v, num files = %v", id, len(outfiles))
	args := &TaskCompletedArgs{
		IsMap:       true,
		ID:          id,
		OutputFiles: outfiles,
	}
	reply := &TaskCompletedReply{}
	// args and reply are already pointers; the previous &args/&reply sent a
	// needless double pointer through the RPC layer.
	call("Master.NotifyTaskComplete", args, reply)
}
// doReduce reads all intermediate files of the task, groups values by key,
// applies the reduce function, and writes mr-out-<id>. On success the master
// is notified; the outstanding-task counter is always decremented.
func (t *Tasker) doReduce(task WorkerTask) {
	defer atomic.AddInt32(&t.numTasks, -1)
	// Read intermediate files; unreadable ones are logged and skipped.
	log.Println("reduce: reading intermediate files for task", task.id)
	kvs := []KeyValue{}
	for _, filename := range task.filenames {
		kv, err := readReduceInput(filename)
		if err != nil {
			log.Println(err)
			continue
		}
		kvs = append(kvs, kv...)
	}
	// Sort by keys to produce sorted output
	sort.Sort(ByKey(kvs))
	// Do reduce
	log.Printf("reduce: performing task %d with %d keys", task.id, len(kvs))
	oname := fmt.Sprintf("mr-out-%d", task.id)
	file, err := os.Create(oname)
	if err != nil {
		// Previously this error was discarded, which would nil-deref below.
		log.Printf("cannot create %v: %v", oname, err)
		return
	}
	// Each [i, j) run below covers one distinct key.
	for i, j := 0, 0; i < len(kvs); i = j {
		values := []string{}
		for ; j < len(kvs); j++ {
			if kvs[j].Key != kvs[i].Key {
				break
			}
			values = append(values, kvs[j].Value)
		}
		output := t.reducef(kvs[i].Key, values)
		// Output
		fmt.Fprintf(file, "%v %v\n", kvs[i].Key, output)
	}
	err = file.Close()
	// Return result to master only if the output file closed cleanly.
	if err == nil {
		notifyReduceComplete(task.id, map[int]string{
			task.id: oname,
		})
	}
}
// notifyReduceComplete reports a finished reduce task and its output file
// back to the master.
func notifyReduceComplete(id int, outfiles map[int]string) {
	log.Printf("sending reduce result: id = %v, files = %v", id, outfiles)
	args := &TaskCompletedArgs{
		IsMap:       false,
		ID:          id,
		OutputFiles: outfiles,
	}
	reply := &TaskCompletedReply{}
	// args and reply are already pointers; the previous &args/&reply sent a
	// needless double pointer through the RPC layer.
	call("Master.NotifyTaskComplete", args, reply)
}
// readMapInput reads an entire map input file and returns its contents.
func readMapInput(filename string) (string, error) {
	log.Println("reading map input of file:", filename)
	file, err := os.Open(filename)
	if err != nil {
		return "", fmt.Errorf("cannot open %v: %w", filename, err)
	}
	// defer instead of a trailing Close: the previous code leaked the file
	// descriptor when ReadAll failed (it returned before Close).
	defer file.Close()
	content, err := ioutil.ReadAll(file)
	if err != nil {
		log.Printf("cannot read %v", filename)
		return "", fmt.Errorf("cannot read %v: %w", filename, err)
	}
	return string(content), nil
}
// readReduceInput decodes a stream of JSON-encoded KeyValue records from an
// intermediate file. Decoding stops at the first error (normally io.EOF).
func readReduceInput(filename string) ([]KeyValue, error) {
	file, err := os.Open(filename)
	if err != nil {
		return nil, fmt.Errorf("cannot open %v: %w", filename, err)
	}
	// The previous code never closed the file, leaking one descriptor per
	// intermediate file read.
	defer file.Close()
	kvs := []KeyValue{}
	dec := json.NewDecoder(file)
	for {
		var kv KeyValue
		if err := dec.Decode(&kv); err != nil {
			break
		}
		kvs = append(kvs, kv)
	}
	return kvs, nil
}
// writeMapOutput partitions kvs into per-reduce-bucket files named
// mr-<mapID>-<rid> and returns the map of reduce ID -> filename.
func writeMapOutput(mapID, nReduce int, kvs []KeyValue) (map[int]string, error) {
	log.Printf("writing map output: id = %v, len(kvs) = %v", mapID, len(kvs))
	// Sort by reduce-bucket hash so all pairs for one bucket are contiguous
	// and each output file is opened exactly once.
	sort.Sort(ByKeyHash{
		kvs: kvs,
		hasher: func(key string) int {
			return ihash(key) % nReduce
		},
	})
	reduceID := -1 // reduce task id of the current output file
	var file *os.File
	var enc *json.Encoder
	out := map[int]string{}
	for _, kv := range kvs {
		// Get correct file for this pair's bucket.
		rid := ihash(kv.Key) % nReduce
		if rid != reduceID {
			// Close old file if needed
			if file != nil {
				file.Close()
			}
			// Open new file; the previous code ignored the Create error and
			// would nil-deref on the first Encode.
			filename := fmt.Sprintf("mr-%d-%d", mapID, rid)
			f, err := os.Create(filename)
			if err != nil {
				return nil, err
			}
			file = f
			// One encoder per file instead of one per record.
			enc = json.NewEncoder(file)
			reduceID = rid
			out[rid] = filename
		}
		if err := enc.Encode(&kv); err != nil {
			log.Println(err)
			file.Close()
			return nil, err
		}
	}
	if file != nil {
		if err := file.Close(); err != nil {
			return nil, err
		}
	}
	return out, nil
}
// IsIdle reports whether no tasks are queued or running.
// numTasks is written with atomic ops from the worker goroutine, so it must
// be read atomically here too — the previous plain read was a data race.
func (t *Tasker) IsIdle() bool {
	return atomic.LoadInt32(&t.numTasks) == 0
}
// Work enqueues a task for the processing goroutine. The counter is bumped
// before enqueueing so IsIdle turns false immediately.
func (t *Tasker) Work(task WorkerTask) {
	atomic.AddInt32(&t.numTasks, 1)
	t.tasks <- task
}
// WorkerTask describes one unit of work handed out by the master.
type WorkerTask struct {
	filenames []string // input files (map) or intermediate files (reduce)
	isMap     bool     // true for a map task, false for a reduce task
	id        int      // task number assigned by the master
	nReduce   int      // number of reduce buckets (used by map tasks)
}
// askForTask requests the next task from the master over RPC.
// ok is false when the RPC itself failed; an Exit reply terminates the whole
// worker process via log.Fatal.
func askForTask() (WorkerTask, bool) {
	args := RequestTaskArgs{}
	reply := RequestTaskReply{}
	ok := call("Master.RequestTask", &args, &reply)
	if !ok {
		return WorkerTask{}, false
	}
	if reply.Exit {
		log.Fatal("received exit request, exiting...")
	}
	log.Printf("received a new task: map = %v, id = %v", reply.IsMap, reply.ID)
	return WorkerTask{
		filenames: reply.Filenames,
		isMap:     reply.IsMap,
		id:        reply.ID,
		nReduce:   reply.NumReduce,
	}, true
}
//
// main/mrworker.go calls this function.
//
// Worker polls the master once per second: whenever the local Tasker is
// idle it asks for a new task and enqueues it. It never returns normally —
// the master's Exit reply makes askForTask terminate the process.
func Worker(mapf func(string, string) []KeyValue,
	reducef func(string, []string) string) {
	// Your worker implementation here.
	tasker := NewTasker(mapf, reducef)
	for {
		// Only ask for work when the previous task has finished.
		if tasker.IsIdle() {
			// Idling, ask for task
			task, ok := askForTask()
			if ok {
				tasker.Work(task)
			}
		}
		// Sleep to prevent spamming master
		time.Sleep(1 * time.Second)
	}
	// uncomment to send the Example RPC to the master.
	// CallExample()
}
//
// example function to show how to make an RPC call to the master.
//
// the RPC argument and reply types are defined in rpc.go.
//
// CallExample sends X=99 and expects the master to reply with Y=100.
func CallExample() {
	// declare an argument structure.
	args := ExampleArgs{}
	// fill in the argument(s).
	args.X = 99
	// declare a reply structure.
	reply := ExampleReply{}
	// send the RPC request, wait for the reply.
	call("Master.Example", &args, &reply)
	// reply.Y should be 100.
	fmt.Printf("reply.Y %v\n", reply.Y)
}
//
// send an RPC request to the master, wait for the response.
// usually returns true.
// returns false if something goes wrong.
//
// Note: a dial failure is fatal — the worker exits via log.Fatal instead of
// returning false — so only Call errors produce a false result.
func call(rpcname string, args interface{}, reply interface{}) bool {
	// c, err := rpc.DialHTTP("tcp", "127.0.0.1"+":1234")
	sockname := masterSock()
	c, err := rpc.DialHTTP("unix", sockname)
	if err != nil {
		log.Fatal("dialing:", err)
	}
	defer c.Close()
	err = c.Call(rpcname, args, reply)
	if err == nil {
		return true
	}
	log.Println(err)
	return false
}
// ByKeyHash sorts KeyValues by the hash of their key (their reduce bucket),
// breaking ties lexicographically by key, so that all pairs destined for
// the same output file become contiguous.
type ByKeyHash struct {
	kvs    []KeyValue
	hasher func(string) int // maps a key to its reduce bucket
}

func (a ByKeyHash) Len() int      { return len(a.kvs) }
func (a ByKeyHash) Swap(i, j int) { a.kvs[i], a.kvs[j] = a.kvs[j], a.kvs[i] }
func (a ByKeyHash) Less(i, j int) bool {
	ih := a.hasher(a.kvs[i].Key)
	jh := a.hasher(a.kvs[j].Key)
	if ih != jh {
		return ih < jh
	}
	// Same bucket: order by key for deterministic, sorted output.
	return a.kvs[i].Key < a.kvs[j].Key
}
// ByKey sorts KeyValues lexicographically by key (for sorting by key).
type ByKey []KeyValue

func (a ByKey) Len() int           { return len(a) }
func (a ByKey) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
|
package proxy
import (
"encoding/json"
"fmt"
)
// JSONRpcResp is an incoming JSON-RPC message. Id and Params stay raw so
// decoding can be deferred to the handler.
type JSONRpcResp struct {
	Id     *json.RawMessage `json:"id"`
	Method string           `json:"method"`
	Params *json.RawMessage `json:"params"`
}
// StratumReq is a stratum-protocol request: a JSON-RPC message plus the
// worker name.
type StratumReq struct {
	JSONRpcResp
	Worker string `json:"worker"`
}
// String renders the request as a JSON-like log line. A nil Id or Params is
// rendered as "null" — previously calling MarshalJSON through a nil pointer
// dereferenced nil (json.RawMessage.MarshalJSON has a value receiver) and
// panicked.
func (s *StratumReq) String() string {
	id := "null"
	if s.Id != nil {
		m, err := s.Id.MarshalJSON()
		if err != nil {
			return ""
		}
		id = string(m)
	}
	params := "null"
	if s.Params != nil {
		p, err := s.Params.MarshalJSON()
		if err != nil {
			return ""
		}
		params = string(p)
	}
	return fmt.Sprintf(`{"id": %s,"method": %s,"params": %s}`, id, s.Method, params)
}
// JSONResponse is an outgoing JSON-RPC response; Error is nil on success.
type JSONResponse struct {
	Id      *json.RawMessage `json:"id"`
	Version string           `json:"version"`
	Result  interface{}      `json:"result"`
	Error   *ErrorReply      `json:"error"`
}
// String renders the response as a JSON-like log line. A nil Id is rendered
// as "null" — previously calling MarshalJSON through a nil pointer panicked.
// NOTE(review): a nil Error formatted with %s still triggers a recovered
// panic inside fmt (ErrorReply.String dereferences its receiver) — confirm
// whether callers ever log a success response this way.
func (s *JSONResponse) String() string {
	id := "null"
	if s.Id != nil {
		m, err := s.Id.MarshalJSON()
		if err != nil {
			return ""
		}
		id = string(m)
	}
	return fmt.Sprintf(`{"id": %s,"version": %s,"result": %v,"error": %s}`, id, s.Version, s.Result, s.Error)
}
// ErrorReply is the error object carried in a JSONResponse.
type ErrorReply struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

// String renders the reply for logging.
func (s *ErrorReply) String() string {
	const layout = `["code": %d,"message": %s]`
	return fmt.Sprintf(layout, s.Code, s.Message)
}
|
package main
/*
+----------+
| |-+
| internal | |-+ Common util packages
| | | |
+----------+ | |
+-----------+ |
+-----------+
+----------+
| |-+
| main | |-+ Application domain packages
| | | |
+----------+ | |
+-----------+ |
+-----------+
+----------+
| |-+
| vendor | |-+ Third-party packages
| | | |
+----------+ | |
+-----------+ |
+-----------+
*/
// common pattern:
// main -> command(flags, env) -> some deps -> router -> api -> server.ListenAndServe()
|
// Copyright (c) 2022 Zededa, Inc.
// SPDX-License-Identifier: Apache-2.0
package main
import (
"context"
"fmt"
"io/ioutil"
"os"
"strconv"
"github.com/lf-edge/eve/libs/depgraph"
)
// lastFileID is the most recently issued file ID; the first ID handed out is 1.
var lastFileID int

// newFileID returns the next unique numeric identifier for a file item.
func newFileID() int {
	next := lastFileID + 1
	lastFileID = next
	return next
}
// file represents a named, regular file.
type file struct {
	id          int         // unique ID, see newFileID
	filename    string      // base name, not the full path
	content     string      // full file content
	permissions os.FileMode // access rights used at creation
	parentDir   *directory  // directory the file lives in
}

// Name returns the file identifier.
// This is neither filename nor filepath, both of which can change and we want to use
// the efficient os.Rename(). If filepath would be used as the item ID, then change
// of the path would result in graph calling Delete + Create (i.e. re-creating the file
// on the new location as a completely new item). But we want the graph to call Modify,
// which inside uses the efficient os.Rename().
func (f file) Name() string {
	return strconv.Itoa(f.id)
}

// Label is used only for graph visualization and it does not have to be a unique
// item identifier.
func (f file) Label() string {
	return f.filename
}

// Type identifies the item kind within the dependency graph.
func (f file) Type() string {
	return "file"
}

// Equal returns false even when only the parent directory reference changes.
// This is because we want to call Modify to move the file in that case.
func (f file) Equal(item2 depgraph.Item) bool {
	f2 := item2.(file)
	return f.path() == f2.path() &&
		f.permissions == f2.permissions &&
		f.content == f2.content
}

// External reports whether the item is managed outside the reconciler;
// files here are always internally managed.
func (f file) External() bool {
	return false
}

// path joins the parent directory's name with the file's base name.
func (f file) path() string {
	return f.parentDir.Name() + "/" + f.filename
}

// String returns a human-readable description of the file.
func (f file) String() string {
	return fmt.Sprintf("path: %s\ncontent: %s\npermissions: %o",
		f.path(), f.content, f.permissions)
}

// Dependencies returns the parent directory as the file's only dependency.
func (f file) Dependencies() []depgraph.Dependency {
	return []depgraph.Dependency{
		{
			RequiredItem: depgraph.ItemRef{
				ItemType: directory{}.Type(),
				ItemName: f.parentDir.Name(),
			},
			Description: "For file to be created, the parent directory must already exist",
		},
	}
}
// fileConfigurator is the Configurator for files.
type fileConfigurator struct{}

// Create writes a new file with the item's content and permissions.
// NOTE(review): ioutil.WriteFile is deprecated since Go 1.16 — os.WriteFile
// is a drop-in replacement, but switching would leave io/ioutil unused if
// this is its only call site in the package; confirm before changing.
func (fc fileConfigurator) Create(_ context.Context, item depgraph.Item) error {
	f := item.(file)
	return ioutil.WriteFile(f.path(), []byte(f.content), f.permissions)
}
// Modify can rename the file and change the access rights.
// Change in the file content is handled through re-creation (for demo
// purposes) — see NeedsRecreate.
func (fc fileConfigurator) Modify(_ context.Context, oldItem, newItem depgraph.Item) (err error) {
	oldF := oldItem.(file)
	newF := newItem.(file)
	// Move first so the subsequent chmod targets the new path.
	if oldF.path() != newF.path() {
		if err := os.Rename(oldF.path(), newF.path()); err != nil {
			return err
		}
	}
	if oldF.permissions != newF.permissions {
		if err := os.Chmod(newF.path(), newF.permissions); err != nil {
			return err
		}
	}
	return nil
}
// Delete removes the file from the filesystem.
func (fc fileConfigurator) Delete(_ context.Context, item depgraph.Item) error {
	f := item.(file)
	return os.Remove(f.path())
}
// NeedsRecreate returns true when the file content changes, forcing the
// reconciler to Delete + Create instead of Modify.
// This is used just to demonstrate how Reconciler can re-create an item.
func (fc fileConfigurator) NeedsRecreate(oldItem, newItem depgraph.Item) (recreate bool) {
	oldF := oldItem.(file)
	newF := newItem.(file)
	return oldF.content != newF.content
}
|
// Copyright (c) 2020 Doc.ai and/or its affiliates.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vfio
import (
"context"
"os"
"path"
"github.com/golang/protobuf/ptypes/empty"
"github.com/networkservicemesh/api/pkg/api/networkservice"
"github.com/networkservicemesh/sdk/pkg/networkservice/core/next"
"github.com/networkservicemesh/sdk/pkg/tools/log"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
)
// vfioClient creates VFIO device nodes on the client side of the chain.
type vfioClient struct {
	vfioDir   string // directory where the device nodes are created
	cgroupDir string // client cgroup directory advertised to the endpoint
}

const (
	mkdirPerm = 0750 // permissions for the vfio directory
	mknodPerm = 0666 // permissions for the created device nodes
)

// NewClient returns a new VFIO client chain element
func NewClient(vfioDir, cgroupDir string) networkservice.NetworkServiceClient {
	return &vfioClient{
		vfioDir:   vfioDir,
		cgroupDir: cgroupDir,
	}
}
// Request advertises the client cgroup directory to the endpoint, forwards
// the request down the chain, and then creates the VFIO device nodes
// (the shared vfio device plus the connection's IOMMU group device) from
// the major/minor numbers returned in the mechanism parameters.
// NOTE(review): this assumes request.Connection.Context.ExtraContext is
// non-nil — confirm an upstream element always initializes it.
func (c *vfioClient) Request(ctx context.Context, request *networkservice.NetworkServiceRequest, opts ...grpc.CallOption) (*networkservice.Connection, error) {
	logEntry := log.Entry(ctx).WithField("vfioClient", "Request")
	request.Connection.Context.ExtraContext[clientCgroupDirKey] = c.cgroupDir
	conn, err := next.Client(ctx).Request(ctx, request, opts...)
	if err != nil {
		return nil, err
	}
	if err := os.Mkdir(c.vfioDir, mkdirPerm); err != nil && !os.IsExist(err) {
		logEntry.Error("failed to create vfio directory")
		return nil, err
	}
	if err := unix.Mknod(
		path.Join(c.vfioDir, vfioDevice),
		unix.S_IFCHR|mknodPerm,
		int(unix.Mkdev(atou(conn.Mechanism.Parameters[vfioMajorKey]), atou(conn.Mechanism.Parameters[vfioMinorKey]))),
	); err != nil && !os.IsExist(err) {
		logEntry.Errorf("failed to mknod device: %v", vfioDevice)
		return nil, err
	}
	igid := conn.Mechanism.Parameters[IommuGroupKey]
	if err := unix.Mknod(
		path.Join(c.vfioDir, igid),
		unix.S_IFCHR|mknodPerm,
		int(unix.Mkdev(atou(conn.Mechanism.Parameters[deviceMajorKey]), atou(conn.Mechanism.Parameters[deviceMinorKey]))),
	); err != nil && !os.IsExist(err) {
		// Bug fix: log the IOMMU group node that failed, not vfioDevice
		// (copy-paste from the branch above).
		logEntry.Errorf("failed to mknod device: %v", igid)
		return nil, err
	}
	return conn, nil
}
// Close simply delegates connection teardown to the next chain element.
func (c *vfioClient) Close(ctx context.Context, conn *networkservice.Connection, opts ...grpc.CallOption) (*empty.Empty, error) {
	return next.Client(ctx).Close(ctx, conn, opts...)
}
|
package main
import (
"golangStudy/project/note_money_struct/util"
)
// main starts the bookkeeping app: it builds an account and runs its
// interactive menu loop.
func main() {
	a := util.NewAccount()
	a.MainMenu()
}
|
package fakes
import "github.com/cloudfoundry-incubator/notifications/models"
// FakeUnsubscribesRepo is an in-memory test double for the unsubscribes
// repository; records are keyed by ClientID+KindID+UserID.
type FakeUnsubscribesRepo struct {
	Unsubscribes map[string]models.Unsubscribe
}

// NewFakeUnsubscribesRepo returns an empty fake repository.
func NewFakeUnsubscribesRepo() *FakeUnsubscribesRepo {
	return &FakeUnsubscribesRepo{
		Unsubscribes: map[string]models.Unsubscribe{},
	}
}
// Create stores the unsubscribe, failing with ErrDuplicateRecord when an
// entry with the same composite key already exists (mimicking a unique
// constraint).
func (fake *FakeUnsubscribesRepo) Create(conn models.ConnectionInterface, unsubscribe models.Unsubscribe) (models.Unsubscribe, error) {
	key := unsubscribe.ClientID + unsubscribe.KindID + unsubscribe.UserID
	if _, ok := fake.Unsubscribes[key]; ok {
		return unsubscribe, models.ErrDuplicateRecord{}
	}
	fake.Unsubscribes[key] = unsubscribe
	return unsubscribe, nil
}
// Upsert stores the unsubscribe under its composite key, overwriting any
// existing record; it never fails.
func (fake *FakeUnsubscribesRepo) Upsert(conn models.ConnectionInterface, unsubscribe models.Unsubscribe) (models.Unsubscribe, error) {
	fake.Unsubscribes[unsubscribe.ClientID+unsubscribe.KindID+unsubscribe.UserID] = unsubscribe
	return unsubscribe, nil
}
// Find returns the stored unsubscribe for clientID+kindID+userID, or
// ErrRecordNotFound when absent.
func (fake *FakeUnsubscribesRepo) Find(conn models.ConnectionInterface, clientID string, kindID string, userID string) (models.Unsubscribe, error) {
	key := clientID + kindID + userID
	if unsubscribe, ok := fake.Unsubscribes[key]; ok {
		// Bug fix: a successful lookup must return a nil error — the old
		// code returned ErrDuplicateRecord, copy-pasted from Create.
		return unsubscribe, nil
	}
	return models.Unsubscribe{}, models.ErrRecordNotFound{}
}
// Destroy removes the unsubscribe; it always reports 0 affected rows and no
// error, even when the key was absent.
func (fake *FakeUnsubscribesRepo) Destroy(conn models.ConnectionInterface, unsubscribe models.Unsubscribe) (int, error) {
	key := unsubscribe.ClientID + unsubscribe.KindID + unsubscribe.UserID
	delete(fake.Unsubscribes, key)
	return 0, nil
}
|
package leetcode
// letters maps a phone keypad digit ('2'..'9') to its letters; any other
// byte yields the empty string.
func letters(n byte) string {
	if n < '2' || n > '9' {
		return ""
	}
	keypad := [...]string{"abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"}
	return keypad[n-'2']
}
// letterCombinationsRealloc builds the keypad letter combinations by
// prepending each letter of the first digit to the combinations of the
// remaining digits.
// NOTE(review): the recursive call goes to letterCombinations (the tabular
// variant below) rather than to this function — confirm that is intended.
func letterCombinationsRealloc(digits string) []string {
	n := len(digits)
	if n == 0 {
		return []string{}
	}
	results := make([]string, 0, 3)
	for _, c := range letters(digits[0]) {
		if n == 1 {
			results = append(results, string(c))
		} else {
			for _, str := range letterCombinations(digits[1:]) {
				results = append(results, string(c)+str)
			}
		}
	}
	return results
}
// lc extends every partial combination in buff by each letter of digit
// `add`. For the digit's last letter the existing slots are reused in
// place; earlier letters append new entries, so the slice grows toward the
// full cross product with fewer reallocations.
func lc(buff []string, add byte) []string {
	n, res := len(buff), buff[:]
	ns := letters(add)
	for i, nsn := 0, len(ns); i < nsn; i++ {
		s := string(ns[i])
		if n == 0 {
			// First digit: seed the result with single letters.
			res = append(res, s)
		} else {
			for i2, s2 := range buff {
				if i == nsn-1 {
					// Last letter: overwrite the original slot in place.
					res[i2] = s2 + s
				} else {
					res = append(res, s2+s)
				}
			}
		}
	}
	return res
}
// Reduce the number of reallocations.
// letterCombinationsReduced pre-sizes the result to 3^n before folding each
// digit in via lc. Digits 7 and 9 map to four letters, so append may still
// grow the slice — this only affects capacity, never contents.
func letterCombinationsReduced(digits string) []string {
	n := len(digits)
	if n == 0 {
		return []string{}
	}
	// Renamed from `cap`, which shadowed the builtin of the same name.
	size := 3
	for exp := n - 1; exp > 0; exp-- {
		size *= 3
	}
	results := make([]string, 0, size)
	for i := 0; i < n; i++ {
		results = lc(results, digits[i])
	}
	return results
}
// calcKey collects the letter set of every valid digit and returns those
// sets together with the total number of combinations (the product of the
// set sizes). m is 0 when no digit maps to letters.
func calcKey(digits string) ([][]byte, int) {
	n := len(digits)
	ret := make([][]byte, 0, n)
	m := 1
	for i := 0; i < n; i++ {
		chars := letters(digits[i])
		nn := len(chars)
		if nn > 0 {
			m *= nn
			ret = append(ret, []byte(chars))
		}
	}
	if len(ret) == 0 {
		m = 0
	}
	return ret, m
}
// Fully tabularized
//
// letterCombinations returns all keypad letter combinations for digits.
// It computes the total combination count m up front, then fills an m x n
// byte grid column by column, cycling each digit's letters with a period
// derived from the sets to its right.
func letterCombinations(digits string) []string {
	n := len(digits)
	if n == 0 {
		return []string{}
	}
	// key holds one letter set per letter-bearing digit; m is the product
	// of the set sizes (0 when there are no letter-bearing digits).
	key, m := calcKey(digits)
	n = len(key)
	// Strings are immutable, so we need to build our
	// table as a grid of bytes.
	tab := make([][]byte, m)
	stepm := m
	// For each digit...
	for col := 0; col < n; col++ {
		chars := key[col]
		charsn := len(key[col])
		// stepm shrinks by this column's set size, so each letter repeats
		// in blocks of stepm consecutive rows.
		stepm /= charsn
		// ...assign a character per permutation.
		for row := 0; row < m; row++ {
			if col == 0 {
				// Rows are allocated lazily during the first column pass.
				tab[row] = make([]byte, n)
			}
			tab[row][col] = chars[(row/stepm)%charsn]
		}
	}
	// Now we need to turn that grid back into a list of strings.
	ret := make([]string, m)
	for i, bytes := range tab {
		ret[i] = string(bytes)
	}
	return ret
}
|
package client
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"github.com/BenjaminLam1202/test-go-config-cameras/hkvision/types/streaming"
)
/**
* @author : Donald Trieu
* @created : 9/24/21, Friday
**/
// StreamingStatus fetches the device streaming status from
// /ISAPI/Streaming/status and decodes the XML response body.
func (cli *Client) StreamingStatus() (streaming.StreamStatus, error) {
	var req http.Request
	req.URL = &url.URL{Scheme: cli.proto, Host: cli.host, Path: cli.getAPIPath("/ISAPI/Streaming/status", nil)}
	req.Method = http.MethodGet

	resp, err := cli.client.RoundTrip(&req)
	if err != nil {
		return streaming.StreamStatus{}, err
	}
	// Bug fix: the response body was never closed, leaking the underlying
	// connection and preventing its reuse by the transport.
	defer resp.Body.Close()

	var response streaming.StreamStatus
	if err = xml.NewDecoder(resp.Body).Decode(&response); err != nil {
		fmt.Println("Error unmarshalling from XML", err)
		return streaming.StreamStatus{}, err
	}
	return response, nil
}
|
package configlib
import (
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
)
var (
	// CurrentUser is resolved once in init; the process exits if lookup fails.
	CurrentUser *user.User
	// MetaConfigPath points at the meta configuration (~/.cloudconfig.meta).
	MetaConfigPath string
	// DefaultConfigPath is the default config location (~/.cloudconfig).
	DefaultConfigPath string
	// DefaultConfigType selects the default storage backend.
	DefaultConfigType = "local"
)

// error codes
const (
	// Unknown marks an unclassified failure.
	Unknown = -1
	// NOTE(review): iota is 1 on this ConstSpec, so the value is 2 —
	// presumably only uniqueness matters; confirm before relying on the
	// concrete number.
	ErrCodeCouldNotFindCurrentUser = iota + 1
)

// Storage types
const (
	Local = "local"
	S3    = "s3"
)
// init resolves the current OS user and derives the default config paths
// under their home directory. The process exits immediately if the user
// cannot be determined, since every path below depends on it.
func init() {
	var err error
	// Get the current user and exit if we can't
	CurrentUser, err = user.Current()
	if err != nil {
		ExitError(err, ErrCodeCouldNotFindCurrentUser)
	}
	// Set default paths
	MetaConfigPath = filepath.Join(CurrentUser.HomeDir, ".cloudconfig.meta")
	DefaultConfigPath = filepath.Join(CurrentUser.HomeDir, ".cloudconfig")
}
// ReplaceHomeDir replaces any matching prefixes
func ReplaceHomeDir(s string) string {
for _, r := range homeShortcuts {
s = strings.Replace(s, r, CurrentUser.HomeDir+string(os.PathSeparator), -1)
}
return s
}
// StringComp compares to see if s is in comps
func StringComp(s string, comps ...string) bool {
for _, cs := range comps {
if s == cs {
return true
}
}
return false
}
// ExitError prints an error to stderr and exits with the given code
func ExitError(err error, code int) {
fmt.Fprintln(os.Stderr, err)
os.Exit(code)
}
|
package controllers
import (
"alta-store/lib/database"
"alta-store/middlewares"
"alta-store/models"
"net/http"
"strconv"
"github.com/labstack/echo"
)
// GetAllUsers responds with every user in the database.
func GetAllUsers(c echo.Context) error {
	users, err := database.GetUsers()
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	response := map[string]interface{}{
		"status":     "Sukses menampilkan semua users",
		"list users": users,
	}
	return c.JSON(http.StatusOK, response)
}
// GetUser responds with the user identified by the :id path parameter.
func GetUser(c echo.Context) error {
	id, convErr := strconv.Atoi(c.Param("id"))
	if convErr != nil {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"message": "invalid id",
		})
	}
	user, dbErr := database.GetUser(id)
	if dbErr != nil {
		return echo.NewHTTPError(http.StatusBadRequest, dbErr.Error())
	}
	return c.JSON(http.StatusOK, map[string]interface{}{
		"status": "success",
		"users":  user,
	})
}
// CreateUser stores a new user built from the request and echoes it back.
func CreateUser(c echo.Context) error {
	created, err := database.CreateUser(c)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	payload := map[string]interface{}{
		"status": "success",
		"users":  created,
	}
	return c.JSON(http.StatusOK, payload)
}
// UpdateUser updates the user identified by the :id path parameter with the
// JSON body of the request.
func UpdateUser(c echo.Context) error {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"message": "invalid id",
		})
	}
	var user models.User
	// Bug fix: the bind error was previously ignored, so a malformed body
	// silently updated the record with zero values.
	if err := c.Bind(&user); err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	update_user, err := database.UpdateUser(id, user)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, map[string]interface{}{
		"message": "success",
		"data":    update_user,
	})
}
// DeleteUser removes the user identified by the :id path parameter.
func DeleteUser(c echo.Context) error {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"message": "ID nya salah BOS!",
		})
	}
	if _, err = database.DeleteUser(id); err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, map[string]interface{}{
		"message": "Data has been DELETED",
	})
}
// GetUserDetailControllers returns the detail record for the user in :id,
// but only when the JWT in the request belongs to that same user.
func GetUserDetailControllers(c echo.Context) error {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	// User id embedded in the JWT sent via the request header.
	loggedInUserId := middlewares.ExtractTokenUserId(c)
	// Users may only view their own details; anything else is a 401.
	if loggedInUserId != id {
		return echo.NewHTTPError(http.StatusUnauthorized, "unauthorized access, you can only see your own")
	}
	users, err := database.GetDetailUsers(id)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, map[string]interface{}{
		"status": "success",
		"users":  users,
	})
}
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package format
import (
"regexp"
"gollum/core"
)
// RegExp formatter
//
// This formatter parses a message using a regular expression, performs
// string (template) replacement and returns the result.
//
// Parameters
//
// - Posix: Set to true to compile the regular expression using posix semantics.
// By default this parameter is set to true.
//
// - Expression: Defines the regular expression used for parsing.
// For details on the regexp syntax see https://golang.org/pkg/regexp/syntax.
// By default this parameter is set to "(.*)"
//
// - Template: Defines the result string. Regexp matching groups can be referred
// to using "${n}", with n being the group's index. For other possible
// reference semantics, see https://golang.org/pkg/regexp/#Regexp.Expand.
// By default this parameter is set to "${1}"
//
// Examples
//
// This example extracts time and host from an imaginary log message format.
//
// exampleConsumer:
// Type: consumer.Console
// Streams: stding
// Modulators:
// - format.RegExp:
// Expression: "^(\\d+) (\\w+): "
// Template: "time: ${1}, host: ${2}"
type RegExp struct {
	core.SimpleFormatter `gollumdoc:"embed_type"`
	// expression is compiled in Configure from the "Expression" setting.
	expression *regexp.Regexp
	// template is the Expand() replacement template. NOTE(review): the
	// field is unexported yet carries config tags — presumably gollum's
	// config reader populates unexported fields; confirm it actually does.
	template []byte `config:"Template" default:"${1}"`
}
func init() {
	// Register the formatter so configs can instantiate it by type name.
	core.TypeRegistry.Register(RegExp{})
}
// Configure initializes this formatter with values from a plugin config.
func (format *RegExp) Configure(conf core.PluginConfigReader) {
var err error
if conf.GetBool("Posix", true) {
format.expression, err = regexp.CompilePOSIX(conf.GetString("Expression", "(.*)"))
} else {
format.expression, err = regexp.Compile(conf.GetString("Expression", "(.*)"))
}
conf.Errors.Push(err)
}
// ApplyFormatter update message payload
func (format *RegExp) ApplyFormatter(msg *core.Message) error {
content := format.GetSourceDataAsBytes(msg)
matches := format.expression.FindSubmatchIndex(content)
transformed := format.expression.Expand([]byte{}, format.template, content, matches)
format.SetTargetData(msg, transformed)
return nil
}
|
package main
import (
"fmt"
"io"
"log"
"github.com/minio/minio-go/v6"
uuid "github.com/nu7hatch/gouuid"
)
//TODO: create an interface and have several implementations e.g. test mocks
// writeImageToObjectStorage uploads an already-scaled image to the configured
// min.io bucket under a fresh UUID and returns the resulting ImageUpdate
// (UUID, public URL, and the target scale).
func writeImageToObjectStorage(scaledReader io.Reader, length int, imageType string, targetScale string, config imageScalerConfig) (ImageUpdate, error) {
	var imageUpdate ImageUpdate

	imageUUID, err := uuid.NewV4()
	if err != nil {
		log.Printf("error while creating image UUID: %v", err)
		return imageUpdate, err
	}

	minioClient, err := minio.New(config.minioURL, config.minioAccessKey, config.minioSecret, config.minioSecure)
	if err != nil {
		log.Printf("error while creating min.io client: %v", err)
		return imageUpdate, err
	}

	var minioOpts minio.PutObjectOptions
	minioOpts.ContentType = fmt.Sprintf("image/%s", imageType)

	// Size -1 lets the client stream without knowing the length up front;
	// the `length` parameter is currently unused (kept for interface parity).
	_, err = minioClient.PutObject(config.minioBucketName, imageUUID.String(), scaledReader, -1, minioOpts)
	if err != nil {
		log.Printf("error while writing image to min.io: %v", err)
		return imageUpdate, err
	}

	imageUpdate.ImageUUID = imageUUID.String()
	imageUpdate.URL = fmt.Sprintf("%s/%s/%s", config.minioExternalURL, config.minioBucketName, imageUUID)
	imageUpdate.ImageScale = targetScale
	// A redundant `if err != nil` re-check that sat after the assignments
	// above was removed; err is already known to be nil at this point.
	return imageUpdate, nil
}
|
// Package utils - utils funcs
package utils
|
package main
import (
"bufio"
"fmt"
"io"
"log"
"os"
"strings"
)
func ProcessINPUT(rd io.Reader) string {
var found = false
fmt.Print("Enter data: ")
in := bufio.NewReader(rd)
line, err := in.ReadString('\n')
if err != nil {
log.Fatal(err)
}
line = strings.Trim(line, "\n")
line = strings.ToLower(line)
fmt.Printf("Data: '%v'\n", line)
if strings.HasPrefix(line, "i") && strings.HasSuffix(line, "n") && strings.Contains(line, "a") {
found = true
}
if found {
fmt.Printf("%+v\n", "Found!")
return "Found!"
}
fmt.Printf("%+v\n", "Not Found!")
return "Not Found!"
}
// Main ...
func main() {
for {
ProcessINPUT(os.Stdin)
}
}
|
package main
import (
"time"
"fmt"
)
// main computes and prints the Unix timestamp for 15:59:50 on the last day
// of the previous month (relative to now). The commented-out code below is
// scratch work exploring Go's time formatting and parsing APIs.
func main() {
	//year := time.Now().Year()
	//year_str := strconv.Itoa(year)
	//year, month, _ := time.Now().Date()
	//thisMonth := time.Date(year, month, 1, 0, 0, 0, 0, time.Local)
	//start := thisMonth.AddDate(0, 1, 0).Format("2006-01-02")
	//fmt.Println(year,month,thisMonth,start)
	//new_data := year_str + "-" + month + "-01"
	//fmt.Println(new_data)
	//the_time, _ := time.Parse("2006-01-02", "2018-08-01")
	//dd := time.Unix(1533052790,0).Day()
	//yy := time.Unix(1533052790,0).Year() 1533081590
	//mm := time.Unix(1533052790,0).Month()
	//
	//fmt.Println(yy,mm,dd)
	year, month, _ := time.Now().Date()
	// First day of the current month, in local time.
	thisMonth := time.Date(year, month, 1, 0, 0, 0, 0, time.Local)
	// One day earlier is the last day of the previous month.
	end := thisMonth.AddDate(0, 0, -1).Format("2006-01-02")
	// NOTE(review): time.Parse interprets the string as UTC while thisMonth
	// was built with time.Local — confirm the intended timezone. The parse
	// error is silently discarded.
	the_time, _ := time.Parse("2006-01-02 15:04:05", end + " 15:59:50")
	fmt.Println(the_time.Unix())
	//t := time.Now()
	//fmt.Println(t)
	//
	//fmt.Println(t.UTC().Format(time.UnixDate))
	//
	//fmt.Println(t.Unix())
	//
	//timestamp := strconv.FormatInt(t.UTC().UnixNano(), 10)
	//fmt.Println(timestamp)
	//timestamp = timestamp[:10]
	//fmt.Println(timestamp)
	//const shortForm = "2006-Feb-02"
	//t, _ := time.Parse(shortForm, "2013-Feb-03")
	//fmt.Println(t)
	//get the current unix timestamp
	//
	//timestamp := time.Now().Unix()
	//
	//fmt.Println(timestamp)
	//
	//
	//
	////format it back to a string; tm is a time.Time value
	//
	//tm := time.Unix(timestamp, 0)
	//
	//fmt.Println(tm.Format("2006-01-02 03:04:05 PM"))
	//
	//fmt.Println(tm.Format("02/01/2006 15:04:05 PM"))
	//convert a string to a unix timestamp: the first argument is the layout, the second is the time string to parse
	//tm2, _ := time.Parse("01/02/2006", "07/31/2018")
	//
	//fmt.Println(tm2.Unix()-1533052790)
}
|
package util
import (
"fmt"
"os"
"time"
"github.com/shanghuiyang/rpi-devices/util/geo"
)
const (
	// timeFormat is used both for the log file name and for row timestamps.
	timeFormat = "2006-01-02T15:04:05"
)
// GPSLogger ...
type GPSLogger struct {
f *os.File
chPoints chan *geo.Point
}
// NewGPSLogger ...
func NewGPSLogger() *GPSLogger {
fname := time.Now().Format(timeFormat) + ".csv"
f, err := os.OpenFile(fname, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
return nil
}
f.WriteString("timestamp,lat,lon\n")
t := &GPSLogger{
f: f,
chPoints: make(chan *geo.Point, 32),
}
go t.start()
return t
}
// start drains chPoints, appending one CSV row per point, until the channel
// is closed. It runs on its own goroutine, launched by NewGPSLogger.
func (l *GPSLogger) start() {
	for pt := range l.chPoints {
		tm := time.Now().Format(timeFormat)
		line := fmt.Sprintf("%v,%.6f,%.6f\n", tm, pt.Lat, pt.Lon)
		// Write errors are silently ignored.
		l.f.WriteString(line)
	}
}
// AddPoint ...
func (l *GPSLogger) AddPoint(pt *geo.Point) {
if pt == nil {
return
}
l.chPoints <- pt
}
// Close ...
func (l *GPSLogger) Close() {
l.f.Close()
close(l.chPoints)
}
|
package models
import (
"github.com/jinzhu/gorm"
"time"
)
// Blood is one blood-pressure measurement reported by a device.
type Blood struct {
	ID int `json:"id" gorm:"primaryKey;autoIncrement" db:"id"`
	DeviceId string `json:"device_id" db:"device_id"`
	// Pulse rate; the gorm tag's column comment reads "pulse" in Chinese.
	Pulse int `json:"pulse" db:"pulse" gorm:"comment:脈搏"`
	// Diastolic pressure (Chinese column comment: "diastolic pressure").
	Diastolic float32 `json:"diastolic" db:"diastolic" gorm:"comment:舒張壓"`
	// Systolic pressure (Chinese column comment: "systolic pressure").
	Systolic float32 `json:"systolic" db:"systolic" gorm:"comment:收縮壓"`
	CreatedAt time.Time `json:"created_at" db:"created_at"`
	UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
// TableName fixes the gorm table name to "bloods".
func (Blood) TableName() string {
	return "bloods"
}
// GetBlood returns a gorm scope that filters blood-pressure records by
// device id and record id. (Original comment: "get blood pressure".)
func (b *Blood) GetBlood(deviceId string, id string) func (db *gorm.DB) *gorm.DB {
	return func(db *gorm.DB) *gorm.DB {
		return db.Where("device_id = ? AND id = ?", deviceId, id)
	}
}
|
package core
import (
"time"
"github.com/golang/protobuf/ptypes"
pb "github.com/popstk/olddriver/backend"
)
// Item -
type Item struct {
Title string `json:"title"`
URL string `json:"url"`
Tag string `json:"tag"`
Time time.Time `json:"time"`
Baidu []string `json:"baidu" bson:"baidu,omitempty"`
Magnet []string `json:"magnet" bson:"magnet,omitempty"`
Link []string `json:"link" bson:"link,omitempty"`
}
// ToProtoItem -
func ToProtoItem(item *Item) (*pb.Item, error) {
ts, err := ptypes.TimestampProto(item.Time)
if err != nil {
return nil, err
}
return &pb.Item{
Url: item.URL,
Tag: item.Tag,
Title: item.Title,
Time: ts,
Baidu: item.Baidu,
Magnet: item.Magnet,
Link: item.Link,
}, nil
}
// FromProtoItem -
func FromProtoItem(item *pb.Item) (*Item, error) {
ts, err := ptypes.Timestamp(item.Time)
if err != nil {
return nil, err
}
return &Item{
URL: item.Url,
Tag: item.Tag,
Title: item.Title,
Time: ts,
Baidu: item.Baidu,
Magnet: item.Magnet,
Link: item.Link,
}, nil
}
|
package dirlist
import (
"net/http"
"io"
"log"
"html/template"
"os"
"net/url"
"sort"
)
// DirList serves files and rendered directory listings from a filesystem.
type DirList struct {
	// FS is the root filesystem requests are resolved against.
	FS http.FileSystem
	// Tpl renders directory listings (receives a *TplContext).
	Tpl *template.Template
	// IndexFiles lists candidate index file names checked against each
	// directory's entries.
	IndexFiles []string
}
// ServeHTTP serves files and directory listings rooted at d.FS. Only GET is
// supported. Directory URLs are canonicalized to a trailing slash, and the
// first entry of d.IndexFiles present in the directory (priority order) is
// handed to the listing template.
func (d *DirList) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.NotFound(w, r)
		return
	}
	urlPath := r.URL.Path
	file, err := d.FS.Open(urlPath)
	if err != nil {
		http.NotFound(w, r)
		return
	}
	defer file.Close()
	st, err := file.Stat()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if !st.IsDir() {
		d.ServeFile(w, r, file)
		return
	}
	// Directories are always addressed with a trailing slash so relative
	// links in the listing resolve correctly.
	if urlPath[len(urlPath)-1:] != "/" {
		http.Redirect(w, r, urlPath+"/", http.StatusMovedPermanently)
		return
	}
	files, err := file.Readdir(0)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	sort.Sort(FileSorter(files))
	var indexFile http.File
	{
		var indexFileName string
		// Bug fix: the original `break` only left the inner loop, so a
		// later, lower-priority IndexFiles entry could overwrite an earlier
		// match. Stop at the first entry found in the directory.
	search:
		for _, fn := range d.IndexFiles {
			for _, f := range files {
				if fn == f.Name() {
					indexFileName = fn
					break search
				}
			}
		}
		if indexFileName != "" {
			f, err := d.FS.Open(urlPath + indexFileName)
			if err == nil {
				indexFile = f
				defer f.Close()
			}
		}
	}
	d.ServeDir(w, r, files, indexFile)
}
// ServeFile streams the file's contents to the client, logging any copy
// error instead of reporting it to the response.
func (d *DirList) ServeFile(w http.ResponseWriter, r *http.Request, file http.File) {
	if _, err := io.Copy(w, file); err != nil {
		log.Print(err)
	}
}
// ServeDir renders the directory-listing template with the (pre-sorted)
// entries, the optional index file, and request metadata.
func (d *DirList) ServeDir(w http.ResponseWriter, r *http.Request, files []os.FileInfo, index http.File) {
	w.Header().Add("Content-Type", "text/html; charset=UTF-8")
	ctx := &TplContext{
		Files: files,
		Index: index,
		Url:   r.URL,
		Host:  r.Host,
	}
	if err := d.Tpl.Execute(w, ctx); err != nil {
		log.Printf("Failed to execure template: %s", err)
	}
}
// TplContext is the data handed to the directory-listing template.
type TplContext struct {
	// Files holds the directory entries, sorted by name.
	Files []os.FileInfo
	// Index is the matching index file, or nil when none was found.
	Index http.File
	Url *url.URL
	Host string
}
// For sorting files list
//
// FileSorter implements sort.Interface over directory entries, ordering
// them lexicographically by file name.
type FileSorter []os.FileInfo
func (f FileSorter) Len() int {
	return len(f)
}
func (f FileSorter) Swap(i, j int) {
	f[i], f[j] = f[j], f[i]
}
func (f FileSorter) Less(i, j int) bool {
	return f[i].Name() < f[j].Name()
}
|
package main
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/abiosoft/ishell"
"github.com/abiosoft/readline"
"github.com/gliderlabs/ssh"
"github.com/gtfierro/xboswave/ingester/types"
"github.com/olekukonko/tablewriter"
logrus "github.com/sirupsen/logrus"
)
// parseFilterFromArgs builds a RequestFilter from shell arguments of the
// form "enabled", "hasError" or "param=value" (params: id, schema, plugin,
// namespace, resource). A nil filter (match everything) is returned when no
// arguments are given.
func parseFilterFromArgs(args []string) (*RequestFilter, error) {
	if len(args) == 0 {
		return nil, nil
	}
	filter := &RequestFilter{}
	for _, arg := range args {
		if arg == "enabled" {
			filter.Enabled = &_TRUE
			continue
		}
		if arg == "hasError" {
			filter.HasError = &_TRUE
			continue
		}
		parts := strings.SplitN(arg, "=", 2)
		if len(parts) == 1 {
			return nil, fmt.Errorf("filters need to be of form param=value")
		}
		switch parts[0] {
		case "id":
			// Check the conversion error before storing the value (the old
			// code assigned the pointer before looking at err).
			_id, err := strconv.Atoi(parts[1])
			if err != nil {
				return nil, err
			}
			filter.Id = &_id
		case "schema":
			filter.Schema = &parts[1]
		case "plugin":
			logrus.Error(parts)
			filter.Plugin = &parts[1]
		case "namespace":
			filter.Namespace = &parts[1]
		case "resource":
			filter.Resource = &parts[1]
		}
		// NOTE(review): unknown parameter names are silently ignored;
		// confirm whether that should be an error instead.
	}
	return filter, nil
}
// shell starts an SSH-accessible interactive admin shell for the ingester
// on localhost:2222. Each SSH session gets its own ishell instance wired to
// the session's stdin/stdout, with commands to add, delete, enable, disable
// and list archive requests. ListenAndServe blocks; a listen error is fatal.
func (ingest *Ingester) shell(cfg Config) {
	ssh.Handle(func(s ssh.Session) {
		//io.WriteString(s, fmt.Sprintf("Hello %s\n", s.User()))
		// Route the shell's I/O through the SSH session stream.
		cfg := &readline.Config{
			Prompt: ">>",
			Stdin: s,
			StdinWriter: s,
			Stdout: s,
			Stderr: s,
		}
		shell := ishell.NewWithConfig(cfg)
		// display info.
		shell.Println("XBOS/WAVE ingester shell")
		shell.Interrupt(func(c *ishell.Context, count int, input string) {
			c.Println("Use 'exit' or ctl-d to disconnect")
		})
		// add: request archival of a new schema/plugin/namespace/resource.
		shell.AddCmd(&ishell.Cmd{
			Name: "add",
			Func: func(c *ishell.Context) {
				c.ShowPrompt(false)
				defer c.ShowPrompt(true)
				if len(c.Args) != 4 {
					c.Println("add <schema> <plugin> <namespace> <resource>")
					return
				}
				req := &ArchiveRequest{
					Schema: c.Args[0],
					Plugin: c.Args[1],
					URI: types.SubscriptionURI{
						Namespace: c.Args[2],
						Resource: c.Args[3],
					},
				}
				if err := ingest.addArchiveRequest(req); err != nil {
					logrus.Error(err)
					c.Err(err)
					return
				}
				c.Println("Successfully requested archival")
				c.Println(c.Args)
			},
		})
		// del archive requests
		shell.AddCmd(&ishell.Cmd{
			Name: "delete",
			Func: func(c *ishell.Context) {
				c.ShowPrompt(false)
				defer c.ShowPrompt(true)
				c.Println("delete id=<id> schema=<schema> plugin=<plugin> namespace=<namespace> resource=<resource>")
				filter, err := parseFilterFromArgs(c.Args)
				if err != nil {
					c.Println(err.Error())
					return
				}
				reqs, err := ingest.cfgmgr.List(filter)
				if err != nil {
					c.Err(err)
					return
				}
				// Render the deleted requests as a table on the SSH stream.
				table := tablewriter.NewWriter(s)
				table.SetAutoMergeCells(true)
				table.SetRowLine(true)
				table.SetHeader([]string{"plugin", "namespace", "resource", "schema"})
				for _, req := range reqs {
					if err := ingest.delArchiveRequest(&req); err != nil {
						logrus.Error(err)
						c.Err(err)
						return
					}
					table.Append([]string{req.Plugin, req.URI.Namespace, req.URI.Resource, req.Schema})
				}
				table.Render()
			},
		})
		// enable archive requests
		shell.AddCmd(&ishell.Cmd{
			Name: "enable",
			Func: func(c *ishell.Context) {
				c.ShowPrompt(false)
				defer c.ShowPrompt(true)
				c.Println("enable id=<id> schema=<schema> plugin=<plugin> namespace=<namespace> resource=<resource>")
				filter, err := parseFilterFromArgs(c.Args)
				if err != nil {
					c.Println(err.Error())
					return
				}
				reqs, err := ingest.cfgmgr.List(filter)
				if err != nil {
					c.Err(err)
					return
				}
				for _, req := range reqs {
					c.Printf("Enabling req %d...\n", req.Id)
					if err := ingest.enableArchiveRequest(&req); err != nil {
						logrus.Error(err)
						c.Err(err)
						return
					}
				}
			},
		})
		// disable: stop archiving for every matching request.
		shell.AddCmd(&ishell.Cmd{
			Name: "disable",
			Func: func(c *ishell.Context) {
				c.ShowPrompt(false)
				defer c.ShowPrompt(true)
				c.Println("disable id=<id> schema=<schema> plugin=<plugin> namespace=<namespace> resource=<resource>")
				filter, err := parseFilterFromArgs(c.Args)
				if err != nil {
					c.Println(err.Error())
					return
				}
				reqs, err := ingest.cfgmgr.List(filter)
				if err != nil {
					c.Err(err)
					return
				}
				for _, req := range reqs {
					c.Printf("Disabling req %d...\n", req.Id)
					if err := ingest.disableArchiveRequest(&req); err != nil {
						logrus.Error(err)
						c.Err(err)
						return
					}
				}
			},
		})
		// list: show the matching archive requests in a table.
		shell.AddCmd(&ishell.Cmd{
			Name: "list",
			Func: func(c *ishell.Context) {
				c.ShowPrompt(false)
				defer c.ShowPrompt(true)
				// build filter
				filter, err := parseFilterFromArgs(c.Args)
				if err != nil {
					c.Err(err)
					return
				}
				reqs, err := ingest.cfgmgr.List(filter)
				if err != nil {
					c.Err(err)
					return
				}
				table := tablewriter.NewWriter(s)
				table.SetHeader([]string{"id", "enabled?", "namespace", "resource", "schema", "plugin", "created", "error", "error time"})
				// The two error columns are highlighted in red.
				table.SetColumnColor(tablewriter.Colors{0},
					tablewriter.Colors{0},
					tablewriter.Colors{0},
					tablewriter.Colors{0},
					tablewriter.Colors{0},
					tablewriter.Colors{0},
					tablewriter.Colors{0},
					tablewriter.Colors{tablewriter.FgHiRedColor},
					tablewriter.Colors{tablewriter.FgHiRedColor},
				)
				for _, req := range reqs {
					var enabledStr string
					if req.Enabled {
						enabledStr = "1"
					} else {
						enabledStr = "0"
					}
					row := []string{fmt.Sprintf("%d", req.Id), enabledStr, req.URI.Namespace, req.URI.Resource, req.Schema, req.Plugin, req.Inserted.Format(time.RFC3339), req.LastError}
					// A zero/negative error timestamp means "no error yet".
					if req.ErrorTimestamp.UnixNano() <= 0 {
						row = append(row, "")
					} else {
						row = append(row, req.ErrorTimestamp.Format(time.RFC3339))
					}
					table.Append(row)
				}
				table.Render()
			},
		})
		// start shell
		shell.Run()
		// teardown
		shell.Close()
	})
	if cfg.IngesterShell.PasswordLogin {
		if err := ssh.ListenAndServe("localhost:2222", nil, ssh.HostKeyFile(cfg.IngesterShell.SshHostKey), ssh.PasswordAuth(func(ctx ssh.Context, pass string) bool { return pass == cfg.IngesterShell.Password })); err != nil {
			logrus.Fatal(err)
		}
	} else if err := ssh.ListenAndServe("localhost:2222", nil, ssh.HostKeyFile(cfg.IngesterShell.SshHostKey)); err != nil {
		logrus.Fatal(err)
	}
}
|
package antminer
import (
"context"
"fmt"
"sync"
"github.com/ka2n/masminer/machine/asic/base"
"golang.org/x/crypto/ssh"
"golang.org/x/sync/errgroup"
)
// GetStats returns MinerStats
func (c *Client) GetStats() (stats MinerStats, err error) {
return c.GetStatsContext(context.Background())
}
// GetStatsContext returns MinerStats
func (c *Client) GetStatsContext(ctx context.Context) (stats MinerStats, err error) {
var mu sync.Mutex
wg, ctx := errgroup.WithContext(ctx)
wg.Go(func() error {
ret, err := getMinerStatsSummary(ctx, c.SSH, c.summaryCMD)
if err != nil {
return err
}
mu.Lock()
defer mu.Unlock()
stats.Summary = ret
return nil
})
wg.Go(func() error {
ret, err := getMinerStatsPools(ctx, c.SSH, c.poolsCMD)
if err != nil {
return err
}
mu.Lock()
defer mu.Unlock()
stats.Pools = ret
return nil
})
wg.Go(func() error {
ret, err := getMinerStatsDevs(ctx, c.SSH, c.statsCMD)
if err != nil {
return err
}
mu.Lock()
defer mu.Unlock()
stats.Devs = ret
return nil
})
return stats, wg.Wait()
}
// getMinerStatsSummary runs the summary command on the remote host and
// parses its cgminer-style output.
func getMinerStatsSummary(ctx context.Context, client *ssh.Client, cmd string) (summary MinerStatsSummary, err error) {
	out, err := base.OutputRemoteShell(ctx, client, cmd)
	if err != nil {
		return summary, err
	}
	return parseSummaryFromCGMinerSummary(out)
}
// parseSummaryFromCGMinerSummary maps the key/value pairs of a cgminer
// "summary" response into a MinerStatsSummary. The second parsed section
// (lprops[1]) holds the data — presumably lprops[0] is a status header;
// confirm against parseCGMinerStats. Missing keys yield zero values.
func parseSummaryFromCGMinerSummary(in []byte) (MinerStatsSummary, error) {
	var summary MinerStatsSummary
	lprops := parseCGMinerStats(in)
	if len(lprops) < 2 {
		return summary, fmt.Errorf("invalid summary input")
	}
	props := lprops[1]
	// Straight field-by-field copy from the parsed property map.
	summary.Elapsed = props["Elapsed"]
	summary.GHS5s = props["GHS 5s"]
	summary.GHSAvarage = props["GHS av"]
	summary.Foundblocks = props["Found Blocks"]
	summary.Getworks = props["Getworks"]
	summary.Accepted = props["Accepted"]
	summary.Rejected = props["Rejected"]
	summary.HardwareErrors = props["Hardware Errors"]
	summary.Utility = props["Utility"]
	summary.Discarded = props["Discarded"]
	summary.Stale = props["Stale"]
	summary.Localwork = props["Local Work"]
	summary.WorkUtility = props["Work Utility"]
	summary.DifficultyAccepted = props["Difficulty Accepted"]
	summary.DifficultyRejected = props["Difficulty Rejected"]
	summary.DifficultyStale = props["Difficulty Stale"]
	summary.Bestshare = props["Best Share"]
	return summary, nil
}
// getMinerStatsPools runs the pools command on the remote host and parses
// its cgminer-style output.
func getMinerStatsPools(ctx context.Context, client *ssh.Client, cmd string) (pools []MinerStatsPool, err error) {
	out, err := base.OutputRemoteShell(ctx, client, cmd)
	if err != nil {
		return pools, err
	}
	return parsePoolsFromCGMinerPools(out)
}
// getMinerStatsDevs runs the stats command on the remote host and parses
// the per-device section of its cgminer-style output.
func getMinerStatsDevs(ctx context.Context, client *ssh.Client, cmd string) (dev MinerStatsDevs, err error) {
	out, err := base.OutputRemoteShell(ctx, client, cmd)
	if err != nil {
		return dev, err
	}
	return parseDevsFromCGMinerStats(out)
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/12/2 9:18 上午
# @File : lt_279_完全平方数.go
# @Description :
# @Attention :
*/
package v2
import "math"
/*
给定正整数 n,
找到若干个完全平方数(比如 1, 4, 9, 16, ...)使得它们的和等于 n。你需要让组成和的完全平方数的个数最少。
给你一个整数 n ,返回和为 n 的完全平方数的 最少数量 。
完全平方数 是一个整数,
其值等于另一个整数的平方;换句话说,其值等于一个整数自乘的积。例如,1、4、9 和 16 都是完全平方数,而 3 和 11 不是。
*/
// numSquares returns the minimum number of perfect squares (1, 4, 9, ...)
// whose sum equals n, computed bottom-up with the recurrence
// f(i) = 1 + min over j of f(i - j*j).
func numSquares(n int) int {
	dp := make([]int, n+1)
	for i := 1; i <= n; i++ {
		best := math.MaxInt32
		// j*j <= i so every reachable smaller subproblem is considered.
		for j := 1; j*j <= i; j++ {
			if prev := dp[i-j*j]; prev < best {
				best = prev
			}
		}
		dp[i] = best + 1
	}
	return dp[n]
}
// numSquaresMin returns the smaller of a and b.
func numSquaresMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
package testutil
import (
ds2 "github.com/ipfs/go-ipfs/thirdparty/datastore2"
"gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore"
syncds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync"
)
// ThreadSafeCloserMapDatastore returns an in-memory map datastore wrapped
// with a mutex for thread safety and adapted to the closer interface.
func ThreadSafeCloserMapDatastore() ds2.ThreadSafeDatastoreCloser {
	return ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore()))
}
|
package commands
import (
"drdgvhbh/discordbot/internal/cli/anime/mal"
"drdgvhbh/discordbot/internal/cli/anime/mal/api"
messageMal "drdgvhbh/discordbot/internal/discord/message/anime/mal"
"log"
"github.com/bwmarrin/discordgo"
realCli "github.com/urfave/cli"
)
// CommandCallback delivers an embedded message to Discord and returns the
// sent message (or an error).
type CommandCallback = func(output *discordgo.MessageEmbed) (*discordgo.Message, error)

// AddAnimeCommands registers the "anime" command group and its subcommands
// on the given CLI app; results are delivered through callback.
func AddAnimeCommands(
	cli *realCli.App,
	callback CommandCallback,
) {
	animeCommand := realCli.Command{
		Name:  "anime",
		Usage: "List anime commands",
		Action: func(c *realCli.Context) error {
			realCli.ShowCommandHelp(c, "anime")
			return nil
		},
		Subcommands: realCli.Commands{
			getAnimeProfileCommand(callback),
			listAnimeCharacterStock(callback),
		},
	}
	cli.Commands = append(cli.Commands, animeCommand)
}
// getAnimeProfileCommand builds the "profile" subcommand, which fetches a
// My Anime List user profile and renders it through the callback.
var getAnimeProfileCommand = func(callback CommandCallback) realCli.Command {
	return realCli.Command{
		Name:  "profile",
		Usage: "Displays a user's anime profile",
		Flags: []realCli.Flag{
			realCli.StringFlag{
				Name:  "name, n",
				Usage: "My Anime List Username",
			},
		},
		Action: func(c *realCli.Context) error {
			userProfile, err := api.GetProfile(c.String("name"))
			if err != nil {
				log.Println(err)
				return nil
			}
			options := messageMal.CreateAnimeProfileEmbeddedOptions{
				AnimeProfile: userProfile,
			}
			embeddedMessage := messageMal.CreateAnimeProfileEmbedded(
				options)
			// Bug fix: the callback's error was held in a variable named
			// "error" (shadowing the builtin) and the nil outer err was
			// logged instead of it.
			if _, cbErr := callback(embeddedMessage); cbErr != nil {
				log.Println(cbErr)
			}
			return nil
		},
	}
}
// listAnimeCharacterStock builds the "quote" subcommand, which looks up a
// stock price for an anime + character pair and renders it via the callback.
var listAnimeCharacterStock = func(callback CommandCallback) realCli.Command {
	return realCli.Command{
		Name:  "quote",
		Usage: "Gets a stock price listing for an anime + anime character pair",
		Flags: []realCli.Flag{
			realCli.IntFlag{
				Name:  "anime, a",
				Usage: "Anime ID",
			},
			realCli.IntFlag{
				Name:  "character, c",
				Usage: "Character ID",
			},
		},
		Action: func(c *realCli.Context) error {
			animeID := c.Int("anime")
			characterID := c.Int("character")
			animeStock, err := mal.CreateAnimeStock(characterID, animeID)
			if err != nil {
				log.Println(err)
				return nil
			}
			options := messageMal.AnimeStockQuoteEmbeddedOptions{
				AnimeStock: animeStock,
			}
			embeddedMessage := messageMal.CreateAnimeStockQuoteEmbedded(options)
			// Bug fix: the callback's error was held in a variable named
			// "error" (shadowing the builtin) and the nil outer err was
			// logged instead of it.
			if _, cbErr := callback(embeddedMessage); cbErr != nil {
				log.Println(cbErr)
			}
			return nil
		},
	}
}
|
package main
import (
"sync"
"sync/atomic"
)
// ClientSessions is a mutex-guarded collection of client sessions plus an
// atomically maintained message counter. It must not be copied (embedded
// sync.Mutex); always pass *ClientSessions.
type ClientSessions struct {
	sync.Mutex
	list    []*ClientSession
	counter uint64
}

// Add appends a session to the collection.
func (c *ClientSessions) Add(s *ClientSession) {
	c.Lock()
	defer c.Unlock()
	c.list = append(c.list, s)
}

// Remove deletes the session from the collection if present. It uses the
// unexported, lock-free index to avoid re-locking the non-reentrant mutex.
func (c *ClientSessions) Remove(s *ClientSession) {
	c.Lock()
	defer c.Unlock()
	if idx := c.index(s); idx != -1 {
		c.list = append(c.list[:idx], c.list[idx+1:]...)
	}
}

// Len reports the number of sessions. Bug fix: the previous unguarded read
// raced with the slice mutations in Add/Remove.
func (c *ClientSessions) Len() int {
	c.Lock()
	defer c.Unlock()
	return len(c.list)
}

// IncCounter atomically increments the counter and returns the new value.
func (c *ClientSessions) IncCounter() uint64 {
	return atomic.AddUint64(&c.counter, 1)
}

// ResetCounter atomically zeroes the counter and returns the previous value.
func (c *ClientSessions) ResetCounter() uint64 {
	return atomic.SwapUint64(&c.counter, 0)
}

// index is the lock-free linear search; the caller must hold the lock.
func (c *ClientSessions) index(s *ClientSession) int {
	for p, v := range c.list {
		if v == s {
			return p
		}
	}
	return -1
}

// Index returns the position of s in the collection, or -1. Bug fix: the
// search now holds the lock; external callers must not already hold it.
func (c *ClientSessions) Index(s *ClientSession) int {
	c.Lock()
	defer c.Unlock()
	return c.index(s)
}

// Each calls fn for every session bound to channelId.
// NOTE(review): this iteration is intentionally left unguarded (as before)
// because locking here could deadlock callers whose fn calls Add/Remove —
// but it does race with concurrent mutations; confirm the caller's
// synchronization strategy.
func (c *ClientSessions) Each(channelId string, fn func(*ClientSession)) {
	for _, s := range c.list {
		if s.channelId == channelId {
			fn(s)
		}
	}
}

// NewClientSessions returns an empty, ready-to-use collection.
func NewClientSessions() *ClientSessions {
	return &ClientSessions{}
}
|
// DO NOT EDIT. This file was generated by "github.com/frk/gosql".
package testdata
import (
"github.com/frk/gosql"
)
// Exec inserts every entry of q.Users into test_user with a single
// multi-row INSERT, binding four ordinal parameters per row.
//
// NOTE(review): this file is generated ("DO NOT EDIT") — regenerate rather
// than hand-patching. Also note that an empty q.Users slice would strip the
// trailing space from "VALUES " instead of a comma and send malformed SQL;
// presumably the generator guarantees a non-empty slice.
func (q *InsertBasicSliceQuery) Exec(c gosql.Conn) error {
	var queryString = `INSERT INTO "test_user" AS u (
	"id"
	, "email"
	, "full_name"
	, "created_at"
	) VALUES ` // `
	params := make([]interface{}, len(q.Users)*4)
	for i, v := range q.Users {
		pos := i * 4
		params[pos+0] = v.Id
		params[pos+1] = v.Email
		params[pos+2] = v.FullName
		params[pos+3] = v.CreatedAt
		queryString += `(` + gosql.OrdinalParameters[pos+0] +
			`, ` + gosql.OrdinalParameters[pos+1] +
			`, ` + gosql.OrdinalParameters[pos+2] +
			`, ` + gosql.OrdinalParameters[pos+3] +
			`),`
	}
	// Drop the trailing comma left by the last row group.
	queryString = queryString[:len(queryString)-1]
	_, err := c.Exec(queryString, params...)
	return err
}
|
package gorestreact
//go:generate swag init --parseInternal --parseDependency -g ./cmd/main.go
|
package nlp
import (
"io/ioutil"
"log"
"regexp"
"strings"
)
// stopwordRegex matches a string that is exactly one of the stopwords.
var stopwordRegex *regexp.Regexp

// stopwordMap supports O(1) stopword membership tests.
var stopwordMap map[string]bool

// init loads the stopword list — falling back to a small built-in list when
// ./nlp/stop-word-list.csv is unavailable — and builds both the membership
// map and a whole-word alternation regexp over the words.
func init() {
	stopwords := []string{"the", "is", "at",
		"which", "on", "and", "a", "an",
		"am", "hello", "hey",
		"be", "as", "by",
		"for", "from",
		"he", "her", "hers",
		"him", "his", "us", "we", "were", "to", "too"}
	b, err := ioutil.ReadFile("./nlp/stop-word-list.csv")
	if err != nil {
		log.Printf("failed to read stopword file, defaulting to basic stopwords")
	} else {
		stopwords = strings.Split(string(b), ",")
	}
	stopwordMap = make(map[string]bool, len(stopwords))
	for _, w := range stopwords {
		stopwordMap[w] = true
	}
	patterns := make([]string, 0, len(stopwords))
	for _, word := range stopwords {
		// Bug fix: words were previously interpolated verbatim, so any
		// regexp metacharacter in the CSV (".", "*", "(", …) could
		// mis-match or make MustCompile panic. Quote each word so it is
		// matched literally.
		patterns = append(patterns, `\A`+regexp.QuoteMeta(word)+`\z`)
	}
	stopwordRegex = regexp.MustCompile(strings.Join(patterns, `|`))
}
// IsStopword reports whether word is in the stopword set.
func IsStopword(word string) bool {
	return stopwordMap[word]
}
// FilterStopwords removes the message when it matches a stopword exactly
// (the pattern is anchored with \A and \z) and returns it unchanged otherwise.
func FilterStopwords(message string) string {
	// ReplaceAllString avoids the needless string -> []byte -> string
	// round trip of the previous implementation.
	return stopwordRegex.ReplaceAllString(message, "")
}
|
package actions
import (
"errors"
"github.com/barrydev/api-3h-shop/src/common/connect"
"github.com/barrydev/api-3h-shop/src/factories"
"github.com/barrydev/api-3h-shop/src/model"
"strings"
)
// UpdateOrderItem applies the non-nil fields of body to the order item with
// the given id and returns the updated record. When no updatable fields are
// set, the current record is returned unchanged.
func UpdateOrderItem(orderItemId int64, body *model.BodyOrderItem) (*model.OrderItem, error) {
	queryString := ""
	var args []interface{}
	var set []string
	var goroutines []func()
	// resolveChan/rejectChan back the (currently disabled) existence
	// checks; with no goroutines registered the collection loop is a no-op.
	resolveChan := make(chan interface{}, 3)
	rejectChan := make(chan error)

	if body.ProductId != nil {
		// Existence check via factories.FindProductById is disabled
		// (see VCS history); the id is bound without validation.
		set = append(set, " product_id=?")
		args = append(args, body.ProductId)
	}
	if body.ProductItemId != nil {
		// Existence check via factories.FindProductItemById is disabled.
		set = append(set, " product_item_id=?")
		args = append(args, body.ProductItemId)
	}
	if body.OrderId != nil {
		// Existence check via factories.FindOrderById is disabled.
		set = append(set, " order_id=?")
		args = append(args, body.OrderId)
	}
	for _, goroutine := range goroutines {
		go goroutine()
	}
	for i := 0; i < len(goroutines); i++ {
		select {
		case <-resolveChan:
		case err := <-rejectChan:
			return nil, err
		}
	}
	if body.Quantity != nil {
		set = append(set, " quantity=?")
		args = append(args, body.Quantity)
	}
	if body.Status != nil {
		set = append(set, " status=?")
		// Bug fix: this previously appended body.Quantity, writing the
		// quantity value into the status column.
		args = append(args, body.Status)
	}
	if len(set) > 0 {
		queryString += "SET" + strings.Join(set, ",") + "\n"
	} else {
		// Nothing to update: return the existing record as-is.
		orderItem, err := factories.FindOrderItemById(orderItemId)
		if err != nil {
			return nil, err
		}
		if orderItem == nil {
			return nil, errors.New("orderItem does not exists")
		}
		return orderItem, nil
	}
	queryString += "WHERE _id=?"
	args = append(args, orderItemId)
	rowEffected, err := factories.UpdateOrderItem(&connect.QueryMySQL{
		QueryString: queryString,
		Args:        args,
	})
	if err != nil {
		return nil, err
	}
	if rowEffected == nil {
		return nil, errors.New("update error")
	}
	return GetOrderItemById(orderItemId)
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hashjoin
import (
"context"
"fmt"
"math/rand"
"strings"
"testing"
"github.com/pingcap/failpoint"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/util/dbterror/exeerrors"
"github.com/stretchr/testify/require"
)
// TestIndexNestedLoopHashJoin exercises the IndexHashJoin executor:
// keep-order output, semi joins under the MockOnlyEnableIndexHashJoin
// failpoint, and the multi-table regression from issue #16586.
func TestIndexNestedLoopHashJoin(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	// Small chunk/batch sizes force multiple fetch rounds through the join.
	tk.MustExec("set @@tidb_init_chunk_size=2")
	tk.MustExec("set @@tidb_index_join_batch_size=10")
	tk.MustExec("DROP TABLE IF EXISTS t, s")
	tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
	tk.MustExec("create table t(pk int primary key, a int)")
	for i := 0; i < 100; i++ {
		tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i, i))
	}
	tk.MustExec("create table s(a int primary key)")
	// Randomized data: roughly 30% of rows match t.a, the rest are scaled
	// out of range, so the left join produces a mix of matches and NULLs.
	for i := 0; i < 100; i++ {
		if rand.Float32() < 0.3 {
			tk.MustExec(fmt.Sprintf("insert into s values(%d)", i))
		} else {
			tk.MustExec(fmt.Sprintf("insert into s values(%d)", i*100))
		}
	}
	tk.MustExec("analyze table t")
	tk.MustExec("analyze table s")
	// Test IndexNestedLoopHashJoin keepOrder.
	tk.MustQuery("explain format = 'brief' select /*+ INL_HASH_JOIN(s) */ * from t left join s on t.a=s.a order by t.pk").Check(testkit.Rows(
		"IndexHashJoin 100.00 root left outer join, inner:TableReader, outer key:test.t.a, inner key:test.s.a, equal cond:eq(test.t.a, test.s.a)",
		"├─TableReader(Build) 100.00 root data:TableFullScan",
		"│ └─TableFullScan 100.00 cop[tikv] table:t keep order:true",
		"└─TableReader(Probe) 100.00 root data:TableRangeScan",
		" └─TableRangeScan 100.00 cop[tikv] table:s range: decided by [test.t.a], keep order:false",
	))
	rs := tk.MustQuery("select /*+ INL_HASH_JOIN(s) */ * from t left join s on t.a=s.a order by t.pk")
	// Output must come back ordered by t.pk, i.e. row i has pk == i.
	for i, row := range rs.Rows() {
		require.Equal(t, fmt.Sprintf("%d", i), row[0].(string))
	}
	// index hash join with semi join
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/planner/core/MockOnlyEnableIndexHashJoin", "return(true)"))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/planner/core/MockOnlyEnableIndexHashJoin"))
	}()
	tk.MustExec("drop table t")
	tk.MustExec("CREATE TABLE `t` ( `l_orderkey` int(11) NOT NULL,`l_linenumber` int(11) NOT NULL,`l_partkey` int(11) DEFAULT NULL,`l_suppkey` int(11) DEFAULT NULL,PRIMARY KEY (`l_orderkey`,`l_linenumber`))")
	tk.MustExec(`insert into t values(0,0,0,0);`)
	tk.MustExec(`insert into t values(0,1,0,1);`)
	tk.MustExec(`insert into t values(0,2,0,0);`)
	tk.MustExec(`insert into t values(1,0,1,0);`)
	tk.MustExec(`insert into t values(1,1,1,1);`)
	tk.MustExec(`insert into t values(1,2,1,0);`)
	tk.MustExec(`insert into t values(2,0,0,0);`)
	tk.MustExec(`insert into t values(2,1,0,1);`)
	tk.MustExec(`insert into t values(2,2,0,0);`)
	tk.MustExec("analyze table t")
	// test semi join
	tk.Session().GetSessionVars().InitChunkSize = 2
	tk.Session().GetSessionVars().MaxChunkSize = 2
	tk.MustExec("set @@tidb_index_join_batch_size=2")
	tk.MustQuery("desc format = 'brief' select * from t l1 where exists ( select * from t l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey ) order by `l_orderkey`,`l_linenumber`;").Check(testkit.Rows(
		"Sort 7.20 root test.t.l_orderkey, test.t.l_linenumber",
		"└─IndexHashJoin 7.20 root semi join, inner:IndexLookUp, outer key:test.t.l_orderkey, inner key:test.t.l_orderkey, equal cond:eq(test.t.l_orderkey, test.t.l_orderkey), other cond:ne(test.t.l_suppkey, test.t.l_suppkey)",
		" ├─TableReader(Build) 9.00 root data:Selection",
		" │ └─Selection 9.00 cop[tikv] not(isnull(test.t.l_suppkey))",
		" │ └─TableFullScan 9.00 cop[tikv] table:l1 keep order:false",
		" └─IndexLookUp(Probe) 27.00 root ",
		" ├─IndexRangeScan(Build) 27.00 cop[tikv] table:l2, index:PRIMARY(l_orderkey, l_linenumber) range: decided by [eq(test.t.l_orderkey, test.t.l_orderkey)], keep order:false",
		" └─Selection(Probe) 27.00 cop[tikv] not(isnull(test.t.l_suppkey))",
		" └─TableRowIDScan 27.00 cop[tikv] table:l2 keep order:false"))
	tk.MustQuery("select * from t l1 where exists ( select * from t l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey )order by `l_orderkey`,`l_linenumber`;").Check(testkit.Rows("0 0 0 0", "0 1 0 1", "0 2 0 0", "1 0 1 0", "1 1 1 1", "1 2 1 0", "2 0 0 0", "2 1 0 1", "2 2 0 0"))
	tk.MustQuery("desc format = 'brief' select count(*) from t l1 where exists ( select * from t l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey );").Check(testkit.Rows(
		"StreamAgg 1.00 root funcs:count(1)->Column#11",
		"└─IndexHashJoin 7.20 root semi join, inner:IndexLookUp, outer key:test.t.l_orderkey, inner key:test.t.l_orderkey, equal cond:eq(test.t.l_orderkey, test.t.l_orderkey), other cond:ne(test.t.l_suppkey, test.t.l_suppkey)",
		" ├─TableReader(Build) 9.00 root data:Selection",
		" │ └─Selection 9.00 cop[tikv] not(isnull(test.t.l_suppkey))",
		" │ └─TableFullScan 9.00 cop[tikv] table:l1 keep order:false",
		" └─IndexLookUp(Probe) 27.00 root ",
		" ├─IndexRangeScan(Build) 27.00 cop[tikv] table:l2, index:PRIMARY(l_orderkey, l_linenumber) range: decided by [eq(test.t.l_orderkey, test.t.l_orderkey)], keep order:false",
		" └─Selection(Probe) 27.00 cop[tikv] not(isnull(test.t.l_suppkey))",
		" └─TableRowIDScan 27.00 cop[tikv] table:l2 keep order:false"))
	tk.MustQuery("select count(*) from t l1 where exists ( select * from t l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey );").Check(testkit.Rows("9"))
	tk.MustExec("DROP TABLE IF EXISTS t, s")
	// issue16586
	tk.MustExec("use test;")
	tk.MustExec("drop table if exists lineitem;")
	tk.MustExec("drop table if exists orders;")
	tk.MustExec("drop table if exists supplier;")
	tk.MustExec("drop table if exists nation;")
	tk.MustExec("CREATE TABLE `lineitem` (`l_orderkey` int(11) NOT NULL,`l_linenumber` int(11) NOT NULL,`l_partkey` int(11) DEFAULT NULL,`l_suppkey` int(11) DEFAULT NULL,PRIMARY KEY (`l_orderkey`,`l_linenumber`) );")
	tk.MustExec("CREATE TABLE `supplier` ( `S_SUPPKEY` bigint(20) NOT NULL,`S_NATIONKEY` bigint(20) NOT NULL,PRIMARY KEY (`S_SUPPKEY`));")
	tk.MustExec("CREATE TABLE `orders` (`O_ORDERKEY` bigint(20) NOT NULL,`O_ORDERSTATUS` char(1) NOT NULL,PRIMARY KEY (`O_ORDERKEY`));")
	tk.MustExec("CREATE TABLE `nation` (`N_NATIONKEY` bigint(20) NOT NULL,`N_NAME` char(25) NOT NULL,PRIMARY KEY (`N_NATIONKEY`))")
	tk.MustExec("insert into lineitem values(0,0,0,1)")
	tk.MustExec("insert into lineitem values(0,1,1,1)")
	tk.MustExec("insert into lineitem values(0,2,2,0)")
	tk.MustExec("insert into lineitem values(0,3,3,3)")
	tk.MustExec("insert into lineitem values(0,4,1,4)")
	tk.MustExec("insert into supplier values(0, 4)")
	tk.MustExec("insert into orders values(0, 'F')")
	tk.MustExec("insert into nation values(0, 'EGYPT')")
	tk.MustExec("insert into lineitem values(1,0,2,4)")
	tk.MustExec("insert into lineitem values(1,1,1,0)")
	tk.MustExec("insert into lineitem values(1,2,3,3)")
	tk.MustExec("insert into lineitem values(1,3,1,0)")
	tk.MustExec("insert into lineitem values(1,4,1,3)")
	tk.MustExec("insert into supplier values(1, 1)")
	tk.MustExec("insert into orders values(1, 'F')")
	tk.MustExec("insert into nation values(1, 'EGYPT')")
	tk.MustExec("insert into lineitem values(2,0,1,2)")
	tk.MustExec("insert into lineitem values(2,1,3,4)")
	tk.MustExec("insert into lineitem values(2,2,2,0)")
	tk.MustExec("insert into lineitem values(2,3,3,1)")
	tk.MustExec("insert into lineitem values(2,4,4,3)")
	tk.MustExec("insert into supplier values(2, 3)")
	tk.MustExec("insert into orders values(2, 'F')")
	tk.MustExec("insert into nation values(2, 'EGYPT')")
	tk.MustExec("insert into lineitem values(3,0,4,3)")
	tk.MustExec("insert into lineitem values(3,1,4,3)")
	tk.MustExec("insert into lineitem values(3,2,2,2)")
	tk.MustExec("insert into lineitem values(3,3,0,0)")
	tk.MustExec("insert into lineitem values(3,4,1,0)")
	tk.MustExec("insert into supplier values(3, 1)")
	tk.MustExec("insert into orders values(3, 'F')")
	tk.MustExec("insert into nation values(3, 'EGYPT')")
	tk.MustExec("insert into lineitem values(4,0,2,2)")
	tk.MustExec("insert into lineitem values(4,1,4,2)")
	tk.MustExec("insert into lineitem values(4,2,0,2)")
	tk.MustExec("insert into lineitem values(4,3,0,1)")
	tk.MustExec("insert into lineitem values(4,4,2,2)")
	tk.MustExec("insert into supplier values(4, 4)")
	tk.MustExec("insert into orders values(4, 'F')")
	tk.MustExec("insert into nation values(4, 'EGYPT')")
	// TPC-H-style correlated EXISTS query over four tables (issue #16586).
	tk.MustQuery("select count(*) from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey < l1.l_suppkey ) and s_nationkey = n_nationkey and n_name = 'EGYPT' order by l1.l_orderkey, l1.l_linenumber;").Check(testkit.Rows("18"))
	tk.MustExec("drop table lineitem")
	tk.MustExec("drop table nation")
	tk.MustExec("drop table supplier")
	tk.MustExec("drop table orders")
}
// TestIssue13449 checks that an ordered INL_HASH_JOIN over a sparse inner
// table returns the correct (and correctly ordered) matching rows.
func TestIssue13449(t *testing.T) {
	store := testkit.CreateMockStore(t)
	testKit := testkit.NewTestKit(t, store)
	testKit.MustExec("use test")
	testKit.MustExec("drop table if exists t, s;")
	testKit.MustExec("create table t(a int, index(a));")
	testKit.MustExec("create table s(a int, index(a));")
	for v := 1; v <= 128; v++ {
		testKit.MustExec(fmt.Sprintf("insert into t values(%d)", v))
	}
	testKit.MustExec("insert into s values(1), (128)")
	// Shrink chunk/batch sizes and concurrency so the join runs in many
	// small rounds.
	testKit.MustExec("set @@tidb_max_chunk_size=32;")
	testKit.MustExec("set @@tidb_index_lookup_join_concurrency=1;")
	testKit.MustExec("set @@tidb_index_join_batch_size=32;")
	testKit.MustQuery("desc format = 'brief' select /*+ INL_HASH_JOIN(s) */ * from t join s on t.a=s.a order by t.a;").Check(testkit.Rows(
		"IndexHashJoin 12487.50 root inner join, inner:IndexReader, outer key:test.t.a, inner key:test.s.a, equal cond:eq(test.t.a, test.s.a)",
		"├─IndexReader(Build) 9990.00 root index:IndexFullScan",
		"│ └─IndexFullScan 9990.00 cop[tikv] table:t, index:a(a) keep order:true, stats:pseudo",
		"└─IndexReader(Probe) 12487.50 root index:Selection",
		" └─Selection 12487.50 cop[tikv] not(isnull(test.s.a))",
		" └─IndexRangeScan 12500.00 cop[tikv] table:s, index:a(a) range: decided by [eq(test.s.a, test.t.a)], keep order:false, stats:pseudo"))
	testKit.MustQuery("select /*+ INL_HASH_JOIN(s) */ * from t join s on t.a=s.a order by t.a;").Check(testkit.Rows("1 1", "128 128"))
}
// TestHashJoin checks the runtime statistics of `explain analyze` for a
// hash semi join: the build side (empty t2) must report 0 actual rows while
// the probe side reports all 5 rows from t1.
func TestHashJoin(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1, t2")
	tk.MustExec("create table t1(a int, b int);")
	tk.MustExec("create table t2(a int, b int);")
	tk.MustExec("insert into t1 values(1,1),(2,2),(3,3),(4,4),(5,5);")
	tk.MustQuery("select count(*) from t1").Check(testkit.Rows("5"))
	tk.MustQuery("select count(*) from t2").Check(testkit.Rows("0"))
	tk.MustExec("set @@tidb_init_chunk_size=1;")
	result := tk.MustQuery("explain analyze select /*+ TIDB_HJ(t1, t2) */ * from t1 where exists (select a from t2 where t1.a = t2.a);")
	// Expected row/column layout of the explain-analyze output
	// (column 2 is actRows, which the assertions below index into):
	//   0                       1        2 3 4                                                                    5                                                                                                      6                                              7    8
	// 0 HashJoin_9              7992.00  0 root time:959.436µs, loops:1, Concurrency:5, probe collision:0, build:0s semi join, equal:[eq(test.t1.a, test.t2.a)] 0 Bytes 0 Bytes
	// 1 ├─TableReader_15(Build) 9990.00  0 root time:583.499µs, loops:1, rpc num: 1, rpc time:563.325µs, proc keys:0 data:Selection_14 141 Bytes N/A
	// 2 │ └─Selection_14        9990.00  0 cop[tikv] time:53.674µs, loops:1 not(isnull(test.t2.a)) N/A N/A
	// 3 │   └─TableFullScan_13 10000.00  0 cop[tikv] table:t2 time:52.14µs, loops:1 keep order:false, stats:pseudo N/A N/A
	// 4 └─TableReader_12(Probe) 9990.00  5 root time:779.503µs, loops:1, rpc num: 1, rpc time:794.929µs, proc keys:0 data:Selection_11 241 Bytes N/A
	// 5   └─Selection_11        9990.00  5 cop[tikv] time:243.395µs, loops:6 not(isnull(test.t1.a)) N/A N/A
	// 6     └─TableFullScan_10 10000.00  5 cop[tikv] table:t1 time:206.273µs, loops:6 keep order:false, stats:pseudo N/A N/A
	row := result.Rows()
	require.Equal(t, 7, len(row))
	// Build side (row 1) scans the empty t2: 0 actual rows.
	innerActRows := row[1][2].(string)
	require.Equal(t, "0", innerActRows)
	outerActRows := row[4][2].(string)
	// FIXME: revert this result to 1 after TableReaderExecutor can handle initChunkSize.
	require.Equal(t, "5", outerActRows)
}
// TestOuterTableBuildHashTableIsuse13933 covers issue #13933: correctness
// of outer joins when the OUTER table is forced to be the hash-table build
// side (via the ForceUseOuterBuild4Test global toggle), for both a plain
// hash join and an index hash join.
func TestOuterTableBuildHashTableIsuse13933(t *testing.T) {
	// Global planner toggle — restored by the deferred call below.
	plannercore.ForceUseOuterBuild4Test.Store(true)
	defer func() { plannercore.ForceUseOuterBuild4Test.Store(false) }()
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t, s")
	tk.MustExec("create table t (a int,b int)")
	tk.MustExec("create table s (a int,b int)")
	tk.MustExec("insert into t values (11,11),(1,2)")
	tk.MustExec("insert into s values (1,2),(2,1),(11,11)")
	tk.MustQuery("select * from t left join s on s.a > t.a").Sort().Check(testkit.Rows("1 2 11 11", "1 2 2 1", "11 11 <nil> <nil>"))
	// The outer table t must appear as the Build side in the plan.
	tk.MustQuery("explain format = 'brief' select * from t left join s on s.a > t.a").Check(testkit.Rows(
		"HashJoin 99900000.00 root CARTESIAN left outer join, other cond:gt(test.s.a, test.t.a)",
		"├─TableReader(Build) 10000.00 root data:TableFullScan",
		"│ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo",
		"└─TableReader(Probe) 9990.00 root data:Selection",
		" └─Selection 9990.00 cop[tikv] not(isnull(test.s.a))",
		" └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo"))
	tk.MustExec("drop table if exists t, s")
	tk.MustExec("Create table s (a int, b int, key(b))")
	tk.MustExec("Create table t (a int, b int, key(b))")
	tk.MustExec("Insert into s values (1,2),(2,1),(11,11)")
	tk.MustExec("Insert into t values (11,2),(1,2),(5,2)")
	// Same scenario with an index hash join and an extra non-equi condition.
	tk.MustQuery("select /*+ INL_HASH_JOIN(s)*/ * from t left join s on s.b=t.b and s.a < t.a;").Sort().Check(testkit.Rows("1 2 <nil> <nil>", "11 2 1 2", "5 2 1 2"))
	tk.MustQuery("explain format = 'brief' select /*+ INL_HASH_JOIN(s)*/ * from t left join s on s.b=t.b and s.a < t.a;").Check(testkit.Rows(
		"IndexHashJoin 12475.01 root left outer join, inner:IndexLookUp, outer key:test.t.b, inner key:test.s.b, equal cond:eq(test.t.b, test.s.b), other cond:lt(test.s.a, test.t.a)",
		"├─TableReader(Build) 10000.00 root data:TableFullScan",
		"│ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo",
		"└─IndexLookUp(Probe) 12475.01 root ",
		" ├─Selection(Build) 12487.50 cop[tikv] not(isnull(test.s.b))",
		" │ └─IndexRangeScan 12500.00 cop[tikv] table:s, index:b(b) range: decided by [eq(test.s.b, test.t.b)], keep order:false, stats:pseudo",
		" └─Selection(Probe) 12475.01 cop[tikv] not(isnull(test.s.a))",
		" └─TableRowIDScan 12487.50 cop[tikv] table:s keep order:false, stats:pseudo"))
}
// TestInlineProjection4HashJoinIssue15316 covers issue #15316: inline
// projection inside a hash join must not corrupt results when only a
// subset of build-side columns is projected.
func TestInlineProjection4HashJoinIssue15316(t *testing.T) {
	// Two necessary factors to reproduce this issue:
	// (1) taking HashLeftJoin, i.e., letting the probing tuple lay at the left side of joined tuples
	// (2) the projection only contains a part of columns from the build side, i.e., pruning the same probe side
	plannercore.ForcedHashLeftJoin4Test.Store(true)
	defer func() { plannercore.ForcedHashLeftJoin4Test.Store(false) }()
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists S, T")
	tk.MustExec("create table S (a int not null, b int, c int);")
	tk.MustExec("create table T (a int not null, b int, c int);")
	tk.MustExec("insert into S values (0,1,2),(0,1,null),(0,1,2);")
	tk.MustExec("insert into T values (0,10,2),(0,10,null),(1,10,2);")
	tk.MustQuery("select T.a,T.a,T.c from S join T on T.a = S.a where S.b<T.b order by T.a,T.c;").Check(testkit.Rows(
		"0 0 <nil>",
		"0 0 <nil>",
		"0 0 <nil>",
		"0 0 2",
		"0 0 2",
		"0 0 2",
	))
	// NOTE: the HashLeftJoin should be kept
	tk.MustQuery("explain format = 'brief' select T.a,T.a,T.c from S join T on T.a = S.a where S.b<T.b order by T.a,T.c;").Check(testkit.Rows(
		"Sort 12487.50 root test.t.a, test.t.c",
		"└─Projection 12487.50 root test.t.a, test.t.a, test.t.c",
		" └─HashJoin 12487.50 root inner join, equal:[eq(test.s.a, test.t.a)], other cond:lt(test.s.b, test.t.b)",
		" ├─TableReader(Build) 9990.00 root data:Selection",
		" │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.b))",
		" │ └─TableFullScan 10000.00 cop[tikv] table:T keep order:false, stats:pseudo",
		" └─TableReader(Probe) 9990.00 root data:Selection",
		" └─Selection 9990.00 cop[tikv] not(isnull(test.s.b))",
		" └─TableFullScan 10000.00 cop[tikv] table:S keep order:false, stats:pseudo"))
}
// TestIssue18572_1 verifies that an error injected into the index hash
// join inner worker is propagated to the result-set reader rather than
// being swallowed.
func TestIssue18572_1(t *testing.T) {
	store := testkit.CreateMockStore(t)
	testKit := testkit.NewTestKit(t, store)
	testKit.MustExec("use test")
	testKit.MustExec("drop table if exists t1")
	testKit.MustExec("create table t1(a int, b int, index idx(b));")
	testKit.MustExec("insert into t1 values(1, 1);")
	testKit.MustExec("insert into t1 select * from t1;")
	fpName := "github.com/pingcap/tidb/executor/testIndexHashJoinInnerWorkerErr"
	require.NoError(t, failpoint.Enable(fpName, "return"))
	defer func() { require.NoError(t, failpoint.Disable(fpName)) }()
	resultSet, err := testKit.Exec("select /*+ inl_hash_join(t1) */ * from t1 right join t1 t2 on t1.b=t2.b;")
	require.NoError(t, err)
	// The injected error surfaces while draining rows, not at Exec time.
	_, err = session.GetRows4Test(context.Background(), nil, resultSet)
	require.True(t, strings.Contains(err.Error(), "mockIndexHashJoinInnerWorkerErr"))
	require.NoError(t, resultSet.Close())
}
// TestIssue18572_2 verifies that an error injected into the index hash
// join outer worker is propagated to the result-set reader rather than
// being swallowed.
func TestIssue18572_2(t *testing.T) {
	store := testkit.CreateMockStore(t)
	testKit := testkit.NewTestKit(t, store)
	testKit.MustExec("use test")
	testKit.MustExec("drop table if exists t1")
	testKit.MustExec("create table t1(a int, b int, index idx(b));")
	testKit.MustExec("insert into t1 values(1, 1);")
	testKit.MustExec("insert into t1 select * from t1;")
	fpName := "github.com/pingcap/tidb/executor/testIndexHashJoinOuterWorkerErr"
	require.NoError(t, failpoint.Enable(fpName, "return"))
	defer func() { require.NoError(t, failpoint.Disable(fpName)) }()
	resultSet, err := testKit.Exec("select /*+ inl_hash_join(t1) */ * from t1 right join t1 t2 on t1.b=t2.b;")
	require.NoError(t, err)
	// The injected error surfaces while draining rows, not at Exec time.
	_, err = session.GetRows4Test(context.Background(), nil, resultSet)
	require.True(t, strings.Contains(err.Error(), "mockIndexHashJoinOuterWorkerErr"))
	require.NoError(t, resultSet.Close())
}
// TestIssue18572_3 verifies that an error injected while building the
// index hash join hash table is propagated to the result-set reader
// rather than being swallowed.
func TestIssue18572_3(t *testing.T) {
	store := testkit.CreateMockStore(t)
	testKit := testkit.NewTestKit(t, store)
	testKit.MustExec("use test")
	testKit.MustExec("drop table if exists t1")
	testKit.MustExec("create table t1(a int, b int, index idx(b));")
	testKit.MustExec("insert into t1 values(1, 1);")
	testKit.MustExec("insert into t1 select * from t1;")
	fpName := "github.com/pingcap/tidb/executor/testIndexHashJoinBuildErr"
	require.NoError(t, failpoint.Enable(fpName, "return"))
	defer func() { require.NoError(t, failpoint.Disable(fpName)) }()
	resultSet, err := testKit.Exec("select /*+ inl_hash_join(t1) */ * from t1 right join t1 t2 on t1.b=t2.b;")
	require.NoError(t, err)
	// The injected error surfaces while draining rows, not at Exec time.
	_, err = session.GetRows4Test(context.Background(), nil, resultSet)
	require.True(t, strings.Contains(err.Error(), "mockIndexHashJoinBuildErr"))
	require.NoError(t, resultSet.Close())
}
// TestExplainAnalyzeJoin checks that `explain analyze` emits the expected
// runtime-statistics format (execution-info column, index 5) for each join
// executor: index lookup join, index hash join, hash join, and index merge
// join.
func TestExplainAnalyzeJoin(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1,t2;")
	tk.MustExec("create table t1 (a int, b int, unique index (a));")
	tk.MustExec("create table t2 (a int, b int, unique index (a))")
	tk.MustExec("insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5)")
	tk.MustExec("insert into t2 values (1,1),(2,2),(3,3),(4,4),(5,5)")
	// Test for index lookup join.
	rows := tk.MustQuery("explain analyze select /*+ INL_JOIN(t1, t2) */ * from t1,t2 where t1.a=t2.a;").Rows()
	require.Equal(t, 8, len(rows))
	require.Regexp(t, "IndexJoin_.*", rows[0][0])
	require.Regexp(t, "time:.*, loops:.*, inner:{total:.*, concurrency:.*, task:.*, construct:.*, fetch:.*, build:.*}, probe:.*", rows[0][5])
	// Test for index lookup hash join.
	rows = tk.MustQuery("explain analyze select /*+ INL_HASH_JOIN(t1, t2) */ * from t1,t2 where t1.a=t2.a;").Rows()
	require.Equal(t, 8, len(rows))
	require.Regexp(t, "IndexHashJoin.*", rows[0][0])
	require.Regexp(t, "time:.*, loops:.*, inner:{total:.*, concurrency:.*, task:.*, construct:.*, fetch:.*, build:.*, join:.*}", rows[0][5])
	// Test for hash join.
	rows = tk.MustQuery("explain analyze select /*+ HASH_JOIN(t1, t2) */ * from t1,t2 where t1.a=t2.a;").Rows()
	require.Equal(t, 7, len(rows))
	require.Regexp(t, "HashJoin.*", rows[0][0])
	require.Regexp(t, "time:.*, loops:.*, build_hash_table:{total:.*, fetch:.*, build:.*}, probe:{concurrency:5, total:.*, max:.*, probe:.*, fetch:.*}", rows[0][5])
	// Test for index merge join.
	rows = tk.MustQuery("explain analyze select /*+ INL_MERGE_JOIN(t1, t2) */ * from t1,t2 where t1.a=t2.a;").Rows()
	require.Len(t, rows, 9)
	require.Regexp(t, "IndexMergeJoin_.*", rows[0][0])
	// Concurrency in the stats must match the session's configured
	// index-lookup-join concurrency.
	require.Regexp(t, fmt.Sprintf(".*Concurrency:%v.*", tk.Session().GetSessionVars().IndexLookupJoinConcurrency()), rows[0][5])
}
// TestIssue20270 verifies that killing a query inside the hash join probe
// phase (join2Chunk) surfaces ErrQueryInterrupted, for both the normal
// inner-build path and the forced outer-build path.
func TestIssue20270(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t;")
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t(c1 int, c2 int)")
	tk.MustExec("create table t1(c1 int, c2 int)")
	tk.MustExec("insert into t values(1,1),(2,2)")
	tk.MustExec("insert into t1 values(2,3),(4,4)")
	// Failpoint simulates a kill during join2Chunk.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/killedInJoin2Chunk", "return(true)"))
	err := tk.QueryToErr("select /*+ TIDB_HJ(t, t1) */ * from t left join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20")
	require.Equal(t, exeerrors.ErrQueryInterrupted, err)
	require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/killedInJoin2Chunk"))
	// Repeat with the outer table forced to be the hash-table build side.
	plannercore.ForceUseOuterBuild4Test.Store(true)
	defer func() {
		plannercore.ForceUseOuterBuild4Test.Store(false)
	}()
	err = failpoint.Enable("github.com/pingcap/tidb/executor/killedInJoin2ChunkForOuterHashJoin", "return(true)")
	require.NoError(t, err)
	tk.MustExec("insert into t1 values(1,30),(2,40)")
	err = tk.QueryToErr("select /*+ TIDB_HJ(t, t1) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20")
	require.Equal(t, exeerrors.ErrQueryInterrupted, err)
	err = failpoint.Disable("github.com/pingcap/tidb/executor/killedInJoin2ChunkForOuterHashJoin")
	require.NoError(t, err)
}
// TestIssue31129 verifies error propagation in the keep-order index hash
// join across several injected failure points: fetch error, build-phase
// panic, inner-fetch error, and a combined panic+error scenario.
func TestIssue31129(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_init_chunk_size=2")
	tk.MustExec("set @@tidb_index_join_batch_size=10")
	tk.MustExec("DROP TABLE IF EXISTS t, s")
	tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
	tk.MustExec("create table t(pk int primary key, a int)")
	for i := 0; i < 100; i++ {
		tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i, i))
	}
	tk.MustExec("create table s(a int primary key)")
	for i := 0; i < 100; i++ {
		tk.MustExec(fmt.Sprintf("insert into s values(%d)", i))
	}
	tk.MustExec("analyze table t")
	tk.MustExec("analyze table s")
	// Test IndexNestedLoopHashJoin keepOrder.
	fpName := "github.com/pingcap/tidb/executor/TestIssue31129"
	require.NoError(t, failpoint.Enable(fpName, "return"))
	err := tk.QueryToErr("select /*+ INL_HASH_JOIN(s) */ * from t left join s on t.a=s.a order by t.pk")
	require.True(t, strings.Contains(err.Error(), "TestIssue31129"))
	require.NoError(t, failpoint.Disable(fpName))
	// Test IndexNestedLoopHashJoin build hash table panic.
	fpName = "github.com/pingcap/tidb/executor/IndexHashJoinBuildHashTablePanic"
	require.NoError(t, failpoint.Enable(fpName, `panic("IndexHashJoinBuildHashTablePanic")`))
	err = tk.QueryToErr("select /*+ INL_HASH_JOIN(s) */ * from t left join s on t.a=s.a order by t.pk")
	require.True(t, strings.Contains(err.Error(), "IndexHashJoinBuildHashTablePanic"))
	require.NoError(t, failpoint.Disable(fpName))
	// Test IndexNestedLoopHashJoin fetch inner fail.
	fpName = "github.com/pingcap/tidb/executor/IndexHashJoinFetchInnerResultsErr"
	require.NoError(t, failpoint.Enable(fpName, "return"))
	err = tk.QueryToErr("select /*+ INL_HASH_JOIN(s) */ * from t left join s on t.a=s.a order by t.pk")
	require.True(t, strings.Contains(err.Error(), "IndexHashJoinFetchInnerResultsErr"))
	require.NoError(t, failpoint.Disable(fpName))
	// Test IndexNestedLoopHashJoin build hash table panic and IndexNestedLoopHashJoin fetch inner fail at the same time.
	fpName1, fpName2 := "github.com/pingcap/tidb/executor/IndexHashJoinBuildHashTablePanic", "github.com/pingcap/tidb/executor/IndexHashJoinFetchInnerResultsErr"
	require.NoError(t, failpoint.Enable(fpName1, `panic("IndexHashJoinBuildHashTablePanic")`))
	require.NoError(t, failpoint.Enable(fpName2, "return"))
	err = tk.QueryToErr("select /*+ INL_HASH_JOIN(s) */ * from t left join s on t.a=s.a order by t.pk")
	// The build panic is expected to win and be the reported error.
	require.True(t, strings.Contains(err.Error(), "IndexHashJoinBuildHashTablePanic"))
	require.NoError(t, failpoint.Disable(fpName1))
	require.NoError(t, failpoint.Disable(fpName2))
}
// TestHashJoinExecEncodeDecodeRow checks that a timestamp column survives
// the hash join's row encode/decode round trip intact.
func TestHashJoinExecEncodeDecodeRow(t *testing.T) {
	store := testkit.CreateMockStore(t)
	testKit := testkit.NewTestKit(t, store)
	setup := []string{
		"use test",
		"drop table if exists t1",
		"drop table if exists t2",
		"create table t1 (id int)",
		"create table t2 (id int, name varchar(255), ts timestamp)",
		"insert into t1 values (1)",
		"insert into t2 values (1, 'xxx', '2003-06-09 10:51:26')",
	}
	for _, stmt := range setup {
		testKit.MustExec(stmt)
	}
	testKit.MustQuery("select ts from t1 inner join t2 where t2.name = 'xxx'").
		Check(testkit.Rows("2003-06-09 10:51:26"))
}
// TestIndexLookupJoin exercises the three index-join variants
// (INL_JOIN, INL_HASH_JOIN, INL_MERGE_JOIN) over a range of schemas:
// inner/left/right joins, decimal keys, composite unique keys, NULL
// join keys, plan shape via EXPLAIN, and varying concurrency settings.
func TestIndexLookupJoin(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("set @@tidb_init_chunk_size=2")
	// Int-keyed tables: all three variants must agree for inner, left and
	// right joins, with and without filters and ORDER BY.
	tk.MustExec("DROP TABLE IF EXISTS t")
	tk.MustExec("CREATE TABLE `t` (`a` int, pk integer auto_increment,`b` char (20),primary key (pk))")
	tk.MustExec("CREATE INDEX idx_t_a ON t(`a`)")
	tk.MustExec("CREATE INDEX idx_t_b ON t(`b`)")
	tk.MustExec("INSERT INTO t VALUES (148307968, DEFAULT, 'nndsjofmpdxvhqv') , (-1327693824, DEFAULT, 'pnndsjofmpdxvhqvfny') , (-277544960, DEFAULT, 'fpnndsjo')")
	tk.MustExec("DROP TABLE IF EXISTS s")
	tk.MustExec("CREATE TABLE `s` (`a` int, `b` char (20))")
	tk.MustExec("CREATE INDEX idx_s_a ON s(`a`)")
	tk.MustExec("INSERT INTO s VALUES (-277544960, 'fpnndsjo') , (2, 'kfpnndsjof') , (2, 'vtdiockfpn'), (-277544960, 'fpnndsjo') , (2, 'kfpnndsjof') , (6, 'ckfp')")
	tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960"))
	tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960"))
	tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960"))
	tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a").Sort().Check(testkit.Rows("-1327693824", "-277544960", "-277544960", "148307968"))
	tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a").Sort().Check(testkit.Rows("-1327693824", "-277544960", "-277544960", "148307968"))
	tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a").Sort().Check(testkit.Rows("-1327693824", "-277544960", "-277544960", "148307968"))
	tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960").Sort().Check(testkit.Rows("-277544960", "-277544960"))
	tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960").Sort().Check(testkit.Rows("-277544960", "-277544960"))
	tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960").Sort().Check(testkit.Rows("-277544960", "-277544960"))
	tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t right join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960", "<nil>", "<nil>", "<nil>", "<nil>"))
	tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t right join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960", "<nil>", "<nil>", "<nil>", "<nil>"))
	tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t right join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960", "<nil>", "<nil>", "<nil>", "<nil>"))
	tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc").Check(testkit.Rows("148307968", "-277544960", "-277544960", "-1327693824"))
	tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc").Check(testkit.Rows("148307968", "-277544960", "-277544960", "-1327693824"))
	tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc").Check(testkit.Rows("148307968", "-277544960", "-277544960", "-1327693824"))
	// Self-join through UNION ALL on a bigint primary key.
	tk.MustExec("DROP TABLE IF EXISTS t;")
	tk.MustExec("CREATE TABLE t(a BIGINT PRIMARY KEY, b BIGINT);")
	tk.MustExec("INSERT INTO t VALUES(1, 2);")
	tk.MustQuery("SELECT /*+ INL_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a;").Check(testkit.Rows("1 2 1 2", "1 2 1 2"))
	tk.MustQuery("SELECT /*+ INL_HASH_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_HASH_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a;").Check(testkit.Rows("1 2 1 2", "1 2 1 2"))
	tk.MustQuery("SELECT /*+ INL_MERGE_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_MERGE_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a;").Check(testkit.Rows("1 2 1 2", "1 2 1 2"))
	// Decimal join keys: the NULL row must not match anything.
	tk.MustExec(`drop table if exists t;`)
	tk.MustExec(`create table t(a decimal(6,2), index idx(a));`)
	tk.MustExec(`insert into t values(1.01), (2.02), (NULL);`)
	tk.MustQuery(`select /*+ INL_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a;`).Check(testkit.Rows(
		`1.01`,
		`2.02`,
	))
	tk.MustQuery(`select /*+ INL_HASH_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a;`).Check(testkit.Rows(
		`1.01`,
		`2.02`,
	))
	tk.MustQuery(`select /*+ INL_MERGE_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a;`).Check(testkit.Rows(
		`1.01`,
		`2.02`,
	))
	// Composite unique key with an arithmetic join condition (t2.b + 4).
	tk.MustExec(`drop table if exists t;`)
	tk.MustExec(`create table t(a bigint, b bigint, unique key idx1(a, b));`)
	tk.MustExec(`insert into t values(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6);`)
	tk.MustExec(`set @@tidb_init_chunk_size = 2;`)
	tk.MustQuery(`select /*+ INL_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4;`).Check(testkit.Rows(
		`1 1 <nil> <nil>`,
		`1 2 <nil> <nil>`,
		`1 3 <nil> <nil>`,
		`1 4 <nil> <nil>`,
		`1 5 1 1`,
		`1 6 1 2`,
	))
	tk.MustQuery(`select /*+ INL_HASH_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4;`).Check(testkit.Rows(
		`1 1 <nil> <nil>`,
		`1 2 <nil> <nil>`,
		`1 3 <nil> <nil>`,
		`1 4 <nil> <nil>`,
		`1 5 1 1`,
		`1 6 1 2`,
	))
	tk.MustQuery(`select /*+ INL_MERGE_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4;`).Check(testkit.Rows(
		`1 1 <nil> <nil>`,
		`1 2 <nil> <nil>`,
		`1 3 <nil> <nil>`,
		`1 4 <nil> <nil>`,
		`1 5 1 1`,
		`1 6 1 2`,
	))
	// NULL outer join keys against primary-key and secondary-index inner sides.
	tk.MustExec(`drop table if exists t1, t2, t3;`)
	tk.MustExec("create table t1(a int primary key, b int)")
	tk.MustExec("insert into t1 values(1, 0), (2, null)")
	tk.MustExec("create table t2(a int primary key)")
	tk.MustExec("insert into t2 values(0)")
	tk.MustQuery("select /*+ INL_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a;").Sort().Check(testkit.Rows(
		`1 0 0`,
		`2 <nil> <nil>`,
	))
	tk.MustQuery("select /*+ INL_HASH_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a;").Sort().Check(testkit.Rows(
		`1 0 0`,
		`2 <nil> <nil>`,
	))
	tk.MustQuery("select /*+ INL_MERGE_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a;").Sort().Check(testkit.Rows(
		`1 0 0`,
		`2 <nil> <nil>`,
	))
	tk.MustExec("create table t3(a int, key(a))")
	tk.MustExec("insert into t3 values(0)")
	tk.MustQuery("select /*+ INL_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a;").Check(testkit.Rows(
		`1 0 0`,
		`2 <nil> <nil>`,
	))
	tk.MustQuery("select /*+ INL_HASH_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a;").Check(testkit.Rows(
		`1 0 0`,
		`2 <nil> <nil>`,
	))
	tk.MustQuery("select /*+ INL_MERGE_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a;").Check(testkit.Rows(
		`2 <nil> <nil>`,
		`1 0 0`,
	))
	// Time-typed keys, small chunk sizes and high lookup concurrency:
	// verify both the plan shape (EXPLAIN) and the row count for each variant.
	tk.MustExec("drop table if exists t,s")
	tk.MustExec("create table t(a int primary key auto_increment, b time)")
	tk.MustExec("create table s(a int, b time)")
	tk.MustExec("alter table s add index idx(a,b)")
	tk.MustExec("set @@tidb_index_join_batch_size=4;set @@tidb_init_chunk_size=1;set @@tidb_max_chunk_size=32; set @@tidb_index_lookup_join_concurrency=15;")
	tk.MustExec("set @@session.tidb_executor_concurrency = 4;")
	tk.MustExec("set @@session.tidb_hash_join_concurrency = 5;")
	// insert 64 rows into `t`
	tk.MustExec("insert into t values(0, '01:01:01')")
	for i := 0; i < 6; i++ {
		tk.MustExec("insert into t select 0, b + 1 from t")
	}
	tk.MustExec("insert into s select a, b - 1 from t")
	tk.MustExec("analyze table t;")
	tk.MustExec("analyze table s;")
	tk.MustQuery("desc format = 'brief' select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows(
		"HashAgg 1.00 root  funcs:count(1)->Column#6",
		"└─IndexJoin 64.00 root  inner join, inner:IndexReader, outer key:test.t.a, inner key:test.s.a, equal cond:eq(test.t.a, test.s.a), other cond:lt(test.s.b, test.t.b)",
		"  ├─TableReader(Build) 64.00 root  data:Selection",
		"  │ └─Selection 64.00 cop[tikv]  not(isnull(test.t.b))",
		"  │   └─TableFullScan 64.00 cop[tikv] table:t keep order:false",
		"  └─IndexReader(Probe) 64.00 root  index:Selection",
		"    └─Selection 64.00 cop[tikv]  not(isnull(test.s.a)), not(isnull(test.s.b))",
		"      └─IndexRangeScan 64.00 cop[tikv] table:s, index:idx(a, b) range: decided by [eq(test.s.a, test.t.a) lt(test.s.b, test.t.b)], keep order:false"))
	tk.MustQuery("select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64"))
	tk.MustExec("set @@tidb_index_lookup_join_concurrency=1;")
	tk.MustQuery("select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64"))
	tk.MustQuery("desc format = 'brief' select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows(
		"HashAgg 1.00 root  funcs:count(1)->Column#6",
		"└─IndexMergeJoin 64.00 root  inner join, inner:IndexReader, outer key:test.t.a, inner key:test.s.a, other cond:lt(test.s.b, test.t.b)",
		"  ├─TableReader(Build) 64.00 root  data:Selection",
		"  │ └─Selection 64.00 cop[tikv]  not(isnull(test.t.b))",
		"  │   └─TableFullScan 64.00 cop[tikv] table:t keep order:false",
		"  └─IndexReader(Probe) 64.00 root  index:Selection",
		"    └─Selection 64.00 cop[tikv]  not(isnull(test.s.a)), not(isnull(test.s.b))",
		"      └─IndexRangeScan 64.00 cop[tikv] table:s, index:idx(a, b) range: decided by [eq(test.s.a, test.t.a) lt(test.s.b, test.t.b)], keep order:true",
	))
	tk.MustQuery("select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64"))
	tk.MustExec("set @@tidb_index_lookup_join_concurrency=1;")
	tk.MustQuery("select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64"))
	tk.MustQuery("desc format = 'brief' select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows(
		"HashAgg 1.00 root  funcs:count(1)->Column#6",
		"└─IndexHashJoin 64.00 root  inner join, inner:IndexReader, outer key:test.t.a, inner key:test.s.a, equal cond:eq(test.t.a, test.s.a), other cond:lt(test.s.b, test.t.b)",
		"  ├─TableReader(Build) 64.00 root  data:Selection",
		"  │ └─Selection 64.00 cop[tikv]  not(isnull(test.t.b))",
		"  │   └─TableFullScan 64.00 cop[tikv] table:t keep order:false",
		"  └─IndexReader(Probe) 64.00 root  index:Selection",
		"    └─Selection 64.00 cop[tikv]  not(isnull(test.s.a)), not(isnull(test.s.b))",
		"      └─IndexRangeScan 64.00 cop[tikv] table:s, index:idx(a, b) range: decided by [eq(test.s.a, test.t.a) lt(test.s.b, test.t.b)], keep order:false",
	))
	tk.MustQuery("select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64"))
	tk.MustExec("set @@tidb_index_lookup_join_concurrency=1;")
	tk.MustQuery("select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64"))
	// issue15658
	tk.MustExec("drop table t1, t2")
	tk.MustExec("create table t1(id int primary key)")
	tk.MustExec("create table t2(a int, b int)")
	tk.MustExec("insert into t1 values(1)")
	tk.MustExec("insert into t2 values(1,1),(2,1)")
	tk.MustQuery("select /*+ inl_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id;").Check(testkit.Rows("1 1 1"))
	tk.MustQuery("select /*+ inl_hash_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id;").Check(testkit.Rows("1 1 1"))
	tk.MustQuery("select /*+ inl_merge_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id;").Check(testkit.Rows("1 1 1"))
}
// TestExplainAnalyzeIndexHashJoin verifies the runtime stats printed by
// EXPLAIN ANALYZE for an index lookup hash join (issue 43597): the inner
// worker section of the execution info must always contain the `join:`
// field, even when the query finishes early due to LIMIT 1.
func TestExplainAnalyzeIndexHashJoin(t *testing.T) {
	// Issue 43597
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Fix: drop the table that is actually created below ("t", not "t1"),
	// so a rerun against a dirty schema does not fail on CREATE TABLE.
	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t (a int, index idx(a));")
	// Build one multi-row INSERT for values 0..1024 with a Builder to
	// avoid quadratic string concatenation.
	var sb strings.Builder
	sb.WriteString("insert into t values")
	for i := 0; i <= 1024; i++ {
		if i != 0 {
			sb.WriteString(",")
		}
		fmt.Fprintf(&sb, "(%d)", i)
	}
	tk.MustExec(sb.String())
	for i := 0; i <= 10; i++ {
		// Test for index lookup hash join.
		rows := tk.MustQuery("explain analyze select /*+ INL_HASH_JOIN(t1, t2) */ * from t t1 join t t2 on t1.a=t2.a limit 1;").Rows()
		require.Equal(t, 7, len(rows))
		require.Regexp(t, "IndexHashJoin.*", rows[1][0])
		// When innerWorkerRuntimeStats.join is negative, `join:` will not print.
		require.Regexp(t, "time:.*, loops:.*, inner:{total:.*, concurrency:.*, task:.*, construct:.*, fetch:.*, build:.*, join:.*}", rows[1][5])
	}
}
|
package main
import (
"encoding/csv"
"flag"
//"fmt"
svg "github.com/ajstarks/svgo"
"io"
"log"
"math/rand"
"os"
"strconv"
)
func rn(n int) int { return rand.Intn(n) }
// main reads up to 6 (month, usage) rows from a CSV file and renders a
// bar chart of the values as SVG on stdout, with a red threshold line.
func main() {
	canvas := svg.New(os.Stdout)

	var file string
	flag.StringVar(&file, "input", "input.csv", "input file")
	flag.Parse()

	csvfile, err := os.Open(file)
	if err != nil {
		log.Fatalln("Couldn't open the csv file", err)
	}
	// The file was previously leaked; close it when main returns.
	defer csvfile.Close()
	r := csv.NewReader(csvfile)

	type mon struct {
		Month string
		Usage int
	}
	const n = 6
	dat := [n]mon{}
	i := 0
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		// Guard the fixed-size array: an input with more than n rows
		// previously panicked with an index-out-of-range.
		if i >= n {
			log.Printf("input has more than %d rows; ignoring the rest", n)
			break
		}
		dat[i].Month = record[0]
		// A malformed usage value was previously silently treated as 0.
		x, err := strconv.Atoi(record[1])
		if err != nil {
			log.Fatalf("row %d: bad usage value %q: %v", i, record[1], err)
		}
		dat[i].Usage = x
		i++
	}

	width := len(dat)*60 + 10
	height := 150
	threshold := 160
	// Scale bars relative to the largest usage value (at least 100).
	max := 100
	for _, item := range dat {
		if item.Usage > max {
			max = item.Usage
		}
	}

	canvas.Start(width, height)
	canvas.Rect(0, 0, width, height, "fill:white")
	for i, val := range dat {
		percent := val.Usage * (height - 50) / max
		canvas.Rect(i*60+10, (height-50)-percent, 50, percent, "fill:rgb(77,200,232)")
		canvas.Text(i*60+35, height-24, val.Month, "font-size:14pt;fill:rgb(150, 150, 150);text-anchor:middle")
	}
	// Threshold marker: a line plus a translucent band above it.
	threshPercent := threshold * (height - 50) / max
	canvas.Line(0, height-threshPercent, width, height-threshPercent, "stroke: rgb(255,100,100); opacity: 0.8; stroke-width: 2px")
	canvas.Rect(0, 0, width, height-threshPercent, "fill:rgb(255, 100, 100); opacity: 0.1")
	// Baseline under the bars.
	canvas.Line(0, height-50, width, height-50, "stroke: rgb(150, 150, 150); stroke-width:2")
	canvas.End()
}
|
package main
import (
"os"
"path/filepath"
"testing"
)
// TestValidateDocsDirStructure builds various docs directory layouts in a
// temp dir and checks validateDocsDirStructure accepts exactly the valid one.
func TestValidateDocsDirStructure(t *testing.T) {
	testCases := []struct {
		name         string
		dirStructure []string // entries with an extension become files, others dirs
		expectedErr  bool
	}{
		{
			name:         "Valid directory structure",
			dirStructure: []string{"mutation-examples", "validation", "intro.md", "pspintro.md"},
			expectedErr:  false,
		},
		{
			name:         "Unexpected directory",
			dirStructure: []string{"mutation-examples", "unexpected-dir", "validation", "intro.md", "pspintro.md"},
			expectedErr:  true,
		},
		{
			name:         "Unexpected file",
			dirStructure: []string{"mutation-examples", "validation", "unexpected-file.md", "intro.md", "pspintro.md"},
			expectedErr:  true,
		},
		{
			name:         "Missing file",
			dirStructure: []string{"mutation-examples", "validation", "intro.md"},
			expectedErr:  true,
		},
		{
			name:         "Missing directory",
			dirStructure: []string{"mutation-examples", "intro.md", "pspintro.md"},
			expectedErr:  true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Create a temporary directory for testing
			tmpDir, err := os.MkdirTemp("", "test")
			if err != nil {
				t.Fatalf("Error creating temp dir: %v", err)
			}
			defer os.RemoveAll(tmpDir)

			// Create the directory structure
			docsDirPath := filepath.Join(tmpDir, docsDirEntry)
			err = os.MkdirAll(docsDirPath, 0o755)
			if err != nil {
				t.Fatalf("Error creating docs dir: %v", err)
			}
			for _, item := range tc.dirStructure {
				path := filepath.Join(docsDirPath, item)
				if filepath.Ext(path) == "" {
					if os.Mkdir(path, 0o755) != nil {
						t.Fatalf("Error creating directory: %v", path)
					}
				} else {
					// os.WriteFile creates and closes the empty file in one
					// step, avoiding the previous defer-in-loop Close.
					if err := os.WriteFile(path, []byte{}, 0o644); err != nil {
						t.Fatalf("Error creating the file %s: %v", item, err)
					}
				}
			}

			err = validateDocsDirStructure(tmpDir)
			if tc.expectedErr && err == nil {
				t.Errorf("Expected error, but got nil")
			}
			// Previously the no-error expectation was never asserted.
			if !tc.expectedErr && err != nil {
				t.Errorf("Expected no error, but got: %v", err)
			}
		})
	}
}
// TestContains checks membership reporting for present, absent and
// empty-slice cases.
func TestContains(t *testing.T) {
	cases := []struct {
		name     string
		items    []string
		item     string
		expected bool
	}{
		{name: "Item in list", items: []string{"item1", "item2", "item3"}, item: "item2", expected: true},
		{name: "Item not in list", items: []string{"item1", "item2", "item3"}, item: "item4", expected: false},
		{name: "list is empty", items: []string{}, item: "foo", expected: false},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			if got := contains(c.items, c.item); got != c.expected {
				t.Errorf("Expected %v, but got %v", c.expected, got)
			}
		})
	}
}
|
package accgenerator_test
import(
"testing"
"dbcreator"
//"fmt"
)
// Test_FiDBCreator_Process exercises teardown of the account DB creator.
// NOTE(review): the file imports "dbcreator" but this function references
// the `accgenerator` package — one of the two looks wrong; confirm the
// intended import path. The Process() call is currently disabled, so only
// Delete() is exercised.
func Test_FiDBCreator_Process(t *testing.T){
	m := accgenerator.NewAccDBCreator()
	//m.Process()
	m.Delete()
}
|
package phpCommons
import (
"testing"
"fmt"
)
// Test_array_values runs array_values over a slice containing duplicates
// and prints the result for manual inspection; it asserts nothing.
func Test_array_values(t *testing.T) {
	input := []string{"aaaaa", "vvvv", "cc", "aaaa", "aaaaa", "aaaaa", "cc", "vvvv"}
	result := array_values(input)
	fmt.Printf("%#v\n", result)
	// c := []string{"aaaa", "aaaaa", "cc", "vvvv"}
}
|
package sndtag
import (
"fmt"
"io"
)
// Types of tags that are supported.
// TODO: support id3
const (
	RIFF = iota // RIFF (WAV) container metadata
	ID3v1 // ID3 version 1 tag
	ID3v2 // ID3 version 2 tag
)
// New creates a new map with metadata read from an io.Reader.
// If the type is not one of the supported types then an error is returned.
func New(r io.Reader) (map[string]string, error) {
	// Read the first 3 bytes of the header. io.ReadFull retries short
	// reads, which a single r.Read call does not guarantee to avoid, and
	// returns io.ErrUnexpectedEOF when fewer than 3 bytes are available.
	header := make([]byte, 3)
	if _, err := io.ReadFull(r, header); err != nil {
		return nil, err
	}
	// Figure out the type.
	switch x := string(header); x {
	default:
		return nil, fmt.Errorf("unrecognized header: %s", x)
	case "TAG":
		// TODO: handle id3
		return newID3(r)
	case "RIF":
		// Consume and verify the 4th byte ('F') of the RIFF magic.
		if err := checkRIFFLastByte(r, header); err != nil {
			return nil, err
		}
		// io.EOF from the WAV parser means "no more chunks", not failure.
		getter, err := newWav(r)
		if err != nil && err != io.EOF {
			return nil, err
		}
		return getter, nil
	}
}
// checkRIFFLastByte checks that the 4th byte of a RIFF file is 'F'.
func checkRIFFLastByte(r io.Reader, header []byte) error {
// Read one more byte for RIFF type.
headerLastByte := make([]byte, 1)
bytesRead, err := r.Read(headerLastByte)
if err != nil {
return err
}
if expected, got := 1, bytesRead; expected != got {
return fmt.Errorf("expected to read %d bytes, actually read %d", expected, got)
}
if headerLastByte[0] != 'F' {
hdr := string(append(header, headerLastByte...))
return fmt.Errorf("expected RIFF, got %s", hdr)
}
return nil
}
|
package csvConvert
import (
"testing"
)
// TestOpenCSV verifies that OpenCSV reads the fixture file and yields at
// least one record.
func TestOpenCSV(t *testing.T) {
	// test.csv used for this test
	const file = "./test.csv"

	data, err := OpenCSV(file)
	if err != nil {
		t.Fatal(err)
	}

	// check content data
	if len(data) < 1 {
		t.Fatal("no data inside test.csv")
	}
}
|
package common
// IsStringSliceHas reports whether slice contains target, returning the
// match flag and the index of the first occurrence (or -1 when absent).
// Note target is an interface{}; a non-string target never matches.
func IsStringSliceHas(target interface{}, slice []string) (bool, int) {
	for i := range slice {
		if slice[i] == target {
			return true, i
		}
	}
	return false, -1
}
|
package django
import (
"fmt"
"github.com/spf13/viper"
"io/ioutil"
"os"
"path/filepath"
"projcli/utils"
"time"
)
// workDir caches the process working directory, resolved once at startup.
var workDir string
// init resolves the current working directory; failures are routed
// through the project's generic error handler.
// NOTE(review): utils.HandleErr presumably aborts on error — confirm,
// otherwise workDir may later be used while still empty.
func init() {
	wd, err := os.Getwd()
	if err != nil {
		utils.HandleErr(err)
	}
	workDir = wd
}
// setup points viper at the YAML config named configName (any extension is
// stripped) in the working directory, enables verbose mode and loads it.
func setup(configName string) {
	ext := filepath.Ext(configName)
	name := configName[:len(configName)-len(ext)]

	viper.AddConfigPath(workDir)
	viper.SetConfigName(name)
	viper.SetConfigType("yaml")
	viper.Set("Verbose", true)

	if err := viper.ReadInConfig(); err != nil {
		utils.HandleErr(err)
	}
}
// New scaffolds a Django project described by the given config file:
// it runs django-admin startproject, copies the config into the new
// project, then installs pip (get-pip.py is fetched concurrently while
// the project is created) and the default requirements.
func New(configName string) {
	// Fetch get-pip.py in the background while the project is created.
	pipChan := make(chan string, 1)
	go func() {
		pipTxt, err := utils.HttpGet("bootstrap.pypa.io/get-pip.py")
		if err != nil {
			utils.HandleErr(err)
		}
		pipChan <- pipTxt
	}()

	setup(configName)

	// Read the project name once; a non-string value is a config error
	// (previously an unchecked type assertion that would panic).
	projectName := viper.Get("project")
	project, ok := projectName.(string)
	if !ok {
		utils.HandleErr(fmt.Errorf("config key 'project' must be a string, got %T", projectName))
		return
	}
	fmt.Println("Creating a new Django Application with name: ", project)
	utils.RunCmd("django-admin.py", []string{"startproject", project})

	fmt.Println("Copying: ", configName)
	utils.RunCmd("cp", []string{workDir + "/" + configName, workDir + "/" + project})

	select {
	case pipTxt := <-pipChan:
		pipPath := workDir + "/" + project + "/get-pip.py"
		requirementsPath := workDir + "/" + project + "/requirements.txt"
		// These write errors were previously silently dropped.
		if err := ioutil.WriteFile(pipPath, []byte(pipTxt), 0755); err != nil {
			utils.HandleErr(err)
		}
		if err := ioutil.WriteFile(requirementsPath, []byte(Requirements), 0644); err != nil {
			utils.HandleErr(err)
		}
		fmt.Println("Installing pip...")
		utils.RunCmd(pipPath, []string{})
		fmt.Println("Installing requirements...")
		utils.RunCmd("pip", []string{"install", "-r", requirementsPath})
	case <-time.After(1 * time.Second):
		// Give the download one more second beyond the setup work above.
		fmt.Println("Could not install pip")
	}
}
// Migrations loads the config and runs `manage.py makemigrations`.
func Migrations(configName string) {
	setup(configName)
	fmt.Println("Making Migrations...")
	utils.RunCmd(workDir+"/manage.py", []string{"makemigrations"})
}
// Migrate loads the config and runs `manage.py migrate`.
func Migrate(configName string) {
	setup(configName)
	fmt.Println("Migrating Database...")
	utils.RunCmd(workDir+"/manage.py", []string{"migrate"})
}
// App creates every Django application listed under the `applications`
// config key, running one `manage.py startapp` per app concurrently and
// waiting for them all to finish.
func App(configName string) {
	setup(configName)

	applications := viper.Get("applications").([]interface{})
	done := make(chan bool, len(applications))
	for _, app := range applications {
		for appName := range app.(map[interface{}]interface{}) {
			go func(name string) {
				fmt.Println("Creating Application: ", name)
				utils.RunCmd(workDir+"/manage.py", []string{"startapp", name})
				done <- true
			}(appName.(string))
		}
	}
	// Wait for one completion signal per application entry.
	// NOTE(review): if a map entry holds more than one key, more signals
	// are sent than are awaited here — confirm each entry has exactly one
	// key, otherwise some goroutines are not waited for.
	for range applications {
		<-done
	}
}
|
package text
import (
"fmt"
)
// Box wraps message in a single-row Unicode box drawing, e.g.
//
//	╭────╮
//	│ hi │
//	╰────╯
//
// Fix: the loop bounds previously referenced an undefined identifier `l`
// instead of messageLength, so the package did not compile.
// NOTE(review): width is based on len(message) in bytes, so multi-byte
// UTF-8 messages render a box wider than the visible text — confirm
// whether rune-width handling is wanted.
func Box(message string) (output string) {
	messageLength := len(message)
	// Horizontal rule sized to the message plus one space of padding on
	// each side.
	horizontal := ""
	for i := 0; i < messageLength+2; i++ {
		horizontal += "─"
	}
	output += fmt.Sprintf("╭")
	output += horizontal
	output += fmt.Sprintf("╮\n")
	output += fmt.Sprintf("│ %v │\n", message)
	output += fmt.Sprintf("╰")
	output += horizontal
	output += fmt.Sprintf("╯\n")
	return output
}
|
package environment
import (
	"fmt"
	"os"
	"sort"
	"strings"

	"github.com/getynge/environment/filter"
	"gopkg.in/alessio/shellescape.v1"
)
// Environment is a mutable set of shell environment variables stored as a
// name → value map.
type Environment struct {
	m map[string]string // variable name -> value
}
// New creates an empty Environment
func New() Environment {
	return Environment{m: map[string]string{}}
}
// Shell creates a new Environment, with the environment variables of the
// current process added to it.
func Shell() (e Environment) {
	e = New()
	for _, pair := range os.Environ() {
		// Split on the first '=' only: values may themselves contain '='
		// (connection strings, base64, etc.). The previous plain
		// strings.Split truncated such values at their first '='.
		kv := strings.SplitN(pair, "=", 2)
		k := kv[0]
		v := ""
		if len(kv) == 2 {
			v = kv[1]
		}
		e.m[k] = v
	}
	return e
}
// String returns a runnable export command for this set of environment variables
//
// e.g. if e contains the key value pair "GREETING": "Hello World!", then String would return
// `export GREETING="Hello World!"`
//
// Keys are emitted in sorted order so the output is deterministic
// (previously it depended on random map iteration order).
func (e Environment) String() string {
	keys := make([]string, 0, len(e.m))
	for k := range e.m {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	b := new(strings.Builder)
	b.WriteString("export")
	for _, k := range keys {
		b.WriteString(fmt.Sprintf(` %s=%s`, k, shellescape.Quote(e.m[k])))
	}
	return b.String()
}
// Set sets the given key to the given value, after running all filters on the key value pair.
// If any of the filters fail, the variable is not added to the Environment and an error is returned
//
// Filters run in three stages, each of which may rewrite both key and
// value: the global entrance group, then the group registered for the
// key, then the global exit group. Note that the per-key group lookup
// uses the key as already rewritten by the entrance filters.
func (e Environment) Set(key, value string) (err error) {
	for _, f := range filter.GlobalEntranceGroup {
		if key, value, err = f.Filter(key, value); err != nil {
			return err
		}
	}
	for _, f := range filter.GlobalGroups[key] {
		if key, value, err = f.Filter(key, value); err != nil {
			return err
		}
	}
	for _, f := range filter.GlobalExitGroup {
		if key, value, err = f.Filter(key, value); err != nil {
			return err
		}
	}
	e.m[key] = value
	return err
}
// Get looks up key, reporting its value and whether it is present.
func (e Environment) Get(key string) (string, bool) {
	v, ok := e.m[key]
	return v, ok
}
// Remove deletes key from the Environment; removing an absent key is a
// no-op.
func (e Environment) Remove(key string) {
	delete(e.m, key)
}
|
package service
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/rbonnat/blockchain-in-go/blockchain"
)
const (
	// testTime is the fixed RFC3339 instant returned by the mocked clock.
	testTime = "2020-07-04T14:05:53-04:00"
)
// mockNowFunc parses testTime and returns a clock function that always
// reports that instant, failing the test immediately if parsing fails.
func mockNowFunc(t *testing.T) func() time.Time {
	fixed, err := time.Parse(time.RFC3339, testTime)
	require.NoError(t, err)
	return func() time.Time {
		return fixed
	}
}
// TestNew checks that New returns a *Service whose blockchain matches one
// built directly with the same context and mocked clock.
func TestNew(t *testing.T) {
	nowFunc := mockNowFunc(t)
	ctx := context.TODO()
	tests := map[string]struct {
		expected *Service
	}{
		"Succeed and returns a pointer to a Service": {
			&Service{
				blockchain.New(ctx, nowFunc),
			},
		},
	}
	for name, test := range tests {
		s := New(ctx, nowFunc)
		assert.IsType(t, test.expected, s, "Type is different: '%s'", name)
		assert.Equal(t, test.expected.Blocks(ctx), s.Blocks(ctx), "Blockchain are different: '%s'", name)
	}
}
// TestInsertNewBlock — NOTE(review): this body is an exact copy of TestNew
// and never inserts a block, so the insert path is currently untested;
// confirm the intended coverage and exercise the service's insert method.
func TestInsertNewBlock(t *testing.T) {
	nowFunc := mockNowFunc(t)
	ctx := context.TODO()
	tests := map[string]struct {
		expected *Service
	}{
		"Succeed and returns a pointer to a Service": {
			&Service{
				blockchain.New(ctx, nowFunc),
			},
		},
	}
	for name, test := range tests {
		s := New(ctx, nowFunc)
		assert.IsType(t, test.expected, s, "Type is different: '%s'", name)
		assert.Equal(t, test.expected.Blocks(ctx), s.Blocks(ctx), "Blockchain are different: '%s'", name)
	}
}
|
// Copyright © 2016 Nathan Sharpe <nathanjsharpe@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"github.com/nathanjsharpe/trakitapi"
"github.com/nathanjsharpe/trakitcli/config"
"github.com/nathanjsharpe/trakitcli/utils"
"github.com/spf13/cobra"
)
// loginCmd prompts for credentials, creates a session token against the
// selected environment's app API, and persists the session on success.
var loginCmd = &cobra.Command{
	Use:     "login <environment>",
	Short:   "Log in to Trakit",
	Long: `Log in to Trakit.
Prompts for username and password and attempts to login to the specified environment. On a successful login, creates a new session
and sets the current environment.`,
	Example: `  trakitcli login --env party
  trakitcli login --env io`,
	Aliases: []string{"li"},
	Run: func(cmd *cobra.Command, args []string) {
		if err := requireEnvironment(); err == nil {
			// NOTE(review): input errors are discarded here — confirm
			// GetInput/GetHiddenInput cannot fail in a way that matters.
			username, _ := utils.GetInput("Username: ")
			password, _ := utils.GetHiddenInput("Password: ")
			c := trakitapi.NewClient(nil).SetApiUrl(CurrentEnvironment.Urls["appApi"])
			payload := trakitapi.SessionTokenCreateRequest{
				Username: username,
				Password: password,
			}
			st, user, _, err := c.SessionToken.Create(&payload)
			if err != nil {
				// Leave any previously saved session untouched on failure.
				printError("\n" + err.Error())
				printVerbose("Retaining old session")
			} else {
				printSuccess("\nLogged into " + EnvKey + " as " + username)
				session := config.Session{
					Token:          *st,
					User:           *user,
					EnvironmentKey: EnvKey,
				}
				// NOTE(review): Save's result is ignored — confirm whether a
				// failed write should be reported to the user.
				printVerbose("Saving session to " + sessionFile)
				session.Save(sessionFile)
			}
		} else {
			printError(err.Error())
			cmd.Help()
		}
	},
}
// init registers the login command with the root command tree.
func init() {
	RootCmd.AddCommand(loginCmd)
}
|
package main
import "testing"
// TestHashSet exercises Add/Contains/Remove on the hash set, including a
// duplicate insert.
func TestHashSet(t *testing.T) {
	set := Constructor()
	set.Add(1)
	set.Add(2)

	if !set.Contains(1) {
		t.Fatal()
	}
	if set.Contains(3) {
		t.Fatal()
	}

	// Inserting an existing element must be a no-op.
	set.Add(2)
	if !set.Contains(2) {
		t.Fatal()
	}

	set.Remove(2)
	if set.Contains(2) {
		t.Fatal()
	}
}
|
package problem0084
import "testing"
// TestSolve checks largestRectangleArea against known expected answers
// (previously the results were only logged, so the test asserted nothing).
func TestSolve(t *testing.T) {
	tests := []struct {
		heights []int
		want    int
	}{
		{[]int{2, 1, 2}, 3}, // height 1 spanning all three bars
		{[]int{1, 1}, 2},
	}
	for _, tc := range tests {
		if got := largestRectangleArea(tc.heights); got != tc.want {
			t.Errorf("largestRectangleArea(%v) = %d, want %d", tc.heights, got, tc.want)
		}
	}
}
|
package ruffe
import "net/http"
// emptyHandler is a no-op Handler that always succeeds; useful as a
// default when no handler is registered.
var emptyHandler = HandlerFunc(func(Context) error { return nil })
// HandlerFunc adapts an ordinary function to the Handler interface.
type HandlerFunc func(Context) error
// Handle invokes the wrapped function with ctx.
func (h HandlerFunc) Handle(ctx Context) error {
	return h(ctx)
}
// Handler processes a request Context, returning an error on failure.
type Handler interface {
	Handle(h Context) error
}
// HTTPHandlerFunc adapts a net/http style handler to the Handler interface.
type HTTPHandlerFunc func(http.ResponseWriter, *http.Request)
// Handle calls the wrapped function with the Context and its request.
// NOTE(review): ctx is passed where an http.ResponseWriter is expected,
// which only compiles if Context embeds http.ResponseWriter — confirm
// against the Context definition. Errors from the handler are not
// surfaced; this always returns nil.
func (h HTTPHandlerFunc) Handle(ctx Context) error {
	h(ctx, ctx.Request())
	return nil
}
|
// gRPC server
package main
import (
"bytes"
"flag"
"fmt"
"hash"
"io"
"log"
"net"
"os"
"runtime/pprof"
"sync"
"time"
"google.golang.org/grpc"
"github.com/glycerine/blake2b" // vendor https://github.com/dchest/blake2b"
"google.golang.org/grpc/credentials"
"github.com/glycerine/bchan"
"github.com/glycerine/grpc-demo/api"
pb "github.com/glycerine/grpc-demo/streambigfile"
)
// PeerServerClass implements the pb.PeerServer gRPC service, receiving
// files streamed from peers and tracking how many completed.
type PeerServerClass struct {
	lgs api.LocalGetSet // local storage backend
	cfg *ServerConfig // server configuration (identity, ports, transport)
	GotFile *bchan.Bchan // broadcasts the running count of received files
	mut sync.Mutex // guards filesReceivedCount
	filesReceivedCount int64
}
// NewPeerServerClass returns a PeerServerClass using lgs for local storage
// and cfg for configuration; GotFile is a one-slot broadcast channel.
func NewPeerServerClass(lgs api.LocalGetSet, cfg *ServerConfig) *PeerServerClass {
	return &PeerServerClass{
		lgs:     lgs,
		cfg:     cfg,
		GotFile: bchan.New(1),
	}
}
// IncrementGotFileCount bumps the received-file counter under the lock,
// then broadcasts the new total on GotFile (outside the lock, as before).
func (s *PeerServerClass) IncrementGotFileCount() {
	s.mut.Lock()
	s.filesReceivedCount++
	n := s.filesReceivedCount
	s.mut.Unlock()

	s.GotFile.Bcast(n)
}
// SendFile implements the pb.PeerServer interface; the server is receiving
// a file here, because the client called SendFile() on the other end.
//
// Each chunk is verified against a per-chunk Blake2b checksum and a
// cumulative checksum over everything received so far. The deferred
// SendAndClose always acknowledges the stream — success or failure —
// with the byte count, final checksum and any error text.
func (s *PeerServerClass) SendFile(stream pb.Peer_SendFileServer) (err error) {
	log.Printf("%s peer.Server SendFile (for receiving a file) starting!", s.cfg.MyID)
	var chunkCount int64
	path := ""
	var hasher hash.Hash
	hasher, err = blake2b.New(nil)
	if err != nil {
		return err
	}
	var finalChecksum []byte
	// Debug switch: when true, received chunks are also written to disk.
	const writeFileToDisk = false
	var fd *os.File
	var bytesSeen int64
	// The deferred ack reads the named return err, so it reports whatever
	// error the receive loop below ultimately returned.
	defer func() {
		if fd != nil {
			fd.Close()
		}
		finalChecksum = []byte(hasher.Sum(nil))
		endTime := time.Now()
		log.Printf("%s this server.SendFile() call got %v chunks, byteCount=%v. with final checksum '%x'. defer running/is returning with err='%v'", s.cfg.MyID, chunkCount, bytesSeen, finalChecksum, err)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		sacErr := stream.SendAndClose(&pb.BigFileAck{
			Filepath:         path,
			SizeInBytes:      bytesSeen,
			RecvTime:         uint64(endTime.UnixNano()),
			WholeFileBlake2B: finalChecksum,
			Err:              errStr,
		})
		if sacErr != nil {
			log.Printf("warning: sacErr='%s' in gserv server.go PeerServerClass.SendFile() attempt to stream.SendAndClose().", sacErr)
		}
	}()
	firstChunkSeen := false
	var nk *pb.BigFileChunk
	for {
		nk, err = stream.Recv()
		if err == io.EOF {
			if nk != nil && len(nk.Data) > 0 {
				// we are assuming that this never happens!
				panic("we need to save this last chunk too!")
			}
			//p("server doing stream.Recv(); sees err == io.EOF. nk=%p. bytesSeen=%v. chunkCount=%v.", nk, bytesSeen, chunkCount)
			return nil
		}
		if err != nil {
			return err
		}
		// INVAR: we have a chunk
		if !firstChunkSeen {
			if nk.Filepath != "" {
				if writeFileToDisk {
					fd, err = os.Create(nk.Filepath + fmt.Sprintf("__%v", time.Now()))
					if err != nil {
						return err
					}
					defer fd.Close()
				}
			}
			firstChunkSeen = true
		}
		// Fold this chunk into the running whole-file checksum and compare
		// against the sender's cumulative value.
		hasher.Write(nk.Data)
		cumul := []byte(hasher.Sum(nil))
		if 0 != bytes.Compare(cumul, nk.Blake2BCumulative) {
			return fmt.Errorf("cumulative checksums failed at chunk %v of '%s'. Observed: '%x', expected: '%x'.", nk.ChunkNumber, nk.Filepath, cumul, nk.Blake2BCumulative)
		} else {
			//p("cumulative checksum on nk.ChunkNumber=%v looks good; cumul='%x'. nk.IsLastChunk=%v", nk.ChunkNumber, nk.Blake2BCumulative, nk.IsLastChunk)
		}
		if path == "" {
			path = nk.Filepath
			//p("peer.Server SendFile sees new file '%s'", path)
		}
		// All chunks of one stream must belong to the same file.
		if path != "" && path != nk.Filepath {
			panic(fmt.Errorf("confusing between two different streams! '%s' vs '%s'", path, nk.Filepath))
		}
		if nk.SizeInBytes != int64(len(nk.Data)) {
			return fmt.Errorf("%v == nk.SizeInBytes != int64(len(nk.Data)) == %v", nk.SizeInBytes, int64(len(nk.Data)))
		}
		// Per-chunk integrity check, independent of the cumulative one.
		checksum := blake2bOfBytes(nk.Data)
		cmp := bytes.Compare(checksum, nk.Blake2B)
		if cmp != 0 {
			return fmt.Errorf("chunk %v bad .Data, checksum mismatch!",
				nk.ChunkNumber)
		}
		// INVAR: chunk passes tests, keep it.
		bytesSeen += int64(len(nk.Data))
		chunkCount++
		// TODO: user should store chunk somewhere here... or accumulate
		// all the chunks in memory
		// until ready to store it elsewhere; e.g. in boltdb.
		if writeFileToDisk {
			err = writeToFd(fd, nk.Data)
			if err != nil {
				return err
			}
		}
		if nk.IsLastChunk {
			return err
		}
	} // end for
	return nil
}
// MainExample shows the flag-parsing / config-validation startup sequence
// for the server, including optional CPU profiling.
func MainExample() {
	myflags := flag.NewFlagSet(ProgramName, flag.ExitOnError)
	myID := "123"
	cfg := NewServerConfig(myID)
	cfg.DefineFlags(myflags)
	sshegoCfg := setupSshFlags(myflags)

	// Previously this error was silently discarded. With flag.ExitOnError
	// Parse exits on bad input, but check explicitly for clarity.
	err := myflags.Parse(os.Args[1:])
	if err != nil {
		log.Fatalf("%s command line parse error: '%s'", ProgramName, err)
	}

	if cfg.CpuProfilePath != "" {
		f, err := os.Create(cfg.CpuProfilePath)
		if err != nil {
			log.Fatal(err)
		}
		// StartCPUProfile's error was previously ignored.
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile()
	}

	err = cfg.ValidateConfig()
	if err != nil {
		log.Fatalf("%s command line flag error: '%s'", ProgramName, err)
	}
	cfg.SshegoCfg = sshegoCfg

	// cfg.StartGrpcServer()
}
// Stop shuts down the embedded gRPC server, if one is running.
// It is safe to call on a nil receiver.
func (cfg *ServerConfig) Stop() {
	if cfg == nil {
		return
	}
	cfg.mut.Lock()
	defer cfg.mut.Unlock()
	if cfg.GrpcServer != nil {
		cfg.GrpcServer.Stop() // race here in Test103BcastGet
	}
}
// StartGrpcServer starts the peer's gRPC service, choosing the transport
// from the config: no encryption (for a VPN that already provides it),
// TLS, or an SSH tunnel on the external port fronting a localhost-only
// gRPC listener on the internal port. It blocks serving requests until
// the server is stopped.
//
// sshdReady is closed once no further transport setup is pending
// (immediately when encryption is skipped, or after the SSHD is up);
// it is NOT closed on the TLS path — TODO confirm callers don't wait
// on it in that mode.
func (cfg *ServerConfig) StartGrpcServer(
	peer api.LocalGetSet,
	sshdReady chan bool,
	myID string,
) {
	var gRpcBindPort int
	var gRpcHost string
	if cfg.SkipEncryption {
		// no encryption, only for VPN that already provides it.
		gRpcBindPort = cfg.ExternalLsnPort
		gRpcHost = cfg.Host
	} else if cfg.UseTLS {
		// use TLS
		gRpcBindPort = cfg.ExternalLsnPort
		gRpcHost = cfg.Host
		//p("gRPC with TLS listening on %v:%v", gRpcHost, gRpcBindPort)
	} else {
		// SSH will take the external, gRPC will take the internal.
		gRpcBindPort = cfg.InternalLsnPort
		gRpcHost = "127.0.0.1" // local only, behind the SSHD
		//p("%s external SSHd listening on %v:%v, internal gRPC service listening on 127.0.0.1:%v", myID, cfg.Host, cfg.ExternalLsnPort, cfg.InternalLsnPort)
	}
	lis, err := net.Listen("tcp", fmt.Sprintf("%v:%d", gRpcHost, gRpcBindPort))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	var opts []grpc.ServerOption
	if cfg.SkipEncryption {
		//p("cfg.SkipEncryption is true")
		close(sshdReady)
	} else {
		if cfg.UseTLS {
			// use TLS
			creds, err := credentials.NewServerTLSFromFile(cfg.CertPath, cfg.KeyPath)
			if err != nil {
				log.Fatalf("Failed to generate credentials %v", err)
			}
			opts = []grpc.ServerOption{grpc.Creds(creds)}
		} else {
			// use SSH; serverSshMain is assumed to return once the SSHD
			// is listening — TODO confirm.
			err = serverSshMain(cfg.SshegoCfg, cfg.Host,
				cfg.ExternalLsnPort, cfg.InternalLsnPort)
			panicOn(err)
			close(sshdReady)
		}
	}
	// Publish GrpcServer/Cls under the mutex: Stop() may race with startup.
	cfg.mut.Lock()
	cfg.GrpcServer = grpc.NewServer(opts...)
	cls := NewPeerServerClass(peer, cfg)
	cfg.Cls = cls
	cfg.mut.Unlock()
	pb.RegisterPeerServer(cfg.GrpcServer, cls)
	// blocks until shutdown
	cfg.GrpcServer.Serve(lis)
}
// blake2bOfBytes returns the BLAKE2b digest of by.
func blake2bOfBytes(by []byte) []byte {
	hasher, err := blake2b.New(nil)
	panicOn(err)
	hasher.Write(by)
	return hasher.Sum(nil)
}
// intMin returns the smaller of a and b.
func intMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}
func writeToFd(fd *os.File, data []byte) error {
w := 0
n := len(data)
for {
nw, err := fd.Write(data[w:])
if err != nil {
return err
}
w += nw
if nw >= n {
return nil
}
}
}
|
package main
import (
controller "go-todo/backend/controller"
"net/http"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
// main configures an Echo HTTP server for the todo ("zadanie") API —
// request logging, permissive CORS, and the CRUD routes — then serves
// on port 2381 until a fatal error.
func main() {
	server := echo.New()

	server.Use(middleware.Logger())
	server.Use(middleware.CORSWithConfig(middleware.CORSConfig{
		AllowOrigins: []string{"*"},
		AllowMethods: []string{http.MethodGet, http.MethodPut, http.MethodPost, http.MethodDelete},
	}))

	// Task routes (Polish: zadanie = task, dodaj = add, usun = delete,
	// zakoncz = finish).
	server.GET("/zadania", controller.Zadania)
	server.GET("/zadanie/:id", controller.Zadanie)
	server.POST("/zadanie/dodaj", controller.Dodaj)
	server.DELETE("/zadanie/usun/:id", controller.Usun)
	server.PUT("/zadanie/zakoncz/:id", controller.Zakoncz)

	server.Logger.Fatal(server.Start(":2381"))
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
// TFModule is the struct representation of a terraform module definition in
// JSON format, found in the output of terraform plan
type TFModule struct {
	ChildModules  []TFModule   `json:"child_modules"` // nested modules, recursively
	Resources     []TFResource `json:"resources"`     // resources declared directly in this module
	ModuleAddress string       `json:"address"`       // e.g. "module.network"
}

// TFResource is the struct representation of a terraform resource in JSON
// format found in the output of terraform plan
type TFResource struct {
	Type         string          `json:"type"`          // e.g. "google_compute_instance"
	Name         string          `json:"name"`          // resource label within the module
	Provider     string          `json:"provider_name"` // provider that manages the resource
	Values       TFValues        `json:"values"`        // planned attribute values (union type below)
	Provisioners []TFProvisioner `json:"provisioners"`
}
// TFValues represents the common terraform resource attributes available in the
// JSON output of terraform plan. Note that this is a union of all possible
// values for any modules used in this script, so for any given resource most
// fields will be empty.
type TFValues struct {
	Name                    string                    `json:"name"`
	Index                   string                    `json:"index"`
	Region                  string                    `json:"region"`
	Zone                    string                    `json:"zone"`
	AddressType             string                    `json:"address_type"`
	InstanceTemplate        string                    `json:"source_instance_template"`
	FileName                string                    `json:"filename"`
	FilePermissions         string                    `json:"file_permission"`
	DirPermissions          string                    `json:"directory_permission"`
	CryptoAlgorithm         string                    `json:"algorithm"`
	Content                 string                    `json:"content"`
	Project                 string                    `json:"project"`
	ImageFamily             string                    `json:"family"`
	MachineType             string                    `json:"machine_type"`
	MinCPUPlatform          string                    `json:"min_cpu_platform"`
	AdvancedMachineFeatures []AdvancedMachineFeatures `json:"advanced_machine_features"`
	Service                 string                    `json:"service"`
	CanIPForward            bool                      `json:"can_ip_forward"`
	Trigger                 Trigger                   `json:"triggers"`
	Tags                    []string                  `json:"tags"`
	Disk                    []Disk                    `json:"disk"`
	ServiceAccount          []ServiceAccount          `json:"service_account"`
	NetworkInterfaces       []Interface               `json:"network_interface"`
	Gpu                     *Gpu                      `json:"gpu"` // pointer: nil when no GPU is configured
}
// TFProvisioner represents the provisioners configured in the terraform output
// resulting from the terraform plan
type TFProvisioner struct {
	Type string `json:"type"`
}

// Interface represents a network interface of a network resource
type Interface struct {
	Network string `json:"network"`
}

// ServiceAccount represents a service account associated to an instance
// template resource
type ServiceAccount struct {
	Scopes []string `json:"scopes"`
}

// Disk represents a storage disk associated to an instance template resource
type Disk struct {
	Type string `json:"disk_type"`
	Size int    `json:"disk_size_gb"` // size in GB
}

// AdvancedMachineFeatures represents advanced machine features associated to an instance template resource
type AdvancedMachineFeatures struct {
	EnableNestedVirtualization bool `json:"enable_nested_virtualization"`
}

// Trigger represents a trigger attributes of a GCP gcloud resource
type Trigger struct {
	CmdBody       string `json:"create_cmd_body"`
	CmdEntrypoint string `json:"create_cmd_entrypoint"`
}
// Variable represents an instance of a single terraform input variable
type Variable struct {
	Value string `json:"value"`
}

// ListVariable represents an instance of a single terraform input variable
// which is of type list(string)
type ListVariable struct {
	Value []string `json:"value"`
}

// ListIntVariable represents an instance of a single terraform input variable
// which is of type list(int)
type ListIntVariable struct {
	Value []int `json:"value"`
}

// MapVariable represents an instance of a single terraform input variable
// which is of type map
type MapVariable struct {
	Value map[string]int `json:"value"`
}

// SensitiveVariable represents a terraform variable that has been marked as
// sensitive by the provider
type SensitiveVariable struct {
	Sensitive bool `json:"sensitive"`
}

// GpuVariable represents an instance of a variable carrying Gpu information
type GpuVariable struct {
	Value Gpu `json:"value"`
}

// Gpu represents an instance of a single terraform input which holds
// information about a Gpu instance to be associated to a VM
type Gpu struct {
	Count int    `json:"count"`
	Type  string `json:"type"`
}
|
/*
Package go-sudoku implements a simple library for solving sudoku puzzles.
*/
package main
import (
	"flag"
	"fmt"
	"log"
	"net/http"

	"github.com/jamesandersen/gosudoku/sudokuparser"
	"github.com/nytimes/gziphandler"
)
// main runs either a web server hosting the sudoku solver UI (mode
// "serve", the default) or a one-shot CLI parse/solve of a puzzle image
// (mode "cli" with -filename).
func main() {
	var filename string
	var mode string

	flag.StringVar(&mode, "mode", "serve", "whether to serve web app or parse additional args in CLI mode")
	flag.StringVar(&filename, "filename", "", "Sudoku puzzle image")
	flag.Parse()

	if mode == "serve" {
		fs := http.FileServer(http.Dir("web/static"))
		http.Handle("/static/", gziphandler.GzipHandler(http.StripPrefix("/static/", fs)))
		http.HandleFunc("/solve", solveHandler)
		http.HandleFunc("/", sudokuFormHandler)
		// ListenAndServe only returns on failure; surface the error
		// instead of discarding it and exiting silently.
		log.Fatal(http.ListenAndServe(":8080", nil))
	} else if mode == "cli" {
		parsed, points := sudokuparser.ParseSudokuFromFile(filename)
		fmt.Println("Parsed sudoku: " + parsed)
		if len(points) == 4 {
			// %v (not %c) so the corner coordinates print as numbers;
			// %c would render them as Unicode code points.
			fmt.Printf("TopLeft (%v, %v)\n", points[0].X, points[0].Y)
			fmt.Printf("TopRight (%v, %v)\n", points[1].X, points[1].Y)
			fmt.Printf("BottomRight (%v, %v)\n", points[2].X, points[2].Y)
			fmt.Printf("BottomLeft (%v, %v)\n", points[3].X, points[3].Y)
		}
		board := NewSudoku(parsed, STANDARD)
		fmt.Print("Attempting to solve Sudoku...\n")
		board.Print()
		finalBoard, success := board.Solve()
		if success {
			finalBoard.Print()
		} else {
			fmt.Print("Board not solved...")
		}
	}
}
|
package leetcode
// dominantIndex returns the index of the largest element if it is at
// least twice every other element, and -1 otherwise.
//
// One pass: track the largest value, its index, and twice the runner-up;
// at the end the largest dominates iff largest >= 2*runnerUp.
func dominantIndex(nums []int) int {
	largest, twiceRunnerUp, largestIdx := 0, 0, 0
	for idx, n := range nums {
		switch {
		case n > largest:
			// Previous largest becomes the runner-up.
			twiceRunnerUp = largest * 2
			largest = n
			largestIdx = idx
		case n*2 > twiceRunnerUp:
			twiceRunnerUp = n * 2
		}
	}
	if largest >= twiceRunnerUp {
		return largestIdx
	}
	return -1
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/6/13 8:27 上午
# @File : lt_z型打印二叉树.go
# @Description :
# @Attention :
*/
package v2
// zigzagLevelOrder returns the node values level by level, alternating
// direction: level 0 left-to-right, level 1 right-to-left, and so on
// (LeetCode 103).
//
// Bug fixes vs. the original:
//   - the reverse was called INSIDE the per-node loop, repeatedly
//     reversing the partial level and scrambling its order;
//   - the toggle started as true, reversing the first level instead of
//     the second.
// Now each level is reversed exactly once, after it is complete, and
// only for right-to-left levels.
func zigzagLevelOrder(root *TreeNode) [][]int {
	if root == nil {
		return nil
	}
	result := make([][]int, 0)
	leftToRight := true
	queue := []*TreeNode{root}
	for len(queue) > 0 {
		levelSize := len(queue)
		level := make([]int, 0, levelSize)
		for i := 0; i < levelSize; i++ {
			node := queue[0]
			queue = queue[1:]
			level = append(level, node.Val)
			if node.Left != nil {
				queue = append(queue, node.Left)
			}
			if node.Right != nil {
				queue = append(queue, node.Right)
			}
		}
		if !leftToRight {
			reverseList222(level)
		}
		result = append(result, level)
		leftToRight = !leftToRight
	}
	return result
}
// reverseList222 reverses r in place.
func reverseList222(r []int) {
	n := len(r)
	for i := 0; i < n/2; i++ {
		r[i], r[n-1-i] = r[n-1-i], r[i]
	}
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// BucketInfluencer type.
//
// NOTE: generated code (DO NOT EDIT) — mirrors the elasticsearch
// specification's ml BucketInfluencer result document.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/ml/_types/Bucket.ts#L80-L128
type BucketInfluencer struct {
	// AnomalyScore A normalized score between 0-100, which is calculated for each bucket
	// influencer. This score might be updated as
	// newer data is analyzed.
	AnomalyScore Float64 `json:"anomaly_score"`
	// BucketSpan The length of the bucket in seconds. This value matches the bucket span that
	// is specified in the job.
	BucketSpan int64 `json:"bucket_span"`
	// InfluencerFieldName The field name of the influencer.
	InfluencerFieldName string `json:"influencer_field_name"`
	// InitialAnomalyScore The score between 0-100 for each bucket influencer. This score is the initial
	// value that was calculated at the
	// time the bucket was processed.
	InitialAnomalyScore Float64 `json:"initial_anomaly_score"`
	// IsInterim If true, this is an interim result. In other words, the results are
	// calculated based on partial input data.
	IsInterim bool `json:"is_interim"`
	// JobId Identifier for the anomaly detection job.
	JobId string `json:"job_id"`
	// Probability The probability that the bucket has this behavior, in the range 0 to 1. This
	// value can be held to a high precision
	// of over 300 decimal places, so the `anomaly_score` is provided as a
	// human-readable and friendly interpretation of
	// this.
	Probability Float64 `json:"probability"`
	// RawAnomalyScore Internal.
	RawAnomalyScore Float64 `json:"raw_anomaly_score"`
	// ResultType Internal. This value is always set to `bucket_influencer`.
	ResultType string `json:"result_type"`
	// Timestamp The start time of the bucket for which these results were calculated.
	Timestamp int64 `json:"timestamp"`
	// TimestampString The start time of the bucket for which these results were calculated.
	TimestampString DateTime `json:"timestamp_string,omitempty"`
}
// UnmarshalJSON decodes a BucketInfluencer by walking the JSON tokens
// directly, because Elasticsearch may emit numeric/boolean fields either
// as native JSON values or as quoted strings; both forms are accepted.
//
// NOTE(review): generated code — per-field dec.Decode(&tmp) errors in
// the string-or-number cases are deliberately(?) ignored, leaving the
// field at its zero value; confirm against the generator if tightening.
func (s *BucketInfluencer) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		// t is the object key; each case consumes the following value.
		switch t {
		case "anomaly_score":
			// Accept either a quoted string or a JSON number.
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseFloat(v, 64)
				if err != nil {
					return err
				}
				f := Float64(value)
				s.AnomalyScore = f
			case float64:
				f := Float64(v)
				s.AnomalyScore = f
			}
		case "bucket_span":
			if err := dec.Decode(&s.BucketSpan); err != nil {
				return err
			}
		case "influencer_field_name":
			if err := dec.Decode(&s.InfluencerFieldName); err != nil {
				return err
			}
		case "initial_anomaly_score":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseFloat(v, 64)
				if err != nil {
					return err
				}
				f := Float64(value)
				s.InitialAnomalyScore = f
			case float64:
				f := Float64(v)
				s.InitialAnomalyScore = f
			}
		case "is_interim":
			// Boolean, possibly quoted.
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.IsInterim = value
			case bool:
				s.IsInterim = v
			}
		case "job_id":
			if err := dec.Decode(&s.JobId); err != nil {
				return err
			}
		case "probability":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseFloat(v, 64)
				if err != nil {
					return err
				}
				f := Float64(value)
				s.Probability = f
			case float64:
				f := Float64(v)
				s.Probability = f
			}
		case "raw_anomaly_score":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseFloat(v, 64)
				if err != nil {
					return err
				}
				f := Float64(value)
				s.RawAnomalyScore = f
			case float64:
				f := Float64(v)
				s.RawAnomalyScore = f
			}
		case "result_type":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			// Unquote if the raw value is a quoted string; otherwise keep
			// the raw bytes verbatim.
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.ResultType = o
		case "timestamp":
			if err := dec.Decode(&s.Timestamp); err != nil {
				return err
			}
		case "timestamp_string":
			if err := dec.Decode(&s.TimestampString); err != nil {
				return err
			}
		}
	}
	return nil
}
// NewBucketInfluencer returns a new, zero-valued BucketInfluencer.
func NewBucketInfluencer() *BucketInfluencer {
	return &BucketInfluencer{}
}
|
package util
import (
"reflect"
)
// ParseArray normalizes nil-ish values to an empty string slice so that
// JSON responses encode as [] rather than null; non-nil values pass
// through unchanged.
//
// Bug fix: reflect.Value.IsNil panics for kinds that cannot be nil
// (ints, strings, structs, ...). Only the nilable kinds are probed now,
// so passing e.g. an int no longer panics.
func ParseArray(a interface{}) (res interface{}) {
	if a == nil {
		return []string{}
	}
	v := reflect.ValueOf(a)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		if v.IsNil() {
			return []string{}
		}
	}
	return a
}
|
package main
import (
"fmt"
"log"
"net"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
pb "github.com/gautamrege/gochat/api"
)
// chatServer implements the gRPC GoChat service (see the Chat method).
type chatServer struct {
}
// Chat handles an incoming chat RPC: print the sender and message, then
// re-display the input prompt.
func (s *chatServer) Chat(ctx context.Context, req *pb.ChatRequest) (res *pb.ChatResponse, err error) {
	fmt.Printf("\n@%s says: \"%s\"\n> ", req.From.Name, req.Message)
	// TODO-WORKSHOP: If this is a chat from an unknown user, insert into HANDLES
	return &pb.ChatResponse{}, nil
}
// listen registers the GoChat gRPC service and serves it on *host:*port.
// It blocks until the server stops and signals wg on the way out.
func listen(wg *sync.WaitGroup, exit chan bool) {
	defer wg.Done()

	addr := fmt.Sprintf("%s:%d", *host, *port)
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}

	srv := grpc.NewServer()
	pb.RegisterGoChatServer(srv, &chatServer{})
	if err = srv.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
// sendChat dials the peer identified by h and delivers message via the
// GoChat Chat RPC, with a 10 second deadline. On failure the peer is
// dropped from HANDLES.
func sendChat(h pb.Handle, message string) {
	target := fmt.Sprintf("%s:%d", h.Host, h.Port)
	conn, err := grpc.Dial(target, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("fail to dial: %v", err)
	}
	defer conn.Close()

	client := pb.NewGoChatClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request as required by the pb.ChatRequest shape (From/To/Message).
	req := pb.ChatRequest{
		From:    &ME,
		To:      &h,
		Message: message,
	}
	if _, err = client.Chat(ctx, &req); err != nil {
		log.Printf("ERROR: Chat(): %v", err)
		HANDLES.Delete(h.Name)
	}
}
|
package reqValidator
import (
"errors"
"fmt"
"reflect"
"strings"
)
// structure is implemented by request types that can expose their fields
// as a name -> value map for validation.
type structure interface {
	Map() map[string]interface{}
}
// Validate reports whether itemsMap has exactly the same keys as the
// structure's field map, with matching value kinds.
//
// Deprecated: use ValidateAndPopulate instead (a notice is printed on
// every call).
func Validate(structure structure, itemsMap map[string]interface{}) bool {
	fmt.Println("DEPRECATED: Validate(structure structure, itemsMap map[string]interface{}) bool TRY ValidateAndPopulate(st interface{}, inputMap map[string]interface{}) (err error)")
	expected := structure.Map()
	if len(expected) != len(itemsMap) {
		fmt.Println(len(expected), len(itemsMap))
		return false
	}
	for key, want := range expected {
		got, present := itemsMap[key]
		if !present {
			fmt.Println(key)
			return false
		}
		if reflect.TypeOf(want).Kind() != reflect.TypeOf(got).Kind() {
			fmt.Println(key)
			return false
		}
	}
	return true
}
// ValidateAndPopulate copies the values from inputMap into the struct
// pointed to by st, matching map keys to exported field names. Any panic
// raised by reflection (missing field, type mismatch, empty value) is
// recovered and returned as an error.
func ValidateAndPopulate(st interface{}, inputMap map[string]interface{}) (err error) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		switch v := r.(type) {
		case string:
			err = errors.New(v)
		case error:
			err = v
		default:
			err = errors.New("Unknown panic")
		}
	}()

	target := reflect.ValueOf(st).Elem()
	for fieldName, fieldValue := range inputMap {
		// Reject empty string values outright.
		if fieldValue == "" {
			panic("Empty value")
		}
		target.FieldByName(fieldName).Set(reflect.ValueOf(fieldValue))
	}
	return nil
}
// ValidateMap reports whether every one of names is present as a key in
// items. With no names it trivially returns true.
func ValidateMap(items map[interface{}]interface{}, names ...interface{}) bool {
	for _, key := range names {
		if _, found := items[key]; !found {
			return false
		}
	}
	return true
}
// ValidateForm reports whether every supplied form field slice is
// non-empty.
func ValidateForm(items ...[]string) bool {
	for _, entry := range items {
		if len(entry) == 0 {
			return false
		}
	}
	return true
}
// ValidateImageURL reports whether url contains any of the given image
// format substrings, returning the first one that matches.
func ValidateImageURL(url string, types ...string) (bool, string) {
	for _, format := range types {
		if strings.Contains(url, format) {
			return true, format
		}
	}
	return false, ""
}
|
package vfs
import (
"strconv"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/rc"
"github.com/pkg/errors"
)
// addRC registers the VFS's remote-control API calls with the rc
// registry: "vfs/forget" (drop directory-cache entries) and
// "vfs/refresh" (re-read directories into the cache).
func (vfs *VFS) addRC() {
	rc.Add(rc.Call{
		Path: "vfs/forget",
		Fn: func(in rc.Params) (out rc.Params, err error) {
			root, err := vfs.Root()
			if err != nil {
				return nil, err
			}
			forgotten := []string{}
			if len(in) == 0 {
				// No parameters: forget the entire cache.
				root.ForgetAll()
			} else {
				// Parameter keys select entry type by prefix
				// ("file..."/"dir..."); values are the paths to forget.
				for k, v := range in {
					path, ok := v.(string)
					if !ok {
						return out, errors.Errorf("value must be string %q=%v", k, v)
					}
					path = strings.Trim(path, "/")
					if strings.HasPrefix(k, "file") {
						root.ForgetPath(path, fs.EntryObject)
					} else if strings.HasPrefix(k, "dir") {
						root.ForgetPath(path, fs.EntryDirectory)
					} else {
						return out, errors.Errorf("unknown key %q", k)
					}
					forgotten = append(forgotten, path)
				}
			}
			out = rc.Params{
				"forgotten": forgotten,
			}
			return out, nil
		},
		Title: "Forget files or directories in the directory cache.",
		Help: `
This forgets the paths in the directory cache causing them to be
re-read from the remote when needed.
If no paths are passed in then it will forget all the paths in the
directory cache.
rclone rc vfs/forget
Otherwise pass files or dirs in as file=path or dir=path. Any
parameter key starting with file will forget that file and any
starting with dir will forget that dir, eg
rclone rc vfs/forget file=hello file2=goodbye dir=home/junk
`,
	})
	rc.Add(rc.Call{
		Path: "vfs/refresh",
		Fn: func(in rc.Params) (out rc.Params, err error) {
			root, err := vfs.Root()
			if err != nil {
				return nil, err
			}
			// getDir walks path segment by segment from the root and
			// returns the *Dir it lands on, or EINVAL if the path does
			// not resolve to a directory.
			getDir := func(path string) (*Dir, error) {
				path = strings.Trim(path, "/")
				segments := strings.Split(path, "/")
				var node Node = root
				for _, s := range segments {
					if dir, ok := node.(*Dir); ok {
						node, err = dir.stat(s)
						if err != nil {
							return nil, err
						}
					}
				}
				if dir, ok := node.(*Dir); ok {
					return dir, nil
				}
				return nil, EINVAL
			}
			// Pull the optional recursive=true/false flag out of the
			// params before treating the rest as dir paths.
			recursive := false
			{
				const k = "recursive"
				if v, ok := in[k]; ok {
					s, ok := v.(string)
					if !ok {
						return out, errors.Errorf("value must be string %q=%v", k, v)
					}
					recursive, err = strconv.ParseBool(s)
					if err != nil {
						return out, errors.Errorf("invalid value %q=%v", k, v)
					}
					delete(in, k)
				}
			}
			// result maps each requested path ("" for the root) to "OK"
			// or the error string.
			result := map[string]string{}
			if len(in) == 0 {
				if recursive {
					err = root.readDirTree()
				} else {
					err = root.readDir()
				}
				if err != nil {
					result[""] = err.Error()
				} else {
					result[""] = "OK"
				}
			} else {
				for k, v := range in {
					path, ok := v.(string)
					if !ok {
						return out, errors.Errorf("value must be string %q=%v", k, v)
					}
					if strings.HasPrefix(k, "dir") {
						dir, err := getDir(path)
						if err != nil {
							result[path] = err.Error()
						} else {
							if recursive {
								err = dir.readDirTree()
							} else {
								err = dir.readDir()
							}
							if err != nil {
								result[path] = err.Error()
							} else {
								result[path] = "OK"
							}
						}
					} else {
						return out, errors.Errorf("unknown key %q", k)
					}
				}
			}
			out = rc.Params{
				"result": result,
			}
			return out, nil
		},
		Title: "Refresh the directory cache.",
		Help: `
This reads the directories for the specified paths and freshens the
directory cache.
If no paths are passed in then it will refresh the root directory.
rclone rc vfs/refresh
Otherwise pass directories in as dir=path. Any parameter key
starting with dir will refresh that directory, eg
rclone rc vfs/refresh dir=home/junk dir2=data/misc
If the parameter recursive=true is given the whole directory tree
will get refreshed. This refresh will use --fast-list if enabled.
`,
	})
}
|
package domain
import (
"fmt"
"regexp"
"strings"
"github.com/thoas/go-funk"
)
// MJLog is a Tenhou mahjong game log. Body holds the raw XML-ish log
// text; MyPosition, when non-empty, restricts the statistics methods to
// the player with that seat ("who") attribute.
type MJLog struct {
	ID         string
	MyPosition string
	Body       string
}
// GetRiichCount counts the riichi declarations recorded in the log body.
func (m MJLog) GetRiichCount() int {
	reachRe := regexp.MustCompile(`<REACH.*?/>`)
	tags := reachRe.FindAllString(m.Body, -1)
	accepted := funk.Filter(tags, func(tag string) bool {
		// step="2" is only recorded when the riichi declaration was not
		// ronned, i.e. the declaration actually stood.
		if !strings.Contains(tag, `step="2"`) {
			return false
		}
		// When MyPosition is set, count only this player's riichi.
		return m.MyPosition == "" || strings.Contains(tag, fmt.Sprintf(`who="%s"`, m.MyPosition))
	}).([]string)
	return len(accepted)
}
// GetRiichSuccessCount returns the total number of winning hands
// (tsumo + ron) that included a riichi yaku.
func (m MJLog) GetRiichSuccessCount() int {
	tsumoWins, ronWins := m.GetRiichSuccessCounts()
	return tsumoWins + ronWins
}
// GetRiichSuccessCounts scans the winning-hand (<AGARI .../>) records and
// returns how many riichi hands won by self-draw (tsumo) and by ron.
// When MyPosition is set, only this player's wins are counted.
//
// Improvement: the yaku regexp was recompiled inside the loop for every
// winning hand; both patterns are now compiled once per call.
func (m MJLog) GetRiichSuccessCounts() (tsumo int, ron int) {
	agariRe := regexp.MustCompile(`<AGARI.*?/>`)
	yakuRe := regexp.MustCompile(`yaku="([\d,]+)"`)
	for _, agari := range agariRe.FindAllString(m.Body, -1) {
		// When MyPosition is set, count only this player's wins.
		if m.MyPosition != "" && !strings.Contains(agari, fmt.Sprintf(`who="%s"`, m.MyPosition)) {
			continue
		}
		matches := yakuRe.FindStringSubmatch(agari)
		if len(matches) == 0 {
			continue
		}
		var isTsumo bool
		var isRiich bool
		// The yaku attribute is [yakuA, hanA, yakuB, hanB, ...]; even
		// indices are yaku IDs, odd indices are their han values.
		for i, yaku := range strings.Split(matches[1], ",") {
			if i%2 != 0 {
				continue
			}
			switch yaku {
			case YAKU_TSUMO:
				isTsumo = true
			case YAKU_RIICH, YAKU_W_RIICH:
				isRiich = true
			}
		}
		if isRiich {
			if isTsumo {
				tsumo += 1
			} else {
				ron += 1
			}
		}
	}
	return
}
|
package mqtt
import (
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"os"
"regexp"
"time"
aws "github.com/aws/aws-sdk-go/aws/credentials"
paho "github.com/eclipse/paho.mqtt.golang"
"github.com/uhppoted/uhppote-core/uhppote"
"github.com/uhppoted/uhppoted-lib/uhppoted"
"github.com/uhppoted/uhppoted-mqtt/acl"
"github.com/uhppoted/uhppoted-mqtt/auth"
"github.com/uhppoted/uhppoted-mqtt/device"
"github.com/uhppoted/uhppoted-mqtt/log"
)
// MQTTD holds the configuration and runtime state of the uhppoted MQTT
// bridge daemon.
type MQTTD struct {
	ServerID       string
	Connection     Connection
	TLS            *tls.Config
	Topics         Topics
	Alerts         Alerts
	HMAC           auth.HMAC
	Encryption     Encryption
	Authentication string
	Permissions    auth.Permissions
	AWS            AWS
	ACL            ACL
	EventMap       string
	Protocol       string
	Debug          bool

	client    paho.Client    // live broker connection; nil until Run succeeds
	interrupt chan os.Signal // closed by Close() to stop the event listener
}
// Connection holds the MQTT broker connection settings.
type Connection struct {
	Broker   string
	ClientID string
	UserName string
	Password string
}

// Topics names the MQTT topics used for requests, replies, events and
// system messages.
type Topics struct {
	Requests string
	Replies  string
	Events   string
	System   string
}

// Alerts configures QOS/retention for alert messages.
type Alerts struct {
	QOS      byte
	Retained bool
}

// Encryption bundles the message signing/encryption configuration.
type Encryption struct {
	SignOutgoing    bool
	EncryptOutgoing bool
	EventsKeyID     string
	SystemKeyID     string
	HOTP            *auth.HOTP
	RSA             *auth.RSA
	Nonce           auth.Nonce
}

// AWS holds credentials/region for ACL verification against AWS.
type AWS struct {
	Credentials *aws.Credentials
	Region      string
}

// ACL configures which ACL verification methods are enabled.
type ACL struct {
	Verify map[acl.Verification]bool
}

// fdispatch pairs a request handler with the method name used in reply
// metainfo and logging.
type fdispatch struct {
	method string
	f      func(uhppoted.IUHPPOTED, []byte) (interface{}, error)
}

// dispatcher routes incoming MQTT request messages to their handlers via
// the topic -> fdispatch table.
type dispatcher struct {
	mqttd    *MQTTD
	uhppoted *uhppoted.UHPPOTED
	devices  []uhppote.Device
	table    map[string]fdispatch
}

// request is an unwrapped incoming message: optional client/request IDs,
// an optional reply-to topic, and the raw request payload.
type request struct {
	ClientID  *string
	RequestID *string
	ReplyTo   *string
	Request   []byte
}

// metainfo is the header information attached to outgoing replies.
type metainfo struct {
	RequestID *string `json:"request-id,omitempty"`
	ClientID  *string `json:"client-id,omitempty"`
	ServerID  string  `json:"server-id,omitempty"`
	Method    string  `json:"method,omitempty"`
	Nonce     fnonce  `json:"nonce,omitempty"`
}
// fnonce defers nonce generation until the metainfo is actually
// marshalled, so each serialized message consumes exactly one nonce.
type fnonce func() uint64

// MarshalJSON emits the value produced by invoking the nonce function.
func (f fnonce) MarshalJSON() ([]byte, error) {
	return json.Marshal(f())
}

// regex holds patterns compiled once at startup: 'clean' matches
// whitespace runs, 'base64' matches a quoted base64 string.
var regex = struct {
	clean  *regexp.Regexp
	base64 *regexp.Regexp
}{
	clean:  regexp.MustCompile(`\s+`),
	base64: regexp.MustCompile(`^"[A-Za-z0-9+/]*[=]{0,2}"$`),
}
// Run builds the request-topic dispatch table, connects/subscribes to the
// MQTT broker and starts the controller event listener. It returns an
// error if either the broker connection or the event listener fails to
// start; otherwise it returns nil with the daemon running in the
// background.
//
// Fixes: the "/device/time-profiles:set" entry was registered with the
// copy-pasted method name "get-time-profiles" (now "set-time-profiles"),
// and the listen error message hard-coded a bogus port 12345 (now the
// actual listen address).
func (mqttd *MQTTD) Run(u uhppote.IUHPPOTE, devices []uhppote.Device, authorized []string) error {
	device.SetProtocol(mqttd.Protocol)

	api := uhppoted.UHPPOTED{
		UHPPOTE:         u,
		ListenBatchSize: 32,
	}

	dev := device.Device{
		AuthorizedCards: authorized,
	}

	acl := acl.ACL{
		UHPPOTE:     u,
		Devices:     devices,
		RSA:         mqttd.Encryption.RSA,
		Credentials: mqttd.AWS.Credentials,
		Region:      mqttd.AWS.Region,
		Verify:      mqttd.ACL.Verify,
	}

	// Topic suffix convention: <resource>:<action>; the method string is
	// echoed back in the reply metainfo and used for logging.
	d := dispatcher{
		mqttd:    mqttd,
		uhppoted: &api,
		devices:  devices,
		table: map[string]fdispatch{
			mqttd.Topics.Requests + "/devices:get":                 fdispatch{"get-devices", dev.GetDevices},
			mqttd.Topics.Requests + "/device:get":                  fdispatch{"get-device", dev.GetDevice},
			mqttd.Topics.Requests + "/device/status:get":           fdispatch{"get-status", dev.GetStatus},
			mqttd.Topics.Requests + "/device/time:get":             fdispatch{"get-time", dev.GetTime},
			mqttd.Topics.Requests + "/device/time:set":             fdispatch{"set-time", dev.SetTime},
			mqttd.Topics.Requests + "/device/door/delay:get":       fdispatch{"get-door-delay", dev.GetDoorDelay},
			mqttd.Topics.Requests + "/device/door/delay:set":       fdispatch{"set-door-delay", dev.SetDoorDelay},
			mqttd.Topics.Requests + "/device/door/control:get":     fdispatch{"get-door-control", dev.GetDoorControl},
			mqttd.Topics.Requests + "/device/door/control:set":     fdispatch{"set-door-control", dev.SetDoorControl},
			mqttd.Topics.Requests + "/device/door/interlock:set":   fdispatch{"set-interlock", dev.SetInterlock},
			mqttd.Topics.Requests + "/device/door/keypads:set":     fdispatch{"set-keypads", dev.SetKeypads},
			mqttd.Topics.Requests + "/device/door/lock:open":       fdispatch{"open-door", dev.OpenDoor},
			mqttd.Topics.Requests + "/device/special-events:set":   fdispatch{"record-special-events", dev.RecordSpecialEvents},
			mqttd.Topics.Requests + "/device/cards:get":            fdispatch{"get-cards", dev.GetCards},
			mqttd.Topics.Requests + "/device/cards:delete":         fdispatch{"delete-cards", dev.DeleteCards},
			mqttd.Topics.Requests + "/device/card:get":             fdispatch{"get-card", dev.GetCard},
			mqttd.Topics.Requests + "/device/card:put":             fdispatch{"put-card", dev.PutCard},
			mqttd.Topics.Requests + "/device/card:delete":          fdispatch{"delete-card", dev.DeleteCard},
			mqttd.Topics.Requests + "/device/time-profile:get":     fdispatch{"get-time-profile", dev.GetTimeProfile},
			mqttd.Topics.Requests + "/device/time-profile:set":     fdispatch{"set-time-profile", dev.PutTimeProfile},
			mqttd.Topics.Requests + "/device/time-profiles:get":    fdispatch{"get-time-profiles", dev.GetTimeProfiles},
			mqttd.Topics.Requests + "/device/time-profiles:set":    fdispatch{"set-time-profiles", dev.PutTimeProfiles},
			mqttd.Topics.Requests + "/device/time-profiles:delete": fdispatch{"clear-time-profiles", dev.ClearTimeProfiles},
			mqttd.Topics.Requests + "/device/tasklist:set":         fdispatch{"set-task-list", dev.PutTaskList},
			mqttd.Topics.Requests + "/device/events:get":           fdispatch{"get-events", dev.GetEvents},
			mqttd.Topics.Requests + "/device/event:get":            fdispatch{"get-event", dev.GetEvent},
			mqttd.Topics.Requests + "/acl/card:show":               fdispatch{"acl:show", acl.Show},
			mqttd.Topics.Requests + "/acl/card:grant":              fdispatch{"acl:grant", acl.Grant},
			mqttd.Topics.Requests + "/acl/card:revoke":             fdispatch{"acl:revoke", acl.Revoke},
			mqttd.Topics.Requests + "/acl/acl:upload":              fdispatch{"acl:upload", acl.Upload},
			mqttd.Topics.Requests + "/acl/acl:download":            fdispatch{"acl:download", acl.Download},
			mqttd.Topics.Requests + "/acl/acl:compare":             fdispatch{"acl:compare", acl.Compare},
		},
	}

	if client, err := mqttd.subscribeAndServe(&d); err != nil {
		return fmt.Errorf("ERROR: Error connecting to '%s': %v", mqttd.Connection.Broker, err)
	} else {
		mqttd.client = client
	}

	if err := mqttd.listen(&api, u); err != nil {
		return fmt.Errorf("ERROR: Failed to bind to listen address '%v': %v", u.ListenAddr(), err)
	}

	return nil
}
// Close stops the event listener (by closing the interrupt channel) and
// disconnects from the MQTT broker, clearing the client reference.
func (m *MQTTD) Close() {
	if m.interrupt != nil {
		close(m.interrupt)
	}

	client := m.client
	m.client = nil
	if client == nil {
		return
	}

	infof("closing connection to %s", m.Connection.Broker)
	client.Disconnect(250)
	infof("closed connection to %s", m.Connection.Broker)
}
// subscribeAndServe connects to the configured MQTT broker and subscribes
// to the request topics, dispatching each incoming message through d. It
// returns the connected client; reconnection after a lost connection is
// retried manually every 10s (auto-reconnect is deliberately off, see
// note below).
//
// Bug fix: the OnConnect handler previously subscribed via m.client,
// which Run() only assigns AFTER this function returns — on the first
// connection m.client could still be nil, panicking in the handler. The
// handler now uses the client instance paho passes to it.
func (m *MQTTD) subscribeAndServe(d *dispatcher) (paho.Client, error) {
	var handler paho.MessageHandler = func(client paho.Client, msg paho.Message) {
		d.dispatch(client, msg)
	}

	var connected paho.OnConnectHandler = func(client paho.Client) {
		options := client.OptionsReader()
		servers := options.Servers()
		for _, url := range servers {
			infof("connected to %s", url)
		}

		// Subscribe on the client handed to this handler, not m.client.
		token := client.Subscribe(m.Topics.Requests+"/#", 0, handler)
		if err := token.Error(); err != nil {
			errorf("unable to subscribe to %s (%v)", m.Topics.Requests, err)
		} else {
			infof("subscribed to %s", m.Topics.Requests)
		}
	}

	var disconnected paho.ConnectionLostHandler = func(client paho.Client, err error) {
		errorf("connection to MQTT broker lost (%v)", err)
		stats.onDisconnected()
		go func() {
			time.Sleep(10 * time.Second)
			infof("retrying connection to MQTT broker %v", m.Connection.Broker)
			token := client.Connect()
			if err := token.Error(); err != nil {
				errorf("failed to reconnect to MQTT broker (%v)", err)
			}
		}()
	}

	// NOTE: Paho auto-reconnect causes a retry storm if two MQTT clients are using the same client ID.
	//       'Theoretically' (à la Terminator Genesys) the lockfile should prevent this but careful
	//       misconfiguration is always a possibility.
	options := paho.
		NewClientOptions().
		AddBroker(m.Connection.Broker).
		SetClientID(m.Connection.ClientID).
		SetTLSConfig(m.TLS).
		SetCleanSession(false).
		SetAutoReconnect(false).
		SetConnectRetry(true).
		SetConnectRetryInterval(30 * time.Second).
		SetOnConnectHandler(connected).
		SetConnectionLostHandler(disconnected)

	if m.Connection.UserName != "" {
		options.SetUsername(m.Connection.UserName)
		if m.Connection.Password != "" {
			options.SetPassword(m.Connection.Password)
		}
	}

	client := paho.NewClient(options)
	token := client.Connect()
	if err := token.Error(); err != nil {
		return nil, err
	}

	return client, nil
}
// listen starts the background controller-event listener, publishing each
// received event to the events topic. The listener runs until the
// interrupt channel is closed (see Close).
func (m *MQTTD) listen(api *uhppoted.UHPPOTED, u uhppote.IUHPPOTE) error {
	infof("listening on %v", u.ListenAddr())
	infof("publishing events to %s", m.Topics.Events)

	last := uhppoted.NewEventMap(m.EventMap)
	if err := last.Load(); err != nil {
		warnf("error loading event map [%v]", err)
	}

	// onEvent publishes a single event; returning false tells the
	// listener the publish failed.
	onEvent := func(e uhppoted.Event) bool {
		payload := struct {
			Event any `json:"event"`
		}{
			Event: device.Transmogrify(e),
		}

		if err := m.send(&m.Encryption.EventsKeyID, m.Topics.Events, nil, payload, msgEvent, true); err != nil {
			warnf("%v", err)
			return false
		}
		return true
	}

	m.interrupt = make(chan os.Signal)
	go api.Listen(onEvent, last, m.interrupt)

	return nil
}
// dispatch routes an incoming MQTT message to the handler registered for
// its topic; messages on unrecognised topics are silently ignored. The
// message is ACKed immediately and then processed asynchronously on its
// own goroutine: unwrap (decode/verify), authorise, invoke the handler,
// and publish the reply (or error) to the resolved reply topic.
func (d *dispatcher) dispatch(client paho.Client, msg paho.Message) {
	if fn, ok := d.table[msg.Topic()]; ok {
		msg.Ack()
		debugf("%v", string(msg.Payload()))
		go func() {
			// decode and validate the raw request payload
			rq, err := d.mqttd.unwrap(msg.Payload())
			if err != nil {
				warnf("%v", err)
				return
			}
			if err := d.mqttd.authorise(rq.ClientID, msg.Topic()); err != nil {
				warnf("%-20v error authorising request (%v)", fn.method, err)
				return
			}
			// reply topic resolution, in increasing order of precedence:
			// default replies topic < client-specific subtopic < explicit
			// 'reply-to' from the request
			replyTo := d.mqttd.Topics.Replies
			if rq.ClientID != nil {
				replyTo = d.mqttd.Topics.Replies + "/" + *rq.ClientID
			}
			if rq.ReplyTo != nil {
				replyTo = *rq.ReplyTo
			}
			meta := metainfo{
				RequestID: rq.RequestID,
				ClientID:  rq.ClientID,
				ServerID:  d.mqttd.ServerID,
				Method:    fn.method,
				Nonce:     func() uint64 { return d.mqttd.Encryption.Nonce.Next() },
			}
			// invoke the handler - a non-nil response is published even when
			// the handler also returned an error (wrapped as an 'error' reply)
			response, err := fn.f(d.uhppoted, rq.Request)
			if err != nil {
				warnf("%-20v %v", fn.method, err)
				if response != nil {
					reply := struct {
						Error interface{} `json:"error"`
					}{
						Error: response,
					}
					if err := d.mqttd.send(rq.ClientID, replyTo, &meta, reply, msgError, false); err != nil {
						warnf("%-20v %v", fn.method, err)
					}
				}
			} else if response != nil {
				reply := struct {
					Response interface{} `json:"response"`
				}{
					Response: response,
				}
				if err := d.mqttd.send(rq.ClientID, replyTo, &meta, reply, msgReply, false); err != nil {
					warnf("%-20v %v", fn.method, err)
				}
			}
		}()
	}
}
// resourceActionRegex extracts the trailing 'resource:action' pair from a
// request topic (e.g. ".../device:get" -> "device", "get"). Compiled once
// at package level rather than on every request - regexp compilation is
// relatively expensive and the pattern is constant.
var resourceActionRegex = regexp.MustCompile(`.*?/(\w+):(\w+)$`)

// authorise validates that the client identified by clientID is permitted
// to invoke the resource:action encoded in the trailing segment of the
// topic. Authorisation is a no-op (always allowed) unless permissions
// checking is enabled; when enabled, a missing client-id is rejected.
func (m *MQTTD) authorise(clientID *string, topic string) error {
	if m.Permissions.Enabled {
		if clientID == nil {
			return errors.New("request without client-id")
		}

		match := resourceActionRegex.FindStringSubmatch(topic)
		if len(match) != 3 {
			return fmt.Errorf("invalid resource:action (%s)", topic)
		}

		return m.Permissions.Validate(*clientID, match[1], match[2])
	}

	return nil
}
// TODO: add callback for published/failed

// send composes the metainfo and message into a single JSON object, wraps
// it for destID (signing/encrypting as configured) and publishes it to the
// topic. 'critical' messages are published with the configured alert
// QOS/retained settings; everything else goes out at QOS 0, not retained.
func (mqttd *MQTTD) send(destID *string, topic string, meta *metainfo, message interface{}, msgtype msgType, critical bool) error {
	if mqttd.client == nil || !mqttd.client.IsConnected() {
		return errors.New("no connection to MQTT broker")
	}
	content, err := compose(meta, message)
	if err != nil {
		return err
	}
	m, err := mqttd.wrap(msgtype, content, destID)
	if err != nil {
		return err
	} else if m == nil {
		return errors.New("'wrap' failed to return a publishable message")
	}
	qos := byte(0)
	retained := false
	if critical {
		qos = mqttd.Alerts.QOS
		retained = mqttd.Alerts.Retained
	}
	token := mqttd.client.Publish(topic, qos, retained, string(m))
	// NOTE(review): paho publish tokens are asynchronous - Error() is checked
	// here without a preceding Wait(), so most publish failures will not be
	// visible yet. Confirm fire-and-forget is intended (see TODO above).
	if token.Error() != nil {
		return token.Error()
	}
	return nil
}
// compose flattens the metainfo header and the message content into a
// single map, suitable for marshalling as one JSON object. Keys from
// 'content' overwrite identical keys contributed by 'meta'.
func compose(meta *metainfo, content interface{}) (interface{}, error) {
	reply := map[string]interface{}{}

	// round-trip v through JSON to merge its fields into the reply map
	merge := func(v interface{}) error {
		encoded, err := json.Marshal(v)
		if err != nil {
			return err
		}

		return json.Unmarshal(encoded, &reply)
	}

	if err := merge(meta); err != nil {
		return nil, err
	}

	if err := merge(content); err != nil {
		return nil, err
	}

	return reply, nil
}
// isBase64 reports whether the raw request payload matches the package's
// precompiled base64 pattern (regex.base64).
func isBase64(request []byte) bool {
	return regex.base64.Match(request)
}
// Logging helpers: thin wrappers over the shared logger that tag every
// entry with the "mqttd" subsystem.
func debugf(format string, args ...any) {
	log.Debugf("mqttd", format, args...)
}
func infof(format string, args ...any) {
	log.Infof("mqttd", format, args...)
}
func warnf(format string, args ...any) {
	log.Warnf("mqttd", format, args...)
}
func errorf(format string, args ...any) {
	log.Errorf("mqttd", format, args...)
}
func fatalf(format string, args ...any) {
	log.Fatalf("mqttd", format, args...)
}
|
package main
import (
"fmt"
)
// apply invokes the supplied function with val and returns its result.
func apply(afungsi func(int) int, val int) int {
	result := afungsi(val)
	return result
}
// increment returns x + 1.
func increment(x int) int {
	result := x
	result++
	return result
}
// decrement returns x - 1.
func decrement(x int) int {
	result := x
	result--
	return result
}
func main() {
fmt.Println(apply(increment, 2))
fmt.Println(apply(decrement, 2))
}
|
package keeper_test
import (
"encoding/hex"
"fmt"
"math/big"
"testing"
"github.com/stretchr/testify/suite"
abci "github.com/tendermint/tendermint/abci/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/irisnet/irismod/modules/random/keeper"
"github.com/irisnet/irismod/modules/random/types"
)
// TestQuerierSuite runs the keeper test suite under the testify suite runner.
func TestQuerierSuite(t *testing.T) {
	suite.Run(t, new(KeeperTestSuite))
}
// TestNewQuerier exercises the legacy ABCI querier: an unknown route must
// error, and the QueryRandom / QueryRandomRequestQueue routes must return
// the values previously stored through the keeper.
func (suite *KeeperTestSuite) TestNewQuerier() {
	req := abci.RequestQuery{
		Path: "",
		Data: []byte{},
	}
	querier := keeper.NewQuerier(suite.keeper, suite.cdc)
	// unknown route -> error, nil response
	res, err := querier(suite.ctx, []string{"other"}, req)
	suite.Error(err)
	suite.Nil(res)
	// init random
	random := types.NewRandom(
		types.SHA256(testTxBytes),
		testHeight,
		big.NewRat(testRandomNumerator, testRandomDenomiator).FloatString(types.RandPrec),
	)
	suite.keeper.SetRandom(suite.ctx, testReqID, random)
	storedRandom, err := suite.keeper.GetRandom(suite.ctx, testReqID)
	suite.NoError(err)
	// test queryRandom: querying by request ID returns the stored random
	bz, errRes := suite.cdc.MarshalJSON(types.QueryRandomParams{ReqID: hex.EncodeToString(testReqID)})
	suite.NoError(errRes)
	req.Path = fmt.Sprintf("custom/%s/%s", types.QuerierRoute, types.QueryRandom)
	req.Data = bz
	res, err = querier(suite.ctx, []string{types.QueryRandom}, req)
	suite.NoError(err)
	var resultRandom types.Random
	errRes = suite.cdc.UnmarshalJSON(res, &resultRandom)
	suite.NoError(errRes)
	suite.Equal(storedRandom, resultRandom)
	// test queryRandomRequestQueue: a pending request is visible at the
	// height it was queued for
	request, _ := suite.keeper.RequestRandom(suite.ctx, testConsumer, testBlockInterval, false, sdk.NewCoins())
	bz, errRes = suite.cdc.MarshalJSON(types.QueryRandomRequestQueueParams{Height: int64(testBlockInterval)})
	suite.NoError(errRes)
	req.Path = fmt.Sprintf("custom/%s/%s", types.QuerierRoute, types.QueryRandomRequestQueue)
	req.Data = bz
	res, err = querier(suite.ctx, []string{types.QueryRandomRequestQueue}, req)
	suite.NoError(err)
	var resultRequests []types.Request
	errRes = suite.cdc.UnmarshalJSON(res, &resultRequests)
	suite.NoError(errRes)
	suite.Equal([]types.Request{request}, resultRequests)
}
|
package main
import (
"fmt"
"log"
"os"
"strings"
"time"
"qpid.apache.org/amqp"
"qpid.apache.org/electron"
)
// main sends a greeting message to the AMQP broker every two seconds,
// forever: the sender is recreated when the broker forces the connection
// closed, and any other send failure is retried after a short delay.
func main() {
	s := newSender()
	count := 0
	// endless for loop which keeps sending data
	for {
		m := amqp.NewMessage()
		msg := fmt.Sprintf("hello from %q on %q! %d", os.Getenv("TYPE_OF_AMQP_USER"), os.Getenv("POD_NAME"), count)
		m.Marshal(msg)
		outcome := s.SendSync(m)
		// NOTE(review): matching the error by its formatted string (including
		// the trailing space) is fragile - confirm there is no typed error or
		// condition code in the amqp package to compare against instead.
		if fmt.Sprintf("%s", outcome.Error) == "amqp:connection:forced: " {
			// this means we need to create sender again
			s = newSender()
			continue
		} else if outcome.Error != nil {
			log.Print("[!] sending message:", outcome.Value, ", error:", outcome.Error)
			// continue to retry sending the message
			time.Sleep(time.Second)
			continue
		} else if outcome.Status != electron.Accepted {
			log.Print("[!] sending message:", outcome.Value, ", unexpected status:", outcome.Status)
			// continue to retry sending the message
			time.Sleep(time.Second)
			continue
		}
		log.Println("[*] sent message:", msg)
		count++
		time.Sleep(2 * time.Second)
	}
}
// newSender dials the AMQP server named by the AMQ_SERVER environment
// variable and returns a sender for the URL's path (leading '/' stripped).
// It retries forever, sleeping before each attempt, until a sender is
// established.
//
// The backoff is exponential but capped: the previous unbounded doubling
// eventually overflowed int, yielding a negative time.Duration (Sleep
// returns immediately for those) and turning the retry loop into a busy
// loop hammering the broker.
func newSender() electron.Sender {
	const maxSleepSeconds = 60

	sleepingTime := 1
	// persistent sender creation
	for {
		log.Printf("sleeping while creating sender for %ds", sleepingTime)
		time.Sleep(time.Duration(sleepingTime) * time.Second)
		// exponential backoff, capped to avoid integer overflow and to keep
		// the retry interval bounded
		if sleepingTime < maxSleepSeconds {
			sleepingTime *= 2
			if sleepingTime > maxSleepSeconds {
				sleepingTime = maxSleepSeconds
			}
		}

		container := electron.NewContainer(os.Getenv("POD_NAME"))
		u, err := amqp.ParseURL(os.Getenv("AMQ_SERVER"))
		if err != nil {
			log.Printf("[!] parsing amqp url: %v", err)
			continue
		}

		addr := strings.TrimPrefix(u.Path, "/")
		c, err := container.Dial("tcp", u.Host)
		if err != nil {
			log.Printf("[!] dialing the amqp server: %v", err)
			continue
		}

		// NOTE(review): if Sender() fails the dialed connection is abandoned
		// without an explicit close - confirm whether it should be closed here.
		s, err := c.Sender(electron.Target(addr))
		if err != nil {
			log.Printf("[!] creating sender: %v", err)
			continue
		}

		return s
	}
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// main reads a graph description from stdin and reports whether the graph
// can be balanced.
func main() {
	g := IngestInput()

	verdict := "Not balanced"
	if g.Balance() {
		verdict = "Balanced"
	}

	fmt.Println(verdict)
}
// IngestInput ingest input
func IngestInput() Graph {
mygraph := Graph{
nodes: []*Node{},
enemy: map[Node][]*Node{},
friend: map[Node][]*Node{},
}
reader := bufio.NewReader(os.Stdin)
l, _ := reader.ReadString('\n')
l = strings.Replace(l, "\n", "", 1)
cl := strings.Split(l, " ")
ltr, _ := strconv.Atoi(cl[1])
for i := 0; i < ltr; i++ {
l, _ := reader.ReadString('\n')
l = strings.Replace(l, "\n", "", 1)
friends := strings.Split(l, " ++ ")
enemy := strings.Split(l, " -- ")
if len(friends) == 2 {
c1 := mygraph.NewNode(friends[0])
c2 := mygraph.NewNode(friends[1])
mygraph.AddEdge(c1, c2, true)
} else {
c1 := mygraph.NewNode(enemy[0])
c2 := mygraph.NewNode(enemy[1])
mygraph.AddEdge(c1, c2, false)
}
}
return mygraph
}
|
package jsonapi
import (
. "github.com/smartystreets/goconvey/convey"
"testing"
)
// TestRoutes verifies that the request builder generates the expected
// jsonapi routes: collections (no ids), single items, comma-joined
// multi-id items, and explicitly-set non-jsonapi routes.
func TestRoutes(t *testing.T) {
	// fixed typo in the scenario description ("resquest" -> "request")
	Convey("Given a new request builder", t, func() {
		b := NewRequestBuilder()
		Convey("Resource collection routes are defined without ids", func() {
			r, err := b.SetResourcePath("posts").Build()
			So(err, ShouldBeNil)
			So(r.Route(), ShouldEqual, "/posts/")
		})
		Convey("Single resource item routes are defined with a single id", func() {
			r, err := b.SetResourcePath("posts", "1").Build()
			So(err, ShouldBeNil)
			So(r.Route(), ShouldEqual, "/posts/1")
		})
		Convey("Multiple resource item routes are defined with multiple ids", func() {
			r, err := b.SetResourcePath("posts", "1", "2", "3").Build()
			So(err, ShouldBeNil)
			So(r.Route(), ShouldEqual, "/posts/1,2,3")
		})
		Convey("Weird non jsonapi routes can be defined with SetRoute", func() {
			r, err := b.SetRoute("posts/publish").Build()
			So(err, ShouldBeNil)
			So(r.Route(), ShouldEqual, "/posts/publish")
		})
	})
}
|
package main
import (
"testing"
"time"
)
// TestMatchmakingSimplePairing adds four player connections to the
// matchmaking pool.
//
// NOTE(review): the test is currently disabled - t.Skip() on the first line
// makes everything below unreachable. The trailing Sleep suggests the
// matchmaker pairs asynchronously; the test makes no assertions, so even
// when re-enabled it only checks that pooling does not panic.
func TestMatchmakingSimplePairing(t *testing.T) {
	t.Skip()
	mmc := makeMatchmakingController()
	client0 := client{clientNum:0}
	connec0 := MakePlayerConnection(client0,nil)
	client1 := client{clientNum:1}
	connec1 := MakePlayerConnection(client1,nil)
	client2 := client{clientNum:2}
	connec2 := MakePlayerConnection(client2,nil)
	client3 := client{clientNum:3}
	connec3 := MakePlayerConnection(client3,nil)
	mmc.addConnectionToPool(connec0)
	mmc.addConnectionToPool(connec1)
	mmc.addConnectionToPool(connec2)
	mmc.addConnectionToPool(connec3)
	// allow the (presumably asynchronous) pairing to run before returning
	time.Sleep(100 * time.Millisecond)
}
|
package main
import (
"fmt"
)
// copyArray demonstrates copying a slice into a new slice that has the
// same length but double the capacity, then prints both slices.
func copyArray() {
	source := []int{1, 2, 3, 4, 3, 5}

	// destination: same length, twice the capacity of the source
	destination := make([]int, len(source), cap(source)*2)

	// copy(dst, src) copies min(len(dst), len(src)) elements
	copy(destination, source)

	fmt.Println(source)
	fmt.Println(destination)
}
|
//line waccparser.y:2
package parser
import __yyfmt__ "fmt"
//line waccparser.y:2
import (
. "ast"
)
//line waccparser.y:10
// parserSymType is the semantic-value union for the goyacc-generated WACC
// parser: one field per distinct %union member used by the grammar's
// productions, plus yys (the parser state, internal to the automaton).
// Machine-generated - do not edit by hand.
type parserSymType struct{
	yys int
	str string
	stringconst Str
	number int
	pos int
	integer Integer
	ident Ident
	character Character
	boolean Boolean
	fieldaccess Evaluation
	functions []*Function
	function *Function
	classes []*Class
	class *Class
	stmt Statement
	stmts []Statement
	assignrhs Evaluation
	assignlhs Evaluation
	expr Evaluation
	exprs []Evaluation
	params []Param
	param Param
	fields []Field
	field Field
	bracketed []Evaluation
	pairliter Evaluation
	arrayliter ArrayLiter
	pairelem PairElem
	arrayelem ArrayElem
	typedefinition Type
	pairelemtype Type
}
var parserToknames = []string{
"BEGIN",
"END",
"CLASS",
"OPEN",
"CLOSE",
"NEW",
"DOT",
"THIS",
"IS",
"SKIP",
"READ",
"FREE",
"RETURN",
"EXIT",
"PRINT",
"PRINTLN",
"IF",
"THEN",
"ELSE",
"FI",
"WHILE",
"DO",
"DONE",
"NEWPAIR",
"CALL",
"FST",
"SND",
"INT",
"BOOL",
"CHAR",
"STRING",
"PAIR",
"NOT",
"NEG",
"LEN",
"ORD",
"CHR",
"MUL",
"DIV",
"MOD",
"PLUS",
"SUB",
"AND",
"OR",
"GT",
"GTE",
"LT",
"LTE",
"EQ",
"NEQ",
"POSITIVE",
"NEGATIVE",
"TRUE",
"FALSE",
"NULL",
"OPENSQUARE",
"OPENROUND",
"CLOSESQUARE",
"CLOSEROUND",
"ASSIGNMENT",
"COMMA",
"SEMICOLON",
"ERROR",
"FOR",
"STRINGCONST",
"IDENTIFIER",
"INTEGER",
"CHARACTER",
}
// parserStatenames maps parser states to names (empty for this grammar).
var parserStatenames = []string{}

// parserEofCode/parserErrCode are the token classes for end-of-input and
// the 'error' pseudo-token; parserMaxDepth is the initial parse-stack size
// (the stack is grown on demand). Machine-generated - do not edit.
const parserEofCode = 1
const parserErrCode = 2
const parserMaxDepth = 200
//line waccparser.y:300
//line yacctab:1
var parserExca = []int{
-1, 1,
1, -1,
-2, 0,
-1, 28,
69, 116,
-2, 15,
-1, 46,
69, 116,
-2, 70,
}
// parserNprod is the number of grammar productions; parserPrivate is the
// first private (generated) token code; parserLast is the length of the
// packed action table. Machine-generated - do not edit.
const parserNprod = 124
const parserPrivate = 57344
var parserTokenNames []string
var parserStates []string
const parserLast = 693
var parserAct = []int{
212, 206, 7, 180, 211, 37, 172, 158, 4, 250,
265, 45, 240, 231, 13, 221, 70, 72, 73, 74,
75, 76, 77, 52, 27, 78, 26, 25, 30, 202,
178, 147, 31, 32, 33, 34, 35, 141, 69, 132,
105, 106, 79, 110, 84, 66, 30, 11, 85, 41,
253, 230, 274, 44, 271, 228, 125, 126, 127, 128,
129, 130, 131, 241, 54, 278, 170, 30, 267, 36,
182, 31, 32, 33, 34, 35, 38, 269, 272, 36,
244, 38, 137, 256, 140, 244, 133, 36, 169, 31,
32, 33, 34, 35, 39, 40, 43, 157, 159, 39,
40, 242, 204, 161, 43, 264, 43, 43, 36, 182,
45, 81, 43, 186, 187, 188, 189, 190, 191, 192,
193, 194, 195, 196, 197, 198, 184, 182, 176, 82,
174, 173, 83, 42, 185, 30, 203, 43, 159, 68,
43, 209, 210, 161, 43, 208, 183, 171, 213, 214,
181, 215, 248, 216, 217, 244, 218, 219, 255, 245,
244, 244, 30, 30, 243, 223, 244, 224, 222, 225,
226, 80, 227, 238, 167, 239, 36, 165, 154, 207,
151, 168, 136, 149, 159, 137, 155, 152, 229, 161,
97, 232, 166, 43, 276, 146, 164, 145, 150, 23,
153, 22, 148, 36, 36, 234, 235, 144, 38, 89,
12, 14, 15, 16, 17, 18, 19, 20, 89, 136,
247, 21, 103, 201, 143, 24, 39, 40, 31, 32,
33, 34, 35, 135, 252, 249, 254, 257, 88, 87,
258, 260, 89, 89, 261, 262, 31, 32, 33, 34,
175, 86, 263, 176, 266, 174, 173, 30, 259, 200,
156, 30, 270, 233, 10, 30, 28, 237, 142, 9,
29, 134, 251, 181, 93, 92, 94, 90, 91, 277,
104, 107, 207, 30, 177, 67, 6, 63, 275, 2,
53, 96, 96, 118, 120, 117, 119, 30, 36, 95,
30, 160, 36, 64, 62, 179, 36, 31, 32, 33,
34, 35, 55, 108, 56, 57, 58, 205, 8, 5,
60, 59, 3, 1, 36, 101, 100, 102, 98, 99,
0, 0, 48, 49, 65, 162, 61, 63, 36, 0,
0, 36, 0, 0, 51, 46, 47, 50, 0, 0,
114, 116, 115, 64, 62, 39, 40, 118, 120, 117,
119, 0, 55, 0, 56, 57, 58, 0, 0, 0,
60, 59, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 48, 49, 65, 163, 61, 0, 0, 23,
0, 22, 0, 0, 51, 71, 47, 50, 38, 0,
12, 14, 15, 16, 17, 18, 19, 20, 0, 0,
0, 21, 63, 0, 0, 24, 39, 40, 31, 32,
33, 34, 35, 114, 116, 115, 112, 113, 64, 62,
118, 120, 117, 119, 121, 122, 0, 55, 0, 56,
57, 58, 0, 0, 0, 60, 59, 0, 0, 0,
0, 0, 0, 0, 109, 0, 28, 48, 49, 65,
0, 61, 0, 0, 0, 0, 0, 0, 0, 51,
71, 47, 50, 114, 116, 115, 112, 113, 123, 124,
118, 120, 117, 119, 121, 122, 114, 116, 115, 112,
113, 0, 0, 118, 120, 117, 119, 273, 114, 116,
115, 112, 113, 123, 124, 118, 120, 117, 119, 121,
122, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 111, 114, 116, 115, 112, 113, 123, 124,
118, 120, 117, 119, 121, 122, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 236, 114, 116, 115,
112, 113, 123, 124, 118, 120, 117, 119, 121, 122,
0, 0, 0, 0, 0, 0, 0, 0, 268, 114,
116, 115, 112, 113, 123, 124, 118, 120, 117, 119,
121, 122, 0, 0, 0, 0, 0, 0, 0, 0,
199, 114, 116, 115, 112, 113, 123, 124, 118, 120,
117, 119, 121, 122, 0, 0, 0, 0, 0, 0,
0, 246, 114, 116, 115, 112, 113, 123, 124, 118,
120, 117, 119, 121, 122, 139, 0, 0, 0, 0,
0, 0, 220, 0, 138, 0, 0, 0, 0, 0,
0, 114, 116, 115, 112, 113, 123, 124, 118, 120,
117, 119, 121, 122, 114, 116, 115, 112, 113, 123,
124, 118, 120, 117, 119, 121, 122, 114, 116, 115,
112, 113, 123, 124, 118, 120, 117, 119, 121, 122,
114, 116, 115, 112, 113, 123, 0, 118, 120, 117,
119, 121, 122,
}
var parserPact = []int{
285, -1000, -1000, 280, 197, -1000, -20, 128, -1000, -1000,
276, -24, -1000, -1000, 70, 401, 401, 401, 401, 401,
401, 401, 197, 106, -25, 192, 180, 179, 233, 127,
284, -1000, -1000, -1000, -1000, 162, -1000, -1000, 270, 401,
401, 274, -1000, 387, -26, 457, 232, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, 401, 401, 401, 401, 401,
401, 401, -30, 261, 173, -1000, 122, -1000, 232, -1000,
626, 232, 626, 626, 626, 626, 613, 600, 79, -32,
-1000, -1000, -1000, -1000, 208, 164, 146, 136, 134, -38,
139, 135, 124, 137, 123, 201, 401, 326, 133, 129,
118, 25, 84, 215, -39, 626, 626, 58, -1000, 401,
83, 65, 401, 401, 401, 401, 401, 401, 401, 401,
401, 401, 401, 401, 401, -1000, -1000, -1000, -1000, 309,
309, 528, 199, 163, -40, 401, 40, 326, 197, 197,
-1000, 19, 401, 401, -1000, -1000, -1000, -1000, 401, -1000,
401, -1000, 401, 401, -1000, 401, 401, 571, -1000, 626,
-1000, -1000, -54, 401, 401, -1000, 401, -1000, 401, 401,
-1000, 401, -9, 192, 180, 162, 179, -1000, -1000, -13,
-1000, -56, -1000, 326, 238, 233, 309, 309, 245, 245,
245, -1000, -1000, -1000, -1000, 445, 445, 382, 639, -1000,
401, 401, -1000, 482, 255, 111, -1000, -57, -1000, 41,
75, 102, 626, 97, 626, 626, 626, 626, 626, 550,
-1000, 160, 91, 626, 626, 626, 626, 626, 215, 1,
58, -1000, -15, 197, 96, 21, 401, 197, 246, 58,
-1000, 197, -1000, -1000, 401, -1000, -1000, 401, -1000, 43,
-1000, -59, -1000, 401, 42, -1000, -1000, 506, 72, 197,
-1000, 31, 626, 16, -1000, 159, 432, -1000, -1000, -1000,
47, -1000, -1000, 65, -1000, 169, 197, 39, -1000,
}
var parserPgo = []int{
0, 323, 322, 319, 8, 318, 269, 2, 7, 270,
0, 4, 317, 1, 305, 3, 5, 301, 64, 299,
290, 27, 42, 26, 24, 6, 14, 23,
}
var parserR1 = []int{
0, 1, 2, 2, 3, 14, 14, 15, 4, 4,
5, 5, 12, 12, 13, 9, 9, 9, 9, 9,
27, 8, 8, 8, 8, 7, 7, 7, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 17, 11, 11,
11, 18, 19, 19, 20, 16, 16, 24, 25, 25,
25, 25, 25, 22, 22, 22, 22, 21, 21, 21,
21, 23, 23, 23,
}
var parserR2 = []int{
0, 5, 2, 0, 6, 3, 1, 2, 2, 0,
7, 8, 3, 1, 2, 1, 1, 1, 1, 3,
3, 1, 1, 1, 5, 3, 1, 12, 1, 4,
1, 2, 2, 2, 2, 2, 2, 7, 7, 5,
3, 2, 2, 2, 2, 5, 5, 3, 4, 4,
4, 4, 4, 3, 3, 3, 4, 4, 4, 4,
4, 3, 3, 3, 1, 1, 1, 1, 1, 1,
1, 1, 1, 2, 2, 2, 2, 2, 2, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 5, 5, 3, 6, 3, 3, 1,
0, 2, 4, 3, 1, 2, 2, 6, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 3, 3, 3,
}
var parserChk = []int{
-1000, -1, 4, -2, -4, -3, 6, -7, -5, -6,
67, -22, 13, -26, 14, 15, 16, 17, 18, 19,
20, 24, 4, 2, 28, -21, -23, -24, 69, -9,
-27, 31, 32, 33, 34, 35, -18, -16, 11, 29,
30, 69, 5, 65, -22, -10, 69, 70, 56, 57,
71, 68, -27, -20, -18, 36, 38, 39, 40, 45,
44, 60, 28, 11, 27, 58, 69, -9, 69, -27,
-10, 69, -10, -10, -10, -10, -10, -10, -7, -22,
65, 5, 23, 26, 69, -27, 59, 59, 59, 10,
44, 45, 42, 41, 43, -19, 59, 63, 44, 45,
42, 41, 43, 60, 10, -10, -10, 7, -6, 67,
69, 65, 44, 45, 41, 43, 42, 50, 48, 51,
49, 52, 53, 46, 47, -10, -10, -10, -10, -10,
-10, -10, 69, -27, 10, 60, 60, 63, 21, 25,
5, 69, 60, 60, 61, 61, 61, 69, 63, 44,
63, 45, 63, 63, 41, 63, 59, -10, -8, -10,
-17, -16, 9, 59, 63, 44, 63, 45, 63, 63,
41, 63, -25, -21, -23, 35, -24, 69, 69, -14,
-15, -22, 69, 63, -26, 69, -10, -10, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, -10, 62,
60, 60, 69, -10, 62, -12, -13, -22, -8, -7,
-7, -11, -10, -11, -10, -10, -10, -10, -10, -10,
61, 69, -11, -10, -10, -10, -10, -10, 64, -4,
64, 69, -8, 25, -11, -11, 64, 12, 62, 64,
69, 22, 26, 62, 64, 62, 61, 60, 61, -25,
8, -22, -15, 65, -7, 62, 62, -10, -7, 12,
-13, -7, -10, -11, 62, 69, -10, 26, 62, 5,
-7, 23, 62, 65, 5, -26, 25, -7, 26,
}
var parserDef = []int{
0, -2, 3, 9, 0, 2, 0, 0, 8, 26,
0, 0, 28, 30, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 113, 114, 115, -2, 0,
18, 117, 118, 119, 120, 0, 16, 17, 0, 0,
0, 0, 1, 0, 0, 0, -2, 64, 65, 66,
67, 68, 69, 71, 72, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 104, 0, 31, 15, 18,
32, 70, 33, 34, 35, 36, 0, 0, 0, 0,
41, 42, 43, 44, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 101, 0, 0, 0, 0,
0, 0, 0, 0, 0, 105, 106, 0, 25, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 73, 74, 75, 76, 77,
78, 0, 0, 0, 0, 0, 0, 0, 0, 0,
40, 0, 100, 100, 121, 123, 122, 20, 0, 53,
0, 54, 0, 0, 55, 0, 0, 0, 47, 21,
22, 23, 0, 100, 0, 61, 0, 62, 0, 0,
63, 0, 0, 108, 109, 110, 111, 112, 19, 9,
6, 0, 116, 0, 0, 15, 79, 80, 81, 82,
83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
100, 100, 95, 0, 0, 0, 13, 0, 29, 0,
0, 0, 99, 0, 48, 49, 50, 51, 52, 0,
103, 0, 0, 56, 57, 58, 59, 60, 0, 0,
0, 7, 0, 0, 0, 0, 0, 0, 0, 0,
14, 0, 39, 45, 0, 46, 102, 100, 97, 0,
4, 0, 5, 0, 0, 93, 94, 0, 0, 0,
12, 0, 98, 0, 107, 0, 0, 38, 96, 10,
0, 37, 24, 0, 11, 0, 0, 0, 27,
}
var parserTok1 = []int{
1,
}
var parserTok2 = []int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
}
var parserTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
// parserDebug controls the generated parser's trace verbosity (0 = silent).
var parserDebug = 0

// parserLexer is the interface the generated parser requires of its lexer:
// Lex returns the next token code and fills in its semantic value.
type parserLexer interface {
	Lex(lval *parserSymType) int
	Error(s string)
}

// parserFlag marks 'no action' entries in the packed parser tables.
const parserFlag = -1000
// parserTokname returns a printable name for token class c, falling back
// to "tok-N" for codes outside the named range.
func parserTokname(c int) string {
	// 4 is TOKSTART above
	if c >= 4 && c-4 < len(parserToknames) {
		if parserToknames[c-4] != "" {
			return parserToknames[c-4]
		}
	}
	return __yyfmt__.Sprintf("tok-%v", c)
}
// parserStatname returns a printable name for parser state s, falling back
// to "state-N" when no name is recorded.
func parserStatname(s int) string {
	if s >= 0 && s < len(parserStatenames) {
		if parserStatenames[s] != "" {
			return parserStatenames[s]
		}
	}
	return __yyfmt__.Sprintf("state-%v", s)
}
// parserlex1 reads one token from the lexer and translates the lexer's
// token code into the parser's internal token class via the Tok1/Tok2/Tok3
// lookup tables; char <= 0 (end of input) maps to the EOF class.
func parserlex1(lex parserLexer, lval *parserSymType) int {
	c := 0
	char := lex.Lex(lval)
	if char <= 0 {
		c = parserTok1[0]
		goto out
	}
	if char < len(parserTok1) {
		c = parserTok1[char]
		goto out
	}
	if char >= parserPrivate {
		if char < parserPrivate+len(parserTok2) {
			c = parserTok2[char-parserPrivate]
			goto out
		}
	}
	// sparse third table: (code, class) pairs
	for i := 0; i < len(parserTok3); i += 2 {
		c = parserTok3[i+0]
		if c == char {
			c = parserTok3[i+1]
			goto out
		}
	}
out:
	if c == 0 {
		c = parserTok2[1] /* unknown char */
	}
	if parserDebug >= 3 {
		__yyfmt__.Printf("lex %s(%d)\n", parserTokname(c), uint(char))
	}
	return c
}
func parserParse(parserlex parserLexer) int {
var parsern int
var parserlval parserSymType
var parserVAL parserSymType
parserS := make([]parserSymType, parserMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
parserstate := 0
parserchar := -1
parserp := -1
goto parserstack
ret0:
return 0
ret1:
return 1
parserstack:
/* put a state and value onto the stack */
if parserDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", parserTokname(parserchar), parserStatname(parserstate))
}
parserp++
if parserp >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserS[parserp] = parserVAL
parserS[parserp].yys = parserstate
parsernewstate:
parsern = parserPact[parserstate]
if parsern <= parserFlag {
goto parserdefault /* simple state */
}
if parserchar < 0 {
parserchar = parserlex1(parserlex, &parserlval)
}
parsern += parserchar
if parsern < 0 || parsern >= parserLast {
goto parserdefault
}
parsern = parserAct[parsern]
if parserChk[parsern] == parserchar { /* valid shift */
parserchar = -1
parserVAL = parserlval
parserstate = parsern
if Errflag > 0 {
Errflag--
}
goto parserstack
}
parserdefault:
/* default state action */
parsern = parserDef[parserstate]
if parsern == -2 {
if parserchar < 0 {
parserchar = parserlex1(parserlex, &parserlval)
}
/* look through exception table */
xi := 0
for {
if parserExca[xi+0] == -1 && parserExca[xi+1] == parserstate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
parsern = parserExca[xi+0]
if parsern < 0 || parsern == parserchar {
break
}
}
parsern = parserExca[xi+1]
if parsern < 0 {
goto ret0
}
}
if parsern == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
parserlex.Error("syntax error")
Nerrs++
if parserDebug >= 1 {
__yyfmt__.Printf("%s", parserStatname(parserstate))
__yyfmt__.Printf(" saw %s\n", parserTokname(parserchar))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for parserp >= 0 {
parsern = parserPact[parserS[parserp].yys] + parserErrCode
if parsern >= 0 && parsern < parserLast {
parserstate = parserAct[parsern] /* simulate a shift of "error" */
if parserChk[parserstate] == parserErrCode {
goto parserstack
}
}
/* the current p has no shift on "error", pop stack */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", parserS[parserp].yys)
}
parserp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", parserTokname(parserchar))
}
if parserchar == parserEofCode {
goto ret1
}
parserchar = -1
goto parsernewstate /* try again in the same state */
}
}
/* reduction by production parsern */
if parserDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", parsern, parserStatname(parserstate))
}
parsernt := parsern
parserpt := parserp
_ = parserpt // guard against "declared and not used"
parserp -= parserR2[parsern]
parserVAL = parserS[parserp+1]
/* consult goto table to find next state */
parsern = parserR1[parsern]
parserg := parserPgo[parsern]
parserj := parserg + parserS[parserp].yys + 1
if parserj >= parserLast {
parserstate = parserAct[parserg]
} else {
parserstate = parserAct[parserj]
if parserChk[parserstate] != -parsern {
parserstate = parserAct[parserg]
}
}
// dummy call; replaced with literal code
switch parsernt {
case 1:
//line waccparser.y:110
{
parserlex.(*Lexer).prog = &Program{ClassList : parserS[parserpt-3].classes , FunctionList : parserS[parserpt-2].functions , StatList : parserS[parserpt-1].stmts , SymbolTable : NewInstance(), FileText :&parserlex.(*Lexer).input}
}
case 2:
//line waccparser.y:115
{ parserVAL.classes = append(parserS[parserpt-1].classes, parserS[parserpt-0].class)}
case 3:
//line waccparser.y:116
{ parserVAL.classes = []*Class{} }
case 4:
//line waccparser.y:119
{ if !checkClassIdent(parserS[parserpt-4].ident) {
parserlex.Error("Invalid class name")
}
parserVAL.class = &Class{ Pos : parserS[parserpt-5].pos, FileText :&parserlex.(*Lexer).input, Ident : ClassType(parserS[parserpt-4].ident), FieldList : parserS[parserpt-2].fields , FunctionList : parserS[parserpt-1].functions}
}
case 5:
//line waccparser.y:125
{ parserVAL.fields = append(parserS[parserpt-2].fields, parserS[parserpt-0].field)}
case 6:
//line waccparser.y:126
{ parserVAL.fields = []Field{ parserS[parserpt-0].field } }
case 7:
//line waccparser.y:129
{ parserVAL.field = Field{FieldType : parserS[parserpt-1].typedefinition, Ident : parserS[parserpt-0].ident} }
case 8:
//line waccparser.y:131
{ parserVAL.functions = append(parserS[parserpt-1].functions, parserS[parserpt-0].function)}
case 9:
//line waccparser.y:132
{ parserVAL.functions = []*Function{} }
case 10:
//line waccparser.y:135
{ if !checkStats(parserS[parserpt-1].stmts) {
parserlex.Error("Missing return statement")
}
parserVAL.function = &Function{Ident : parserS[parserpt-5].ident, ReturnType : parserS[parserpt-6].typedefinition, StatList : parserS[parserpt-1].stmts, SymbolTable: NewInstance(), FileText :&parserlex.(*Lexer).input}
}
case 11:
//line waccparser.y:141
{ if !checkStats(parserS[parserpt-1].stmts) {
parserlex.Error("Missing return statement")
}
parserVAL.function = &Function{Ident : parserS[parserpt-6].ident, ReturnType : parserS[parserpt-7].typedefinition, StatList : parserS[parserpt-1].stmts, ParameterTypes : parserS[parserpt-4].params, SymbolTable: NewInstance(), FileText :&parserlex.(*Lexer).input}
}
case 12:
//line waccparser.y:147
{ parserVAL.params = append(parserS[parserpt-2].params, parserS[parserpt-0].param)}
case 13:
//line waccparser.y:148
{ parserVAL.params = []Param{ parserS[parserpt-0].param } }
case 14:
//line waccparser.y:150
{ parserVAL.param = Param{ParamType : parserS[parserpt-1].typedefinition, Ident : parserS[parserpt-0].ident} }
case 15:
//line waccparser.y:152
{parserVAL.assignlhs = parserS[parserpt-0].ident}
case 16:
//line waccparser.y:153
{parserVAL.assignlhs = parserS[parserpt-0].arrayelem}
case 17:
//line waccparser.y:154
{parserVAL.assignlhs = parserS[parserpt-0].pairelem}
case 18:
//line waccparser.y:155
{ parserVAL.assignlhs = parserS[parserpt-0].fieldaccess}
case 19:
//line waccparser.y:156
{ parserVAL.assignlhs = ThisInstance{&parserlex.(*Lexer).input, parserS[parserpt-2].pos, parserS[parserpt-0].ident} }
case 20:
//line waccparser.y:158
{parserVAL.fieldaccess = FieldAccess{ &parserlex.(*Lexer).input, parserS[parserpt-2].pos, parserS[parserpt-2].ident, parserS[parserpt-0].ident, } }
case 21:
//line waccparser.y:160
{parserVAL.assignrhs = parserS[parserpt-0].expr}
case 22:
//line waccparser.y:161
{parserVAL.assignrhs = parserS[parserpt-0].arrayliter}
case 23:
//line waccparser.y:162
{parserVAL.assignrhs = parserS[parserpt-0].pairelem}
case 24:
//line waccparser.y:163
{ parserVAL.assignrhs = NewObject{Class : ClassType(parserS[parserpt-3].ident) , Init : parserS[parserpt-1].exprs , Pos : parserS[parserpt-4].pos, FileText :&parserlex.(*Lexer).input}}
case 25:
//line waccparser.y:165
{ parserVAL.stmts = append(parserS[parserpt-2].stmts,parserS[parserpt-0].stmt) }
case 26:
//line waccparser.y:166
{ parserVAL.stmts = []Statement{parserS[parserpt-0].stmt} }
case 27:
//line waccparser.y:168
{
stats := append(parserS[parserpt-1].stmts, parserS[parserpt-3].stmt)
w := While{Conditional : parserS[parserpt-5].expr, DoStat : stats, Pos : parserS[parserpt-11].pos, FileText :&parserlex.(*Lexer).input}
d := Declare{DecType : parserS[parserpt-10].typedefinition, Lhs : parserS[parserpt-9].ident, Rhs : parserS[parserpt-7].assignrhs, Pos : parserS[parserpt-11].pos ,FileText :&parserlex.(*Lexer).input }
parserVAL.stmts = []Statement{d,w}
}
case 28:
//line waccparser.y:175
{ parserVAL.stmt = Skip{Pos : parserS[parserpt-0].pos ,FileText :&parserlex.(*Lexer).input } }
case 29:
//line waccparser.y:176
{ parserVAL.stmt = Declare{DecType : parserS[parserpt-3].typedefinition, Lhs : parserS[parserpt-2].ident, Rhs : parserS[parserpt-0].assignrhs, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input } }
case 30:
//line waccparser.y:177
{ parserVAL.stmt = parserS[parserpt-0].stmt }
case 31:
//line waccparser.y:178
{ parserVAL.stmt = Read{ &parserlex.(*Lexer).input, parserS[parserpt-1].pos , parserS[parserpt-0].assignlhs, } }
case 32:
//line waccparser.y:179
{ parserVAL.stmt = Free{&parserlex.(*Lexer).input, parserS[parserpt-1].pos, parserS[parserpt-0].expr} }
case 33:
//line waccparser.y:180
{ parserVAL.stmt = Return{&parserlex.(*Lexer).input, parserS[parserpt-1].pos, parserS[parserpt-0].expr} }
case 34:
//line waccparser.y:181
{ parserVAL.stmt = Exit{&parserlex.(*Lexer).input, parserS[parserpt-1].pos, parserS[parserpt-0].expr} }
case 35:
//line waccparser.y:182
{ parserVAL.stmt = Print{&parserlex.(*Lexer).input, parserS[parserpt-1].pos, parserS[parserpt-0].expr} }
case 36:
//line waccparser.y:183
{ parserVAL.stmt = Println{&parserlex.(*Lexer).input, parserS[parserpt-1].pos, parserS[parserpt-0].expr} }
case 37:
//line waccparser.y:184
{ parserVAL.stmt = If{Conditional : parserS[parserpt-5].expr, ThenStat : parserS[parserpt-3].stmts, ElseStat : parserS[parserpt-1].stmts, Pos : parserS[parserpt-6].pos, FileText :&parserlex.(*Lexer).input } }
case 38:
//line waccparser.y:185
{
stats := append(parserS[parserpt-1].stmts, parserS[parserpt-3].stmt)
parserVAL.stmt = While{Conditional : parserS[parserpt-5].expr, DoStat : stats, Pos : parserS[parserpt-6].pos, FileText :&parserlex.(*Lexer).input}
}
case 39:
//line waccparser.y:189
{ parserVAL.stmt = While{Conditional : parserS[parserpt-3].expr, DoStat : parserS[parserpt-1].stmts, Pos : parserS[parserpt-4].pos, FileText :&parserlex.(*Lexer).input} }
case 40:
//line waccparser.y:190
{ parserVAL.stmt = Scope{StatList : parserS[parserpt-1].stmts, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 41:
//line waccparser.y:191
{
parserlex.Error("Syntax error : Invalid statement")
parserVAL.stmt = nil
}
case 42:
//line waccparser.y:195
{ parserlex.Error("Syntax error : Invalid statement")
parserVAL.stmt = nil
}
case 43:
//line waccparser.y:198
{
parserlex.Error("Syntax error : Invalid statement")
parserVAL.stmt = nil
}
case 44:
//line waccparser.y:202
{
parserlex.Error("Syntax error : Invalid statement")
parserVAL.stmt = nil
}
case 45:
//line waccparser.y:206
{ parserVAL.stmt = Call{Ident : parserS[parserpt-3].ident, ParamList : parserS[parserpt-1].exprs, Pos : parserS[parserpt-4].pos, FileText :&parserlex.(*Lexer).input } }
case 46:
//line waccparser.y:207
{ parserVAL.stmt = CallInstance{Class : (parserS[parserpt-3].fieldaccess.(FieldAccess)).ObjectName, Func: (parserS[parserpt-3].fieldaccess.(FieldAccess)).Field, ParamList : parserS[parserpt-1].exprs, Pos : parserS[parserpt-4].pos, FileText :&parserlex.(*Lexer).input } }
case 47:
//line waccparser.y:209
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-2].assignlhs, Rhs : parserS[parserpt-0].assignrhs, Pos : parserS[parserpt-2].pos ,FileText :&parserlex.(*Lexer).input} }
case 48:
//line waccparser.y:210
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].ident, Rhs : Binop{Left : parserS[parserpt-3].ident, Binary : PLUS, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 49:
//line waccparser.y:211
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].ident, Rhs : Binop{Left : parserS[parserpt-3].ident, Binary : SUB , Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 50:
//line waccparser.y:212
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].ident, Rhs : Binop{Left : parserS[parserpt-3].ident, Binary : DIV, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 51:
//line waccparser.y:213
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].ident, Rhs : Binop{Left : parserS[parserpt-3].ident, Binary : MUL, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 52:
//line waccparser.y:214
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].ident, Rhs : Binop{Left : parserS[parserpt-3].ident, Binary : MOD, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 53:
//line waccparser.y:215
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-2].ident, Rhs : Binop{Left : parserS[parserpt-2].ident, Binary : PLUS, Right : Integer(1), Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-2].pos ,FileText :&parserlex.(*Lexer).input} }
case 54:
//line waccparser.y:216
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-2].ident, Rhs : Binop{Left : parserS[parserpt-2].ident, Binary : SUB, Right : Integer(1), Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-2].pos ,FileText :&parserlex.(*Lexer).input} }
case 55:
//line waccparser.y:217
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-2].ident, Rhs : Binop{Left : parserS[parserpt-2].ident, Binary : MUL, Right : parserS[parserpt-2].ident, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-2].pos ,FileText :&parserlex.(*Lexer).input} }
case 56:
//line waccparser.y:219
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].fieldaccess, Rhs : Binop{Left : parserS[parserpt-3].fieldaccess, Binary : PLUS, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 57:
//line waccparser.y:220
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].fieldaccess, Rhs : Binop{Left : parserS[parserpt-3].fieldaccess, Binary : SUB , Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 58:
//line waccparser.y:221
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].fieldaccess, Rhs : Binop{Left : parserS[parserpt-3].fieldaccess, Binary : DIV, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 59:
//line waccparser.y:222
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].fieldaccess, Rhs : Binop{Left : parserS[parserpt-3].fieldaccess, Binary : MUL, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 60:
//line waccparser.y:223
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-3].fieldaccess, Rhs : Binop{Left : parserS[parserpt-3].fieldaccess, Binary : MOD, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-3].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-3].pos ,FileText :&parserlex.(*Lexer).input} }
case 61:
//line waccparser.y:224
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-2].fieldaccess, Rhs : Binop{Left : parserS[parserpt-2].fieldaccess, Binary : PLUS, Right : Integer(1), Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-2].pos ,FileText :&parserlex.(*Lexer).input} }
case 62:
//line waccparser.y:225
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-2].fieldaccess, Rhs : Binop{Left : parserS[parserpt-2].fieldaccess, Binary : SUB, Right : Integer(1), Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-2].pos ,FileText :&parserlex.(*Lexer).input} }
case 63:
//line waccparser.y:226
{ parserVAL.stmt = Assignment{Lhs : parserS[parserpt-2].fieldaccess, Rhs : Binop{Left : parserS[parserpt-2].fieldaccess, Binary : MUL, Right : parserS[parserpt-2].fieldaccess, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input}, Pos : parserS[parserpt-2].pos ,FileText :&parserlex.(*Lexer).input} }
case 64:
//line waccparser.y:228
{ parserVAL.expr = parserS[parserpt-0].integer }
case 65:
//line waccparser.y:229
{ parserVAL.expr = parserS[parserpt-0].boolean }
case 66:
//line waccparser.y:230
{ parserVAL.expr = parserS[parserpt-0].boolean }
case 67:
//line waccparser.y:231
{ parserVAL.expr = parserS[parserpt-0].character }
case 68:
//line waccparser.y:232
{ parserVAL.expr = parserS[parserpt-0].stringconst }
case 69:
//line waccparser.y:233
{ parserVAL.expr = parserS[parserpt-0].fieldaccess }
case 70:
//line waccparser.y:234
{ parserVAL.expr = parserS[parserpt-0].ident}
case 71:
//line waccparser.y:235
{ parserVAL.expr = parserS[parserpt-0].pairliter }
case 72:
//line waccparser.y:236
{ parserVAL.expr = parserS[parserpt-0].arrayelem }
case 73:
//line waccparser.y:237
{ parserVAL.expr = Unop{Unary : NOT, Expr : parserS[parserpt-0].expr, Pos : parserS[parserpt-1].pos, FileText :&parserlex.(*Lexer).input } }
case 74:
//line waccparser.y:238
{ parserVAL.expr = Unop{Unary : LEN, Expr : parserS[parserpt-0].expr, Pos : parserS[parserpt-1].pos, FileText :&parserlex.(*Lexer).input } }
case 75:
//line waccparser.y:239
{ parserVAL.expr = Unop{Unary : ORD, Expr : parserS[parserpt-0].expr, Pos : parserS[parserpt-1].pos, FileText :&parserlex.(*Lexer).input } }
case 76:
//line waccparser.y:240
{ parserVAL.expr = Unop{Unary : CHR, Expr : parserS[parserpt-0].expr, Pos : parserS[parserpt-1].pos, FileText :&parserlex.(*Lexer).input } }
case 77:
//line waccparser.y:241
{ parserVAL.expr = Unop{Unary : SUB, Expr : parserS[parserpt-0].expr, Pos : parserS[parserpt-1].pos, FileText :&parserlex.(*Lexer).input } }
case 78:
//line waccparser.y:242
{ parserVAL.expr = parserS[parserpt-0].expr }
case 79:
//line waccparser.y:243
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : PLUS, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 80:
//line waccparser.y:244
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : SUB, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 81:
//line waccparser.y:245
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : MUL, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 82:
//line waccparser.y:246
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : MOD, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 83:
//line waccparser.y:247
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : DIV, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 84:
//line waccparser.y:248
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : LT, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 85:
//line waccparser.y:249
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : GT, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 86:
//line waccparser.y:250
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : LTE, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 87:
//line waccparser.y:251
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : GTE, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 88:
//line waccparser.y:252
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : EQ, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 89:
//line waccparser.y:253
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : NEQ, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 90:
//line waccparser.y:254
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : AND, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 91:
//line waccparser.y:255
{ parserVAL.expr = Binop{Left : parserS[parserpt-2].expr, Binary : OR, Right : parserS[parserpt-0].expr, Pos : parserS[parserpt-2].pos, FileText :&parserlex.(*Lexer).input } }
case 92:
//line waccparser.y:256
{ parserVAL.expr = parserS[parserpt-1].expr }
case 93:
//line waccparser.y:257
{ parserVAL.expr = Call{Ident : parserS[parserpt-3].ident, ParamList : parserS[parserpt-1].exprs, Pos : parserS[parserpt-4].pos, FileText :&parserlex.(*Lexer).input } }
case 94:
//line waccparser.y:258
{ parserVAL.expr = CallInstance{Class : (parserS[parserpt-3].fieldaccess.(FieldAccess)).ObjectName, Func: (parserS[parserpt-3].fieldaccess.(FieldAccess)).Field, ParamList : parserS[parserpt-1].exprs, Pos : parserS[parserpt-4].pos, FileText :&parserlex.(*Lexer).input } }
case 95:
//line waccparser.y:259
{ parserVAL.expr = ThisInstance{&parserlex.(*Lexer).input, parserS[parserpt-2].pos, parserS[parserpt-0].ident} }
case 96:
//line waccparser.y:260
{ parserVAL.expr = NewPair{FstExpr : parserS[parserpt-3].expr, SndExpr : parserS[parserpt-1].expr, Pos : parserS[parserpt-5].pos, FileText :&parserlex.(*Lexer).input } }
case 97:
//line waccparser.y:262
{ parserVAL.arrayliter = ArrayLiter{&parserlex.(*Lexer).input, parserS[parserpt-2].pos, parserS[parserpt-1].exprs } }
case 98:
//line waccparser.y:264
{parserVAL.exprs = append(parserS[parserpt-2].exprs, parserS[parserpt-0].expr)}
case 99:
//line waccparser.y:265
{parserVAL.exprs = []Evaluation{parserS[parserpt-0].expr}}
case 100:
//line waccparser.y:266
{parserVAL.exprs = []Evaluation{}}
case 101:
//line waccparser.y:268
{parserVAL.arrayelem = ArrayElem{Ident: parserS[parserpt-1].ident, Exprs : parserS[parserpt-0].exprs, Pos : parserS[parserpt-1].pos,FileText :&parserlex.(*Lexer).input } }
case 102:
//line waccparser.y:270
{parserVAL.exprs = append(parserS[parserpt-3].exprs, parserS[parserpt-1].expr)}
case 103:
//line waccparser.y:271
{parserVAL.exprs = []Evaluation{parserS[parserpt-1].expr}}
case 104:
//line waccparser.y:273
{ parserVAL.pairliter = PairLiter{} }
case 105:
//line waccparser.y:275
{ parserVAL.pairelem = PairElem{Fsnd: Fst, Expr : parserS[parserpt-0].expr, Pos : parserS[parserpt-1].pos } }
case 106:
//line waccparser.y:276
{ parserVAL.pairelem = PairElem{Fsnd: Snd, Expr : parserS[parserpt-0].expr, Pos : parserS[parserpt-1].pos } }
case 107:
//line waccparser.y:278
{ parserVAL.typedefinition = PairType{FstType : parserS[parserpt-3].pairelemtype, SndType : parserS[parserpt-1].pairelemtype} }
case 108:
//line waccparser.y:280
{ parserVAL.pairelemtype = parserS[parserpt-0].typedefinition }
case 109:
//line waccparser.y:281
{ parserVAL.pairelemtype = parserS[parserpt-0].typedefinition }
case 110:
//line waccparser.y:282
{ parserVAL.pairelemtype = Pair}
case 111:
//line waccparser.y:283
{ parserVAL.pairelemtype = parserS[parserpt-0].typedefinition}
case 112:
//line waccparser.y:284
{ parserVAL.pairelemtype = ClassType(parserS[parserpt-0].ident)}
case 113:
//line waccparser.y:286
{ parserVAL.typedefinition = parserS[parserpt-0].typedefinition }
case 114:
//line waccparser.y:287
{ parserVAL.typedefinition = parserS[parserpt-0].typedefinition }
case 115:
//line waccparser.y:288
{ parserVAL.typedefinition = parserS[parserpt-0].typedefinition }
case 116:
//line waccparser.y:289
{ parserVAL.typedefinition = ClassType(parserS[parserpt-0].ident) }
case 117:
//line waccparser.y:291
{ parserVAL.typedefinition = Int }
case 118:
//line waccparser.y:292
{ parserVAL.typedefinition = Bool }
case 119:
//line waccparser.y:293
{ parserVAL.typedefinition = Char }
case 120:
//line waccparser.y:294
{ parserVAL.typedefinition = String }
case 121:
//line waccparser.y:296
{ parserVAL.typedefinition = ArrayType{Type : parserS[parserpt-2].typedefinition} }
case 122:
//line waccparser.y:297
{ parserVAL.typedefinition = ArrayType{Type : parserS[parserpt-2].typedefinition} }
case 123:
//line waccparser.y:298
{ parserVAL.typedefinition = ArrayType{Type : parserS[parserpt-2].typedefinition} }
}
goto parserstack /* stack new state and value */
}
|
package lib
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
)
// layerDetails is a minimal LayerDetails implementation that carries
// nothing but the layer's content digest.
type layerDetails string

// ContentDigest returns the layer's content digest (the string itself).
func (l layerDetails) ContentDigest() string {
	return string(l)
}
// tagDetails is the concrete TagDetails implementation holding the data
// fetched for a single repository tag.
type tagDetails struct {
	name string
	tag string
	rawManifest interface{}
	contentDigest string
	layers []LayerDetails
}

// RawManifest returns the manifest exactly as decoded from the registry
// response (an untyped JSON document).
func (t *tagDetails) RawManifest() interface{} {
	return t.rawManifest
}

// ContentDigest returns the manifest digest reported by the registry.
func (t *tagDetails) ContentDigest() string {
	return t.contentDigest
}

// RepositoryName returns the repository the tag belongs to.
func (t *tagDetails) RepositoryName() string {
	return t.name
}

// TagName returns the tag (or reference) name.
func (t *tagDetails) TagName() string {
	return t.tag
}

// Layers returns the per-layer details parsed from the manifest.
func (t *tagDetails) Layers() []LayerDetails {
	return t.layers
}

// setLayers converts the parsed manifest layers into LayerDetails values
// (each reduced to its blob digest).
func (t *tagDetails) setLayers(layers []parsedLayer) {
	t.layers = make([]LayerDetails, 0, len(layers))

	for _, layer := range layers {
		t.layers = append(t.layers, layerDetails(layer.blobSum()))
	}
}
// GetTagDetails fetches the manifest for ref and returns the parsed tag
// details (raw manifest, content digest and layer digests).
// manifestVersion selects the manifest schema requested via the accept
// header (see getHeadersForManifestVersion).
func (r *registryApi) GetTagDetails(ref Refspec, manifestVersion uint) (details TagDetails, err error) {
	url := r.endpointUrl(fmt.Sprintf("v2/%s/manifests/%s", ref.Repository(), ref.Reference()))

	headers, err := r.getHeadersForManifestVersion(manifestVersion)
	if err != nil {
		return
	}

	apiResponse, err := r.connector.Get(
		url,
		headers,
		cacheHintTagDetails(ref.Repository()),
	)
	if err != nil {
		return
	}

	// BUG FIX: the response body must always be closed, not only when the
	// server requested connection close (apiResponse.Close); the old
	// condition leaked bodies/connections for keep-alive responses.
	defer apiResponse.Body.Close()

	switch apiResponse.StatusCode {
	case http.StatusForbidden, http.StatusUnauthorized:
		err = genericAuthorizationError
	case http.StatusNotFound:
		err = newNotFoundError(fmt.Sprintf("%v : no such repository or reference", ref))
	case http.StatusOK:
	default:
		err = newInvalidStatusCodeError(apiResponse.StatusCode)
	}
	if err != nil {
		return
	}

	// Buffer the body once; it is unmarshalled twice (raw + parsed).
	bodyBuffer := bytes.Buffer{}
	_, err = io.Copy(&bodyBuffer, apiResponse.Body)
	if err != nil {
		return
	}

	var rawManifest interface{}
	err = json.Unmarshal(bodyBuffer.Bytes(), &rawManifest)
	if err != nil {
		return
	}

	parsedManifest, err := parseManifest(bodyBuffer.Bytes())
	if err != nil {
		return
	}

	_details := &tagDetails{
		rawManifest:   rawManifest,
		name:          ref.Repository(),
		tag:           ref.Reference(),
		contentDigest: apiResponse.Header.Get("docker-content-digest"),
	}
	_details.setLayers(parsedManifest.layers())

	details = _details
	return
}
// getHeadersForManifestVersion returns the request headers needed to ask
// the registry for the given manifest schema version. Version 1 needs no
// extra headers; version 2 sets the v2 manifest accept header; anything
// else is an error.
func (r *registryApi) getHeadersForManifestVersion(version uint) (map[string]string, error) {
	switch version {
	case 1:
		return nil, nil
	case 2:
		return map[string]string{
			"accept": "application/vnd.docker.distribution.manifest.v2+json",
		}, nil
	default:
		return nil, errors.New("invalid manifest version")
	}
}
|
package main
import (
"fmt"
"strings"
)
// main tries every listed swap (1-based index pairs) on str and prints the
// lexicographically greatest string produced by a single swap.
func main() {
	str := "abdc"
	pairs := [][]int{{1, 4}, {3, 4}}

	greatest := ""
	for _, pair := range pairs {
		i, j := pair[0]-1, pair[1]-1

		chars := strings.Split(str, "")
		chars[i], chars[j] = chars[j], chars[i]

		if candidate := strings.Join(chars, ""); candidate > greatest {
			greatest = candidate
		}
	}
	fmt.Println(greatest)
}
|
package calc
import (
"fmt"
)
// init announces that the calc package has been initialized.
// NOTE(review): printing from init is a side effect that is generally
// discouraged; kept for compatibility with existing behavior.
func init() {
	fmt.Println("calc init...")
}
// Add returns the sum of a and b.
func Add(a, b int) int {
	return a + b
}
// Minus returns the difference a minus b.
func Minus(a, b int) int {
	return a - b
}
// multiply returns the product of a and b.
// NOTE(review): unexported while Add/Minus are exported -- confirm whether
// this asymmetry is intentional.
func multiply(a, b int) int {
	return a * b
}
|
package dushengchen
// isPalindrome reports whether x reads the same forwards and backwards in
// decimal. Negative numbers are never palindromes.
func isPalindrome(x int) bool {
	return x >= 0 && reverse(x) == x
}
//
////from q7
//func reverse(x int) int {
// if x == 0 {
// return 0
// } else if x < 0 {
// return -reverse(-x)
// }
// res := 0
// for {
// res = res * 10+ x % 10
// x = x / 10
//
// if x == 0 {
// break
// }
// }
//
// return res
//}
|
package main;
import (
"fmt";
"time";
)
// Customer holds basic contact details for a person.
// NOTE(review): old (age) and sdt (phone number) are stored as strings --
// presumably to preserve leading zeros in phone numbers; confirm intent.
type Customer struct {
	name string
	old string
	sdt string
	address string
}
// geometry is implemented by any shape that can report an integer area.
type geometry interface {
	area() int
}
// reg is a rectangle described by its integer width and height.
type reg struct {
	w int
	h int
}

// area implements geometry by returning width times height.
func (r reg) area() int {
	return r.h * r.w
}
// s prints the area of any geometry value.
// (gofmt cleanup: removed un-idiomatic trailing semicolons.)
func s(g geometry) {
	fmt.Println(g.area())
}
// goroutin2 prints a greeting identifying the second goroutine.
// (gofmt cleanup: removed un-idiomatic trailing semicolons.)
func goroutin2() {
	fmt.Println("hello i'am goroutine2")
}
// goroutin prints a greeting identifying the first goroutine.
// (gofmt cleanup: removed un-idiomatic trailing semicolons.)
func goroutin() {
	fmt.Println("hello i'm goroutine")
}
// main launches two demo goroutines, waits for them to run, then
// demonstrates struct literals and interface dispatch.
// (gofmt cleanup: removed un-idiomatic trailing semicolons.)
func main() {
	go goroutin()
	go goroutin2()
	// Crude synchronization: give the goroutines a chance to print before
	// main exits (a WaitGroup would be deterministic, but this preserves
	// the original behavior).
	time.Sleep(1 * time.Second)

	fmt.Println("xinchao")

	vt := Customer{
		name:    "minh",
		old:     "20",
		sdt:     "0971725797",
		address: "okay",
	}
	fmt.Println(vt)

	hv1 := reg{
		w: 5,
		h: 6,
	}
	s(hv1)
}
|
package api
import (
utils "github.com/kevinbarbary/go-lms/utils"
"encoding/json"
"log"
)
// EnrolStatus is a one-letter enrolment status code. // @todo - rune
type EnrolStatus string

// Enabled reports whether the status represents an active enrolment.
// Status A = Active, G = Group, P = Pending, D = Disabled, etc.
func (s EnrolStatus) Enabled() bool {
	switch s {
	case "A", "G":
		return true
	}
	return false
}
// UserEnrol is one enrolment record as returned by the LMS enrolments API.
// Field names mirror the JSON payload exactly.
type UserEnrol struct {
	EnrollID int `json:"EnrollID"`
	CourseID int64 `json:"CourseID"`
	Type string `json:"Type"`
	CourseTitle string `json:"CourseTitle"`
	PublisherID string `json:"PublisherID"`
	Publisher string `json:"Publisher"`
	PublisherLogo string `json:"PublisherLogo"`
	StartDate JsonDate `json:"StartDate"` // date
	EndDate JsonDate `json:"EndDate"` // date
	TotalDuration int64 `json:"TotalDuration"`
	//LastAccessed JsonDate `json:"LastAccessed"` // date // @todo - use this instead
	LastAccessed string `json:"LastAccessed"` // date kept as raw string for now
	Completed bool `json:"Completed"`
	EnrollStatus EnrolStatus `json:"EnrollStatus"`
	CertificateURL string `json:"CertificateURL"`
}
// UserEnrolments fetches the enrolment list for loginId from the LMS API.
// It returns the enrolments, a (possibly refreshed) auth token, the user
// identifier, the server timestamp, and an error. The data payload is
// round-tripped through JSON to convert it into []UserEnrol.
//
// NOTE(review): when the API call succeeds but returns no data, the
// function returns a nil slice together with a nil error -- callers cannot
// distinguish "no data" from success with zero results; confirm intended.
func UserEnrolments(token, useragent, site, loginId string) ([]UserEnrol, string, string, Timestamp, error) {
	response, err := Call("GET", utils.Endpoint(utils.Concat("/enrolments/", loginId)), token, useragent, site, nil, true)
	if err != nil {
		log.Print("UserEnrolments Error - invalid response from API call... ", err.Error())
		return nil, "", "", 0, err
	}
	// extract also yields a refreshed token and the resolved user id,
	// shadowing the token parameter from here on.
	data, e, help, now, token, user := extract(response)
	if e != "" {
		log.Print("UserEnrolments Error... ", e)
	}
	if help != "" {
		log.Print("UserEnrolments help... ", help)
	}
	if data == nil {
		log.Print("UserEnrolments... NO DATA")
		return nil, token, user, now, err
	}
	// Re-marshal the generic payload, then unmarshal into the typed slice.
	byteData, err := json.Marshal(data)
	if err != nil {
		log.Print("UserEnrolments - Marshal fail... ", err.Error())
		return nil, token, user, now, err
	}
	var val []UserEnrol
	err = json.Unmarshal(byteData, &val)
	if err != nil {
		log.Print("UserEnrolments - Unmarshal fail... ", err.Error())
		return nil, token, user, now, err
	}
	return val, token, user, now, nil
}
|
package main
import (
"crypto/rand"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/rn/iso9660wrap"
log "github.com/sirupsen/logrus"
)
// buildQemuCmdline translates config into the qemu command-line argument
// list. It returns the (unmodified) config and the argument slice.
func buildQemuCmdline(config QemuConfig) (QemuConfig, []string) {
	// Iterate through the flags and build arguments
	var qemuArgs []string
	qemuArgs = append(qemuArgs, "-smp", config.CPUs)
	qemuArgs = append(qemuArgs, "-m", config.Memory)
	qemuArgs = append(qemuArgs, "-uuid", config.UUID.String())
	qemuArgs = append(qemuArgs, "-pidfile", filepath.Join(config.StatePath, "qemu.pid"))

	// Need to specify the vcpu type when running qemu on arm64 platform, for security reason,
	// the vcpu should be "host" instead of other names such as "cortex-a53"...
	if config.Arch == "aarch64" {
		if runtime.GOARCH == "arm64" {
			qemuArgs = append(qemuArgs, "-cpu", "host")
		} else {
			qemuArgs = append(qemuArgs, "-cpu", "cortex-a57")
		}
	}

	if config.KVM {
		qemuArgs = append(qemuArgs, "-enable-kvm")
		if config.Arch == "aarch64" {
			qemuArgs = append(qemuArgs, "-machine", "virt,gic_version=host")
		} else {
			qemuArgs = append(qemuArgs, "-machine", "q35,accel=kvm:tcg")
		}
	} else {
		if config.Arch == "aarch64" {
			qemuArgs = append(qemuArgs, "-machine", "virt")
		} else {
			qemuArgs = append(qemuArgs, "-machine", "q35")
		}
	}

	if config.ISOBoot {
		qemuArgs = append(qemuArgs, "-boot", "d")
	}

	// need to be Restructured
	for i, p := range config.ISOImages {
		if i == 0 {
			// This is hdc/CDROM which is skipped by the disk loop above
			qemuArgs = append(qemuArgs, "-cdrom", p)
		} else {
			qemuArgs = append(qemuArgs, "-drive", "file="+p+",index="+strconv.Itoa(i)+",media=cdrom")
		}
	}

	// network
	if config.NetdevConfig == "" {
		qemuArgs = append(qemuArgs, "-net", "none")
	} else {
		mac := retrieveMAC(config.StatePath)
		qemuArgs = append(qemuArgs, "-device", "virtio-net-pci,netdev=t0,mac="+mac.String())
		forwardings, err := buildQemuForwardings(config.PublishedPorts)
		if err != nil {
			log.Error(err)
		}
		qemuArgs = append(qemuArgs, "-netdev", config.NetdevConfig+forwardings)
	}

	// gui: run headless unless a GUI was requested
	// (idiom fix: was `config.GUI != true`)
	if !config.GUI {
		qemuArgs = append(qemuArgs, "-nographic")
	}

	return config, qemuArgs
}
// multipleFlag collects every value of a command-line flag that may be
// repeated any number of times.
type multipleFlag []string

// publishedPorts describes one host-to-guest port forwarding rule.
type publishedPorts struct {
	guest    int
	host     int
	protocol string
}

// String implements flag.Value.
func (f *multipleFlag) String() string {
	return "A multiple flag is a type of flag that can be repeated any number of times"
}

// Set implements flag.Value by accumulating each occurrence.
func (f *multipleFlag) Set(value string) error {
	*f = append(*f, value)
	return nil
}

// splitPublish parses a "<host>:<guest>" or "<host>:<guest>/<tcp|udp>"
// specification into a publishedPorts value. The protocol defaults to tcp;
// both ports must be in 1..65535.
func splitPublish(publish string) (publishedPorts, error) {
	var p publishedPorts

	parts := strings.Split(publish, ":")
	if len(parts) < 2 {
		return p, fmt.Errorf("Unable to parse the ports to be published, should be in format <host>:<guest> or <host>:<guest>/<tcp|udp>")
	}

	hostPort, err := strconv.Atoi(parts[0])
	if err != nil {
		return p, fmt.Errorf("The provided hostPort can't be converted to int")
	}

	guestAndProto := strings.Split(parts[1], "/")
	protocol := "tcp"
	if len(guestAndProto) == 2 {
		protocol = strings.TrimSpace(strings.ToLower(guestAndProto[1]))
	}
	if protocol != "tcp" && protocol != "udp" {
		return p, fmt.Errorf("Provided protocol is not valid, valid options are: udp and tcp")
	}

	guestPort, err := strconv.Atoi(guestAndProto[0])
	if err != nil {
		return p, fmt.Errorf("The provided guestPort can't be converted to int")
	}

	if hostPort < 1 || hostPort > 65535 {
		return p, fmt.Errorf("Invalid hostPort: %d", hostPort)
	}
	if guestPort < 1 || guestPort > 65535 {
		return p, fmt.Errorf("Invalid guestPort: %d", guestPort)
	}

	return publishedPorts{guest: guestPort, host: hostPort, protocol: protocol}, nil
}

// buildQemuForwardings renders the published-port flags into the hostfwd
// suffix appended to qemu's -netdev argument (empty when no flags given).
func buildQemuForwardings(publishFlags multipleFlag) (string, error) {
	if len(publishFlags) == 0 {
		return "", nil
	}
	var b strings.Builder
	for _, publish := range publishFlags {
		p, err := splitPublish(publish)
		if err != nil {
			return "", err
		}
		fmt.Fprintf(&b, ",hostfwd=%s::%d-:%d", p.protocol, p.host, p.guest)
	}
	return b.String(), nil
}
// WriteMetadataISO writes a metadata ISO file in a format usable by pkg/metadata
func WriteMetadataISO(path string, content []byte) error {
	// BUG FIX: include O_TRUNC so overwriting an existing, larger file does
	// not leave stale trailing bytes after the newly written image.
	outfh, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer outfh.Close()

	return iso9660wrap.WriteBuffer(outfh, content, "config")
}
// retrieveMAC returns the MAC address persisted in <statePath>/mac-addr,
// generating and persisting a fresh one when no valid address is stored.
func retrieveMAC(statePath string) net.HardwareAddr {
	fileName := filepath.Join(statePath, "mac-addr")

	// BUG FIX: honor a previously persisted address so the VM keeps a
	// stable MAC across restarts. The original unconditionally regenerated
	// and overwrote the file despite the "did not generate a mac yet"
	// comment implying reuse was intended.
	if data, err := ioutil.ReadFile(fileName); err == nil {
		if mac, err := net.ParseMAC(strings.TrimSpace(string(data))); err == nil {
			return mac
		}
		// Stored value is unreadable/corrupt; fall through and regenerate.
	}

	// we did not generate a mac yet. generate one
	mac := generateMAC()
	if err := ioutil.WriteFile(fileName, []byte(mac.String()), 0640); err != nil {
		log.Fatalln("failed to write mac-addr file:", err)
	}
	return mac
}
// generateMAC returns a random, locally administered, unicast MAC address.
func generateMAC() net.HardwareAddr {
	mac := make([]byte, 6)
	n, err := rand.Read(mac)
	if err != nil {
		log.WithError(err).Fatal("failed to generate random mac address")
	}
	if n != 6 {
		// BUG FIX: was log.WithError(err).Fatal("generated %d bytes...", n)
		// -- Fatal does not interpret a format string, and err is always
		// nil on this branch, so WithError added nothing.
		log.Fatalf("generated %d bytes for random mac address", n)
	}
	mac[0] &^= 0x01 // Clear multicast bit
	mac[0] |= 0x2   // Set locally administered bit
	return net.HardwareAddr(mac)
}
|
package main
import (
"fmt"
"os"
"os/exec"
)
// warn pipes find | xargs egrep | sed to emit an Xcode-style warning on
// stderr for every legacy UIImage/NSImage(named:) use in *.swift files
// under directory.
func warn(directory string) error {
	if directory == "" {
		return fmt.Errorf("warn directory should not be nil")
	}

	// Find all Swift sources, NUL-separated for xargs -0.
	find := exec.Command(
		"/usr/bin/find",
		directory,
		"-name",
		"*.swift",
		"-print0",
	)
	// Grep for the legacy constructors, keeping file:line prefixes.
	match := exec.Command(
		"xargs",
		"-0",
		"egrep",
		"--with-filename",
		"--line-number",
		"--only-matching",
		"(UIImage|NSImage)\\(named",
	)
	// Rewrite each hit into a compiler-style warning line.
	sed := exec.Command(
		"sed",
		"-E",
		"s/(UIImage|NSImage).*/ warning: legacy use of imageNamed; consider using Resourceful/",
	)

	// BUG FIX: check the StdoutPipe error before wiring the pipe into the
	// next command (the original assigned match.Stdin first).
	stdin, err := find.StdoutPipe()
	if err != nil {
		return err
	}
	match.Stdin = stdin

	sed.Stdin, err = match.StdoutPipe()
	if err != nil {
		return err
	}
	sed.Stdout = os.Stderr

	// Start the downstream stages, then run find to completion and wait
	// for the consumers in pipeline order.
	err = match.Start()
	if err != nil {
		return err
	}
	err = sed.Start()
	if err != nil {
		return err
	}
	err = find.Run()
	if err != nil {
		return err
	}
	err = match.Wait()
	if err != nil {
		return err
	}
	err = sed.Wait()
	if err != nil {
		return err
	}
	return nil
}
|
package template
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/AlecAivazis/survey"
"gopkg.in/yaml.v2"
)
const (
	// configName is the configuration file name, relative to the template
	// root directory.
	configName = "config.yaml"
	// templateDir is the directory holding the template files and
	// directories, relative to the template root directory.
	templateDir = "template"
)
// -- Template stuff --
// meta holds the template's descriptive metadata from config.yaml.
type meta struct {
	Description string `yaml:"description"`
	Author string `yaml:"author"`
}
// question is one prompt definition from config.yaml; it is later
// converted into a survey.Question (see GetQuestions).
type question struct {
	Name string `yaml:"name"`
	Type string `yaml:"type"`
	Message string `yaml:"message"`
	Help string `yaml:"help"`
	Required bool `yaml:"required"`
	Default string `yaml:"default"`
	Options []string `yaml:"options"`
}
// Template is our representation of a set of templates and questions.
type Template struct {
	// The Templates metadata
	Meta meta `yaml:"meta"`
	// Our internal representation of the Questions to ask
	Questions []question `yaml:"questions"`
	// baseDir is the template root directory this Template was loaded from.
	baseDir string
}
// New takes a directory containing a template, parses its config.yaml and
// returns a new Template rooted at that directory.
func New(dir string) (Template, error) {
	t := Template{
		baseDir: dir,
	}

	// Idiom: read the whole config in one call instead of
	// Open + ReadAll + manual Close.
	raw, err := ioutil.ReadFile(filepath.Join(t.baseDir, configName))
	if err != nil {
		return t, err
	}

	err = yaml.Unmarshal(raw, &t)
	return t, err
}
// GetQuestions converts the Template's internal question definitions into
// the format used by survey. Questions that fail to parse are skipped.
func (t *Template) GetQuestions() []*survey.Question {
	questions := make([]*survey.Question, 0, len(t.Questions))
	for _, q := range t.Questions {
		sq, err := parseQuestion(q)
		if err != nil {
			// TODO - Need to handle the error here is a better way
			continue
		}
		questions = append(questions, sq)
	}
	return questions
}
// Execute applies the template to a given directory using the data that has
// been passed in.
//
// Every path under the template directory and every file body is itself
// evaluated as a template against data: directories are recreated with
// their original mode, files are rendered via renderTemplate into the
// (templated) target path.
func (t *Template) Execute(targetRoot string, data map[string]interface{}) error {
	// Walk the template dir evaluate each path as a template and then
	// execute each file as a template
	rootDir := filepath.Join(t.baseDir, templateDir)
	walker := func(path string, info os.FileInfo, err error) error {
		// First handle any incoming error by returning it to the
		// caller rather than trying to do something fancy here.
		if err != nil {
			return err
		}
		// Build our target path from the target root and the relative
		// path to the base target dir. Then evaluate this as a
		// template to get our actual target path.
		relPath, err := filepath.Rel(rootDir, path)
		if err != nil {
			return err
		}
		rawPath := filepath.Join(targetRoot, relPath)
		targetPath, err := templatePath(rawPath, data)
		if err != nil {
			return err
		}
		fmt.Printf("Processing %q\n", targetPath)
		// If this is a dir we just create the dir and set the mode.
		if info.IsDir() {
			return os.MkdirAll(targetPath, info.Mode())
		}
		// If this is a file we process the source file as a template,
		// execute it into a new file and set the mode.
		// Note: the defers below run when this walker invocation
		// returns, i.e. once per file, not at the end of Execute.
		src, err := os.Open(path)
		if err != nil {
			return err
		}
		defer src.Close()
		// Create/truncate the target file and set it's mode before
		// executing the template
		dest, err := os.Create(targetPath)
		if err != nil {
			return err
		}
		defer dest.Close()
		err = dest.Chmod(info.Mode())
		if err != nil {
			return err
		}
		return renderTemplate(src, dest, data)
	}
	return filepath.Walk(rootDir, walker)
}
|
package config
import (
"os"
"path"
)
// Server holds the default host and port the fx server listens on.
var Server = map[string]string{
	"host": "localhost",
	"port": "8080",
}

// Client holds client-side settings: the local cache directory and the
// URL prebuilt images are downloaded from.
var Client = map[string]string{
	"cache_dir": path.Join(os.Getenv("HOME"), ".fx/"),
	"remote_images_url": "https://raw.githubusercontent.com/metrue/fx/master/images.zip",
}
|
// Implement [VLQ] encoding/decoding
// https://en.wikipedia.org/wiki/Variable-length_quantity
package variablelengthquantity
const testVersion = 1
// EncodeVarint encodes input as a variable-length quantity (VLQ):
// big-endian groups of 7 bits, with the continuation bit (0x80) set on
// every byte except the last. Zero encodes as a single 0x00 byte.
func EncodeVarint(input uint32) []byte {
	out := []byte{byte(input % 128)}
	for input /= 128; input > 0; input /= 128 {
		out = append([]byte{byte(input%128 + 128)}, out...)
	}
	return out
}
// DecodeVarint decodes a big-endian VLQ byte sequence into a uint32.
// Every byte except the last is assumed to carry the continuation bit
// (0x80), which is stripped before accumulating 7 bits per byte.
//
// The size result is preserved verbatim from the original implementation
// (1 for inputs of up to two bytes, len-1 otherwise); its intended
// meaning is unclear -- NOTE(review): confirm semantics with callers.
func DecodeVarint(byteArr []byte) (output uint32, size int) {
	last := len(byteArr) - 1
	for i, b := range byteArr {
		output <<= 7
		if i < last {
			// Non-final byte: drop the continuation bit.
			output += uint32(b - 128)
		} else {
			output += uint32(b)
		}
	}
	if len(byteArr) <= 2 {
		size = 1
	} else {
		size = len(byteArr) - 1
	}
	return
}
|
package runner
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/Shopify/sarama"
"github.com/pkg/errors"
)
// Topic is a definition for a kafka topic.
type Topic struct {
	Name string // topic name in the cluster
	Partitions int // desired partition count (enforced on create only)
	Replicas int // desired replication factor (enforced on create only)
	Compact bool // if true, cleanup.policy=compact is applied
	Retention time.Duration // mapped to retention.ms
	Segment time.Duration // mapped to segment.ms
	Create bool // whether this service owns (creates/updates) the topic
}
// ConfigureTopics configures and checks topics in the slice passed.
// Topics marked Create are created (or their config altered) as needed;
// topics not marked Create must already exist. In KAFMESH_TEST_MODE all
// topics are forced to small single-replica settings and auto-created.
func ConfigureTopics(ctx context.Context, brokers []string, topics []Topic) error {
	_, testMode := os.LookupEnv("KAFMESH_TEST_MODE")

	config := sarama.NewConfig()
	config.Version = sarama.MaxVersion
	config.Consumer.Offsets.Initial = sarama.OffsetOldest
	config.Consumer.Offsets.AutoCommit.Enable = true
	config.Consumer.Offsets.CommitInterval = 1 * time.Second

	client, err := sarama.NewClusterAdmin(brokers, config)
	if err != nil {
		return errors.Wrap(err, "failed to create cluster admin")
	}
	// BUG FIX: the admin client holds broker connections and was never
	// closed, leaking connections on every call.
	defer client.Close()

	descriptions, err := client.ListTopics()
	if err != nil {
		return errors.Wrap(err, "failed to describe topics")
	}

	errs := []string{}
	for _, topic := range topics {
		if testMode {
			topic.Replicas = 1
			topic.Create = true
			topic.Segment = 1 * time.Hour
			topic.Retention = 1 * time.Hour
			topic.Partitions = 10
		}

		definition, exists := descriptions[topic.Name]
		if !exists && !topic.Create {
			errs = append(errs, fmt.Sprintf("topic '%s' does not exist and is not created in this service", topic.Name))
			continue
		}

		if !topic.Create {
			continue
		}

		retention := fmt.Sprintf("%d", topic.Retention/time.Millisecond)
		segment := fmt.Sprintf("%d", topic.Segment/time.Millisecond)

		config := map[string]*string{
			"retention.ms": &retention,
			"segment.ms":   &segment,
		}

		if topic.Compact {
			c := "compact"
			config["cleanup.policy"] = &c
		}

		if !exists {
			err = client.CreateTopic(topic.Name, &sarama.TopicDetail{
				NumPartitions:     int32(topic.Partitions),
				ReplicationFactor: int16(topic.Replicas),
				ConfigEntries:     config,
			}, false)
			// Tolerate a concurrent creator racing us.
			if err != nil && strings.Contains(err.Error(), "Topic with this name already exists") {
				continue
			}
			if err != nil {
				return errors.Wrap(err, "failed to create topic")
			}
			continue
		}

		if definition.NumPartitions != int32(topic.Partitions) {
			errs = append(errs, fmt.Sprintf("topic '%s' is configured with '%d' partitions and cannot be change to '%d' partitions", topic.Name, definition.NumPartitions, topic.Partitions))
			continue
		}

		shouldUpdate := false
		for k, v := range config {
			cv, ok := definition.ConfigEntries[k]
			// BUG FIX: compare the pointed-to values; comparing the
			// *string pointers (cv != v) was always true for distinct
			// allocations, forcing an AlterConfig call on every run.
			if !ok || cv == nil || *cv != *v {
				shouldUpdate = true
			}
		}

		for k := range definition.ConfigEntries {
			_, ok := config[k]
			if !ok {
				shouldUpdate = true
			}
		}

		if !shouldUpdate {
			continue
		}

		err = client.AlterConfig(sarama.TopicResource, topic.Name, config, false)
		if err != nil {
			// BUG FIX: message typo was "failed to alert config".
			return errors.Wrapf(err, "failed to alter config on topic '%s'", topic.Name)
		}
	}

	if len(errs) > 0 {
		return errors.Errorf("topic configuration invalid '%s'", strings.Join(errs, ","))
	}

	return nil
}
|
package main
// reverse prints a single value; it is used as a deferred callback so that
// a sequence of deferred calls emits values in reverse order.
func reverse(n int) {
	println(n)
}
// main schedules one deferred reverse call per element. Deferred calls run
// LIFO at function exit, so the slice values are printed in reverse order
// (5, 4, 3, 2, 1).
func main() {
	nums := []int{1, 2, 3, 4, 5}
	for idx := 0; idx < len(nums); idx++ {
		defer reverse(nums[idx])
	}
}
|
// Copyright 2021 Akamai Technologies, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collectors
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
client "github.com/akamai/AkamaiOPEN-edgegrid-golang/client-v1"
gtm "github.com/akamai/AkamaiOPEN-edgegrid-golang/reportsgtm-v1" // Note: imports ./configgtm-v1_3
"strconv"
"time"
)
var (
	// gtmDatacenterTrafficExporter is the package-level exporter instance.
	// It is populated by NewDatacenterTrafficCollector and read by
	// createDatacenterMaps for the metric prefix and lookback settings.
	gtmDatacenterTrafficExporter GTMDatacenterTrafficExporter
)
// GTMDatacenterTrafficExporter is a prometheus.Collector that reports Akamai
// GTM datacenter traffic (requests per reporting interval) per domain and
// datacenter.
type GTMDatacenterTrafficExporter struct {
	GTMConfig          GTMMetricsConfig             // domains/datacenters to scrape plus label options
	DCMetricPrefix     string                       // metric name prefix ("<base>datacenter_traffic")
	DCLookbackDuration time.Duration                // MaxAge for the per-datacenter request Summaries
	LastTimestamp      map[string]map[int]time.Time // index by domain, datacenterid
	DCRegistry         *prometheus.Registry         // registry the Summaries are registered with
}
func NewDatacenterTrafficCollector(r *prometheus.Registry, gtmMetricsConfig GTMMetricsConfig, gtmMetricPrefix string, tstart time.Time, lookbackDuration time.Duration) *GTMDatacenterTrafficExporter {
gtmDatacenterTrafficExporter = GTMDatacenterTrafficExporter{GTMConfig: gtmMetricsConfig, DCLookbackDuration: lookbackDuration}
gtmDatacenterTrafficExporter.DCMetricPrefix = gtmMetricPrefix + "datacenter_traffic"
gtmDatacenterTrafficExporter.DCLookbackDuration = lookbackDuration
gtmDatacenterTrafficExporter.DCRegistry = r
// Populate LastTimestamp per domain, datacenter. Start time applies to all.
domainMap := make(map[string]map[int]time.Time)
for _, domain := range gtmMetricsConfig.Domains {
dcReqSummaryMap[domain.Name] = make(map[int]prometheus.Summary)
tStampMap := make(map[int]time.Time) // index by zone name
for _, dc := range domain.Datacenters {
tStampMap[dc.DatacenterID] = tstart
// Create and register Summaries by domain, datacenter. TODO: property granualarity?
dcSumMap := createDatacenterMaps(domain.Name, dc.DatacenterID)
r.MustRegister(dcSumMap)
}
domainMap[domain.Name] = tStampMap
}
gtmDatacenterTrafficExporter.LastTimestamp = domainMap
return >mDatacenterTrafficExporter
}
// dcReqSummaryMap holds the long-running request Summaries, keyed first by
// domain name and then by datacenter ID. Entries are created in
// createDatacenterMaps and observed in Collect.
var dcReqSummaryMap = make(map[string]map[int]prometheus.Summary)
// createDatacenterMaps builds the request Summary for one (domain,
// datacenter) pair, stores it in dcReqSummaryMap, and returns it so the
// caller can register it. Only the domain and datacenter are used as labels.
func createDatacenterMaps(domain string, dc int) prometheus.Summary {
	summary := prometheus.NewSummary(
		prometheus.SummaryOpts{
			Namespace: gtmDatacenterTrafficExporter.DCMetricPrefix,
			Name:      "requests_per_interval_summary",
			Help:      "Number of aggregate datacenter requests per 5 minute interval (per domain)",
			MaxAge:    gtmDatacenterTrafficExporter.DCLookbackDuration,
			BufCap:    prometheus.DefBufCap * 2,
			ConstLabels: prometheus.Labels{
				"domain":     domain,
				"datacenter": strconv.Itoa(dc),
			},
		})
	dcReqSummaryMap[domain][dc] = summary
	return summary
}
// Describe implements prometheus.Collector by emitting a single descriptor
// for the datacenter traffic metric family.
func (d *GTMDatacenterTrafficExporter) Describe(ch chan<- *prometheus.Desc) {
	desc := prometheus.NewDesc(d.DCMetricPrefix, "Akamai GTM Datacenter Traffic", nil, nil)
	ch <- desc
}
// Collect implements prometheus.Collector. For every configured domain and
// datacenter it fetches the GTM traffic report covering the span since the
// last processed interval, emits one gauge metric per report row (per
// property when properties are configured, otherwise aggregated across
// properties), feeds the per-interval aggregate into the long-running
// Summary, and advances the per-datacenter LastTimestamp watermark.
func (d *GTMDatacenterTrafficExporter) Collect(ch chan<- prometheus.Metric) {
	log.Debugf("Entering GTM DC Traffic Collect")
	endtime := time.Now().UTC() // Use same current time for all zones
	// Collect metrics for each domain and datacenter
	for _, domain := range d.GTMConfig.Domains {
		log.Debugf("Processing domain %s", domain.Name)
		for _, dc := range domain.Datacenters {
			// get last timestamp recorded. make sure diff > 5 mins.
			// NOTE(review): when fewer than ~5 minutes have elapsed since the
			// watermark, this pushes the window start past endtime —
			// presumably to force an empty report and throttle re-fetching.
			// Confirm this is the intended behavior.
			lasttime := d.LastTimestamp[domain.Name][dc.DatacenterID].Add(time.Minute)
			if endtime.Before(lasttime.Add(time.Minute * 5)) {
				lasttime = lasttime.Add(time.Minute * 5)
			}
			log.Debugf("Fetching datacenter Report for datacenter %d in domain %s.", dc.DatacenterID, domain.Name)
			dcTrafficReport, err := retrieveDatacenterTraffic(domain.Name, dc.DatacenterID, lasttime, endtime)
			if err != nil {
				// API failures are logged and the datacenter skipped; one bad
				// datacenter never aborts the whole scrape.
				apierr, ok := err.(client.APIError)
				if ok && apierr.Status == 500 {
					log.Warnf("Unable to get traffic report for datacenter %d. Internal error ... Skipping.", dc.DatacenterID)
					continue
				}
				if ok && apierr.Status == 400 {
					log.Warnf("Unable to get traffic report for datacenter %d. ... Skipping.", dc.DatacenterID)
					log.Errorf("%s", err.Error())
					continue
				}
				log.Errorf("Unable to get traffic report for datacenter %d ... Skipping. Error: %s", dc.DatacenterID, err.Error())
				continue
			}
			log.Debugf("Traffic Metadata: [%v]", dcTrafficReport.Metadata)
			// Rows are sorted by timestamp (see retrieveDatacenterTraffic);
			// only the first unprocessed row is handled per scrape (break at
			// the bottom of the loop).
			for _, reportInstance := range dcTrafficReport.DataRows {
				instanceTimestamp, err := parseTimeString(reportInstance.Timestamp, GTMTrafficLongTimeFormat)
				if err != nil {
					log.Errorf("Instance timestamp invalid ... Skipping. Error: %s", err.Error())
					continue
				}
				// Skip rows at or before the watermark — already processed.
				if !instanceTimestamp.After(d.LastTimestamp[domain.Name][dc.DatacenterID]) {
					log.Debugf("Instance timestamp: [%v]. Last timestamp: [%v]", instanceTimestamp, d.LastTimestamp[domain.Name][dc.DatacenterID])
					log.Warnf("Attempting to re process report instance: [%v]. Skipping.", reportInstance)
					continue
				}
				// See if we missed an interval. Log warning for low
				log.Debugf("Instance timestamp: [%v]. Last timestamp: [%v]", instanceTimestamp, d.LastTimestamp[domain.Name][dc.DatacenterID])
				if instanceTimestamp.After(d.LastTimestamp[domain.Name][dc.DatacenterID].Add(time.Minute * (trafficReportInterval + 1))) {
					log.Warnf("Missing report interval. Current: %v, Last: %v", instanceTimestamp, d.LastTimestamp[domain.Name][dc.DatacenterID])
				}
				var aggReqs int64
				var baseLabels = []string{"domain", "datacenter"}
				for _, instanceProp := range reportInstance.Properties {
					aggReqs += instanceProp.Requests // aggregate properties in scope
					if len(dc.Properties) > 0 {
						// create metric instance for properties in scope
						if stringSliceContains(dc.Properties, instanceProp.Name) {
							tsLabels := append(baseLabels, "property")
							if d.GTMConfig.TSLabel {
								tsLabels = append(tsLabels, "interval_timestamp")
							}
							ts := instanceTimestamp.Format(time.RFC3339)
							desc := prometheus.NewDesc(prometheus.BuildFQName(d.DCMetricPrefix, "", "requests_per_interval"), "Number of datacenter requests per 5 minute interval (per domain)", tsLabels, nil)
							log.Debugf("Creating Requests metric. Domain: %s, Datacenter: %d, Property: %s, Requests: %v, Timestamp: %v", domain.Name, dc.DatacenterID, instanceProp.Name, float64(instanceProp.Requests), ts)
							var reqsmetric prometheus.Metric
							if d.GTMConfig.TSLabel {
								reqsmetric = prometheus.MustNewConstMetric(
									desc, prometheus.GaugeValue, float64(instanceProp.Requests), domain.Name, strconv.Itoa(dc.DatacenterID), instanceProp.Name, ts)
							} else {
								reqsmetric = prometheus.MustNewConstMetric(
									desc, prometheus.GaugeValue, float64(instanceProp.Requests), domain.Name, strconv.Itoa(dc.DatacenterID), instanceProp.Name)
							}
							// UseTimestamp nil (unset) defaults to attaching
							// the interval timestamp; only explicit false
							// emits the metric without one.
							if d.GTMConfig.UseTimestamp != nil && !*d.GTMConfig.UseTimestamp {
								ch <- reqsmetric
							} else {
								ch <- prometheus.NewMetricWithTimestamp(instanceTimestamp, reqsmetric)
							}
						}
					}
				} // properties in time interval end
				// No property filter configured: emit one aggregate metric
				// for the whole datacenter instead of per-property metrics.
				if len(dc.Properties) < 1 {
					// Create agg instance
					tsLabels := baseLabels
					if d.GTMConfig.TSLabel {
						tsLabels = append(tsLabels, "interval_timestamp")
					}
					ts := instanceTimestamp.Format(time.RFC3339)
					desc := prometheus.NewDesc(prometheus.BuildFQName(d.DCMetricPrefix, "", "requests_per_interval"), "Number of datacenter requests per 5 minute interval (per domain)", tsLabels, nil)
					log.Debugf("Creating Requests metric. Domain: %s, Datacenter: %d, Requests: %v, Timestamp: %v", domain.Name, dc.DatacenterID, float64(aggReqs), ts)
					var reqsmetric prometheus.Metric
					if d.GTMConfig.TSLabel {
						reqsmetric = prometheus.MustNewConstMetric(
							desc, prometheus.GaugeValue, float64(aggReqs), domain.Name, strconv.Itoa(dc.DatacenterID), ts)
					} else {
						reqsmetric = prometheus.MustNewConstMetric(
							desc, prometheus.GaugeValue, float64(aggReqs), domain.Name, strconv.Itoa(dc.DatacenterID))
					}
					if d.GTMConfig.UseTimestamp != nil && !*d.GTMConfig.UseTimestamp {
						ch <- reqsmetric
					} else {
						ch <- prometheus.NewMetricWithTimestamp(instanceTimestamp, reqsmetric)
					}
				}
				// Update summary
				dcReqSummaryMap[domain.Name][dc.DatacenterID].Observe(float64(aggReqs))
				// Update last timestamp processed
				if instanceTimestamp.After(d.LastTimestamp[domain.Name][dc.DatacenterID]) {
					log.Debugf("Updating Last Timestamp from %v TO %v", d.LastTimestamp[domain.Name][dc.DatacenterID], instanceTimestamp)
					d.LastTimestamp[domain.Name][dc.DatacenterID] = instanceTimestamp
				}
				// only process one each interval!
				break
			} // interval end
		} // datacenter end
	} // domain end
}
func retrieveDatacenterTraffic(domain string, dc int, start, end time.Time) (*gtm.DcTrafficResponse, error) {
qargs := make(map[string]string)
// Get valid Traffic Window
var err error
dcTrafficWindow, err := gtm.GetDatacentersTrafficWindow()
if err != nil {
return nil, err
}
// Make sure provided start and end are in range
if dcTrafficWindow.StartTime.Before(start) {
if dcTrafficWindow.EndTime.After(start) {
qargs["start"], err = convertTimeFormat(start, time.RFC3339)
} else {
qargs["start"], err = convertTimeFormat(dcTrafficWindow.EndTime, time.RFC3339)
}
} else {
qargs["start"], err = convertTimeFormat(dcTrafficWindow.StartTime, time.RFC3339)
}
if err != nil {
return nil, err
}
if dcTrafficWindow.EndTime.Before(end) {
qargs["end"], err = convertTimeFormat(dcTrafficWindow.EndTime, time.RFC3339)
} else {
qargs["end"], err = convertTimeFormat(end, time.RFC3339)
}
if err != nil {
return nil, err
}
if qargs["start"] >= qargs["end"] {
resp := >m.DcTrafficResponse{}
resp.DataRows = make([]*gtm.DCTData, 0)
log.Warnf("Start or End time outside valid report window")
return resp, nil
}
resp, err := gtm.GetTrafficPerDatacenter(domain, dc, qargs)
if err != nil {
return >m.DcTrafficResponse{}, err
}
//DataRows is list of pointers
sortDCDataRowsByTimestamp(resp.DataRows)
return resp, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.