text
stringlengths 11
4.05M
|
|---|
package main
import (
"fmt"
)
// main demonstrates appending multiple elements to a string slice and
// printing the result with quoted elements (%q).
func main() {
	cities := append([]string{}, "San Diego", "Mountain View")
	fmt.Printf("%q\n", cities)
}
|
// Package chanutil implements methods for working with channels.
package chanutil
|
package prompt
import (
"bytes"
"testing"
)
// TestVT100WriterWrite verifies that Write replaces the ESC control byte
// (0x1b) with '?' and passes ordinary bytes through unchanged.
func TestVT100WriterWrite(t *testing.T) {
	scenarioTable := []struct {
		input    []byte
		expected []byte
	}{
		{
			input:    []byte{0x1b},
			expected: []byte{'?'},
		},
		{
			input:    []byte{'a'},
			expected: []byte{'a'},
		},
	}
	for _, s := range scenarioTable {
		pw := &VT100Writer{}
		pw.Write(s.input)
		if !bytes.Equal(pw.buffer, s.expected) {
			// Fix: arguments were swapped relative to the message text
			// ("Should be <expected>, but got <actual>").
			t.Errorf("Should be %+#v, but got %+#v", s.expected, pw.buffer)
		}
	}
}
// TestVT100WriterWriteStr verifies the string variant of Write: the ESC
// character is escaped to '?', ordinary characters pass through.
func TestVT100WriterWriteStr(t *testing.T) {
	scenarioTable := []struct {
		input    string
		expected []byte
	}{
		{
			input:    "\x1b",
			expected: []byte{'?'},
		},
		{
			input:    "a",
			expected: []byte{'a'},
		},
	}
	for _, s := range scenarioTable {
		pw := &VT100Writer{}
		pw.WriteStr(s.input)
		if !bytes.Equal(pw.buffer, s.expected) {
			// Fix: arguments were swapped relative to the message text
			// ("Should be <expected>, but got <actual>").
			t.Errorf("Should be %+#v, but got %+#v", s.expected, pw.buffer)
		}
	}
}
// TestVT100WriterWriteRawStr verifies that WriteRawStr performs NO escaping:
// the ESC byte is stored verbatim in the buffer.
func TestVT100WriterWriteRawStr(t *testing.T) {
	scenarioTable := []struct {
		input    string
		expected []byte
	}{
		{
			input:    "\x1b",
			expected: []byte{0x1b},
		},
		{
			input:    "a",
			expected: []byte{'a'},
		},
	}
	for _, s := range scenarioTable {
		pw := &VT100Writer{}
		pw.WriteRawStr(s.input)
		if !bytes.Equal(pw.buffer, s.expected) {
			// Fix: arguments were swapped relative to the message text
			// ("Should be <expected>, but got <actual>").
			t.Errorf("Should be %+#v, but got %+#v", s.expected, pw.buffer)
		}
	}
}
|
package main
import (
"net/http"
)
type Adapter func(http.HandlerFunc) http.HandlerFunc
// InitRoutes to start up a mux router and return the routes
func InitRoutes() *http.ServeMux {
	serveMux := http.NewServeMux()
	// Health-check endpoints, restricted to GET by ValidateRestMethod.
	// NOTE(review): PingPong is defined below; RandomizeHandler,
	// ValidateRestMethod and ValidatePayload are defined elsewhere in
	// this package — confirm they exist before relying on this wiring.
	serveMux.HandleFunc("/ping", Adapt(PingPong, ValidateRestMethod("GET")))
	serveMux.HandleFunc("/api/ping", Adapt(PingPong, ValidateRestMethod("GET")))
	// Randomizer endpoints. Adapt applies adapters in order, each wrapping
	// the previous result, so the LAST adapter (the POST method check) is
	// outermost and runs first at request time.
	serveMux.HandleFunc("/api/v1/randomize", Adapt(RandomizeHandler, ValidatePayload(), ValidateRestMethod("POST")))
	serveMux.HandleFunc("/api/randomize", Adapt(RandomizeHandler, ValidatePayload(), ValidateRestMethod("POST")))
	return serveMux
}
// PingPong pongs the ping
func PingPong(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(200)
w.Write([]byte("pong"))
}
// Iterate over adapters and run them one by one
func Adapt(h http.HandlerFunc, adapters ...Adapter) http.HandlerFunc {
for _, adapter := range adapters {
h = adapter(h)
}
return h
}
|
package main
import (
"fmt"
"github.com/gin-gonic/gin"
)
var message string
// main starts a gin HTTP server on :8080 with two routes:
//
//	GET  /     — responds with the current value of the package-level message
//	POST /post — overwrites message from the "message" query parameter
//
// NOTE(review): message is package-level state mutated from request handlers
// with no synchronization; concurrent requests race on it — confirm whether
// that matters for this service.
func main() {
	message = "Hello World"
	fmt.Println("Hello world")
	r := gin.Default()
	r.GET("/", func(c *gin.Context) {
		c.String(200, message)
	})
	r.POST("/post", func(c *gin.Context) {
		// Only replace the message when a non-empty query value was supplied.
		if len(c.Query("message")) > 0 {
			message = c.Query("message")
			fmt.Print(message)
			c.String(200, message)
			return
		}
		c.String(200, message)
	})
	// Run blocks, serving until the process exits.
	r.Run(":8080")
}
|
package sudoku
import (
"math"
"math/rand"
"testing"
)
const _NUM_RUNS_TEST_WEIGHTED_DISTRIBUTION = 10000
const _ALLOWABLE_DIFF_WEIGHTED_DISTRIBUTION = 0.01
// TestInvertingReallyReallyBigDistribution checks that inverting a
// distribution containing an absurdly large entry (as the Guess technique
// can produce in practice) yields no NaN values, at least while some
// "normal" sized entries are also present.
func TestInvertingReallyReallyBigDistribution(t *testing.T) {
	crazyDistribution := ProbabilityDistribution{
		1.0,
		10.0,
		100.0,
		1000.0,
		1000000000000000000000.0,
	}
	inverted := crazyDistribution.invert()
	for i, probability := range inverted {
		if math.IsNaN(probability) {
			t.Error("Index", i, "was NaN")
		}
	}
}
// TestAllInfDistribution checks that inverting a distribution whose entries
// are all +Inf produces a uniform distribution with no NaNs.
func TestAllInfDistribution(t *testing.T) {
	crazyDistribution := ProbabilityDistribution{
		math.Inf(1),
		math.Inf(1),
		math.Inf(1),
	}
	invertedDistribution := crazyDistribution.invert()
	for i, probability := range invertedDistribution {
		if math.IsNaN(probability) {
			t.Error("Index", i, "was NaN")
		}
		// Exact float comparison against the float64 representation of 1/3;
		// assumes invert() produces exactly this value for a uniform result.
		if probability != 0.3333333333333333 {
			t.Error("Got wrong value for index", i, "got", probability, "expected 0.333333")
		}
	}
}
// TestRandomWeightedIndex exercises ProbabilityDistribution's RandomIndex,
// normalized, normalize and invert methods: zero-weight entries must never
// be picked, negative weights are treated as zero, and normalization must
// sum to 1 while preserving relative weight.
func TestRandomWeightedIndex(t *testing.T) {
	result := ProbabilityDistribution{1.0, 0.0}.RandomIndex()
	if result != 0 {
		t.Log("Got wrong result with random weights")
		t.Fail()
	}
	result = ProbabilityDistribution{0.5, 0.0, 0.5}.RandomIndex()
	if result != 0 && result != 2 {
		t.Log("Didn't get one of two legal weights")
		t.Fail()
	}
	result = ProbabilityDistribution{0.0, 0.0, 1.0}.RandomIndex()
	if result != 2 {
		t.Log("Should have gotten last item in random weights; we didn't")
		t.Fail()
	}
	if (ProbabilityDistribution{1.0, 0.000001}.normalized()) {
		t.Log("thought weights were normalized when they weren't")
		t.Fail()
	}
	if !(ProbabilityDistribution{0.5, 0.25, 0.25}.normalized()) {
		t.Log("Didn't think weights were normalized but they were")
		t.Fail()
	}
	if (ProbabilityDistribution{0.5, -0.25, 0.25}.normalized()) {
		t.Error("A negative weight was considered normal.")
	}
	rand.Seed(1)
	result = ProbabilityDistribution{0.0, 0.0, 1.0}.invert().RandomIndex()
	if result == 2 {
		t.Error("Got the wrong index for inverted weights")
	}
	weightResult := ProbabilityDistribution{2.0, 1.0, 1.0}.normalize()
	if weightResult[0] != 0.5 || weightResult[1] != 0.25 || weightResult[2] != 0.25 {
		// Fix: message previously misspelled "Nomralized".
		t.Log("Normalized weights came back wrong")
		t.Fail()
	}
	weightResult = ProbabilityDistribution{1.0, 1.0, -0.5}.normalize()
	if weightResult[0] != 0.5 || weightResult[1] != 0.5 || weightResult[2] != 0 {
		t.Error("Normalized weights with a negative came back wrong: ", weightResult)
	}
	weightResult = ProbabilityDistribution{-0.25, -0.5, 0.25}.normalize()
	if weightResult[0] != 0.25 || weightResult[1] != 0 || weightResult[2] != 0.75 {
		t.Error("Normalized weights with two different negative numbers came back wrong: ", weightResult)
	}
	result = ProbabilityDistribution{1.0, 0.0}.RandomIndex()
	if result != 0 {
		t.Log("Got wrong result with random weights")
		t.Fail()
	}
	result = ProbabilityDistribution{5.0, 0.0, 5.0}.RandomIndex()
	if result != 0 && result != 2 {
		t.Log("Didn't get one of two legal weights")
		t.Fail()
	}
	result = ProbabilityDistribution{0.0, 0.0, 5.0}.RandomIndex()
	if result != 2 {
		t.Log("Should have gotten last item in random weights; we didn't")
		t.Fail()
	}
	// Run many seeds: a negative weight (index 3) must never be selected.
	for i := 0; i < 100; i++ {
		rand.Seed(int64(i))
		result = ProbabilityDistribution{1.0, 10.0, 0.5, -1.0, 0.0, 6.4}.RandomIndex()
		if result == 3 {
			t.Error("Random index with weights picked wrong index with seed ", i)
		}
	}
	// Zero weights (indexes 4 and 6) must never be selected, even trailing.
	for i := 0; i < 100; i++ {
		rand.Seed(int64(i))
		result = ProbabilityDistribution{1.0, 10.0, 0.5, 1.0, 0.0, 6.4, 0.0}.RandomIndex()
		if result == 4 || result == 6 {
			t.Error("Random index with weights that ended in zero picked wrong index with seed ", i)
		}
	}
}
// TestWeightedRandomDistribution checks that inverting a distribution and
// sampling from it produces (approximately) the expected distribution.
// We only bother testing randomIndexWithInvertedWeights since that's the one
// actually used in HumanSolve.
func TestWeightedRandomDistribution(t *testing.T) {
	type distributionTestCase struct {
		input       ProbabilityDistribution
		expected    ProbabilityDistribution
		description string
	}
	cases := []distributionTestCase{
		{
			ProbabilityDistribution{
				0.0,
				1.0,
				2.0,
			},
			ProbabilityDistribution{
				0.3678,
				0.3323,
				0.2999,
			},
			"0 1 2",
		},
		{
			ProbabilityDistribution{
				0.0,
			},
			ProbabilityDistribution{
				1.0,
			},
			"0.0",
		},
		{
			ProbabilityDistribution{
				10.0,
			},
			ProbabilityDistribution{
				1.0,
			},
			"10.0",
		},
		{
			ProbabilityDistribution{
				0.5,
				0.5,
				1.0,
			},
			ProbabilityDistribution{
				0.337,
				0.337,
				0.327,
			},
			"0.5, 0.5, 1.0",
		},
		{
			ProbabilityDistribution{
				1.0,
				100.0,
				0.5,
				-1.0,
				0.0,
				6.4,
			},
			ProbabilityDistribution{
				0.2015,
				0.0,
				0.2113,
				0.2474,
				0.2231,
				0.1167,
			},
			"1.0, 100.0, 0.5, -1.0, 0.0, 6.4",
		},
		{
			ProbabilityDistribution{
				3.0,
				3.0,
				4.0,
				4.0,
				4.0,
				4.0,
				100.0,
				100.0,
				400.0,
			},
			ProbabilityDistribution{
				0.1782,
				0.1782,
				0.1613,
				0.1608,
				0.1603,
				0.1612,
				0.0,
				0.0,
				0.0,
			},
			// Fix: description previously misspelled "incrase".
			"Many at same weight; exponential increase",
		},
		//This demonstrates the same problem as the case above, but is more pure
		{
			ProbabilityDistribution{
				0.0,
				1.0,
				2.0,
				4.0,
				8.0,
				16.0,
			},
			ProbabilityDistribution{
				0.2468,
				0.2239,
				0.2031,
				0.1651,
				0.1117,
				0.0494,
			},
			"Straight power of two increase 31",
		},
		{
			ProbabilityDistribution{
				1.0,
				2.0,
				3.0,
				4.0,
				10.0,
				1000.0,
			},
			ProbabilityDistribution{
				0.2589,
				0.2337,
				0.2121,
				0.191,
				0.1043,
				0.0,
			},
			"Small numbers and very big one",
		},
		{
			ProbabilityDistribution{
				2400000028.253748,
				math.Inf(1),
				math.Inf(1),
			},
			ProbabilityDistribution{
				1.0,
				0.0,
				0.0,
			},
			"Single very large non-inf with two infs",
		},
	}
	// First, verify the empirical sampling distribution is close to expected.
	for _, testCase := range cases {
		randomIndexDistributionHelper(
			t,
			testCase.input,
			testCase.expected,
			testCase.description)
	}
	// Second, verify the inverted distribution itself matches elementwise
	// within a small tolerance.
	for _, testCase := range cases {
		distribution := testCase.input.invert()
		for i, num := range distribution {
			if math.Abs(num-testCase.expected[i]) > 0.01 {
				t.Error("Got wrong distribution for", testCase.description, "at", i, "Got", distribution, "Wanted", testCase.expected)
			}
		}
	}
}
// randomIndexDistributionHelper samples input.invert().RandomIndex() many
// times with deterministic seeds and asserts the observed index frequencies
// are within _ALLOWABLE_DIFF_WEIGHTED_DISTRIBUTION (summed absolute error)
// of expectedDistribution. testCase is a human-readable label for failures.
func randomIndexDistributionHelper(t *testing.T, input ProbabilityDistribution, expectedDistribution ProbabilityDistribution, testCase string) {
	if len(input) != len(expectedDistribution) {
		t.Fatal("Given differently sized input and expected distribution")
	}
	//collect the results
	results := make([]int, len(expectedDistribution))
	for i := 0; i < _NUM_RUNS_TEST_WEIGHTED_DISTRIBUTION; i++ {
		// Re-seed each iteration so the run is fully deterministic.
		rand.Seed(int64(i))
		result := input.invert().RandomIndex()
		results[result]++
	}
	//normalize the results and then calculate the diffs from expected.
	diffAccum := 0.0
	normalizedResults := make([]float64, len(results))
	for i, result := range results {
		normalizedResults[i] = float64(result) / _NUM_RUNS_TEST_WEIGHTED_DISTRIBUTION
		diffAccum += math.Abs(normalizedResults[i] - expectedDistribution[i])
	}
	if diffAccum > _ALLOWABLE_DIFF_WEIGHTED_DISTRIBUTION {
		t.Error("More than allowable difference observed in weighted random distribution:", diffAccum, testCase, "Got", normalizedResults, "Expected", expectedDistribution)
	}
}
|
package main
// 贪心:
// 1、先将数组分别加上下标得到每个位置可以跳跃到的位置(可以省去,直接在遍历的时候计算)
// 2、要跳到的位置应尽可能远,因此要在满足约束的条件下,用max_dist记录最大可以跳跃的距离
// 3、约束条件为索引i<=max_dist,因为这样才能保证连续跳跃,即从位置0到当前位置,也即之前的元素可以跳跃到当前的位置
// canJump reports whether the last index is reachable starting from index 0,
// where nums[i] is the maximum jump length from position i.
//
// Greedy: maxDist tracks the farthest index reachable so far. If the current
// index i ever exceeds maxDist, position i (and therefore the end) is
// unreachable. Once maxDist covers the last index, every position up to it
// is reachable too, so we can return early.
//
// Fix: the original indexed nums[0] before checking length and panicked on
// an empty slice; an empty input is now (vacuously) reachable.
func canJump(nums []int) bool {
	if len(nums) == 0 {
		return true
	}
	maxDist := 0
	for i, step := range nums {
		if i > maxDist {
			return false
		}
		if i+step > maxDist {
			maxDist = i + step
			if maxDist >= len(nums)-1 {
				return true
			}
		}
	}
	// Single-element (or already-at-end) case: index 0 is the last index.
	return true
}
|
package commands
import (
"log"
"sync"
"time"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"github.com/andreyvit/mongobulk"
api "github.com/michigan-com/gannett-newsfetch/gannettApi"
m "github.com/michigan-com/gannett-newsfetch/model"
)
// GetArticles fetches the day's articles for every site code in parallel,
// then upserts a scrape request into the "ToScrape" collection for each
// article that still needs summarizing.
func GetArticles(session *mgo.Session, siteCodes []string, gannettSearchAPIKey string) {
	startTime := time.Now()
	var articleWait sync.WaitGroup
	totalArticles := 0
	articleChannel := make(chan *m.SearchArticle, len(siteCodes)*100)
	// Fetch each markets' articles in parallel
	log.Printf("Fetching articles for all sites ...")
	for _, code := range siteCodes {
		articleWait.Add(1)
		go func(code string) {
			defer articleWait.Done()
			articles := api.GetArticlesByDay(code, time.Now(), gannettSearchAPIKey)
			for _, article := range articles {
				articleChannel <- article
			}
		}(code)
	}
	// Fix: close the channel from a goroutine once all producers finish,
	// instead of Wait()-then-close before consuming. The old order
	// deadlocked whenever the buffered channel filled up: producers blocked
	// on send while Wait() blocked forever waiting for them.
	go func() {
		articleWait.Wait()
		close(articleChannel)
	}()
	coll := session.DB("").C("ToScrape")
	bulk := mongobulk.New(coll, mongobulk.Config{})
	// Iterate over all the articles, and determine whether or not we need to
	// summarize the articles
	log.Printf("Determining which articles need to be scraped...")
	for article := range articleChannel {
		if shouldSummarizeArticle(article, session) {
			totalArticles++
			bulk.Upsert(bson.M{"article_id": article.AssetId}, &m.ScrapeRequest{
				ArticleID:  article.AssetId,
				ArticleURL: article.Urls.LongUrl,
			})
		}
	}
	log.Printf("...Done fetching articles")
	if err := bulk.Finish(); err != nil {
		log.Printf("ERROR: Failed to store articles to be scraped: %v", err)
	}
	log.Printf("Article processing done (%v). Total Articles Found: %d.", time.Since(startTime), totalArticles)
}
|
package service
import (
"context"
pb "github.com/johnbellone/persona-service/internal/gen/persona/api/v1"
ptypes "github.com/johnbellone/persona-service/internal/gen/persona/type"
"github.com/johnbellone/persona-service/internal/server"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// RoleHandler serves the role-related RPC endpoints. All methods are
// currently stubs that return codes.Unimplemented.
type RoleHandler struct {
	config *server.Config
}

// NewRoleHandler returns a RoleHandler bound to the given server config.
func NewRoleHandler(c *server.Config) *RoleHandler {
	return &RoleHandler{config: c}
}

// Create is a stub; it always returns codes.Unimplemented.
func (h *RoleHandler) Create(ctx context.Context, req *pb.RoleRequest) (*pb.RoleResponse, error) {
	return nil, status.Error(codes.Unimplemented, "Not implemented")
}

// Get is a stub; it always returns codes.Unimplemented.
func (h *RoleHandler) Get(ctx context.Context, req *pb.RoleRequest) (*ptypes.Role, error) {
	return nil, status.Error(codes.Unimplemented, "Not implemented")
}

// Update is a stub; it always returns codes.Unimplemented.
func (h *RoleHandler) Update(ctx context.Context, req *pb.RoleRequest) (*pb.RoleResponse, error) {
	return nil, status.Error(codes.Unimplemented, "Not implemented")
}

// Delete is a stub; it always returns codes.Unimplemented.
func (h *RoleHandler) Delete(ctx context.Context, req *pb.RoleRequest) (*pb.RoleResponse, error) {
	return nil, status.Error(codes.Unimplemented, "Not implemented")
}
|
/*
Copyright 2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pam
import (
"bytes"
"os/user"
"strings"
"testing"
"github.com/gravitational/teleport/lib/utils"
"gopkg.in/check.v1"
)
// Suite holds per-suite state for the PAM integration tests: the local
// username used as the PAM login for every test.
type Suite struct {
	username string
}

var _ = check.Suite(&Suite{})

// TestPAM is the gocheck entry point.
func TestPAM(t *testing.T) { check.TestingT(t) }

// SetUpSuite skips the whole suite unless the binary was built with PAM
// support and the host system has PAM available, then records the current
// OS user as the login to test with.
func (s *Suite) SetUpSuite(c *check.C) {
	utils.InitLoggerForTests()
	// Skip this test if the binary was not built with PAM support.
	if !BuildHasPAM() || !SystemHasPAM() {
		c.Skip("Skipping test: PAM support not enabled.")
	}
	local, err := user.Current()
	c.Assert(err, check.IsNil)
	s.username = local.Username
}
// TestEcho makes sure that the teleport env variables passed to a PAM module
// are correctly set
//
// The PAM module used, pam_teleport.so is called from the policy file
// teleport-acct-echo. The policy file instructs pam_teleport.so to echo the
// contents of TELEPORT_* to stdout where this test can read, parse, and
// validate it's output.
func (s *Suite) TestEcho(c *check.C) {
	var buf bytes.Buffer
	// Open a PAM context against the teleport-acct-echo policy, passing
	// TELEPORT_* env vars that the policy echoes back to Stdout/Stderr
	// (both captured into buf).
	pamContext, err := Open(&Config{
		Enabled:     true,
		ServiceName: "teleport-acct-echo",
		Login:       s.username,
		Env: map[string]string{
			"TELEPORT_USERNAME": s.username + "@example.com",
			"TELEPORT_LOGIN":    s.username,
			"TELEPORT_ROLES":    "bar baz qux",
		},
		Stdin:  &discardReader{},
		Stdout: &buf,
		Stderr: &buf,
	})
	c.Assert(err, check.IsNil)
	defer pamContext.Close()
	// The echoed env values must appear line-by-line, followed by the OK
	// markers from each PAM stage.
	assertOutput(c, buf.String(), []string{
		s.username + "@example.com",
		s.username,
		"bar baz qux",
		"pam_sm_acct_mgmt OK",
		"pam_sm_authenticate OK",
		"pam_sm_open_session OK",
	})
}
// TestEnvironment makes sure that PAM environment variables (environment
// variables set by a PAM module) can be accessed from the PAM handle/context
// in Go code.
//
// The PAM module used, pam_teleport.so is called from the policy file
// teleport-session-environment. The policy file instructs pam_teleport.so to
// read in the first argument and set it as a PAM environment variable. This
// test then validates it matches what was set in the policy file.
func (s *Suite) TestEnvironment(c *check.C) {
	var buf bytes.Buffer
	// The teleport-session-environment policy sets exactly one PAM
	// environment variable (foo=bar) which must be readable via
	// pamContext.Environment().
	pamContext, err := Open(&Config{
		Enabled:     true,
		ServiceName: "teleport-session-environment",
		Login:       s.username,
		Stdin:       &discardReader{},
		Stdout:      &buf,
		Stderr:      &buf,
	})
	c.Assert(err, check.IsNil)
	defer pamContext.Close()
	c.Assert(pamContext.Environment(), check.HasLen, 1)
	c.Assert(pamContext.Environment()[0], check.Equals, "foo=bar")
}
// TestSuccess runs the teleport-success PAM policy and verifies all three
// PAM stages (account, auth, session) report OK on the captured output.
func (s *Suite) TestSuccess(c *check.C) {
	var buf bytes.Buffer
	pamContext, err := Open(&Config{
		Enabled:     true,
		ServiceName: "teleport-success",
		Login:       s.username,
		Stdin:       &discardReader{},
		Stdout:      &buf,
		Stderr:      &buf,
	})
	c.Assert(err, check.IsNil)
	defer pamContext.Close()
	assertOutput(c, buf.String(), []string{
		"pam_sm_acct_mgmt OK",
		"pam_sm_authenticate OK",
		"pam_sm_open_session OK",
	})
}
// TestAccountFailure verifies Open returns an error when the PAM account
// stage fails (teleport-acct-failure policy).
func (s *Suite) TestAccountFailure(c *check.C) {
	var buf bytes.Buffer
	_, err := Open(&Config{
		Enabled:     true,
		ServiceName: "teleport-acct-failure",
		Login:       s.username,
		Stdin:       &discardReader{},
		Stdout:      &buf,
		Stderr:      &buf,
	})
	c.Assert(err, check.NotNil)
}

// TestAuthFailure verifies Open returns an error when the PAM auth stage
// fails (teleport-auth-failure policy).
func (s *Suite) TestAuthFailure(c *check.C) {
	var buf bytes.Buffer
	_, err := Open(&Config{
		Enabled:     true,
		ServiceName: "teleport-auth-failure",
		Login:       s.username,
		Stdin:       &discardReader{},
		Stdout:      &buf,
		Stderr:      &buf,
	})
	c.Assert(err, check.NotNil)
}

// TestSessionFailure verifies Open returns an error when the PAM session
// stage fails (teleport-session-failure policy).
func (s *Suite) TestSessionFailure(c *check.C) {
	var buf bytes.Buffer
	_, err := Open(&Config{
		Enabled:     true,
		ServiceName: "teleport-session-failure",
		Login:       s.username,
		Stdin:       &discardReader{},
		Stdout:      &buf,
		Stderr:      &buf,
	})
	c.Assert(err, check.NotNil)
}
// assertOutput splits got into lines, trims surrounding whitespace from the
// whole string and from each line, and asserts the result deep-equals want.
func assertOutput(c *check.C, got string, want []string) {
	var lines []string
	for _, line := range strings.Split(strings.TrimSpace(got), "\n") {
		lines = append(lines, strings.TrimSpace(line))
	}
	c.Assert(lines, check.DeepEquals, want)
}
// discardReader is an io.Reader stand-in for stdin that never supplies
// data: every Read reports success for the full buffer length without
// writing anything into it.
type discardReader struct {
}

// Read claims len(p) bytes were read and never fails.
func (r *discardReader) Read(p []byte) (int, error) {
	return len(p), nil
}
|
package main
import "testing"
// TestCalculate runs Calculateginprice over a table of bicycle configurations
// and fails for rows whose date's year field (characters 3..6 of "MM-YYYY")
// is not "2016".
func TestCalculate(t *testing.T) {
	var tests = []struct {
		date          string
		frame         string
		handlebar     string
		gear          int64
		geargrip      int64
		seating       int64
		seatingbottle int64
		wheels        string
		spokes        int64
		rim           int64
		tube          int64
		tyre          string
		chain         string
	}{
		{"10-2021", "steel", "steel", 4, 220, 1, 200, "steel", 400, 200, 300, "tubeless", "onespeed"},
	}
	for _, test := range tests {
		// Fix: the original used t.Error/t.Log (Println-style) with "{}"
		// placeholders, which are not Go format verbs; use t.Errorf/t.Logf
		// with %v. Also fixes the misspelled "outpu2" variable.
		if output, output1, output2, output3, output4, output5 := Calculateginprice(test.date, test.frame, test.handlebar, test.gear, test.geargrip, test.seating, test.seatingbottle, test.wheels, test.spokes, test.rim, test.tube, test.tyre, test.chain); test.date[3:7] != "2016" {
			t.Errorf("test failed %v is %v is %v: %v: %v: %v:", output, output1, output2, output3, output4, output5)
		} else {
			t.Logf("tested the price for cycle engine %v", output5)
		}
	}
}
|
package cain
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01000101 is the XML document wrapper for the ISO 20022
// cain.010.001.01 message, holding a single NetworkManagementResponse.
type Document01000101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:cain.010.001.01 Document"`
	Message *NetworkManagementResponse `xml:"NtwkMgmtRspn"`
}

// AddMessage allocates the document's message and returns it for population.
func (d *Document01000101) AddMessage() *NetworkManagementResponse {
	d.Message = new(NetworkManagementResponse)
	return d.Message
}

// The NetworkManagementResponse message is sent by an acquirer, an issuer or an agent to answer to an NetworkManagementInitiation message.
type NetworkManagementResponse struct {

	// Information related to the protocol management.
	Header *iso20022.Header17 `xml:"Hdr"`

	// Information related to the response to the network management.
	NetworkManagementResponse *iso20022.AcquirerNetworkManagementResponse1 `xml:"NtwkMgmtRspn"`

	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType15 `xml:"SctyTrlr,omitempty"`
}

// AddHeader allocates and returns the message header.
func (n *NetworkManagementResponse) AddHeader() *iso20022.Header17 {
	n.Header = new(iso20022.Header17)
	return n.Header
}

// AddNetworkManagementResponse allocates and returns the response body.
func (n *NetworkManagementResponse) AddNetworkManagementResponse() *iso20022.AcquirerNetworkManagementResponse1 {
	n.NetworkManagementResponse = new(iso20022.AcquirerNetworkManagementResponse1)
	return n.NetworkManagementResponse
}

// AddSecurityTrailer allocates and returns the optional security trailer.
func (n *NetworkManagementResponse) AddSecurityTrailer() *iso20022.ContentInformationType15 {
	n.SecurityTrailer = new(iso20022.ContentInformationType15)
	return n.SecurityTrailer
}
|
package mappers
import (
"fmt"
"github.com/vfreex/gones/pkg/emulator/memory"
"github.com/vfreex/gones/pkg/emulator/rom/ines"
)
type NROMMapper struct {
mapperBase
prgBankMapping [2]int
}
// Register this constructor as mapper number 0 (NROM).
func init() {
	MapperConstructors[0] = NewNROMMapper
}

// NewNROMMapper builds a mapper-0 (NROM) mapper from an iNES ROM image.
// If the PRG image is larger than one bank, the second 16KB bank is mapped
// at the upper slot; otherwise both slots mirror bank 0 (prgBankMapping's
// zero values). Cartridges without CHR data get an 8KB CHR-RAM instead.
func NewNROMMapper(rom *ines.INesRom) Mapper {
	p := &NROMMapper{}
	p.prgBin = rom.PrgBin
	if len(p.prgBin) > PrgBankSize {
		p.prgBankMapping[1] = 1
	}
	if len(rom.ChrBin) > 0 {
		p.chrBin = rom.ChrBin
	} else {
		// cartridge use CHR-RAM rather than CHR-ROM
		p.chrBin = make([]byte, ChrBankSize)
		p.useChrRam = true
	}
	return p
}
// PeekPrg reads a byte from the PRG address space: addresses below 0x4020
// are unmapped (panic), 0x4020-0x7FFF hit PRG-RAM, and 0x8000+ are resolved
// through the 16KB bank mapping into PRG-ROM.
func (p *NROMMapper) PeekPrg(addr memory.Ptr) byte {
	if addr < 0x4020 {
		panic(fmt.Errorf("mapper 0 PRG-ROM address %04x is not configured", addr))
	}
	if addr < 0x8000 {
		return p.prgRam[addr-0x4020]
	}
	// Select the bank for this half of the window, then offset within the
	// 16KB bank (0x3fff mask).
	bank := p.prgBankMapping[int(addr-0x8000)/PrgBankSize]
	return p.prgBin[bank*PrgBankSize|int(addr)&0x3fff]
}

// PokePrg writes a byte to the PRG address space. Only PRG-RAM
// (0x4020-0x7FFF) is writable; PRG-ROM writes panic.
func (p *NROMMapper) PokePrg(addr memory.Ptr, val byte) {
	if addr < 0x4020 {
		panic(fmt.Errorf("mapper 0 PRG-ROM address %04x is not configured", addr))
	}
	if addr < 0x8000 {
		p.prgRam[addr-0x4020] = val
		return
	}
	panic(fmt.Errorf("mapper 0 PRG-ROM address %04x is not writable", addr))
}

// PeekChr reads a byte from CHR memory; only 0x0000-0x1FFF is mapped.
func (p *NROMMapper) PeekChr(addr memory.Ptr) byte {
	if addr >= 0x2000 {
		panic(fmt.Errorf("mapper 0 CHR-ROM/CHR-RAM address %04x is not configured", addr))
	}
	return p.chrBin[addr]
}
// PokeChr writes a byte to CHR memory. Only 0x0000-0x1FFF is mapped, and
// writes are only legal when the cartridge provides CHR-RAM — CHR-ROM is
// read-only.
func (p *NROMMapper) PokeChr(addr memory.Ptr, val byte) {
	if addr >= 0x2000 {
		panic(fmt.Errorf("mapper 0 CHR-ROM/CHR-RAM %04x is not configured", addr))
	}
	// Fix: the check was inverted — it panicked exactly when the cartridge
	// DID have writable CHR-RAM, making CHR-RAM carts unusable and letting
	// CHR-ROM carts be silently overwritten.
	if !p.useChrRam {
		panic(fmt.Errorf("this mapper 0 cartridge uses CHR-ROM, writing address %04x is not possible", addr))
	}
	p.chrBin[addr] = val
}
|
/* A testing script written for backspaceCompare function */
package backspaceCompare
import "testing"
// Test runs BackspaceCompare over a table of string pairs, where '#' means
// backspace, and checks the expected equality result for each pair.
func Test(t *testing.T) {
	cases := []struct {
		s, t string
		want bool
	}{
		{"ab#c", "ad#c", true},
		{"ab##", "c#d#", true},
		{"a##c", "#a#c", true},
		{"a#c", "b", false},
		{"", "aaaaa#####", true},
		{"gggg###", "aaaaa#####", false},
	}
	for _, tc := range cases {
		if got := BackspaceCompare(tc.s, tc.t); got != tc.want {
			t.Errorf("BackspaceCompare(%q, %q) == %t, want %t", tc.s, tc.t, got, tc.want)
		}
	}
}
|
package main
import (
"strconv"
"strings"
)
/*
* @lc app=leetcode id=257 lang=golang
*
* [257] Binary Tree Paths
*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// binaryTreePaths returns every root-to-leaf path of the tree, with node
// values joined by "->".
func binaryTreePaths(root *TreeNode) []string {
	var res []string
	var path []string
	helper(root, &path, &res)
	return res
}

// helper walks the tree depth-first, keeping the current root-to-node path
// in *path and appending a rendered path to *res at every leaf. The deferred
// pop keeps *path consistent on every return.
func helper(root *TreeNode, path, res *[]string) {
	if root == nil {
		return
	}
	*path = append(*path, strconv.Itoa(root.Val))
	defer func() { *path = (*path)[:len(*path)-1] }()
	if root.Left == nil && root.Right == nil {
		*res = append(*res, strings.Join(*path, "->"))
		return
	}
	helper(root.Left, path, res)
	helper(root.Right, path, res)
}
|
package api
import (
"encoding/json"
"fmt"
"log"
"net/http"
"github.com/julienschmidt/httprouter"
)
// RouteHandler describes objects which handle requests from specific HTTP
// endpoints.
type RouteHandler interface {
RegisterRoutes(*httprouter.Router)
}
// StartServer registers all routes for RouteHandler objects, and spins up the
// HTTP server.
func StartServer(port string, handlers []RouteHandler) {
	router := httprouter.New()
	// Let every handler attach its own endpoints to the shared router.
	for _, handler := range handlers {
		handler.RegisterRoutes(router)
	}
	log.Println("Listening on port:", port)
	portString := fmt.Sprintf(":%s", port)
	// ListenAndServe blocks; log.Fatalln exits the process if it returns.
	log.Fatalln(http.ListenAndServe(portString, router))
}
// constructAndSendResponse adds important, common headers to endpoint
// responses, and marshals the provided response body into JSON.
func constructAndSendResponse(w http.ResponseWriter, body interface{}) {
w.Header().Set("Content-Type", "application/json")
err := json.NewEncoder(w).Encode(body)
if err != nil {
errCode := http.StatusInternalServerError
errMsg := fmt.Sprintf("Failed to encode response as JSON: %s", err.Error())
http.Error(w, errMsg, errCode)
return
}
}
|
package main
import (
"fmt"
"bufio"
"os"
"log"
"strconv"
"strings"
"github.com/pitex/sieci-projekt/src/joinservice"
)
// Main function:
// First it creates Client and asks for address to connect to.
// After receiving address it creates a server and starts it.
// main interactively configures a node of the network: it reads this
// machine's IP and connection limit, optionally joins an existing network
// through a known peer, then starts the join service server.
func main() {
	reader := bufio.NewReader(os.Stdin)
	fmt.Printf("Enter your IP address: ")
	myip, _ := reader.ReadString('\n')
	// Fix: trim whitespace instead of slicing off the last byte, which
	// left a stray '\r' on Windows (CRLF) and panicked on EOF input
	// (empty string).
	myip = strings.TrimSpace(myip)
	fmt.Printf("Enter connection limit for this computer: ")
	temp, _ := reader.ReadString('\n')
	capacity, err := strconv.ParseInt(strings.TrimSpace(temp), 10, 0)
	if err != nil {
		log.Fatal(err)
	}
	// Every node must be able to hold at least two connections.
	if capacity < 2 {
		capacity = 2
	}
	fmt.Printf("Is this the first computer in network?: ")
	ans, _ := reader.ReadString('\n')
	ans = strings.ToLower(strings.TrimSpace(ans))
	root := ans == "y" || ans == "yes"
	var ip string
	var address string
	// Non-root nodes must first connect to an existing peer to obtain the
	// address to serve on.
	if !root {
		fmt.Printf("Enter IP address of a computer in network: ")
		ip, _ = reader.ReadString('\n')
		ip = strings.TrimSpace(ip)
		cli := joinservice.NewClient(myip, int(capacity), ip, root)
		address, err = cli.Connect()
		if err != nil {
			log.Fatal(err)
		}
	}
	server := joinservice.NewServer(myip, address, int(capacity), root)
	err = server.Start()
	if err != nil {
		log.Fatal(err)
	}
}
|
package operatorlister
import (
"fmt"
"sync"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
listers "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1alpha1"
)
// UnionCatalogSourceLister aggregates per-namespace CatalogSource listers
// behind a single lister interface. Access to the lister map is guarded by
// catsrcLock.
type UnionCatalogSourceLister struct {
	catsrcListers map[string]listers.CatalogSourceLister
	catsrcLock    sync.RWMutex
}

// List lists all CatalogSources in the indexer.
func (ucl *UnionCatalogSourceLister) List(selector labels.Selector) (ret []*v1alpha1.CatalogSource, err error) {
	ucl.catsrcLock.RLock()
	defer ucl.catsrcLock.RUnlock()

	// Deduplicate by UID: multiple registered listers may return the same
	// object (e.g. a namespace lister plus a NamespaceAll lister).
	set := make(map[types.UID]*v1alpha1.CatalogSource)
	for _, cl := range ucl.catsrcListers {
		catsrcs, err := cl.List(selector)
		if err != nil {
			return nil, err
		}

		for _, catsrc := range catsrcs {
			set[catsrc.GetUID()] = catsrc
		}
	}

	for _, catsrc := range set {
		ret = append(ret, catsrc)
	}

	return
}

// CatalogSources returns an object that can list and get CatalogSources.
func (ucl *UnionCatalogSourceLister) CatalogSources(namespace string) listers.CatalogSourceNamespaceLister {
	ucl.catsrcLock.RLock()
	defer ucl.catsrcLock.RUnlock()

	// Check for specific namespace listers
	if cl, ok := ucl.catsrcListers[namespace]; ok {
		return cl.CatalogSources(namespace)
	}

	// Check for any namespace-all listers
	if cl, ok := ucl.catsrcListers[metav1.NamespaceAll]; ok {
		return cl.CatalogSources(namespace)
	}

	// Fall back to a null lister so callers never get a nil pointer.
	return &NullCatalogSourceNamespaceLister{}
}

// RegisterCatalogSourceLister installs (or replaces) the lister for the
// given namespace, lazily allocating the map on first registration.
func (ucl *UnionCatalogSourceLister) RegisterCatalogSourceLister(namespace string, lister listers.CatalogSourceLister) {
	ucl.catsrcLock.Lock()
	defer ucl.catsrcLock.Unlock()

	if ucl.catsrcListers == nil {
		ucl.catsrcListers = make(map[string]listers.CatalogSourceLister)
	}

	ucl.catsrcListers[namespace] = lister
}
// RegisterCatalogSourceLister forwards registration to the union lister.
func (l *operatorsV1alpha1Lister) RegisterCatalogSourceLister(namespace string, lister listers.CatalogSourceLister) {
	l.catalogSourceLister.RegisterCatalogSourceLister(namespace, lister)
}

// CatalogSourceLister exposes the aggregated CatalogSource lister.
func (l *operatorsV1alpha1Lister) CatalogSourceLister() listers.CatalogSourceLister {
	return l.catalogSourceLister
}

// NullCatalogSourceNamespaceLister is an implementation of a null CatalogSourceNamespaceLister. It is
// used to prevent nil pointers when no CatalogSourceNamespaceLister has been registered for a given
// namespace.
type NullCatalogSourceNamespaceLister struct {
	listers.CatalogSourceNamespaceLister
}

// List returns nil and an error explaining that this is a NullCatalogSourceNamespaceLister.
func (n *NullCatalogSourceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CatalogSource, err error) {
	return nil, fmt.Errorf("cannot list CatalogSources with a NullCatalogSourceNamespaceLister")
}

// Get returns nil and an error explaining that this is a NullCatalogSourceNamespaceLister.
func (n *NullCatalogSourceNamespaceLister) Get(name string) (*v1alpha1.CatalogSource, error) {
	return nil, fmt.Errorf("cannot get CatalogSource with a NullCatalogSourceNamespaceLister")
}
|
// Package header provides a request header based implementation of a
// session loader.
package header
import (
"net/http"
"strings"
"github.com/pomerium/pomerium/internal/encoding"
"github.com/pomerium/pomerium/internal/httputil"
"github.com/pomerium/pomerium/internal/sessions"
)
// Compile-time check that Store satisfies the SessionLoader interface.
var _ sessions.SessionLoader = &Store{}

// Store implements the load session store interface using http
// authorization headers.
type Store struct {
	encoder encoding.Unmarshaler
}

// NewStore returns a new header store for loading sessions from
// authorization header as defined in as defined in rfc2617
//
// NOTA BENE: While most servers do not log Authorization headers by default,
// you should ensure no other services are logging or leaking your auth headers.
func NewStore(enc encoding.Unmarshaler) *Store {
	return &Store{
		encoder: enc,
	}
}

// LoadSession tries to retrieve the token string from the Authorization header.
// It returns sessions.ErrNoSessionFound when no recognized header is present.
func (as *Store) LoadSession(r *http.Request) (string, error) {
	jwt := TokenFromHeaders(r)
	if jwt == "" {
		return "", sessions.ErrNoSessionFound
	}
	return jwt, nil
}
// TokenFromHeaders retrieves the value of the authorization header(s) from a given
// request and authentication type.
// TokenFromHeaders extracts a session token from the request headers,
// checking in order:
//
//	X-Pomerium-Authorization: <JWT>
//	Authorization: Pomerium <JWT>
//	Authorization: Bearer Pomerium-<JWT>
//
// It returns the empty string when none of the forms match.
func TokenFromHeaders(r *http.Request) string {
	if jwt := r.Header.Get(httputil.HeaderPomeriumAuthorization); jwt != "" {
		return jwt
	}

	bearer := r.Header.Get(httputil.HeaderAuthorization)
	for _, prefix := range []string{
		httputil.AuthorizationTypePomerium + " ",
		"Bearer " + httputil.AuthorizationTypePomerium + "-",
	} {
		if strings.HasPrefix(bearer, prefix) {
			return bearer[len(prefix):]
		}
	}
	return ""
}
|
package Modules
// VpsInfo describes a single tracked VPS instance.
type VpsInfo struct {
	Time    uint
	Count   uint
	Name    string
	Ip      string
	Backend string
}

// global holds the shared online/offline VPS registries.
type global struct {
	Online  map[string]*VpsInfo
	Offline map[string]int64
}

// Global is the package-wide registry instance; call Init before use.
var Global global

// Init allocates both registry maps so they are safe to write into.
func (g *global) Init() {
	g.Online = map[string]*VpsInfo{}
	g.Offline = map[string]int64{}
}
|
package main
import (
"github.com/beego/beego/v2/client/orm"
_ "github.com/mattn/go-sqlite3"
)
// User -
// User is the ORM model mapped to the users table.
type User struct {
	ID int `orm:"column(id)"`
	Name string `orm:"column(name)"`
}

// init wires up beego's ORM: model registration, the sqlite3 driver, and the
// default database file (beego.db).
func init() {
	// need to register models in init
	orm.RegisterModel(new(User))
	// need to register db driver
	orm.RegisterDriver("sqlite3", orm.DRSqlite)
	// need to register default database
	orm.RegisterDataBase("default", "sqlite3", "beego.db")
}

// main syncs the schema and inserts one example row.
// NOTE(review): the errors returned by RunSyncdb and Insert are ignored
// here — acceptable for a demo, but confirm before reusing this pattern.
func main() {
	// automatically build table
	orm.RunSyncdb("default", false, true)
	// create orm object
	o := orm.NewOrm()
	// data
	user := new(User)
	user.Name = "mike"
	// insert data
	o.Insert(user)
}
|
package app
import (
"net/http"
"time"
"appengine/datastore"
"github.com/PinkFairyArmadillos/go-endpoints/endpoints"
)
const clientId = "755334619802-7mgbbpk6vbkmim76ov2kvi0vn607j2cu.apps.googleusercontent.com"
var (
scopes = []string{
endpoints.EmailScope,
"https://www.googleapis.com/auth/userinfo.profile",
}
clientIds = []string{clientId, endpoints.ApiExplorerClientId}
audiences = []string{clientId}
)
// Reminder is a datastore entity that represents a single reminder.
// It also serves as (part of) a response of ReminderService.
type Reminder struct {
Id string `json:"id,omitempty" datastore:"-"`
User string `json:"user" datastore:"User"`
Title string `json:"title" datastore:",noindex"`
Location []float64 `json:"location" datastore:",noindex"`
Reminder []string `json:"reminder" datastore:",noindex"`
Date time.Time `json:"date"`
Urgency int `json:"urgency"`
}
// ReminderService
type ReminderService struct {
}
// ReminderList is a response type of ReminderService.List method
type RemindersList struct {
Items []*Reminder `json:"items"`
}
type ReminderUserQuery struct {
UserName string `json:"username" endpoints:"required"`
}
// List fills resp with every reminder whose User property matches the
// requested username. The Id of each item is the encoded datastore key.
func (gs *ReminderService) List(
	r *http.Request, req *ReminderUserQuery, resp *RemindersList) error {
	ctx := endpoints.NewContext(r)
	items := make([]*Reminder, 0, 10)
	query := datastore.NewQuery("Reminder").Filter("User =", req.UserName)
	keys, err := query.GetAll(ctx, &items)
	if err != nil {
		return err
	}
	for i := range keys {
		items[i].Id = keys[i].Encode()
	}
	resp.Items = items
	return nil
}
// NewReminder is the expected data structure
// for CreateReminder requests; every field is required by the endpoints layer.
type NewReminder struct {
	List     []string  `json:"reminder" endpoints:"required"` // reminder text lines
	Lat      float64   `json:"latitude" endpoints:"required"`
	Lng      float64   `json:"longitude" endpoints:"required"`
	UserName string    `json:"username" endpoints:"required"`
	// NOTE(review): Time appears to be accepted but never stored by the
	// create handler (which uses time.Now()) — confirm intent.
	Time    time.Time `json:"due date/time" endpoints:"required"`
	Title   string    `json:"title" endpoints:"required"`
	Urgency int       `json:"urgency" endpoints:"required"`
}
// CreateReminder creates a new Reminder based on the provided NewReminder,
// stores it in the datastore, and echoes the stored entity (with its encoded
// key in Id) back through reminder.
func (gs *ReminderService) CreateReminder(
	r *http.Request, req *NewReminder, reminder *Reminder) error {
	c := endpoints.NewContext(r)
	// BUG FIX: the original allocated make([]string, 1) and then overwrote it
	// with append(req.List) — a no-op append that simply aliased the request
	// slice. Copy the list instead of aliasing it.
	reminder.Reminder = append([]string(nil), req.List...)
	reminder.Location = []float64{req.Lat, req.Lng}
	// NOTE(review): req.Time is ignored; the stored Date is the server time.
	reminder.Date = time.Now()
	reminder.User = req.UserName
	reminder.Title = req.Title
	reminder.Urgency = req.Urgency
	key, err := datastore.Put(
		c, datastore.NewIncompleteKey(c, "Reminder", nil), reminder)
	if err != nil {
		return err
	}
	reminder.Id = key.Encode()
	return nil
}
// ReminderIdReq serves as a data structure for identifying a single Reminder.
type ReminderIdReq struct {
	Id string `json:"id" endpoints:"required"` // encoded datastore key
}
// Delete removes the single reminder identified by its encoded datastore key.
func (gs *ReminderService) Delete(
	r *http.Request, req *ReminderIdReq, _ *endpoints.VoidMessage) error {
	key, err := datastore.DecodeKey(req.Id)
	if err != nil {
		return err
	}
	return datastore.Delete(endpoints.NewContext(r), key)
}
// registerApi registers ReminderService with the endpoints framework and
// configures name, HTTP method, path and description for each RPC method.
// CreateReminder additionally requires OAuth scopes/audiences/client IDs.
func registerApi() (*endpoints.RpcService, error) {
	reminderService := &ReminderService{}
	rpcService, err := endpoints.RegisterServiceWithDefaults(reminderService)
	if err != nil {
		return nil, err
	}
	rpcService.Info().Name = "reminders"
	info := rpcService.MethodByName("List").Info()
	info.Name, info.HttpMethod, info.Path, info.Desc =
		"reminders.list", "GET", "reminders/list", "List most recent reminders."
	info = rpcService.MethodByName("CreateReminder").Info()
	info.Name, info.HttpMethod, info.Path, info.Desc =
		"reminders.createReminder", "POST", "reminders/createreminder", "Create a Reminder."
	// Only CreateReminder is restricted to authenticated callers.
	info.Scopes = scopes
	info.Audiences = audiences
	info.ClientIds = clientIds
	info = rpcService.MethodByName("Delete").Info()
	info.Name, info.HttpMethod, info.Path, info.Desc =
		"reminders.delete", "DELETE", "reminders/delete/{id}", "Delete a single reminder."
	return rpcService, nil
}
|
package certgen
// DeploymentName holds the name of the current deployment.
var DeploymentName string

// SetDeploymentName records the deployment name for later retrieval.
func SetDeploymentName(name string) {
	DeploymentName = name
}

// GetDeploymentName reports the name recorded by SetDeploymentName.
func GetDeploymentName() string {
	return DeploymentName
}
|
// Copyright 2016 Kranz. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package base
import (
"path"
"strconv"
"strings"
)
// PathToUrl converts a file path back into the URL it encodes: the first
// extension occurrence is dropped, "__" is protected via a "#" marker,
// single underscores become slashes, then the marker is restored to "_".
func PathToUrl(p string) string {
	steps := []struct {
		old, new string
		n        int
	}{
		{path.Ext(p), "", 1}, // strip the extension (first occurrence only)
		{"__", "#", -1},      // protect escaped underscores
		{"_", "/", -1},       // single underscores encode slashes
		{"#", "_", -1},       // restore protected underscores
	}
	for _, s := range steps {
		p = strings.Replace(p, s.old, s.new, s.n)
	}
	return p
}
// UrLToPath maps a URL to its on-disk path form: "__" is protected with a
// "#" marker, slashes become underscores, then the marker becomes "_".
func UrLToPath(p string) string {
	for _, s := range [...][2]string{
		{"__", "#"},
		{"/", "_"},
		{"#", "_"},
	} {
		p = strings.Replace(p, s[0], s[1], -1)
	}
	return p
}
// SplitMethodAndStatus splits a "METHOD_CODE" token into its parts.
// Without an underscore the method is empty and the code defaults to 200;
// the code also defaults to 200 when its part is not a valid integer.
func SplitMethodAndStatus(s string) (method string, code int) {
	code = 200
	if !strings.Contains(s, "_") {
		return
	}
	parts := strings.Split(s, "_")
	method = parts[0]
	if n, err := strconv.ParseInt(parts[1], 10, 64); err == nil {
		code = int(n)
	}
	return
}
|
package main
import (
"log"
"os"
"os/exec"
"os/signal"
"syscall"
)
// handleSigchld reaps zombie children whenever a SIGCHLD arrives on c.
// It never returns; run it in its own goroutine.
func handleSigchld(c <-chan os.Signal) {
	log.Println("process reaper launching")
	for range c {
		// Reap everything that has exited; one signal may cover several
		// children, so loop until Wait4 reports nothing left.
		for {
			status := new(syscall.WaitStatus)
			pid, err := syscall.Wait4(-1, status, syscall.WNOHANG, nil)
			if pid == 0 || err == syscall.ECHILD {
				break
			}
			log.Println("reaped", pid, err)
		}
	}
}
// handleTty keeps a login getty running on tty1, restarting it whenever it
// exits. It returns only if agetty cannot be started at all.
func handleTty() {
	log.Println("opening console")
	for {
		getty := exec.Command("/bin/agetty", "--noclear", "tty1")
		if err := getty.Start(); err != nil {
			log.Println("agetty could not be started: ", err)
			return
		}
		log.Println("agetty terminated with ", getty.Wait())
	}
}
// initReaper starts the zombie-reaping goroutine and subscribes it to
// SIGCHLD. The channel is buffered so bursts of signals are not dropped.
func initReaper() {
	log.Println("preparing process reaper")
	sigs := make(chan os.Signal, 10)
	go handleSigchld(sigs)
	signal.Notify(sigs, syscall.SIGCHLD)
}
// main boots the system when running as init (amIInit), otherwise acts as
// the communication client.
func main() {
	if amIInit() {
		initLogging()
		defer closeLogging()
		log.Println("initiald starting up")
		initReaper()
		initHostname()
		initTrueno()
		handleTty() // blocks for the lifetime of the console
	} else {
		log.Println("I am here to communicate")
	}
}
|
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// TraceJobSpec defines the desired state of TraceJob
type TraceJobSpec struct {
	// Program is a string literal to evaluate as a bpftrace program.
	Program string `json:"program"`
	// Hostname of the target — presumably the node to run the trace on;
	// confirm against the controller.
	Hostname            string  `json:"hostname"`
	ServiceAccount      *string `json:"serviceAccount,omitempty"`      // +optional
	ImageNameTag        *string `json:"imageNameTag,omitempty"`        // +optional
	InitImageNameTag    *string `json:"initImageNameTag,omitempty"`    // +optional
	FetchHeaders        bool    `json:"fetchHeaders,omitempty"`        // +optional
	Deadline            *int64  `json:"deadline,omitempty"`            // +optional
	DeadlineGracePeriod *int64  `json:"deadlineGracePeriod,omitempty"` // +optional
}
// TraceJobStatus defines the observed state of TraceJob
type TraceJobStatus struct {
	// ID is a generated UUID for this object.
	ID *types.UID `json:"id,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// TraceJob is the Schema for the tracejobs API
type TraceJob struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              TraceJobSpec   `json:"spec,omitempty"`
	Status            TraceJobStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// TraceJobList contains a list of TraceJob
type TraceJobList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []TraceJob `json:"items"`
}
// init registers both types with the scheme builder so the manager can
// serve/reconcile them.
func init() {
	SchemeBuilder.Register(&TraceJob{}, &TraceJobList{})
}
|
package validate
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
v1beta1 "k8s.io/api/admission/v1beta1"
)
// TestValidateJSON feeds a captured pod-exec AdmissionReview into Validate
// and expects an allowed response.
// NOTE(review): the fixture declares apiVersion admission.k8s.io/v1 but the
// response is decoded into v1beta1.AdmissionReview — confirm this mix is
// intentional.
func TestValidateJSON(t *testing.T) {
	rawJSON := `{
"kind": "AdmissionReview",
"apiVersion": "admission.k8s.io/v1",
"request": {
"uid": "07ea6264-e624-439b-83d0-f7724106ec16",
"kind": {
"group": "",
"version": "v1",
"kind": "PodExecOptions"
},
"resource": {
"group": "",
"version": "v1",
"resource": "pods"
},
"subResource": "exec",
"requestKind": {
"group": "",
"version": "v1",
"kind": "PodExecOptions"
},
"requestResource": {
"group": "",
"version": "v1",
"resource": "pods"
},
"requestSubResource": "exec",
"name": "nginx-deployment-774548f7d4-4mgc2",
"namespace": "admission-test",
"operation": "CONNECT",
"userInfo": {
"username": "system:serviceaccount:che:che-workspace",
"groups": [
"system:masters",
"system:authenticated"
]
},
"object": {
"kind": "PodExecOptions",
"apiVersion": "v1",
"stdin": true,
"stdout": true,
"tty": true,
"container": "nginx",
"command": [
"bash"
]
},
"oldObject": null,
"dryRun": false,
"options": null
}
}`
	response, err := Validate([]byte(rawJSON))
	if err != nil {
		t.Errorf("failed to validate AdmissionRequest %s with error %s", string(response), err)
	}
	r := v1beta1.AdmissionReview{}
	err = json.Unmarshal(response, &r)
	assert.NoError(t, err, "failed to unmarshal with error %s", err)
	rr := r.Response
	assert.True(t, rr.Allowed)
}
|
package main
import (
"fmt"
"jblee.net/adventofcode2018/utils"
)
// toLower maps an ASCII upper-case letter to lower case; every other byte
// is returned unchanged.
func toLower(c byte) byte {
	if 'A' <= c && c <= 'Z' {
		c += 'a' - 'A'
	}
	return c
}
// cancelOut reports whether a and b are the same letter in opposite cases.
func cancelOut(a, b byte) bool {
	return a != b && toLower(a) == toLower(b)
}
// main fully reacts the polymer from input.txt by repeatedly removing
// adjacent opposite-case pairs, then prints the reduced length.
func main() {
	polymer := utils.ReadLinesOrDie("input.txt")[0]
	i := 0
	for i < len(polymer)-1 {
		if !cancelOut(polymer[i], polymer[i+1]) {
			i++
			continue
		}
		polymer = polymer[:i] + polymer[i+2:]
		if i > 0 {
			i-- // the removal may expose a new reactive pair just before i
		}
	}
	fmt.Printf("line len: %d\n", len(polymer))
}
|
package middleware
import (
"github.com/gin-gonic/gin"
"go-admin/global"
"go-admin/models"
"go-admin/utils/response"
"net/http"
)
// CasbinHandler returns a gin middleware that authorizes the request
// (HTTP method + request URI) against the Casbin enforcer, first via the
// user's roles and then via the username itself.
func CasbinHandler() gin.HandlerFunc {
	return func(c *gin.Context) {
		claims, _ := c.Get("claims")
		waitUse, ok := claims.(*models.CustomClaims)
		if !ok {
			response.Result(http.StatusBadRequest, nil, "获取用户失败", 0, false, c)
			c.Abort()
			// BUG FIX: without this return the handler kept running with a
			// nil waitUse and panicked on waitUse.Roles below.
			return
		}
		obj := c.Request.URL.RequestURI()
		act := c.Request.Method
		// Check every role the user holds; administrator roles pass here.
		for _, v := range waitUse.Roles {
			if ok, _ := global.GSyncedEnforcer.Enforce(v.RoleName, obj, act); ok {
				c.Next()
				return
			}
		}
		// No role matched; fall back to a per-user policy.
		sub := waitUse.Username
		ok, _ = global.GSyncedEnforcer.Enforce(sub, obj, act)
		if !ok {
			response.Result(http.StatusBadRequest, nil, "权限不足", 0, false, c)
			c.Abort()
			return
		}
		// NOTE(review): even a successful user-level Enforce falls through to
		// Abort here, so the request is never forwarded. This preserves the
		// original control flow; confirm whether c.Next() was intended.
		c.Abort()
		return
	}
}
|
package popgun
import (
"bufio"
"fmt"
"io/ioutil"
"net"
"reflect"
"testing"
"time"
"github.com/DevelHell/popgun/backends"
)
// TestClient_handle drives the client loop over an in-memory pipe and checks
// the responses for an invalid command, a command that cannot run in the
// current state, and QUIT.
func TestClient_handle(t *testing.T) {
	s, c := net.Pipe()
	defer s.Close()
	defer c.Close()
	backend := backends.DummyBackend{}
	authorizator := backends.DummyAuthorizator{}
	client := newClient(authorizator, backend)
	go func() {
		client.handle(s)
	}()
	reader := bufio.NewReader(c)
	//read welcome message (content not asserted)
	response, err := reader.ReadString('\n')
	if err != nil {
		t.Fatal(err)
	}
	//invalid command
	expected := "-ERR Invalid command INVALID\r\n"
	fmt.Fprintf(c, "INVALID\n")
	response, err = reader.ReadString('\n')
	if err != nil {
		t.Fatal(err)
	}
	if response != expected {
		t.Errorf("Expected '%s', but got '%s'", expected, response)
	}
	//error executing command - rset cannot be executed in current state
	expected = "-ERR Error executing command RSET\r\n"
	fmt.Fprintf(c, "RSET\n")
	response, err = reader.ReadString('\n')
	if err != nil { // BUG FIX: this read error was previously ignored
		t.Fatal(err)
	}
	if response != expected {
		t.Errorf("Expected '%s', but got '%s'", expected, response)
	}
	//successful command
	expected = "+OK Goodbye\r\n"
	fmt.Fprintf(c, "QUIT\n")
	response, err = reader.ReadString('\n')
	if err != nil { // BUG FIX: this read error was previously ignored
		t.Fatal(err)
	}
	if response != expected {
		t.Errorf("Expected '%s', but got '%s'", expected, response)
	}
}
// TestClient_parseInput checks command normalization (upper-casing,
// whitespace trimming) and argument splitting for raw input lines.
func TestClient_parseInput(t *testing.T) {
	backend := backends.DummyBackend{}
	authorizator := backends.DummyAuthorizator{}
	client := newClient(authorizator, backend)
	cases := [][][]string{
		{{"COMMAND1"}, {"COMMAND1"}},
		{{"COMMAND1 "}, {"COMMAND1"}},
		{{"COMMAND1 \r \n "}, {"COMMAND1"}},
		{{"comm ARG"}, {"COMM", "ARG"}},
		{{"COMM arg"}, {"COMM", "arg"}},
		{{"COMM ARG1 ARG2"}, {"COMM", "ARG1", "ARG2"}},
	}
	for _, tc := range cases {
		cmd, args := client.parseInput(tc[0][0])
		if want := tc[1][0]; cmd != want {
			t.Errorf("Expected '%s', but got '%s'", want, cmd)
		}
		if wantArgs := tc[1][1:]; !reflect.DeepEqual(args, wantArgs) {
			t.Errorf("Expected '%s', but got '%s'", wantArgs, args)
		}
	}
}
// TestServer_Start boots the POP3 server on a local port and verifies that
// a TCP client can connect within the timeout.
func TestServer_Start(t *testing.T) {
	cfg := Config{
		ListenInterface: "localhost:3001",
	}
	srv := NewServer(cfg, backends.DummyAuthorizator{}, backends.DummyBackend{})
	srv.Start()
	conn, err := net.DialTimeout("tcp", cfg.ListenInterface, 3*time.Second)
	if err != nil {
		t.Errorf("Expected listening on '%s', but could not connect", cfg.ListenInterface)
		return
	}
	defer conn.Close()
}
// printerFunc writes protocol output to one end of a connection under test.
type printerFunc func(conn net.Conn)

// printerTest runs f against an in-memory pipe and returns everything f
// wrote, once f has closed its end.
func printerTest(t *testing.T, f printerFunc) string {
	serverEnd, clientEnd := net.Pipe()
	defer serverEnd.Close()
	go func() {
		f(clientEnd)
		clientEnd.Close()
	}()
	data, err := ioutil.ReadAll(serverEnd)
	if err != nil {
		t.Fatal(err)
	}
	return string(data)
}
// TestPrinter_Welcome checks the greeting banner.
func TestPrinter_Welcome(t *testing.T) {
	want := "+OK POPgun POP3 server ready\r\n"
	got := printerTest(t, func(conn net.Conn) {
		NewPrinter(conn).Welcome()
	})
	if got != want {
		t.Errorf("Expected '%s', but got '%s'", want, got)
	}
}
// TestPrinter_Ok checks formatting of positive responses.
func TestPrinter_Ok(t *testing.T) {
	want := "+OK 2 foxes jumping over lazy dog\r\n"
	got := printerTest(t, func(conn net.Conn) {
		NewPrinter(conn).Ok("%d foxes jumping over lazy dog", 2)
	})
	if got != want {
		t.Errorf("Expected '%s', but got '%s'", want, got)
	}
}
// TestPrinter_Err checks formatting of error responses.
func TestPrinter_Err(t *testing.T) {
	want := "-ERR everything wrong in 10 seconds\r\n"
	got := printerTest(t, func(conn net.Conn) {
		NewPrinter(conn).Err("everything wrong in %d seconds", 10)
	})
	if got != want {
		t.Errorf("Expected '%s', but got '%s'", want, got)
	}
}
// TestPrinter_MultiLine checks multi-line responses terminated by ".".
func TestPrinter_MultiLine(t *testing.T) {
	want := "multi\r\nline\r\n.\r\n"
	got := printerTest(t, func(conn net.Conn) {
		NewPrinter(conn).MultiLine([]string{"multi", "line"})
	})
	if got != want {
		t.Errorf("Expected '%s', but got '%s'", want, got)
	}
}
|
package worker
import (
"FoG/src/github.com/cl/crontab/common"
"context"
"fmt"
clientv3 "go.etcd.io/etcd/client/v3"
)
// JobLock is a distributed lock built on etcd (lease + transaction).
type JobLock struct {
	kv         clientv3.KV
	lease      clientv3.Lease
	jobName    string
	leaseId    clientv3.LeaseID   // lease ID, recorded so it can be revoked later
	cancelFunc context.CancelFunc // cancel function; used later to stop the lease keep-alive
	isLocked   bool               // whether the lock was acquired successfully
}
// InitJobLock builds a JobLock for the named job from the given etcd KV and
// Lease handles. The lock is not acquired until TryLock is called.
func InitJobLock(jobName string, kv clientv3.KV, lease clientv3.Lease) (jobLock *JobLock) {
	return &JobLock{
		kv:      kv,
		lease:   lease,
		jobName: jobName,
	}
}
// TryLock attempts to acquire the distributed lock via lease + txn.
// On success it records the lease ID, the keep-alive cancel function and the
// locked flag so Unlock can release everything; on failure it revokes the
// lease and returns an error (common.ERR_LOCK_ALREADY_REQUIRED when another
// owner already holds the key).
func (jobLock *JobLock) TryLock() (err error) {
	ctx, cancelFunc := context.WithCancel(context.TODO())
	// 1: create a 5 second lease
	leaseGrantResp, err := jobLock.lease.Grant(context.TODO(), 5)
	if err != nil {
		cancelFunc()
		return
	}
	// 2: remember the lease id
	leaseId := leaseGrantResp.ID
	// 3: keep the lease alive automatically
	keepRespChan, err := jobLock.lease.KeepAlive(ctx, leaseId)
	if err != nil {
		cancelFunc()
		jobLock.lease.Revoke(context.TODO(), leaseId)
		return
	}
	// 4: drain keep-alive responses; the channel closing (or a nil response)
	// means the lease expired or the keep-alive was cancelled.
	go func() {
		for keepResp := range keepRespChan {
			if keepResp == nil {
				break
			}
		}
		fmt.Println("-------租约到期")
	}()
	// 5: transaction — take the key only if nobody has created it yet
	txn := jobLock.kv.Txn(context.TODO())
	lockKey := common.JOB_LOCK_DIR + jobLock.jobName
	txn.If(clientv3.Compare(clientv3.CreateRevision(lockKey), "=", 0)).
		Then(clientv3.OpPut(lockKey, "", clientv3.WithLease(leaseId))).
		Else(clientv3.OpGet(lockKey))
	txnResp, err := txn.Commit()
	if err == nil && !txnResp.Succeeded {
		err = common.ERR_LOCK_ALREADY_REQUIRED
	}
	if err != nil {
		// failure: stop the keep-alive and release the lease
		cancelFunc()
		jobLock.lease.Revoke(context.TODO(), leaseId)
		return
	}
	// 6: success. BUG FIX: the original never stored cancelFunc nor set
	// isLocked, so Unlock either panicked on a nil cancelFunc or did nothing
	// and the lock was never released.
	jobLock.leaseId = leaseId
	jobLock.cancelFunc = cancelFunc
	jobLock.isLocked = true
	return
}
// Unlock releases the lock when held: the keep-alive is cancelled and the
// lease is revoked, which deletes the lock key. Clearing isLocked first
// makes repeated Unlock calls safe.
func (jobLock *JobLock) Unlock() {
	if jobLock.isLocked {
		jobLock.isLocked = false
		jobLock.cancelFunc()
		jobLock.lease.Revoke(context.TODO(), jobLock.leaseId)
	}
}
|
package NFA
import (
"fmt"
"testing"
)
// TestDFA builds a rulebook and runs "bbabb" through the machine.
// NOTE(review): state 1 has two rules for 'b' (to 1 and to 2), so the
// rulebook is actually nondeterministic, and the test only prints the
// result instead of asserting it — consider a real assertion.
func TestDFA(t *testing.T) {
	rulebook := DFARulebook{Rules: []DFARule{{
		State:     2,
		Character: 'b',
		NextState: 3,
	}, {
		State:     2,
		Character: 'a',
		NextState: 3,
	}, {
		State:     1,
		Character: 'b',
		NextState: 1,
	}, {
		State:     1,
		Character: 'a',
		NextState: 1,
	}, {
		State:     1,
		Character: 'b',
		NextState: 2,
	}, {
		State:     3,
		Character: 'a',
		NextState: 4,
	}, {
		State:     3,
		Character: 'b',
		NextState: 4,
	}}}
	nfa := New(1, []int32{4}, rulebook)
	nfa.ReadString("bbabb")
	fmt.Println(nfa.Accepting())
}
// PrintStructValueMap prints the NextState of every rule in the set on one
// space-separated line (iteration order is map order, i.e. random).
func PrintStructValueMap(valueKeyMap map[DFARule]struct{}) {
	for rule := range valueKeyMap {
		fmt.Print(rule.NextState, " ")
	}
	fmt.Println()
}
// TestNFA_OE_AE_IE_Endings recognizes Russian words ending in -ае/-ое/-ие:
// state 1 loops on any alphabet letter, а/о/и lead to state 2, and е to the
// accepting state 3. Results are printed, not asserted.
// NOTE(review): the alphabet string contains 'э' twice and appears to be
// missing 'ы' — confirm whether that is intentional.
func TestNFA_OE_AE_IE_Endings(t *testing.T) {
	rulebook := DFARulebook{Rules: []DFARule{{
		State:     1,
		Character: 'а',
		NextState: 2,
	}, {
		State:     1,
		Character: 'о',
		NextState: 2,
	}, {
		State:     1,
		Character: 'и',
		NextState: 2,
	}, {
		State:     2,
		Character: 'е',
		NextState: 3,
	},
	}}
	alphabet := "абвгдеёжзийклмнопрстуфхцчшщъэьэюя"
	for _, v := range alphabet {
		rulebook.AddRule(1, int32(v), 1)
	}
	//nfa := New(1, []int32{3}, Rulebook)
	//nfa.CheckAndPrintWords([]string{"большое", "театр", "самолет", "дикие", "танцы", "трение", "дунае"})
	nfa := NFADesign{
		CurrentStates: []int32{1},
		AcceptStates:  []int32{3},
		Rulebook:      rulebook,
	}
	fmt.Println(nfa.Accepts("большое"))
	fmt.Println(nfa.Accepts("театр"))
	fmt.Println(nfa.Accepts("самолет"))
	fmt.Println(nfa.Accepts("дикие"))
	fmt.Println(nfa.Accepts("танцы"))
	fmt.Println(nfa.Accepts("трение"))
	fmt.Println(nfa.Accepts("дунае"))
}
|
package main
import (
"fmt"
)
// main demonstrates a buffered channel of capacity 2: two sends succeed
// immediately; a third would block until a receive frees a slot.
func main() {
	ch := make(chan int, 2)
	ch <- 42
	ch <- 43
	// A third send (ch <- 44) would block here because the buffer is full:
	// at least one value must be received before another can be queued.
	fmt.Println(<-ch)
	// One slot is free again, so this send does not block.
	ch <- 44
	fmt.Println(<-ch)
	fmt.Println(<-ch)
}
|
package cli
import (
"fmt"
"strings"
sdk "github.com/cosmos/cosmos-sdk/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
paramscutils "github.com/cosmos/cosmos-sdk/x/params/client/utils"
paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal"
"github.com/gookit/gcli/v3"
"github.com/ovrclk/akcmd/client"
"github.com/ovrclk/akcmd/l10n"
)
// NewSubmitParamChangeProposalTxCmd returns a CLI command handler for creating
// a parameter change proposal governance transaction.
func NewSubmitParamChangeProposalTxCmd() *gcli.Command {
	return &gcli.Command{
		Name: "param-change",
		Desc: "Submit a parameter change proposal",
		Help: strings.TrimSpace(
			fmt.Sprintf(`Submit a parameter proposal along with an initial deposit.
The proposal details must be supplied via a JSON file. For values that contains
objects, only non-empty fields will be updated.
IMPORTANT: Currently parameter changes are evaluated but not validated, so it is
very important that any "value" change is valid (ie. correct type and within bounds)
for its respective parameter, eg. "MaxValidators" should be an integer and not a decimal.
Proper vetting of a parameter change proposal should prevent this from happening
(no deposits should occur during the governance process), but it should be noted
regardless.
Example:
$ %s tx gov submit-proposal param-change <path/to/proposal.json> --from=<key_or_address>
Where proposal.json contains:
{
"title": "Staking Param Change",
"description": "Update max validators",
"changes": [
{
"subspace": "staking",
"key": "MaxValidators",
"value": 105
}
],
"deposit": "1000stake"
}
`,
				l10n.GetLocalizationStrings().AppName,
			),
		),
		// Config declares the single required positional argument.
		Config: func(cmd *gcli.Command) {
			cmd.AddArg("proposal-file", "", true)
		},
		// Func parses the proposal file, builds the governance message and
		// broadcasts the transaction.
		Func: func(cmd *gcli.Command, args []string) error {
			clientCtx, err := client.GetClientTxContext()
			if err != nil {
				return err
			}
			proposal, err := paramscutils.ParseParamChangeProposalJSON(clientCtx.LegacyAmino, args[0])
			if err != nil {
				return err
			}
			from := clientCtx.GetFromAddress()
			content := paramproposal.NewParameterChangeProposal(
				proposal.Title, proposal.Description, proposal.Changes.ToParamChanges(),
			)
			deposit, err := sdk.ParseCoinsNormalized(proposal.Deposit)
			if err != nil {
				return err
			}
			msg, err := govtypes.NewMsgSubmitProposal(content, deposit, from)
			if err != nil {
				return err
			}
			return client.BroadcastTX(clientCtx, msg)
		},
	}
}
|
package faterpg
// Game represents a single FATE RPG game session.
type Game struct {
	Name        string
	Description string
	GM          *GM       // game master; required for play (see IsReady)
	Players     []*Player // at least one player is required (see IsReady)
	Aspects     []*Aspect
}
// NewGame creates an empty game with no GM, players or aspects.
func NewGame() *Game {
	return new(Game)
}
// IsReady reports whether the game can start: it needs a GM and at least
// one player.
func (game *Game) IsReady() bool {
	return game.GM != nil && len(game.Players) > 0
}
|
package stmanager_test
import (
"testing"
"manager/stmanager"
)
// Test_StockHistDataManager is a smoke test: Process must run without
// panicking on a freshly created manager.
func Test_StockHistDataManager(t *testing.T) {
	mgr := stmanager.NewStockHistDataManager()
	mgr.Process()
}
|
package enfins
import (
"testing"
"fmt"
"net/http"
)
// Shared fixtures used by every test in this package.
var cfg *Configuration
var client *APIClient

// init wires up an API client against the staging endpoint.
// NOTE(review): credentials and the endpoint address are hard-coded here;
// consider moving them to environment variables.
func init() {
	cfg = &Configuration{
		"http",
		"62.80.163.18:9000",
		"GEpcqDRSwB",
		"gF-GnQZMayC1PJ0rvAU5",
		"v1",
		nil,
	}
	// NOTE(review): err is never assigned, so the check below is dead code,
	// and fmt.Errorf only constructs an error value without printing it.
	var err error
	client = &APIClient{
		cfg,
		http.DefaultClient,
	}
	if err != nil {
		fmt.Errorf("error initing tests: %s", err)
	}
}
// TestQueryBuilder_AddParam verifies AddParam stores a query parameter.
func TestQueryBuilder_AddParam(t *testing.T) {
	qb, _ := NewQuery("/", cfg)
	qb.AddParam("test", "yes")
	if qb.params.Get("test") != "yes" {
		t.Error("Arg not added properly")
	}
}
// TestAPIClient_GetBalance is an integration test against the live staging
// API; it expects at least one balance entry.
func TestAPIClient_GetBalance(t *testing.T) {
	b, e, err := client.GetBalance()
	if e != nil {
		t.Errorf("error response with Message '%s'", e.Message)
	}
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if len(b) < 1 {
		t.Errorf("empty response array")
		t.Fail()
	}
}
// TestAPIClient_CreateBill creates a test bill (Testing: true) with a fixed
// merchant order id on the remote service.
func TestAPIClient_CreateBill(t *testing.T) {
	_, e, err := client.CreateBill(&CreateBillPostOpts{
		"UAH",
		100,
		"Test amount",
		"test_m_id_100", //fmt.Sprintf( "EXT_ORDER_RAND_%d", rand.Int()),
		&CreateBillOptional{
			Testing: true,
		},
	})
	if e != nil {
		t.Errorf("error response with Message %s", e.Message)
	}
	if err != nil {
		t.Errorf("error executing: %s", err.Error())
	}
}
// TestAPIClient_GetStats fetches UAH statistics in test mode and requires a
// non-nil result.
func TestAPIClient_GetStats(t *testing.T) {
	s, e, err := client.GetStats(&StatsOpt{"UAH", 0, 0, "", true})
	if e != nil {
		t.Errorf("error response with Message '%s'", e.Message)
	}
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if s == nil {
		t.FailNow()
	}
}
// TestAPIClient_GetRates_Success is skipped: the rates method is disabled
// on the server side.
func TestAPIClient_GetRates_Success(t *testing.T) {
	t.Skip("Disabled method")
	s, e, err := client.GetRates(&RatesOpt{
		"USD",
		"UAH",
		100,
		0,
	})
	if e != nil {
		t.Errorf("error response with Message '%s'", e.Message)
	}
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if s == nil {
		t.FailNow()
	}
}
// TestAPIClient_GetRates_Error is skipped (method disabled); it would expect
// a zero amount to produce an execution error and a nil result.
func TestAPIClient_GetRates_Error(t *testing.T) {
	t.Skip("Disabled method")
	s, e, err := client.GetRates(&RatesOpt{
		"USD",
		"UAH",
		0,
		0,
	})
	if e != nil {
		t.Errorf("error response with Message '%s'", e.Message)
	}
	if err == nil {
		t.Error("Must occur an error")
		t.FailNow()
	}
	if s != nil {
		t.FailNow()
	}
}
// TestAPIClient_Payout_Error pays out to a non-existent account and accepts
// error code 10606 (user not found) as the expected outcome.
func TestAPIClient_Payout_Error(t *testing.T) {
	s, e, err := client.Payout(&PayoutOpt{
		"UAH",
		"UAH",
		10.00,
		"Testing",
		"00000001", // Not exist
	})
	if e != nil {
		if e.Code == 10606 { // user not found
			return
		}
		t.Errorf("error response with Message '%s'", e.Message)
	}
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if s != nil {
		t.FailNow()
	}
}
// TestAPIClient_PayoutCard_Error attempts a zero-amount card payout and
// accepts error code 10563 (rejected by limits) as the expected outcome.
func TestAPIClient_PayoutCard_Error(t *testing.T) {
	s, e, err := client.PayoutCard(&PayoutCardOpt{
		"UAH",
		"UAH",
		0.00,
		"Testing",
		"4111111111111111", // Not exist
	})
	if e != nil {
		if e.Code == 10563 { // rejected by limits
			return
		}
		t.Errorf("error response with Message '%s'", e.Message)
	}
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if s != nil {
		t.FailNow()
	}
}
// TestAPIClient_GetHistory lists withdraw operations in a fixed time window
// and requires a non-nil result.
func TestAPIClient_GetHistory(t *testing.T) {
	s, e, err := client.GetHistory(&HistoryOpt{
		1496416572,
		1652919375,
		"withdraw",
		100,
		0,
		true,
	})
	if e != nil {
		t.Errorf("error response with Message '%s'", e.Message)
	}
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if s == nil {
		t.FailNow()
	}
}
// TestAPIClient_FindBill_ByBillId_Success looks up an existing bill by its
// numeric id.
func TestAPIClient_FindBill_ByBillId_Success(t *testing.T) {
	bid := new(int)
	*bid = 993
	s, e, err := client.FindBill(nil, bid)
	if e != nil {
		t.Errorf("error response with Message '%s'", e.Message)
	}
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if s == nil {
		t.FailNow()
	}
}
// TestAPIClient_FindBill_ByBillId_PermissionDeny queries a foreign bill id;
// only error code 10520 (not found) is accepted.
func TestAPIClient_FindBill_ByBillId_PermissionDeny(t *testing.T) {
	bid := new(int)
	*bid = 1
	_, e, err := client.FindBill(nil, bid)
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if e != nil {
		if e.Code != 10520 { // not found
			t.Errorf("error response with Message '%s'", e.Message)
		}
	}
}
// TestAPIClient_FindBill_ByMOrderId_Success looks up the bill created in
// TestAPIClient_CreateBill by its merchant order id and checks the echo.
func TestAPIClient_FindBill_ByMOrderId_Success(t *testing.T) {
	moid := new(string)
	*moid = "test_m_id_100"
	s, e, err := client.FindBill(moid, nil)
	if e != nil {
		t.Errorf("error response with Message '%s'", e.Message)
	}
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if s == nil {
		t.FailNow()
	}
	if s.MOrder != "test_m_id_100" {
		t.FailNow()
	}
}
// TestAPIClient_FindBill_ByMOrderId_PermissionDeny queries an unknown
// merchant order id; only error code 10520 (not found) is accepted.
func TestAPIClient_FindBill_ByMOrderId_PermissionDeny(t *testing.T) {
	moid := new(string)
	*moid = "test_m_id"
	_, e, err := client.FindBill(moid, nil)
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
	if e != nil {
		if e.Code != 10520 { // not found
			t.Errorf("error response with Message '%s'", e.Message)
		}
	}
}
// TestAPIClient_FindBill_NoParams expects a local validation error when
// neither identifier is supplied (and no API error response).
func TestAPIClient_FindBill_NoParams(t *testing.T) {
	_, e, err := client.FindBill(nil, nil)
	if err == nil {
		t.Errorf("expected error not found")
	}
	if e != nil {
		t.Errorf("not expected error response")
	}
}
// TestAPIClient_FindOrder_Error looks up an unknown order; only error code
// 10507 (not found) is accepted as an API error.
func TestAPIClient_FindOrder_Error(t *testing.T) {
	_, e, err := client.FindOrder(1000)
	if e != nil {
		if e.Code != 10507 { //not found
			t.Errorf("error response with Message '%s'", e.Message)
		}
	}
	if err != nil {
		t.Errorf("error executing with message '%s'", err.Error())
	}
}
|
package main
import (
"bufio"
"flag"
"fmt"
"os"
)
// main solves AoC 2018 day 2: the checksum is (#ids containing a doubled
// letter) * (#ids containing a tripled letter), and part two prints the
// letters common to the two nearly-identical ids.
func main() {
	filePath := flag.String("p", "input.txt", "Input's file path")
	flag.Parse()
	f, err := os.Open(*filePath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Opening input file: %v\n", err)
		os.Exit(1)
	}
	defer f.Close()
	var (
		doubles int
		triples int
		ids     []string
	)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		id := scanner.Text()
		counts := map[rune]int{}
		for _, r := range id {
			counts[r]++
		}
		ids = append(ids, id)
		hasDouble, hasTriple := false, false
		for _, n := range counts {
			hasDouble = hasDouble || n == 2
			hasTriple = hasTriple || n == 3
		}
		if hasDouble {
			doubles++
		}
		if hasTriple {
			triples++
		}
	}
	fmt.Printf("Checksum: %d\n", doubles*triples)
	fmt.Printf("Common letters: %s\n", findCommonLetters(ids))
}
// findCommonLetters returns the characters shared, position by position, by
// the first pair of ids differing in exactly one position; it returns the
// empty string when no such pair exists.
func findCommonLetters(ids []string) (result string) {
	for i := 0; i < len(ids); i++ {
		// Differences are symmetric, so scanning only j > i halves the work
		// of the original full double loop without changing the answer.
		for j := i + 1; j < len(ids); j++ {
			if len(ids[i]) != len(ids[j]) {
				// Guard: the original indexed ids[j] by ids[i]'s length and
				// could panic on unequal-length ids.
				continue
			}
			diffCount := 0
			for idx := range ids[i] {
				if ids[i][idx] != ids[j][idx] {
					diffCount++
					if diffCount > 1 {
						break
					}
				}
			}
			if diffCount == 1 {
				common := make([]byte, 0, len(ids[i]))
				for idx := range ids[i] {
					if ids[i][idx] == ids[j][idx] {
						common = append(common, ids[i][idx])
					}
				}
				return string(common)
			}
		}
	}
	return
}
|
/*
Background
A linear recurrence relation is a description of a sequence, defined as one or more initial terms and a linear formula on last k terms to calculate the next term.
(For the sake of simplicity, we only consider homogeneous relations, i.e. the ones without a constant term in the formula.)
A formal definition of a linear recurrence relation looks like this, where yn
is the desired sequence (1-based, so it is defined over n≥1) and xi's and ai's are constants:
y[n] = x[n] 1 <= n <= k
a[1]y[n-1]+a[2]y[n-2]+⋯+a[k]y[n-k] k < n
In this challenge, we will accelerate this sequence by converting it to a matrix form, so that the n-th term can be found by repeated squaring of the matrix in O(logn) steps, followed by inner product with the vector of initial terms.
For example, consider the famous Fibonacci sequence: its recurrence relation is y[n]=y[n-1]+y[n-2]
with k=2, and let's use the initial values x1=x2=1.
The recurrence relation can be converted to a matrix form:
[y[n-1]] => [y[n-1]] => [0 1] [y[n-2]]
[y[n]] => [y[n-1]+y[n-2]] => [1 1] [y[n-1]]
So multiplying the matrix once advances the sequence by one term. Since this holds for any n, it can be extended all the way until we reach the initial terms:
In general, one way to construct such a matrix is the following:
[y[n-k+1]] [0 1 0 ... 0] [y[n-k]]
[y[n-k+2]] [0 0 1 ... 0] [y[n-k+1]]
... [ ... ] ...
[y[n-1]] [0 0 0 ... 1] [y[n-2]]
[y[n]] [a[k] a[k-1] a[k-2] ... [a1]] [y[n-1]]
Note that, if you reverse the vectors and the matrix in every dimension, the equation still holds, retaining the property of "advancing a term by matmul-ing once".
(Actually any permutation will work, given that the rows and columns of the matrix are permuted in the same way.)
Challenge
Given the list of coefficients a1,⋯,ak, construct a matrix that represents the recurrence relation (so that its powers can be used to accelerate the computation of n-th term of the sequence).
You can take the coefficients in reverse order, and you can optionally take the value k as a separate input. k (the number of terms) is at least 1.
Standard code-golf rules apply. The shortest code in bytes wins.
Test cases
In all cases, any other matrix that can be formed by permuting rows and columns in the same way is also valid.
Input
[1,1]
Output
[[0, 1],
[1, 1]]
Input
[5]
Output
[[5]]
Input
[3, -1, 19]
Output
[[0, 1, 0],
[0, 0, 1],
[19, -1, 3]]
or reversed in both dimensions:
[[3, -1, 19],
[1, 0, 0],
[0, 1, 0]]
or cycled once in both dimensions:
[[3, 19, -1],
[0, 0, 1],
[1, 0, 0]]
etc.
*/
package main
import (
"fmt"
"reflect"
)
// main runs the three reference cases from the challenge statement.
func main() {
	cases := []struct {
		in   []int
		want [][]int
	}{
		{[]int{1, 1}, [][]int{{0, 1}, {1, 1}}},
		{[]int{5}, [][]int{{5}}},
		{[]int{3, -1, 19}, [][]int{{0, 1, 0}, {0, 0, 1}, {19, -1, 3}}},
	}
	for _, c := range cases {
		test(c.in, c.want)
	}
}
// test builds the companion matrix for a, prints it, and asserts that it
// equals the expected matrix r.
func test(a []int, r [][]int) {
	got := matrix(a)
	fmt.Println(got)
	assert(reflect.DeepEqual(got, r))
}
// assert panics when x is false; a minimal test helper.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// matrix builds the k×k companion matrix of the recurrence with
// coefficients a: ones on the superdiagonal and the coefficients, reversed,
// in the last row.
func matrix(a []int) [][]int {
	k := len(a)
	m := make([][]int, k)
	for i := range m {
		m[i] = make([]int, k)
		if i+1 < k {
			m[i][i+1] = 1 // superdiagonal shifts the state vector
		}
	}
	for i, coef := range a {
		m[k-1][k-1-i] = coef // a[1]..a[k] land right-to-left in the last row
	}
	return m
}
|
package resto
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"time"
"github.com/rs/zerolog/log"
"github.com/ryanuber/go-glob"
"github.com/saucelabs/saucectl/internal/config"
"github.com/saucelabs/saucectl/internal/job"
"github.com/saucelabs/saucectl/internal/requesth"
"github.com/saucelabs/saucectl/internal/vmd"
)
// Sentinel errors returned by Client methods; compare with errors.Is.
var (
	// ErrServerError is returned when the server was not able to correctly handle our request (status code >= 500).
	ErrServerError = errors.New("internal server error")
	// ErrJobNotFound is returned when the requested job was not found.
	ErrJobNotFound = errors.New("job was not found")
	// ErrTunnelNotFound is returned when the requested tunnel was not found.
	ErrTunnelNotFound = errors.New("tunnel not found")
)
// Client is an HTTP client for the resto REST API.
type Client struct {
	HTTPClient     *http.Client            // underlying client; carries the request timeout
	URL            string                  // API base URL
	Username       string                  // basic-auth user
	AccessKey      string                  // basic-auth password
	ArtifactConfig config.ArtifactDownload // artifact download preferences
}
// concurrencyResponse is the response body as is returned by resto's rest/v1.2/users/{username}/concurrency endpoint.
type concurrencyResponse struct {
	Concurrency struct {
		Organization struct {
			Allowed struct {
				VMS int `json:"vms"` // allowed virtual machines (used by ReadAllowedCCY)
				RDS int `json:"rds"` // allowed real devices
			}
		}
	}
}
// availableTunnelsResponse is the response body as is returned by resto's rest/v1/users/{username}/tunnels endpoint.
type availableTunnelsResponse map[string][]tunnel

// tunnel describes a single Sauce Connect tunnel entry.
type tunnel struct {
	ID       string `json:"id"`
	Status   string `json:"status"` // 'new', 'booting', 'deploying', 'halting', 'running', 'terminated'
	TunnelID string `json:"tunnel_identifier"`
}
// New creates a client for the given base URL and credentials; timeout
// bounds every HTTP request the client makes.
func New(url, username, accessKey string, timeout time.Duration) Client {
	httpClient := &http.Client{Timeout: timeout}
	return Client{
		HTTPClient: httpClient,
		URL:        url,
		Username:   username,
		AccessKey:  accessKey,
	}
}
// ReadJob fetches and returns the details of the job with the given id.
func (c *Client) ReadJob(ctx context.Context, id string) (job.Job, error) {
	req, err := createRequest(ctx, c.URL, c.Username, c.AccessKey, id)
	if err != nil {
		return job.Job{}, err
	}
	return doRequest(c.HTTPClient, req)
}
// PollJob polls job details at an interval until the job has ended (whether
// successfully or due to an error), or until ctx is cancelled.
func (c *Client) PollJob(ctx context.Context, id string, interval time.Duration) (job.Job, error) {
	request, err := createRequest(ctx, c.URL, c.Username, c.AccessKey, id)
	if err != nil {
		return job.Job{}, err
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			// Stop polling promptly on cancellation; the original looped on
			// ticker.C only and ignored the context between requests.
			return job.Job{}, ctx.Err()
		case <-ticker.C:
			j, err := doRequest(c.HTTPClient, request)
			if err != nil {
				return job.Job{}, err
			}
			if job.Done(j.Status) {
				return j, nil
			}
		}
	}
}
// GetJobAssetFileNames returns the list of asset file names for the given job.
func (c *Client) GetJobAssetFileNames(ctx context.Context, jobID string) ([]string, error) {
	req, err := createListAssetsRequest(ctx, c.URL, c.Username, c.AccessKey, jobID)
	if err != nil {
		return nil, err
	}
	return doListAssetsRequest(c.HTTPClient, req)
}
// GetJobAssetFileContent returns the raw content of a single job asset file.
func (c *Client) GetJobAssetFileContent(ctx context.Context, jobID, fileName string) ([]byte, error) {
	req, err := createAssetRequest(ctx, c.URL, c.Username, c.AccessKey, jobID, fileName)
	if err != nil {
		return nil, err
	}
	return doAssetRequest(c.HTTPClient, req)
}
// ReadAllowedCCY returns the allowed (max) VM concurrency for the current account.
func (c *Client) ReadAllowedCCY(ctx context.Context) (int, error) {
	req, err := requesth.NewWithContext(ctx, http.MethodGet,
		fmt.Sprintf("%s/rest/v1.2/users/%s/concurrency", c.URL, c.Username), nil)
	if err != nil {
		return 0, err
	}
	req.SetBasicAuth(c.Username, c.AccessKey)

	resp, err := c.HTTPClient.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	// Previously any non-200 response was fed straight into the JSON decoder,
	// silently yielding a zero concurrency. Fail loudly instead.
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return 0, fmt.Errorf("concurrency request failed; unexpected response code:'%d', msg:'%v'", resp.StatusCode, string(body))
	}

	var cr concurrencyResponse
	if err := json.NewDecoder(resp.Body).Decode(&cr); err != nil {
		return 0, err
	}
	return cr.Concurrency.Organization.Allowed.VMS, nil
}
// IsTunnelRunning checks whether tunnelID is running. If not, it will wait for the tunnel to become available or
// timeout. Whichever comes first.
//
// At least one check is always performed (even for wait <= 0), and the wait
// between polls respects context cancellation.
func (c *Client) IsTunnelRunning(ctx context.Context, id string, wait time.Duration) error {
	deathclock := time.Now().Add(wait)
	for {
		err := c.isTunnelRunning(ctx, id)
		if err == nil {
			return nil
		}
		if !time.Now().Before(deathclock) {
			// Deadline reached; report the last observed error.
			return err
		}
		// Sleep before the next poll, but bail out early if the context ends.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(1 * time.Second):
		}
	}
}
// isTunnelRunning performs a single check whether the tunnel identified by id
// (either its tunnel ID or its user-assigned name) is currently "running".
// It returns ErrTunnelNotFound if no matching running tunnel exists.
func (c *Client) isTunnelRunning(ctx context.Context, id string) error {
	req, err := requesth.NewWithContext(ctx, http.MethodGet,
		fmt.Sprintf("%s/rest/v1/%s/tunnels", c.URL, c.Username), nil)
	if err != nil {
		return err
	}
	req.SetBasicAuth(c.Username, c.AccessKey)

	q := req.URL.Query()
	q.Add("full", "true")
	q.Add("all", "true")
	req.URL.RawQuery = q.Encode()

	res, err := c.HTTPClient.Do(req)
	if err != nil {
		return err
	}
	// The body was previously never closed, leaking the connection on every poll.
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(res.Body)
		return fmt.Errorf("tunnel request failed; unexpected response code:'%d', msg:'%v'", res.StatusCode, string(body))
	}

	var resp availableTunnelsResponse
	if err := json.NewDecoder(res.Body).Decode(&resp); err != nil {
		return err
	}

	for _, tt := range resp {
		for _, t := range tt {
			// User could be using tunnel name (aka tunnel_identifier) or the tunnel ID. Make sure we check both.
			if t.TunnelID != id && t.ID != id {
				continue
			}
			if t.Status == "running" {
				return nil
			}
		}
	}
	return ErrTunnelNotFound
}
// StopJob stops the job on the Sauce Cloud and returns the job's state
// as reported by the server.
func (c *Client) StopJob(ctx context.Context, id string) (job.Job, error) {
	req, err := createStopRequest(ctx, c.URL, c.Username, c.AccessKey, id)
	if err != nil {
		return job.Job{}, err
	}
	return doRequest(c.HTTPClient, req)
}
// doListAssetsRequest executes a prepared asset-list request and extracts the
// asset file names from the response. Special entries ("video", "screenshots")
// and non-string values are skipped.
func doListAssetsRequest(httpClient *http.Client, request *http.Request) ([]string, error) {
	resp, err := httpClient.Do(request)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	switch {
	case resp.StatusCode >= http.StatusInternalServerError:
		return nil, ErrServerError
	case resp.StatusCode == http.StatusNotFound:
		return nil, ErrJobNotFound
	case resp.StatusCode != http.StatusOK:
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("job assets list request failed; unexpected response code:'%d', msg:'%v'", resp.StatusCode, string(body))
	}

	var filesMap map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&filesMap); err != nil {
		return []string{}, err
	}

	var filesList []string
	for name, val := range filesMap {
		if val == nil || isSpecialFile(name) {
			continue
		}
		// Only plain string entries are actual file names.
		if reflect.TypeOf(val).Kind() == reflect.String {
			filesList = append(filesList, val.(string))
		}
	}
	return filesList, nil
}
// isSpecialFile reports whether fileName is a special asset entry ("video" or
// "screenshots") rather than a plain downloadable file name.
func isSpecialFile(fileName string) bool {
	return fileName == "video" || fileName == "screenshots"
}
// doAssetRequest executes a prepared asset download request and returns the
// raw asset content.
func doAssetRequest(httpClient *http.Client, request *http.Request) ([]byte, error) {
	resp, err := httpClient.Do(request)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= http.StatusInternalServerError {
		return nil, ErrServerError
	}
	if resp.StatusCode == http.StatusNotFound {
		return nil, ErrJobNotFound
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		// Message previously said "job status request failed" — copy-paste from doRequest.
		err := fmt.Errorf("job asset request failed; unexpected response code:'%d', msg:'%v'", resp.StatusCode, string(body))
		return nil, err
	}

	return io.ReadAll(resp.Body)
}
// doRequest executes a prepared job request and decodes the response body into
// a job.Job.
func doRequest(httpClient *http.Client, request *http.Request) (job.Job, error) {
	resp, err := httpClient.Do(request)
	if err != nil {
		return job.Job{}, err
	}
	defer resp.Body.Close()

	switch {
	case resp.StatusCode >= http.StatusInternalServerError:
		return job.Job{}, ErrServerError
	case resp.StatusCode == http.StatusNotFound:
		return job.Job{}, ErrJobNotFound
	case resp.StatusCode != http.StatusOK:
		body, _ := io.ReadAll(resp.Body)
		return job.Job{}, fmt.Errorf("job status request failed; unexpected response code:'%d', msg:'%v'", resp.StatusCode, string(body))
	}

	var details job.Job
	if err := json.NewDecoder(resp.Body).Decode(&details); err != nil {
		return job.Job{}, err
	}
	return details, nil
}
// createRequest builds an authenticated GET request for the job details endpoint.
func createRequest(ctx context.Context, url, username, accessKey, jobID string) (*http.Request, error) {
	endpoint := fmt.Sprintf("%s/rest/v1.1/%s/jobs/%s", url, username, jobID)
	req, err := requesth.NewWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(username, accessKey)
	return req, nil
}
// createListAssetsRequest builds an authenticated GET request for the job
// assets listing endpoint.
func createListAssetsRequest(ctx context.Context, url, username, accessKey, jobID string) (*http.Request, error) {
	endpoint := fmt.Sprintf("%s/rest/v1/%s/jobs/%s/assets", url, username, jobID)
	req, err := requesth.NewWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	req.SetBasicAuth(username, accessKey)
	return req, nil
}
// createAssetRequest builds an authenticated GET request for downloading a
// single job asset file.
func createAssetRequest(ctx context.Context, url, username, accessKey, jobID, fileName string) (*http.Request, error) {
	endpoint := fmt.Sprintf("%s/rest/v1/%s/jobs/%s/assets/%s", url, username, jobID, fileName)
	req, err := requesth.NewWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	req.SetBasicAuth(username, accessKey)
	return req, nil
}
// createStopRequest builds an authenticated PUT request against the job stop
// endpoint.
func createStopRequest(ctx context.Context, url, username, accessKey, jobID string) (*http.Request, error) {
	endpoint := fmt.Sprintf("%s/rest/v1/%s/jobs/%s/stop", url, username, jobID)
	req, err := requesth.NewWithContext(ctx, http.MethodPut, endpoint, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(username, accessKey)
	return req, nil
}
// DownloadArtifact downloads all job assets whose names match one of the
// configured glob patterns into <ArtifactConfig.Directory>/<jobID>.
// Errors are logged rather than returned; one failed download does not abort
// the remaining ones.
func (c *Client) DownloadArtifact(jobID string) {
	targetDir := filepath.Join(c.ArtifactConfig.Directory, jobID)
	if err := os.MkdirAll(targetDir, 0755); err != nil {
		log.Error().Msgf("Unable to create %s to fetch artifacts (%v)", targetDir, err)
		return
	}
	files, err := c.GetJobAssetFileNames(context.Background(), jobID)
	if err != nil {
		log.Error().Msgf("Unable to fetch artifacts list (%v)", err)
		return
	}
	for _, f := range files {
		for _, pattern := range c.ArtifactConfig.Match {
			if glob.Glob(pattern, f) {
				if err := c.downloadArtifact(targetDir, jobID, f); err != nil {
					log.Error().Err(err).Msgf("Failed to download file: %s", f)
				}
				break // first matching pattern wins; move on to the next file
			}
		}
	}
}
// downloadArtifact fetches one asset and writes it to targetDir/fileName.
// NOTE(review): fileName comes from the server's asset listing; if it could
// ever contain path separators, filepath.Join would allow writing outside
// targetDir — confirm asset names are always flat, or sanitize here.
func (c *Client) downloadArtifact(targetDir, jobID, fileName string) error {
	content, err := c.GetJobAssetFileContent(context.Background(), jobID, fileName)
	if err != nil {
		return err
	}
	targetFile := filepath.Join(targetDir, fileName)
	return os.WriteFile(targetFile, content, 0644)
}
// platformEntry is one platform record as returned by resto's
// rest/v1.1/info/platforms/all endpoint; only the fields needed by
// GetVirtualDevices are decoded.
type platformEntry struct {
	LongName     string `json:"long_name"`     // e.g. device name including "Emulator"/"Simulator"
	ShortVersion string `json:"short_version"` // OS version string
}
// GetVirtualDevices returns the list of available virtual devices of the given
// kind (emulators, or simulators when kind == vmd.IOSSimulator). Devices are
// sorted by name and each device's OS versions are sorted ascending.
func (c *Client) GetVirtualDevices(ctx context.Context, kind string) ([]vmd.VirtualDevice, error) {
	req, err := requesth.NewWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/rest/v1.1/info/platforms/all", c.URL), nil)
	if err != nil {
		return nil, err
	}
	req.SetBasicAuth(c.Username, c.AccessKey)

	res, err := c.HTTPClient.Do(req)
	if err != nil {
		return []vmd.VirtualDevice{}, err
	}
	// The body was previously never closed, leaking the connection.
	defer res.Body.Close()

	// Previously any non-200 response was fed straight into the decoder.
	if res.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(res.Body)
		return []vmd.VirtualDevice{}, fmt.Errorf("platforms request failed; unexpected response code:'%d', msg:'%v'", res.StatusCode, string(body))
	}

	var resp []platformEntry
	if err := json.NewDecoder(res.Body).Decode(&resp); err != nil {
		return []vmd.VirtualDevice{}, err
	}

	// Platform long names embed "Emulator" or "Simulator"; filter on the requested kind.
	key := "Emulator"
	if kind == vmd.IOSSimulator {
		key = "Simulator"
	}

	// Collect the distinct OS versions per device name.
	devs := map[string]map[string]bool{}
	for _, d := range resp {
		if !strings.Contains(d.LongName, key) {
			continue
		}
		if _, ok := devs[d.LongName]; !ok {
			devs[d.LongName] = map[string]bool{}
		}
		devs[d.LongName][d.ShortVersion] = true
	}

	var dev []vmd.VirtualDevice
	for vmdName, versions := range devs {
		d := vmd.VirtualDevice{Name: vmdName}
		for version := range versions {
			d.OSVersion = append(d.OSVersion, version)
		}
		sort.Strings(d.OSVersion)
		dev = append(dev, d)
	}
	sort.Slice(dev, func(i, j int) bool {
		return dev[i].Name < dev[j].Name
	})
	return dev, nil
}
|
/*
* Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
*
* file: ipfix_dm.go
* details: IPFIX packet handler for Data Manager
*
*/
package msghandler
import (
"bytes"
"encoding/json"
opts "github.com/Juniper/collector/flow-translator/options"
)
// IPFIXDMMessage represents a decoded IPFIX message as received from the flow
// translator, carrying one map per data set.
type IPFIXDMMessage struct {
	AgentID   string                   `json:"AgentID"`
	Header    map[string]interface{}   `json:"Header"`
	DataSets  []map[string]interface{} `json:"DataSets"`
	Timestamp interface{}              `json:"Timestamp"` // decoded with UseNumber, so normally a json.Number (ms since epoch)
	RoomKey   string                   `json:"roomKey"`
}
// IPFIXAugmentedDMMessage is the per-data-set variant of IPFIXDMMessage sent to
// the Data Manager: the original message is split so each copy carries exactly
// one data set.
type IPFIXAugmentedDMMessage struct {
	AgentID   string                 `json:"AgentID"`
	Header    map[string]interface{} `json:"Header"`
	DataSets  map[string]interface{} `json:"DataSets"`
	Timestamp interface{}            `json:"Timestamp"` // seconds since epoch after conversion
	RoomKey   string                 `json:"roomKey"`
}
// serializeIPFIXData fans an IPFIX message out into one DMMessage per data set.
// The message timestamp (milliseconds) is converted to seconds. On a missing or
// malformed timestamp an empty result is returned.
func serializeIPFIXData(msg *IPFIXDMMessage) []DMMessage {
	var (
		emptyData []DMMessage
	)
	res := make([]DMMessage, len(msg.DataSets))
	opts.Logger.Printf("getting msg.Timestamp as %v %T:", msg.Timestamp,
		msg.Timestamp)
	// The decoder uses UseNumber, so a well-formed message carries a json.Number
	// here; the previous unchecked type assertion panicked on anything else
	// (including a missing Timestamp, which decodes to nil).
	num, ok := msg.Timestamp.(json.Number)
	if !ok {
		opts.Logger.Printf("unexpected Timestamp type %T", msg.Timestamp)
		return emptyData
	}
	timeStamp, err := num.Int64()
	if err != nil {
		opts.Logger.Println("TimStamp err:", err)
		return emptyData
	}
	timeStamp = timeStamp / 1000 // ms -> s

	for i, dataSet := range msg.DataSets {
		res[i] = DMMessage{CollectionName: opts.IPFIXCollection,
			Data: IPFIXAugmentedDMMessage{Header: msg.Header, DataSets: dataSet,
				AgentID:   msg.AgentID,
				RoomKey:   msg.AgentID,
				Timestamp: timeStamp}}
		var emptyTM struct{}
		res[i].TailwindManager = &emptyTM
	}
	return res
}
// SerializeIPFIXMsgInDM decodes a raw IPFIX JSON message and fans it out into
// Data Manager messages, one per data set. Numbers are decoded as json.Number
// so 64-bit timestamps are not truncated through float64.
func SerializeIPFIXMsgInDM(msg []byte) ([]DMMessage, error) {
	d := json.NewDecoder(bytes.NewReader(msg))
	d.UseNumber()
	var ipfixMsg IPFIXDMMessage
	if err := d.Decode(&ipfixMsg); err != nil {
		// Was previously mislabeled as an sFlow decode error (copy-paste).
		opts.Logger.Println("IPFIX message decode error:", err)
		return nil, err
	}
	dmMsgs := serializeIPFIXData(&ipfixMsg)
	return dmMsgs, nil
}
|
package executor
import "fmt"
// Config holds the connection parameters for a database executor.
type Config struct {
	Host     string // server host name or IP
	Port     int    // server TCP port
	User     string // login user
	Password string // login password
	DB       string // database name
	Options  string // extra DSN query options, e.g. "charset=utf8"
}

// DSN returns the MySQL-style data source name built from the config:
// user:password@tcp(host:port)/db?options
func (c *Config) DSN() string {
	return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?%s",
		c.User, c.Password, c.Host, c.Port, c.DB, c.Options)
}

// Address returns the "host:port" network address of the server.
func (c *Config) Address() string {
	return fmt.Sprintf("%s:%d", c.Host, c.Port)
}
|
package utils
import (
"bytes"
)
// StringsInsertRuneStep inserts sep between every group of step runes of s,
// e.g. ("abcdef", 2, "-") -> "ab-cd-ef". No separator is appended after the
// final group. If step <= 0 the string is returned unchanged.
//
// Fixes: the previous version used the byte offset from `range` as the rune
// counter, misplacing separators in strings containing multi-byte runes, and
// panicked (division by zero) for step == 0.
func StringsInsertRuneStep(s string, step int, sep string) string {
	if step <= 0 {
		return s
	}
	buffer := bytes.Buffer{}
	idx := 0 // rune index, independent of byte offsets
	for _, char := range s {
		// Emit the separator before each group boundary (never trailing).
		if idx > 0 && idx%step == 0 {
			buffer.WriteString(sep)
		}
		buffer.WriteRune(char)
		idx++
	}
	return buffer.String()
}
|
package benchmarks
import (
"net/url"
"testing"
"github.com/go-playground/form/v4"
)
// Simple Benchmarks
// User is the simple flat fixture struct shared by the form/schema/formam benchmarks.
type User struct {
	FirstName string `form:"fname" schema:"fname" formam:"fname"`
	LastName  string `form:"lname" schema:"lname" formam:"lname"`
	Email     string `form:"email" schema:"email" formam:"email"`
	Age       uint8  `form:"age" schema:"age" formam:"age"`
}

// getUserStructValues returns the url.Values equivalent of getUserStruct.
func getUserStructValues() url.Values {
	vals := make(url.Values, 4)
	vals.Set("fname", "Joey")
	vals.Set("lname", "Bloggs")
	vals.Set("email", "joeybloggs@gmail.com")
	vals.Set("age", "32")
	return vals
}

// getUserStruct returns the populated User fixture.
func getUserStruct() *User {
	u := User{
		FirstName: "Joey",
		LastName:  "Bloggs",
		Email:     "joeybloggs@gmail.com",
		Age:       32,
	}
	return &u
}
// BenchmarkSimpleUserDecodeStruct decodes url.Values into a flat User struct.
func BenchmarkSimpleUserDecodeStruct(b *testing.B) {
	values := getUserStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		var test User
		if err := decoder.Decode(&test, values); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkSimpleUserDecodeStructParallel is the parallel variant of
// BenchmarkSimpleUserDecodeStruct.
func BenchmarkSimpleUserDecodeStructParallel(b *testing.B) {
	values := getUserStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var test User
			if err := decoder.Decode(&test, values); err != nil {
				b.Error(err)
			}
		}
	})
}

// BenchmarkSimpleUserEncodeStruct encodes the User fixture back to url.Values.
// NOTE(review): test is already a *User, so &test hands a **User to Encode —
// confirm the double indirection is intended.
func BenchmarkSimpleUserEncodeStruct(b *testing.B) {
	test := getUserStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := encoder.Encode(&test); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkSimpleUserEncodeStructParallel is the parallel variant of
// BenchmarkSimpleUserEncodeStruct.
func BenchmarkSimpleUserEncodeStructParallel(b *testing.B) {
	test := getUserStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if _, err := encoder.Encode(&test); err != nil {
				b.Error(err)
			}
		}
	})
}
// Primitives ALL types
// PrimitivesStruct covers every primitive type supported by the form
// decoder/encoder, using default (field-name) keys.
type PrimitivesStruct struct {
	String  string
	Int     int
	Int8    int8
	Int16   int16
	Int32   int32
	Int64   int64
	Uint    uint
	Uint8   uint8
	Uint16  uint16
	Uint32  uint32
	Uint64  uint64
	Float32 float32
	Float64 float64
	Bool    bool
}

// getPrimitivesStructValues returns the url.Values form of getPrimitivesStruct.
func getPrimitivesStructValues() url.Values {
	vals := make(url.Values, 14)
	vals.Set("String", "joeybloggs")
	vals.Set("Int", "1")
	vals.Set("Int8", "2")
	vals.Set("Int16", "3")
	vals.Set("Int32", "4")
	vals.Set("Int64", "5")
	vals.Set("Uint", "1")
	vals.Set("Uint8", "2")
	vals.Set("Uint16", "3")
	vals.Set("Uint32", "4")
	vals.Set("Uint64", "5")
	vals.Set("Float32", "1.1")
	vals.Set("Float64", "5.0")
	vals.Set("Bool", "true")
	return vals
}

// getPrimitivesStruct returns the struct fixture equivalent to
// getPrimitivesStructValues.
func getPrimitivesStruct() *PrimitivesStruct {
	p := PrimitivesStruct{
		String:  "joeybloggs",
		Int:     1,
		Int8:    2,
		Int16:   3,
		Int32:   4,
		Int64:   5,
		Uint:    1,
		Uint8:   2,
		Uint16:  3,
		Uint32:  4,
		Uint64:  5,
		Float32: 1.1,
		Float64: 5.0,
		Bool:    true,
	}
	return &p
}
// BenchmarkPrimitivesDecodeStructAllPrimitivesTypes decodes every primitive type.
func BenchmarkPrimitivesDecodeStructAllPrimitivesTypes(b *testing.B) {
	values := getPrimitivesStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		var test PrimitivesStruct
		if err := decoder.Decode(&test, values); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkPrimitivesDecodeStructAllPrimitivesTypesParallel is the parallel variant.
func BenchmarkPrimitivesDecodeStructAllPrimitivesTypesParallel(b *testing.B) {
	values := getPrimitivesStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var test PrimitivesStruct
			if err := decoder.Decode(&test, values); err != nil {
				b.Error(err)
			}
		}
	})
}

// BenchmarkPrimitivesEncodeStructAllPrimitivesTypes encodes every primitive type.
// NOTE(review): test is already a pointer, so &test passes a double pointer to
// Encode — confirm intended.
func BenchmarkPrimitivesEncodeStructAllPrimitivesTypes(b *testing.B) {
	test := getPrimitivesStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := encoder.Encode(&test); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkPrimitivesEncodeStructAllPrimitivesTypesParallel is the parallel variant.
func BenchmarkPrimitivesEncodeStructAllPrimitivesTypesParallel(b *testing.B) {
	test := getPrimitivesStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if _, err := encoder.Encode(&test); err != nil {
				b.Error(err)
			}
		}
	})
}
// Complex Array ALL types
// ComplexArrayStruct covers slices (and slices of pointers) of every primitive
// type, plus nested slices, for the decode/encode benchmarks.
type ComplexArrayStruct struct {
	String       []string
	StringPtr    []*string
	Int          []int
	IntPtr       []*int
	Int8         []int8
	Int8Ptr      []*int8
	Int16        []int16
	Int16Ptr     []*int16
	Int32        []int32
	Int32Ptr     []*int32
	Int64        []int64
	Int64Ptr     []*int64
	Uint         []uint
	UintPtr      []*uint
	Uint8        []uint8
	Uint8Ptr     []*uint8
	Uint16       []uint16
	Uint16Ptr    []*uint16
	Uint32       []uint32
	Uint32Ptr    []*uint32
	Uint64       []uint64
	Uint64Ptr    []*uint64
	NestedInt    [][]int
	NestedIntPtr [][]*int
}

// getComplexArrayStructValues returns the url.Values form of the fixture.
// It mixes the repeated-key style ("Int") with the explicitly indexed style
// ("Int8[0]") accepted by the form decoder.
func getComplexArrayStructValues() url.Values {
	return url.Values{
		"String":             []string{"joeybloggs"},
		"StringPtr":          []string{"joeybloggs"},
		"Int":                []string{"1", "2"},
		"IntPtr":             []string{"1", "2"},
		"Int8[0]":            []string{"1"},
		"Int8[1]":            []string{"2"},
		"Int8Ptr[0]":         []string{"1"},
		"Int8Ptr[1]":         []string{"2"},
		"Int16":              []string{"1", "2"},
		"Int16Ptr":           []string{"1", "2"},
		"Int32":              []string{"1", "2"},
		"Int32Ptr":           []string{"1", "2"},
		"Int64":              []string{"1", "2"},
		"Int64Ptr":           []string{"1", "2"},
		"Uint":               []string{"1", "2"},
		"UintPtr":            []string{"1", "2"},
		"Uint8[0]":           []string{"1"},
		"Uint8[1]":           []string{"2"},
		"Uint8Ptr[0]":        []string{"1"},
		"Uint8Ptr[1]":        []string{"2"},
		"Uint16":             []string{"1", "2"},
		"Uint16Ptr":          []string{"1", "2"},
		"Uint32":             []string{"1", "2"},
		"Uint32Ptr":          []string{"1", "2"},
		"Uint64":             []string{"1", "2"},
		"Uint64Ptr":          []string{"1", "2"},
		"NestedInt[0][0]":    []string{"1"},
		"NestedIntPtr[0][1]": []string{"1"},
	}
}

// getComplexArrayStruct returns the struct fixture equivalent of the values
// above. Pointer fields all point at the locals declared below.
func getComplexArrayStruct() *ComplexArrayStruct {
	s := "joeybloggs"
	i1 := int(1)
	i2 := int(2)
	i81 := int8(1)
	i82 := int8(2)
	i161 := int16(1)
	i162 := int16(2)
	i321 := int32(1)
	i322 := int32(2)
	i641 := int64(1)
	i642 := int64(2)
	ui1 := uint(1)
	ui2 := uint(2)
	ui81 := uint8(1)
	ui82 := uint8(2)
	ui161 := uint16(1)
	ui162 := uint16(2)
	ui321 := uint32(1)
	ui322 := uint32(2)
	ui641 := uint64(1)
	ui642 := uint64(2)
	return &ComplexArrayStruct{
		String:       []string{s},
		StringPtr:    []*string{&s},
		Int:          []int{i1, i2},
		IntPtr:       []*int{&i1, &i2},
		Int8:         []int8{i81, i82},
		Int8Ptr:      []*int8{&i81, &i82},
		Int16:        []int16{i161, i162},
		Int16Ptr:     []*int16{&i161, &i162},
		Int32:        []int32{i321, i322},
		Int32Ptr:     []*int32{&i321, &i322},
		Int64:        []int64{i641, i642},
		Int64Ptr:     []*int64{&i641, &i642},
		Uint:         []uint{ui1, ui2},
		UintPtr:      []*uint{&ui1, &ui2},
		Uint8:        []uint8{ui81, ui82},
		Uint8Ptr:     []*uint8{&ui81, &ui82},
		Uint16:       []uint16{ui161, ui162},
		Uint16Ptr:    []*uint16{&ui161, &ui162},
		Uint32:       []uint32{ui321, ui322},
		Uint32Ptr:    []*uint32{&ui321, &ui322},
		Uint64:       []uint64{ui641, ui642},
		Uint64Ptr:    []*uint64{&ui641, &ui642},
		NestedInt:    [][]int{{i1}},
		NestedIntPtr: [][]*int{nil, {&i1}},
	}
}
// BenchmarkComplexArrayDecodeStructAllTypes decodes all slice/pointer-slice types.
func BenchmarkComplexArrayDecodeStructAllTypes(b *testing.B) {
	values := getComplexArrayStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		var test ComplexArrayStruct
		if err := decoder.Decode(&test, values); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkComplexArrayDecodeStructAllTypesParallel is the parallel variant.
func BenchmarkComplexArrayDecodeStructAllTypesParallel(b *testing.B) {
	values := getComplexArrayStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var test ComplexArrayStruct
			if err := decoder.Decode(&test, values); err != nil {
				b.Error(err)
			}
		}
	})
}

// BenchmarkComplexArrayEncodeStructAllTypes encodes all slice/pointer-slice types.
// NOTE(review): test is already a pointer, so &test passes a double pointer to
// Encode — confirm intended.
func BenchmarkComplexArrayEncodeStructAllTypes(b *testing.B) {
	test := getComplexArrayStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := encoder.Encode(&test); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkComplexArrayEncodeStructAllTypesParallel is the parallel variant.
func BenchmarkComplexArrayEncodeStructAllTypesParallel(b *testing.B) {
	test := getComplexArrayStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if _, err := encoder.Encode(&test); err != nil {
				b.Error(err)
			}
		}
	})
}
// Complex Map ALL types
// ComplexMapStruct covers maps keyed/valued by every primitive type (and
// pointers to them), plus nested maps, for the decode/encode benchmarks.
type ComplexMapStruct struct {
	String       map[string]string
	StringPtr    map[*string]*string
	Int          map[int]int
	IntPtr       map[*int]*int
	Int8         map[int8]int8
	Int8Ptr      map[*int8]*int8
	Int16        map[int16]int16
	Int16Ptr     map[*int16]*int16
	Int32        map[int32]int32
	Int32Ptr     map[*int32]*int32
	Int64        map[int64]int64
	Int64Ptr     map[*int64]*int64
	Uint         map[uint]uint
	UintPtr      map[*uint]*uint
	Uint8        map[uint8]uint8
	Uint8Ptr     map[*uint8]*uint8
	Uint16       map[uint16]uint16
	Uint16Ptr    map[*uint16]*uint16
	Uint32       map[uint32]uint32
	Uint32Ptr    map[*uint32]*uint32
	Uint64       map[uint64]uint64
	Uint64Ptr    map[*uint64]*uint64
	NestedInt    map[int]map[int]int
	NestedIntPtr map[*int]map[*int]*int
}

// getComplexMapStructValues returns the url.Values form of the map fixture,
// using the bracketed map-key syntax of the form decoder.
func getComplexMapStructValues() url.Values {
	return url.Values{
		"String[key]":        []string{"value"},
		"StringPtr[key]":     []string{"value"},
		"Int[0]":             []string{"1"},
		"IntPtr[0]":          []string{"1"},
		"Int8[0]":            []string{"1"},
		"Int8Ptr[0]":         []string{"1"},
		"Int16[0]":           []string{"1"},
		"Int16Ptr[0]":        []string{"1"},
		"Int32[0]":           []string{"1"},
		"Int32Ptr[0]":        []string{"1"},
		"Int64[0]":           []string{"1"},
		"Int64Ptr[0]":        []string{"1"},
		"Uint[0]":            []string{"1"},
		"UintPtr[0]":         []string{"1"},
		"Uint8[0]":           []string{"1"},
		"Uint8Ptr[0]":        []string{"1"},
		"Uint16[0]":          []string{"1"},
		"Uint16Ptr[0]":       []string{"1"},
		"Uint32[0]":          []string{"1"},
		"Uint32Ptr[0]":       []string{"1"},
		"Uint64[0]":          []string{"1"},
		"Uint64Ptr[0]":       []string{"1"},
		"NestedInt[1][2]":    []string{"3"},
		"NestedIntPtr[1][2]": []string{"3"},
	}
}

// getComplexMapStruct returns the struct fixture equivalent of the values above.
func getComplexMapStruct() *ComplexMapStruct {
	key := "key"
	val := "value"
	i0 := int(0)
	i1 := int(1)
	i2 := int(2)
	i3 := int(3)
	i80 := int8(0)
	i81 := int8(1)
	i160 := int16(0)
	i161 := int16(1)
	i320 := int32(0)
	i321 := int32(1)
	i640 := int64(0)
	i641 := int64(1)
	ui0 := uint(0)
	ui1 := uint(1)
	ui80 := uint8(0)
	ui81 := uint8(1)
	ui160 := uint16(0)
	ui161 := uint16(1)
	ui320 := uint32(0)
	ui321 := uint32(1)
	ui640 := uint64(0)
	ui641 := uint64(1)
	return &ComplexMapStruct{
		String:       map[string]string{key: val},
		StringPtr:    map[*string]*string{&key: &val},
		Int:          map[int]int{i0: i1},
		IntPtr:       map[*int]*int{&i0: &i1},
		Int8:         map[int8]int8{i80: i81},
		Int8Ptr:      map[*int8]*int8{&i80: &i81},
		Int16:        map[int16]int16{i160: i161},
		Int16Ptr:     map[*int16]*int16{&i160: &i161},
		Int32:        map[int32]int32{i320: i321},
		Int32Ptr:     map[*int32]*int32{&i320: &i321},
		Int64:        map[int64]int64{i640: i641},
		Int64Ptr:     map[*int64]*int64{&i640: &i641},
		Uint:         map[uint]uint{ui0: ui1},
		UintPtr:      map[*uint]*uint{&ui0: &ui1},
		Uint8:        map[uint8]uint8{ui80: ui81},
		Uint8Ptr:     map[*uint8]*uint8{&ui80: &ui81},
		Uint16:       map[uint16]uint16{ui160: ui161},
		Uint16Ptr:    map[*uint16]*uint16{&ui160: &ui161},
		Uint32:       map[uint32]uint32{ui320: ui321},
		Uint32Ptr:    map[*uint32]*uint32{&ui320: &ui321},
		Uint64:       map[uint64]uint64{ui640: ui641},
		Uint64Ptr:    map[*uint64]*uint64{&ui640: &ui641},
		NestedInt:    map[int]map[int]int{i1: {i2: i3}},
		NestedIntPtr: map[*int]map[*int]*int{&i1: {&i2: &i3}},
	}
}
// BenchmarkComplexMapDecodeStructAllTypes decodes all map key/value types.
func BenchmarkComplexMapDecodeStructAllTypes(b *testing.B) {
	values := getComplexMapStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		var test ComplexMapStruct
		if err := decoder.Decode(&test, values); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkComplexMapDecodeStructAllTypesParallel is the parallel variant.
func BenchmarkComplexMapDecodeStructAllTypesParallel(b *testing.B) {
	values := getComplexMapStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var test ComplexMapStruct
			if err := decoder.Decode(&test, values); err != nil {
				b.Error(err)
			}
		}
	})
}

// BenchmarkComplexMapEncodeStructAllTypes encodes all map key/value types.
// NOTE(review): test is already a pointer, so &test passes a double pointer to
// Encode — confirm intended.
func BenchmarkComplexMapEncodeStructAllTypes(b *testing.B) {
	test := getComplexMapStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := encoder.Encode(&test); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkComplexMapEncodeStructAllTypesParallel is the parallel variant.
func BenchmarkComplexMapEncodeStructAllTypesParallel(b *testing.B) {
	test := getComplexMapStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if _, err := encoder.Encode(&test); err != nil {
				b.Error(err)
			}
		}
	})
}
// NestedStruct Benchmarks
// Nested2 is a recursively nested fixture type (one level is populated below).
type Nested2 struct {
	Value   string
	Nested2 *Nested2
}

// Nested is a single-field fixture type, also used embedded.
type Nested struct {
	Value string
}

// NestedStruct combines embedding, slices of structs and struct pointers, and
// a recursive struct field.
type NestedStruct struct {
	Nested
	NestedArray    []Nested
	NestedPtrArray []*Nested
	Nested2        Nested2
}

// getNestedStructValues returns the url.Values equivalent of getNestedStruct.
func getNestedStructValues() url.Values {
	return url.Values{
		// Nested Field
		"Value": []string{"value"},

		// Nested Array
		"NestedArray[0].Value": []string{"value"},
		"NestedArray[1].Value": []string{"value"},

		// Nested Array Ptr
		"NestedPtrArray[0].Value": []string{"value"},
		"NestedPtrArray[1].Value": []string{"value"},

		// Nested 2
		"Nested2.Value":         []string{"value"},
		"Nested2.Nested2.Value": []string{"value"},
	}
}

// getNestedStruct returns the struct fixture equivalent to getNestedStructValues.
func getNestedStruct() *NestedStruct {
	nested := Nested{
		Value: "value",
	}

	nested2 := Nested2{
		Value:   "value",
		Nested2: &Nested2{Value: "value"},
	}

	return &NestedStruct{
		Nested:         nested,
		NestedArray:    []Nested{nested, nested},
		NestedPtrArray: []*Nested{&nested, &nested},
		Nested2:        nested2,
	}
}
// BenchmarkDecodeNestedStruct decodes embedded, sliced and recursive struct fields.
func BenchmarkDecodeNestedStruct(b *testing.B) {
	values := getNestedStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		var test NestedStruct
		if err := decoder.Decode(&test, values); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkDecodeNestedStructParallel is the parallel variant.
func BenchmarkDecodeNestedStructParallel(b *testing.B) {
	values := getNestedStructValues()
	decoder := form.NewDecoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var test NestedStruct
			if err := decoder.Decode(&test, values); err != nil {
				b.Error(err)
			}
		}
	})
}

// BenchmarkEncodeNestedStruct encodes embedded, sliced and recursive struct fields.
// NOTE(review): test is already a pointer, so &test passes a double pointer to
// Encode — confirm intended.
func BenchmarkEncodeNestedStruct(b *testing.B) {
	test := getNestedStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := encoder.Encode(&test); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkEncodeNestedStructParallel is the parallel variant.
func BenchmarkEncodeNestedStructParallel(b *testing.B) {
	test := getNestedStruct()
	encoder := form.NewEncoder()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if _, err := encoder.Encode(&test); err != nil {
				b.Error(err)
			}
		}
	})
}
|
package agent
import (
"encoding/json"
"time"
"dudu/models"
"dudu/modules/collector"
_ "dudu/modules/collector/collect"
)
// initCollect initializes the collector manager from the agent configuration.
func (app *AgentNode) initCollect() error {
	app.collectorMag = collector.NewCollectorManager(app.logger, app.cfg.Agent.Collects)
	return nil
}
// startCollect starts the collectors and spawns a goroutine that pushes their
// results; the goroutine is tracked via app.wg so shutdown can wait for it.
func (app *AgentNode) startCollect() {
	collectResultChan := app.collectorMag.Run()

	app.wg.Add(1)
	go func() {
		defer app.wg.Done()
		app.asyncPush(collectResultChan)
	}()
}
// stopCollect stops all collectors; this eventually closes the result channel
// consumed by asyncPush.
func (app *AgentNode) stopCollect() {
	app.collectorMag.Stop()
}
// asyncPush batches collected results and pushes them when either the batch is
// full (batchLength) or the batch timer fires (batchDuration). When the input
// channel closes, any remaining results are flushed before returning.
func (app *AgentNode) asyncPush(collectResultChan <-chan *models.CollectResult) {
	// Maximum time a batch may wait before being flushed.
	batchDuration := time.Second * time.Duration(app.cfg.Agent.BatchDuration)
	if batchDuration <= 0 {
		batchDuration = time.Second * 5 // default: flush at least every 5 seconds
	}

	batchLength := app.cfg.Agent.BatchLength
	if batchLength <= 0 {
		batchLength = 100 // default: at most 100 entries per batch
	}

	timer := time.NewTimer(batchDuration)
	collectResults := make([]*models.CollectResult, 0, batchLength)
	for {
		select {
		case <-timer.C:
			// Time-based flush of a partially filled batch.
			if len(collectResults) > 0 {
				if err := app.push(collectResults); err != nil {
					app.logger.Warnf("push collect results err:%s", err.Error())
				}
				collectResults = collectResults[0:0] // reset batch, keep capacity
			}
			timer.Reset(batchDuration) // restart the flush window
		case collectResult, ok := <-collectResultChan:
			if !ok {
				// Channel closed: flush whatever is left and exit.
				if len(collectResults) > 0 {
					if err := app.push(collectResults); err != nil {
						app.logger.Warnf("push collect results err:%s", err.Error())
					}
					collectResults = collectResults[0:0] // reset batch
				}
				app.logger.Info("清理完成剩余日志")
				timer.Stop()
				return
			}
			collectResults = append(collectResults, collectResult)
			// Size-based flush once the batch is full.
			if len(collectResults) >= batchLength {
				if err := app.push(collectResults); err != nil {
					app.logger.Warnf("push collect results err:%s", err.Error())
				}
				collectResults = collectResults[0:0] // reset batch
				timer.Reset(batchDuration)           // restart the flush window
			}
		}
	}
}
// push serializes a batch of results, optionally compresses it with the
// configured compactor, wraps it in a MetricValue envelope and hands it to the
// pipe for delivery.
func (app *AgentNode) push(collectResults []*models.CollectResult) (err error) {
	value, err := json.Marshal(collectResults)
	if err != nil {
		return
	}

	// Compress the payload if a compactor is configured, recording its name so
	// the receiver knows how to decode the value. The error is now checked
	// immediately instead of after the if-block, and the compactor name is only
	// set once encoding succeeded.
	var compactorName string
	if app.compactor != nil {
		value, err = app.compactor.Encode(value)
		if err != nil {
			return
		}
		compactorName = app.compactor.Name()
	}

	metric := &models.MetricValue{
		Endpoint:  app.cfg.IP,
		HostName:  app.cfg.HostName,
		Compactor: compactorName,
		Value:     value,
		Tags:      "",
		Timestamp: time.Now().UnixNano() / int64(time.Millisecond), // milliseconds since epoch
	}

	data, err := json.Marshal(metric)
	if err != nil {
		return
	}
	return app.pipe.Push(data)
}
|
package server
import (
"net/http"
"github.com/UnnecessaryRain/ironway-core/pkg/network/client"
"github.com/gorilla/websocket"
log "github.com/sirupsen/logrus"
)
// upgrader upgrades incoming HTTP requests to websocket connections.
// NOTE(review): CheckOrigin unconditionally returns true, which disables the
// same-origin check and permits cross-site websocket connections — confirm
// this is intended before exposing the endpoint publicly.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}
// serveSocket upgrades the incoming HTTP request to a websocket connection and
// starts the reader and writer goroutines for the new client.
func serveSocket(server *Server, w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		return
	}

	// Use a short local name so the imported "client" package is not shadowed.
	c := client.NewClient(conn, server.receivedChan, server.registerChan, server.unregisterChan)
	go c.StartWriter()
	go c.StartReader()
}
|
// Copyright © 2019 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package azure
import (
"github.com/Azure/azure-sdk-for-go/services/monitor/mgmt/2017-09-01/insights"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/banzaicloud/azure-aks-client/cluster"
"github.com/goph/emperror"
)
// NewActivityLogsClient instantiates a new Azure Activity Logs client using the
// specified service principal credentials.
func NewActivityLogsClient(creds *cluster.AKSCredential) (*insights.ActivityLogsClient, error) {
	// Build the OAuth client-credentials config first, then request an authorizer from it.
	credConfig := auth.NewClientCredentialsConfig(creds.ClientId, creds.ClientSecret, creds.TenantId)
	authorizer, err := credConfig.Authorizer()
	if err != nil {
		return nil, emperror.Wrap(err, "failed to instantiate new Authorizer from Azure client credentials")
	}

	logsClient := insights.NewActivityLogsClient(creds.SubscriptionId)
	logsClient.Authorizer = authorizer

	return &logsClient, nil
}
|
package main
import "fmt"
// nonempty filters out empty strings in place, reusing the input's backing
// array; the caller's slice contents are overwritten.
func nonempty(strings []string) []string {
	n := 0
	for _, s := range strings {
		if s == "" {
			continue
		}
		strings[n] = s
		n++
	}
	return strings[:n]
}
// nonempty2 filters out empty strings using append onto a zero-length slice
// that shares the input's backing array (so the input is overwritten too).
func nonempty2(strings []string) []string {
	kept := strings[:0]
	for _, s := range strings {
		if s == "" {
			continue
		}
		kept = append(kept, s)
	}
	return kept
}
// remove deletes the element at index i, preserving element order by shifting
// the tail left; the input's backing array is modified.
func remove(slice []int, i int) []int {
	last := len(slice) - 1
	copy(slice[i:], slice[i+1:])
	return slice[:last]
}
// remove2 deletes the element at index i in O(1) by swapping in the last
// element; element order is NOT preserved.
func remove2(slice []int, i int) []int {
	n := len(slice)
	slice[i] = slice[n-1]
	return slice[:n-1]
}
// main demonstrates slice re-slicing, order-breaking removal, and slicing a
// copy of a map's array value.
func main() {
	s := []int{5, 6, 7, 8, 9}
	// s[:len(s)] is the full slice; prints [5 6 7 8 9].
	fmt.Println(s[:len(s)])
	// remove2 swaps the last element into index 2, so order is not preserved.
	fmt.Println(remove2(s, 2))

	m := map[string][2]int{
		"a": {1, 2},
		"b": {3, 4},
	}
	// Map values are not addressable; copy the array out first, then slice the copy.
	s1 := m["a"]
	s2 := s1[:]
	fmt.Printf("sssss %v %v \n", s1, s2)

	// %T prints the full (deeply nested) slice type.
	ssss := [][][][][][]int{}
	fmt.Printf("%T \n", ssss)
}
|
package local_test
import (
"bytes"
"compress/gzip"
"context"
"io"
"io/ioutil"
"math/rand"
"os"
"reflect"
"strings"
"testing"
"time"
"github.com/fishy/fsdb"
"github.com/fishy/fsdb/local"
)
// lorem is the multi-line sample payload written to and read back from the
// database in the tests below.
const lorem = `Lorem ipsum dolor sit amet,
consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam,
quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident,
sunt in culpa qui officia deserunt mollit anim id est laborum.`
// TestReadWriteDelete exercises the basic lifecycle of a key without gzip:
// read/delete of a missing key, write, repeated read, overwrite, delete.
// NOTE(review): ioutil.TempDir is deprecated in modern Go (os.MkdirTemp);
// left as-is for consistency with the file's Go version.
func TestReadWriteDelete(t *testing.T) {
	root, err := ioutil.TempDir("", "fsdb_")
	if err != nil {
		t.Fatalf("failed to get tmp dir: %v", err)
	}
	defer os.RemoveAll(root)

	opts := local.NewDefaultOptions(root).SetUseGzip(false)
	db := local.Open(opts)
	key := fsdb.Key("foo")

	// Empty
	testDeleteEmpty(t, db, key)
	testReadEmpty(t, db, key)

	// Write
	testWrite(t, db, key, lorem)
	testRead(t, db, key, lorem)
	// A second read must return the same content (reads are repeatable).
	testRead(t, db, key, lorem)

	// Overwrite
	content := ""
	testWrite(t, db, key, content)
	testRead(t, db, key, content)

	// Delete
	testDelete(t, db, key)
	testReadEmpty(t, db, key)
}
// TestGzip repeats the basic key lifecycle with gzip compression enabled,
// verifying behavior is identical to the uncompressed case.
func TestGzip(t *testing.T) {
	root, err := ioutil.TempDir("", "fsdb_")
	if err != nil {
		t.Fatalf("failed to get tmp dir: %v", err)
	}
	defer os.RemoveAll(root)

	opts := local.NewDefaultOptions(root).SetUseGzip(true)
	db := local.Open(opts)
	key := fsdb.Key("foo")

	// Empty
	testDeleteEmpty(t, db, key)
	testReadEmpty(t, db, key)

	// Write
	testWrite(t, db, key, lorem)
	testRead(t, db, key, lorem)
	// A second read must return the same content (reads are repeatable).
	testRead(t, db, key, lorem)

	// Overwrite
	content := ""
	testWrite(t, db, key, content)
	testRead(t, db, key, content)

	// Delete
	testDelete(t, db, key)
	testReadEmpty(t, db, key)
}
// TestChangeCompression verifies that values written by a gzip-enabled
// instance are readable by a non-gzip instance over the same root directory,
// and vice versa.
func TestChangeCompression(t *testing.T) {
	root, err := ioutil.TempDir("", "fsdb_")
	if err != nil {
		t.Fatalf("failed to get tmp dir: %v", err)
	}
	defer os.RemoveAll(root)
	gzipOpts := local.NewDefaultOptions(root).SetUseGzip(true)
	gzipDb := local.Open(gzipOpts)
	key := fsdb.Key("foo")
	// Written compressed...
	testWrite(t, gzipDb, key, lorem)
	testRead(t, gzipDb, key, lorem)
	// ...must be readable by an uncompressed instance on the same root.
	opts := local.NewDefaultOptions(root).SetUseGzip(false)
	db := local.Open(opts)
	testRead(t, db, key, lorem)
	// Overwrite uncompressed; both instances must observe the new value.
	content := ""
	testWrite(t, db, key, content)
	testRead(t, db, key, content)
	testRead(t, gzipDb, key, content)
	testDelete(t, gzipDb, key)
	testReadEmpty(t, gzipDb, key)
}
// TestScan covers ScanKeys on an empty db, on a populated db, and early
// termination when the callback returns false.
func TestScan(t *testing.T) {
	ctx := context.Background()
	root, err := ioutil.TempDir("", "fsdb_")
	if err != nil {
		t.Fatalf("failed to get tmp dir: %v", err)
	}
	defer os.RemoveAll(root)
	opts := local.NewDefaultOptions(root)
	db := local.Open(opts)
	keys := make(map[string]bool)
	// keyFunc records every visited key into keys and returns ret, which
	// tells ScanKeys whether to keep going (true) or stop (false).
	keyFunc := func(ret bool) func(key fsdb.Key) bool {
		return func(key fsdb.Key) bool {
			keys[string(key)] = true
			return ret
		}
	}
	// Scanning an empty db must visit nothing.
	err = db.ScanKeys(ctx, keyFunc(true), fsdb.IgnoreAll)
	if err != nil {
		t.Fatalf("ScanKeys failed: %v", err)
	}
	if len(keys) != 0 {
		t.Errorf("Scan empty db got keys: %+v", keys)
	}
	expectKeys := map[string]bool{
		"foo": true,
		"bar": true,
		"foobar": true,
	}
	for key := range expectKeys {
		testWrite(t, db, fsdb.Key(key), "")
	}
	// A full scan must visit exactly the written keys.
	if err := db.ScanKeys(ctx, keyFunc(true), fsdb.StopAll); err != nil {
		t.Fatalf("ScanKeys failed: %v", err)
	}
	if !reflect.DeepEqual(keys, expectKeys) {
		t.Errorf("ScanKeys expected %+v, got %+v", expectKeys, keys)
	}
	// Returning false from the callback must stop after the first key.
	keys = make(map[string]bool)
	if err := db.ScanKeys(ctx, keyFunc(false), fsdb.StopAll); err != nil {
		t.Fatalf("ScanKeys failed: %v", err)
	}
	if len(keys) != 1 {
		t.Errorf("Scan should stop after the first key, got: %+v", keys)
	}
}
// TestScanCancel verifies that ScanKeys aborts when its context deadline
// expires: with two keys each taking 100ms to visit and a 50ms deadline, the
// scan must return context.DeadlineExceeded without visiting every key.
func TestScanCancel(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}
	sleep := time.Millisecond * 100
	shorter := time.Millisecond * 50
	ctx, cancel := context.WithTimeout(context.Background(), shorter)
	defer cancel()
	root, err := ioutil.TempDir("", "fsdb_")
	if err != nil {
		t.Fatalf("failed to get tmp dir: %v", err)
	}
	defer os.RemoveAll(root)
	opts := local.NewDefaultOptions(root)
	db := local.Open(opts)
	keys := []fsdb.Key{
		fsdb.Key("foo"),
		fsdb.Key("bar"),
	}
	for _, key := range keys {
		testWrite(t, db, key, "")
	}
	// Each visit sleeps long enough for the deadline to fire mid-scan.
	keyFunc := func(key fsdb.Key) bool {
		time.Sleep(sleep)
		return true
	}
	started := time.Now()
	err = db.ScanKeys(ctx, keyFunc, fsdb.IgnoreAll)
	// time.Since is the idiomatic form of time.Now().Sub(started).
	elapsed := time.Since(started)
	t.Logf("ScanKeys took %v", elapsed)
	if err != context.DeadlineExceeded {
		t.Errorf("ScanKeys should return %v, got %v", context.DeadlineExceeded, err)
	}
	// Visiting all keys would take sleep*len(keys); cancellation must be faster.
	if elapsed > sleep*time.Duration(len(keys)) {
		t.Errorf("ScanKeys took too long: %v", elapsed)
	}
}
// BenchmarkReadWrite measures write and read throughput for several payload
// sizes, uncompressed and with gzip at min/default/max levels.
func BenchmarkReadWrite(b *testing.B) {
	root, err := ioutil.TempDir(".", "_fsdb_bench_test_")
	if err != nil {
		b.Fatalf("failed to get tmp dir: %v", err)
	}
	defer os.RemoveAll(root)
	ctx := context.Background()
	keySize := 12
	r := rand.New(rand.NewSource(time.Now().Unix()))
	var benchmarkSizes = map[string]int{
		"1K":   1024,
		"10K":  10 * 1024,
		"1M":   1024 * 1024,
		"10M":  10 * 1024 * 1024,
		"256M": 256 * 1024 * 1024,
	}
	// BUG FIX: the gzip variants previously passed SetUseGzip(false), so all
	// three benchmarked the uncompressed path and the level setting was a
	// no-op. They must enable gzip for the levels to take effect.
	var options = map[string]local.Options{
		"nocompression": local.NewDefaultOptions(root).SetUseGzip(false),
		"gzip-min":      local.NewDefaultOptions(root).SetUseGzip(true).SetGzipLevel(gzip.BestSpeed),
		"gzip-default":  local.NewDefaultOptions(root).SetUseGzip(true).SetGzipLevel(gzip.DefaultCompression),
		"gzip-max":      local.NewDefaultOptions(root).SetUseGzip(true).SetGzipLevel(gzip.BestCompression),
	}
	for label, size := range benchmarkSizes {
		b.Run(
			label,
			func(b *testing.B) {
				content := randomBytes(b, r, size)
				for label, opts := range options {
					b.Run(
						label,
						func(b *testing.B) {
							// Start each option from an empty store.
							os.RemoveAll(root)
							db := local.Open(opts)
							keys := make([]fsdb.Key, 0)
							b.Run(
								"write",
								func(b *testing.B) {
									for i := 0; i < b.N; i++ {
										key := fsdb.Key(randomBytes(b, r, keySize))
										keys = append(keys, key)
										err := db.Write(ctx, key, bytes.NewReader(content))
										if err != nil {
											b.Fatalf("Write failed: %v", err)
										}
									}
								},
							)
							b.Run(
								"read",
								func(b *testing.B) {
									for i := 0; i < b.N; i++ {
										// Read back keys written above, at random.
										key := keys[r.Int31n(int32(len(keys)))]
										reader, err := db.Read(ctx, key)
										if err != nil {
											b.Fatalf("Read failed: %v", err)
										}
										reader.Close()
									}
								},
							)
						},
					)
				}
			},
		)
	}
}
func randomBytes(b *testing.B, r *rand.Rand, size int) []byte {
b.Helper()
reader := io.LimitReader(r, int64(size))
content, err := ioutil.ReadAll(reader)
if err != nil {
b.Fatalf("Generate content failed: %v", err)
}
if len(content) != size {
b.Fatalf(
"Generate content failed, expected %d bytes, got %d",
size,
len(content),
)
}
return content
}
// testDeleteEmpty asserts that deleting a missing key reports NoSuchKeyError.
func testDeleteEmpty(t *testing.T, db fsdb.FSDB, key fsdb.Key) {
	t.Helper()
	if err := db.Delete(context.Background(), key); !fsdb.IsNoSuchKeyError(err) {
		t.Errorf("Expected NoSuchKeyError, got: %v", err)
	}
}
// testDelete asserts that deleting an existing key succeeds.
func testDelete(t *testing.T, db fsdb.FSDB, key fsdb.Key) {
	t.Helper()
	if err := db.Delete(context.Background(), key); err != nil {
		t.Errorf("Delete failed: %v", err)
	}
}
// testReadEmpty asserts that reading a missing key reports NoSuchKeyError.
func testReadEmpty(t *testing.T, db fsdb.FSDB, key fsdb.Key) {
	t.Helper()
	if _, err := db.Read(context.Background(), key); !fsdb.IsNoSuchKeyError(err) {
		t.Errorf("Expected NoSuchKeyError, got: %v", err)
	}
}
// testRead reads key from db and asserts the content equals expect.
func testRead(t *testing.T, db fsdb.FSDB, key fsdb.Key, expect string) {
	t.Helper()
	reader, err := db.Read(context.Background(), key)
	if err != nil {
		t.Fatalf("Read failed: %v", err)
	}
	defer reader.Close()
	actual, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatalf("Read content failed: %v", err)
	}
	if string(actual) != expect {
		t.Errorf("Read content expected %q, got %q", expect, actual)
	}
}
// testWrite writes data under key, failing the test on any error.
func testWrite(t *testing.T, db fsdb.FSDB, key fsdb.Key, data string) {
	t.Helper()
	err := db.Write(context.Background(), key, strings.NewReader(data))
	if err != nil {
		t.Fatalf("Write failed: %v", err)
	}
}
|
// app
package core
import (
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"strings"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/rpc"
"caige/componnet"
"caige/database"
"caige/schedule-service"
"comment/constant"
"comment/job"
"github.com/Unknwon/goconfig"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/go-martini/martini"
"github.com/go-xorm/core"
"github.com/martini-contrib/render"
)
// appServe bundles the web framework, configuration, logging, storage engines
// and scheduler that make up the comment-dapp service.
type appServe struct {
	Name string // service name, set by NewApp
	Engine *martini.ClassicMartini // HTTP framework instance
	configFile string // path of the config file passed to Init
	ConfigHandler *goconfig.ConfigFile // parsed configuration
	LogHandler *componnet.CLog // multi-channel logger
	Version string
	VersionNote string
	MysqlEngine *CDatabase.Mysql // nil unless a [mysql] section is configured
	MongoEngine *CDatabase.Mongo // nil unless a [mongo] section is configured
	ScheduleService *schedule_service.ScheduleService // cron job runner
	RunStartTime time.Time // set when Run is called
	RunEndTime time.Time // set when an interrupt is received
	Error error // last fatal error; reported by End before exiting
}
// NewApp constructs the application shell with its service name; call Init
// and Setup before Run.
func NewApp() *appServe {
	return &appServe{Name: "comment-dapp"}
}
// Init creates the martini engine, forces production mode, records the config
// file path and version info, and verifies the config file loads.
func (app *appServe) Init(configFile string) {
	app.Engine = martini.Classic()
	martini.Env = constant.Prod
	app.configFile = configFile
	app.Version = "1.0"
	app.VersionNote = "release:2018-1-5"
	app.checRuntime()
}
// checRuntime (sic: name is a typo for "checkRuntime") loads the config file
// recorded by Init; any load error aborts the process via End.
func (app *appServe) checRuntime() {
	app.ConfigHandler, app.Error = goconfig.LoadConfigFile(app.configFile)
	if app.Error != nil {
		app.End("check config fail")
	}
}
// Setup wires every component in dependency order: logging, routes,
// databases, DI services, middleware, the signal listener, cron jobs, and the
// geth RPC connection plus its transaction signer.
func (app *appServe) Setup() {
	app.LogHandler = new(componnet.CLog)
	app.LogHandler.Init(app.ConfigHandler.MustValue("core", "log_path", "logs"))
	app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "系统启动...配置文件["+app.configFile+"]")
	app.Engine.Logger(app.LogHandler.Handlers[componnet.LOG_TYPE_API])
	initRouter(app.Engine)
	app.setupDb()
	app.registerService()
	app.registerMiddleware()
	// Signal handling runs on its own goroutine for the process lifetime.
	go app.receiveSignal()
	app.setupCronJob()
	app.ConnectGethRpc()
	app.CreateGethAuth()
}
// setupDb conditionally connects MySQL and MongoDB depending on which config
// sections are present, and registers the connected engines globally. Any
// connection failure is fatal (End exits the process).
func (app *appServe) setupDb() {
	mysqlSection, secErr := app.ConfigHandler.GetSection("mysql")
	if len(mysqlSection) > 0 && secErr == nil {
		host := mysqlSection["host"]
		port := mysqlSection["port"]
		user := mysqlSection["user"]
		password := mysqlSection["password"]
		database := mysqlSection["database"]
		chartset := mysqlSection["charset"] // (sic) variable name typo for "charset"
		tbPrefix := mysqlSection["table_prefix"]
		app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "连接Mysql...")
		app.MysqlEngine = CDatabase.NewMysql(host, port, user, password, database, chartset, tbPrefix)
		connectErr := app.MysqlEngine.TryConnect()
		if connectErr != nil {
			app.Error = connectErr
			app.End("connect to mysql fail")
		}
		// Register the mysql engine for global use.
		CDatabase.RegisterMysqlEngine(app.MysqlEngine.Engine, app.LogHandler.SqlHandler, core.LOG_DEBUG)
	}
	mongoSection, mSecErr := app.ConfigHandler.GetSection("mongo")
	if len(mongoSection) > 0 && mSecErr == nil {
		app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "连接Mongodb...")
		mongoHost := mongoSection["host"]
		mongoPort := mongoSection["port"]
		// Authenticate only when both user and password are configured.
		if mongoSection["user"] != "" && mongoSection["password"] != "" {
			app.MongoEngine = CDatabase.NewDefaultMongoWithAuth(mongoHost, mongoPort, mongoSection["user"], mongoSection["password"], mongoSection["dbname"])
		} else {
			app.MongoEngine = CDatabase.NewDefaultMongo(mongoHost, mongoPort)
		}
		mongoConnErr := app.MongoEngine.TryConnect()
		if mongoConnErr != nil {
			app.Error = mongoConnErr
			app.End("connect to mongodb fail")
		}
		mgLogger := log.New(app.LogHandler.SqlHandler, "[mongo] ", log.Ldate|log.Ltime|log.Llongfile)
		CDatabase.RegisterMongoEngine(app.MongoEngine.Engine.DB(mongoSection["dbname"]), mgLogger, true)
	}
}
// setupCronJob starts the schedule service with the project's job list on its
// own goroutine.
func (app *appServe) setupCronJob() {
	app.ScheduleService = schedule_service.NewScheduleService(app.LogHandler.Handlers[componnet.LOG_TYPE_CRON])
	app.ScheduleService.AddMultiJobs(job.ScheduleJobList())
	go app.ScheduleService.Run()
}
// registerService injects shared singletons (the JSON response helper, the
// logger, and the config handler) into martini's dependency injector.
func (app *appServe) registerService() {
	app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "注册系统服务...")
	// Register the JSON response wrapper.
	var resJson componnet.ZasResponseJson
	resJson.Logger = app.LogHandler.Handlers[componnet.LOG_TYPE_API]
	app.Engine.Map(resJson)
	// Register the logger.
	app.Engine.Map(app.LogHandler)
	// Inject the global config handler.
	app.Engine.Map(app.ConfigHandler)
}
// registerMiddleware installs the UTF-8 JSON renderer and, when configured,
// a static file server.
func (app *appServe) registerMiddleware() {
	app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "注册中间件...")
	app.Engine.Use(render.Renderer(render.Options{Charset: "UTF-8", IndentJSON: false}))
	staticDir := app.ConfigHandler.MustValue("core", "static_directory")
	if staticDir != "" {
		app.Engine.Use(martini.Static(staticDir))
	}
}
// receiveSignal blocks until an interrupt/kill signal arrives, logs a
// shutdown summary (version, start/end time, run duration) and exits.
// NOTE(review): os.Kill (SIGKILL) cannot be caught on POSIX systems, so only
// os.Interrupt is effective here — consider syscall.SIGTERM instead.
func (app *appServe) receiveSignal() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Kill)
	signalContent := <-c
	// Notify only delivers the registered signals, so this check always holds.
	if signalContent == os.Interrupt || signalContent == os.Kill {
		app.RunEndTime = time.Now()
		app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "System Interrupt")
		app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "Remove Pid File...")
		app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "---- Server Info ----")
		app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "Version :"+app.Version)
		app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "Start Time:"+app.RunStartTime.Format(constant.TimeLayout))
		app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "End Time:"+app.RunEndTime.Format(constant.TimeLayout))
		duration := app.RunEndTime.Sub(app.RunStartTime)
		app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "Run Time:"+duration.String())
		os.Exit(0)
	}
}
// Deployment runs the project's one-off deployment hooks against the service log.
func (app *appServe) Deployment() {
	job.DeploymentFuncs(app.LogHandler.Handlers[componnet.LOG_TYPE_SERVICE])
}
// ConnectGethRpc dials the local geth JSON-RPC endpoint, verifies the network
// id, and injects the eth client into the DI container. Failures abort the
// process via End.
// NOTE(review): the endpoint is hard-coded to http://localhost:8545 —
// consider reading it from configuration like the other settings.
func (app *appServe) ConnectGethRpc() {
	client, dialErr := rpc.Dial("http://localhost:8545")
	if dialErr != nil {
		app.Error = dialErr
		app.End("connect to geth rpc fail")
	}
	app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "连接geth host...")
	conn := ethclient.NewClient(client)
	// NetworkID doubles as a liveness check for the node.
	network, getIdErr := conn.NetworkID(context.TODO())
	if getIdErr != nil {
		app.Error = getIdErr
		app.End("get geth network id fail")
	}
	app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "geth host:"+network.String())
	app.Engine.Map(conn)
}
// CreateGethAuth reads the configured keystore JSON and passphrase, builds a
// bind transactor from them, and injects it into the DI container for
// transaction signing. Any failure aborts the process via End.
// NOTE(review): bind.NewTransactor without a chain id is deprecated in newer
// go-ethereum releases — confirm the pinned dependency version before upgrading.
func (app *appServe) CreateGethAuth() {
	keyStore := app.ConfigHandler.MustValue("geth", "keystore")
	if keyStore == "" {
		app.Error = errors.New("keystore config miss")
		app.End("get keystore config val")
	}
	keyStoreContent, readErr := ioutil.ReadFile(keyStore)
	if readErr != nil {
		app.Error = readErr
		app.End("read keystore file fail")
	}
	key := string(keyStoreContent)
	pw := app.ConfigHandler.MustValue("geth", "pass")
	auth, err := bind.NewTransactor(strings.NewReader(key), pw)
	if err != nil {
		app.Error = err
		app.End("create geth auth fail")
	}
	app.Engine.Map(auth)
}
// Run records the start time and serves HTTP on the configured host:port;
// this call blocks for the lifetime of the process.
func (app *appServe) Run() {
	app.RunStartTime = time.Now()
	host := app.ConfigHandler.MustValue("core", "host")
	port := app.ConfigHandler.MustValue("core", "port")
	app.LogHandler.Println(componnet.LOG_TYPE_SERVICE, "启动完成...listening["+host+":"+port+"]")
	app.Engine.RunOnAddr(host + ":" + port)
}
// End prints the fatal error tagged with tag and terminates the process with
// exit code 1. It tolerates a nil app.Error (the original dereferenced it
// unconditionally and would panic while reporting a failure).
func (app *appServe) End(tag string) {
	msg := "Error!" + tag
	if app.Error != nil {
		msg += ":" + app.Error.Error()
	}
	fmt.Println(msg)
	os.Exit(1)
}
|
package main
import "fmt"
func main() {
mySlice := make([]int, 0, 3)
fmt.Println("-----------------")
fmt.Println(mySlice)
fmt.Println(len(mySlice))
fmt.Println(cap(mySlice))
fmt.Println("-----------------")
for i := 0; i < 80; i++ {
mySlice = append(mySlice, i)
fmt.Println("Len:", len(mySlice), "Capacity:", cap(mySlice), "Value: ", mySlice[i])
}
}
// -----------------
// []
// 0
// 3
// -----------------
// Len: 1 Capacity: 3 Value: 0
// Len: 2 Capacity: 3 Value: 1
// Len: 3 Capacity: 3 Value: 2
// Len: 4 Capacity: 6 Value: 3
// Len: 5 Capacity: 6 Value: 4
// Len: 6 Capacity: 6 Value: 5
// Len: 7 Capacity: 12 Value: 6
// Len: 8 Capacity: 12 Value: 7
// Len: 9 Capacity: 12 Value: 8
// Len: 10 Capacity: 12 Value: 9
// Len: 11 Capacity: 12 Value: 10
// Len: 12 Capacity: 12 Value: 11
// Len: 13 Capacity: 24 Value: 12
// Len: 14 Capacity: 24 Value: 13
// Len: 15 Capacity: 24 Value: 14
// Len: 16 Capacity: 24 Value: 15
// Len: 17 Capacity: 24 Value: 16
// Len: 18 Capacity: 24 Value: 17
// Len: 19 Capacity: 24 Value: 18
// Len: 20 Capacity: 24 Value: 19
// Len: 21 Capacity: 24 Value: 20
// Len: 22 Capacity: 24 Value: 21
// Len: 23 Capacity: 24 Value: 22
// Len: 24 Capacity: 24 Value: 23
// Len: 25 Capacity: 48 Value: 24
// Len: 26 Capacity: 48 Value: 25
// Len: 27 Capacity: 48 Value: 26
// Len: 28 Capacity: 48 Value: 27
// Len: 29 Capacity: 48 Value: 28
// Len: 30 Capacity: 48 Value: 29
// Len: 31 Capacity: 48 Value: 30
// Len: 32 Capacity: 48 Value: 31
// Len: 33 Capacity: 48 Value: 32
// Len: 34 Capacity: 48 Value: 33
// Len: 35 Capacity: 48 Value: 34
// Len: 36 Capacity: 48 Value: 35
// Len: 37 Capacity: 48 Value: 36
// Len: 38 Capacity: 48 Value: 37
// Len: 39 Capacity: 48 Value: 38
// Len: 40 Capacity: 48 Value: 39
// Len: 41 Capacity: 48 Value: 40
// Len: 42 Capacity: 48 Value: 41
// Len: 43 Capacity: 48 Value: 42
// Len: 44 Capacity: 48 Value: 43
// Len: 45 Capacity: 48 Value: 44
// Len: 46 Capacity: 48 Value: 45
// Len: 47 Capacity: 48 Value: 46
// Len: 48 Capacity: 48 Value: 47
// Len: 49 Capacity: 96 Value: 48
// Len: 50 Capacity: 96 Value: 49
// Len: 51 Capacity: 96 Value: 50
// Len: 52 Capacity: 96 Value: 51
// Len: 53 Capacity: 96 Value: 52
// Len: 54 Capacity: 96 Value: 53
// Len: 55 Capacity: 96 Value: 54
// Len: 56 Capacity: 96 Value: 55
// Len: 57 Capacity: 96 Value: 56
// Len: 58 Capacity: 96 Value: 57
// Len: 59 Capacity: 96 Value: 58
// Len: 60 Capacity: 96 Value: 59
// Len: 61 Capacity: 96 Value: 60
// Len: 62 Capacity: 96 Value: 61
// Len: 63 Capacity: 96 Value: 62
// Len: 64 Capacity: 96 Value: 63
// Len: 65 Capacity: 96 Value: 64
// Len: 66 Capacity: 96 Value: 65
// Len: 67 Capacity: 96 Value: 66
// Len: 68 Capacity: 96 Value: 67
// Len: 69 Capacity: 96 Value: 68
// Len: 70 Capacity: 96 Value: 69
// Len: 71 Capacity: 96 Value: 70
// Len: 72 Capacity: 96 Value: 71
// Len: 73 Capacity: 96 Value: 72
// Len: 74 Capacity: 96 Value: 73
// Len: 75 Capacity: 96 Value: 74
// Len: 76 Capacity: 96 Value: 75
// Len: 77 Capacity: 96 Value: 76
// Len: 78 Capacity: 96 Value: 77
// Len: 79 Capacity: 96 Value: 78
// Len: 80 Capacity: 96 Value: 79
|
package mosquito
import (
"net/http"
)
// server is a package-level Server instance (not initialized here).
var server *Server

// Method is an HTTP method name such as "GET" or "POST".
type Method string

// Server dispatches requests through a middleware chain and keeps the
// registered routes.
type Server struct {
	middlewares []Middleware
	routes []*Route
}
// Use appends middleware to the chain and returns the server for chaining.
func (server *Server) Use(middleware Middleware) *Server {
	server.middlewares = append(server.middlewares, middleware)
	return server
}
// Get registers handler for GET requests on path; returns the server for chaining.
func (server *Server) Get(path string, handler Handler) *Server {
	server.routes = append(server.routes, NewRoute(path, Method("GET"), handler))
	return server
}
// Post registers handler for POST requests on path; returns the server for chaining.
func (server *Server) Post(path string, handler Handler) *Server {
	server.routes = append(server.routes, NewRoute(path, Method("POST"), handler))
	return server
}
// ServeHTTP runs the request through the middleware chain in registration
// order. Each middleware receives a next() callback that invokes the
// following one; the chain stops when a middleware does not call next() or
// when all middlewares have run.
// NOTE(review): server.routes is never consulted here — presumably a routing
// middleware handles them; confirm against the rest of the package.
// (gofmt fixes: removed stray trailing semicolon, spaced composite literals.)
func (server *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var next func()
	req := &Request{Request: r}
	res := Response{ResponseWriter: w}
	index := 0
	middlewares := server.middlewares
	next = func() {
		if len(middlewares) > index {
			nextMiddleware := middlewares[index]
			index++
			nextMiddleware.ServeHTTP(res, req, next)
		}
	}
	next()
}
|
package proxy
import (
"net/http"
"net/http/httputil"
"net/url"
"strings"
)
// HTTP returns a reverse proxy that forwards every request to target, joining
// the target's path with the incoming path and merging both query strings.
func HTTP(target *url.URL) http.Handler {
	return &httputil.ReverseProxy{
		Director: func(req *http.Request) {
			// Rewrite Host so the backend sees the target host, not the proxy's.
			req.Host = target.Host
			req.URL.Host = target.Host
			req.URL.Scheme = target.Scheme
			req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
			req.URL.RawQuery = combinedQuery(target, req.URL)
		},
	}
}
func combinedQuery(a, b *url.URL) string {
queries := []string{}
for _, q := range []string{a.RawQuery, b.RawQuery} {
if q != "" {
queries = append(queries, q)
}
}
return strings.Join(queries, "&")
}
// singleJoiningSlash concatenates a and b with exactly one slash between
// them, regardless of whether a ends with or b starts with a slash.
func singleJoiningSlash(a, b string) string {
	hasTrailing := strings.HasSuffix(a, "/")
	hasLeading := strings.HasPrefix(b, "/")
	if hasTrailing && hasLeading {
		return a + strings.TrimPrefix(b, "/")
	}
	if !hasTrailing && !hasLeading {
		return a + "/" + b
	}
	return a + b
}
|
package controller
import (
"encoding/json"
"net/http"
"strconv"
"github.com/fernandoporazzi/yak-shop/app/entity"
"github.com/fernandoporazzi/yak-shop/app/errors"
"github.com/fernandoporazzi/yak-shop/app/service"
"github.com/go-chi/chi/v5"
)
// StockController exposes the HTTP handler for stock queries.
type StockController interface {
	GetData(response http.ResponseWriter, request *http.Request)
}

// stockController implements StockController on top of a StockService.
type stockController struct {
	service service.StockService
}

// NewStockController wires a StockService into a StockController.
func NewStockController(service service.StockService) StockController {
	return &stockController{service}
}
// GetData reports the milk (liters) and skins available after the requested
// number of days, as JSON. The day count comes from the "days" route param.
func (c *stockController) GetData(response http.ResponseWriter, request *http.Request) {
	response.Header().Set("Content-Type", "application/json")
	days, err := strconv.ParseInt(chi.URLParam(request, "days"), 10, 32)
	if err != nil {
		// NOTE(review): a malformed param is arguably a 400 Bad Request;
		// kept as 500 to preserve the existing API contract.
		response.WriteHeader(http.StatusInternalServerError)
		json.NewEncoder(response).Encode(errors.ServiceError{Message: "Error parsing the date"})
		return
	}
	liters, err := c.service.GetMilkByDays(days)
	if err != nil {
		response.WriteHeader(http.StatusInternalServerError)
		// Fixed user-facing typo: "mlik" -> "milk".
		json.NewEncoder(response).Encode(errors.ServiceError{Message: "Error getting the milk stock"})
		return
	}
	skins, err := c.service.GetSkinByDays(days)
	if err != nil {
		response.WriteHeader(http.StatusInternalServerError)
		json.NewEncoder(response).Encode(errors.ServiceError{Message: "Error getting the skin stock"})
		return
	}
	var stock entity.Stock
	stock.Milk = liters
	stock.Skins = skins
	response.WriteHeader(http.StatusOK)
	json.NewEncoder(response).Encode(stock)
}
|
package http_server
import (
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/jsagl/go-from-scratch/models"
"github.com/jsagl/go-from-scratch/usecase"
"go.uber.org/zap"
"net/http"
"strconv"
)
// RecipeHandler serves recipe HTTP endpoints backed by the recipe use case.
type RecipeHandler struct {
	logger *zap.SugaredLogger
	usecase usecase.RecipeUseCaseInterface
}

// NewRecipeHandler wires a logger and use case into a RecipeHandler.
func NewRecipeHandler(logger *zap.SugaredLogger, usecase usecase.RecipeUseCaseInterface) *RecipeHandler {
	return &RecipeHandler{usecase: usecase, logger: logger}
}
// GetById returns the recipe with the id from the route as JSON. Responds
// 400 on a malformed id and 500 on lookup failure (previously a lookup
// failure returned silently, leaving the client an implicit 200 with an
// empty body).
func (handler *RecipeHandler) GetById(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	handler.logger.Infow("HTTP request", "method", r.Method, "route", r.URL)
	vars := mux.Vars(r)
	id, err := strconv.ParseInt(vars["id"], 10, 64)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		json.NewEncoder(w).Encode(models.ErrBadParameters)
		return
	}
	recipe, err := handler.usecase.GetById(id)
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(recipe)
}
// FindAll returns every recipe as a JSON array. Responds 500 on lookup
// failure (previously a failure returned silently, leaving the client an
// implicit 200 with an empty body).
func (handler *RecipeHandler) FindAll(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	handler.logger.Infow("HTTP request", "method", r.Method, "route", r.URL)
	recipes, err := handler.usecase.FindAll()
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(recipes)
}
|
package main
// import (
// "encoding/json"
// "errors"
// "log"
// "net/url"
// "strings"
// "time"
// )
// // "title": "L'avantage d'innover \u00e0 l'\u00e9tat pur",
// // "uri": "http://www.hood.net/about.html",
// // "date": "March 30 1985"
// func (it *Item) UnmarshalJSON(j []byte) error {
// var timeFormats = []string{"2006-01-02T15", "January 02", "2006-01-02T15:04:05", "2006-01-02", "January 2 2006", "02 January 2006", "02 Jan 2006", "2006-01-02T15:04:05Z07:00"}
// //timeFormats := []
// var rawStr map[string]string
// if err := json.Unmarshal(j, &rawStr); err != nil {
// return err
// }
// for k, v := range rawStr {
// switch strings.ToLower(k) {
// case "title":
// it.Title = v
// case "url":
// u, err := url.Parse(v)
// if err != nil {
// return err
// }
// it.Uri = *u
// case "date":
// t, err := parseTime(v, timeFormats)
// if err != nil {
// return err
// }
// it.Date = t
// }
// }
// return nil
// }
// func parseTime(input string, formats []string) (time.Time, error) {
// for _, format := range formats {
// t, err := time.Parse(format, input)
// if err == nil {
// return t, nil
// }
// }
// log.Println("error parsing time: ", input)
// return time.Time{}, errors.New("Unrecognized time format")
// }
|
package main
import (
"fmt"
)
// filterNeurons returns the subset of b.Neurons (keyed by id) for which
// match reports true. It consolidates the previously triplicated bodies of
// the three role accessors below.
func (b *Brain) filterNeurons(match func(*Neuron) bool) map[string]*Neuron {
	r := map[string]*Neuron{}
	for _, n := range b.Neurons {
		if match(n) {
			r[n.Id] = n
		}
	}
	return r
}

// InputNeurons returns the brain's input-role neurons keyed by id.
func (b *Brain) InputNeurons() map[string]*Neuron {
	return b.filterNeurons(func(n *Neuron) bool { return n.Role == Role_input })
}

// OutputNeurons returns the brain's output-role neurons keyed by id.
func (b *Brain) OutputNeurons() map[string]*Neuron {
	return b.filterNeurons(func(n *Neuron) bool { return n.Role == Role_output })
}

// RegularNeurons returns the brain's regular-role neurons keyed by id.
func (b *Brain) RegularNeurons() map[string]*Neuron {
	return b.filterNeurons(func(n *Neuron) bool { return n.Role == Role_regular })
}
// String summarizes the brain's composition for logging.
func (b Brain) String() string {
	// Fixed typo in output: "neuroons" -> "neurons".
	return fmt.Sprintf("Brain: %d input, %d regular, %d output neurons, %d synapses", len(b.InputNeurons()), len(b.RegularNeurons()), len(b.OutputNeurons()), len(b.Synapses))
}
// NewBrain allocates an empty brain in the "starting" state with a fresh id
// drawn from the nextId channel.
func NewBrain() Brain {
	return Brain{
		Id:       <-nextId,
		Status:   BrainStatus_starting,
		Neurons:  map[string]*Neuron{},
		Synapses: map[string]*Synapse{},
	}
}
// peaBrain builds a tiny fully-connected brain: 10 regular, 3 input and
// 1 output neuron, with a synapse between every ordered pair of neurons
// (self-connections included).
func peaBrain() Brain {
	const (
		regularCount = 10
		inputCount   = 3
		outputCount  = 1
	)
	b := NewBrain()
	for i := 0; i < regularCount; i++ {
		n := NewNeuron(Role_regular)
		b.Neurons[n.Id] = n
	}
	for i := 0; i < inputCount; i++ {
		n := NewNeuron(Role_input)
		b.Neurons[n.Id] = n
	}
	for i := 0; i < outputCount; i++ {
		n := NewNeuron(Role_output)
		b.Neurons[n.Id] = n
	}
	// Wire every neuron to every neuron with a fixed weight and base kernel.
	for _, src := range b.Neurons {
		for _, dst := range b.Neurons {
			syn := NewSynapse(src, dst, 0.1, baseKernel())
			b.Synapses[syn.Id] = syn
		}
	}
	return b
}
// Init seeds the hillock voltage of each input neuron from the labeled input
// map. It fails when the input count does not match the number of input
// neurons, or when a label does not name a known neuron.
func (b *Brain) Init(input map[string]float64) error {
	if len(input) != len(b.InputNeurons()) {
		return ERROR_INPUT_MISMATCH
	}
	for label, v := range input {
		n := b.Neurons[label]
		if n == nil {
			return ERROR_INPUT_LABELED_LINE_NOT_FOUND
		}
		n.HillockV = v
	}
	return nil
}
// StepForward advances the simulation one tick: every neuron updates itself,
// then the brain clock increments.
func (b *Brain) StepForward() {
	// update all hillocks
	for _, n := range b.Neurons {
		n.StepForward()
	}
	// move the time forward
	b.Time++
}
// AddNeuron registers n and all of its synapses with the brain, keyed by id.
func (b *Brain) AddNeuron(n *Neuron) {
	b.Neurons[n.Id] = n
	for i, s := range n.Synapses {
		b.Synapses[i] = s
	}
}
|
package setr
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01200102 is the XML document wrapper for the setr.012.001.02
// (SubscriptionMultipleOrderConfirmationV02) ISO 20022 message.
type Document01200102 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:setr.012.001.02 Document"`
	Message *SubscriptionMultipleOrderConfirmationV02 `xml:"setr.012.001.02"`
}
// AddMessage allocates the wrapped message and returns it for population.
func (d *Document01200102) AddMessage() *SubscriptionMultipleOrderConfirmationV02 {
	d.Message = new(SubscriptionMultipleOrderConfirmationV02)
	return d.Message
}
// Scope
// The SubscriptionMultipleOrderConfirmation message is sent by an executing party, e.g., a transfer agent, to the instructing party, e.g., an investment manager or its authorised representative. There may be one or more intermediary parties between the executing party and the instructing party. The intermediary party is, for example, an intermediary or a concentrator.
// This message is used to confirm the details of the execution of a SubscriptionMultipleOrder message.
// Usage
// The SubscriptionMultipleOrderConfirmation message is sent, after the price has been determined, to confirm the execution of the individual orders.
// A SubscriptionMultipleOrder may be responded to by more than one SubscriptionMultipleOrderConfirmation message, as the valuation cycle of the financial instruments in each individual order may be different.
// When the executing party sends several confirmations, there is no specific indication in the message that it is an incomplete confirmation. Reconciliation must be based on the references.
// A SubscriptionMultipleOrder must in all cases be responded to by a SubscriptionMultipleOrderConfirmation message/s and in no circumstances by a SubscriptionBulkOrderConfirmation message/s.
// If the executing party needs to confirm a SubscriptionBulkOrder message, then a SubscriptionBulkOrderConfirmation message must be used.
type SubscriptionMultipleOrderConfirmationV02 struct {
	// Reference assigned to a set of orders or trades in order to link them together.
	MasterReference *iso20022.AdditionalReference3 `xml:"MstrRef,omitempty"`
	// Collective reference identifying a set of messages.
	PoolReference *iso20022.AdditionalReference3 `xml:"PoolRef,omitempty"`
	// Reference to a linked message that was previously sent.
	PreviousReference []*iso20022.AdditionalReference3 `xml:"PrvsRef,omitempty"`
	// Reference to a linked message that was previously received.
	RelatedReference *iso20022.AdditionalReference3 `xml:"RltdRef"`
	// General information related to the execution of investment fund order.
	MultipleExecutionDetails *iso20022.SubscriptionMultipleExecution2 `xml:"MltplExctnDtls"`
	// Confirmation of the information related to an intermediary.
	IntermediaryDetails []*iso20022.Intermediary4 `xml:"IntrmyDtls,omitempty"`
	// Information provided when the message is a copy of a previous message.
	CopyDetails *iso20022.CopyInformation1 `xml:"CpyDtls,omitempty"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	Extension []*iso20022.Extension1 `xml:"Xtnsn,omitempty"`
}
// AddMasterReference allocates MasterReference and returns it for population.
func (s *SubscriptionMultipleOrderConfirmationV02) AddMasterReference() *iso20022.AdditionalReference3 {
	s.MasterReference = new(iso20022.AdditionalReference3)
	return s.MasterReference
}
// AddPoolReference allocates PoolReference and returns it for population.
func (s *SubscriptionMultipleOrderConfirmationV02) AddPoolReference() *iso20022.AdditionalReference3 {
	s.PoolReference = new(iso20022.AdditionalReference3)
	return s.PoolReference
}
// AddPreviousReference appends a new entry to PreviousReference and returns it.
func (s *SubscriptionMultipleOrderConfirmationV02) AddPreviousReference() *iso20022.AdditionalReference3 {
	newValue := new(iso20022.AdditionalReference3)
	s.PreviousReference = append(s.PreviousReference, newValue)
	return newValue
}
// AddRelatedReference allocates RelatedReference and returns it for population.
func (s *SubscriptionMultipleOrderConfirmationV02) AddRelatedReference() *iso20022.AdditionalReference3 {
	s.RelatedReference = new(iso20022.AdditionalReference3)
	return s.RelatedReference
}
// AddMultipleExecutionDetails allocates MultipleExecutionDetails and returns it.
func (s *SubscriptionMultipleOrderConfirmationV02) AddMultipleExecutionDetails() *iso20022.SubscriptionMultipleExecution2 {
	s.MultipleExecutionDetails = new(iso20022.SubscriptionMultipleExecution2)
	return s.MultipleExecutionDetails
}
// AddIntermediaryDetails appends a new entry to IntermediaryDetails and returns it.
func (s *SubscriptionMultipleOrderConfirmationV02) AddIntermediaryDetails() *iso20022.Intermediary4 {
	newValue := new(iso20022.Intermediary4)
	s.IntermediaryDetails = append(s.IntermediaryDetails, newValue)
	return newValue
}
// AddCopyDetails allocates CopyDetails and returns it for population.
func (s *SubscriptionMultipleOrderConfirmationV02) AddCopyDetails() *iso20022.CopyInformation1 {
	s.CopyDetails = new(iso20022.CopyInformation1)
	return s.CopyDetails
}
// AddExtension appends a new entry to Extension and returns it.
func (s *SubscriptionMultipleOrderConfirmationV02) AddExtension() *iso20022.Extension1 {
	newValue := new(iso20022.Extension1)
	s.Extension = append(s.Extension, newValue)
	return newValue
}
|
package main
import "fmt"

// main prints every Pythagorean triplet (a, b, c) with a+b+c == 1000,
// generated via Euclid's formula (a = m*m-n*n, b = 2mn, c = m*m+n*n), along
// with the product a*b*c (Project Euler problem 9).
func main() {
	for n := 1; n < 500; n++ {
		for m := n + 1; m < 500; m++ {
			// Integer arithmetic replaces math.Pow: exact results with no
			// float conversions (the math import is gone with it).
			a := m*m - n*n
			b := 2 * m * n
			c := m*m + n*n
			if a+b+c == 1000 {
				product := a * b * c
				fmt.Println(a, b, c)
				fmt.Println(product)
			}
		}
	}
}
|
package main
import (
"fmt"
"net/http"
"time"
)
// Program to check website status. Make http request and print if site is up or down.
// main fans out one goroutine per link, then polls forever: every time a
// link reports its status on channel c, a new goroutine re-checks it after a
// five-second pause.
func main() {
	links := []string {
		"http://google.com",
		"http://facebook.com",
		"http://stackoverflow.com",
		"http://golang.org",
		"http://amazon.com",
	}
	// Unbuffered channel over which workers report the link they checked.
	c := make(chan string)
	// Start one checker goroutine per link.
	for _, link := range links {
		go checkLink(link, c)
	}
	// Receiving from c blocks until some checker finishes; this loop never
	// terminates because each received link is immediately rescheduled.
	for l := range c {
		// The link is passed as an explicit argument so the closure works on
		// its own copy rather than sharing the loop variable l.
		go func(link string) {
			// Wait five seconds between checks of the same link.
			time.Sleep(5 * time.Second)
			checkLink(link, c)
		}(l)
	}
}
// Make HTTP request to link. We get back two values: struct response object and error if one occurred. We do not care about response, we just want to check if the website is down or not ie if there is an error. Add second argument of type channel that communicates string data between go routines. So between func main parent go routine that gets created automatically and the child go routines that are created inside for loop
// checkLink issues an HTTP GET to link, prints whether the site appears up or
// down, and reports the link back on c so the caller can reschedule it.
func checkLink(link string, c chan string) {
	resp, err := http.Get(link)
	if err != nil {
		fmt.Println(link, "might be down!")
		c <- link
		return
	}
	// Close the response body so the transport can reuse the connection;
	// the original discarded the response and leaked the body.
	resp.Body.Close()
	fmt.Println(link, "is up!")
	c <- link
}
|
package main
// uniquePaths returns the number of distinct paths from the top-left to the
// bottom-right corner of an m x n grid, moving only right or down.
// dp[i][t] is the number of paths reaching cell (i, t):
//   i == 0 && t == 0: dp[i][t] = 1 (start cell)
//   i == 0 && t != 0: dp[i][t] = dp[0][t-1] (only reachable from the left)
//   i != 0 && t == 0: dp[i][t] = dp[i-1][0] (only reachable from above)
//   otherwise:        dp[i][t] = dp[i-1][t] + dp[i][t-1]
func uniquePaths(m int, n int) int {
	if m == 0 || n == 0 {
		return 0
	}
	// Allocate exactly m x n (the original fixed [105][105] array silently
	// capped the supported grid size).
	dp := make([][]int, m)
	for i := range dp {
		dp[i] = make([]int, n)
	}
	dp[0][0] = 1
	for i := 1; i < m; i++ {
		dp[i][0] = dp[i-1][0]
	}
	for t := 1; t < n; t++ {
		dp[0][t] = dp[0][t-1]
	}
	for i := 1; i < m; i++ {
		for t := 1; t < n; t++ {
			dp[i][t] = dp[i-1][t] + dp[i][t-1]
		}
	}
	return dp[m-1][n-1]
}
// uniquePathsRollingArray is uniquePaths with the DP table compressed to a
// single row (rolling array), reducing space from O(m*n) to O(n).
// Renamed from uniquePaths: two functions with the same name in one package
// do not compile. The fixed [105]int array is also replaced by a dynamically
// sized slice.
func uniquePathsRollingArray(m int, n int) int {
	if m == 0 || n == 0 {
		return 0
	}
	dp := make([]int, n)
	dp[0] = 1
	for t := 1; t < n; t++ {
		dp[t] = dp[t-1]
	}
	for i := 1; i < m; i++ {
		// dp[0] stays dp[0]: the first column always has exactly one path.
		for t := 1; t < n; t++ {
			// Before the update dp[t] holds row i-1's value (the cell above)
			// and dp[t-1] already holds row i's value (the cell to the left).
			dp[t] = dp[t] + dp[t-1]
		}
	}
	return dp[n-1]
}
/*
题目链接:
https://leetcode-cn.com/problems/unique-paths/ 不同路径
*/
/*
总结
1. 注意题目的限制: 每次只能向下或者向右移动一步
2. 其实这题还可以用排列组合AC。
*/
|
//一些注意事项
//测试程序文件名以_test结尾
//import测试包testing
//测试函数以Test开头
package main
import (
"testing"
)
// Test_Division verifies that Division(6, 2) yields 3 with no error.
// Division is defined elsewhere in this package.
func Test_Division(t *testing.T) { // testing.T records failures and test state
	i, err := Division(6, 2)
	if i != 3 || err != nil {
		t.Error("除法测试没通过!")
	} else {
		t.Log("第一个测试通过了!")
	}
}
// Benchmark_Divisionb measures the throughput of Division. The timer is
// stopped while any expensive setup would run so only the loop is measured.
func Benchmark_Divisionb(b *testing.B) { // testing.B carries benchmark state
	b.StopTimer() // pause the benchmark timer
	// Do any setup here (e.g. for a compression benchmark: open files and
	// read data) without it counting toward the measured time.
	b.StartTimer() // resume the benchmark timer
	for i := 0; i < b.N; i++ { // b.N is a large count chosen by the framework
		Division(4, 5)
	}
}
|
package chunk_test
import (
"bytes"
"io"
"testing"
"github.com/tombell/go-serato/serato/chunk"
)
// TestNewVrsnChunk checks that a well-formed vrsn chunk parses without error
// and yields a non-nil chunk.
func TestNewVrsnChunk(t *testing.T) {
	data := generateBytes(t, "7672736E0000003C0031002E0030002F00530065007200610074006F002000530063007200610074006300680020004C0049005600450020005200650076006900650077")
	buf := bytes.NewBuffer(data)
	hdr, err := chunk.NewHeader(buf)
	if err != nil {
		t.Fatal("expected NewHeader err to be nil")
	}
	vrsn, err := chunk.NewVrsnChunk(hdr, buf)
	if err != nil {
		t.Fatal("expected NewVrsnChunk err to be nil")
	}
	if vrsn == nil {
		t.Fatal("expected vrsn to not be nil")
	}
}
// TestNewVrsnChunkUnexpectedEOF checks that a truncated chunk body surfaces
// io.ErrUnexpectedEOF from NewVrsnChunk.
func TestNewVrsnChunkUnexpectedEOF(t *testing.T) {
	data := generateBytes(t, "7672736E00000037600450020005200650076006")
	buf := bytes.NewBuffer(data)
	hdr, err := chunk.NewHeader(buf)
	if err != nil {
		t.Fatal("expected NewHeader err to be nil")
	}
	_, err = chunk.NewVrsnChunk(hdr, buf)
	if err != io.ErrUnexpectedEOF {
		t.Fatal("expected NewVrsnChunk err to be ErrUnexpectedEOF")
	}
}
// TestNewVrsnChunkUnexpectedIdentifier checks that a chunk whose identifier is
// not "vrsn" (first byte altered) yields ErrUnexpectedIdentifier.
func TestNewVrsnChunkUnexpectedIdentifier(t *testing.T) {
	data := generateBytes(t, "7572736E0000003C0031002E0030002F00530065007200610074006F002000530063007200610074006300680020004C0049005600450020005200650076006900650077")
	buf := bytes.NewBuffer(data)
	hdr, err := chunk.NewHeader(buf)
	if err != nil {
		t.Fatal("expected NewHeader err to be nil")
	}
	_, err = chunk.NewVrsnChunk(hdr, buf)
	if err != chunk.ErrUnexpectedIdentifier {
		t.Fatal("expected NewVrsnChunk err to be ErrUnexpectedIdentifier")
	}
}
// TestVrsnHeader checks that the chunk exposes the same header it was
// constructed with.
func TestVrsnHeader(t *testing.T) {
	data := generateBytes(t, "7672736E0000003C0031002E0030002F00530065007200610074006F002000530063007200610074006300680020004C0049005600450020005200650076006900650077")
	buf := bytes.NewBuffer(data)
	hdr, err := chunk.NewHeader(buf)
	if err != nil {
		t.Fatal("expected NewHeader err to be nil")
	}
	vrsn, err := chunk.NewVrsnChunk(hdr, buf)
	if err != nil {
		t.Fatal("expected NewVrsnChunk err to be nil")
	}
	if vrsn.Header() != hdr {
		t.Fatal("expected header to be the same")
	}
}
// TestVrsnType checks that the chunk reports its type as "vrsn".
func TestVrsnType(t *testing.T) {
	data := generateBytes(t, "7672736E0000003C0031002E0030002F00530065007200610074006F002000530063007200610074006300680020004C0049005600450020005200650076006900650077")
	buf := bytes.NewBuffer(data)
	hdr, err := chunk.NewHeader(buf)
	if err != nil {
		t.Fatal("expected NewHeader err to be nil")
	}
	vrsn, err := chunk.NewVrsnChunk(hdr, buf)
	if err != nil {
		t.Fatal("expected NewVrsnChunk err to be nil")
	}
	actual := vrsn.Type()
	expected := "vrsn"
	if actual != expected {
		t.Fatalf("expected type to be %v, got %v", expected, actual)
	}
}
// TestVrsnVersion checks that the UTF-16 payload decodes to the expected
// Serato version string.
func TestVrsnVersion(t *testing.T) {
	data := generateBytes(t, "7672736E0000003C0031002E0030002F00530065007200610074006F002000530063007200610074006300680020004C0049005600450020005200650076006900650077")
	buf := bytes.NewBuffer(data)
	hdr, err := chunk.NewHeader(buf)
	if err != nil {
		t.Fatal("expected NewHeader err to be nil")
	}
	vrsn, err := chunk.NewVrsnChunk(hdr, buf)
	if err != nil {
		t.Fatal("expected NewVrsnChunk err to be nil")
	}
	actual := vrsn.Version()
	expected := "1.0/Serato Scratch LIVE Review"
	if actual != expected {
		t.Fatalf("expected version to be %v, got %v", expected, actual)
	}
}
|
package model
import (
"testing"
"tpay_backend/test"
)
// TestUpstreamModel_FindOneByUpstreamMerchantNo exercises the lookup against
// the shared test database engine and logs the result.
// NOTE(review): this is a smoke test - it makes no assertions, so it will
// pass even when the lookup fails.
func TestUpstreamModel_FindOneByUpstreamMerchantNo(t *testing.T) {
	upstreamModel := NewUpstreamModel(test.DbEngine)
	upMerchantNo := "3d2f9bb9-f370-412e-91fb-d011f76706f3"
	got, err := upstreamModel.FindOneByUpstreamMerchantNo(upMerchantNo)
	t.Logf("err:%v", err)
	t.Logf("got:%v", got)
}
|
package controllers
import (
"message/models"
"strconv"
"github.com/astaxie/beego"
"github.com/astaxie/beego/session"
)
// Operations about Users
type UserController struct {
	beego.Controller
}

// globalSessions manages the session cookie ("gosessionid") shared by all
// handlers in this controller.
var globalSessions *session.Manager

// init configures the in-memory session store (1h lifetime) and starts its
// garbage collector in the background.
// NOTE(review): the error from NewManager is discarded - a bad config would
// leave globalSessions nil and panic on first use.
func init() {
	globalSessions, _ = session.NewManager("memory", `{"cookieName":"gosessionid", "enableSetCookie,omitempty": true, "gclifetime":3600, "maxLifetime": 3600, "secure": false, "sessionIDHashFunc": "sha1", "sessionIDHashKey": "", "cookieLifeTime": 3600, "providerConfig": ""}`)
	go globalSessions.GC()
}
// @Title signin
// @Description user signin
// @Param username query string true "The username for signin"
// @Param password query string true "The password for signin"
// @Success 100 {string} signin success
// @Failure 101 signin failed
// @router /signin [get]
// Signin logs a user in, honouring a previously remembered session.
func (u *UserController) Signin() {
	sess := u.StartSession()
	// If the session already carries a username the login state was
	// remembered, so sign in automatically without checking credentials.
	if sess.Get("username") != nil {
		u.Data["json"] = "auto signin success"
	} else {
		username := u.GetString("username")
		password := u.GetString("password")
		if models.Signin(username, password) {
			sess.Set("username", username)
			u.Data["json"] = "signin success"
		} else {
			u.Data["json"] = "signin failed"
		}
	}
	u.ServeJSON()
}
// @Title signup
// @Description user signup
// @Param username query string true "The username for signup"
// @Param password query string true "The password for signup"
// @Success 200 {string} signup success
// @Failure 201 signup failed
// @router /signup [post]
// Signup registers a new user from the submitted credentials.
func (u *UserController) Signup() {
	result := "signup failed"
	if models.Signup(u.GetString("username"), u.GetString("password")) {
		result = "signup success"
	}
	u.Data["json"] = result
	u.ServeJSON()
}
// @Title signout
// @Description user signout
// @Success 300 {string} signout success
// @Failure 301 signout failed
// @router /signout [get]
// Signout drops the remembered username from the session.
func (u *UserController) Signout() {
	session := u.StartSession()
	session.Delete("username")
	u.Data["json"] = "signout success"
	u.ServeJSON()
}
// @Title getUser
// @Description user getUser
// @Param username query string true "The username for getUser"
// @Success 400 {string} getUser success, show username
// @Failure 401 getUser failed
// @router /:username [get]
// Get echoes the username back when the user exists.
func (u *UserController) Get() {
	name := u.GetString(":username")
	response := "getUser failed"
	if models.GetUser(name) {
		response = name
	}
	u.Data["json"] = response
	u.ServeJSON()
}
// @Title getContact
// @Description user getContact
// @Param username query string true "The username for getContact"
// @Success 500 {string} getContact success, show contacts
// @Failure 501 getContact failed
// @router /:username/contacts [get]
// GetContact returns the user's contact list.
func (u *UserController) GetContact() {
	name := u.GetString(":username")
	if contacts, err := models.GetContact(name); err == nil {
		u.Data["json"] = contacts
	} else {
		u.Data["json"] = "getContact failed"
	}
	u.ServeJSON()
}
// @Title addContact
// @Description user addContact
// @Param username query string true "The username for me"
// @Param contact query string true "The username for contact"
// @Success 600 {id} addContact success , show id
// @Failure 601 addContact failed
// @router /:username/contacts/:contact_username [get]
// AddContact links the contact to the user and returns the new record id.
func (u *UserController) AddContact() {
	me := u.GetString(":username")
	other := u.GetString(":contact_username")
	if id, err := models.AddContact(me, other); err == nil {
		u.Data["json"] = id
	} else {
		u.Data["json"] = "addContact failed"
	}
	u.ServeJSON()
}
// @Title delContact
// @Description user delContact
// @Param username query string true "The username for me"
// @Param contact query string true "The username for contact"
// @Success 700 {string} delContact success
// @Failure 701 delContact failed
// @router /:username/contacts/:contact_username [delete]
// DelContact removes the contact from the user's contact list.
func (u *UserController) DelContact() {
	me := u.GetString(":username")
	other := u.GetString(":contact_username")
	result := "delContact failed"
	if models.DelContact(me, other) {
		result = "delContact success"
	}
	u.Data["json"] = result
	u.ServeJSON()
}
// @Title getChat
// @Description user getChat
// @Param username query string true "The username for me"
// @Param contact query string true "The username for contact"
// @Success 800 {string} getChat success, show chats
// @router /:username/contacts/:contact_username/chats [get]
// GetChat returns the chat history between the user and the contact.
func (u *UserController) GetChat() {
	u.Data["json"] = models.GetChat(u.GetString(":username"), u.GetString(":contact_username"))
	u.ServeJSON()
}
// @Title delChat
// @Description user delContact
// @Param id query string true "The id for chat"
// @Success 900 {string} delChat success
// @Failure 901 delChat failed
// @router /:username/contacts/:contact_username/chats/:id [delete]
// DelChat deletes the chat record identified by the :id path parameter.
func (u *UserController) DelChat() {
	// Reject non-numeric ids up front; the original silently ignored the
	// parse error and passed 0 on to the model layer.
	id, err := strconv.ParseInt(u.GetString(":id"), 10, 64)
	if err != nil {
		u.Data["json"] = "delChat failed"
		u.ServeJSON()
		return
	}
	if models.DelChat(id) {
		u.Data["json"] = "delChat success"
	} else {
		u.Data["json"] = "delChat failed"
	}
	u.ServeJSON()
}
// @Title updateChat
// @Description user updateChat
// @Param id query string true "The id for chat"
// @Success 1000 {string} updateChat success
// @Failure 1001 updateChat failed
// @router /:username/contacts/:contact_username/chats/:id [get]
// UpdateChat updates the chat record identified by the :id path parameter.
func (u *UserController) UpdateChat() {
	// Reject non-numeric ids up front; the original silently ignored the
	// parse error and passed 0 on to the model layer.
	id, err := strconv.ParseInt(u.GetString(":id"), 10, 64)
	if err != nil {
		u.Data["json"] = "updateChat failed"
		u.ServeJSON()
		return
	}
	if models.UpdateChat(id) {
		u.Data["json"] = "updateChat success"
	} else {
		u.Data["json"] = "updateChat failed"
	}
	u.ServeJSON()
}
// @Title getUnreadChat
// @Description user getUnreadChat
// @Param username query string true "The username for me"
// @Param contact query string true "The username for contact"
// @Success 800 {string} getUnreadChat success, show unread chats count
// @router /:username/contacts/:contact_username/chats/unread [get]
// GetUnreadChat returns the unread chats between the user and the contact.
func (u *UserController) GetUnreadChat() {
	u.Data["json"] = models.GetUnreadChat(u.GetString(":username"), u.GetString(":contact_username"))
	u.ServeJSON()
}
|
package leetcode
/*Given head which is a reference node to a singly-linked list.
The value of each node in the linked list is either 0 or 1.
The linked list holds the binary representation of a number.
Return the decimal value of the number in the linked list.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/convert-binary-number-in-a-linked-list-to-integer
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// getDecimalValue interprets the linked list of 0/1 node values as a binary
// number (most significant bit first) and returns its decimal value.
func getDecimalValue(head *ListNode) int {
	value := 0
	for node := head; node != nil; node = node.Next {
		value = value*2 + node.Val
	}
	return value
}
|
package config
import (
"github.com/Azer0s/quacktors/logging"
)
// SetLogger sets the Logger implementation used by quacktors.
// (LogrusLogger by default)
func SetLogger(l logging.Logger) {
	logger = l
}
// GetLogger gets the currently configured Logger implementation.
func GetLogger() logging.Logger {
	return logger
}
// SetQpmdPort sets the port quacktors uses to connect to
// local and remote qpmd instances. (7161 by default)
func SetQpmdPort(port uint16) {
	qpmdPort = port
}
// GetQpmdPort gets the currently configured qpmd port.
func GetQpmdPort() uint16 {
	return qpmdPort
}
|
package config
import (
"fmt"
"database/sql"
"reflect"
"unsafe"
"strconv"
"errors"
"strings"
)
// ConfigDB describes the SQL connection and the table that holds key/value
// configuration parameters (columns NAME and VALUE).
type ConfigDB struct {
	DriverName string // database/sql driver name
	Dbhost string // server host; defaults to "127.0.0.1" when empty
	Dbport string // server port; defaults to "3306" when empty
	Dbuser string // user; defaults to "root" when empty
	Dbpassword string
	Dbname string // database (schema) name
	Tblname string // parameter table; defaults to "PARAMS" when empty
}
// GetParameters reads NAME/VALUE rows from the configured database table and
// assigns each value to the struct field of ic whose name matches NAME
// (case-insensitively). Fields without a matching row keep their current
// values; unsupported field kinds are skipped.
//
// ic must be a non-nil pointer to a struct. Unsafe pointer writes are used so
// unexported struct fields can be populated as well.
func (c *ConfigDB) GetParameters(ic Parameter) error {
	// Apply connection defaults for anything left blank.
	if c.Dbhost == "" {
		c.Dbhost = "127.0.0.1"
	}
	if c.Dbport == "" {
		c.Dbport = "3306"
	}
	if c.Dbuser == "" {
		c.Dbuser = "root"
	}
	if c.Tblname == "" {
		c.Tblname = "PARAMS"
	}
	dburl := c.Dbuser + ":" + c.Dbpassword + "@tcp(" + c.Dbhost + ":" + c.Dbport + ")/" + c.Dbname + "?charset=utf8"
	db, err := sql.Open(c.DriverName, dburl)
	if err != nil {
		fmt.Println(err)
		return err
	}
	// Close the pool when done; the original leaked the connection.
	defer db.Close()
	rows, err := db.Query(`SELECT NAME,VALUE FROM ` + c.Tblname)
	if err != nil {
		fmt.Println(err)
		return err
	}
	defer rows.Close()
	// Collect NAME -> VALUE, lower-casing names for case-insensitive matching.
	paramsStrmap := map[string]string{}
	for rows.Next() {
		var name, value string
		if err = rows.Scan(&name, &value); err != nil {
			fmt.Println(err)
			return err
		}
		paramsStrmap[strings.ToLower(name)] = value
	}
	if err = rows.Err(); err != nil {
		fmt.Println(err)
		return err
	}
	// Validate before reflecting. The original condition dereferenced
	// val.Elem() while kd could be a non-pointer, which panics instead of
	// returning an error for misuse.
	val := reflect.ValueOf(ic)
	if val.Kind() != reflect.Ptr || val.Elem().Kind() != reflect.Struct {
		fmt.Println("expect struct")
		return errors.New("No right input")
	}
	elem := val.Elem()
	numFields := elem.NumField()
	for i := 0; i < numFields; i++ {
		fieldName := elem.Type().Field(i).Name
		value, ok := paramsStrmap[strings.ToLower(fieldName)]
		if !ok {
			// No row for this field: leave it at its current value.
			continue
		}
		field := elem.FieldByName(fieldName)
		switch field.Kind() {
		case reflect.String:
			*(*string)(unsafe.Pointer(field.Addr().Pointer())) = value
		case reflect.Int:
			item, err := strconv.Atoi(value)
			if err != nil {
				fmt.Println(err)
				return err
			}
			*(*int)(unsafe.Pointer(field.Addr().Pointer())) = item
		case reflect.Int8:
			item, err := strconv.ParseInt(value, 10, 8)
			if err != nil {
				fmt.Println(err)
				return err
			}
			*(*int8)(unsafe.Pointer(field.Addr().Pointer())) = int8(item)
		case reflect.Int16:
			item, err := strconv.ParseInt(value, 10, 16)
			if err != nil {
				fmt.Println(err)
				return err
			}
			*(*int16)(unsafe.Pointer(field.Addr().Pointer())) = int16(item)
		case reflect.Int32:
			item, err := strconv.ParseInt(value, 10, 32)
			if err != nil {
				fmt.Println(err)
				return err
			}
			*(*int32)(unsafe.Pointer(field.Addr().Pointer())) = int32(item)
		case reflect.Int64:
			item, err := strconv.ParseInt(value, 10, 64)
			if err != nil {
				fmt.Println(err)
				return err
			}
			*(*int64)(unsafe.Pointer(field.Addr().Pointer())) = item
		case reflect.Bool:
			item, err := strconv.ParseBool(value)
			if err != nil {
				fmt.Println(err)
				return err
			}
			*(*bool)(unsafe.Pointer(field.Addr().Pointer())) = item
		case reflect.Float32:
			item, err := strconv.ParseFloat(value, 32)
			if err != nil {
				fmt.Println(err)
				return err
			}
			*(*float32)(unsafe.Pointer(field.Addr().Pointer())) = float32(item)
		case reflect.Float64:
			item, err := strconv.ParseFloat(value, 64)
			if err != nil {
				fmt.Println(err)
				return err
			}
			*(*float64)(unsafe.Pointer(field.Addr().Pointer())) = item
		}
	}
	return nil
}
|
package scache
import (
"sync/atomic"
)
// timer is a monotonically increasing counter that is safe for concurrent use.
type timer struct {
	value *uint32
}

// newTimer returns a timer whose counter starts at zero.
func newTimer() *timer {
	return &timer{value: new(uint32)}
}

// Tick atomically increments the counter and returns the new value.
func (t *timer) Tick() uint32 {
	return atomic.AddUint32(t.value, 1)
}

// Value atomically reads the current counter value.
func (t *timer) Value() uint32 {
	return atomic.LoadUint32(t.value)
}
|
package queries
import (
"log"
"github.com/jmoiron/sqlx"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/configuration"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/energy_resources/models"
)
// GET_ENERGY_RESOURCES_SQL selects every column of every row from the
// energy_resources."EnergyResources" table.
const GET_ENERGY_RESOURCES_SQL = `
SELECT
	*
FROM
	energy_resources."EnergyResources";`
// GetAllEnergyResources loads every row of energy_resources."EnergyResources".
//
// NOTE(review): errors still terminate the process via log.Fatal (matching the
// original behaviour), so the named err return is only meaningful on success.
func GetAllEnergyResources() (energyRes []models.EnergyResourceEdit, err error) {
	db, err := sqlx.Open("postgres", configuration.ConnectionString)
	if err != nil {
		// Check the error before deferring Close: on failure the handle may
		// be nil and the original's early defer would panic.
		log.Fatal(err)
		return
	}
	defer db.Close()
	if err = db.Select(&energyRes, GET_ENERGY_RESOURCES_SQL); err != nil {
		log.Fatal(err)
		return
	}
	return
}
|
package infra
import "fmt"
// init registers the configuration starter with the infra registry so it is
// run as part of the application boot sequence.
func init() {
	Register(&ConfStarter{})
}

// ConfStarter handles the configuration phases of the starter lifecycle.
type ConfStarter struct {
	BaseStarter
}

// Init performs configuration initialisation.
func (c *ConfStarter) Init(ctx StarterContext) {
	fmt.Println("配置初始化")
}

// Setup performs configuration installation.
func (c *ConfStarter) Setup(ctx StarterContext) {
	fmt.Println("配置安装")
}

// Start performs configuration startup.
func (c *ConfStarter) Start(ctx StarterContext) {
	fmt.Println("配置启动")
}
|
package db
import (
"errors"
"time"
"github.com/jackc/pgx"
"github.com/brianvoe/gofakeit/v5"
)
// Club is a row of the clubs table.
type Club struct {
	ID uint `db:"id"`
	InsertedAt *time.Time `db:"inserted_at"`
	UpdatedAt *time.Time `db:"updated_at"`
	OwnerID string `db:"owner_id"`
	RoleID *string `db:"role_id"`
	ChannelID *string `db:"channel_id"`
	Title string `db:"title"`
	Description *string `db:"description"`
	Symbol string `db:"symbol"`
	IconURL *string `db:"icon_url"`
	XP uint64 `db:"xp"`
	ExpiredAt *time.Time `db:"expired_at"`
	Verified bool `db:"verified"`
}

// randomize fills c with fake data (gofakeit) for use in tests.
func (c *Club) randomize() {
	desc := gofakeit.Paragraph(1, 1, 10, "")
	chid := gofakeit.Numerify("test##############")
	rlid := gofakeit.Numerify("test##############")
	c.OwnerID = gofakeit.Numerify("test##############")
	c.ChannelID = &chid
	c.RoleID = &rlid
	c.Title = gofakeit.Word()
	c.Symbol = gofakeit.Emoji()
	c.Description = &desc
}

// AddMember inserts the (club, member) pair into club_members inside tx;
// an existing membership is left untouched via ON CONFLICT DO NOTHING.
func (c *Club) AddMember(tx *pgx.Tx, memberID string) (err error) {
	_, err = tx.Exec(`
	INSERT INTO "club_members"("club_id","user_id")
	VALUES($1, $2)
	ON CONFLICT DO NOTHING
	`,
		c.ID,
		memberID,
	)
	return
}

// DeleteMember removes the user's membership rows.
// NOTE(review): the delete filters on user_id only, so it removes the user
// from every club, not just from c - confirm this is intended.
func (c *Club) DeleteMember(memberID string) (err error) {
	_, err = pgxconn.Exec(`
	DELETE FROM club_members
	WHERE user_id = $1
	`,
		memberID,
	)
	return
}

// DeleteMembers removes all membership rows belonging to this club.
func (c *Club) DeleteMembers() (err error) {
	_, err = pgxconn.Exec(`
	DELETE FROM club_members
	WHERE club_id = $1
	`,
		c.ID,
	)
	return
}

// HasMember reports whether the user has a club_members row.
// NOTE(review): the query does not filter on c.ID, so membership in ANY club
// returns true - confirm this is intended.
func (c *Club) HasMember(memberID string) (result bool, err error) {
	err = pgxconn.QueryRow(`
	SELECT EXISTS(SELECT 1 FROM club_members WHERE user_id = $1)
	`,
		memberID,
	).Scan(
		&result,
	)
	return
}

// Delete removes the club row and all of its memberships in one batch.
func (c *Club) Delete() (err error) {
	_, err = pgxconn.Exec(`
	DELETE FROM clubs WHERE id = $1;
	DELETE FROM club_members WHERE club_id = $1;
	`,
		c.ID,
	)
	return
}
// ClubMember is a row of the club_members join table linking users to clubs.
type ClubMember struct {
	ClubID uint `db:"club_id"`
	UserID string `db:"user_id"`
	InsertedAt *time.Time `db:"inserted_at"`
	UpdatedAt *time.Time `db:"updated_at"`
	XP uint64 `db:"xp"`
}
// clubs groups queries that operate on the clubs table as a whole.
type clubs struct{}

// Create inserts the club together with its owner's membership in a single
// statement and stores the generated id back into club.ID.
func (c *clubs) Create(club *Club) (err error) {
	err = pgxconn.QueryRow(`
	WITH club_id as (
		INSERT INTO clubs(owner_id, title, symbol, expired_at)
		VALUES ($1, $2, $3, $4)
		RETURNING id
	), owner_user as (
		INSERT INTO club_members(club_id, user_id) VALUES
		((SELECT id FROM club_id LIMIT 1), $1)
	)
	SELECT id FROM club_id
	`,
		club.OwnerID,
		club.Title,
		club.Symbol,
		club.ExpiredAt,
	).Scan(&club.ID)
	return
}

// DeleteByOwner removes the clubs owned by ownerID along with their members.
// NOTE(review): the membership delete compares against "(SELECT id ... LIMIT 1)",
// so if an owner somehow has several clubs only one club's members are removed.
func (c *clubs) DeleteByOwner(ownerID string) (err error) {
	_, err = pgxconn.Exec(`
	WITH club_id as (
		DELETE FROM clubs WHERE owner_id = $1
		RETURNING id
	), owner_user as (
		DELETE FROM club_members
		WHERE club_id = (SELECT id FROM club_id LIMIT 1)
	)
	SELECT * FROM club_id
	`,
		ownerID,
	)
	return
}

// GetClubByUser returns the club that userID is a member of, or (nil, nil)
// when the user belongs to no club.
func (c *clubs) GetClubByUser(userID string) (club *Club, err error) {
	club = new(Club)
	err = pgxconn.QueryRow(`
	SELECT
		c.id,
		c.inserted_at,
		c.updated_at,
		c.owner_id,
		c.role_id,
		c.channel_id,
		c.title,
		c.description,
		c.symbol,
		c.icon_url,
		c.xp,
		c.expired_at,
		c.verified
	FROM clubs c
	JOIN club_members cm on c.id = cm.club_id
	WHERE cm.user_id = $1
	`,
		userID,
	).Scan(
		&club.ID,
		&club.InsertedAt,
		&club.UpdatedAt,
		&club.OwnerID,
		&club.RoleID,
		&club.ChannelID,
		&club.Title,
		&club.Description,
		&club.Symbol,
		&club.IconURL,
		&club.XP,
		&club.ExpiredAt,
		&club.Verified,
	)
	if errors.Is(err, pgx.ErrNoRows) {
		return nil, nil
	}
	return club, err
}

// GetExpired lists unverified clubs whose expired_at timestamp has passed.
func (c *clubs) GetExpired() ([]Club, error) {
	rows, err := pgxconn.Query(`
	SELECT
		id,
		inserted_at,
		updated_at,
		owner_id,
		role_id,
		channel_id,
		title,
		description,
		symbol,
		icon_url,
		xp,
		expired_at,
		verified
	FROM clubs
	WHERE NOT verified
	AND localtimestamp >= expired_at
	`)
	if errors.Is(err, pgx.ErrNoRows) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	var clubs []Club
	for rows.Next() {
		var club Club
		err = rows.Scan(
			&club.ID,
			&club.InsertedAt,
			&club.UpdatedAt,
			&club.OwnerID,
			&club.RoleID,
			&club.ChannelID,
			&club.Title,
			&club.Description,
			&club.Symbol,
			&club.IconURL,
			&club.XP,
			&club.ExpiredAt,
			&club.Verified,
		)
		if err != nil {
			return nil, err
		}
		clubs = append(clubs, club)
	}
	return clubs, nil
}

// RemoveExpired deletes unverified, expired clubs (and their memberships)
// and returns the removed club rows.
func (c *clubs) RemoveExpired() ([]Club, error) {
	rows, err := pgxconn.Query(`
	WITH club_id as (
		DELETE FROM clubs WHERE NOT verified
		AND localtimestamp >= expired_at
		RETURNING *
	), owner_user as (
		DELETE FROM club_members
		WHERE club_id IN (SELECT id FROM club_id)
	)
	SELECT
		id,
		inserted_at,
		updated_at,
		owner_id,
		role_id,
		channel_id,
		title,
		description,
		symbol,
		icon_url,
		xp,
		expired_at,
		verified
	FROM club_id
	`)
	if errors.Is(err, pgx.ErrNoRows) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	var clubs []Club
	for rows.Next() {
		var club Club
		err = rows.Scan(
			&club.ID,
			&club.InsertedAt,
			&club.UpdatedAt,
			&club.OwnerID,
			&club.RoleID,
			&club.ChannelID,
			&club.Title,
			&club.Description,
			&club.Symbol,
			&club.IconURL,
			&club.XP,
			&club.ExpiredAt,
			&club.Verified,
		)
		if err != nil {
			return nil, err
		}
		clubs = append(clubs, club)
	}
	return clubs, nil
}
|
package repos
import (
"fmt"
"os"
"path/filepath"
"testing"
"time"
"github.com/au/com/cybernostics/crowdscore/server/services"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
)
var (
	// DB is the shared gorm handle used by every test in this package.
	DB *gorm.DB
)

// init opens (or creates) a throw-away SQLite database in the system temp
// directory and creates the users table shared by the tests.
// NOTE(review): CreateTable's result is ignored and the same temp file is
// reused across runs - confirm this is intended.
func init() {
	user := services.User{}
	var err error
	DB, err = gorm.Open("sqlite3", filepath.Join(os.TempDir(), "gorm.db"))
	if err != nil {
		panic(err.Error())
	}
	DB.CreateTable(&user)
}
// TestCanSetPassword creates two users and exercises FindAll and the
// predicate-based Find on the repository.
// NOTE(review): the test only prints results and makes no assertions, so it
// passes regardless of the repository's behaviour.
func TestCanSetPassword(t *testing.T) {
	user := services.User{
		Username: "barry@barry.com",
		Created: time.Now().Unix(),
	}
	repo := services.NewUserRepository(DB)
	repo.CreateUser(&user)
	users, _ := repo.FindAll()
	for i, u := range users {
		fmt.Printf("%s - %d\n", u.Username, i)
	}
	user = services.User{
		Username: "john@barry.com",
		Created: time.Now().Unix(),
	}
	repo.CreateUser(&user)
	users, _ = repo.Find(func(DB *gorm.DB) *gorm.DB { return DB.Where("username = ?", "john@barry.com") })
	for i, u := range users {
		fmt.Printf("%s - %d\n", u.Username, i)
	}
}
|
package passwordcombiner
import (
"fmt"
"github.com/cloudfoundry-incubator/cloud-service-broker/internal/encryption/gcmencryptor"
)
// CanaryInput is the value that is encrypted with the key and stored in the database
// to check that the key has not changed. Because we encrypt with a nonce, it's not
// possible to create a rainbow table for this.
const CanaryInput = "canary value"
// encryptCanary encrypts CanaryInput with the supplied encryptor; the result
// is stored in the database so later runs can verify the key has not changed.
func encryptCanary(encryptor gcmencryptor.GCMEncryptor) (string, error) {
	return encryptor.Encrypt([]byte(CanaryInput))
}
// decryptCanary attempts to decrypt the stored canary with the encryptor.
// A decryption/authentication failure is reported as a password mismatch for
// the given label; any other error is passed through unchanged.
func decryptCanary(encryptor gcmencryptor.GCMEncryptor, canary, label string) error {
	_, err := encryptor.Decrypt(canary)
	if err == nil {
		return nil
	}
	// Unfortunately the underlying errors are plain errors.errorString
	// values, so matching on the message text is the only available check.
	msg := err.Error()
	if msg == "cipher: message authentication failed" || msg == "malformed ciphertext" {
		return fmt.Errorf("canary mismatch for password labeled %q - check that the password value has not changed", label)
	}
	return err
}
|
package funciones
import (
"fmt"
)
// Saludar prints a greeting ("Hola <name>") to stdout.
func Saludar(name string) {
	fmt.Printf("Hola %s\n", name)
}
// Despedirse prints a farewell ("Adios <name>") to stdout.
func Despedirse(name string) {
	fmt.Printf("Adios %s\n", name)
}
|
package main
import (
"context"
"os"
"regexp"
"time"
"github.com/SentientTechnologies/studio-go-runner/internal/runner"
"github.com/SentientTechnologies/studio-go-runner/internal/types"
"github.com/go-stack/stack"
"github.com/prometheus/client_golang/prometheus"
)
// This file contains the implementation of a RabbitMQ service for
// retrieving and handling StudioML workloads within a self hosted
// queue context
//
// serviceRMQ polls RabbitMQ for known queues matching the configured queue
// name pattern and hands matches to the Projects lifecycle manager. It runs
// until ctx is cancelled, re-checking every checkInterval (the first check
// happens after one second so credentials are validated promptly).
func serviceRMQ(ctx context.Context, checkInterval time.Duration, connTimeout time.Duration) {
	logger.Debug("starting serviceRMQ", stack.Trace().TrimRuntime())
	defer logger.Debug("stopping serviceRMQ", stack.Trace().TrimRuntime())
	// An empty AMQP URL means RabbitMQ support is switched off entirely.
	if len(*amqpURL) == 0 {
		logger.Info("rabbitMQ services disabled", stack.Trace().TrimRuntime())
		return
	}
	// Tracks one cancellable context per live project/queue.
	live := &Projects{
		queueType: "rabbitMQ",
		projects: map[string]context.CancelFunc{},
	}
	// NewRabbitMQ will strip off the user name and password if they appear in
	// the first URL but will preserve them inside the second parameter
	rmq, err := runner.NewRabbitMQ(*amqpURL, *amqpURL)
	if err != nil {
		// NOTE(review): execution continues and rmq is still used below after
		// this error - confirm NewRabbitMQ returns a usable value on failure.
		logger.Error(err.Error())
	}
	// The regular expression is validated in the main.go file
	matcher, _ := regexp.Compile(*queueMatch)
	// first time through make sure the credentials are checked immediately
	qCheck := time.Duration(time.Second)
	// Watch for when the server should not be getting new work
	state := runner.K8sStateUpdate{
		State: types.K8sRunning,
	}
	lifecycleC := make(chan runner.K8sStateUpdate, 1)
	id, err := k8sStateUpdates().Add(lifecycleC)
	// Unsubscribe and close the lifecycle channel when the service stops.
	defer func() {
		k8sStateUpdates().Delete(id)
		close(lifecycleC)
	}()
	// The hostname is only used to label metrics; failure is non-fatal.
	host, errGo := os.Hostname()
	if errGo != nil {
		logger.Warn(errGo.Error())
	}
	for {
		select {
		case <-ctx.Done():
			live.Lock()
			defer live.Unlock()
			// When shutting down stop all projects
			for _, quiter := range live.projects {
				if quiter != nil {
					quiter()
				}
			}
			return
		case state = <-lifecycleC:
		case <-time.After(qCheck):
			qCheck = checkInterval
			// If the pulling of work is currently suspending bail out of checking the queues
			if state.State != types.K8sRunning {
				queueIgnored.With(prometheus.Labels{"host": host, "queue_type": live.queueType, "queue_name": "*"}).Inc()
				logger.Debug("k8s has RMQ disabled", "stack", stack.Trace().TrimRuntime())
				continue
			}
			found, err := rmq.GetKnown(matcher, connTimeout)
			if err != nil {
				// Back off on failure. NOTE(review): the doubling is unbounded.
				logger.Warn("unable to refresh RMQ manifest", err.Error())
				qCheck = qCheck * 2
			}
			if len(found) == 0 {
				logger.Warn("no queues found", "uri", rmq.SafeURL, "stack", stack.Trace().TrimRuntime())
				continue
			}
			live.Lifecycle(ctx, found)
		}
	}
}
|
package app
import (
"fmt"
"github.com/callumj/weave/core"
"github.com/callumj/weave/remote"
"github.com/callumj/weave/remote/uptypes"
"github.com/callumj/weave/tools"
"log"
"os"
"path/filepath"
)
// performCompilation runs the full build pipeline described by the weave
// configuration at configPath: it prepares the working directory, builds the
// base archive from the source tree, produces one artefact per configuration
// (optionally restricted by options.OnlyRun), and finally uploads collected
// artefacts to S3 when configured and not disabled.
func performCompilation(configPath string, options option_set) {
	fullPath := filepath.Dir(configPath)
	// ensure working dir exists
	workingDir := fmt.Sprintf("%v/working", fullPath)
	if !tools.PathExists(workingDir) {
		log.Println("Working directory does not exist, creating")
		err := os.Mkdir(workingDir, 0775)
		if err != nil {
			panicQuitf("Unable to create %v\r\n", workingDir)
		}
	}
	instr := core.ParseInstruction(configPath)
	if instr == nil {
		panicQuit()
	}
	core.ExplainInstruction(instr)
	baseContents := core.GetContents(instr.Src, instr.IgnoreReg)
	if baseContents == nil {
		panicQuit()
	}
	baseSuffix := core.GenerateNameSuffix(*baseContents)
	baseFileName := fmt.Sprintf("%v/%v.tar", workingDir, baseSuffix)
	baseArchive := core.CreateBaseArchive(instr.Src, baseContents.Contents, baseFileName)
	if baseArchive == nil {
		panicQuitf("Failed to create base archive.")
	}
	var col []uptypes.FileDescriptor
	var found bool
	for _, conf := range instr.Configurations {
		// When OnlyRun is set, skip every configuration except the named one.
		if len(options.OnlyRun) != 0 {
			if options.OnlyRun != conf.Name {
				continue
			}
			found = true
		}
		finalPath := processConfiguration(conf, fullPath, instr, baseContents, baseArchive)
		if instr.S3 != nil && !options.DisableS3 {
			col = appendForS3(finalPath, conf, col)
		}
	}
	if len(options.OnlyRun) != 0 && !found {
		panicQuitf("The specified configuration %v was not found\r\n", options.OnlyRun)
	}
	if len(col) != 0 {
		remote.UploadToS3(*instr.S3, col)
	}
}
// processConfiguration builds the artefact for one configuration: it merges
// the configuration's own files over the filtered base archive, gzips the
// result, optionally encrypts it with the configuration's key file, and
// returns the path of the final artefact.
func processConfiguration(conf core.Configuration, fullPath string, instr *core.Instruction, baseContents *core.ContentsInfo, baseArchive *core.ArchiveInfo) string {
	thisPath := fmt.Sprintf("%v/configurations/%v", fullPath, conf.Name)
	workingDir := fmt.Sprintf("%v/working", fullPath)
	log.Printf("Configuring: %v\r\n", thisPath)
	// Contents specific to this configuration, plus the base contents with
	// the configuration's except/only filters applied.
	thisContents := constructContents(thisPath, baseContents, instr)
	filteredContents := core.FilterContents(*baseContents, conf.ExceptReg, conf.OnlyReg)
	recalcBaseSuffix := core.GenerateNameSuffix(*filteredContents)
	tarPath := fmt.Sprintf("%v/%v_%v.tar", workingDir, conf.Name, core.GenerateFinalNameSuffix(recalcBaseSuffix, *thisContents))
	if !core.MergeIntoBaseArchive(*baseArchive, thisPath, thisContents.Contents, tarPath, filteredContents) {
		panicQuitf("Failed to merge with base archive. Quitting.")
	}
	// Compress, then remove the intermediate tar.
	gzipPath := fmt.Sprintf("%v.gz", tarPath)
	core.CompressArchive(tarPath, gzipPath)
	os.Remove(tarPath)
	finalPath := gzipPath
	if instr.Encrypt {
		// Encrypt using keys/<name>; on success the gzip is replaced by the
		// encrypted file.
		cryptPath := fmt.Sprintf("%v.enc", gzipPath)
		keyFile := fmt.Sprintf("%v/keys/%v", fullPath, conf.Name)
		if !core.EncryptFile(gzipPath, cryptPath, keyFile) {
			panicQuitf("Failed to encrypt %v. Quiting..\r\n", gzipPath)
		} else {
			finalPath = cryptPath
		}
		os.Remove(gzipPath)
	}
	return finalPath
}
// appendForS3 stats the finished artefact at finalPath and appends a file
// descriptor for it (path, size, configuration name, base name) to col.
func appendForS3(finalPath string, conf core.Configuration, col []uptypes.FileDescriptor) []uptypes.FileDescriptor {
	info, err := os.Stat(finalPath)
	if err != nil {
		panicQuitf("Unable to query %v\r\n", finalPath)
	}
	return append(col, uptypes.FileDescriptor{
		Path:     finalPath,
		Size:     info.Size(),
		Name:     conf.Name,
		FileName: filepath.Base(finalPath),
	})
}
// constructContents returns the contents listing for a configuration
// directory, or an empty listing (inheriting the base's Newest timestamp)
// when the directory does not exist.
func constructContents(thisPath string, baseContents *core.ContentsInfo, instr *core.Instruction) *core.ContentsInfo {
	if tools.PathExists(thisPath) {
		return core.GetContents(thisPath, instr.IgnoreReg)
	}
	return &core.ContentsInfo{
		Size:     0,
		Contents: []core.FileInfo{},
		Newest:   baseContents.Newest,
	}
}
|
package dynamic
import "fmt"
// ChangeStr returns the Levenshtein edit distance between src and des: the
// minimum number of single-byte insertions, deletions and substitutions
// needed to turn src into des. The result is also printed to stdout, as in
// the original implementation.
//
// dp[i][j] is the distance between src[:i] and des[:j]. The table is now
// allocated dynamically, so inputs longer than the original fixed 100-byte
// bound no longer panic.
func ChangeStr(src, des []byte) int {
	n, m := len(src), len(des)
	dp := make([][]int, n+1)
	for i := range dp {
		dp[i] = make([]int, m+1)
		dp[i][0] = i // delete all i bytes of the prefix
	}
	for j := 1; j <= m; j++ {
		dp[0][j] = j // insert all j bytes of the target prefix
	}
	for i := 0; i < n; i++ {
		for j := 0; j < m; j++ {
			// Substitution (free when the bytes already match) ...
			best := dp[i][j]
			if src[i] != des[j] {
				best++
			}
			// ... versus deletion and insertion.
			if v := dp[i][j+1] + 1; v < best {
				best = v
			}
			if v := dp[i+1][j] + 1; v < best {
				best = v
			}
			dp[i+1][j+1] = best
		}
	}
	fmt.Println(dp[n][m])
	return dp[n][m]
}
// SChangeStr computes the same edit distance as ChangeStr but keeps only two
// rows of the DP table, reducing the extra space to O(len(des)).
func SChangeStr(src, des []byte) int {
	srcLen, desLen := len(src), len(des)
	cur := make([]int, desLen+1)
	prev := make([]int, desLen+1)
	for j := 1; j <= desLen; j++ {
		cur[j] = j
		prev[j] = j
	}
	for i := 0; i < srcLen; i++ {
		cur[0] = i + 1
		for j := 0; j < desLen; j++ {
			// prev[j] still holds the previous row's value at column j here;
			// it is refreshed to the current row only after being consumed.
			if src[i] == des[j] {
				cur[j+1] = minArray(prev[j], cur[j+1]+1, cur[j]+1)
			} else {
				cur[j+1] = minArray(prev[j]+1, cur[j+1]+1, cur[j]+1)
			}
			prev[j] = cur[j]
		}
	}
	return cur[desLen]
}
|
package main
import (
"sync/atomic"
)
// RR implements a naive round-robin balancing algorithm over a fixed set of
// backends.
type RR struct {
	upstream []Backend
	index uint64
}

// NewRR returns a brand new naive round robin balancer.
func NewRR(backends ...Backend) *RR {
	return &RR{backends, 0}
}

// Select returns the next backend in round-robin order (not randomly), or
// found == false when there are no backends. The atomic counter makes it
// safe for concurrent use.
//
// NOTE(review): because the counter is incremented before the modulo, the
// first call returns upstream[1] rather than upstream[0] when there is more
// than one backend - rotation is still fair, but confirm the starting offset
// is intended.
func (r *RR) Select() (b *Backend, found bool) {
	length := uint64(len(r.upstream))
	if length == 0 {
		return nil, false
	} else if length == 1 {
		return &r.upstream[0], true
	}
	// TODO: should we check for overflow? (AddUint64 wraps around at 2^64)
	return &(r.upstream[atomic.AddUint64(&r.index, 1)%length]), true
}
|
package service
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/google/uuid"
"github.com/sanguohot/medichain/chain"
"github.com/sanguohot/medichain/datacenter"
"github.com/sanguohot/medichain/etc"
"github.com/sanguohot/medichain/util"
"github.com/sanguohot/medichain/zap"
)
// FileAction reports the identifiers produced when a file is registered on
// chain: its uuid, both content hashes, and the transaction that stored it.
type FileAction struct {
	UUID uuid.UUID `json:"uuid"`
	Keccak256Hash common.Hash `json:"keccak256Hash"`
	Sha256Hash common.Hash `json:"sha256Hash"`
	TransactionHash common.Hash `json:"transactionHash"`
}

// FileSignerAction identifies a single signer of a file.
type FileSignerAction struct {
	UUID uuid.UUID `json:"uuid"`
	//Type string `json:"type"`
}

// FileSignerAndDataAction pairs the signers of a file with their signatures.
// NOTE(review): the code visible here does not show whether Signers and
// Signatures are index-aligned - confirm with the callers.
type FileSignerAndDataAction struct {
	Signers []FileSignerAction `json:"signers"`
	Signatures []string `json:"signatures"`
}

// FileAddSignAction reports the result of adding a signature to a file.
type FileAddSignAction struct {
	Signature string `json:"signature"`
	TransactionHash common.Hash `json:"transactionHash"`
}
// requireKeccak256HashNotExist returns an error when a file whose keccak256
// hash matches the given hash is already registered on chain.
func requireKeccak256HashNotExist(hash common.Hash) error {
	fileUuid, err := chain.FilesDataGetUuidByKeccak256Hash(hash)
	if err != nil {
		return err
	}
	return requireFileUuidNotExist(*fileUuid)
}
// requireFileUuidNotExist returns util.ErrFileExist when the uuid is already
// registered on chain, or any lookup error encountered.
func requireFileUuidNotExist(fileUuid uuid.UUID) error {
	exists, err := chain.FilesDataIsUuidExist(fileUuid)
	switch {
	case err != nil:
		return err
	case exists:
		return util.ErrFileExist
	default:
		return nil
	}
}
// requireFileUuidExist returns util.ErrFileNotExist when no file with the
// given uuid is registered on chain.
func requireFileUuidExist(fileUuid uuid.UUID) error {
	exists, err := chain.FilesDataIsUuidExist(fileUuid)
	if err != nil {
		return err
	}
	if exists {
		return nil
	}
	return util.ErrFileNotExist
}
// requireSha256HashNotExist returns an error when a file with the given
// sha256 hash is already registered on chain, or when the lookup fails.
func requireSha256HashNotExist(hash common.Hash) error {
	fileUuid, err := chain.FilesDataGetUuidBySha256Hash(hash)
	if err != nil {
		return err
	}
	// Delegate the existence check; its error (or nil) is our result.
	return requireFileUuidNotExist(*fileUuid)
}
// requireHashNotExist ensures that neither the keccak256 nor the sha256
// hash of a candidate file is already known on chain.
func requireHashNotExist(keccak256Hash, sha256Hash common.Hash) error {
	if err := requireKeccak256HashNotExist(keccak256Hash); err != nil {
		return err
	}
	return requireSha256HashNotExist(sha256Hash)
}
// AddFile registers a new file on chain and uploads its content to the big
// data center. The caller supplies either the raw bytes plus their sha256
// hash (verified here), or a download URL (the hash is then computed after
// download). Returns the on-chain action metadata on success.
// NOTE(review): the (error, value) return order is unconventional for Go
// but is kept because existing callers depend on it.
func AddFile(ownerUuidStr, orgUuidStr, addressStr, password, fileType, fileDesc, fileUrl string, file []byte, sha256HashStr string) (error, *FileAction) {
	var (
		err error
		sha256Hash common.Hash
	)
	// Raw bytes must come with the uploader's sha256 so we can verify them.
	if file != nil && sha256HashStr == "" {
		return util.ErrParamdInvalid, nil
	}
	// Without raw bytes we need a URL to fetch the content from.
	if file == nil && fileUrl == "" {
		return util.ErrParamdInvalid, nil
	}
	if file == nil {
		file, err = util.DownloadFromFileUrl(fileUrl)
		if err != nil {
			return err, nil
		}
		sha256Hash = util.Sha256Hash(file)
	}else {
		// Recompute the hash to confirm the upload arrived complete.
		sha256Hash = common.HexToHash(sha256HashStr)
		if sha256Hash != util.Sha256Hash(file) {
			return util.ErrFileUploadNotComplete, nil
		}
	}
	// The owner must be a registered user.
	ownerUuid, err := uuid.Parse(ownerUuidStr)
	if err != nil {
		return err, nil
	}
	isExist, err := chain.UsersDataIsUuidExist(ownerUuid)
	if err != nil {
		return err, nil
	}
	if !isExist {
		return util.ErrUserNotExist, nil
	}
	// The org is optional; when given it must be registered too.
	var orgUuid uuid.UUID
	if orgUuidStr != "" {
		orgUuid, err = uuid.Parse(orgUuidStr)
		if err != nil {
			return err, nil
		}
		isExist, err = chain.OrgsDataIsUuidExist(orgUuid)
		if err != nil {
			return err, nil
		}
		if !isExist {
			return util.ErrOrgNotExist, nil
		}
	}
	if !common.IsHexAddress(addressStr) {
		return util.ErrInvalidAddress, nil
	}
	address := common.HexToAddress(addressStr)
	// The file type must be one of the configured types; the map is keyed
	// by the keccak256 of the type string.
	fileTypeHash := crypto.Keccak256Hash([]byte(fileType))
	if etc.FileTypeMap[fileTypeHash] != fileType {
		return fmt.Errorf("%s ===> %s", util.ErrFileTypeNotSupport.Error(), fileType), nil
	}
	// Reject content that is already registered under either hash.
	keccak256Hash := crypto.Keccak256Hash(file)
	err = requireHashNotExist(keccak256Hash, sha256Hash)
	if err != nil {
		return err, nil
	}
	fileDescBytes32_4, err := util.StringToBytes32_4(fileDesc)
	if err != nil {
		return err, nil
	}
	// Upload the content to the big data center. An "already exists" style
	// failure is tolerated because environments share the store.
	err = datacenter.UploadToBigDataCenter(file)
	if err != nil {
		if err.Error() != "文件上传失败或者已经存在" {
			return fmt.Errorf("大数据中心:%s", err.Error()), nil
		}
		zap.Sugar.Infof("upload file return %s, but it maybe successful because of it is shared by test/prod/pre env", err.Error())
	}
	// Finally record the file on chain under a freshly minted uuid.
	fileUuid := uuid.New()
	err, txHash := chain.ControllerAddFile(fileUuid, ownerUuid, orgUuid, fileTypeHash, *fileDescBytes32_4, keccak256Hash, sha256Hash, address, password)
	if err != nil {
		return err, nil
	}
	fileAction := FileAction{
		UUID: fileUuid,
		Keccak256Hash: keccak256Hash,
		Sha256Hash: sha256Hash,
		TransactionHash: *txHash,
	}
	return nil, &fileAction
}
// AddFileSign signs an already-registered file with the given account.
// The caller-supplied keccak256 hash must match the hash stored on chain,
// guarding against signing the wrong content. Returns the signature
// (hex, without the 0x prefix) and the transaction hash.
func AddFileSign(fileUuidStr, addressStr, password, keccak256HashStr string) (error, *FileAddSignAction) {
	fileUuid, err := uuid.Parse(fileUuidStr)
	if err != nil {
		return err, nil
	}
	isExist, err := chain.FilesDataIsUuidExist(fileUuid)
	if err != nil {
		return err, nil
	}
	if !isExist {
		return util.ErrFileNotExist, nil
	}
	if !common.IsHexAddress(addressStr) {
		return util.ErrInvalidAddress, nil
	}
	address := common.HexToAddress(addressStr)
	// Compare the caller's hash against the on-chain record.
	keccak256HashFromChain, err := chain.FilesDataGetKeccak256Hash(fileUuid)
	if err != nil {
		return err, nil
	}
	keccak256Hash := common.HexToHash(keccak256HashStr)
	if *keccak256HashFromChain != keccak256Hash {
		return util.ErrFileHashNotMatch, nil
	}
	err, r, s, v, txHash := chain.ControllerAddSign(fileUuid, keccak256Hash, address, password)
	if err != nil {
		return err, nil
	}
	// [2:] strips the "0x" prefix from the hex-encoded signature.
	signature := hexutil.Encode(util.RSVtoSig(r, s, v))[2:]
	return nil, &FileAddSignAction{
		Signature: signature,
		TransactionHash: *txHash,
	}
}
// GetFile fetches a registered file's content from the big data center,
// looked up by the sha256 hash stored on chain for the given uuid.
func GetFile(fileUuidStr string) (error, []byte) {
	fileUuid, err := uuid.Parse(fileUuidStr)
	if err != nil {
		return err, nil
	}
	if err = requireFileUuidExist(fileUuid); err != nil {
		return err, nil
	}
	sha256Hash, err := chain.FilesDataGetSha256Hash(fileUuid)
	if err != nil {
		return err, nil
	}
	content, err := datacenter.DownloadFromBigDataCenter(*sha256Hash)
	if err != nil {
		return err, nil
	}
	return nil, content
}
// GetFileSignerAndDataList returns one page of signers and signatures for
// a file, with string paging parameters (start offset and page size).
func GetFileSignerAndDataList(fileUuidStr string, startStr, limitStr string) (error, *FileSignerAndDataAction) {
	fileUuid, err := uuid.Parse(fileUuidStr)
	if err != nil {
		return err, nil
	}
	err = requireFileUuidExist(fileUuid)
	if err != nil {
		return err, nil
	}
	size, err := chain.FilesDataGetFileSignerSize(fileUuid)
	if err != nil {
		return err, nil
	}
	err, startBig, limitBig := transformPagingParamFromStringToBigInt(startStr, limitStr)
	if err != nil {
		return err, nil
	}
	// size.Cmp(startBig) != 1 means start >= number of signers on chain,
	// i.e. the requested page starts past the end of the list.
	if size.Cmp(startBig) != 1 {
		return util.ErrFileSignListOutOfIndex, nil
	}
	err, idl, rl, sl, vl := chain.ControllerGetFileSignersAndDataByUuid(fileUuid, startBig, limitBig)
	if err != nil {
		return err, nil
	}
	return nil, getFileSignerAndDataActionByChainData(idl, rl, sl, vl)
}
// getFileSignerAndDataActionByChainData converts raw signer ids and
// (r, s, v) signature components from the chain into the API action shape,
// hex-encoding each signature without its 0x prefix.
func getFileSignerAndDataActionByChainData(idl [][16]byte, rl [][32]byte, sl [][32]byte, vl []uint8) *FileSignerAndDataAction {
	n := len(idl)
	signers := make([]FileSignerAction, n)
	signatures := make([]string, n)
	for i := range idl {
		signers[i].UUID = idl[i]
		signatures[i] = hexutil.Encode(util.RSVtoSig(rl[i], sl[i], vl[i]))[2:]
	}
	return &FileSignerAndDataAction{
		Signers: signers,
		Signatures: signatures,
	}
}
|
package go_workerpool
import "log"
// Job is a single unit of work; Do runs it and reports any failure.
type Job interface {
	Do() error
}
// JobChannel is the global job queue.
// NOTE(review): workers consume from their own per-Worker JobChannel field,
// not from this package-level channel — confirm this global is still used.
var JobChannel = make(chan Job)
// Worker pulls jobs from its JobChannel and executes them until stopped.
type Worker struct {
	WorkerPool chan chan Job // pool the worker registers its job channel with
	JobChannel chan Job // jobs dispatched to this particular worker
	done chan bool // signals the run loop to exit
}
// NewWorker allocates a Worker that will register itself with the given
// pool once started. Its job and done channels are unbuffered.
func NewWorker(pool chan chan Job) *Worker {
	w := Worker{
		WorkerPool: pool,
		JobChannel: make(chan Job),
		done: make(chan bool),
	}
	return &w
}
// Start launches the worker's run loop in a new goroutine. Each iteration
// re-registers the worker's JobChannel with the pool, then either runs the
// next dispatched job or exits when Stop signals done.
func (w *Worker) Start() {
	go func() {
		for {
			// register our JobChannel back into the WorkerPool so the
			// dispatcher can hand us the next job
			w.WorkerPool <- w.JobChannel
			select {
			case job := <-w.JobChannel:
				err := job.Do()
				if err != nil {
					log.Printf("job do error:%v\n", err)
				}
			case <-w.done:
				return
			}
		}
	}()
}
// Stop asynchronously signals the run loop to exit after its current job.
// NOTE(review): if the worker was never started (or Stop is called twice),
// this goroutine blocks forever on the unbuffered done channel — confirm
// Stop is invoked at most once per started worker.
func (w *Worker) Stop() {
	go func() {
		w.done <- true
	}()
}
|
package main
import "fmt"
// main demonstrates the recursive merge sort on a small sample slice.
func main() {
	sample := []int{3, 5, 1, 2, 7, 8}
	fmt.Println(merge(sample))
}
// mergeSort merges two already-sorted int slices into one sorted slice.
// The result is a new slice; neither input is modified.
func mergeSort(nums1, nums2 []int) []int {
	n, m := len(nums1), len(nums2)
	i, j := 0, 0
	// Preallocate the exact final capacity to avoid repeated growth copies.
	res := make([]int, 0, n+m)
	for i < n && j < m {
		if nums1[i] < nums2[j] {
			res = append(res, nums1[i])
			i++
		} else {
			res = append(res, nums2[j])
			j++
		}
	}
	// At most one of these appends a non-empty remainder.
	res = append(res, nums1[i:]...)
	res = append(res, nums2[j:]...)
	return res
}
// merge sorts nums by top-down merge sort and returns the sorted slice.
func merge(nums []int) []int {
	if len(nums) < 2 {
		return nums
	}
	mid := len(nums) / 2
	left := merge(nums[:mid])
	right := merge(nums[mid:])
	return mergeSort(left, right)
}
|
package main
import "fmt"
// Stud is a simple student record used to demonstrate Go methods.
type Stud struct {
	name string // student name
	age int // age in years
	score float32 // exam score
}
// init fills in the student's fields and prints the updated struct.
// The pointer receiver is required here: a value receiver would mutate a
// copy and the caller's struct would stay unchanged.
func (s *Stud) init(name string, age int, score float32){
	s.name = name
	s.age = age
	s.score = score
	fmt.Println(s)
}
// get returns a copy of the student value.
func (s Stud) get() Stud{
	return s
}
// getName returns the student's name.
func (s Stud) getName() string {
	return s.name
}
// main exercises the Stud methods: pointer-receiver init, value copy,
// and the name getter.
func main() {
	var student Stud
	// The init method needs a pointer receiver, but Go takes the address
	// of an addressable value automatically.
	student.init("heylink", 34, 100)
	copied := student.get()
	fmt.Println(copied)
	fmt.Println(student.getName())
}
|
package entity
// UmsIntegrationConsumeSetting maps the loyalty-point consumption rules
// table; the xorm tags carry the column definitions and their original
// Chinese column comments.
type UmsIntegrationConsumeSetting struct {
	Id int64 `json:"id" xorm:"pk autoincr BIGINT(20) 'id'"`
	DeductionPerAmount int `json:"deduction_per_amount" xorm:"default NULL comment('每一元需要抵扣的积分数量') INT(11) 'deduction_per_amount'"` // points deducted per 1 yuan
	MaxPercentPerOrder int `json:"max_percent_per_order" xorm:"default NULL comment('每笔订单最高抵用百分比') INT(11) 'max_percent_per_order'"` // max percentage of an order payable with points
	UseUnit int `json:"use_unit" xorm:"default NULL comment('每次使用积分最小单位100') INT(11) 'use_unit'"` // minimum spend unit of points (100)
	CouponStatus int `json:"coupon_status" xorm:"default NULL comment('是否可以和优惠券同用;0->不可以;1->可以') INT(1) 'coupon_status'"` // 0 = not combinable with coupons, 1 = combinable
}
|
package main
// Rectangle is an axis-aligned rectangle described by its side lengths.
type Rectangle struct {
	Width float64
	Height float64
}

// Area returns the rectangle's surface area (width times height).
func (r Rectangle) Area() float64 {
	return r.Height * r.Width
}
|
package data
import (
"db"
"fmt"
"os"
"twitter"
)
// ExampleCheckTweet seeds a cleared test database with a handful of tweets,
// runs CheckTweet, and prints the insert count plus each per-user term doc.
// NOTE(review): tweets are inserted for "Mazafard" but terms are read back
// for "chischaschos" — looks inconsistent; confirm which user is intended.
func ExampleCheckTweet() {
	os.Setenv("Juli-ENV", "test")
	db.Clear()
	tweets := []twitter.Tweet{
		twitter.Tweet{"hi I'm new!", 1},
		twitter.Tweet{"سلام من جدیدم", 2},
		twitter.Tweet{"خدافظ", 3},
		twitter.Tweet{"ایت یم تویت تست است", 4},
		twitter.Tweet{"قشنگه", 5},
	}
	CheckTweet("Mazafard", tweets)
	fmt.Printf("%d tweets inserted\n", db.TotalTweets())
	db.TermsByUser("chischaschos", func(termDoc *twitter.TermDoc) {
		fmt.Printf("%v\n", termDoc)
	})
}
// ExampleNormalizeText prints the normalized form of a short Persian word.
func ExampleNormalizeText() {
	fmt.Println(normalizeText("تست"))
}
|
package main
import (
"github.com/ethereum/go-ethereum/common"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/sanguohot/medichain/util"
"log"
"github.com/ethereum/go-ethereum/crypto"
)
// main is a scratch harness for the util byte/hash helpers: it combines two
// 32-byte hashes and keccak-hashes the result, then round-trips a Chinese
// hospital name through the bytes32[4] encoding and compares the hashes of
// both representations.
func main() {
	two := [2][32]byte{}
	two[0] = common.HexToHash("8ec34f461212f6bbfd759f400b5a80679ecd56f496148ad2c4669d9e1127965b")
	two[1] = common.HexToHash("e94cbd7230c091e5bfa310596f354d1339a122e7335bdf799f504b3c1fcb07a5")
	// Concatenate both hashes and hash the 64-byte result.
	var one []byte
	one = util.BytesCombine(two[0][:], two[1][:])
	fmt.Println("0x8ec34f461212f6bbfd759f400b5a80679ecd56f496148ad2c4669d9e1127965be94cbd7230c091e5bfa310596f354d1339a122e7335bdf799f504b3c1fcb07a5 ===>", crypto.Keccak256Hash(one).Hex())
	fmt.Println(len(one), hexutil.Encode(one))
	// Encode a UTF-8 string into four 32-byte words and back again.
	three, err := util.StringToBytes32_4("广西中医一附院")
	if err != nil {
		log.Fatal(err)
	}
	tmp := []byte("广西中医一附院")
	four := util.Bytes32_4ToString(*three)
	fmt.Printf("[\"0x%s\", \"0x%s\", \"0x%s\", \"0x%s\"]\n", common.Bytes2Hex(three[0][:]), common.Bytes2Hex(three[1][:]), common.Bytes2Hex(three[2][:]), common.Bytes2Hex(three[3][:]))
	// Both hashes below should agree if the bytes32[4] encoding is lossless.
	fmt.Println("广西中医一附院 ===>", crypto.Keccak256Hash(tmp).Hex())
	fmt.Println("广西中医一附院 ===>", util.Bytes32_4Hash(*three).Hex())
	fmt.Println(four, string(tmp))
}
|
/*
* @lc app=leetcode.cn id=830 lang=golang
*
* [830] 较大分组的位置
*/
package main
// @lc code=start
// largeGroupPositions returns the inclusive [start, end] index pairs of
// every run of identical characters in s whose length is at least three,
// in left-to-right order.
func largeGroupPositions(s string) [][]int {
	groups := [][]int{}
	i := 0
	for i < len(s) {
		// Advance j to the first character different from s[i].
		j := i
		for j < len(s) && s[j] == s[i] {
			j++
		}
		if j-i >= 3 {
			groups = append(groups, []int{i, j - 1})
		}
		i = j
	}
	return groups
}
// func main(){
// fmt.Println(largeGroupPositions("abbxxxxzzy"))
// }
// @lc code=end
|
package config
// ElevatorType selects whether we drive the physical (Comedi) hardware or
// run against the simulator.
type ElevatorType int

const (
	// In the original declaration only the first constant carried the
	// type; the rest were untyped ints. All are now explicitly typed.
	ET_Comedi     ElevatorType = 0
	ET_Simulation ElevatorType = 1
)

// MotorDirection encodes the commanded travel direction of the motor.
type MotorDirection int

const (
	MD_Up   MotorDirection = 1
	MD_Down MotorDirection = -1
	MD_Stop MotorDirection = 0
)

// ButtonType identifies which class of elevator button was pressed.
type ButtonType int

const (
	B_HallUp ButtonType = iota // hall call going up
	B_HallDown                 // hall call going down
	B_Cab                      // cab-internal floor button
)

// ButtonEvent is a single button press: the floor it happened on and
// which button it was.
type ButtonEvent struct {
	Floor  int
	Button ButtonType
}
|
package problem0217
import (
"github.com/stretchr/testify/assert"
"leetcode-go/pkg/test"
"testing"
)
// Test_containsDuplicate checks containsDuplicate against a small table
// of inputs with and without repeated values.
func Test_containsDuplicate(t *testing.T) {
	testCases := []test.TCSB{
		{Input: []int{1, 2, 3, 1}, Expected: true},
		{Input: []int{1, 2, 3, 4}, Expected: false},
		{Input: []int{1, 1, 1, 3, 3, 4, 3, 2, 4, 2}, Expected: true},
	}
	for _, c := range testCases {
		assert.Equal(t, c.Expected, containsDuplicate(c.Input), c.Input)
	}
}
|
package response
// CommonError is a generic API error envelope carrying a numeric error
// code and a human-readable message (json keys: errcode / errmsg).
type CommonError struct {
	ErrorCode int64 `json:"errcode"`
	ErrorMessage string `json:"errmsg"`
}
|
package linebeacon
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestFrameData verifies that CreateLineSimpleBeaconAdvertisingPDU emits
// the expected advertising frame bytes for a fixed 5-byte HWID and a
// one-byte device message.
func TestFrameData(t *testing.T) {
	want := []byte{0x02, 0x01, 0x06, 0x03, 0x03, 0x6f, 0xfe, 0x0b, 0x16, 0x6f, 0xfe, 0x02, 0xfd, 0x5e, 0xa0, 0xad, 0x1e, 0x7f, 0x00}
	got := CreateLineSimpleBeaconAdvertisingPDU([]byte{0xfd, 0x5e, 0xa0, 0xad, 0x1e}, []byte{0x00})
	t.Log(want)
	t.Log(got)
	assert.Equal(t, want, got)
}
|
package vote
import "github.com/google/uuid"
// UseCase is the vote-storage contract consumed by the delivery layer.
type UseCase interface {
	Store(v Vote) (uuid.UUID,error)
}
// Service is the default UseCase implementation.
type Service struct {}
// NewService constructs an empty Service.
func NewService() *Service {
	return &Service{}
}
// Store persists the vote and returns its new id.
// NOTE(review): persistence is not implemented yet — a fresh uuid is
// returned without writing v anywhere.
func (s *Service) Store(v Vote) (uuid.UUID,error) {
	//@TODO create store rules, using databases or something else
	return uuid.New(),nil
}
|
package main
import (
"os"
"fmt"
"bufio"
)
// main appends ten greeting lines to output.txt through a buffered writer,
// reporting any open, write, or flush failure.
func main() {
	// os.O_APPEND appends to the end of the file; without it each run
	// would overwrite previous content.
	file, err := os.OpenFile("output.txt", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("open file failed:", err)
		return
	}
	defer file.Close()
	writer := bufio.NewWriter(file)
	str := "hello heylink\n"
	for i := 0; i < 10; i++ {
		// A buffered write can still fail (e.g. when the buffer flushes
		// mid-write); the original ignored this error.
		if _, err := writer.WriteString(str); err != nil {
			fmt.Println("write failed:", err)
			return
		}
	}
	// Flush pushes the buffered data to the underlying file; ignoring its
	// error can silently lose the whole batch of writes.
	if err := writer.Flush(); err != nil {
		fmt.Println("flush failed:", err)
	}
}
|
// Package backend implements the Hakkero Project's API server.
// The server will handle all connections from the Hakkero Project's
// front-end interface, providing needed data through JSON responses
// and websocket calls.
// The server consists of two parts: the Room Provider and the Match Provider.
package backend
|
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grumpy
import (
"reflect"
"testing"
)
// TestNewTuple checks that NewTuple stores exactly the element slice it
// was given, including the nil (empty) case.
func TestNewTuple(t *testing.T) {
	argLists := [][]*Object{
		nil,
		{newObject(ObjectType)},
		{newObject(ObjectType), newObject(ObjectType)},
	}
	for _, elems := range argLists {
		got := NewTuple(elems...)
		if !reflect.DeepEqual(got.elems, elems) {
			t.Errorf("NewTuple(%v) = %v, want %v", elems, got.elems, elems)
		}
	}
}
// TestTupleBinaryOps exercises tuple +, * (both operand orders) including
// type-error and overflow cases.
func TestTupleBinaryOps(t *testing.T) {
	// Adapter so the table can select the binary op as its first argument.
	fun := wrapFuncForTest(func(f *Frame, fn binaryOpFunc, v, w *Object) (*Object, *BaseException) {
		return fn(f, v, w)
	})
	cases := []invokeTestCase{
		{args: wrapArgs(Add, newTestTuple(3), newTestTuple("foo")), want: newTestTuple(3, "foo").ToObject()},
		{args: wrapArgs(Add, NewTuple(None), NewTuple()), want: NewTuple(None).ToObject()},
		{args: wrapArgs(Add, NewTuple(), newObject(ObjectType)), wantExc: mustCreateException(TypeErrorType, "unsupported operand type(s) for +: 'tuple' and 'object'")},
		{args: wrapArgs(Add, None, NewTuple()), wantExc: mustCreateException(TypeErrorType, "unsupported operand type(s) for +: 'NoneType' and 'tuple'")},
		{args: wrapArgs(Mul, NewTuple(), 10), want: NewTuple().ToObject()},
		{args: wrapArgs(Mul, newTestTuple("baz"), -2), want: NewTuple().ToObject()},
		{args: wrapArgs(Mul, newTestTuple(None, None), 0), want: NewTuple().ToObject()},
		{args: wrapArgs(Mul, newTestTuple(1, "bar"), 2), want: newTestTuple(1, "bar", 1, "bar").ToObject()},
		{args: wrapArgs(Mul, 1, newTestTuple(1, "bar")), want: newTestTuple(1, "bar").ToObject()},
		{args: wrapArgs(Mul, newObject(ObjectType), newTestTuple(newObject(ObjectType))), wantExc: mustCreateException(TypeErrorType, "unsupported operand type(s) for *: 'object' and 'tuple'")},
		{args: wrapArgs(Mul, NewTuple(newObject(ObjectType)), NewTuple()), wantExc: mustCreateException(TypeErrorType, "unsupported operand type(s) for *: 'tuple' and 'tuple'")},
		{args: wrapArgs(Mul, NewTuple(None, None), MaxInt), wantExc: mustCreateException(OverflowErrorType, "result too large")},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(fun, &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestTupleCompare checks lexicographic tuple ordering, including unequal
// lengths and mixed element types.
func TestTupleCompare(t *testing.T) {
	o := newObject(ObjectType)
	cases := []invokeTestCase{
		{args: wrapArgs(NewTuple(), NewTuple()), want: compareAllResultEq},
		{args: wrapArgs(newTestTuple("foo", o), newTestTuple("foo", o)), want: compareAllResultEq},
		{args: wrapArgs(newTestTuple(4), newTestTuple(3, 0)), want: compareAllResultGT},
		{args: wrapArgs(newTestTuple(4), newTestTuple(4, 3, 0)), want: compareAllResultLT},
		{args: wrapArgs(NewTuple(o), NewTuple()), want: compareAllResultGT},
		{args: wrapArgs(NewTuple(o), newTestTuple("foo")), want: compareAllResultLT},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(compareAll, &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestTupleCompareNotImplemented checks that comparing a tuple with a
// non-tuple via __eq__ yields NotImplemented rather than raising.
func TestTupleCompareNotImplemented(t *testing.T) {
	tc := invokeTestCase{args: wrapArgs(NewTuple(), 3), want: NotImplemented}
	if err := runInvokeMethodTestCase(TupleType, "__eq__", &tc); err != "" {
		t.Error(err)
	}
}
// TestTupleContains exercises tuple __contains__ for hits, misses, and
// non-hashable probe values such as slices and None.
func TestTupleContains(t *testing.T) {
	cases := []invokeTestCase{
		{args: wrapArgs(newTestTuple("foo", 42, "bar"), 1), want: False.ToObject()},
		{args: wrapArgs(newTestTuple("foo", 42, "bar"), "foo"), want: True.ToObject()},
		{args: wrapArgs(newTestTuple("foo", 42, "bar"), 42), want: True.ToObject()},
		{args: wrapArgs(newTestTuple("foo", 42, "bar"), "bar"), want: True.ToObject()},
		{args: wrapArgs(NewTuple(), newTestSlice(50, 100)), want: False.ToObject()},
		{args: wrapArgs(newTestTuple(1, 2, 3, 4, 5), newTestSlice(1, None, 2)), want: False.ToObject()},
		{args: wrapArgs(NewTuple(), 1), want: False.ToObject()},
		{args: wrapArgs(newTestTuple(32), -100), want: False.ToObject()},
		{args: wrapArgs(newTestTuple(1, 2, 3), newTestSlice(1, None, 0)), want: False.ToObject()},
		{args: wrapArgs(newTestTuple(true), None), want: False.ToObject()},
	}
	for _, cas := range cases {
		if err := runInvokeMethodTestCase(TupleType, "__contains__", &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestTupleCount checks tuple.count for empty tuples, repeated elements,
// and the missing-argument error.
func TestTupleCount(t *testing.T) {
	cases := []invokeTestCase{
		{args: wrapArgs(NewTuple(), NewInt(1)), want: NewInt(0).ToObject()},
		{args: wrapArgs(NewTuple(None, None, None), None), want: NewInt(3).ToObject()},
		{args: wrapArgs(NewTuple()), wantExc: mustCreateException(TypeErrorType, "'count' of 'tuple' requires 2 arguments")},
	}
	for _, cas := range cases {
		if err := runInvokeMethodTestCase(TupleType, "count", &cas); err != "" {
			t.Error(err)
		}
	}
}
func BenchmarkTupleContains(b *testing.B) {
b.Run("false-3", func(b *testing.B) {
t := newTestTuple("foo", 42, "bar").ToObject()
a := wrapArgs(1)[0]
f := NewRootFrame()
b.ResetTimer()
for i := 0; i < b.N; i++ {
Contains(f, t, a)
}
})
b.Run("false-10", func(b *testing.B) {
t := newTestTuple("foo", 42, "bar", "foo", 42, "bar", "foo", 42, "bar", "baz").ToObject()
a := wrapArgs(1)[0]
f := NewRootFrame()
b.ResetTimer()
for i := 0; i < b.N; i++ {
Contains(f, t, a)
}
})
b.Run("true-3.1", func(b *testing.B) {
t := newTestTuple("foo", 42, "bar").ToObject()
a := wrapArgs("foo")[0]
f := NewRootFrame()
b.ResetTimer()
for i := 0; i < b.N; i++ {
Contains(f, t, a)
}
})
b.Run("true-3.3", func(b *testing.B) {
t := newTestTuple("foo", 42, "bar").ToObject()
a := wrapArgs("bar")[0]
f := NewRootFrame()
b.ResetTimer()
for i := 0; i < b.N; i++ {
Contains(f, t, a)
}
})
b.Run("true-10.10", func(b *testing.B) {
t := newTestTuple("foo", 42, "bar", "foo", 42, "bar", "foo", 42, "bar", "baz").ToObject()
a := wrapArgs("baz")[0]
f := NewRootFrame()
b.ResetTimer()
for i := 0; i < b.N; i++ {
Contains(f, t, a)
}
})
}
// TestTupleGetItem checks __getitem__ with positive/negative indices,
// slices (including zero step), out-of-range indices, and bad index types.
func TestTupleGetItem(t *testing.T) {
	cases := []invokeTestCase{
		{args: wrapArgs(newTestTuple("foo", 42, "bar"), 1), want: NewInt(42).ToObject()},
		{args: wrapArgs(newTestTuple("foo", 42, "bar"), -3), want: NewStr("foo").ToObject()},
		{args: wrapArgs(NewTuple(), newTestSlice(50, 100)), want: NewTuple().ToObject()},
		{args: wrapArgs(newTestTuple(1, 2, 3, 4, 5), newTestSlice(1, None, 2)), want: newTestTuple(2, 4).ToObject()},
		{args: wrapArgs(NewTuple(), 1), wantExc: mustCreateException(IndexErrorType, "index out of range")},
		{args: wrapArgs(newTestTuple(32), -100), wantExc: mustCreateException(IndexErrorType, "index out of range")},
		{args: wrapArgs(newTestTuple(1, 2, 3), newTestSlice(1, None, 0)), wantExc: mustCreateException(ValueErrorType, "slice step cannot be zero")},
		{args: wrapArgs(newTestTuple(true), None), wantExc: mustCreateException(TypeErrorType, "sequence indices must be integers, not NoneType")},
	}
	for _, cas := range cases {
		if err := runInvokeMethodTestCase(TupleType, "__getitem__", &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestTupleLen checks Len on a three-element tuple.
func TestTupleLen(t *testing.T) {
	tup := newTestTuple("foo", 42, "bar")
	if n := tup.Len(); n != 3 {
		t.Errorf("%v.Len() = %v, want 3", tup, n)
	}
}
// TestTupleNew checks the tuple constructor with no argument, an iterable
// argument (tuple and dict), and a non-iterable argument.
func TestTupleNew(t *testing.T) {
	cases := []invokeTestCase{
		{want: NewTuple().ToObject()},
		{args: wrapArgs(newTestTuple(1, 2, 3)), want: newTestTuple(1, 2, 3).ToObject()},
		{args: wrapArgs(newTestDict(1, "foo", "bar", None)), want: newTestTuple(1, "bar").ToObject()},
		{args: wrapArgs(42), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(TupleType.ToObject(), &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestTupleStrRepr checks str() and repr() of tuples, including the
// single-element trailing-comma form.
func TestTupleStrRepr(t *testing.T) {
	// Return (str(o), repr(o)) as a tuple so one table covers both.
	fun := wrapFuncForTest(func(f *Frame, o *Object) (*Tuple, *BaseException) {
		str, raised := ToStr(f, o)
		if raised != nil {
			return nil, raised
		}
		repr, raised := Repr(f, o)
		if raised != nil {
			return nil, raised
		}
		return newTestTuple(str, repr), nil
	})
	cases := []invokeTestCase{
		{args: wrapArgs(NewTuple()), want: newTestTuple("()", "()").ToObject()},
		{args: wrapArgs(newTestTuple("foo")), want: newTestTuple("('foo',)", "('foo',)").ToObject()},
		{args: wrapArgs(newTestTuple(TupleType, ExceptionType)), want: newTestTuple("(<type 'tuple'>, <type 'Exception'>)", "(<type 'tuple'>, <type 'Exception'>)").ToObject()},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(fun, &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestTupleIter checks tuple iteration by building a list from a tuple and
// comparing element order.
func TestTupleIter(t *testing.T) {
	o := newObject(ObjectType)
	cases := []invokeTestCase{
		{args: wrapArgs(NewTuple()), want: NewList().ToObject()},
		{args: wrapArgs(newTestTuple(1, o, "foo")), want: newTestList(1, o, "foo").ToObject()},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(ListType.ToObject(), &cas); err != "" {
			t.Error(err)
		}
	}
}
// newTestTuple wraps its Go arguments as Python objects and returns them
// packed into a fresh Tuple.
func newTestTuple(elems ...interface{}) *Tuple {
	wrapped := wrapArgs(elems...)
	return NewTuple(wrapped...)
}
|
package rpcimpl
import (
"io"
"log"
"strconv"
"time"
pb "github.com/yjiang-dev/simplemath/api"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"golang.org/x/net/context"
)
const (
	// timestampFormat is the layout used for timestamp metadata values.
	timestampFormat = time.StampNano
)
// SimpleMathServer implements the SimpleMath gRPC service; it is stateless.
type SimpleMathServer struct{}
// GreatCommonDivisor computes gcd(first, second) with the Euclidean
// algorithm, echoing timestamp metadata in the response header and trailer.
func (sms *SimpleMathServer) GreatCommonDivisor(ctx context.Context, in *pb.GCDRequest) (*pb.GCDResponse, error) {
	// sending metadata to client: create trailer, using defer to record timestamp of function return
	defer func() {
		trailer := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
		grpc.SetTrailer(ctx, trailer)
	}()
	// receiving metadata from client: get metadata from context
	md, _ := metadata.FromIncomingContext(ctx)
	if t, ok := md["timestamp"]; ok {
		log.Printf("timestamp from metadata: ")
		for i, e := range t {
			log.Printf(" %d. %s", i, e)
		}
	}
	// Euclidean algorithm: iterate (a, b) -> (b, a mod b) until b is zero.
	first := in.First
	second := in.Second
	for second != 0 {
		first, second = second, first%second
	}
	// sending metadata to client: create and send header
	header := metadata.New(map[string]string{"timestamp": time.Now().Format(timestampFormat)})
	grpc.SendHeader(ctx, header)
	return &pb.GCDResponse{Result: first}, nil
}
// GetFibonacci streams the first in.Count Fibonacci numbers to the client.
func (sms *SimpleMathServer) GetFibonacci(in *pb.FibonacciRequest, stream pb.SimpleMath_GetFibonacciServer) error {
	a, b := 0, 1
	for i := 0; i < int(in.Count); i++ {
		// Propagate stream errors (e.g. client disconnect) instead of
		// silently dropping them as the original did.
		if err := stream.Send(&pb.FibonacciResponse{Result: int32(a)}); err != nil {
			return err
		}
		a, b = b, a+b
	}
	return nil
}
// Statistics consumes a client stream of numbers and replies once, on EOF,
// with their count, maximum, minimum and average.
func (sms *SimpleMathServer) Statistics(stream pb.SimpleMath_StatisticsServer) error {
	var count, maximum, minimum int32
	minimum = int32((^uint32(0)) >> 1) // math.MaxInt32 start so any value lowers it
	maximum = -minimum - 1             // math.MinInt32 start so any value raises it
	var average, sum float32
	// receive the requests until the client half-closes
	for {
		num, err := stream.Recv()
		if err == io.EOF {
			// Guard against an empty stream: 0/0 would produce NaN.
			if count > 0 {
				average = sum / float32(count)
			}
			return stream.SendAndClose(&pb.StatisticsResponse{
				Count:   count,
				Maximum: maximum,
				Minimum: minimum,
				Average: average,
			})
		}
		if err != nil {
			// The original used log.Fatalf, which calls os.Exit and kills
			// the whole server on a single bad stream — and made the
			// return below unreachable. Log and let gRPC handle the error.
			log.Printf("failed to recv: %v", err)
			return err
		}
		count++
		if maximum < num.Number {
			maximum = num.Number
		}
		if minimum > num.Number {
			minimum = num.Number
		}
		sum += float32(num.Number)
	}
}
// PrimeFactorization handles a bidirectional stream: for every number
// received it replies with that number's prime factorization, echoing
// timestamp metadata in the header and trailer.
func (sms *SimpleMathServer) PrimeFactorization(stream pb.SimpleMath_PrimeFactorizationServer) error {
	// sending metadata to client: create trailer, using defer to record timestamp of function return
	defer func() {
		trailer := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
		stream.SetTrailer(trailer)
	}()
	// receiving metadata from client: read metadata from client
	md, _ := metadata.FromIncomingContext(stream.Context())
	if t, ok := md["timestamp"]; ok {
		log.Printf("timestamp from metadata:")
		for i, e := range t {
			log.Printf(" %d. %s", i, e)
		}
	}
	// sending metadata to client: create and send header
	header := metadata.New(map[string]string{"timestamp": time.Now().Format(timestampFormat)})
	stream.SendHeader(header)
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			// log.Fatalf would terminate the entire server process and made
			// the return below unreachable; report the error to gRPC instead.
			log.Printf("failed to recv: %v", err)
			return err
		}
		// Propagate send failures (e.g. the client went away). The
		// unreachable trailing "return nil" after this loop was removed.
		if err := stream.Send(&pb.PrimeFactorizationResponse{Result: primeFactorization(int(in.Number))}); err != nil {
			return err
		}
	}
}
// primeFactorization renders num as a product of its prime factors, e.g.
// 12 -> "12 = 2 * 2 * 3". Numbers <= 2 are returned unchanged; when num
// itself is prime the factor list degenerates to "1", producing e.g.
// "7 = 1 * 7".
func primeFactorization(num int) string {
	if num <= 2 {
		return strconv.Itoa(num)
	}
	remainder := num
	factors := ""
	sep := ""
	// Divide out each candidate factor as long as it divides the remainder
	// and the remainder has not shrunk down to the candidate itself.
	for candidate := 2; candidate <= remainder; candidate++ {
		for remainder != candidate && remainder%candidate == 0 {
			factors += sep + strconv.Itoa(candidate)
			sep = " * "
			remainder /= candidate
		}
	}
	if factors == "" {
		factors = "1"
	}
	return strconv.Itoa(num) + " = " + factors + " * " + strconv.Itoa(remainder)
}
|
package pwm_profile
import (
"testing"
"time"
)
// TestInterval builds an interval profile from a JSON config spanning
// 10:00-19:30 with hourly values, and checks the value returned at two
// sample times.
func TestInterval(t *testing.T) {
	conf := `
{
"start":"10:00:00",
"end": "19:30:00",
"interval": 3600,
"values": [0,10,30,40,50,60,80,40,20,10]
}
`
	i, err := Interval([]byte(conf), 13, 100)
	if err != nil {
		t.Fatal(err)
	}
	// 10:30 falls in the first hour slot, whose configured value is 0.
	t1, err := time.Parse(tFormat, "10:30:00")
	if err != nil {
		t.Error(err)
	}
	if int(i.Get(t1)) != 0 {
		t.Error("Expected 0, got:", i.Get(t1))
	}
	// 16:30 falls in the seventh slot, whose configured value is 60.
	t2, err := time.Parse(tFormat, "16:30:00")
	if err != nil {
		t.Error(err)
	}
	if int(i.Get(t2)) != 60 {
		t.Error("Expected 60, got:", i.Get(t2))
	}
}
// Regression test for https://github.com/reef-pi/reef-pi/issues/960:
// an interval profile covering the full day used to panic when queried
// just before midnight.
// start: 00:00:00
// end: 23:59:59
// interval:7849
// values:[99,0,0,0,0,0,0,0,0,50,85,99]
// values:[0,0,0,0,50,85,100,85,50,0,0,0]
func TestPanicAtMidnight(t *testing.T) {
	conf := `
{
"start":"00:00:00",
"end":"23:59:59",
"values":[99,0,0,0,0,0,0,0,0,50,85,99],
"interval":7849
}
`
	i, err := Interval([]byte(conf), 0, 100)
	if err != nil {
		t.Fatal(err)
	}
	t1, err := time.Parse(tFormat, "23:59:00")
	if err != nil {
		t.Error(err)
	}
	// Near midnight the profile should be in its last slot (value 99);
	// allow for interpolation by accepting >= 98.
	if int(i.Get(t1)) < 98 {
		t.Error("Expected 99, got:", i.Get(t1))
	}
}
|
package olm
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/internal/alongside"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister/operatorlisterfakes"
"github.com/stretchr/testify/assert"
)
func TestRequirementAndPermissionStatus(t *testing.T) {
namespace := "ns"
type gvkn struct {
group string
version string
kind string
name string
}
tests := []struct {
description string
csv *v1alpha1.ClusterServiceVersion
existingObjs []runtime.Object
existingExtObjs []runtime.Object
met bool
expectedRequirementStatuses map[gvkn]v1alpha1.RequirementStatus
expectedError error
}{
{
description: "AllPermissionsMet",
csv: csvWithUID(csv("csv1",
namespace,
"0.0.0",
"",
installStrategy(
"csv1-dep",
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Verbs: []string{"*"},
Resources: []string{"donuts"},
},
},
},
},
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get"},
NonResourceURLs: []string{"/osbs"},
},
},
},
},
),
nil,
nil,
v1alpha1.CSVPhasePending,
), types.UID("csv-uid")),
existingObjs: []runtime.Object{
&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "sa",
Namespace: namespace,
UID: types.UID("sa"),
OwnerReferences: []metav1.OwnerReference{
{
Kind: v1alpha1.ClusterServiceVersionKind,
UID: "csv-uid",
},
},
},
},
&rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: "role",
Namespace: namespace,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Verbs: []string{"*"},
Resources: []string{"donuts"},
},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "roleBinding",
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "role",
},
},
&rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: "clusterRole",
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get"},
NonResourceURLs: []string{"/osbs"},
},
},
},
&rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "clusterRoleBinding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "clusterRole",
},
},
},
existingExtObjs: nil,
met: true,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"", "v1", "ServiceAccount", "sa"}: {
Group: "",
Version: "v1",
Kind: "ServiceAccount",
Name: "sa",
Status: v1alpha1.RequirementStatusReasonPresent,
Dependents: []v1alpha1.DependentStatus{
{
Group: "rbac.authorization.k8s.io",
Kind: "PolicyRule",
Version: "v1",
},
{
Group: "rbac.authorization.k8s.io",
Kind: "PolicyRule",
Version: "v1",
},
},
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv1"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "OnePermissionNotMet",
csv: csvWithUID(csv("csv1",
namespace,
"0.0.0",
"",
installStrategy(
"csv1-dep",
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Verbs: []string{"*"},
Resources: []string{"donuts"},
},
},
},
},
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get"},
NonResourceURLs: []string{"/osbs"},
},
},
},
},
),
nil,
nil,
v1alpha1.CSVPhasePending,
), types.UID("csv-uid")),
existingObjs: []runtime.Object{
&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "sa",
Namespace: namespace,
UID: types.UID("sa"),
OwnerReferences: []metav1.OwnerReference{
{
Kind: v1alpha1.ClusterServiceVersionKind,
UID: "csv-uid",
},
},
},
},
&rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: "role",
Namespace: namespace,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Verbs: []string{"*"},
Resources: []string{"donuts"},
},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "roleBinding",
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "role",
},
},
&rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: "clusterRole",
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get"},
NonResourceURLs: []string{"/osbs/*"},
},
},
},
&rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "clusterRoleBinding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "clusterRole",
},
},
},
existingExtObjs: nil,
met: false,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"", "v1", "ServiceAccount", "sa"}: {
Group: "",
Version: "v1",
Kind: "ServiceAccount",
Name: "sa",
Status: v1alpha1.RequirementStatusReasonPresentNotSatisfied,
Dependents: []v1alpha1.DependentStatus{
{
Group: "rbac.authorization.k8s.io",
Kind: "PolicyRule",
Version: "v1",
},
{
Group: "rbac.authorization.k8s.io",
Kind: "PolicyRule",
Version: "v1",
},
},
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv1"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "RequirementNotMet/ServiceAccountOwnerConflict",
csv: csvWithUID(csv("csv1",
namespace,
"0.0.0",
"",
installStrategy(
"csv1-dep",
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Verbs: []string{"*"},
Resources: []string{"donuts"},
},
},
},
},
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get"},
NonResourceURLs: []string{"/osbs"},
},
},
},
},
),
nil,
nil,
v1alpha1.CSVPhasePending,
), types.UID("csv-uid")),
existingObjs: []runtime.Object{
&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "sa",
Namespace: namespace,
UID: types.UID("sa"),
OwnerReferences: []metav1.OwnerReference{
{
Kind: v1alpha1.ClusterServiceVersionKind,
UID: "csv-uid-other",
},
},
},
},
&rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: "role",
Namespace: namespace,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Verbs: []string{"*"},
Resources: []string{"donuts"},
},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "roleBinding",
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "role",
},
},
&rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: "clusterRole",
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get"},
NonResourceURLs: []string{"/osbs"},
},
},
},
&rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "clusterRoleBinding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "clusterRole",
},
},
},
existingExtObjs: nil,
met: false,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"", "v1", "ServiceAccount", "sa"}: {
Group: "",
Version: "v1",
Kind: "ServiceAccount",
Name: "sa",
Status: v1alpha1.RequirementStatusReasonPresentNotSatisfied,
Dependents: []v1alpha1.DependentStatus{},
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv1"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "AllRequirementsMet",
csv: csvWithUID(csv("csv1",
namespace,
"0.0.0",
"",
installStrategy(
"csv1-dep",
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Verbs: []string{"*"},
Resources: []string{"donuts"},
},
},
},
},
nil,
),
[]*apiextensionsv1.CustomResourceDefinition{crd("c1", "v1", "g1")},
[]*apiextensionsv1.CustomResourceDefinition{crd("c2", "v1", "g2")},
v1alpha1.CSVPhasePending,
), types.UID("csv-uid")),
existingObjs: []runtime.Object{
&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "sa",
Namespace: namespace,
UID: types.UID("sa"),
OwnerReferences: []metav1.OwnerReference{
{
Kind: v1alpha1.ClusterServiceVersionKind,
UID: "csv-uid",
},
},
},
},
&rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: "role",
Namespace: namespace,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Verbs: []string{"*"},
Resources: []string{"donuts"},
},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "roleBinding",
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "role",
},
},
},
existingExtObjs: []runtime.Object{
crd("c1", "v1", "g1"),
crd("c2", "v1", "g2"),
},
met: true,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"", "v1", "ServiceAccount", "sa"}: {
Group: "",
Version: "v1",
Kind: "ServiceAccount",
Name: "sa",
Status: v1alpha1.RequirementStatusReasonPresent,
Dependents: []v1alpha1.DependentStatus{
{
Group: "rbac.authorization.k8s.io",
Kind: "PolicyRule",
Version: "v1",
},
},
},
{"", "v1", "ServiceAccount", "sa"}: {
Group: "",
Version: "v1",
Kind: "ServiceAccount",
Name: "sa",
Status: v1alpha1.RequirementStatusReasonPresent,
Dependents: []v1alpha1.DependentStatus{
{
Group: "rbac.authorization.k8s.io",
Kind: "PolicyRule",
Version: "v1",
},
},
},
{"apiextensions.k8s.io", "v1", "CustomResourceDefinition", "c1.g1"}: {
Group: "apiextensions.k8s.io",
Version: "v1",
Kind: "CustomResourceDefinition",
Name: "c1.g1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
{"apiextensions.k8s.io", "v1", "CustomResourceDefinition", "c2.g2"}: {
Group: "apiextensions.k8s.io",
Version: "v1",
Kind: "CustomResourceDefinition",
Name: "c2.g2",
Status: v1alpha1.RequirementStatusReasonPresent,
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv1"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "RequirementNotMet/NonServedCRDVersion",
csv: csv("csv1",
namespace,
"0.0.0",
"",
installStrategy("csv1-dep", nil, nil),
[]*apiextensionsv1.CustomResourceDefinition{crd("c1", "v2", "g1")},
nil,
v1alpha1.CSVPhasePending,
),
existingObjs: nil,
existingExtObjs: []runtime.Object{
crd("c1", "v1", "g1"),
},
met: false,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"apiextensions.k8s.io", "v1", "CustomResourceDefinition", "c1.g1"}: {
Group: "apiextensions.k8s.io",
Version: "v1",
Kind: "CustomResourceDefinition",
Name: "c1.g1",
Status: v1alpha1.RequirementStatusReasonNotPresent,
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv1"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "RequirementNotMet/NotEstablishedCRDVersion",
csv: csv("csv1",
namespace,
"0.0.0",
"",
installStrategy("csv1-dep", nil, nil),
[]*apiextensionsv1.CustomResourceDefinition{crd("c1", "version-not-found", "g1")},
nil,
v1alpha1.CSVPhasePending,
),
existingObjs: nil,
existingExtObjs: []runtime.Object{
crd("c1", "v2", "g1"),
},
met: false,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"apiextensions.k8s.io", "v1", "CustomResourceDefinition", "c1.g1"}: {
Group: "apiextensions.k8s.io",
Version: "v1",
Kind: "CustomResourceDefinition",
Name: "c1.g1",
Status: v1alpha1.RequirementStatusReasonNotPresent,
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv1"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "RequirementNotMet/NamesConflictedCRD",
csv: csv("csv1",
namespace,
"0.0.0",
"",
installStrategy("csv1-dep", nil, nil),
[]*apiextensionsv1.CustomResourceDefinition{crd("c1", "v2", "g1")},
nil,
v1alpha1.CSVPhasePending,
),
existingObjs: nil,
existingExtObjs: []runtime.Object{
func() *apiextensionsv1.CustomResourceDefinition {
newCRD := crd("c1", "v2", "g1")
// condition order: established, name accepted
newCRD.Status.Conditions[0].Status = apiextensionsv1.ConditionTrue
newCRD.Status.Conditions[1].Status = apiextensionsv1.ConditionFalse
return newCRD
}(),
},
met: false,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"apiextensions.k8s.io", "v1", "CustomResourceDefinition", "c1.g1"}: {
Group: "apiextensions.k8s.io",
Version: "v1",
Kind: "CustomResourceDefinition",
Name: "c1.g1",
Status: v1alpha1.RequirementStatusReasonNotAvailable,
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv1"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "RequirementNotMet/CRDResourceInactive",
csv: csv("csv1",
namespace,
"0.0.0",
"",
installStrategy("csv1-dep", nil, nil),
[]*apiextensionsv1.CustomResourceDefinition{crd("c1", "v2", "g1")},
nil,
v1alpha1.CSVPhasePending,
),
existingObjs: nil,
existingExtObjs: []runtime.Object{
func() *apiextensionsv1.CustomResourceDefinition {
newCRD := crd("c1", "v2", "g1")
// condition order: established, name accepted
newCRD.Status.Conditions[0].Status = apiextensionsv1.ConditionFalse
newCRD.Status.Conditions[1].Status = apiextensionsv1.ConditionTrue
return newCRD
}(),
},
met: false,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"apiextensions.k8s.io", "v1", "CustomResourceDefinition", "c1.g1"}: {
Group: "apiextensions.k8s.io",
Version: "v1",
Kind: "CustomResourceDefinition",
Name: "c1.g1",
Status: v1alpha1.RequirementStatusReasonNotAvailable,
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv1"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "RequirementNotMet/StaleServiceAccount",
csv: csvWithUID(csv("csv1",
namespace,
"0.0.0",
"",
installStrategy(
"csv1-dep",
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Verbs: []string{"*"},
Resources: []string{"donuts"},
},
},
},
},
nil,
),
nil,
nil,
v1alpha1.CSVPhasePending,
), types.UID("csv-uid")),
existingObjs: []runtime.Object{
&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "sa",
Namespace: namespace,
UID: types.UID("sa"),
OwnerReferences: []metav1.OwnerReference{
{
Kind: v1alpha1.ClusterServiceVersionKind,
UID: "csv-wrong",
},
},
},
},
},
existingExtObjs: nil,
met: false,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"", "v1", "ServiceAccount", "sa"}: {
Version: "v1",
Kind: "ServiceAccount",
Name: "sa",
Status: v1alpha1.RequirementStatusReasonPresentNotSatisfied,
Dependents: []v1alpha1.DependentStatus{},
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv1"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv1",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "RequirementMet/ServiceAccountOwnedByNonCSV",
csv: csvWithUID(csv("csv",
namespace,
"0.0.0",
"",
installStrategy(
"csv-dep",
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
},
},
nil,
),
nil,
nil,
v1alpha1.CSVPhasePending,
), types.UID("csv-uid")),
existingObjs: []runtime.Object{
&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "sa",
Namespace: namespace,
UID: types.UID("sa"),
OwnerReferences: []metav1.OwnerReference{
{
Kind: v1alpha1.SubscriptionKind, // arbitrary non-CSV kind
UID: "non-csv",
},
},
},
},
},
existingExtObjs: nil,
met: true,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"", "v1", "ServiceAccount", "sa"}: {
Version: "v1",
Kind: "ServiceAccount",
Name: "sa",
Status: v1alpha1.RequirementStatusReasonPresent,
Dependents: []v1alpha1.DependentStatus{},
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
{
description: "RequirementMet/ServiceAccountHasNoOwner",
csv: csvWithUID(csv("csv",
namespace,
"0.0.0",
"",
installStrategy(
"csv-dep",
[]v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "sa",
},
},
nil,
),
nil,
nil,
v1alpha1.CSVPhasePending,
), types.UID("csv-uid")),
existingObjs: []runtime.Object{
&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "sa",
Namespace: namespace,
UID: types.UID("sa"),
},
},
},
existingExtObjs: nil,
met: true,
expectedRequirementStatuses: map[gvkn]v1alpha1.RequirementStatus{
{"", "v1", "ServiceAccount", "sa"}: {
Version: "v1",
Kind: "ServiceAccount",
Name: "sa",
Status: v1alpha1.RequirementStatusReasonPresent,
Dependents: []v1alpha1.DependentStatus{},
},
{"operators.coreos.com", "v1alpha1", "ClusterServiceVersion", "csv"}: {
Group: "operators.coreos.com",
Version: "v1alpha1",
Kind: "ClusterServiceVersion",
Name: "csv",
Status: v1alpha1.RequirementStatusReasonPresent,
},
},
expectedError: nil,
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
op, err := NewFakeOperator(ctx, withNamespaces(namespace), withOperatorNamespace(namespace), withClientObjs(test.csv), withK8sObjs(test.existingObjs...), withExtObjs(test.existingExtObjs...))
require.NoError(t, err)
// Get the permission status
met, statuses, err := op.requirementAndPermissionStatus(test.csv)
if test.expectedError != nil {
require.Error(t, err)
require.EqualError(t, test.expectedError, err.Error())
}
assert := assert.New(t)
assert.Equal(test.met, met)
for _, status := range statuses {
key := gvkn{
group: status.Group,
version: status.Version,
kind: status.Kind,
name: status.Name,
}
expected, ok := test.expectedRequirementStatuses[key]
assert.True(ok, fmt.Sprintf("permission requirement status %+v found but not expected", key))
assert.Equal(expected.Status, status.Status)
assert.Len(status.Dependents, len(expected.Dependents), "number of dependents is not what was expected")
// Delete the requirement status to mark as found
delete(test.expectedRequirementStatuses, key)
}
assert.Len(test.expectedRequirementStatuses, 0, "not all expected permission requirement statuses were found")
})
}
}
// TestMinKubeVersionStatus verifies that minKubeVersionStatus correctly
// reports whether a CSV's spec.minKubeVersion requirement is satisfied by
// the fake operator's server version, covering the empty, satisfied,
// unsatisfied, and unparseable cases.
func TestMinKubeVersionStatus(t *testing.T) {
	namespace := "ns"

	// Minimal pending CSV; only needed to construct the fake operator.
	csv := csv("csv1",
		namespace,
		"0.0.0",
		"",
		v1alpha1.NamedInstallStrategy{StrategyName: "deployment", StrategySpec: v1alpha1.StrategyDetailsDeployment{}},
		nil,
		nil,
		v1alpha1.CSVPhasePending,
	)

	tests := []struct {
		description                 string
		csvName                     string
		minKubeVersion              string
		expectedMet                 bool
		expectedRequirementStatuses []v1alpha1.RequirementStatus
	}{
		{
			// An empty minKubeVersion imposes no requirement at all.
			description:                 "minKubeVersion is not specified",
			csvName:                     "test1",
			minKubeVersion:              "",
			expectedMet:                 true,
			expectedRequirementStatuses: []v1alpha1.RequirementStatus{},
		},
		{
			description:    "minKubeVersion is met",
			csvName:        "test2",
			minKubeVersion: "0.0.0",
			expectedMet:    true,
			expectedRequirementStatuses: []v1alpha1.RequirementStatus{
				{
					Status:  v1alpha1.RequirementStatusReasonPresent,
					Message: fmt.Sprintf("CSV minKubeVersion (%s) less than server version", "0.0.0"),
					Group:   "operators.coreos.com",
					Version: "v1alpha1",
					Kind:    "ClusterServiceVersion",
					Name:    "test2",
				},
			},
		},
		{
			description:    "minKubeVersion is unmet",
			csvName:        "test3",
			minKubeVersion: "999.999.999",
			expectedMet:    false,
			expectedRequirementStatuses: []v1alpha1.RequirementStatus{
				{
					Status:  v1alpha1.RequirementStatusReasonPresentNotSatisfied,
					Message: fmt.Sprintf("CSV version requirement not met: minKubeVersion (%s)", "999.999.999"),
					Group:   "operators.coreos.com",
					Version: "v1alpha1",
					Kind:    "ClusterServiceVersion",
					Name:    "test3",
				},
			},
		},
		{
			// A version string that cannot be parsed must fail the requirement
			// rather than silently passing it.
			description:    "minKubeVersion is invalid",
			csvName:        "test4",
			minKubeVersion: "a.b.c",
			expectedMet:    false,
			expectedRequirementStatuses: []v1alpha1.RequirementStatus{
				{
					Status:  v1alpha1.RequirementStatusReasonPresentNotSatisfied,
					Message: "CSV version parsing error",
					Group:   "operators.coreos.com",
					Version: "v1alpha1",
					Kind:    "ClusterServiceVersion",
					Name:    "test4",
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.TODO())
			defer cancel()

			op, err := NewFakeOperator(ctx, withNamespaces(namespace), withOperatorNamespace(namespace), withClientObjs(csv))
			require.NoError(t, err)

			// Evaluate the minKubeVersion requirement against the fake server version.
			met, status := op.minKubeVersionStatus(test.csvName, test.minKubeVersion)
			require.Equal(t, test.expectedMet, met)
			if len(test.expectedRequirementStatuses) > 0 {
				// require.Equal takes (t, expected, actual); the original had the
				// arguments reversed, which produced misleading failure output.
				expected := test.expectedRequirementStatuses[0]
				require.Equal(t, expected.Status, status[0].Status)
				require.Equal(t, expected.Kind, status[0].Kind)
				require.Equal(t, expected.Name, status[0].Name)
				require.Contains(t, status[0].Message, expected.Message)
			} else {
				// No minKubeVersion specified: no requirement status is reported.
				require.Equal(t, []v1alpha1.RequirementStatus(nil), status)
			}
		})
	}
}
// TestOthersInstalledAlongside exercises othersInstalledAlongside, which
// reports the names of other CSVs annotated as installed alongside the
// target CSV. Per the cases below: CSVs in other namespaces, the target
// itself, copied CSVs, and CSVs that are not replace-chain ancestors of the
// target are excluded; replace-chain ancestors are included, even when the
// replaces chain contains a cycle.
func TestOthersInstalledAlongside(t *testing.T) {
	for _, tc := range []struct {
		// Name of the subtest.
		Name string
		// All is the full set of alongside-annotated installations
		// (namespace/name pairs) recorded on the annotated object.
		All []alongside.NamespacedName
		// Target is the CSV whose "others installed alongside" are queried.
		Target v1alpha1.ClusterServiceVersion
		// InNamespace holds the other CSVs present in the target's namespace,
		// served by the fake lister below.
		InNamespace []v1alpha1.ClusterServiceVersion
		// Expected is the expected set of returned CSV names (order-insensitive).
		Expected []string
	}{
		{
			Name: "csv in different namespace excluded",
			All: []alongside.NamespacedName{
				{Namespace: "namespace-2", Name: "a"},
			},
			Target: v1alpha1.ClusterServiceVersion{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "b",
					Namespace: "namespace-1",
				},
			},
			InNamespace: []v1alpha1.ClusterServiceVersion{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "a",
					},
				},
			},
			Expected: nil,
		},
		{
			Name: "given csv excluded",
			All: []alongside.NamespacedName{
				{Namespace: "namespace", Name: "a"},
			},
			Target: v1alpha1.ClusterServiceVersion{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "a",
					Namespace: "namespace",
				},
			},
			Expected: nil,
		},
		{
			Name: "returns nil if given csv is included",
			All: []alongside.NamespacedName{
				{Namespace: "namespace", Name: "a"},
				{Namespace: "namespace", Name: "b"},
			},
			Target: v1alpha1.ClusterServiceVersion{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "a",
					Namespace: "namespace",
				},
				Spec: v1alpha1.ClusterServiceVersionSpec{
					Replaces: "b",
				},
			},
			InNamespace: []v1alpha1.ClusterServiceVersion{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "b",
					},
				},
			},
			Expected: nil,
		},
		{
			Name: "copied csv excluded",
			All: []alongside.NamespacedName{
				{Namespace: "namespace", Name: "b"},
			},
			Target: v1alpha1.ClusterServiceVersion{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "a",
					Namespace: "namespace",
				},
			},
			InNamespace: []v1alpha1.ClusterServiceVersion{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "b",
					},
					// Marked as a copy; copies are never reported.
					Status: v1alpha1.ClusterServiceVersionStatus{
						Reason: v1alpha1.CSVReasonCopied,
					},
				},
			},
			Expected: nil,
		},
		{
			Name: "non-ancestor csv excluded",
			All: []alongside.NamespacedName{
				{Namespace: "namespace", Name: "b"},
			},
			Target: v1alpha1.ClusterServiceVersion{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "a",
					Namespace: "namespace",
				},
			},
			InNamespace: []v1alpha1.ClusterServiceVersion{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "b",
					},
				},
			},
			Expected: nil,
		},
		{
			// a replaces b, b replaces c: both b and c are ancestors of a.
			Name: "ancestor csvs included",
			All: []alongside.NamespacedName{
				{Namespace: "namespace", Name: "b"},
				{Namespace: "namespace", Name: "c"},
			},
			Target: v1alpha1.ClusterServiceVersion{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "a",
					Namespace: "namespace",
				},
				Spec: v1alpha1.ClusterServiceVersionSpec{
					Replaces: "b",
				},
			},
			InNamespace: []v1alpha1.ClusterServiceVersion{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "b",
					},
					Spec: v1alpha1.ClusterServiceVersionSpec{
						Replaces: "c",
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "c",
					},
				},
			},
			Expected: []string{"b", "c"},
		},
		{
			// c replaces b, b replaces a: b and c are descendants of a,
			// not ancestors, so neither is reported.
			Name: "descendant csvs excluded",
			All: []alongside.NamespacedName{
				{Namespace: "namespace", Name: "b"},
				{Namespace: "namespace", Name: "c"},
			},
			Target: v1alpha1.ClusterServiceVersion{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "a",
					Namespace: "namespace",
				},
			},
			InNamespace: []v1alpha1.ClusterServiceVersion{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "c",
					},
					Spec: v1alpha1.ClusterServiceVersionSpec{
						Replaces: "b",
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "b",
					},
					Spec: v1alpha1.ClusterServiceVersionSpec{
						Replaces: "a",
					},
				},
			},
			Expected: nil,
		},
		{
			// a -> b -> c -> a forms a cycle; traversal must still
			// terminate and report the ancestors b and c.
			Name: "ancestor csvs included with cycle",
			All: []alongside.NamespacedName{
				{Namespace: "namespace", Name: "b"},
				{Namespace: "namespace", Name: "c"},
			},
			Target: v1alpha1.ClusterServiceVersion{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "a",
					Namespace: "namespace",
				},
				Spec: v1alpha1.ClusterServiceVersionSpec{
					Replaces: "b",
				},
			},
			InNamespace: []v1alpha1.ClusterServiceVersion{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "b",
					},
					Spec: v1alpha1.ClusterServiceVersionSpec{
						Replaces: "c",
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "c",
					},
					Spec: v1alpha1.ClusterServiceVersionSpec{
						Replaces: "a",
					},
				},
			},
			Expected: []string{"b", "c"},
		},
	} {
		t.Run(tc.Name, func(t *testing.T) {
			var (
				o        metav1.ObjectMeta
				a        alongside.Annotator
				nslister operatorlisterfakes.FakeClusterServiceVersionNamespaceLister
			)
			// Fake lister: resolves the target and any CSV from InNamespace
			// by name; everything else is NotFound.
			nslister.GetCalls(func(name string) (*v1alpha1.ClusterServiceVersion, error) {
				if name == tc.Target.GetName() {
					return tc.Target.DeepCopy(), nil
				}
				for _, csv := range tc.InNamespace {
					if csv.GetName() == name {
						return csv.DeepCopy(), nil
					}
				}
				return nil, errors.NewNotFound(schema.GroupResource{}, name)
			})

			// Record the "installed alongside" annotations on a fresh object.
			a.ToObject(&o, tc.All)

			actual := othersInstalledAlongside(&o, tc.Target.DeepCopy(), &nslister)
			assert.ElementsMatch(t, actual, tc.Expected)
		})
	}
}
|
package leetcode
import "math"
// reverse returns the decimal digits of x in reverse order, preserving the
// sign, e.g. 123 -> 321 and -120 -> -21. If the reversed value would fall
// outside the signed 32-bit integer range, it returns 0 (LeetCode problem 7
// semantics). Go's % operator keeps the sign of the dividend, so negative
// inputs need no special-casing; the original explicit x == 0 early return
// was dead code (the loop never runs for 0) and has been removed.
func reverse(x int) int {
	// Thresholds for detecting int32 overflow one digit early:
	// MaxInt32 (2147483647) ends in 7, MinInt32 (-2147483648) ends in -8.
	const (
		max = math.MaxInt32 / 10
		min = math.MinInt32 / 10
	)
	ret := 0
	for x != 0 {
		t := x % 10
		// ret*10 + t would exceed MaxInt32.
		if ret > max || (ret == max && t > 7) {
			return 0
		}
		// ret*10 + t would fall below MinInt32.
		if ret < min || (ret == min && t < -8) {
			return 0
		}
		ret = ret*10 + t
		x /= 10
	}
	return ret
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.