text stringlengths 11 4.05M |
|---|
package main
// MemorySpec holds a memory request/limit pair as free-form strings.
// Neither field is validated here — presumably Kubernetes-style quantities
// (e.g. "64Mi"); confirm with the code that consumes this struct.
type MemorySpec struct {
	Request string
	Limit   string
}
|
package main
import (
_ "fmt"
"testing"
)
// TestCostAllocationMatcher verifies that a monthly cost-allocation CSV
// file name is recognized by monthlyCostAllocationMatcher.
func TestCostAllocationMatcher(t *testing.T) {
	const name = "376681487066-aws-cost-allocation-2013-06.csv"
	if matched := monthlyCostAllocationMatcher.MatchString(name); !matched {
		t.Fail()
	}
}
// TestDetailedBillingWithResourcesMatcher verifies that a detailed billing
// CSV file name (with resources and tags) is recognized by the matcher.
func TestDetailedBillingWithResourcesMatcher(t *testing.T) {
	const name = "376681487066-aws-billing-detailed-line-items-with-resources-and-tags-2014-03.csv"
	if matched := detailedBillingWithResourcesMatcher.MatchString(name); !matched {
		t.Fail()
	}
}
// TestBillingReportTypeString checks the string form of both report types.
func TestBillingReportTypeString(t *testing.T) {
	report := &BillingReport{ReportType: MonthlyCostAllocation}
	if got := report.TypeString(); got != "aws_billing_monthly" {
		t.Fail()
	}
	report.ReportType = DetailedBillingWithResourcesAndTags
	if got := report.TypeString(); got != "aws_billing_hourly" {
		t.Fail()
	}
}
// TestParseReport wires ParseRecord up with a fixture billing report and
// feeds it a single CSV record.
// NOTE(review): nothing ever receives from `out`, so if ParseRecord sends a
// result this goroutine blocks forever (goroutine leak) — confirm
// ParseRecord's send behavior. The csvReader.Read error is also ignored.
func TestParseReport(t *testing.T) {
	in := make(chan []string)
	out := make(chan map[string]interface{})
	// Presumably opens a fixture CSV shipped alongside the tests —
	// TODO confirm test-2014-06.csv exists in the package directory.
	report := OpenBillingReport("test-2014-06.csv")
	report.Mapper = FieldMapper{
		"LinkedAccountId": {
			"041869798014": {
				"AccountName": "MYOB Advanced",
			},
		},
	}
	go ParseRecord(in, out, report)
	values, _ := report.csvReader.Read()
	in <- values
	close(in)
}
|
package tarextract
// hat tip https://gist.github.com/indraniel/1a91458984179ab4cf80
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"os"
"strings"
)
// ExtractTarGz unpacks a gzip-compressed tar stream into the current
// working directory (entry names are used as-is). Only directories and
// regular files are supported; any other entry type aborts extraction.
// Entry names that look dangerous (path traversal) are rejected.
func ExtractTarGz(gzipStream io.Reader) error {
	uncompressedStream, err := gzip.NewReader(gzipStream)
	if err != nil {
		return fmt.Errorf("gzip.NewReader() failed: %s", err.Error())
	}
	defer uncompressedStream.Close()
	tarReader := tar.NewReader(uncompressedStream)
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("Next() failed: %s", err.Error())
		}
		if err := pathLooksDangerous(header.Name); err != nil {
			return err
		}
		switch header.Typeflag {
		case tar.TypeDir:
			// MkdirAll (rather than Mkdir) tolerates archives that omit
			// parent entries and directories that already exist on disk.
			if err := os.MkdirAll(header.Name, 0755); err != nil {
				return fmt.Errorf("Mkdir() failed: %s", err.Error())
			}
		case tar.TypeReg:
			outFile, err := os.Create(header.Name)
			if err != nil {
				return fmt.Errorf("Create() failed: %s", err.Error())
			}
			if _, err := io.Copy(outFile, tarReader); err != nil {
				outFile.Close()
				return fmt.Errorf("Copy() failed: %s", err.Error())
			}
			// Close explicitly: a deferred close here would leak
			// descriptors until return, and the close error can carry a
			// flush failure we must not swallow.
			if err := outFile.Close(); err != nil {
				return fmt.Errorf("Close() failed: %s", err.Error())
			}
		default:
			return fmt.Errorf("unknown type: %x in %s", header.Typeflag, header.Name)
		}
	}
	return nil
}
// pathLooksDangerous reports (as an error) whether an archive entry path
// could escape the extraction directory: any ".." component or an absolute
// path is rejected. The ".." check is deliberately coarse — it also rejects
// harmless names like "a..b" — erring on the safe side.
func pathLooksDangerous(path string) error {
	if strings.Contains(path, "..") || strings.HasPrefix(path, "/") {
		return fmt.Errorf("pathLooksDangerous: %s", path)
	}
	return nil
}
|
package evaluator_test
import (
"testing"
"github.com/makramkd/go-monkey/evaluator"
"github.com/makramkd/go-monkey/lexer"
"github.com/makramkd/go-monkey/object"
"github.com/makramkd/go-monkey/parser"
"github.com/stretchr/testify/assert"
)
func TestEvalIntegerLiteral(t *testing.T) {
testCases := []struct {
input string
expected int64
}{
{"5", 5},
{"10", 10},
{"-5", -5},
{"-10", -10},
{"5 + 4", 9},
{"5 + 4 * 10", 45},
{"28 / 2 + 3 * 4 + 1", 27},
{"(4 + 10) * 2 + (3 + 10) * 2 + 1", 55},
{"2 ** 2", 4},
{"2 % 2", 0},
}
for _, testCase := range testCases {
l := lexer.New(testCase.input)
p := parser.New(l)
program := p.ParseProgram()
env := object.NewEnv()
val := evaluator.Eval(program, env)
assert.IsType(t, &object.Integer{}, val)
integerValue := val.(*object.Integer)
assert.Equal(t, testCase.expected, integerValue.Value)
}
}
func TestEvalBooleanLiteral(t *testing.T) {
testCases := []struct {
input string
expected bool
}{
{"true", true},
{"false", false},
{"1 != 2", true},
{"1 == 1", true},
{"1 == 2", false},
{"1 > 1", false},
{"1 >= 1", true},
{"2 > 1", true},
{"2 < 1", false},
{"2 <= 1", false},
{"1 <= 2", true},
{"1 > 2 && 2 > 1", false},
{"true == true", true},
{"false != true", true},
{"1 && 0", false},
{"1 || 0", true},
}
for _, testCase := range testCases {
l := lexer.New(testCase.input)
p := parser.New(l)
program := p.ParseProgram()
env := object.NewEnv()
val := evaluator.Eval(program, env)
assert.IsType(t, &object.Boolean{}, val)
boolValue := val.(*object.Boolean)
assert.Equal(t, testCase.expected, boolValue.Value)
}
}
func TestBangOperator(t *testing.T) {
testCases := []struct {
input string
expected bool
}{
{"!true", false},
{"!false", true},
{"!5", false},
{"!!true", true},
{"!!false", false},
{"!!5", true},
}
for _, testCase := range testCases {
l := lexer.New(testCase.input)
p := parser.New(l)
program := p.ParseProgram()
env := object.NewEnv()
val := evaluator.Eval(program, env)
assert.IsType(t, &object.Boolean{}, val)
boolValue := val.(*object.Boolean)
assert.Equal(t, testCase.expected, boolValue.Value)
}
}
// TestIfExpressions checks conditional evaluation, including else branches
// and compound conditions.
func TestIfExpressions(t *testing.T) {
	cases := []struct {
		src  string
		want object.Object
	}{
		{"if (true) { 10 }", &object.Integer{Value: 10}},
		{"if (true || false) { false }", &object.Boolean{Value: false}},
		{"if (1 < 2 && (3 - 4) == -1) { 42 } else { 41 }", &object.Integer{Value: 42}},
		{"if (false) { 41 } else { 42 }", &object.Integer{Value: 42}},
	}
	for _, c := range cases {
		program := parser.New(lexer.New(c.src)).ParseProgram()
		assert.Equal(t, c.want, evaluator.Eval(program, object.NewEnv()))
	}
}
// TestReturnStatements checks that return short-circuits evaluation,
// including early returns from nested blocks.
func TestReturnStatements(t *testing.T) {
	cases := []struct {
		src  string
		want object.Object
	}{
		{"return 10;", &object.Integer{Value: 10}},
		{"return true;", &object.Boolean{Value: true}},
		{"1 + 1; return if (1 == 1) { 42 } else { 43 };", &object.Integer{Value: 42}},
		{"if (10 > 1) { if (10 > 2) { return 10; } return 1; }", &object.Integer{Value: 10}},
	}
	for _, c := range cases {
		program := parser.New(lexer.New(c.src)).ParseProgram()
		assert.Equal(t, c.want, evaluator.Eval(program, object.NewEnv()))
	}
}
// TestErrorHandling checks that type mismatches, unknown operators, and
// unbound identifiers evaluate to error objects with the expected messages.
func TestErrorHandling(t *testing.T) {
	cases := []struct {
		src  string
		want *object.Error
	}{
		{"5 + true;", &object.Error{Message: "type mismatch: INTEGER + BOOLEAN"}},
		{"-true;", &object.Error{Message: "unknown operator: -BOOLEAN"}},
		{"true + false", &object.Error{Message: "unknown operator: BOOLEAN + BOOLEAN"}},
		{"if (10 > 1) { if ( 10 > 2 ) { return false + true; } return 42; }", &object.Error{Message: "unknown operator: BOOLEAN + BOOLEAN"}},
		{"if (true + false == 1) { return 42; }", &object.Error{Message: "unknown operator: BOOLEAN + BOOLEAN"}},
		{"if (true == false * 1) { return 42; }", &object.Error{Message: "type mismatch: BOOLEAN * INTEGER"}},
		{"foobar", &object.Error{Message: "identifier not found: foobar"}},
	}
	for _, c := range cases {
		program := parser.New(lexer.New(c.src)).ParseProgram()
		assert.Equal(t, c.want, evaluator.Eval(program, object.NewEnv()))
	}
}
// TestLetStatements checks variable binding and later lookup, including
// bindings initialized from expressions and other bindings.
func TestLetStatements(t *testing.T) {
	cases := []struct {
		src  string
		want object.Object
	}{
		{"let a = 5; a;", &object.Integer{Value: 5}},
		{"let a = 5 + 5; a;", &object.Integer{Value: 10}},
		{"let a = if (5 > 4) { 42 } else { 41 }; a;", &object.Integer{Value: 42}},
		{"let a = 5; let b = a; let c = a + b + 5; c;", &object.Integer{Value: 15}},
	}
	for _, c := range cases {
		program := parser.New(lexer.New(c.src)).ParseProgram()
		assert.Equal(t, c.want, evaluator.Eval(program, object.NewEnv()))
	}
}
// TestFunctionObject checks function definition and application, including
// parameter shadowing and nested functions.
func TestFunctionObject(t *testing.T) {
	cases := []struct {
		src  string
		want object.Object
	}{
		{`let f = fn(x) { return x + 2; }; f(2);`, &object.Integer{Value: 4}},
		{`let f = fn(x, y) { return x**2 + y**2; }; f(2, 2);`, &object.Integer{Value: 8}},
		{`let x = 2; let f = fn(x) { return x ** 2; }; f(3);`, &object.Integer{Value: 9}},
		{
			`let x = 2;
let f = fn(x) {
	let inner = fn(y) {
		return y ** 2;
	};
	return inner(x + 1);
};
f(3);
`, &object.Integer{Value: 16}},
	}
	for _, c := range cases {
		p := parser.New(lexer.New(c.src))
		program := p.ParseProgram()
		assert.Len(t, p.Errors(), 0)
		assert.Equal(t, c.want, evaluator.Eval(program, object.NewEnv()))
	}
}
// TestStringOperations checks string concatenation and equality operators.
func TestStringOperations(t *testing.T) {
	cases := []struct {
		src  string
		want object.Object
	}{
		{`"hello world" + " today";`, &object.String{Value: "hello world today"}},
		{`let firstName = "Makram"; let lastName = "Kamaleddine"; let f = fn (first, last) { return first + " " + last; }; f(firstName, lastName);`,
			&object.String{Value: "Makram Kamaleddine"}},
		{`"hello world" == "hello world";`, &object.Boolean{Value: true}},
		{`"hello world" != "today";`, &object.Boolean{Value: true}},
	}
	for _, c := range cases {
		p := parser.New(lexer.New(c.src))
		program := p.ParseProgram()
		assert.Empty(t, p.Errors())
		assert.Equal(t, c.want, evaluator.Eval(program, object.NewEnv()))
	}
}
// TestBuiltinFunctions checks the len builtin, including its error cases
// for unsupported types and wrong arity.
func TestBuiltinFunctions(t *testing.T) {
	cases := []struct {
		src  string
		want object.Object
	}{
		{`len("")`, &object.Integer{Value: 0}},
		{`len("hello")`, &object.Integer{Value: 5}},
		{`len(1)`, &object.Error{Message: "argument to 'len' not supported, got INTEGER"}},
		{`len("one", "two")`, &object.Error{Message: "wrong number of arguments. got=2, want=1"}},
	}
	for _, c := range cases {
		p := parser.New(lexer.New(c.src))
		program := p.ParseProgram()
		assert.Empty(t, p.Errors())
		assert.Equal(t, c.want, evaluator.Eval(program, object.NewEnv()))
	}
}
// TestArrayAccess checks indexing into array literals of mixed types.
func TestArrayAccess(t *testing.T) {
	cases := []struct {
		src  string
		want object.Object
	}{
		{`let a = [1, 2, 3, 4]; a[0];`, &object.Integer{Value: 1}},
		{`let a = [true, false, "hello"]; a[0];`, &object.Boolean{Value: true}},
		{`let a = [true, false, "hello"]; a[2];`, &object.String{Value: "hello"}},
	}
	for _, c := range cases {
		p := parser.New(lexer.New(c.src))
		program := p.ParseProgram()
		assert.Empty(t, p.Errors())
		assert.Equal(t, c.want, evaluator.Eval(program, object.NewEnv()))
	}
}
|
package encoding
import (
"bytes"
"context"
"testing"
"github.com/grafana/tempo/tempodb/backend"
"github.com/grafana/tempo/tempodb/encoding/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestFromVersionErrors asserts that an unknown version string yields an
// error and a nil encoding.
func TestFromVersionErrors(t *testing.T) {
	enc, err := FromVersion("definitely-not-a-real-version")
	assert.Nil(t, enc)
	assert.Error(t, err)
}
// TestAllVersions round-trips data through every known encoding version
// combined with every supported backend encoding.
func TestAllVersions(t *testing.T) {
	for _, v := range allEncodings() {
		encoding, err := FromVersion(v.Version())
		// Check the error before touching the result: on failure the
		// returned encoding is nil and calling Version() on it would panic
		// instead of producing a useful test failure.
		require.NoError(t, err)
		require.Equal(t, v.Version(), encoding.Version())
		for _, e := range backend.SupportedEncoding {
			testDataWriterReader(t, v, e)
		}
	}
}
// testDataWriterReader writes a single object through the versioned data
// writer, then reads it back through the data reader and iterator,
// asserting the payload and ID survive the round trip.
func testDataWriterReader(t *testing.T, v VersionedEncoding, e backend.Encoding) {
	tests := []struct {
		readerBytes []byte
	}{
		{
			readerBytes: []byte{0x01, 0x02},
		},
		{
			readerBytes: []byte{0x01, 0x02, 0x03, 0x04},
		},
	}
	for _, tc := range tests {
		// Each case runs in its own closure so the deferred Close calls
		// fire at the end of the iteration; in the original they piled up
		// until the whole test function returned (resource leak in a loop).
		func() {
			buff := bytes.NewBuffer([]byte{})
			dataWriter, err := v.NewDataWriter(buff, e)
			require.NoError(t, err)
			_, err = dataWriter.Write([]byte{0x01}, tc.readerBytes)
			require.NoError(t, err)
			bytesWritten, err := dataWriter.CutPage()
			require.NoError(t, err)
			err = dataWriter.Complete()
			require.NoError(t, err)
			reader := bytes.NewReader(buff.Bytes())
			dataReader, err := v.NewDataReader(backend.NewContextReaderWithAllReader(reader), e)
			require.NoError(t, err)
			defer dataReader.Close()
			actual, _, err := dataReader.Read(context.Background(), []common.Record{
				{
					Start:  0,
					Length: uint32(bytesWritten),
				},
			}, nil)
			require.NoError(t, err)
			require.Len(t, actual, 1)
			i := NewIterator(bytes.NewReader(actual[0]), v.NewObjectReaderWriter())
			defer i.Close()
			id, obj, err := i.Next(context.Background())
			assert.NoError(t, err)
			assert.Equal(t, tc.readerBytes, obj)
			assert.Equal(t, []byte{0x01}, []byte(id))
		}()
	}
}
|
/*
* Neblio REST API Suite
*
* APIs for Interacting with NTP1 Tokens & The Neblio Blockchain
*
* API version: 1.3.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package neblioapi
// Error is the API error payload (generated model).
type Error struct {
	// Code is the numeric error code returned by the API.
	Code int32 `json:"code,omitempty"`
	// Message is a human-readable description of the error.
	Message string `json:"message,omitempty"`
	// Fields presumably names the request fields the error relates to —
	// confirm against the API spec this model was generated from.
	Fields string `json:"fields,omitempty"`
}
|
package adapterstest
import (
"fmt"
"net/http/httptest"
"strings"
"testing"
"net/http"
"github.com/prebid/openrtb/v19/openrtb2"
)
// OrtbMockService Represents a scaffolded OpenRTB service.
// OrtbMockService Represents a scaffolded OpenRTB service.
type OrtbMockService struct {
	// Server is the in-memory HTTP test server backing the mock.
	Server *httptest.Server
	// LastBidRequest is the most recent bid request seen — presumably
	// populated by a handler wired up elsewhere; confirm with the scaffolding.
	LastBidRequest *openrtb2.BidRequest
	// LastHttpRequest is the most recent raw HTTP request seen.
	LastHttpRequest *http.Request
}
// BidOnTags Produces a map of TagIds, based on a comma separated strings. The map
// contains the list of tags to bid on.
// BidOnTags Produces a map of TagIds, based on a comma separated strings. The map
// contains the list of tags to bid on.
func BidOnTags(tags string) map[string]bool {
	set := make(map[string]bool)
	for _, tag := range strings.Split(tags, ",") {
		set[tag] = true
	}
	return set
}
// SampleBid Produces a sample bid based on params given.
// SampleBid Produces a sample bid based on params given.
// NOTE(review): impId is accepted but unused — ImpID is derived from index
// instead; confirm whether any caller relies on impId being honored.
// width and height are dereferenced without nil checks, so callers must
// pass non-nil pointers.
func SampleBid(width *int64, height *int64, impId string, index int) openrtb2.Bid {
	return openrtb2.Bid{
		ID:    "Bid-123",
		ImpID: fmt.Sprintf("div-adunit-%d", index),
		Price: 2.1,
		AdM:   "<div>This is an Ad</div>",
		CrID:  "Cr-234",
		W:     *width,
		H:     *height,
	}
}
// VerifyStringValue Helper function to assert string equals.
// VerifyStringValue Helper function to assert string equals.
func VerifyStringValue(value string, expected string, t *testing.T) {
	if value != expected {
		// Pass a constant format string: t.Fatalf(fmt.Sprintf(...)) trips
		// go vet's printf check and misformats values containing % verbs.
		t.Fatalf("%s expected, got %s", expected, value)
	}
}
// VerifyIntValue Helper function to assert Int equals.
// VerifyIntValue Helper function to assert Int equals.
func VerifyIntValue(value int, expected int, t *testing.T) {
	if value != expected {
		// Constant format string avoids the vet printf violation of
		// t.Fatalf(fmt.Sprintf(...)).
		t.Fatalf("%d expected, got %d", expected, value)
	}
}
// VerifyBoolValue Helper function to assert bool equals.
// VerifyBoolValue Helper function to assert bool equals.
func VerifyBoolValue(value bool, expected bool, t *testing.T) {
	if value != expected {
		// Constant format string avoids the vet printf violation of
		// t.Fatalf(fmt.Sprintf(...)).
		t.Fatalf("%v expected, got %v", expected, value)
	}
}
// VerifyBannerSize helper function to assert banner size
// VerifyBannerSize helper function to assert banner size
// Dereferences banner.W and banner.H without nil checks, so the banner
// must have both dimensions populated.
func VerifyBannerSize(banner *openrtb2.Banner, expectedWidth int, expectedHeight int, t *testing.T) {
	VerifyIntValue(int(*(banner.W)), expectedWidth, t)
	VerifyIntValue(int(*(banner.H)), expectedHeight, t)
}
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"fmt"
"os"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
)
// defaultKeySize is the RSA key size (bits) used for generated keys.
const defaultKeySize = 2048

// We use 366 days on certificate lifetimes to at least match X years,
// otherwise leap years risk putting us just under.
const defaultCALifetime = 10 * 366 * 24 * time.Hour  // ten years
const defaultCertLifetime = 5 * 366 * 24 * time.Hour // five years

// Options settable via command-line flags. See below for defaults
// (initPreFlagsCertDefaults).
var keySize int
var caCertificateLifetime time.Duration
var certificateLifetime time.Duration
var allowCAKeyReuse bool
var overwriteFiles bool
var generatePKCS8Key bool
// initPreFlagsCertDefaults resets the cert-related flag variables to their
// defaults. The name suggests it runs before flag binding so repeated
// initializations start from a clean state — confirm with the caller.
func initPreFlagsCertDefaults() {
	keySize = defaultKeySize
	caCertificateLifetime = defaultCALifetime
	certificateLifetime = defaultCertLifetime
	allowCAKeyReuse = false
	overwriteFiles = false
	generatePKCS8Key = false
}
// A createCACert command generates a CA certificate and stores it
// in the cert directory. It takes no positional arguments (cobra.NoArgs);
// everything is driven by flags.
var createCACertCmd = &cobra.Command{
	Use:   "create-ca --certs-dir=<path to cockroach certs dir> --ca-key=<path-to-ca-key>",
	Short: "create CA certificate and key",
	Long: `
Generate a CA certificate "<certs-dir>/ca.crt" and CA key "<ca-key>".
The certs directory is created if it does not exist.
If the CA key exists and --allow-ca-key-reuse is true, the key is used.
If the CA certificate exists and --overwrite is true, the new CA certificate is prepended to it.
`,
	Args: cobra.NoArgs,
	RunE: MaybeDecorateGRPCError(runCreateCACert),
}
// runCreateCACert generates a key and CA certificate and writes them
// to their corresponding files. Key size, lifetime, reuse, and overwrite
// behavior come from the package-level flag variables declared above.
func runCreateCACert(cmd *cobra.Command, args []string) error {
	return errors.Wrap(
		security.CreateCAPair(
			baseCfg.SSLCertsDir,
			baseCfg.SSLCAKey,
			keySize,
			caCertificateLifetime,
			allowCAKeyReuse,
			overwriteFiles),
		"failed to generate CA cert and key")
}
// A createClientCACert command generates a client CA certificate and stores it
// in the cert directory. Takes no positional arguments; flag-driven.
var createClientCACertCmd = &cobra.Command{
	Use:   "create-client-ca --certs-dir=<path to cockroach certs dir> --ca-key=<path-to-client-ca-key>",
	Short: "create client CA certificate and key",
	Long: `
Generate a client CA certificate "<certs-dir>/ca-client.crt" and CA key "<client-ca-key>".
The certs directory is created if it does not exist.
If the CA key exists and --allow-ca-key-reuse is true, the key is used.
If the CA certificate exists and --overwrite is true, the new CA certificate is prepended to it.
The client CA is optional and should only be used when separate CAs are desired for server certificates
and client certificates.
If the client CA exists, a client.node.crt client certificate must be created using:
cockroach cert create-client node
Once the client.node.crt exists, all client certificates will be verified using the client CA.
`,
	Args: cobra.NoArgs,
	RunE: MaybeDecorateGRPCError(runCreateClientCACert),
}
// runCreateClientCACert generates a key and CA certificate and writes them
// to their corresponding files. Mirrors runCreateCACert but produces the
// separate client CA pair.
func runCreateClientCACert(cmd *cobra.Command, args []string) error {
	return errors.Wrap(
		security.CreateClientCAPair(
			baseCfg.SSLCertsDir,
			baseCfg.SSLCAKey,
			keySize,
			caCertificateLifetime,
			allowCAKeyReuse,
			overwriteFiles),
		"failed to generate client CA cert and key")
}
// A createNodeCert command generates a node certificate and stores it
// in the cert directory. Positional arguments are the hosts (IP or DNS)
// to embed in the certificate; at least one is required.
var createNodeCertCmd = &cobra.Command{
	Use:   "create-node --certs-dir=<path to cockroach certs dir> --ca-key=<path-to-ca-key> <host 1> <host 2> ... <host N>",
	Short: "create node certificate and key",
	Long: `
Generate a node certificate "<certs-dir>/node.crt" and key "<certs-dir>/node.key".
If --overwrite is true, any existing files are overwritten.
At least one host should be passed in (either IP address or dns name).
Requires a CA cert in "<certs-dir>/ca.crt" and matching key in "--ca-key".
If "ca.crt" contains more than one certificate, the first is used.
Creation fails if the CA expiration time is before the desired certificate expiration.
`,
	// Custom validator: reject an empty host list up front with a clear error.
	Args: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			return errors.Errorf("create-node requires at least one host name or address, none was specified")
		}
		return nil
	},
	RunE: MaybeDecorateGRPCError(runCreateNodeCert),
}
// runCreateNodeCert generates key pair and CA certificate and writes them
// to their corresponding files. args carries the host names/addresses
// validated by the command's Args function.
// TODO(marc): there is currently no way to specify which CA cert to use if more
// than one is present. We shoult try to load each certificate along with the key
// and pick the one that works. That way, the key specifies the certificate.
func runCreateNodeCert(cmd *cobra.Command, args []string) error {
	return errors.Wrap(
		security.CreateNodePair(
			baseCfg.SSLCertsDir,
			baseCfg.SSLCAKey,
			keySize,
			certificateLifetime,
			overwriteFiles,
			args),
		"failed to generate node certificate and key")
}
// A createClientCert command generates a client certificate and stores it
// in the cert directory under <username>.crt and key under <username>.key.
// Takes exactly one positional argument: the username.
var createClientCertCmd = &cobra.Command{
	Use:   "create-client --certs-dir=<path to cockroach certs dir> --ca-key=<path-to-ca-key> <username>",
	Short: "create client certificate and key",
	Long: `
Generate a client certificate "<certs-dir>/client.<username>.crt" and key
"<certs-dir>/client.<username>.key".
If --overwrite is true, any existing files are overwritten.
Requires a CA cert in "<certs-dir>/ca.crt" and matching key in "--ca-key".
If "ca.crt" contains more than one certificate, the first is used.
Creation fails if the CA expiration time is before the desired certificate expiration.
`,
	Args: cobra.ExactArgs(1),
	RunE: MaybeDecorateGRPCError(runCreateClientCert),
}
// runCreateClientCert generates key pair and CA certificate and writes them
// to their corresponding files. The username (args[0]) is validated and
// normalized before the pair is created.
// TODO(marc): there is currently no way to specify which CA cert to use if more
// than one if present.
func runCreateClientCert(cmd *cobra.Command, args []string) error {
	username, err := security.MakeSQLUsernameFromUserInput(args[0], security.UsernameCreation)
	if err != nil {
		return errors.Wrap(err, "failed to generate client certificate and key")
	}
	return errors.Wrap(
		security.CreateClientPair(
			baseCfg.SSLCertsDir,
			baseCfg.SSLCAKey,
			keySize,
			certificateLifetime,
			overwriteFiles,
			username,
			generatePKCS8Key),
		"failed to generate client certificate and key")
}
// A listCerts command lists the certificates and keys found in the
// configured certificate directory. (The previous comment here was a
// stale copy-paste from create-client.)
var listCertsCmd = &cobra.Command{
	Use:   "list",
	Short: "list certs in --certs-dir",
	Long: `
List certificates and keys found in the certificate directory.
`,
	Args: cobra.NoArgs,
	RunE: MaybeDecorateGRPCError(runListCerts),
}
// runListCerts loads all certs from the configured directory and prints a
// table with one row per certificate (usage, files, expiration, notes,
// load error).
func runListCerts(cmd *cobra.Command, args []string) error {
	cm, err := security.NewCertificateManager(baseCfg.SSLCertsDir, security.CommandTLSSettings{})
	if err != nil {
		return errors.Wrap(err, "cannot load certificates")
	}
	fmt.Fprintf(os.Stdout, "Certificate directory: %s\n", baseCfg.SSLCertsDir)
	certTableHeaders := []string{"Usage", "Certificate File", "Key File", "Expires", "Notes", "Error"}
	alignment := "llllll"
	var rows [][]string
	// addRow appends one table row for ci, with the per-kind notes column.
	addRow := func(ci *security.CertInfo, notes string) {
		var errString string
		if ci.Error != nil {
			errString = ci.Error.Error()
		}
		rows = append(rows, []string{
			ci.FileUsage.String(),
			ci.Filename,
			ci.KeyFilename,
			ci.ExpirationTime.Format("2006/01/02"),
			notes,
			errString,
		})
	}
	// caNotes returns the "num certs" annotation for CA bundles, or "" when
	// the cert failed to parse.
	caNotes := func(cert *security.CertInfo) string {
		if cert.Error == nil && len(cert.ParsedCertificates) > 0 {
			return fmt.Sprintf("num certs: %d", len(cert.ParsedCertificates))
		}
		return ""
	}
	// certAddresses collects the leaf certificate's DNS names and IPs, or a
	// placeholder when the cert failed to parse.
	certAddresses := func(cert *security.CertInfo) []string {
		if cert.Error == nil && len(cert.ParsedCertificates) > 0 {
			addresses := cert.ParsedCertificates[0].DNSNames
			for _, ip := range cert.ParsedCertificates[0].IPAddresses {
				addresses = append(addresses, ip.String())
			}
			return addresses
		}
		return []string{"<unknown>"}
	}
	if cert := cm.CACert(); cert != nil {
		addRow(cert, caNotes(cert))
	}
	if cert := cm.ClientCACert(); cert != nil {
		addRow(cert, caNotes(cert))
	}
	if cert := cm.UICACert(); cert != nil {
		addRow(cert, caNotes(cert))
	}
	if cert := cm.NodeCert(); cert != nil {
		addRow(cert, fmt.Sprintf("addresses: %s", strings.Join(certAddresses(cert), ",")))
	}
	if cert := cm.UICert(); cert != nil {
		addRow(cert, fmt.Sprintf("addresses: %s", strings.Join(certAddresses(cert), ",")))
	}
	for _, cert := range cm.ClientCerts() {
		user := "<unknown>"
		if cert.Error == nil && len(cert.ParsedCertificates) > 0 {
			user = cert.ParsedCertificates[0].Subject.CommonName
		}
		addRow(cert, fmt.Sprintf("user: %s", user))
	}
	return printQueryOutput(os.Stdout, certTableHeaders, newRowSliceIter(rows, alignment))
}
// certCmds groups all subcommands attached to the parent "cert" command.
var certCmds = []*cobra.Command{
	createCACertCmd,
	createClientCACertCmd,
	createNodeCertCmd,
	createClientCertCmd,
	listCertsCmd,
}

// certCmd is the parent "cert" command; invoked without a subcommand it
// runs usageAndErr (presumably printing usage and returning an error —
// confirm with that helper's definition).
var certCmd = &cobra.Command{
	Use:   "cert",
	Short: "create ca, node, and client certs",
	RunE:  usageAndErr,
}

func init() {
	// Attach every cert subcommand to the parent "cert" command.
	certCmd.AddCommand(certCmds...)
}
|
package model
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewGameID(t *testing.T) {
for i := 0; i < 100; i++ {
gID := NewGameID()
require.NotEqual(t, InvalidGameID, gID)
}
}
// TestIsValidPlayerID exercises IsValidPlayerID across allowed character
// classes (letters, digits, underscores) and rejected ones (dashes,
// spaces, punctuation, empty).
func TestIsValidPlayerID(t *testing.T) {
	cases := []struct {
		desc  string
		id    string
		valid bool
	}{
		{desc: `normal stuff`, id: `normalStuff`, valid: true},
		{desc: `has numbers`, id: `w1thnumb3r5`, valid: true},
		{desc: `has underscores`, id: `has_under_scores`, valid: true},
		{desc: `caps`, id: `hAsCaPiTaLlEtTeRs`, valid: true},
		{desc: `has dashes`, id: `has-dashes-yanno`, valid: false},
		{desc: `spaces`, id: `has spaces dude`, valid: false},
		{desc: `special chars`, id: `what!`, valid: false},
		{desc: `empty string`, id: ``, valid: false},
	}
	for _, c := range cases {
		assert.Equal(t, c.valid, IsValidPlayerID(PlayerID(c.id)), c.desc)
	}
}
|
package main
import (
"encoding/json"
"fmt"
"net/http"
"os"
"sort"
"strings"
)
// Problem identifies a Codeforces problem by contest ID and index letter,
// as decoded from the Codeforces API JSON.
type Problem struct {
	ContestID int    `json:"contestId"`
	Index     string `json:"index"`
	Name      string `json:"name"`
}
// Problems implements sort.Interface: higher (newer) contest IDs sort
// first, then problems within a contest by index ascending.
type Problems []Problem

func (s Problems) Len() int      { return len(s) }
func (s Problems) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s Problems) Less(i, j int) bool {
	// Descending by contest ID puts the most recent contests first.
	if s[i].ContestID != s[j].ContestID {
		return s[i].ContestID > s[j].ContestID
	}
	return strings.Compare(s[i].Index, s[j].Index) < 0
}
// Submission is one entry of the user.status API result; only the verdict
// and the problem it refers to are decoded.
type Submission struct {
	Verdict string  `json:"verdict"`
	Problem Problem `json:"problem"`
}

// Contest is one entry of the contest.list API result.
type Contest struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}
// fetchAcceptedProblems returns the problems the given Codeforces user has
// solved (verdict "OK"), via the user.status API. It panics on any failure,
// matching this tool's fail-fast style.
func fetchAcceptedProblems(handle string) []Problem {
	resq, err := http.Get("http://codeforces.com/api/user.status?from=1&handle=" + handle)
	if err != nil {
		panic(err)
	}
	defer resq.Body.Close()
	// A non-200 response carries an error payload, not the expected JSON;
	// fail loudly instead of silently decoding an empty result.
	if resq.StatusCode != http.StatusOK {
		panic(fmt.Errorf("user.status: unexpected HTTP status %s", resq.Status))
	}
	var us struct {
		Submissions []Submission `json:"result"`
	}
	if err := json.NewDecoder(resq.Body).Decode(&us); err != nil {
		panic(err)
	}
	var res []Problem
	for _, s := range us.Submissions {
		if s.Verdict == "OK" {
			res = append(res, s.Problem)
		}
	}
	return res
}
// fetchContests returns all Codeforces contests keyed by contest ID, via
// the contest.list API. It panics on any failure, matching this tool's
// fail-fast style.
func fetchContests() map[int]Contest {
	resq, err := http.Get("http://codeforces.com/api/contest.list")
	if err != nil {
		panic(err)
	}
	defer resq.Body.Close()
	// A non-200 response carries an error payload, not the expected JSON;
	// fail loudly instead of silently decoding an empty result.
	if resq.StatusCode != http.StatusOK {
		panic(fmt.Errorf("contest.list: unexpected HTTP status %s", resq.Status))
	}
	var cl struct {
		Contests []Contest `json:"result"`
	}
	if err := json.NewDecoder(resq.Body).Decode(&cl); err != nil {
		panic(err)
	}
	res := make(map[int]Contest, len(cl.Contests))
	for _, c := range cl.Contests {
		res[c.ID] = c
	}
	return res
}
// main prints the problems the first handle has solved that the second has
// not, newest contest first, as "contest name / index / problem name".
func main() {
	if len(os.Args) < 3 {
		// Explain the expected invocation instead of exiting silently.
		fmt.Fprintf(os.Stderr, "usage: %s <handle1> <handle2>\n", os.Args[0])
		os.Exit(2)
	}
	handle1, handle2 := os.Args[1], os.Args[2]
	ps1 := fetchAcceptedProblems(handle1)
	ps2 := fetchAcceptedProblems(handle2)
	// Start with everything handle1 solved, keyed by problem name, then
	// remove everything handle2 solved.
	diff := map[string]Problem{}
	for _, p := range ps1 {
		diff[p.Name] = p
	}
	for _, p := range ps2 {
		delete(diff, p.Name)
	}
	var ps []Problem
	for _, p := range diff {
		ps = append(ps, p)
	}
	sort.Sort(Problems(ps))
	cm := fetchContests()
	for _, p := range ps {
		// Problems whose contest is missing from contest.list are skipped.
		if c, ok := cm[p.ContestID]; ok {
			fmt.Printf("%s / %s / %s\n", c.Name, p.Index, p.Name)
		}
	}
}
|
package securityutils
import (
"crypto/cipher"
"crypto/des"
"errors"
)
// =================== ECB模式 ======================
// DES加密, 使用ECB模式,注意key必须为8位长度
// DesEncryptECB encrypts src with single DES in ECB mode; key must be 8
// bytes. Input is PKCS5-padded to whole blocks first.
// NOTE: ECB reveals plaintext patterns and is not semantically secure;
// retained for compatibility with existing data.
func DesEncryptECB(src []byte, key []byte) ([]byte, error) {
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, err
	}
	bs := block.BlockSize()
	padded := PKCS5Padding(src, bs)
	out := make([]byte, len(padded))
	// Encrypt block by block; ECB has no chaining between blocks.
	for offset := 0; offset < len(padded); offset += bs {
		block.Encrypt(out[offset:offset+bs], padded[offset:offset+bs])
	}
	return out, nil
}
// DES解密, 使用ECB模式,注意key必须为8位长度
// DesDecryptECB decrypts src with single DES in ECB mode; key must be 8
// bytes. PKCS5 padding is stripped from the result.
func DesDecryptECB(src []byte, key []byte) ([]byte, error) {
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, err
	}
	blockSize := block.BlockSize()
	// Reject input that is not whole blocks: the loop below would silently
	// leave a zeroed tail in dst and unpadding would return garbage.
	if len(src)%blockSize != 0 {
		return nil, errors.New("src is not a multiple of the block size")
	}
	dst := make([]byte, len(src)) // 创建数组
	for i, count := 0, len(dst)/blockSize; i < count; i++ { // 解密
		begin, end := i*blockSize, i*blockSize+blockSize
		block.Decrypt(dst[begin:end], src[begin:end])
	}
	dst = PKCS5UnPadding(dst) // 去除PKCS5补位
	return dst, nil
}
// 3DES加密, 使用ECB模式,注意key必须为24位长度
// DesEncryptECBTriple encrypts src with 3DES in ECB mode; key must be 24
// bytes. Input is PKCS5-padded to whole blocks first.
// NOTE: ECB reveals plaintext patterns and is not semantically secure;
// retained for compatibility with existing data.
func DesEncryptECBTriple(src []byte, key []byte) ([]byte, error) {
	block, err := des.NewTripleDESCipher(key)
	if err != nil {
		return nil, err
	}
	bs := block.BlockSize()
	padded := PKCS5Padding(src, bs)
	out := make([]byte, len(padded))
	// Encrypt block by block; ECB has no chaining between blocks.
	for offset := 0; offset < len(padded); offset += bs {
		block.Encrypt(out[offset:offset+bs], padded[offset:offset+bs])
	}
	return out, nil
}
// 3DES解密, 使用ECB模式,注意key必须为24位长度
// DesDecryptECBTriple decrypts src with 3DES in ECB mode; key must be 24
// bytes. PKCS5 padding is stripped from the result.
func DesDecryptECBTriple(src []byte, key []byte) ([]byte, error) {
	block, err := des.NewTripleDESCipher(key)
	if err != nil {
		return nil, err
	}
	blockSize := block.BlockSize()
	// Reject input that is not whole blocks: the loop below would silently
	// leave a zeroed tail in dst and unpadding would return garbage.
	if len(src)%blockSize != 0 {
		return nil, errors.New("src is not a multiple of the block size")
	}
	dst := make([]byte, len(src)) // 创建数组
	for i, count := 0, len(dst)/blockSize; i < count; i++ { // 解密
		begin, end := i*blockSize, i*blockSize+blockSize
		block.Decrypt(dst[begin:end], src[begin:end])
	}
	dst = PKCS5UnPadding(dst) // 去除PKCS5补位
	return dst, nil
}
// =================== CBC模式 ======================
// DES加密, 使用CBC模式,注意key必须为8位长度,iv初始化向量为非必需参数(长度为8位)
// DesEncryptCBC encrypts src with single DES in CBC mode; key must be 8
// bytes. The optional iv (8 bytes) defaults to the key itself when omitted
// — a weak practice (the IV should be random per message), kept for
// compatibility. Input is PKCS5-padded to whole blocks first.
func DesEncryptCBC(src []byte, key []byte, iv ...[]byte) ([]byte, error) {
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, err
	}
	ivValue := key
	if len(iv) > 0 {
		ivValue = iv[0]
	}
	padded := PKCS5Padding(src, block.BlockSize())
	dst := make([]byte, len(padded))
	cipher.NewCBCEncrypter(block, ivValue).CryptBlocks(dst, padded)
	return dst, nil
}
// DES解密, 使用CBC模式,注意key必须为8位长度,iv初始化向量为非必需参数(长度为8位)
// DesDecryptCBC decrypts src with single DES in CBC mode; key must be 8
// bytes. The optional iv (8 bytes) defaults to the key itself when omitted.
// PKCS5 padding is stripped from the result.
func DesDecryptCBC(src []byte, key []byte, iv ...[]byte) ([]byte, error) {
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, err
	}
	bs := block.BlockSize()
	if len(src) < bs {
		return nil, errors.New("src is too short, less than block size")
	}
	// Fall back to the key as IV — weak practice, kept for compatibility.
	ivValue := key
	if len(iv) > 0 {
		ivValue = iv[0]
	}
	if len(src)%bs != 0 {
		return nil, errors.New("src is not a multiple of the block size")
	}
	plain := make([]byte, len(src))
	cipher.NewCBCDecrypter(block, ivValue).CryptBlocks(plain, src)
	return PKCS5UnPadding(plain), nil
}
// 3DES加密, 使用CBC模式,注意key必须为24位长度,iv初始化向量为非必需参数(长度为8位)
// DesEncryptCBCTriple encrypts src with 3DES in CBC mode; key must be 24
// bytes. The optional iv (8 bytes) defaults to the first block-size bytes
// of the key when omitted — weak practice, kept for compatibility.
// Input is PKCS5-padded to whole blocks first.
func DesEncryptCBCTriple(src []byte, key []byte, iv ...[]byte) ([]byte, error) {
	block, err := des.NewTripleDESCipher(key)
	if err != nil {
		return nil, err
	}
	bs := block.BlockSize()
	ivValue := key[:bs]
	if len(iv) > 0 {
		ivValue = iv[0]
	}
	padded := PKCS5Padding(src, bs)
	dst := make([]byte, len(padded))
	cipher.NewCBCEncrypter(block, ivValue).CryptBlocks(dst, padded)
	return dst, nil
}
// 3DES解密, 使用CBC模式,注意key必须为24位长度,iv初始化向量为非必需参数(长度为8位)
// DesDecryptCBCTriple decrypts src with 3DES in CBC mode; key must be 24
// bytes. The optional iv (8 bytes) defaults to the first block-size bytes
// of the key when omitted. PKCS5 padding is stripped from the result.
func DesDecryptCBCTriple(src []byte, key []byte, iv ...[]byte) ([]byte, error) {
	block, err := des.NewTripleDESCipher(key)
	if err != nil {
		return nil, err
	}
	bs := block.BlockSize()
	if len(src) < bs {
		return nil, errors.New("src is too short, less than block size")
	}
	// Fall back to a key prefix as IV — weak practice, kept for compatibility.
	ivValue := key[:bs]
	if len(iv) > 0 {
		ivValue = iv[0]
	}
	if len(src)%bs != 0 {
		return nil, errors.New("src is not a multiple of the block size")
	}
	plain := make([]byte, len(src))
	cipher.NewCBCDecrypter(block, ivValue).CryptBlocks(plain, src)
	return PKCS5UnPadding(plain), nil
}
|
package controllers
import (
"testing"
//"ncbi_proj/server/utils"
//"net/http/httptest"
//"fmt"
)
// TestShow is a placeholder: the entire body is commented out, so the test
// currently asserts nothing and always passes. Either restore the
// commented scenario (GET /file against DirectoryController.Show) or
// delete the test so it does not report false coverage.
func TestShow(t *testing.T) {
	//ctx := utils.NewContext()
	//dc := NewDirectoryController(ctx)
	//
	//w := httptest.NewRecorder()
	//r := httptest.NewRequest("GET", "/file", nil)
	//dc.Show(w, r)
	//fmt.Println(w.Body.String())
}
|
package piscine
var res string

// Itoa converts an integer to its decimal string representation, e.g.
// Itoa(-42) == "-42". The previous version referenced an undeclared
// variable (`result`) and discarded the recursive call's return value, so
// it did not compile; this version is iterative and self-contained, and
// also handles the minimum int value correctly.
func Itoa(nbr int) string {
	if nbr == 0 {
		return "0"
	}
	neg := nbr < 0
	// Collect digits least-significant first; negate each digit rather
	// than nbr itself so the minimum int value does not overflow.
	var digits []byte
	for nbr != 0 {
		d := nbr % 10
		if d < 0 {
			d = -d
		}
		digits = append(digits, byte('0'+d))
		nbr /= 10
	}
	if neg {
		digits = append(digits, '-')
	}
	// Reverse into most-significant-first order.
	for i, j := 0, len(digits)-1; i < j; i, j = i+1, j-1 {
		digits[i], digits[j] = digits[j], digits[i]
	}
	return string(digits)
}
/*
func cleanStr(str string){
func ia(n int, s string){
i := 0;
sign := n;
// absolute value
if n < 0{
n =-n
}
for sign = 1; sign <= n; sign *= 10{
sign /= 10
}
// loop through int and set to string
for sign > 0{
s = string(n / sign + '0')
n %= sign;
i++;
sign /= 10;
}
s = ""
fmt.Print(string(s))
}
*/
|
// Copyright (c) 2019, Arm Ltd
package main
import (
"flag"
"fmt"
"strings"
"os"
"regexp"
"syscall"
"io/ioutil"
"github.com/fsnotify/fsnotify"
"github.com/golang/glog"
"gopkg.in/yaml.v2"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
)
// confFileName is the path of the YAML configuration file listing the
// device patterns to expose; set via the -config flag in init.
var confFileName string

const (
	// deviceFileType marks devices discovered as files under /dev.
	deviceFileType uint = 0
	// nvidiaSysType marks NVIDIA GPUs discovered under /sys/devices.
	nvidiaSysType uint = 1
)
// DeviceInstance ties one discovered device to the device-plugin
// server that advertises it to the kubelet. Exactly one of the two
// plugin pointers is populated, depending on deviceType.
type DeviceInstance struct {
	devicePluginSmarter *SmarterDevicePlugin
	devicePluginNvidia *NvidiaDevicePlugin

	deviceName string // resource name advertised to Kubernetes ("smarter-devices/...")
	socketName string // kubelet device-plugin socket path
	deviceFile string // backing /dev path, or the GPU id for nvidia devices
	numDevices uint   // device slots advertised (NumMaxDevices from the config)
	deviceType uint   // deviceFileType or nvidiaSysType
	deviceId string   // NVIDIA GPU id (nvidiaSysType only)
}

// DesiredDevice is one entry of the YAML config: a regular expression
// of device names to match and the per-device slot limit.
type DesiredDevice struct {
	DeviceMatch string
	NumMaxDevices uint
}
// usage prints the command synopsis plus the registered flag defaults
// to stderr and terminates the process with exit status 2.
func usage() {
	fmt.Fprintf(os.Stderr, "usage: smarter-device-manager\n")
	flag.PrintDefaults()
	os.Exit(2)
}
// init installs the custom usage message, registers the -config flag
// and parses the command line before main runs.
func init() {
	flag.Usage = usage
	// NOTE: This next line is key you have to call flag.Parse() for the command line
	// options or "flags" that are defined in the glog module to be picked up.
	flag.StringVar(&confFileName,"config","config/conf.yaml","set the configuration file to use")
	flag.Parse()
}
func readDevDirectory(dirToList string, allowedRecursions uint8) (files []string, err error) {
var foundFiles []string
fType, err := os.Stat(dirToList)
if err != nil {
return nil, err
}
if !fType.IsDir() {
return nil, nil
}
f, err := os.Open(dirToList)
if err != nil {
return nil, err
}
files, err = f.Readdirnames(-1)
if err != nil {
f.Close()
return nil, err
}
f.Close()
for _, subDir := range files {
foundFiles = append(foundFiles, subDir)
if allowedRecursions > 0 {
filesDir, err := readDevDirectory(dirToList+"/"+subDir,allowedRecursions-1)
if err == nil {
for _, fileName := range filesDir {
foundFiles = append(foundFiles, subDir+"/"+fileName)
}
}
}
}
return foundFiles, nil
}
// sanitizeName turns a device path into a name usable in resource and
// socket names by replacing every "/" with "_".
func sanitizeName(path string) string {
	// strings.ReplaceAll is the idiomatic spelling of Replace(..., -1).
	return strings.ReplaceAll(path, "/", "_")
}
// findDevicesPattern returns the entries of listDevices that match
// the regular expression pattern. The pattern is compiled once up
// front (the previous version recompiled it for every entry); an
// invalid pattern yields (nil, err).
func findDevicesPattern(listDevices []string, pattern string) ([]string, error) {
	re, err := regexp.Compile(pattern)
	if err != nil {
		return nil, err
	}
	var found []string
	for _, file := range listDevices {
		if re.MatchString(file) {
			found = append(found, file)
		}
	}
	return found, nil
}
// main loads the device configuration, discovers matching devices
// under /dev and /sys/devices, and serves one Kubernetes device
// plugin per discovered device. It restarts all plugins whenever the
// kubelet socket is recreated and shuts them down on SIGINT/SIGTERM/
// SIGQUIT (SIGHUP triggers a restart instead).
func main() {
	defer glog.Flush()
	glog.V(0).Info("Loading smarter-device-manager")

	// Read the list of desired device patterns from the YAML config.
	var desiredDevices []DesiredDevice
	glog.V(0).Info("Reading configuration file ",confFileName)
	yamlFile, err := ioutil.ReadFile(confFileName)
	if err != nil {
		// NOTE(review): glog.Fatal does not interpret %v verbs —
		// glog.Fatalf was presumably intended; confirm.
		glog.Fatal("yamlFile.Get err #%v ", err)
	}
	err = yaml.Unmarshal(yamlFile, &desiredDevices)
	if err != nil {
		glog.Fatal("Unmarshal: %v", err)
		os.Exit(-1)
	}

	// Enumerate candidate device files (10 levels deep under /dev,
	// top level only under /sys/devices).
	glog.V(0).Info("Reading existing devices on /dev")
	ExistingDevices, err := readDevDirectory("/dev",10)
	if err != nil {
		glog.Errorf(err.Error())
		os.Exit(1)
	}
	ExistingDevicesSys, err := readDevDirectory("/sys/devices",0)
	if err != nil {
		glog.Errorf(err.Error())
		os.Exit(1)
	}

	// Build one DeviceInstance per device that matches a config entry.
	var listDevicesAvailable []DeviceInstance
	for _, deviceToTest := range desiredDevices {
		if deviceToTest.DeviceMatch == "nvidia-gpu" {
			// Special case: NVIDIA GPUs are discovered via /sys
			// ("gpu.N" entries), not via /dev files.
			glog.V(0).Infof("Checking nvidia devices")
			foundDevices,err := findDevicesPattern(ExistingDevicesSys, "gpu.[0-9]*")
			if err != nil {
				glog.Errorf(err.Error())
				os.Exit(1)
			}
			// If found some create the devices entry
			if len(foundDevices) > 0 {
				for _, deviceToCreate := range foundDevices {
					var newDevice DeviceInstance
					deviceId := strings.TrimPrefix(deviceToCreate,"gpu.")
					newDevice.deviceName = "smarter-devices/" + "nvidia-gpu" + deviceId
					newDevice.deviceId = deviceId
					newDevice.socketName = pluginapi.DevicePluginPath + "smarter-nvidia-gpu" + deviceId + ".sock"
					newDevice.deviceFile = deviceId
					newDevice.numDevices = deviceToTest.NumMaxDevices
					newDevice.deviceType = nvidiaSysType
					listDevicesAvailable = append(listDevicesAvailable, newDevice)
					glog.V(0).Infof("Creating device %s socket and %s name for %s",newDevice.deviceName,newDevice.deviceFile,deviceToTest.DeviceMatch)
				}
			}
		} else {
			glog.V(0).Infof("Checking devices %s on /dev",deviceToTest.DeviceMatch)
			foundDevices,err := findDevicesPattern(ExistingDevices, deviceToTest.DeviceMatch)
			if err != nil {
				glog.Errorf(err.Error())
				os.Exit(1)
			}
			// If found some create the devices entry
			if len(foundDevices) > 0 {
				for _, deviceToCreate := range foundDevices {
					var newDevice DeviceInstance
					deviceSafeName := sanitizeName(deviceToCreate)
					newDevice.deviceType = deviceFileType
					newDevice.deviceName = "smarter-devices/" + deviceSafeName
					newDevice.socketName = pluginapi.DevicePluginPath + "smarter-" + deviceSafeName + ".sock"
					newDevice.deviceFile = "/dev/" + deviceToCreate
					newDevice.numDevices = deviceToTest.NumMaxDevices
					listDevicesAvailable = append(listDevicesAvailable, newDevice)
					glog.V(0).Infof("Creating device %s socket and %s name for %s",newDevice.deviceName,newDevice.deviceFile,deviceToTest.DeviceMatch)
				}
			}
		}
	}

	// Watch the kubelet plugin directory so we can re-register when
	// the kubelet restarts, and watch for termination signals.
	glog.V(0).Info("Starting FS watcher.")
	watcher, err := newFSWatcher(pluginapi.DevicePluginPath)
	if err != nil {
		glog.V(0).Info("Failed to created FS watcher.")
		os.Exit(1)
	}
	defer watcher.Close()
	glog.V(0).Info("Starting OS watcher.")
	sigs := newOSWatcher(syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	restart := true
L:
	for {
		if restart {
			// Stop any plugin servers left over from a previous round
			// before creating fresh ones.
			for _, devicesInUse := range listDevicesAvailable {
				switch devicesInUse.deviceType {
				case deviceFileType :
					if devicesInUse.devicePluginSmarter != nil {
						devicesInUse.devicePluginSmarter.Stop()
					}
				case nvidiaSysType :
					if devicesInUse.devicePluginNvidia != nil {
						devicesInUse.devicePluginNvidia.Stop()
					}
				}
			}
			var err error
			for id, _ := range listDevicesAvailable {
				switch listDevicesAvailable[id].deviceType {
				case deviceFileType :
					listDevicesAvailable[id].devicePluginSmarter = NewSmarterDevicePlugin(listDevicesAvailable[id].numDevices, listDevicesAvailable[id].deviceFile, listDevicesAvailable[id].deviceName, listDevicesAvailable[id].socketName)
					if err = listDevicesAvailable[id].devicePluginSmarter.Serve(); err != nil {
						glog.V(0).Info("Could not contact Kubelet, retrying. Did you enable the device plugin feature gate?")
						// NOTE(review): this break only exits the switch,
						// not the for loop — the loop continues with the
						// next device and the retry happens via the err
						// check below; confirm that is the intent.
						break
					}
				case nvidiaSysType :
					listDevicesAvailable[id].devicePluginNvidia = NewNvidiaDevicePlugin(listDevicesAvailable[id].numDevices, listDevicesAvailable[id].deviceName,"NVIDIA_VISIBLE_DEVICES", listDevicesAvailable[id].socketName, listDevicesAvailable[id].deviceId)
					if err = listDevicesAvailable[id].devicePluginNvidia.Serve(); err != nil {
						glog.V(0).Info("Could not contact Kubelet, retrying. Did you enable the device plugin feature gate?")
						break
					}
				}
			}
			if err != nil {
				// At least one Serve failed: loop around and retry.
				continue
			}
			restart = false
		}
		select {
		case event := <-watcher.Events:
			// A freshly created kubelet socket means the kubelet
			// restarted; all plugins must re-register.
			if event.Name == pluginapi.KubeletSocket && event.Op&fsnotify.Create == fsnotify.Create {
				glog.V(0).Infof("inotify: %s created, restarting.", pluginapi.KubeletSocket)
				restart = true
			}
		case err := <-watcher.Errors:
			glog.V(0).Infof("inotify: %s", err)
		case s := <-sigs:
			switch s {
			case syscall.SIGHUP:
				glog.V(0).Info("Received SIGHUP, restarting.")
				restart = true
			default:
				// Any other signal: stop every plugin and exit the
				// main loop via the L label.
				glog.V(0).Infof("Received signal \"%v\", shutting down.", s)
				for _, devicesInUse := range listDevicesAvailable {
					glog.V(0).Info("Stopping device ", devicesInUse.deviceName)
					switch devicesInUse.deviceType {
					case deviceFileType :
						glog.V(0).Info("Smarter device type")
						if devicesInUse.devicePluginSmarter != nil {
							glog.V(0).Info("Stopping device")
							devicesInUse.devicePluginSmarter.Stop()
						}
					case nvidiaSysType :
						glog.V(0).Info("Nvidia device type")
						if devicesInUse.devicePluginNvidia != nil {
							glog.V(0).Info("Stopping device")
							devicesInUse.devicePluginNvidia.Stop()
						}
					}
				}
				break L
			}
		}
	}
}
|
package command
import (
"fmt"
"strings"
)
// parser accumulates state while matching command-line tokens against
// a Template: which bool/value flags have been seen so far, and (via
// lastLink/more) whether the next token is the value for a pending
// value flag.
type parser struct {
	template *Template
	command *Command
	lastLink link
	more bool
	boolFlagUsed []bool
	valueFlagUsed []bool
}

// newParser builds a parser for template that records results into
// command, with per-flag "used" bookkeeping sized to the template.
func newParser(template *Template, command *Command) *parser {
	return &parser{
		template: template,
		command: command,
		boolFlagUsed: make([]bool, len(template.BoolFlags)),
		valueFlagUsed: make([]bool, len(template.ValueFlags)),
	}
}
// parseFlag dispatches one flag token (its leading "-" already
// stripped by the caller): a remaining "-" prefix marks a long flag,
// a single character is looked up directly, and anything longer is
// treated as a set of single-character flags.
func (p *parser) parseFlag(flag string) (err error) {
	switch {
	case strings.HasPrefix(flag, "-"):
		p.more, err = p.parseMultiCharFlag(strings.TrimPrefix(flag, "-"))
	case len(flag) == 1:
		p.more = p.parseSingleCharFlag(flag)
	default:
		p.parseSetOfSingleCharFlags(flag)
	}
	return
}
// parseSetOfSingleCharFlags treats every character of set as its own
// single-character flag (so "-abc" behaves like "-a -b -c").
func (p *parser) parseSetOfSingleCharFlags(set string) {
	for _, r := range set {
		p.parseSingleCharFlag(string(r))
	}
}
// parseSingleCharFlag resolves flag in the template: a bool flag is
// set immediately, while a value flag is remembered in lastLink and
// more reports that the next token is its value. Unknown flags are
// silently ignored.
func (p *parser) parseSingleCharFlag(flag string) (more bool) {
	l, ok := p.template.flags[flag]
	if !ok {
		return false
	}
	switch l.kind {
	case boolFlag:
		p.command.setBool(p.template.BoolFlags[l.index])
		p.boolFlagUsed[l.index] = true
	case valueFlag:
		more = true
		p.lastLink = l
	}
	return
}
// parseValueEqualFlag handles the "flag=value" form. An empty value
// or a bool flag is an error; an unknown flag is silently ignored.
func (p *parser) parseValueEqualFlag(flagAndValue string) (err error) {
	parts := strings.SplitN(flagAndValue, "=", 2)
	flag, value := parts[0], parts[1]
	if value == "" {
		return fmt.Errorf("cannot set empty value for [ %s ] flag", flag)
	}
	l, ok := p.template.flags[flag]
	if !ok {
		return nil
	}
	switch l.kind {
	case boolFlag:
		return fmt.Errorf("cannot set custom value for boolean flag")
	case valueFlag:
		p.command.setValue(p.template.ValueFlags[l.index], value)
		p.valueFlagUsed[l.index] = true
	}
	return nil
}
// parseMultiCharFlag handles a long flag token: a "flag=value" form
// is delegated to parseValueEqualFlag, a bool flag is set right away,
// and a value flag is remembered so the next token supplies its
// value (signalled via more). Unknown flags are silently ignored.
func (p *parser) parseMultiCharFlag(flag string) (more bool, err error) {
	if strings.Contains(flag, "=") {
		return false, p.parseValueEqualFlag(flag)
	}
	l, ok := p.template.flags[flag]
	if !ok {
		return false, nil
	}
	switch l.kind {
	case boolFlag:
		p.command.setBool(p.template.BoolFlags[l.index])
		p.boolFlagUsed[l.index] = true
	case valueFlag:
		more = true
		p.lastLink = l
	}
	return more, nil
}
// parseValue stores value for the value flag remembered by the most
// recent flag token, then clears the pending-flag state.
func (p *parser) parseValue(value string) {
	idx := p.lastLink.index
	p.command.setValue(p.template.ValueFlags[idx], value)
	p.valueFlagUsed[idx] = true
	p.lastLink = link{}
	p.more = false
}
// setDefaults applies the template's default value to every bool and
// value flag that was not supplied on the command line.
func (p *parser) setDefaults() {
	for i := range p.boolFlagUsed {
		if p.boolFlagUsed[i] {
			continue
		}
		p.command.setBoolDefault(p.template.BoolFlags[i])
	}
	for i := range p.valueFlagUsed {
		if p.valueFlagUsed[i] {
			continue
		}
		p.command.setValueDefault(p.template.ValueFlags[i])
	}
}
|
package follow
import (
"encoding/json"
"net/http"
"github.com/Emoto13/photo-viewer-rest/feed-service/src/follow/models"
)
// FollowClient fetches follow relationships from the follow service
// on behalf of the feed service.
type FollowClient interface {
	GetFollowing(authHeader string) ([]*models.Following, error)
	GetFollowers(authHeader string) ([]*models.Follower, error)
}

// followClient is the HTTP-backed FollowClient; address is the base
// URL of the follow service.
type followClient struct {
	client *http.Client
	address string
}

// NewFollowClient builds a FollowClient that talks to the follow
// service at address using the supplied HTTP client.
func NewFollowClient(client *http.Client, address string) FollowClient {
	return &followClient{client: client, address: address}
}
// GetFollowing fetches the accounts the authenticated user follows,
// forwarding authHeader as the Authorization header and decoding the
// "following" key of the JSON response body.
func (c *followClient) GetFollowing(authHeader string) ([]*models.Following, error) {
	req, err := http.NewRequest("GET", c.address+"/follow-service/get-following", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", authHeader)
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body so the underlying connection can be reused;
	// the previous version leaked it on every call.
	defer resp.Body.Close()
	var responseMap map[string][]*models.Following
	if err := json.NewDecoder(resp.Body).Decode(&responseMap); err != nil {
		return nil, err
	}
	return responseMap["following"], nil
}
// GetFollowers fetches the accounts following the authenticated user,
// forwarding authHeader as the Authorization header and decoding the
// "followers" key of the JSON response body.
func (c *followClient) GetFollowers(authHeader string) ([]*models.Follower, error) {
	req, err := http.NewRequest("GET", c.address+"/follow-service/get-followers", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", authHeader)
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body so the underlying connection can be reused;
	// the previous version leaked it on every call.
	defer resp.Body.Close()
	var responseMap map[string][]*models.Follower
	if err := json.NewDecoder(resp.Body).Decode(&responseMap); err != nil {
		return nil, err
	}
	return responseMap["followers"], nil
}
|
package repository
import (
"github.com/jinzhu/gorm"
"github.com/pagient/pagient-server/pkg/model"
"github.com/pkg/errors"
"github.com/pagient/pagient-server/pkg/service"
)
// tokenRepository persists model.Token rows via the embedded
// gorm-backed sqlRepository.
type tokenRepository struct {
	sqlRepository
}

// NewTokenRepository returns a new instance of a TokenRepository
func NewTokenRepository(db *gorm.DB) service.TokenRepository {
	return &tokenRepository{sqlRepository{db}}
}
// Get looks a token up by its raw value. A missing token yields
// (nil, nil) rather than an error; other DB failures are wrapped.
func (repo *tokenRepository) Get(sess service.DB, rawToken string) (*model.Token, error) {
	// The service layer passes the transaction as an opaque
	// service.DB; this repository is gorm-backed, so assert it.
	session := sess.(*gorm.DB)

	token := &model.Token{}
	err := session.Where(&model.Token{
		Raw: rawToken,
	}).First(token).Error
	if gorm.IsRecordNotFoundError(err) {
		return nil, nil
	}
	// errors.Wrap returns nil for a nil err, so the happy path is fine.
	return token, errors.Wrap(err, "select token failed")
}
// GetByUser returns every token belonging to the user with the given
// username, joining through the users table.
func (repo *tokenRepository) GetByUser(sess service.DB, username string) ([]*model.Token, error) {
	session := sess.(*gorm.DB)

	var tokens []*model.Token
	err := session.
		Joins("JOIN users ON users.id = tokens.user_id").
		Where("users.username = ?", username).Find(&tokens).Error
	return tokens, errors.Wrap(err, "select tokens by user failed")
}
// Add inserts the given token and returns it; DB errors are wrapped.
func (repo *tokenRepository) Add(sess service.DB, token *model.Token) (*model.Token, error) {
	session := sess.(*gorm.DB)

	err := session.Create(token).Error
	return token, errors.Wrap(err, "create token failed")
}
// Remove deletes the given token.
// NOTE(review): gorm's Delete is not generally documented to return a
// record-not-found error, so the entryNotExistErr branch may be
// unreachable — confirm against the gorm version in use.
func (repo *tokenRepository) Remove(sess service.DB, token *model.Token) (*model.Token, error) {
	session := sess.(*gorm.DB)

	err := session.Delete(token).Error
	if gorm.IsRecordNotFoundError(err) {
		return nil, &entryNotExistErr{"token not found"}
	}
	return token, errors.Wrap(err, "delete token failed")
}
|
package resource
import (
"os"
"strings"
"sync"
"testing"
"time"
)
type MockFileInfo struct {
fileName string
}
func NewMockFileInfo(filename string) *MockFileInfo {
return &MockFileInfo{fileName: filename}
}
func (m *MockFileInfo) Name() string { return m.fileName }
func (m *MockFileInfo) Size() int64 { return 0 } // length in bytes for regular files; system-dependent for others
func (m *MockFileInfo) Mode() os.FileMode { return 0 } // file mode bits
func (m *MockFileInfo) ModTime() time.Time { return time.Time{} } // modification time
func (m *MockFileInfo) IsDir() bool { return false } // abbreviation for Mode().IsDir()
func (m *MockFileInfo) Sys() interface{} { return nil } // underlying data source (can return nil)
// TestIsYamlFile checks isYAMLFile against .yaml/.yml paths (match)
// and other extensions or extensionless paths (no match).
func TestIsYamlFile(t *testing.T) {
	cases := []struct {
		filename string
		expect   bool
	}{
		{filename: "file.yaml", expect: true},
		{filename: "/path/to/my/file.yaml", expect: true},
		{filename: "file.yml", expect: true},
		{filename: "/path/to/my/file.yml", expect: true},
		{filename: "file.notyaml", expect: false},
		{filename: "/path/to/my/file.notyaml", expect: false},
		{filename: "/path/to/my/file", expect: false},
	}
	for i, tc := range cases {
		if got := isYAMLFile(NewMockFileInfo(tc.filename)); got != tc.expect {
			t.Errorf("test %d: for filename %s, expected %t, got %t", i+1, tc.filename, tc.expect, got)
		}
	}
}
// TestIsJSONFile checks isJSONFile against .json paths (match) and
// other extensions or extensionless paths (no match).
func TestIsJSONFile(t *testing.T) {
	cases := []struct {
		filename string
		expect   bool
	}{
		{filename: "file.json", expect: true},
		{filename: "/path/to/my/file.json", expect: true},
		{filename: "file.notjson", expect: false},
		{filename: "/path/to/my/file", expect: false},
	}
	for i, tc := range cases {
		if got := isJSONFile(NewMockFileInfo(tc.filename)); got != tc.expect {
			t.Errorf("test %d: for filename %s, expected %t, got %t", i+1, tc.filename, tc.expect, got)
		}
	}
}
// TestFindResourcesInReader feeds YAML data into findResourcesInReader
// and collects everything it emits on its resource and error channels,
// then compares against the expected per-document Resource values.
// Note the expected splitting behavior encoded in the third case: the
// first document keeps its leading "---" and loses the trailing
// newline, while subsequent documents are emitted without the "---"
// separator.
func TestFindResourcesInReader(t *testing.T) {
	maxResourceSize := 4 * 1024 * 1024 // 4MB ought to be enough for everybody
	buf := make([]byte, maxResourceSize) // We reuse this to avoid multiple large memory allocations
	for i, testCase := range []struct {
		filePath string
		yamlData string
		res []Resource
		errs []error
	}{
		{
			"manifest.yaml",
			``,
			[]Resource{
				{
					Path: "manifest.yaml",
					Bytes: nil,
					sig: nil,
				},
			},
			nil,
		},
		{
			"manifest.yaml",
			`---
foo: bar
`,
			[]Resource{
				{
					Path: "manifest.yaml",
					Bytes: []byte("---\nfoo: bar\n"),
					sig: nil,
				},
			},
			nil,
		},
		{
			"manifest.yaml",
			`---
foo: bar
---
lorem: ipsum
`,
			[]Resource{
				{
					Path: "manifest.yaml",
					Bytes: []byte("---\nfoo: bar"),
					sig: nil,
				},
				{
					Path: "manifest.yaml",
					Bytes: []byte("lorem: ipsum\n"),
					sig: nil,
				},
			},
			nil,
		},
	} {
		res := make(chan Resource)
		errs := make(chan error)
		receivedResources := []Resource{}
		receivedErrs := []error{}
		var wg sync.WaitGroup
		wg.Add(1)
		// Drain both channels concurrently; a channel variable is set
		// to nil once closed so its select case stops firing, and the
		// goroutine exits when both are nil.
		go func() {
			for {
				select {
				case receivedResource, ok := <-res:
					if ok {
						receivedResources = append(receivedResources, receivedResource)
						continue
					}
					res = nil
				case receivedErr, ok := <-errs:
					if ok {
						receivedErrs = append(receivedErrs, receivedErr)
						continue
					}
					errs = nil
				}
				if res == nil && errs == nil {
					break
				}
			}
			wg.Done()
		}()
		r := strings.NewReader(testCase.yamlData)
		findResourcesInReader(testCase.filePath, r, res, errs, buf)
		close(res)
		close(errs)
		wg.Wait()
		if len(receivedResources) != len(testCase.res) {
			t.Errorf("test %d: expected %d resources, received %d: %+v", i, len(testCase.res), len(receivedResources), receivedResources)
			continue
		}
		for j, r := range receivedResources {
			if r.Path != testCase.res[j].Path {
				t.Errorf("test %d, resource %d, expected path %s, received %s", i, j, testCase.res[j].Path, r.Path)
			}
			if string(r.Bytes) != string(testCase.res[j].Bytes) {
				t.Errorf("test %d, resource %d, expected Bytes %s, received %s", i, j, string(testCase.res[j].Bytes), string(r.Bytes))
			}
		}
	}
}
|
package languagecode
// Format represents a specific language code standard (and thus a
// specific string serialization of a Language).
type Format int

const (
	// FormatAlpha3 is an ISO-639-2 language code.
	FormatAlpha3 Format = iota
	// FormatAlpha3B is an ISO-639-2/B language code.
	FormatAlpha3B
	// FormatAlpha2 is an ISO-639-1 language code.
	FormatAlpha2
	// formatsCount is the number of formats; used to size lookup tables.
	formatsCount
)

// Serialize the specified Language into a language code string of the Format.
func (f Format) Serialize(language Language) string {
	return codes[language.code][f]
}

// Deserialize the specified language code string of the Format into a
// Language. An unknown code yields the zero Language.
func (f Format) Deserialize(languageCode string) Language {
	return languages[f][languageCode]
}
// languages holds, for each Format, the reverse lookup from language
// code string to Language. It is derived once at init time from the
// forward table `codes`.
var languages = func() (l [formatsCount]map[string]Language) {
	for f := Format(0); f < formatsCount; f++ {
		l[f] = make(map[string]Language, len(codes))
		for j, languageCodes := range codes {
			l[f][languageCodes[f]] = Language{code: code(j)}
		}
	}
	return
}()
|
package api
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/boltdb/bolt"
"github.com/gorilla/mux"
"github.com/pborman/uuid"
"k8s.io/api/core/v1"
platform "kolihub.io/koli/pkg/apis/core/v1alpha1"
"kolihub.io/koli/pkg/git/conf"
"kolihub.io/koli/pkg/request"
)
// getKey builds the BoltDB key for a release: "<deployName>/<gitSha>".
func getKey(deployName, gitSha string) []byte {
	key := filepath.Join(deployName, gitSha)
	return []byte(key)
}
// fileExistsTrueFn is a test stub for the handler's fileExists hook
// that reports every path as present.
func fileExistsTrueFn(basepath, filename string) bool { return true }
// getBoltDb opens a throwaway BoltDB under /tmp (random file name)
// and returns it together with a cleanup func that closes and
// deletes the database file.
func getBoltDb(t *testing.T) (*bolt.DB, func()) {
	dbFile := fmt.Sprintf("/tmp/%s.db", uuid.New()[:6])
	db, err := bolt.Open(dbFile, 0600, nil)
	if err != nil {
		t.Fatalf("Failed open bolt database: %v", err)
	}
	return db, func() {
		db.Close()
		os.Remove(dbFile)
	}
}
// newGitInfo builds a GitInfo fixture with fixed metadata and the
// given identity/commit fields. A nil files map is normalized to an
// empty map so fixtures always round-trip through JSON consistently.
func newGitInfo(namespace, name, id string, files map[string]int64) *platform.GitInfo {
	i := &platform.GitInfo{
		Name: name,
		Namespace: namespace,
		KubeRef: "foo",
		GitBranch: "master",
		SourceType: "github",
		HeadCommit: platform.HeadCommit{
			ID: id,
			Author: "Koli Inc",
			AvatarURL: "https://avatar-url.jpg",
			Compare: "https://compare-url",
			Message: "A good commit",
			URL: "https://github.com/koli/go-getting-started",
		},
		Files: files,
	}
	if files == nil {
		i.Files = make(map[string]int64)
	}
	return i
}
// LoadDbWithRandomData fills the namespace bucket with `items`
// releases for deployName (random commit IDs) plus 10 decoy entries
// under deployName+"r1" so tests can verify key-prefix filtering.
// NOTE(review): bucket-creation and Put errors are ignored and
// `_ = b` is a leftover no-op — confirm before relying on error
// behavior here.
func LoadDbWithRandomData(db *bolt.DB, namespace, deployName string, items int) {
	obj := newGitInfo(namespace, deployName, "", nil)
	db.Update(func(tx *bolt.Tx) error {
		b, _ := tx.CreateBucketIfNotExists([]byte(namespace))
		_ = b
		for i := 0; i < items; i++ {
			obj.HeadCommit.ID = randomShaHash()
			data, _ := json.Marshal(obj)
			b.Put(getKey(deployName, obj.HeadCommit.ID), data)
		}
		// random data
		for i := 0; i < 10; i++ {
			obj.HeadCommit.ID = randomShaHash()
			data, _ := json.Marshal(obj)
			b.Put(getKey(deployName+"r1", obj.HeadCommit.ID), data)
		}
		return nil
	})
}
// randomShaHash returns the hex-encoded SHA-1 digest of a freshly
// generated UUID, yielding a random 40-character commit-ID-like
// string for test fixtures.
func randomShaHash() string {
	sum := sha1.Sum([]byte(uuid.New()))
	return hex.EncodeToString(sum[:])
}
// TestCreateNewReleaseMetadata POSTs a release payload to the objects
// endpoint and verifies the record persisted in BoltDB round-trips
// byte-for-byte (CreatedAt is copied from the stored object before
// comparing, since the server fills it in).
func TestCreateNewReleaseMetadata(t *testing.T) {
	var (
		namespace, name, gitSha = "prod-kim-koli", "myapp", "b4b36461355c0caf16b7deb8d33ba7dc5ba7093e"
		requestPath = fmt.Sprintf("/releases/v1beta1/%s/%s/objects/%s", namespace, name, gitSha)
		r = mux.NewRouter()
		gitHandler = NewHandler(&conf.Config{GitHome: "/tmp"}, nil, nil)
		requestBody = newGitInfo(namespace, name, gitSha, nil)
	)
	s := r.PathPrefix("/releases/v1beta1/{namespace}/{deployName}").Subrouter()
	s.HandleFunc("/objects/{gitSha}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gitHandler.V1beta1ListReleases(w, r)
	})).Methods("POST").Headers("Content-Type", "application/json")
	requestURL, ts := runHttpTestServer(s, gitHandler, nil)
	defer ts.Close()

	db, close := getBoltDb(t)
	gitHandler.boltDB = db
	gitHandler.user = &platform.User{Customer: "kim", Organization: "koli"}
	defer close()
	_, err := request.NewRequest(nil, requestURL).
		Post().
		Body(requestBody).
		RequestPath(requestPath).
		Do().
		Raw()
	if err != nil {
		t.Fatalf("Failed creating release: %#v", err)
	}
	// Read the stored record back directly from BoltDB and compare.
	gitHandler.boltDB.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(namespace))
		key := filepath.Join(name, gitSha)
		got := &platform.GitInfo{}
		json.Unmarshal(b.Get([]byte(key)), got)
		requestBody.CreatedAt = got.CreatedAt
		if !reflect.DeepEqual(requestBody, got) {
			t.Errorf("GOT: %#v, EXPECTED: %#v", got, requestBody)
		}
		return nil
	})
}
// TestMustNotOverrideReleaseMetadata POSTs the same release payload
// twice and expects the second attempt to be rejected with
// 409 Conflict instead of overwriting the stored metadata.
func TestMustNotOverrideReleaseMetadata(t *testing.T) {
	var (
		namespace, name, gitSha = "prod-kim-koli", "myapp", "b4b36461355c0caf16b7deb8d33ba7dc5ba7093e"
		requestPath = fmt.Sprintf("/releases/v1beta1/%s/%s/objects", namespace, name)
		r = mux.NewRouter()
		gitHandler = NewHandler(&conf.Config{GitHome: "/tmp"}, nil, nil)
		requestBody = newGitInfo(namespace, name, gitSha, nil)
	)
	s := r.PathPrefix("/releases/v1beta1/{namespace}/{deployName}").Subrouter()
	s.HandleFunc("/objects", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gitHandler.V1beta1ListReleases(w, r)
	})).Methods("POST").Headers("Content-Type", "application/json")
	requestURL, ts := runHttpTestServer(s, gitHandler, nil)
	defer ts.Close()

	db, close := getBoltDb(t)
	gitHandler.boltDB = db
	gitHandler.user = &platform.User{Customer: "kim", Organization: "koli"}
	defer close()

	// Only the second response matters: the first create succeeds,
	// the repeat must conflict.
	var response *request.Result
	for i := 0; i < 2; i++ {
		response = request.NewRequest(nil, requestURL).
			Post().
			Body(requestBody).
			RequestPath(requestPath).
			Do()
	}
	if response.StatusCode() != 409 {
		t.Fatalf("Unexpected Status Code: %v", response.Error())
	}
}
// TestUpdateMetadataFiles seeds a release containing only slug.tgz,
// PUTs a payload adding build.log, and expects the response to merge
// both file entries.
func TestUpdateMetadataFiles(t *testing.T) {
	var (
		namespace, name, gitSha = "prod-kim-koli", "myapp", "b4b36461355c0caf16b7deb8d33ba7dc5ba7093e"
		requestPath = fmt.Sprintf("/releases/v1beta1/%s/%s/objects/%s", namespace, name, gitSha)
		r = mux.NewRouter()
		gitHandler = NewHandler(&conf.Config{GitHome: "/tmp"}, nil, nil)
		expectedFiles = map[string]int64{"slug.tgz": 324802, "build.log": 2940}
		expectedObj = newGitInfo(namespace, name, gitSha, map[string]int64{"slug.tgz": expectedFiles["slug.tgz"]})
	)
	s := r.PathPrefix("/releases/v1beta1/{namespace}/{deployName}").Subrouter()
	s.HandleFunc("/objects/{gitSha}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gitHandler.V1beta1Releases(w, r)
	})).Methods("PUT").Headers("Content-Type", "application/json")
	requestURL, ts := runHttpTestServer(s, gitHandler, nil)
	defer ts.Close()

	db, close := getBoltDb(t)
	gitHandler.boltDB = db
	// Stub the on-disk check so the handler believes the files exist.
	gitHandler.fileExists = fileExistsTrueFn
	gitHandler.user = &platform.User{Customer: "kim", Organization: "koli"}
	defer close()

	// Seed the pre-existing release record directly into BoltDB.
	db.Update(func(tx *bolt.Tx) error {
		b, _ := tx.CreateBucketIfNotExists([]byte(namespace))
		key := []byte(filepath.Join(name, gitSha))
		data, _ := json.Marshal(expectedObj)
		return b.Put(key, data)
	})
	requestBody := &platform.GitInfo{Files: map[string]int64{"build.log": expectedFiles["build.log"]}}
	respBody := &platform.GitInfo{}
	err := request.NewRequest(nil, requestURL).
		Put().
		Body(requestBody).
		RequestPath(requestPath).
		Do().
		Into(respBody)
	if err != nil {
		t.Fatalf("Unexpected Response: %v", err)
	}
	if !reflect.DeepEqual(respBody.Files, expectedFiles) {
		t.Errorf("GOT: %#v, EXPECTED: %#v", respBody.Files, expectedObj.Files)
	}
}
// TestListReleases seeds 50 releases (plus decoy entries) and expects
// the list endpoint to cap the response at maxItems entries.
// NOTE(review): the local `close` returned by getBoltDb shadows the
// builtin close throughout these tests.
func TestListReleases(t *testing.T) {
	var (
		namespace, name = "prod-kim-koli", "myapp"
		requestPath = fmt.Sprintf("/releases/v1beta1/%s/%s/objects", namespace, name)
		r = mux.NewRouter()
		gitHandler = NewHandler(&conf.Config{GitHome: "/tmp"}, nil, nil)
	)
	s := r.PathPrefix("/releases/v1beta1/{namespace}/{deployName}").Subrouter()
	s.HandleFunc("/objects", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gitHandler.V1beta1ListReleases(w, r)
	})).Methods("GET")
	requestURL, ts := runHttpTestServer(s, gitHandler, nil)
	defer ts.Close()

	db, close := getBoltDb(t)
	defer close()
	LoadDbWithRandomData(db, namespace, name, 50)
	gitHandler.boltDB = db
	gitHandler.user = &platform.User{Customer: "kim", Organization: "koli"}

	infoList := platform.GitInfoList{}
	err := request.NewRequest(nil, requestURL).
		Get().
		RequestPath(requestPath).
		Do().
		Into(&infoList)
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}
	if len(infoList.Items) != maxItems {
		t.Errorf("EXPECTED %d items. Found %d item(s)", maxItems, len(infoList.Items))
	}
}
// TestGetRelease seeds a known release among 50 random ones and
// expects the GET-by-sha endpoint to return exactly that object.
func TestGetRelease(t *testing.T) {
	var (
		namespace, name, gitSha = "prod-kim-koli", "myapp", "a1b12d59152d7e2a8c387a5b736efcfda46c3eef"
		requestPath = fmt.Sprintf("/releases/v1beta1/%s/%s/objects/%s", namespace, name, gitSha)
		r = mux.NewRouter()
		gitHandler = NewHandler(&conf.Config{GitHome: "/tmp"}, nil, nil)
		expectedObj = newGitInfo(namespace, name, gitSha, map[string]int64{"slug.tgz": 120})
	)
	s := r.PathPrefix("/releases/v1beta1/{namespace}/{deployName}").Subrouter()
	s.HandleFunc("/objects/{gitSha}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gitHandler.V1beta1Releases(w, r)
	})).Methods("GET")
	requestURL, ts := runHttpTestServer(s, gitHandler, nil)
	defer ts.Close()

	db, close := getBoltDb(t)
	defer close()
	LoadDbWithRandomData(db, namespace, name, 50)
	// Insert the record under the exact key the endpoint will fetch.
	db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(namespace))
		if b == nil {
			return nil
		}
		key := []byte(filepath.Join(name, gitSha))
		data, _ := json.Marshal(expectedObj)
		return b.Put(key, data)
	})
	gitHandler.boltDB = db
	gitHandler.user = &platform.User{Customer: "kim", Organization: "koli"}

	got := &platform.GitInfo{}
	err := request.NewRequest(nil, requestURL).
		Get().
		RequestPath(requestPath).
		Do().
		Into(got)
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}
	if !reflect.DeepEqual(expectedObj, got) {
		t.Errorf("EXPECTED %v GOT %v", expectedObj, got)
	}
}
// TestSeekReleasesByAttribute stores one release per searchable
// attribute (source, kubeRef, status) and verifies the seek endpoint
// filters on each q/in query-parameter pair, returning exactly the
// matching record.
func TestSeekReleasesByAttribute(t *testing.T) {
	var (
		namespace, name, sha = "prod-kim-koli", "myapp", "087350c7edc234fdfcd7e8836a1bb6522e641568"
		requestPath = fmt.Sprintf("/releases/v1beta1/%s/%s/seek", namespace, name)
		r = mux.NewRouter()
		gitHandler = NewHandler(&conf.Config{GitHome: "/tmp"}, nil, nil)
		expectedObj = newGitInfo(namespace, name, sha, nil)
	)
	s := r.PathPrefix("/releases/v1beta1/{namespace}/{deployName}").Subrouter()
	s.HandleFunc("/seek", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gitHandler.V1beta1SeekReleases(w, r)
	})).Methods("GET")
	requestURL, ts := runHttpTestServer(s, gitHandler, nil)
	defer ts.Close()

	db, close := getBoltDb(t)
	defer close()
	LoadDbWithRandomData(db, namespace, name, 50)
	gitHandler.boltDB = db
	gitHandler.user = &platform.User{Customer: "kim", Organization: "koli"}
	testCases := []struct {
		attr string
		value string
	}{
		{"source", "gogs"},
		{"kubeRef", "sb-build-v15"},
		{"status", string(v1.PodFailed)},
	}
	// Seed one record per attribute, each with a fresh commit ID so
	// the seek has exactly one match per query.
	for _, testc := range testCases {
		db.Update(func(tx *bolt.Tx) error {
			b := tx.Bucket([]byte(namespace))
			switch testc.attr {
			case "source":
				expectedObj.SourceType = testc.value
			case "kubeRef":
				expectedObj.KubeRef = testc.value
			case "status":
				expectedObj.Status = v1.PodPhase(testc.value)
			}
			expectedObj.HeadCommit.ID = randomShaHash()
			data, _ := json.Marshal(expectedObj)
			key := []byte(filepath.Join(name, sha))
			return b.Put(key, data)
		})
	}
	for _, testc := range testCases {
		infoList := platform.GitInfoList{}
		err := request.NewRequest(nil, requestURL).
			Get().
			RequestPath(requestPath).
			AddQuery("q", testc.value).
			AddQuery("in", testc.attr).
			Do().
			Into(&infoList)
		if err != nil {
			t.Fatalf("Got unexpected error: %v", err)
		}
		if len(infoList.Items) != 1 {
			t.Fatalf("EXPECTED 1 record, GOT: %d", len(infoList.Items))
		}
		i := infoList.Items[0]
		switch testc.attr {
		case "source":
			if i.SourceType != testc.value {
				t.Errorf("GOT: %v, EXPECTED: %v", i.SourceType, testc.value)
			}
		case "kubeRef":
			if i.KubeRef != testc.value {
				t.Errorf("GOT: %v, EXPECTED: %v", i.KubeRef, testc.value)
			}
		case "status":
			if i.Status != v1.PodPhase(testc.value) {
				t.Errorf("GOT: %v, EXPECTED: %v", i.Status, testc.value)
			}
		}
	}
}
|
package main
import (
"fmt"
)
// main demonstrates swapping two variables, printing them before and
// after the swap.
func main() {
	a := 42
	b := 153
	fmt.Println("a:", a)
	fmt.Println("b:", b)
	// Go's multiple assignment swaps without a temporary variable.
	a, b = b, a
	fmt.Println("a:", a)
	fmt.Println("b:", b)
}
|
package model
import (
"github.com/RudyDamara/golang/lib/models"
"github.com/RudyDamara/golang/pkg/user_login/structs"
)
// UserLoginModel is the data-access contract for the user-login
// package; Logout delivers its outcome asynchronously on the
// returned channel.
type UserLoginModel interface {
	Logout(structs.User) chan models.Result
}
|
package main
import (
"fmt"
)
// START OMIT
// CustomError is a minimal error implementation carrying only a
// message string, used to demonstrate satisfying the error interface.
type CustomError struct {
	Message string
}

// Error implements the error interface by returning the stored message.
func (e *CustomError) Error() string {
	return e.Message
}

// main stores a *CustomError in an error variable and prints its message.
func main() {
	var err error = &CustomError{Message: "It is a custom error"}
	fmt.Printf("Error: %s \n", err.Error())
}
// END OMIT
|
package main
import (
"fmt"
"strings"
"bufio"
"os"
)
// main reads one line from stdin, upper-cases it, and prints "Found!"
// when it starts with 'I', ends with 'N', and contains an 'A'
// anywhere; otherwise it prints "Not Found!".
func main() {
	fmt.Printf("Enter a string:\n")
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan()
	str := strings.ToUpper(scanner.Text())
	// Guard the length: the previous version sliced str[0:1]
	// unconditionally and panicked on empty input.
	if len(str) > 0 &&
		strings.HasPrefix(str, "I") &&
		strings.HasSuffix(str, "N") &&
		strings.Contains(str, "A") {
		fmt.Printf("Found!\n")
	} else {
		fmt.Printf("Not Found!\n")
	}
}
|
package main
// Generate sends the successive integers 2, 3, 4, ... into ch,
// blocking on each send; it never returns.
func Generate(ch chan<- int) {
	n := 2
	for {
		ch <- n
		n++
	}
}
/**
Channel direction syntax:
  in <-chan int  — in is receive-only: values can only be received FROM it.
  out chan<- int — out is send-only: values can only be sent INTO it.
**/
// Filter forwards values from in to out, dropping every value
// divisible by prime. The println calls are debug tracing (written
// to stderr); the loop never returns.
func Filter(in <-chan int, out chan<- int, prime int) {
	for {
		println("-------- filter ------------")
		i := <-in
		println("i: ", i, "-------------")
		println("prime: ", prime, "-------------")
		if i%prime != 0 {
			out <- i
		}
	}
}
// main demonstrates channel plumbing with a prime-sieve shape: each
// received value spawns a Filter stage that strips its multiples.
// NOTE(review): the chaining line "ch = ch1" is commented out, so
// every Filter reads from the same ch and the printed values are not
// the primes a classic sieve would produce — presumably this is a
// deliberate channel-semantics demo; confirm intent before "fixing".
func main() {
	ch := make(chan int) // make the channel for ch
	go Generate(ch) // feed ch with the natural numbers starting at 2
	for i := 0; i < 10; i++ {
		prime := <-ch // receive the next value the generator goroutine produced
		println(prime, "\n")
		ch1 := make(chan int)
		go Filter(ch, ch1, prime)
		// ch = ch1
	}
}
|
package golang_blockchain
// type Nonce []byte
// Next returns the successor of nonce treated as a little-endian
// arbitrary-precision counter: byte 0 is incremented first; a 255
// byte rolls over to 0 with carry into the next byte, and when every
// byte carries the result grows by one zero byte. The receiver is
// not mutated. (The previous version named a local variable `len`,
// shadowing the builtin; it is renamed here.)
func (nonce *Nonce) Next() Nonce {
	size := len(*nonce)
	if size == 0 {
		return Nonce{0}
	}
	next := make(Nonce, size)
	copy(next, *nonce)
	for index := 0; ; index++ {
		if next[index] < 255 {
			next[index]++
			return next
		}
		next[index] = 0
		if index == size-1 {
			// Every byte overflowed: extend with a fresh zero byte.
			return append(next, 0)
		}
	}
}
|
package main
import "github.com/cakazies/project-service/routes"
// main constructs the project-service HTTP server from the routes
// package and starts serving.
func main() {
	api := routes.ProjectServer{}
	api.Run()
}
|
package command
import "runtime"
// os caches the operating system name reported by runtime.GOOS.
// NOTE(review): this package-level name shadows the standard "os"
// package within this file — keep that in mind before importing os here.
var os = runtime.GOOS
func GetCommand(command string) string {
switch command {
case "load_avg":
return getLoadAvgCommand()
case "cpu":
return getCPUCommand()
case "disk_io":
return getDiskIOCommand()
default:
return getLoadAvgCommand()
}
}
// getLoadAvgCommand returns the load-average sampling command for the
// current platform.
func getLoadAvgCommand() string {
	return chooseCmd("top | head -3 | tail -1 | cut -d\":\" -f2", "top -b -n1")
}

// getCPUCommand returns the CPU usage sampling command for the
// current platform.
func getCPUCommand() string {
	return chooseCmd("top | head -4 | tail -1 | cut -d\":\" -f2", "top -b -n1")
}

// getDiskIOCommand returns the disk I/O sampling command for the
// current platform.
func getDiskIOCommand() string {
	return chooseCmd("iostat -dC", "iostat -d -k")
}
// chooseCmd picks the darwin or linux variant of a command based on
// the detected operating system; any platform other than darwin
// receives the linux variant.
func chooseCmd(cmdDarwin string, cmdLinux string) string {
	if os == "darwin" {
		return cmdDarwin
	}
	return cmdLinux
}
|
package main
import (
"runtime"
"sync"
"fmt"
)
// main fans out ten Go workers across all CPUs and blocks until
// every one has signalled completion.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	var wg sync.WaitGroup
	wg.Add(10)
	for i := 0; i < 10; i++ {
		go Go(&wg, i)
	}
	wg.Wait()
}
func Go(wg *sync.WaitGroup,index int) {
a:=1
for i:=0;i<1000000 ;i++ {
a+=i
}
fmt.Println(index,a)
wg.Done()
}
|
package address
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// scalarType enumerates the value types Of accepts: plain scalars
// plus the two time types used alongside the Kubernetes API.
type scalarType interface {
	bool | int | int64 | time.Time | metav1.Time
}

// Of returns a pointer to a copy of i — a shorthand for taking the
// address of a literal value, e.g. Of(true).
func Of[T scalarType](i T) *T {
	return &i
}
|
package main
import "fmt"
// const (
// winter = 1
// summer = 3
// yearly = winter + summer
// )
// func main() {
// var books [yearly]string
// books[0] = "kafka's revenge"
// books[1] = "stay Golden"
// books[2] = "Everythingship"
// books[3] = books[0] + " 2nd Edition"
// fmt.Printf("books :%#v\n", books)
// var (
// wBooks [winter]string
// sBooks [summer]string
// )
// wBooks[0] = books[0]
// sBooks[0] = books[1]
// sBooks[1] = books[2]
// sBooks[2] = books[3]
// for i:=0;i<len(sBooks);i++{
// sBooks[i] = books[i+1]
// }
// for i := range sBooks{
// sBooks[i] = books[i+1]
// }
// fmt.Printf("\nwinter : %#v\n", wBooks)
// fmt.Printf("\nsummer : %#v\n", sBooks)
// var published [len(books)]bool
// published[0] = true
// published[len(books)-1] = true
// fmt.Println("\nPublished Books:")
// for i, ok := range published {
// if ok {
// fmt.Printf("+ %s\n", books[i])
// }
// }
// }
// ------------------------------------------------------------------
// compare array value
// สามารถ compare array ได้ต้องเป็น type เดียวกัน เช่น
// [3]int กับ 3[int] เป็น type เดียวกัน แต่
// [3]int กับ [2]int ไม่เป็น type เดียวกัน
// func main() {
// var (
// blue = [3]int{6 ,9, 3}
// red = [3]int{6, 9, 3}
// )
// fmt.Printf("blue bookbase : %v\n", blue)
// fmt.Printf("red bookbase : %v\n", red)
// fmt.Println("Are they equal?", blue==red)
// }
// func main (){
// prev := [3]string{
// "Kafka's Revenge",
// "Stay Golden",
// "Everythingship",
// }
// books := prev
// for i := range prev {
// books[i] += " 2nd Ed."
// }
// fmt.Printf("last year: \n%#v\n", prev)
// fmt.Printf("hits year: \n%#v\n", books)
// }
// GOAL
// Find the average grade of the given students
// main computes and prints the mean of all grades in a fixed 2x3 grade
// table: first the number of students (rows), then the average grade.
func main() {
	students := [...][3]float64{
		{5, 6, 1},
		{9, 8, 4},
	}

	var sum float64
	for _, grades := range students {
		for _, grade := range grades {
			sum += grade
		}
	}

	fmt.Printf("len %d\n", len(students))

	// len on an array variable is a constant expression, so N can be const.
	const N = float64(len(students) * len(students[0]))
	fmt.Printf("Avg Grade: %g\n", sum/N)
}
package cli
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/tilt-dev/tilt/internal/analytics"
"github.com/tilt-dev/tilt/internal/container"
ctrltiltfile "github.com/tilt-dev/tilt/internal/controllers/apis/tiltfile"
"github.com/tilt-dev/tilt/internal/docker"
"github.com/tilt-dev/tilt/internal/engine/dockerprune"
"github.com/tilt-dev/tilt/internal/k8s"
"github.com/tilt-dev/tilt/internal/tiltfile"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/logger"
"github.com/tilt-dev/tilt/pkg/model"
)
// dockerPruneCmd holds the CLI flags for the `tilt docker-prune` subcommand.
type dockerPruneCmd struct {
	// fileName is the Tiltfile path, populated by addTiltfileFlag.
	fileName string
}

// dpDeps bundles the clients docker-prune needs: a Docker client, a
// Kubernetes client, and a Tiltfile loader.
type dpDeps struct {
	dCli docker.Client
	kCli k8s.Client
	tfl  tiltfile.TiltfileLoader
}

// newDPDeps assembles a dpDeps from its three dependencies (used by wiring).
func newDPDeps(dCli docker.Client, kCli k8s.Client, tfl tiltfile.TiltfileLoader) dpDeps {
	return dpDeps{
		dCli: dCli,
		kCli: kCli,
		tfl:  tfl,
	}
}
// name identifies this subcommand to Tilt's model/analytics layer.
func (c *dockerPruneCmd) name() model.TiltSubcommand { return "docker-prune" }
// register builds the cobra command for docker-prune and wires the shared
// --file Tiltfile flag into c.fileName.
func (c *dockerPruneCmd) register() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "docker-prune",
		Short: "Run docker prune as Tilt does",
	}

	addTiltfileFlag(cmd, &c.fileName)

	return cmd
}
// run executes docker-prune: it records analytics, forces verbose logging
// for interactive use, loads the Tiltfile, resolves the image selectors Tilt
// would prune for, and invokes the Docker pruner with the Tiltfile's prune
// settings. args are forwarded to the Tiltfile as positional arguments.
func (c *dockerPruneCmd) run(ctx context.Context, args []string) error {
	a := analytics.Get(ctx)
	a.Incr("cmd.dockerPrune", nil)
	defer a.Flush(time.Second)

	if !logger.Get(ctx).Level().ShouldDisplay(logger.VerboseLvl) {
		// Docker Pruner filters output when nothing is pruned if not in verbose
		// logging mode, which is suitable for when it runs in the background
		// during `tilt up`, but we always want to include that for the CLI cmd
		// N.B. we only override if we're not already showing verbose so that
		// `--debug` flag isn't impacted
		l := logger.NewLogger(logger.VerboseLvl, os.Stdout)
		ctx = logger.WithLogger(ctx, l)
	}

	deps, err := wireDockerPrune(ctx, a, "docker-prune")
	if err != nil {
		return err
	}

	tlr := deps.tfl.Load(ctx, ctrltiltfile.MainTiltfile(c.fileName, args), nil)
	if tlr.Error != nil {
		return tlr.Error
	}

	imgSelectors, err := resolveImageSelectors(ctx, deps.kCli, &tlr)
	if err != nil {
		return err
	}

	dp := dockerprune.NewDockerPruner(deps.dCli)

	// TODO: print the commands being run
	// NOTE(review): Prune's outcome is not checked here — confirm it reports
	// failures through the context logger.
	dp.Prune(ctx, tlr.DockerPruneSettings.MaxAge, tlr.DockerPruneSettings.KeepRecent, imgSelectors)

	return nil
}
// resolveImageSelectors finds image references from a tiltfile.TiltfileLoadResult object.
//
// The Kubernetes client is used to resolve the correct image names if a local registry is in use.
//
// This method is brittle and duplicates some logic from the actual reconcilers.
// In the future, we hope to have a mode where we can launch the full apiserver
// with all resources in a "disabled" state and rely on the API, but that's not
// possible currently.
func resolveImageSelectors(ctx context.Context, kCli k8s.Client, tlr *tiltfile.TiltfileLoadResult) ([]container.RefSelector, error) {
	// Fill in image metadata on each manifest before reading selectors off it.
	for _, m := range tlr.Manifests {
		if err := m.InferImageProperties(); err != nil {
			return nil, err
		}
	}

	var reg *v1alpha1.RegistryHosting
	if tlr.HasOrchestrator(model.OrchestratorK8s) {
		// k8s.Client::LocalRegistry will return an empty registry on any error,
		// so ensure the client is actually functional first
		if _, err := kCli.CheckConnected(ctx); err != nil {
			return nil, fmt.Errorf("determining local registry: %v", err)
		}
		reg = kCli.LocalRegistry(ctx)
	}

	// Synthesize a single default cluster carrying the registry so selector
	// resolution sees the same default registry `tilt up` would.
	clusters := map[string]*v1alpha1.Cluster{
		v1alpha1.ClusterNameDefault: {
			ObjectMeta: metav1.ObjectMeta{Name: v1alpha1.ClusterNameDefault},
			Spec:       v1alpha1.ClusterSpec{DefaultRegistry: reg},
		},
	}

	imgSelectors := model.LocalRefSelectorsForManifests(tlr.Manifests, clusters)

	// At debug level, list exactly which image refs pruning will target.
	if len(imgSelectors) != 0 && logger.Get(ctx).Level().ShouldDisplay(logger.DebugLvl) {
		var sb strings.Builder
		for _, is := range imgSelectors {
			sb.WriteString(" - ")
			sb.WriteString(is.RefFamiliarString())
			sb.WriteRune('\n')
		}
		logger.Get(ctx).Debugf("Running Docker Prune for images:\n%s", sb.String())
	}

	return imgSelectors, nil
}
|
package config
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
l "frank/src/go/helpers/log"
"frank/src/go/models"
"github.com/creasty/defaults"
"github.com/radovskyb/watcher"
"time"
)
// Config is an empty placeholder type for the configuration package.
type Config struct{}

// ParsedConfig holds the most recently parsed configuration; it is replaced
// wholesale on every (re)load — see parseConfig.
var ParsedConfig models.Config

// FileName remembers the path the config was read from so saveConfig can
// write back to the same file.
var FileName string
// Get returns the string config value stored under key, or "" when the key
// is absent or the stored value is not a string.
func Get(key string) string {
	if val, ok := ParsedConfig.Configs[key]; ok {
		// Checked assertion: the previous unchecked val.(string) panicked on
		// non-string JSON values (numbers, bools, nested objects).
		if s, ok := val.(string); ok {
			return s
		}
	}
	return ""
}
// Is returns the boolean config value stored under key, or false when the
// key is absent or the stored value is not a bool.
func Is(key string) bool {
	if val, ok := ParsedConfig.Configs[key]; ok {
		// Checked assertion: the previous unchecked val.(bool) panicked on
		// non-bool JSON values.
		if b, ok := val.(bool); ok {
			return b
		}
	}
	return false
}
// readFile records fileName in the package-level FileName (used later by
// saveConfig) and returns the file's contents. On read failure it prints the
// path and terminates the whole process via log.Fatal.
func readFile(fileName string) []byte {
	FileName = fileName
	content, err := ioutil.ReadFile(fileName)
	if err != nil {
		fmt.Println(fileName)
		log.Fatal(err)
	}
	return content
}
// startWatcher watches fileName for write events and hot-reloads the config
// on each change. Events are debounced to one per cycle (SetMaxEvents(1));
// polling happens every 100ms in a background goroutine, and the watcher
// runs for the lifetime of the process.
func startWatcher(fileName string) {
	w := watcher.New()
	w.SetMaxEvents(1)
	w.FilterOps(watcher.Write)

	go func() {
		for {
			select {
			case <-w.Event:
				l.Log.Info("Config File Changed")
				// Re-read and re-parse in place; the error returned by
				// parseConfig is discarded on this path.
				parseConfig(readFile(fileName))
			case err := <-w.Error:
				l.Log.Error(err.Error())
			case <-w.Closed:
				return
			}
		}
	}()

	// Watch this folder for changes.
	if err := w.Add(fileName); err != nil {
		log.Fatalln(err)
	}

	// Start the watching process - it'll check for changes every 100ms.
	go func() {
		if err := w.Start(time.Millisecond * 100); err != nil {
			l.Log.Error(err.Error())
		}
	}()
}
// InitConfig loads and parses the config file at fileName and starts the
// background watcher that reloads it on changes. Note the watcher is started
// before the initial parse runs.
func InitConfig(fileName string) error {
	content := readFile(fileName)
	startWatcher(fileName)
	return parseConfig(content)
}
// saveConfig persists the in-memory ParsedConfig to FileName as indented
// JSON with 0644 permissions, returning any write error.
func saveConfig() error {
	return ioutil.WriteFile(FileName, toJSON(), 0644)
}
// toJSON renders ParsedConfig as two-space-indented JSON. Marshal errors are
// discarded; nil is returned on failure.
func toJSON() []byte {
	b, _ := json.MarshalIndent(ParsedConfig, "", "  ")
	return b
}
// SetDdns replaces the DDNS settings and persists the config to disk.
func SetDdns(ddns models.Ddns) error {
	ParsedConfig.Ddns = ddns
	return saveConfig()
}

// AddCommand appends a command and persists the config to disk.
func AddCommand(command models.Command) error {
	ParsedConfig.Commands = append(ParsedConfig.Commands, command)
	return saveConfig()
}

// AddDevice appends a device and persists the config to disk.
func AddDevice(device models.Device) error {
	ParsedConfig.Devices = append(ParsedConfig.Devices, device)
	return saveConfig()
}

// AddAction appends an action and persists the config to disk.
func AddAction(action models.Action) error {
	ParsedConfig.Actions = append(ParsedConfig.Actions, action)
	return saveConfig()
}
// RemoveDevice drops the named device from the in-memory config, returning
// an error when no device with that name exists.
//
// NOTE(review): unlike AddDevice, this does not call saveConfig, so the
// removal is never persisted to disk — confirm whether that is intentional.
func RemoveDevice(deviceName string) error {
	found := false
	devices := []models.Device{}
	for _, d := range ParsedConfig.Devices {
		if d.Name == deviceName {
			found = true
			continue
		}
		devices = append(devices, d)
	}
	if !found {
		return fmt.Errorf("The device %s was not found, therefore was not deleted", deviceName)
	}
	ParsedConfig.Devices = devices
	return nil
}
// GetActionsByDeviceName returns every configured action bound to the given
// device name. The result is possibly empty but never nil. (A leftover
// debug print of the result slice has been removed.)
func GetActionsByDeviceName(deviceName string) []models.Action {
	actions := []models.Action{}
	for _, a := range ParsedConfig.Actions {
		if a.DeviceName == deviceName {
			actions = append(actions, a)
		}
	}
	return actions
}
// GetCommandsByDeviceName is intended to return commands whose actions
// reference the given device.
//
// NOTE(review): the matching logic is commented out, so this currently just
// prints every command action and always returns an empty slice — confirm
// whether the disabled block should be restored.
func GetCommandsByDeviceName(deviceName string) []models.Command {
	commands := []models.Command{}
	for _, c := range ParsedConfig.Commands {
		for _, cA := range c.Actions {
			fmt.Println(cA)
			// if cA.DeviceName == deviceName {
			// 	commands = append(commands, c)
			// 	break
			// }
		}
	}
	return commands
}
// GetDdns returns a copy of the parsed DDNS settings.
func GetDdns() models.Ddns {
	return ParsedConfig.Ddns
}

// GetVoice returns a pointer into the live config's voice settings.
func GetVoice() *models.Voice {
	return &ParsedConfig.Voice
}

// GetHTTP returns a pointer into the live config's HTTP settings.
func GetHTTP() *models.HTTP {
	return &ParsedConfig.HTTP
}

// GetWebSocket returns a pointer into the live config's WebSocket settings.
func GetWebSocket() *models.WebSocket {
	return &ParsedConfig.WebSocket
}

// GetTelegram returns a pointer into the live config's Telegram settings.
func GetTelegram() *models.Telegram {
	return &ParsedConfig.Telegram
}
// GetAvailablePlugins groups every plugin-backed action by plugin name and
// returns one Plugin entry per distinct plugin, each carrying its actions.
// Result order is nondeterministic (map iteration).
func GetAvailablePlugins() []models.Plugin {
	plugins := []models.Plugin{}
	mappedPlugins := map[string]*models.Plugin{}
	for _, a := range ParsedConfig.Actions {
		if a.Plugin != "" {
			// First sighting of this plugin name: create its bucket.
			if _, ok := mappedPlugins[a.Plugin]; !ok {
				mappedPlugins[a.Plugin] = &models.Plugin{
					Name:    a.Plugin,
					Actions: []models.Action{},
				}
			}
			mappedPlugins[a.Plugin].Actions = append(mappedPlugins[a.Plugin].Actions, a)
		}
	}
	for _, v := range mappedPlugins {
		plugins = append(plugins, *v)
	}
	return plugins
}
// GetDevice looks up a device by name in the parsed config's name index.
// On an unknown name it returns the zero-value device plus an error.
// (The previous pre-declared local shadowed by the if-scoped lookup has
// been removed for clarity; behavior is unchanged.)
func GetDevice(deviceName string) (models.Device, error) {
	if device, ok := ParsedConfig.NamedDevices[deviceName]; ok {
		return device, nil
	}
	return models.Device{}, fmt.Errorf("Device \"%s\" not found", deviceName)
}
// GetAction looks up an action by name in the parsed config's name index.
// On an unknown name it returns the zero-value action plus an error.
func GetAction(actionName string) (models.Action, error) {
	if action, ok := ParsedConfig.NamedActions[actionName]; ok {
		return action, nil
	}
	return models.Action{}, fmt.Errorf("Action \"%s\" not found", actionName)
}
// GetReading looks up a reading by name in the parsed config's name index.
// On an unknown name it returns the zero-value reading plus an error.
func GetReading(readingName string) (models.Reading, error) {
	if reading, ok := ParsedConfig.NamedReadings[readingName]; ok {
		return reading, nil
	}
	return models.Reading{}, fmt.Errorf("Reading \"%s\" not found", readingName)
}
// GetDeviceInterface resolves interfaceName on the named device. It errors
// if either the device or the interface is unknown.
func GetDeviceInterface(deviceName string, interfaceName string) (models.DeviceInterface, error) {
	deviceInterface := models.DeviceInterface{}
	device, err := GetDevice(deviceName)
	if err != nil {
		return deviceInterface, err
	}
	return getDeviceInterface(device, interfaceName)
}
// getDeviceInterface linearly scans device.Interfaces for a matching name,
// returning a zero-value interface plus an error when none matches.
func getDeviceInterface(device models.Device, interfaceName string) (models.DeviceInterface, error) {
	for _, in := range device.Interfaces {
		if in.Name == interfaceName {
			return in, nil
		}
	}
	return models.DeviceInterface{}, fmt.Errorf("Interface \"%s\" not found in Device \"%s\"", interfaceName, device.Name)
}
func generateParsedDevices() {
ParsedConfig.NamedDevices = map[string]models.Device{}
if len(ParsedConfig.Devices) > 0 {
for _, device := range ParsedConfig.Devices {
ParsedConfig.NamedDevices[device.Name] = device
}
}
}
func generateParsedActions() {
ParsedConfig.NamedActions = map[string]models.Action{}
if len(ParsedConfig.Actions) > 0 {
for _, action := range ParsedConfig.Actions {
ParsedConfig.NamedActions[action.Name] = action
}
}
}
func generateParsedReadings() {
ParsedConfig.NamedReadings = map[string]models.Reading{}
if len(ParsedConfig.Readings) > 0 {
for _, reading := range ParsedConfig.Readings {
ParsedConfig.NamedReadings[reading.Name] = reading
}
}
}
// parseConfig resets ParsedConfig, applies struct-tag defaults, unmarshals
// the JSON input over them, and rebuilds the name-indexed lookup maps.
// (The debug fmt.Printf that dumped the entire parsed config — potentially
// including credentials — to stdout has been removed.)
func parseConfig(input []byte) error {
	l.Log.Debug("Parsing Config")
	ParsedConfig = models.Config{}
	if err := defaults.Set(&ParsedConfig); err != nil {
		return err
	}
	if err := json.Unmarshal(input, &ParsedConfig); err != nil {
		return err
	}
	generateParsedDevices()
	generateParsedActions()
	generateParsedReadings()
	return nil
}
|
package adapter
// Envelope carries addressing metadata for an adapter message.
type Envelope struct {
	// Sender information.
	User struct {
		Name string
		Id   string
	}
	// Conversation name (set when the message is in a public channel).
	Room string
	// Conversation ID (set when the message is a private chat).
	Id string
}
|
package nukeprediction
import (
"fmt"
"time"
"github.com/bwmarrin/discordgo"
)
// need to implement a system to clean the cache after a restore
//
// NukePrediction tracks per-guild signals of a "nuke" (mass-deletion)
// attack: a suspicion score, deleted channels/roles retained for
// restoration, the strike log, and the decay timer that clears this state
// when activity quiets down (see ResetTimer/ResetSuspicon).
type NukePrediction struct {
	GuildID            string
	SuspicionLevel     int
	RestorableChannels []*discordgo.Channel
	RestorableRoles    []*discordgo.Role
	// Triggered is set once the score reaches 100 and a lockdown starts;
	// further score changes are ignored after that.
	Triggered bool
	Timer     *time.Timer
	Client    *discordgo.Session
	Strikes   []string
	Restoring bool
	Cache     *struct {
		Pins map[string][]*discordgo.Message
	}
}
// this function will add a channel to the restorable channels list
// it should only be called as a channel is deleted
//
// AddChannel also raises the suspicion score by 25 and records a strike
// naming the deleted channel.
func (N *NukePrediction) AddChannel(channel *discordgo.Channel) {
	N.RestorableChannels = append(N.RestorableChannels, channel)
	N.IncreaseSuspicionLevel(25)
	N.AddStrike(fmt.Sprintf("Deleted Channel : %s", channel.Name))
}
// AddStrike appends a bullet-formatted reason line to the strike log.
func (N *NukePrediction) AddStrike(reason string) {
	entry := fmt.Sprintf("- %s\n", reason)
	N.Strikes = append(N.Strikes, entry)
}
// IncreaseSuspicionLevel raises the guild's suspicion score by amount. The
// score is frozen once a lockdown has been triggered. Crossing the threshold
// of 100 stops the decay timer, marks the prediction triggered, and starts a
// lockdown; below the threshold the decay timer is simply pushed back.
func (N *NukePrediction) IncreaseSuspicionLevel(amount int) {
	if N.Triggered {
		return
	}
	N.SuspicionLevel += amount
	if N.SuspicionLevel < 100 {
		N.ResetTimer()
		return
	}
	// The timer is created by time.AfterFunc (see ResetTimer), whose C field
	// is nil; the old code drained Timer.C when Stop reported false, which
	// blocks forever on a nil channel. Stop alone is correct for AfterFunc
	// timers. Timer may also still be nil if ResetTimer never ran.
	if N.Timer != nil {
		N.Timer.Stop()
	}
	N.Triggered = true
	N.Lockdown()
}
// ResetTimer (re)arms the 12-second decay timer that clears accumulated
// suspicion via ResetSuspicon. The first call lazily creates the timer with
// time.AfterFunc; subsequent calls stop and rewind it.
//
// NOTE(review): Stop+Reset can race with an in-flight ResetSuspicon
// callback — confirm callers serialize access to this struct.
func (N *NukePrediction) ResetTimer() {
	if N.Timer == nil {
		N.Timer = time.AfterFunc(12*time.Second, N.ResetSuspicon)
	} else {
		N.Timer.Stop()
		N.Timer.Reset(12 * time.Second)
	}
}
// ResetSuspicon zeroes the suspicion score and discards the accumulated
// restore lists and strike log. (The name keeps the original spelling —
// "ResetSuspicion" would be the corrected form, but exported callers may
// depend on it.)
func (N *NukePrediction) ResetSuspicon() {
	N.SuspicionLevel = 0
	N.RestorableChannels = []*discordgo.Channel{}
	N.RestorableRoles = []*discordgo.Role{}
	N.Strikes = []string{}
}
|
package slack
// content is a single text payload inside a Slack block.
type content struct {
	// expected type is "mrkdwn"
	Type string `json:"type,omitempty"`
	// markdown compliant message
	Text string `json:"text,omitempty"`
}

// Block holds the different blocks used in the Slack block API.
// Hmm... omitempty doesn't omit zero structs https://github.com/golang/go/issues/11939
type Block struct {
	Type     string    `json:"type"`
	Section  content   `json:"text,omitempty"`
	Elements []content `json:"elements,omitempty"`
}

// WebhookMessage is the specific structure that Slack uses for the Webhook API
type WebhookMessage struct {
	Blocks []Block `json:"blocks"`
}
|
// Use strings.Builder
// Builder: Design a html builder
// Builder Facet: Design a PersonBuilder, PersonJobBuilder, PersonAddressBuilder
// Builder Parameter: Design an EmailBuilder => func SendEmail(action func(b *EmailBuilder) {})
// Functional Builder: Design PersonBuilder combining Facet with Builder Parameter for lazy building
package main
import (
"fmt"
"strings"
)
// HtmlElement is a node in a simple HTML tree: a tag name, optional inner
// text, and nested child elements.
type HtmlElement struct {
	TagName  string
	Text     string
	Children []HtmlElement
}
// Render serializes the element as an HTML fragment: the opening tag, each
// child rendered recursively, the element's text, then the closing tag —
// one component per line. (Fprintln(Sprintf(...)) collapsed to Fprintf.)
func (he *HtmlElement) Render() string {
	b := &strings.Builder{}
	fmt.Fprintf(b, "<%s>\n", he.TagName)
	for _, child := range he.Children {
		fmt.Fprintln(b, child.Render())
	}
	fmt.Fprintln(b, he.Text)
	fmt.Fprintf(b, "</%s>\n", he.TagName)
	return b.String()
}
// HtmlBuilder incrementally assembles a one-level HtmlElement tree rooted
// at Root.
type HtmlBuilder struct {
	Root HtmlElement
}
// AddRoot installs a fresh root element with the given tag and returns the
// builder for chaining. Any previously built tree is discarded.
func (b *HtmlBuilder) AddRoot(tagName string) *HtmlBuilder {
	b.Root = HtmlElement{TagName: tagName}
	return b
}
// AddChild appends a leaf element (tag plus text) under the root and
// returns the builder for chaining.
func (b *HtmlBuilder) AddChild(tagName string, childText string) *HtmlBuilder {
	child := HtmlElement{
		TagName:  tagName,
		Text:     childText,
		Children: []HtmlElement{},
	}
	b.Root.Children = append(b.Root.Children, child)
	return b
}
// main demonstrates the fluent HtmlBuilder by assembling an unordered list
// of three names and printing the rendered markup.
func main() {
	b := HtmlBuilder{}
	b.AddRoot("ul").
		AddChild("li", "Hrishikesh").
		AddChild("li", "Bipul").
		AddChild("li", "Krishnan")
	fmt.Println(b.Root.Render())
}
|
package main
import (
"fmt"
)
func americanNames() []string {
// fmt.Println("start1")
names := []string{"NO AMERICAN NAMES ARRAY"}
//////////////////////////////////////////////////////////////////////////////////////////////////////
switch genderIndex {
case 0:
names = []string{
//MALE
"JAMES",
"JOHN",
"ROBERT",
"MICHAEL",
"WILLIAM",
"DAVID",
"RICHARD",
"CHARLES",
"JOSEPH",
"THOMAS",
"CHRISTOPHER",
"DANIEL",
"PAUL",
"MARK",
"DONALD",
"GEORGE",
"KENNETH",
"STEVEN",
"EDWARD",
"BRIAN",
"RONALD",
"ANTHONY",
"KEVIN",
"JASON",
"MATTHEW",
"GARY",
"TIMOTHY",
"JOSE",
"LARRY",
"JEFFREY",
"FRANK",
"SCOTT",
"ERIC",
"STEPHEN",
"ANDREW",
"RAYMOND",
"GREGORY",
"JOSHUA",
"JERRY",
"DENNIS",
"WALTER",
"PATRICK",
"PETER",
"HAROLD",
"DOUGLAS",
"HENRY",
"CARL",
"ARTHUR",
"RYAN",
"ROGER",
"JOE",
"JUAN",
"JACK",
"ALBERT",
"JONATHAN",
"JUSTIN",
"TERRY",
"GERALD",
"KEITH",
"SAMUEL",
"WILLIE",
"RALPH",
"LAWRENCE",
"NICHOLAS",
"ROY",
"BENJAMIN",
"BRUCE",
"BRANDON",
"ADAM",
"HARRY",
"FRED",
"WAYNE",
"BILLY",
"STEVE",
"LOUIS",
"JEREMY",
"AARON",
"RANDY",
"HOWARD",
"EUGENE",
"CARLOS",
"RUSSELL",
"BOBBY",
"VICTOR",
"MARTIN",
"ERNEST",
"PHILLIP",
"TODD",
"JESSE",
"CRAIG",
"ALAN",
"SHAWN",
"CLARENCE",
"SEAN",
"PHILIP",
"CHRIS",
"JOHNNY",
"EARL",
"JIMMY",
"ANTONIO",
"DANNY",
"BRYAN",
"TONY",
"LUIS",
"MIKE",
"STANLEY",
"LEONARD",
"NATHAN",
"DALE",
"MANUEL",
"RODNEY",
"CURTIS",
"NORMAN",
"ALLEN",
"MARVIN",
"VINCENT",
"GLENN",
"JEFFERY",
"TRAVIS",
"JEFF",
"CHAD",
"JACOB",
"LEE",
"MELVIN",
"ALFRED",
"KYLE",
"FRANCIS",
"BRADLEY",
"JESUS",
"HERBERT",
"FREDERICK",
"RAY",
"JOEL",
"EDWIN",
"DON",
"EDDIE",
"RICKY",
"TROY",
"RANDALL",
"BARRY",
"ALEXANDER",
"BERNARD",
"MARIO",
"LEROY",
"FRANCISCO",
"MARCUS",
"MICHEAL",
"THEODORE",
"CLIFFORD",
"MIGUEL",
"OSCAR",
"JAY",
"JIM",
"TOM",
"CALVIN",
"ALEX",
"JON",
"RONNIE",
"BILL",
"LLOYD",
"TOMMY",
"LEON",
"DEREK",
"WARREN",
"DARRELL",
"JEROME",
"FLOYD",
"LEO",
"ALVIN",
"TIM",
"WESLEY",
"GORDON",
"DEAN",
"GREG",
"JORGE",
"DUSTIN",
"PEDRO",
"DERRICK",
"DAN",
"LEWIS",
"ZACHARY",
"COREY",
"HERMAN",
"MAURICE",
"VERNON",
"ROBERTO",
"CLYDE",
"GLEN",
"HECTOR",
"SHANE",
"RICARDO",
"SAM",
"RICK",
"LESTER",
"BRENT",
"RAMON",
"CHARLIE",
"TYLER",
"GILBERT",
"GENE",
"MARC",
"REGINALD",
"RUBEN",
"BRETT",
"ANGEL",
"NATHANIEL",
"RAFAEL",
"LESLIE",
"EDGAR",
"MILTON",
"RAUL",
"BEN",
"CHESTER",
"CECIL",
"DUANE",
"FRANKLIN",
"ANDRE",
"ELMER",
"BRAD",
"GABRIEL",
"RON",
"MITCHELL",
"ROLAND",
"ARNOLD",
"HARVEY",
"JARED",
"ADRIAN",
"KARL",
"CORY",
"CLAUDE",
"ERIK",
"DARRYL",
"JAMIE",
"NEIL",
"JESSIE",
"CHRISTIAN",
"JAVIER",
"FERNANDO",
"CLINTON",
"TED",
"MATHEW",
"TYRONE",
"DARREN",
"LONNIE",
"LANCE",
"CODY",
"JULIO",
"KELLY",
"KURT",
"ALLAN",
"NELSON",
"GUY",
"CLAYTON",
"HUGH",
"MAX",
"DWAYNE",
"DWIGHT",
"ARMANDO",
"FELIX",
"JIMMIE",
"EVERETT",
"JORDAN",
"IAN",
"WALLACE",
"KEN",
"BOB",
"JAIME",
"CASEY",
"ALFREDO",
"ALBERTO",
"DAVE",
"IVAN",
"JOHNNIE",
"SIDNEY",
"BYRON",
"JULIAN",
"ISAAC",
"MORRIS",
"CLIFTON",
"WILLARD",
"DARYL",
"ROSS",
"VIRGIL",
"ANDY",
"MARSHALL",
"SALVADOR",
"PERRY",
"KIRK",
"SERGIO",
"MARION",
"TRACY",
"SETH",
"KENT",
"TERRANCE",
"RENE",
"EDUARDO",
"TERRENCE",
"ENRIQUE",
"FREDDIE",
"WADE",
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
case 1:
names = []string{
//FEMALE
"MARY",
"PATRICIA",
"LINDA",
"BARBARA",
"ELIZABETH",
"JENNIFER",
"MARIA",
"SUSAN",
"MARGARET",
"DOROTHY",
"LISA",
"NANCY",
"KAREN",
"BETTY",
"HELEN",
"SANDRA",
"DONNA",
"CAROL",
"RUTH",
"SHARON",
"MICHELLE",
"LAURA",
"SARAH",
"KIMBERLY",
"DEBORAH",
"JESSICA",
"SHIRLEY",
"CYNTHIA",
"ANGELA",
"MELISSA",
"BRENDA",
"AMY",
"ANNA",
"REBECCA",
"VIRGINIA",
"KATHLEEN",
"PAMELA",
"MARTHA",
"DEBRA",
"AMANDA",
"STEPHANIE",
"CAROLYN",
"CHRISTINE",
"MARIE",
"JANET",
"CATHERINE",
"FRANCES",
"ANN",
"JOYCE",
"DIANE",
"ALICE",
"JULIE",
"HEATHER",
"TERESA",
"DORIS",
"GLORIA",
"EVELYN",
"JEAN",
"CHERYL",
"MILDRED",
"KATHERINE",
"JOAN",
"ASHLEY",
"JUDITH",
"ROSE",
"JANICE",
"KELLY",
"NICOLE",
"JUDY",
"CHRISTINA",
"KATHY",
"THERESA",
"BEVERLY",
"DENISE",
"TAMMY",
"IRENE",
"JANE",
"LORI",
"RACHEL",
"MARILYN",
"ANDREA",
"KATHRYN",
"LOUISE",
"SARA",
"ANNE",
"JACQUELINE",
"WANDA",
"BONNIE",
"JULIA",
"RUBY",
"LOIS",
"TINA",
"PHYLLIS",
"NORMA",
"PAULA",
"DIANA",
"ANNIE",
"LILLIAN",
"EMILY",
"ROBIN",
"PEGGY",
"CRYSTAL",
"GLADYS",
"RITA",
"DAWN",
"CONNIE",
"FLORENCE",
"TRACY",
"EDNA",
"TIFFANY",
"CARMEN",
"ROSA",
"CINDY",
"GRACE",
"WENDY",
"VICTORIA",
"EDITH",
"KIM",
"SHERRY",
"SYLVIA",
"JOSEPHINE",
"THELMA",
"SHANNON",
"SHEILA",
"ETHEL",
"ELLEN",
"ELAINE",
"MARJORIE",
"CARRIE",
"CHARLOTTE",
"MONICA",
"ESTHER",
"PAULINE",
"EMMA",
"JUANITA",
"ANITA",
"RHONDA",
"HAZEL",
"AMBER",
"EVA",
"DEBBIE",
"APRIL",
"LESLIE",
"CLARA",
"LUCILLE",
"JAMIE",
"JOANNE",
"ELEANOR",
"VALERIE",
"DANIELLE",
"MEGAN",
"ALICIA",
"SUZANNE",
"MICHELE",
"GAIL",
"BERTHA",
"DARLENE",
"VERONICA",
"JILL",
"ERIN",
"GERALDINE",
"LAUREN",
"CATHY",
"JOANN",
"LORRAINE",
"LYNN",
"SALLY",
"REGINA",
"ERICA",
"BEATRICE",
"DOLORES",
"BERNICE",
"AUDREY",
"YVONNE",
"ANNETTE",
"JUNE",
"SAMANTHA",
"MARION",
"DANA",
"STACY",
"ANA",
"RENEE",
"IDA",
"VIVIAN",
"ROBERTA",
"HOLLY",
"BRITTANY",
"MELANIE",
"LORETTA",
"YOLANDA",
"JEANETTE",
"LAURIE",
"KATIE",
"KRISTEN",
"VANESSA",
"ALMA",
"SUE",
"ELSIE",
"BETH",
"JEANNE",
"VICKI",
"CARLA",
"TARA",
"ROSEMARY",
"EILEEN",
"TERRI",
"GERTRUDE",
"LUCY",
"TONYA",
"ELLA",
"STACEY",
"WILMA",
"GINA",
"KRISTIN",
"JESSIE",
"NATALIE",
"AGNES",
"VERA",
"WILLIE",
"CHARLENE",
"BESSIE",
"DELORES",
"MELINDA",
"PEARL",
"ARLENE",
"MAUREEN",
"COLLEEN",
"ALLISON",
"TAMARA",
"JOY",
"GEORGIA",
"CONSTANCE",
"LILLIE",
"CLAUDIA",
"JACKIE",
"MARCIA",
"TANYA",
"NELLIE",
"MINNIE",
"MARLENE",
"HEIDI",
"GLENDA",
"LYDIA",
"VIOLA",
"COURTNEY",
"MARIAN",
"STELLA",
"CAROLINE",
"DORA",
"JO",
"VICKIE",
"MATTIE",
"TERRY",
"MAXINE",
"IRMA",
"MABEL",
"MARSHA",
"MYRTLE",
"LENA",
"CHRISTY",
"DEANNA",
"PATSY",
"HILDA",
"GWENDOLYN",
"JENNIE",
"NORA",
"MARGIE",
"NINA",
"CASSANDRA",
"LEAH",
"PENNY",
"KAY",
"PRISCILLA",
"NAOMI",
"CAROLE",
"BRANDY",
"OLGA",
"BILLIE",
"DIANNE",
"TRACEY",
"LEONA",
"JENNY",
"FELICIA",
"SONIA",
"MIRIAM",
"VELMA",
"BECKY",
"BOBBIE",
"VIOLET",
"KRISTINA",
"TONI",
"MISTY",
"MAE",
"SHELLY",
"DAISY",
"RAMONA",
"SHERRI",
"ERIKA",
"KATRINA",
"CLAIRE",
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
case 2:
names = []string{
//UNISEX
"Addison",
"Ashley",
"Ashton",
"Avery",
"Bailey",
"Cameron",
"Carson",
"Carter",
"Casey",
"Corey",
"Dakota",
"Devin",
"Drew",
"Emerson",
"Harley",
"Harper",
"Hayden",
"Hunter",
"Jaiden",
"Jamie",
"Jaylen",
"Jesse",
"Jordan",
"Justice",
"Kai",
"Kelly",
"Kelsey",
"Kendall",
"Kennedy",
"Lane",
"Logan",
"Mackenzie",
"Madison",
"Marley",
"Mason",
"Morgan",
"Parker",
"Peyton",
"Piper",
"Quinn",
"Reagan",
"Reese",
"Riley",
"Rowan",
"Ryan",
"Shane",
"Shawn",
"Sydney",
"Taylor",
"Tristan",
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
default:
fmt.Println("Error in AMERICAN Names Array")
return nil
}
// fmt.Println("finish1")
return names
}
//############################################################################################################################################################################################
func americanSurNames() []string {
surNames := []string{"NO AMERICAN SURNAMES ARRAY"}
surNames = []string{
"WASHINGTON",
"TRUMB",
"OBAMA",
"SMITH",
"JOHNSON",
"WILLIAMS",
"JONES",
"BROWN",
"DAVIS",
"MILLER",
"WILSON",
"MOORE",
"TAYLOR",
"ANDERSON",
"THOMAS",
"JACKSON",
"WHITE",
"HARRIS",
"MARTIN",
"THOMPSON",
"GARCIA",
"MARTINEZ",
"ROBINSON",
"CLARK",
"RODRIGUEZ",
"LEWIS",
"LEE",
"WALKER",
"HALL",
"ALLEN",
"YOUNG",
"HERNANDEZ",
"KING",
"WRIGHT",
"LOPEZ",
"HILL",
"SCOTT",
"GREEN",
"ADAMS",
"BAKER",
"GONZALEZ",
"NELSON",
"CARTER",
"MITCHELL",
"PEREZ",
"ROBERTS",
"TURNER",
"PHILLIPS",
"CAMPBELL",
"PARKER",
"EVANS",
"EDWARDS",
"COLLINS",
"STEWART",
"SANCHEZ",
"MORRIS",
"ROGERS",
"REED",
"COOK",
"MORGAN",
"BELL",
"MURPHY",
"BAILEY",
"RIVERA",
"COOPER",
"RICHARDSON",
"COX",
"HOWARD",
"WARD",
"TORRES",
"PETERSON",
"GRAY",
"RAMIREZ",
"JAMES",
"WATSON",
"BROOKS",
"KELLY",
"SANDERS",
"PRICE",
"BENNETT",
"WOOD",
"BARNES",
"ROSS",
"HENDERSON",
"COLEMAN",
"JENKINS",
"PERRY",
"POWELL",
"LONG",
"PATTERSON",
"HUGHES",
"FLORES",
"WASHINGTON",
"BUTLER",
"SIMMONS",
"FOSTER",
"GONZALES",
"BRYANT",
"ALEXANDER",
"RUSSELL",
"GRIFFIN",
"DIAZ",
"HAYES",
"MYERS",
"FORD",
"HAMILTON",
"GRAHAM",
"SULLIVAN",
"WALLACE",
"WOODS",
"COLE",
"WEST",
"JORDAN",
"OWENS",
"REYNOLDS",
"FISHER",
"ELLIS",
"HARRISON",
"GIBSON",
"MCDONALD",
"CRUZ",
"MARSHALL",
"ORTIZ",
"GOMEZ",
"MURRAY",
"FREEMAN",
"WELLS",
"WEBB",
"SIMPSON",
"STEVENS",
"TUCKER",
"PORTER",
"HUNTER",
"HICKS",
"CRAWFORD",
"HENRY",
"BOYD",
"MASON",
"MORALES",
"KENNEDY",
"WARREN",
"DIXON",
"RAMOS",
"REYES",
"BURNS",
"GORDON",
"SHAW",
"HOLMES",
"RICE",
"ROBERTSON",
"HUNT",
"BLACK",
"DANIELS",
"PALMER",
"MILLS",
"NICHOLS",
"GRANT",
"KNIGHT",
"FERGUSON",
"ROSE",
"STONE",
"HAWKINS",
"DUNN",
"PERKINS",
"HUDSON",
"SPENCER",
"GARDNER",
"STEPHENS",
"PAYNE",
"PIERCE",
"BERRY",
"MATTHEWS",
"ARNOLD",
"WAGNER",
"WILLIS",
"RAY",
"WATKINS",
"OLSON",
"CARROLL",
"DUNCAN",
"SNYDER",
"HART",
"CUNNINGHAM",
"BRADLEY",
"LANE",
"ANDREWS",
"RUIZ",
"HARPER",
"FOX",
"RILEY",
"ARMSTRONG",
"CARPENTER",
"WEAVER",
"GREENE",
"LAWRENCE",
"ELLIOTT",
"CHAVEZ",
"SIMS",
"AUSTIN",
"PETERS",
"KELLEY",
"FRANKLIN",
"LAWSON",
"FIELDS",
"GUTIERREZ",
"RYAN",
"SCHMIDT",
"CARR",
"VASQUEZ",
"CASTILLO",
"WHEELER",
"CHAPMAN",
"OLIVER",
"MONTGOMERY",
"RICHARDS",
"WILLIAMSON",
"JOHNSTON",
"BANKS",
"MEYER",
"BISHOP",
"MCCOY",
"HOWELL",
"ALVAREZ",
"MORRISON",
"HANSEN",
"FERNANDEZ",
"GARZA",
"HARVEY",
"LITTLE",
"BURTON",
"STANLEY",
"NGUYEN",
"GEORGE",
"JACOBS",
"REID",
"KIM",
"FULLER",
"LYNCH",
"DEAN",
"GILBERT",
"GARRETT",
"ROMERO",
"WELCH",
"LARSON",
"FRAZIER",
"BURKE",
"HANSON",
"DAY",
"MENDOZA",
"MORENO",
"BOWMAN",
"MEDINA",
"FOWLER",
"BREWER",
"HOFFMAN",
"CARLSON",
"SILVA",
"PEARSON",
"HOLLAND",
"DOUGLAS",
"FLEMING",
"JENSEN",
"VARGAS",
"BYRD",
"DAVIDSON",
"HOPKINS",
"MAY",
"TERRY",
"HERRERA",
"WADE",
"SOTO",
"WALTERS",
"CURTIS",
"NEAL",
"CALDWELL",
"LOWE",
"JENNINGS",
"BARNETT",
"GRAVES",
"JIMENEZ",
"HORTON",
"SHELTON",
"BARRETT",
"OBRIEN",
"CASTRO",
"SUTTON",
"GREGORY",
"MCKINNEY",
"LUCAS",
"MILES",
"CRAIG",
"RODRIQUEZ",
"CHAMBERS",
"HOLT",
"LAMBERT",
"FLETCHER",
"WATTS",
"BATES",
"HALE",
"RHODES",
"PENA",
"BECK",
"NEWMAN",
}
return surNames
}
|
package consul
import (
	"reflect"
	"sync"
	"time"

	"github.com/hashicorp/consul/api"
)
// stat caches the last-seen health-check snapshot per service name
// (service name -> map[ServiceID]*api.HealthCheck).
type stat struct {
	Svcs sync.Map
}
// GetStat primes the cached health-check set for svc with a one-shot
// "passing"-state query filtered to checks tagged with both the project and
// the service name. Already-cached services are skipped.
func (c *Consul) GetStat(svc string) {
	if _, ok := c.Svcs.Load(svc); ok {
		return
	}
	// NOTE(review): the guard reads c.Svcs but the result is stored in
	// c.stat.Svcs — confirm these refer to the same map (e.g. via embedding).
	hc, meta, err := c.cc.Health().State("passing", &api.QueryOptions{
		Filter: c.Proj + " in ServiceTags and " + svc + " in ServiceTags",
	})
	if err != nil {
		// The error was previously discarded, which dereferenced a nil meta
		// on failure; bail out instead so a later call can retry.
		return
	}
	c.Index.Store(svc, meta.LastIndex)
	c.stat.Svcs.Store(svc, hc)
}
// WatchStat starts a background blocking-query watch on svc's health checks.
// It returns a channel that receives each *changed* check-set (unchanged
// snapshots are suppressed via statEqual) and a stop channel the caller can
// signal to end the watch.
//
// NOTE(review): the goroutine closes the stop channel itself, both after a
// caller signal and on query error; a caller that sends to stop after the
// error path has run will panic on a closed channel — confirm the intended
// channel ownership. The send on cb also blocks until a consumer reads, so
// an abandoned reader wedges this goroutine.
func (c *Consul) WatchStat(svc string) (<-chan map[string]*api.HealthCheck, chan struct{}) {
	cb := make(chan map[string]*api.HealthCheck)
	stop := make(chan struct{}, 1)
	go func(svc string, ch chan map[string]*api.HealthCheck, stop chan struct{}) {
		for {
			select {
			case <-stop:
				close(stop)
				close(cb)
				return
			default:
				// Long-poll Consul; errors terminate the watch.
				dst, e := c.watchStat(svc)
				if e != nil {
					close(stop)
					close(cb)
					return
				}
				isrc, iok := c.stat.Svcs.Load(svc)
				src, ok := isrc.(map[string]*api.HealthCheck)
				// Skip notification when the snapshot is unchanged.
				if ok && iok && statEqual(src, dst) {
					continue
				}
				c.stat.Svcs.Store(svc, dst)
				cb <- dst
			}
		}
	}(svc, cb, stop)
	return cb, stop
}
// watchStat performs one blocking query against Consul's health-state
// endpoint for svc, waiting up to a minute for an index newer than the last
// one recorded, and returns the matching checks keyed by ServiceID.
func (c *Consul) watchStat(svc string) (map[string]*api.HealthCheck, error) {
	idx := c.getIdx(svc)
	filter := c.Proj + " in ServiceTags and " + svc + " in ServiceTags"
	// Query the "passing" state, mirroring GetStat. The service name was
	// previously passed as the state argument, which Consul rejects as an
	// invalid state and which therefore killed every watch loop.
	ss, meta, e := c.cc.Health().State("passing", &api.QueryOptions{
		WaitIndex: idx,
		WaitTime:  time.Minute,
		Filter:    filter,
	})
	if e != nil {
		return nil, e
	}
	mss := make(map[string]*api.HealthCheck, len(ss))
	for _, hc := range ss {
		mss[hc.ServiceID] = hc
	}
	c.Index.Store(svc, meta.LastIndex)
	return mss, nil
}
// statEqual reports whether two health-check snapshots are identical; the
// watch loop uses it to suppress duplicate notifications. The previous stub
// always returned false, so every poll result was forwarded unchanged.
func statEqual(src, dst map[string]*api.HealthCheck) bool {
	if len(src) != len(dst) {
		return false
	}
	for k, sv := range src {
		dv, ok := dst[k]
		if !ok || !reflect.DeepEqual(sv, dv) {
			return false
		}
	}
	return true
}
|
package main
import (
"fmt"
"os"
"reflect"
"strconv"
"time"
)
// Sprint format x
// Sprint formats x as a string. Values implementing String() take priority;
// plain strings pass through; ints, float64s (binary-exponent 'b' format),
// and bools convert via strconv; anything else yields "".
func Sprint(x interface{}) string {
	type Stringer interface {
		String() string
	}
	if s, ok := x.(Stringer); ok {
		return s.String()
	}
	switch v := x.(type) {
	case string:
		return v
	case int:
		return strconv.Itoa(v)
	case float64:
		return strconv.FormatFloat(v, 'b', 6, 64)
	case bool:
		return strconv.FormatBool(v)
	}
	return ""
}
// typeof prints x's dynamic type to stdout and returns its string form.
func typeof(x interface{}) string {
	rt := reflect.TypeOf(x)
	fmt.Println(rt)
	return rt.String()
}
// valueof demonstrates reflect.Value inspection: it prints the value, its
// String() rendering, and its dynamic type, then round-trips an int through
// reflect.ValueOf(...).Interface(). It returns v.String(), which is the
// underlying text only when x is a string (other kinds yield a
// "<type Value>" placeholder).
func valueof(x interface{}) string {
	v := reflect.ValueOf(x)
	fmt.Println(v)
	fmt.Println(v.String())
	fmt.Println(v.Type())
	// Round-trip: reflect.Value -> interface{} -> concrete int.
	f := reflect.ValueOf(2)
	i := f.Interface()
	fmt.Println(i.(int))
	return v.String()
}
// Foo is a sample aggregate (string, int, slice, map) used to exercise the
// reflection-based Marshal/Unmarchal helpers defined elsewhere in this file.
type Foo struct {
	Foo  string
	Age  int
	List []string
	M    map[string]string
}
// main currently runs printMethods (defined elsewhere in this file) on
// time.Hour — presumably enumerating time.Duration's method set; confirm
// against printMethods' definition.
func main() {
	// tagServer()
	printMethods(time.Hour)
}
// xexpress round-trips a Foo value through the file's reflection-based
// Marshal/Unmarchal helpers (defined elsewhere in this file), printing the
// encoded form of a Foo and of os.Stderr along the way, then the decoded
// Foo. Marshal/Unmarchal errors are discarded.
func xexpress() {
	foo := Foo{
		"bar",
		18,
		[]string{"123", "Hello", "good"},
		map[string]string{"name": "22", "get": "45"},
	}
	// Display("a", nil)
	// Display("Foo", foo)
	// fmt.Println()
	// Display("Stderr", os.Stderr)
	b, _ := Marshal(foo)
	fmt.Println(string(b))
	fmt.Println("---------------------------------")
	s, _ := Marshal(os.Stderr)
	fmt.Println(string(s))
	fmt.Println("---------------------------------")
	var foo2 Foo
	Unmarchal(b, &foo2)
	fmt.Println(foo2)
}
// refSet demonstrates mutating values through the reflect package: it
// rewrites slice elements in place, then re-points an interface variable via
// an addressable reflect.Value obtained from &f. The final SetInt call
// intentionally panics (per its own comment: SetInt is invalid on a Value of
// interface kind), terminating the demo.
func refSet() {
	list := []string{"12", "he"}
	val := reflect.ValueOf(list)
	// pt := val.Index(1).Addr().Interface().(*string)
	// *pt = "abc"
	val.Index(1).Set(reflect.ValueOf("ABC"))
	val.Index(0).SetString("456")
	fmt.Println(list)

	var f interface{}
	// Elem() of the pointer's Value is addressable, so assignment works.
	f1 := reflect.ValueOf(&f).Elem()
	f1.Set(reflect.ValueOf(1))
	fmt.Println(f) // 1
	f1.Set(reflect.ValueOf("hello"))
	fmt.Println(f)   // hello
	f1.SetInt(2)     //panic: reflect: call of reflect.Value.SetInt on interface Value
}
|
package draft
import (
"regexp"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// shortShaIdx is the length of a shortened git SHA — 8 characters.
const shortShaIdx = 8

// shaRegex matches a full 40-character lowercase-hex git SHA.
var shaRegex = regexp.MustCompile(`^[\da-f]{40}$`)
// NewSha creates a raw string to a SHA. Returns ErrInvalidGitSha if the sha was invalid.
// NewSha creates a raw string to a SHA. Returns ErrInvalidGitSha if the sha was invalid.
func NewSha(rawSha string) (*SHA, error) {
	if !shaRegex.MatchString(rawSha) {
		return nil, ErrInvalidGitSha{sha: rawSha}
	}
	short := rawSha[:shortShaIdx]
	return &SHA{full: rawSha, short: short}, nil
}
// NewDeployment generates a new draft.Deployment wrapping obj (copied by
// value) and wires its DraftMeta to the copy's ObjectMeta.
func NewDeployment(obj *v1beta1.Deployment) *Deployment {
	d := &Deployment{Deployment: *obj}
	d.DraftMeta = DraftMeta{objectMeta: &d.ObjectMeta}
	return d
}
// NewNamespaceMetadata builds a NamespaceMeta for the given namespace name
// via a throwaway DraftMeta.
func NewNamespaceMetadata(namespace string) *NamespaceMeta {
	m := &DraftMeta{objectMeta: &metav1.ObjectMeta{Namespace: namespace}}
	return m.GetNamespaceMetadata()
}
// NewMapValue creates a new *MapValue wrapping value.
func NewMapValue(value string) *MapValue {
	return &MapValue{Val: value}
}
// NewIngress generates a new draft.Ingress wrapping obj (copied by value)
// and wires its DraftMeta to the copy's ObjectMeta. (Doc comment previously
// copy-pasted from NewDeployment.)
func NewIngress(obj *v1beta1.Ingress) *Ingress {
	i := &Ingress{Ingress: *obj}
	i.DraftMeta = DraftMeta{objectMeta: &i.ObjectMeta}
	return i
}
|
// Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
// Author: Tobias Schottdorf (tobias.schottdorf@gmail.com)
package engine
import (
"bytes"
"fmt"
"strconv"
"github.com/cockroachdb/cockroach/proto"
"github.com/cockroachdb/cockroach/util/encoding"
"github.com/cockroachdb/cockroach/util/log"
)
// MakeKey makes a new key which is the concatenation of the
// given inputs, in order. It simply delegates to proto.MakeKey.
func MakeKey(keys ...proto.Key) proto.Key {
	return proto.MakeKey(keys...)
}
// MakeStoreKey creates a store-local key based on the metadata key
// suffix, and optional detail (pass proto.Key{} for no detail).
func MakeStoreKey(suffix, detail proto.Key) proto.Key {
	return MakeKey(KeyLocalStorePrefix, suffix, detail)
}
// StoreIdentKey returns a store-local key for the store metadata
// (the immutable identifier written when the store is bootstrapped).
func StoreIdentKey() proto.Key {
	return MakeStoreKey(KeyLocalStoreIdentSuffix, proto.Key{})
}
// StoreStatKey returns the store-local key for accessing the named stat
// (see the Stat* constants below for valid names).
func StoreStatKey(stat proto.Key) proto.Key {
	return MakeStoreKey(KeyLocalStoreStatSuffix, stat)
}
// StoreStatusKey returns the key for accessing the store status for the
// specified store ID. The ID is rendered in decimal under the store
// status prefix.
func StoreStatusKey(storeID int32) proto.Key {
	id := strconv.Itoa(int(storeID))
	return MakeKey(KeyStatusStorePrefix, proto.Key(id))
}
// MakeRangeIDKey creates a range-local key based on the range's
// Raft ID, metadata key suffix, and optional detail (e.g. the
// encoded command ID for a response cache entry, etc.). Panics if the
// suffix is not exactly KeyLocalSuffixLength bytes, since consumers rely
// on fixed-width suffixes when decoding.
func MakeRangeIDKey(raftID int64, suffix, detail proto.Key) proto.Key {
	if len(suffix) != KeyLocalSuffixLength {
		panic(fmt.Sprintf("suffix len(%q) != %d", suffix, KeyLocalSuffixLength))
	}
	// The Raft ID is varint-encoded directly after the prefix.
	return MakeKey(KeyLocalRangeIDPrefix, encoding.EncodeUvarint(nil, uint64(raftID)), suffix, detail)
}
// RaftLogKey returns a system-local key for a Raft log entry.
func RaftLogKey(raftID int64, logIndex uint64) proto.Key {
	// The log is stored "backwards" (indexes encoded in decreasing order)
	// so we can easily find the highest index stored.
	return MakeRangeIDKey(raftID, KeyLocalRaftLogSuffix, encoding.EncodeUint64Decreasing(nil, logIndex))
}
// RaftLogPrefix returns the system-local prefix shared by all entries
// in a Raft log (RaftLogKey with an empty log-index detail).
func RaftLogPrefix(raftID int64) proto.Key {
	return MakeRangeIDKey(raftID, KeyLocalRaftLogSuffix, proto.Key{})
}
// RaftHardStateKey returns a system-local key for a Raft HardState
// for the given Raft group. The detail component is empty.
func RaftHardStateKey(raftID int64) proto.Key {
	return MakeRangeIDKey(raftID, KeyLocalRaftHardStateSuffix, proto.Key{})
}
// DecodeRaftStateKey extracts the Raft ID from a RaftStateKey.
// Panics if the key does not carry the range-ID-local prefix.
func DecodeRaftStateKey(key proto.Key) int64 {
	if !bytes.HasPrefix(key, KeyLocalRangeIDPrefix) {
		panic(fmt.Sprintf("key %q does not have %q prefix", key, KeyLocalRangeIDPrefix))
	}
	// Strip the prefix, then decode the varint-encoded Raft ID that follows.
	b := key[len(KeyLocalRangeIDPrefix):]
	_, raftID := encoding.DecodeUvarint(b)
	return int64(raftID)
}
// RaftTruncatedStateKey returns a system-local key for a RaftTruncatedState
// for the given Raft group.
func RaftTruncatedStateKey(raftID int64) proto.Key {
	return MakeRangeIDKey(raftID, KeyLocalRaftTruncatedStateSuffix, proto.Key{})
}
// RaftAppliedIndexKey returns a system-local key for a raft applied index
// for the given Raft group.
func RaftAppliedIndexKey(raftID int64) proto.Key {
	return MakeRangeIDKey(raftID, KeyLocalRaftAppliedIndexSuffix, proto.Key{})
}
// RangeStatKey returns the key for accessing the named stat
// for the specified Raft ID (see the Stat* constants below).
func RangeStatKey(raftID int64, stat proto.Key) proto.Key {
	return MakeRangeIDKey(raftID, KeyLocalRangeStatSuffix, stat)
}
// ResponseCacheKey returns a range-local key by Raft ID for a
// response cache entry, with detail specified by encoding the
// supplied client command ID. A nil cmdID yields the bare response
// cache prefix for the range.
func ResponseCacheKey(raftID int64, cmdID *proto.ClientCmdID) proto.Key {
	detail := proto.Key{}
	if cmdID != nil {
		detail = encoding.EncodeUvarint(nil, uint64(cmdID.WallTime)) // wall time helps sort for locality
		detail = encoding.EncodeUint64(detail, uint64(cmdID.Random))
	}
	return MakeRangeIDKey(raftID, KeyLocalResponseCacheSuffix, detail)
}
// MakeRangeKey creates a range-local key based on the range
// start key, metadata key suffix, and optional detail (e.g. the
// transaction UUID for a txn record, etc.). Panics if the suffix is
// not exactly KeyLocalSuffixLength bytes.
func MakeRangeKey(key, suffix, detail proto.Key) proto.Key {
	if len(suffix) != KeyLocalSuffixLength {
		panic(fmt.Sprintf("suffix len(%q) != %d", suffix, KeyLocalSuffixLength))
	}
	// The range key is encoded via encoding.EncodeBytes so that
	// DecodeRangeKey can unambiguously recover it.
	return MakeKey(KeyLocalRangeKeyPrefix, encoding.EncodeBytes(nil, key), suffix, detail)
}
// DecodeRangeKey decodes the range key into range start key,
// suffix and optional detail (may be nil). Panics if the key lacks
// the range-local prefix or is too short to hold a full suffix.
func DecodeRangeKey(key proto.Key) (startKey, suffix, detail proto.Key) {
	if !bytes.HasPrefix(key, KeyLocalRangeKeyPrefix) {
		panic(fmt.Sprintf("key %q does not have %q prefix", key, KeyLocalRangeKeyPrefix))
	}
	// Strip the prefix and decode the embedded range start key.
	b := key[len(KeyLocalRangeKeyPrefix):]
	b, startKey = encoding.DecodeBytes(b)
	if len(b) < KeyLocalSuffixLength {
		panic(fmt.Sprintf("key %q does not have suffix of length %d", key, KeyLocalSuffixLength))
	}
	// Split the fixed-width suffix from the trailing detail.
	suffix = b[:KeyLocalSuffixLength]
	detail = b[KeyLocalSuffixLength:]
	return
}
// RangeGCMetadataKey returns a range-local key for range garbage
// collection metadata, keyed by Raft ID.
func RangeGCMetadataKey(raftID int64) proto.Key {
	return MakeRangeIDKey(raftID, KeyLocalRangeGCMetadataSuffix, proto.Key{})
}
// RangeLastVerificationTimestampKey returns a range-local key for
// the range's last verification timestamp (used for checking the
// integrity of on-disk data), keyed by Raft ID.
func RangeLastVerificationTimestampKey(raftID int64) proto.Key {
	return MakeRangeIDKey(raftID, KeyLocalRangeLastVerificationTimestampSuffix, proto.Key{})
}
// RangeTreeNodeKey returns a range-local key for the range's
// node in the range tree, addressed by the range key.
func RangeTreeNodeKey(key proto.Key) proto.Key {
	return MakeRangeKey(key, KeyLocalRangeTreeNodeSuffix, proto.Key{})
}
// RangeDescriptorKey returns a range-local key for the descriptor
// for the range with the specified start key.
func RangeDescriptorKey(key proto.Key) proto.Key {
	return MakeRangeKey(key, KeyLocalRangeDescriptorSuffix, proto.Key{})
}
// TransactionKey returns a transaction key based on the provided
// transaction key and ID. The base key is encoded (inside MakeRangeKey)
// in order to guarantee that all transaction records for a range sort
// together.
func TransactionKey(key proto.Key, id []byte) proto.Key {
	return MakeRangeKey(key, KeyLocalTransactionSuffix, proto.Key(id))
}
// KeyAddress returns the address for the key, used to lookup the
// range containing the key. In the normal case, this is simply the
// key's value. However, for local keys, such as transaction records,
// range-spanning binary tree node pointers, and message queues, the
// address is the trailing suffix of the key, with the local key
// prefix removed. In this way, local keys address to the same range
// as non-local keys, but are stored separately so that they don't
// collide with user-space or global system keys.
//
// However, not all local keys are addressable in the global map. Only
// range local keys incorporating a range key (start key or transaction
// key) are addressable (e.g. range metadata and txn records). Range
// local keys incorporating the Raft ID are not (e.g. response cache
// entries, and range stats).
func KeyAddress(k proto.Key) proto.Key {
	// Non-local keys address themselves.
	if !bytes.HasPrefix(k, KeyLocalPrefix) {
		return k
	}
	// Range-local-by-key keys address to the embedded range key.
	if bytes.HasPrefix(k, KeyLocalRangeKeyPrefix) {
		k = k[len(KeyLocalRangeKeyPrefix):]
		_, k = encoding.DecodeBytes(k)
		return k
	}
	// Any other local key (store-local or range-ID-local) is not
	// addressable; treat it as a fatal programming error.
	log.Fatalf("local key %q malformed; should contain prefix %q", k, KeyLocalRangeKeyPrefix)
	return nil
}
// RangeMetaKey returns a range metadata (meta1, meta2) indexing key
// for the given key. For ordinary keys this returns a level 2
// metadata key - for level 2 keys, it returns a level 1 key. For
// level 1 keys and local keys, KeyMin is returned.
func RangeMetaKey(key proto.Key) proto.Key {
	if len(key) == 0 {
		return KeyMin
	}
	// Resolve local keys (e.g. txn records) to their embedded range key.
	addr := KeyAddress(key)
	// Ordinary key: index it under meta2.
	if !bytes.HasPrefix(addr, KeyMetaPrefix) {
		return MakeKey(KeyMeta2Prefix, addr)
	}
	// Meta2 key: index it under meta1, dropping the meta2 prefix.
	if bytes.HasPrefix(addr, KeyMeta2Prefix) {
		return MakeKey(KeyMeta1Prefix, addr[len(KeyMeta2Prefix):])
	}
	// Meta1 key (or bare meta prefix): top of the addressing hierarchy.
	return KeyMin
}
// ValidateRangeMetaKey validates that the given key is a valid Range Metadata
// key. It must have an appropriate metadata range prefix, and the original key
// value must be less than KeyMax. As a special case, KeyMin is considered a
// valid Range Metadata Key.
func ValidateRangeMetaKey(key proto.Key) error {
	// KeyMin is a valid key.
	if len(key) == 0 {
		return nil
	}
	// Key must be at least as long as KeyMeta1Prefix.
	if len(key) < len(KeyMeta1Prefix) {
		return NewInvalidRangeMetaKeyError("too short", key)
	}
	// KeyMeta1Prefix and KeyMeta2Prefix have the same length, so this
	// split works for either level.
	prefix, body := key[:len(KeyMeta1Prefix)], key[len(KeyMeta1Prefix):]
	// The prefix must be equal to KeyMeta1Prefix or KeyMeta2Prefix.
	if !bytes.HasPrefix(key, KeyMetaPrefix) {
		return NewInvalidRangeMetaKeyError(fmt.Sprintf("does not have %q prefix", KeyMetaPrefix), key)
	}
	// The byte following the meta prefix selects the addressing level.
	if lvl := string(prefix[len(KeyMetaPrefix)]); lvl != "1" && lvl != "2" {
		return NewInvalidRangeMetaKeyError("meta level is not 1 or 2", key)
	}
	// Body of the key must sort before KeyMax.
	if !body.Less(KeyMax) {
		return NewInvalidRangeMetaKeyError("body of range lookup is >= KeyMax", key)
	}
	return nil
}
// Constants for stat key construction. Each names one statistic and is
// intended as the stat argument to StoreStatKey / RangeStatKey above.
var (
	// StatLiveBytes counts how many bytes are "live", including bytes
	// from both keys and values. Live rows include only non-deleted
	// keys and only the most recent value.
	StatLiveBytes = proto.Key("live-bytes")
	// StatKeyBytes counts how many bytes are used to store all keys,
	// including bytes from deleted keys. Key bytes are re-counted for
	// each versioned value.
	StatKeyBytes = proto.Key("key-bytes")
	// StatValBytes counts how many bytes are used to store all values,
	// including all historical versions and deleted tombstones.
	StatValBytes = proto.Key("val-bytes")
	// StatIntentBytes counts how many bytes are used to store values
	// which are unresolved intents. Includes bytes used for both intent
	// keys and values.
	StatIntentBytes = proto.Key("intent-bytes")
	// StatLiveCount counts how many keys are "live". This includes only
	// non-deleted keys.
	StatLiveCount = proto.Key("live-count")
	// StatKeyCount counts the total number of keys, including both live
	// and deleted keys.
	StatKeyCount = proto.Key("key-count")
	// StatValCount counts the total number of values, including all
	// historical versions and deleted tombstones.
	StatValCount = proto.Key("val-count")
	// StatIntentCount counts the number of unresolved intents.
	StatIntentCount = proto.Key("intent-count")
	// StatIntentAge counts the total age of unresolved intents.
	StatIntentAge = proto.Key("intent-age")
	// StatGCBytesAge counts the total age of gc'able bytes.
	StatGCBytesAge = proto.Key("gc-age")
	// StatLastUpdateNanos counts nanoseconds since the unix epoch for
	// the last update to the intent / GC'able bytes ages. This really
	// is tracking the wall time as at last update, but is a merged
	// stat, with successive counts of elapsed nanos being added at each
	// stat computation.
	StatLastUpdateNanos = proto.Key("update-nanos")
)
// Constants for system-reserved keys in the KV map.
var (
	// KeyMaxLength is the maximum key length in bytes. This value is
	// somewhat arbitrary. It is chosen high enough to allow most
	// conceivable use cases while also still being comfortably short of
	// a limit which would affect the performance of the system, both
	// from performance of key comparisons and from memory usage for
	// things like the timestamp cache, lookup cache, and command queue.
	KeyMaxLength = proto.KeyMaxLength
	// KeyMin is a minimum key value which sorts before all other keys.
	KeyMin = proto.KeyMin
	// KeyMax is a maximum key value which sorts after all other keys.
	KeyMax = proto.KeyMax
	// MVCCKeyMax is a maximum mvcc-encoded key value which sorts after
	// all other keys.
	MVCCKeyMax = MVCCEncodeKey(KeyMax)
	// KeyLocalPrefix is the prefix for keys which hold data local to a
	// RocksDB instance, such as store and range-specific metadata which
	// must not pollute the user key space, but must be collocated with
	// the store and/or ranges which they refer to. Storing this
	// information in the normal system keyspace would place the data on
	// an arbitrary set of stores, with no guarantee of collocation.
	// Local data includes store metadata, range metadata, response
	// cache values, transaction records, range-spanning binary tree
	// node pointers, and message queues.
	//
	// The local key prefix has been deliberately chosen to sort before
	// the KeySystemPrefix, because these local keys are not addressable
	// via the meta range addressing indexes.
	//
	// Some local data are not replicated, such as the store's 'ident'
	// record. Most local data are replicated, such as response cache
	// entries and transaction rows, but are not addressable as normal
	// MVCC values as part of transactions. Finally, some local data are
	// stored as MVCC values and are addressable as part of distributed
	// transactions, such as range metadata, range-spanning binary tree
	// node pointers, and message queues.
	KeyLocalPrefix = proto.Key("\x00\x00\x00")
	// KeyLocalSuffixLength specifies the length in bytes of all local
	// key suffixes.
	KeyLocalSuffixLength = 4
	// There are three types of local key data enumerated below:
	// store-local, range-local by ID, and range-local by key.
	// KeyLocalStorePrefix is the prefix identifying per-store data.
	KeyLocalStorePrefix = MakeKey(KeyLocalPrefix, proto.Key("s"))
	// KeyLocalStoreIdentSuffix stores an immutable identifier for this
	// store, created when the store is first bootstrapped.
	KeyLocalStoreIdentSuffix = proto.Key("iden")
	// KeyLocalStoreStatSuffix is the suffix for store statistics.
	KeyLocalStoreStatSuffix = proto.Key("sst-")
	// KeyLocalRangeIDPrefix is the prefix identifying per-range data
	// indexed by Raft ID. The Raft ID is appended to this prefix,
	// encoded using EncodeUvarint. The specific sort of per-range
	// metadata is identified by one of the suffixes listed below, along
	// with potentially additional encoded key info, such as a command
	// ID in the case of response cache entry.
	//
	// NOTE: KeyLocalRangeIDPrefix must be kept in sync with the value
	// in storage/engine/db.cc.
	KeyLocalRangeIDPrefix = MakeKey(KeyLocalPrefix, proto.Key("i"))
	// KeyLocalRaftLogSuffix is the suffix for the raft log.
	KeyLocalRaftLogSuffix = proto.Key("rftl")
	// KeyLocalRaftHardStateSuffix is the suffix for the raft HardState.
	KeyLocalRaftHardStateSuffix = proto.Key("rfth")
	// KeyLocalRaftTruncatedStateSuffix is the suffix for the RaftTruncatedState.
	KeyLocalRaftTruncatedStateSuffix = proto.Key("rftt")
	// KeyLocalRaftAppliedIndexSuffix is the suffix for the raft applied index.
	KeyLocalRaftAppliedIndexSuffix = proto.Key("rfta")
	// KeyLocalRangeGCMetadataSuffix is the suffix for a range's GC metadata.
	KeyLocalRangeGCMetadataSuffix = proto.Key("rgcm")
	// KeyLocalRangeLastVerificationTimestampSuffix is the suffix for a range's
	// last verification timestamp (for checking integrity of on-disk data).
	KeyLocalRangeLastVerificationTimestampSuffix = proto.Key("rlvt")
	// KeyLocalRangeStatSuffix is the suffix for range statistics.
	KeyLocalRangeStatSuffix = proto.Key("rst-")
	// KeyLocalResponseCacheSuffix is the suffix for keys storing
	// command responses used to guarantee idempotency (see
	// ResponseCache).
	// NOTE: if this value changes, it must be updated in C++
	// (storage/engine/db.cc).
	KeyLocalResponseCacheSuffix = proto.Key("res-")
	// KeyLocalRangeKeyPrefix is the prefix identifying per-range data
	// indexed by range key (either start key, or some key in the
	// range). The key is appended to this prefix, encoded using
	// EncodeBytes. The specific sort of per-range metadata is
	// identified by one of the suffixes listed below, along with
	// potentially additional encoded key info, such as the txn UUID in
	// the case of a transaction record.
	//
	// NOTE: KeyLocalRangeKeyPrefix must be kept in sync with the value
	// in storage/engine/db.cc.
	KeyLocalRangeKeyPrefix = MakeKey(KeyLocalPrefix, proto.Key("k"))
	// KeyLocalRangeDescriptorSuffix is the suffix for keys storing
	// range descriptors. The value is a struct of type RangeDescriptor.
	KeyLocalRangeDescriptorSuffix = proto.Key("rdsc")
	// KeyLocalRangeTreeNodeSuffix is the suffix for keys storing
	// range tree nodes. The value is a struct of type RangeTreeNode.
	KeyLocalRangeTreeNodeSuffix = proto.Key("rtn-")
	// KeyLocalTransactionSuffix specifies the key suffix for
	// transaction records. The additional detail is the transaction id.
	// NOTE: if this value changes, it must be updated in C++
	// (storage/engine/db.cc).
	KeyLocalTransactionSuffix = proto.Key("txn-")
	// KeyLocalMax is the end of the local key range.
	KeyLocalMax = KeyLocalPrefix.PrefixEnd()
	// KeySystemPrefix indicates the beginning of the key range for
	// global, system data which are replicated across the cluster.
	KeySystemPrefix = proto.Key("\x00")
	// KeySystemMax is the exclusive end of the system key range.
	KeySystemMax = proto.Key("\x01")
	// KeyMetaPrefix is the prefix for range metadata keys. Notice that
	// an extra null character in the prefix causes all range addressing
	// records to sort before any system tables which they might describe.
	KeyMetaPrefix = MakeKey(KeySystemPrefix, proto.Key("\x00meta"))
	// KeyMeta1Prefix is the first level of key addressing. The value is a
	// RangeDescriptor struct.
	KeyMeta1Prefix = MakeKey(KeyMetaPrefix, proto.Key("1"))
	// KeyMeta2Prefix is the second level of key addressing. The value is a
	// RangeDescriptor struct.
	KeyMeta2Prefix = MakeKey(KeyMetaPrefix, proto.Key("2"))
	// KeyMetaMax is the end of the range of addressing keys.
	KeyMetaMax = MakeKey(KeySystemPrefix, proto.Key("\x01"))
	// KeyConfigAccountingPrefix specifies the key prefix for accounting
	// configurations. The suffix is the affected key prefix.
	KeyConfigAccountingPrefix = MakeKey(KeySystemPrefix, proto.Key("acct"))
	// KeyConfigPermissionPrefix specifies the key prefix for permission
	// configurations. The suffix is the affected key prefix.
	KeyConfigPermissionPrefix = MakeKey(KeySystemPrefix, proto.Key("perm"))
	// KeyConfigZonePrefix specifies the key prefix for zone
	// configurations. The suffix is the affected key prefix.
	KeyConfigZonePrefix = MakeKey(KeySystemPrefix, proto.Key("zone"))
	// KeyNodeIDGenerator is the global node ID generator sequence.
	KeyNodeIDGenerator = MakeKey(KeySystemPrefix, proto.Key("node-idgen"))
	// KeyRaftIDGenerator is the global Raft consensus group ID generator sequence.
	KeyRaftIDGenerator = MakeKey(KeySystemPrefix, proto.Key("raft-idgen"))
	// KeySchemaPrefix specifies key prefixes for schema definitions.
	KeySchemaPrefix = MakeKey(KeySystemPrefix, proto.Key("schema"))
	// KeyStoreIDGenerator is the global store ID generator sequence.
	KeyStoreIDGenerator = MakeKey(KeySystemPrefix, proto.Key("store-idgen"))
	// KeyRangeTreeRoot specifies the root range in the range tree.
	KeyRangeTreeRoot = MakeKey(KeySystemPrefix, proto.Key("range-tree-root"))
	// KeyStatusPrefix specifies the key prefix to store all status details.
	KeyStatusPrefix = MakeKey(KeySystemPrefix, proto.Key("status-"))
	// KeyStatusStorePrefix stores all status info for stores.
	KeyStatusStorePrefix = MakeKey(KeyStatusPrefix, proto.Key("store-"))
)
|
package language
// LangEn maps action identifiers to their English display strings.
var LangEn = map[string]string{
	"open":   "open",
	"edit":   "edit",
	"create": "create",
	"list":   "list",
}
|
package gobbus
import (
"fmt"
"strings"
)
// Message is a single bus message: a destination topic, optional delivery
// flags, an opaque payload value and an optional reply topic.
type Message struct {
	// Topic is the dot-separated destination topic.
	Topic string
	// Flags carries the delivery flags; may be nil.
	Flags *MessageFlags
	// val is the payload; one of int, string, float64 or []byte
	// (see the Get* accessors and String).
	val interface{}
	// Rtopic is the reply topic, if any.
	Rtopic string
}
// MessageFlags is the decoded form of a message's flag bits; see
// flagsToUint / flagsToStruct for the wire mapping.
type MessageFlags struct {
	Instant      bool
	NonRecursive bool
	Response     bool
	Error        bool
}
// Bit positions used for the wire encoding of MessageFlags.
const (
	msgFlInstant      = 1 << 0
	msgFlNonrecursive = 1 << 1
	msgFlResponse     = 1 << 2
	msgFlError        = 1 << 3
	// msgFlMask covers the full flag byte.
	msgFlMask = 0xff
)
// flagsToUint packs f into its wire representation. A nil f yields 0
// (no flags set).
func flagsToUint(f *MessageFlags) uint {
	if f == nil {
		return 0
	}
	var flags uint
	set := func(on bool, bit uint) {
		if on {
			flags |= bit
		}
	}
	set(f.Instant, msgFlInstant)
	set(f.NonRecursive, msgFlNonrecursive)
	set(f.Response, msgFlResponse)
	set(f.Error, msgFlError)
	return flags
}
// flagsToStruct unpacks the wire representation f into a fresh
// MessageFlags value.
func flagsToStruct(f uint) *MessageFlags {
	return &MessageFlags{
		Instant:      f&msgFlInstant != 0,
		NonRecursive: f&msgFlNonrecursive != 0,
		Response:     f&msgFlResponse != 0,
		Error:        f&msgFlError != 0,
	}
}
// getTopicPaths returns every subscription pattern that matches path:
// the global wildcard "*", the exact path itself, and one "<prefix>.*"
// entry for each dot-separated prefix of the path (including the full
// path).
func getTopicPaths(path string) []string {
	segments := strings.Split(path, ".")
	paths := make([]string, 0, len(segments)+2)
	paths = append(paths, "*", path)
	prefix := ""
	for _, seg := range segments {
		prefix += seg + "."
		paths = append(paths, prefix+"*")
	}
	return paths
}
// asyncMsg fans m out to every channel subscribed to the message's topic
// or to one of its matching wildcard patterns (see getTopicPaths).
// Sends are non-blocking: a subscriber whose channel is full misses
// this message.
func (o *Obbus) asyncMsg(m *Message) {
	o.subscriptionsLock.Lock()
	for _, p := range getTopicPaths(m.Topic) {
		if chs, ok := o.subscriptions[p]; ok {
			for _, ch := range chs {
				// Drop rather than block if the subscriber can't keep up.
				select {
				case ch <- m:
				default:
				}
			}
		}
	}
	o.subscriptionsLock.Unlock()
}
// addAsyncSub registers addch as a subscriber of topic pattern s, unless
// it is already registered. It reports whether s is a brand-new topic
// (had no subscriber list before this call).
func (o *Obbus) addAsyncSub(s string, addch chan *Message) bool {
	o.subscriptionsLock.Lock()
	defer o.subscriptionsLock.Unlock()
	chs, existed := o.subscriptions[s]
	already := false
	for _, ch := range chs {
		if ch == addch {
			already = true
			break
		}
	}
	if !already {
		chs = append(chs, addch)
	}
	o.subscriptions[s] = chs
	return !existed
}
// delAsyncSub removes delch from the subscriber list for topic pattern s,
// closing the channel so its receiver unblocks. It reports whether the
// topic ended up with no subscribers (and was therefore dropped from the
// map).
func (o *Obbus) delAsyncSub(s string, delch chan *Message) bool {
	var delSub bool
	o.subscriptionsLock.Lock()
	chs, ok := o.subscriptions[s]
	if ok {
		for i, ch := range chs {
			if ch == delch {
				close(ch)
				o.subscriptions[s] = append(chs[:i], chs[i+1:]...)
				// A channel is registered at most once (addAsyncSub dedupes),
				// so stop here; continuing the range over the slice we just
				// mutated could skip entries or close a channel twice if a
				// duplicate ever slipped in.
				break
			}
		}
		if delSub = len(o.subscriptions[s]) == 0; delSub {
			delete(o.subscriptions, s)
		}
	}
	o.subscriptionsLock.Unlock()
	return delSub
}
// delAllAsyncSubs closes every subscriber channel and resets the
// subscription table to an empty map.
func (o *Obbus) delAllAsyncSubs() {
	o.subscriptionsLock.Lock()
	for _, chs := range o.subscriptions {
		for _, ch := range chs {
			close(ch)
		}
	}
	// Replace the map wholesale rather than deleting key by key.
	o.subscriptions = map[string][]chan *Message{}
	o.subscriptionsLock.Unlock()
}
// String renders the message for debugging: topic, payload (with its
// dynamic type), flags and reply topic.
func (m *Message) String() string {
	var val string
	switch v := m.val.(type) {
	case int:
		val = fmt.Sprintf("val (int: %d)", v)
	case string:
		val = fmt.Sprintf("val (string: %s)", v)
	case float64:
		val = fmt.Sprintf("val (float64: %f)", v)
	case []byte:
		val = fmt.Sprintf("val (buf: %x)", v)
	default:
		val = fmt.Sprintf("val (invalid %T: %v)", v, v)
	}
	return fmt.Sprintf("topic (%s) %s flags (%s) rtopic (%s)", m.Topic, val, m.Flags, m.Rtopic)
}
// String lists the set flags as space-separated words; an empty string
// means no flags are set.
func (f *MessageFlags) String() string {
	var parts []string
	for _, fl := range []struct {
		on   bool
		name string
	}{
		{f.Instant, "instant"},
		{f.NonRecursive, "non-recursive"},
		{f.Response, "response"},
		{f.Error, "error"},
	} {
		if fl.on {
			parts = append(parts, fl.name)
		}
	}
	return strings.Join(parts, " ")
}
// GetValue returns the raw payload without any type checking.
func (m *Message) GetValue() interface{} {
	return m.val
}
// GetInt returns the payload as an int, or an error if the payload holds
// a different type.
func (m *Message) GetInt() (int, error) {
	if i, ok := m.val.(int); ok {
		return i, nil
	}
	return 0, fmt.Errorf("message value is not int")
}
// GetStr returns the payload as a string, or an error if the payload
// holds a different type.
func (m *Message) GetStr() (string, error) {
	if s, ok := m.val.(string); ok {
		return s, nil
	}
	return "", fmt.Errorf("message value is not string")
}
// GetDbl returns the payload as a float64, or an error if the payload
// holds a different type.
func (m *Message) GetDbl() (float64, error) {
	if f, ok := m.val.(float64); ok {
		return f, nil
	}
	return 0, fmt.Errorf("message value is not double")
}
// GetBuf returns the payload as a byte slice, or an error if the payload
// holds a different type.
func (m *Message) GetBuf() ([]byte, error) {
	if b, ok := m.val.([]byte); ok {
		return b, nil
	}
	return nil, fmt.Errorf("message value is not buffer")
}
|
package project
import (
"github.com/saxon134/workflow/enum"
"time"
)
// TBNProjectRs is the database table name backing TblProjectRs.
const TBNProjectRs = "project_rs"
// TblProjectRs models one user-to-project relationship row.
type TblProjectRs struct {
	Id        int64       `json:"id"`
	UserId    int64       `json:"userId"`
	ProjectId int64       `json:"projectId"`
	Status    enum.Status `json:"status"`
	CreateAt  *time.Time  `json:"createAt"`
}
// TableName returns the table name the ORM should use for TblProjectRs.
func (m *TblProjectRs) TableName() string {
	return TBNProjectRs
}
|
package commands
import (
"github.com/SAP/cloud-mta/internal/logs"
"github.com/SAP/cloud-mta/internal/version"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/x-cray/logrus-prefixed-formatter"
)
// cfgFile holds the path of the configs file read by initConfig.
var cfgFile string
// init sets up the logger and registers initConfig to run when cobra
// initializes commands.
func init() {
	logs.Logger = logs.NewLogger()
	// Disable colored output when the prefixed formatter is in use;
	// the type assertion simply no-ops for other formatters.
	formatter, ok := logs.Logger.Formatter.(*prefixed.TextFormatter)
	if ok {
		formatter.DisableColors = true
	}
	cobra.OnInitialize(initConfig)
}
// rootCmd - represents the base command; subcommands are attached to it
// elsewhere. It accepts at most one positional argument.
var rootCmd = &cobra.Command{
	Use:     "MTA",
	Short:   "MTA tools",
	Long:    "MTA tools",
	Version: cliVersion(),
	Args:    cobra.MaximumNArgs(1),
}
// Execute - adds all the child commands to the root command, sets the
// flags appropriately, and runs the root command.
func Execute() error {
	return rootCmd.Execute()
}
// initConfig loads viper configuration from cfgFile and the environment.
func initConfig() {
	// NOTE(review): cfgFile may still be empty when no flag binds it;
	// viper.ReadInConfig then fails and configuration is silently skipped.
	viper.SetConfigFile(cfgFile)
	viper.AutomaticEnv() // Reads in the environment variables that match.
	// If a configs file is found, reads it in.
	if err := viper.ReadInConfig(); err == nil {
		logs.Logger.Println("Using configs file:", viper.ConfigFileUsed())
	}
}
// cliVersion returns the CLI version string provided by the version
// package.
func cliVersion() string {
	// NOTE(review): the error from GetVersion is deliberately ignored;
	// on failure this returns the zero-value version string.
	v, _ := version.GetVersion()
	return v.CliVersion
}
|
package main
import (
"testing"
"github.com/google/go-cmp/cmp"
)
// TestSwagger_Validate drives Swagger.Validate through a table of minimal
// specs, one per lint rule, asserting the exact violation messages
// reported for the "/items" resource.
func TestSwagger_Validate(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name     string
		input    Swagger
		expected Violations
	}{
		{
			"Missing Operation ID",
			Swagger{
				Paths: map[Resource]Paths{
					"/items": map[Method]Path{
						"get": {
							Tags: []string{"tag"},
						},
					},
				},
			},
			map[Resource][]string{
				"/items": {"Missing operation id."},
			},
		},
		{
			"Resource must begin with method",
			Swagger{
				Paths: map[Resource]Paths{
					"/items": map[Method]Path{
						"get": {
							OperationID: "itemsOperationID",
							Tags:        []string{"tag"},
						},
					},
				},
			},
			map[Resource][]string{
				"/items": {"'itemsOperationID': Resource must begin with 'get'."},
			},
		},
		{
			"Resource must define at least one tag.",
			Swagger{
				Paths: map[Resource]Paths{
					"/items": map[Method]Path{
						"get": {
							OperationID: "getItems",
						},
					},
				},
			},
			map[Resource][]string{
				"/items": {"Resource must define at least one tag."},
			},
		},
		{
			"Body request model must be prefixed with method+Request",
			Swagger{
				Paths: map[Resource]Paths{
					"/items": map[Method]Path{
						"post": {
							OperationID: "postItems",
							Tags:        []string{"tag"},
							Parameters: []Parameter{
								{
									In: "body",
									Schema: Schema{
										Ref: `#/definitions/postItems`,
									},
								},
							},
						},
					},
				},
			},
			map[Resource][]string{
				"/items": {"'postItems': Body request model must be prefixed with method+Request: '#/definitions/postItems'."},
			},
		},
		{
			"Query arguments must be lowercase",
			Swagger{
				Paths: map[Resource]Paths{
					"/items": map[Method]Path{
						"get": {
							OperationID: "getItems",
							Tags:        []string{"tag"},
							Parameters: []Parameter{
								{
									In:   "query",
									Name: "deletedAt",
								},
							},
						},
					},
				},
			},
			map[Resource][]string{
				"/items": {"'getItems': Query arguments must be lowercase: 'deletedAt'"},
			},
		},
		{
			"Instead of using Array prefer defining a new model.",
			Swagger{
				Paths: map[Resource]Paths{
					"/items": map[Method]Path{
						"get": {
							OperationID: "getItems",
							Tags:        []string{"tag"},
							Responses: map[Code]Response{
								"200": {
									Schema: Schema{
										Type: "array",
									},
								},
							},
						},
					},
				},
			},
			map[Resource][]string{
				"/items": {"'getItems': Instead of using Array as a response, prefer defining a new model."},
			},
		},
		{
			"Code 200, response model must be prefixed with method+Response.",
			Swagger{
				Paths: map[Resource]Paths{
					"/items": map[Method]Path{
						"get": {
							OperationID: "getItems",
							Tags:        []string{"tag"},
							Responses: map[Code]Response{
								"200": {
									Schema: Schema{
										Ref: `#/definitions/getItems`,
									},
								},
							},
						},
					},
				},
			},
			map[Resource][]string{
				"/items": {"'getItems': Code 200, response model must be prefixed with method+Response: '#/definitions/getItems'."},
			},
		},
	}
	for _, test := range tests {
		// Capture the loop variable for the parallel closure (pre-Go 1.22).
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			actual := test.input.Validate()
			if !cmp.Equal(test.expected, actual) {
				t.Errorf("expected values do not match\n%s", cmp.Diff(test.expected, actual))
			}
		})
	}
}
|
package structil_test
import (
"fmt"
"reflect"
"testing"
"unsafe"
. "github.com/goldeneggg/structil"
)
func BenchmarkNewGetter_Val(b *testing.B) {
var g *Getter
var e error
testStructVal := newGetterTestStruct() // See: getter_test.go
b.ResetTimer()
for i := 0; i < b.N; i++ {
g, e = NewGetter(testStructVal)
if e == nil {
_ = g
} else {
b.Fatalf("abort benchmark because error %v occurd.", e)
}
}
}
func BenchmarkNewGetter_Ptr(b *testing.B) {
var g *Getter
var e error
testStructPtr := newGetterTestStructPtr() // See: getter_test.go
b.ResetTimer()
for i := 0; i < b.N; i++ {
g, e = NewGetter(testStructPtr)
if e == nil {
_ = g
} else {
b.Fatalf("abort benchmark because error %v occurd.", e)
}
}
}
func BenchmarkGetterGetType_String(b *testing.B) {
var t reflect.Type
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
t, _ = g.GetType("String")
_ = t
}
}
func BenchmarkGetterGetValue_String(b *testing.B) {
var v reflect.Value
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
v, _ = g.GetValue("String")
_ = v
}
}
func BenchmarkGetterHas_String(b *testing.B) {
var bl bool
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
bl = g.Has("String")
_ = bl
}
}
func BenchmarkGetterGet_String(b *testing.B) {
var it interface{}
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
it, _ = g.Get("String")
_ = it
}
}
func BenchmarkGetterString(b *testing.B) {
var str string
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
str, _ = g.String("String")
_ = str
}
}
func BenchmarkGetterBytes(b *testing.B) {
var bytes []byte
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
bytes, _ = g.Bytes("Bytes")
_ = bytes
}
}
func BenchmarkGetterUintptr(b *testing.B) {
var up uintptr
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
up, _ = g.Uintptr("Uintptr")
_ = up
}
}
func BenchmarkGetterUnsafePointer(b *testing.B) {
var up unsafe.Pointer
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
up, _ = g.UnsafePointer("Unsafeptr")
_ = up
}
}
func BenchmarkGetterSlice_StructPtrSlice(b *testing.B) {
var sl []interface{}
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
sl, _ = g.Slice("GetterTestStruct4PtrSlice")
_ = sl
}
}
func BenchmarkGetterGetGetter(b *testing.B) {
var gg *Getter
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
gg, _ = g.GetGetter("GetterTestStruct2")
_ = gg
}
}
func BenchmarkGetterToMap(b *testing.B) {
var m map[string]interface{}
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m = g.ToMap()
_ = m
}
}
func BenchmarkGetterIsStruct(b *testing.B) {
var is bool
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
is = g.IsStruct("GetterTestStruct2")
_ = is
}
}
func BenchmarkGetterIsSlice_Bytes(b *testing.B) {
var is bool
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
is = g.IsSlice("Bytes")
_ = is
}
}
func BenchmarkGetterIsSlice_StructSlice(b *testing.B) {
var is bool
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
is = g.IsSlice("GetterTestStruct4Slice")
_ = is
}
}
func BenchmarkGetterIsSlice_StructPtrSlice(b *testing.B) {
var is bool
g, err := newTestGetter() // See: getter_test.go
if err != nil {
b.Fatalf("NewGetter() occurs unexpected error: %v", err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
is = g.IsSlice("GetterTestStruct4PtrSlice")
_ = is
}
}
// BenchmarkGetterMapGet measures Getter.MapGet applying a formatting function
// across a struct-pointer slice field.
func BenchmarkGetterMapGet(b *testing.B) {
	var ia []interface{}
	g, err := newTestGetter() // See: getter_test.go
	if err != nil {
		b.Fatalf("NewGetter() returned unexpected error: %v", err)
	}
	fn := func(i int, g *Getter) (interface{}, error) {
		str, _ := g.String("String")
		str2, _ := g.String("String")
		return fmt.Sprintf("%s:%s", str, str2), nil
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ia, err = g.MapGet("GetterTestStruct4PtrSlice", fn)
		if err != nil {
			b.Fatalf("abort benchmark because error %v occurred.", err)
		}
		_ = ia
	}
}
// BenchmarkNewFinder_Val measures NewFinder construction from a struct value.
func BenchmarkNewFinder_Val(b *testing.B) {
	var f *Finder
	var e error
	testStructVal := newFinderTestStruct() // See: getter_test.go
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f, e = NewFinder(testStructVal)
		if e != nil {
			b.Fatalf("abort benchmark because error %v occurred.", e)
		}
		_ = f
	}
}
// BenchmarkNewFinder_Ptr measures NewFinder construction from a struct pointer.
func BenchmarkNewFinder_Ptr(b *testing.B) {
	var f *Finder
	var e error
	testStructPtr := newFinderTestStructPtr() // See: getter_test.go
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f, e = NewFinder(testStructPtr)
		if e != nil {
			b.Fatalf("abort benchmark because error %v occurred.", e)
		}
		_ = f
	}
}
// BenchmarkToMap_1FindOnly measures Find + ToMap with a single top-level key.
func BenchmarkToMap_1FindOnly(b *testing.B) {
	var m map[string]interface{}
	f, err := NewFinder(newFinderTestStructPtr()) // See: finder_test.go
	if err != nil {
		b.Fatalf("NewFinder() returned unexpected error: %v", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m, err = f.Find("String").ToMap()
		if err != nil {
			b.Fatalf("abort benchmark because error %v occurred.", err)
		}
		_ = m
	}
}
// BenchmarkToMap_2FindOnly measures Find + ToMap with two top-level keys.
func BenchmarkToMap_2FindOnly(b *testing.B) {
	var m map[string]interface{}
	f, err := NewFinder(newFinderTestStructPtr()) // See: finder_test.go
	if err != nil {
		b.Fatalf("NewFinder() returned unexpected error: %v", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m, err = f.Find("String", "Int64").ToMap()
		if err != nil {
			b.Fatalf("abort benchmark because error %v occurred.", err)
		}
		_ = m
	}
}
// BenchmarkToMap_1Struct_1Find measures Into + Find + ToMap for one nested struct.
func BenchmarkToMap_1Struct_1Find(b *testing.B) {
	var m map[string]interface{}
	f, err := NewFinder(newFinderTestStructPtr()) // See: finder_test.go
	if err != nil {
		b.Fatalf("NewFinder() returned unexpected error: %v", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m, err = f.Into("FinderTestStruct2").Find("String").ToMap()
		if err != nil {
			b.Fatalf("abort benchmark because error %v occurred.", err)
		}
		_ = m
	}
}
// BenchmarkToMap_1Struct_1Find_2Pair measures two chained Into/Find pairs + ToMap.
func BenchmarkToMap_1Struct_1Find_2Pair(b *testing.B) {
	var m map[string]interface{}
	f, err := NewFinder(newFinderTestStructPtr()) // See: finder_test.go
	if err != nil {
		b.Fatalf("NewFinder() returned unexpected error: %v", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m, err = f.Into("FinderTestStruct2").Find("String").Into("FinderTestStruct2Ptr").Find("String").ToMap()
		if err != nil {
			b.Fatalf("abort benchmark because error %v occurred.", err)
		}
		_ = m
	}
}
// BenchmarkToMap_2Struct_1Find measures Into with two nested structs + one Find.
func BenchmarkToMap_2Struct_1Find(b *testing.B) {
	var m map[string]interface{}
	f, err := NewFinder(newFinderTestStructPtr()) // See: finder_test.go
	if err != nil {
		b.Fatalf("NewFinder() returned unexpected error: %v", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m, err = f.Into("FinderTestStruct2", "FinderTestStruct3").Find("String").ToMap()
		if err != nil {
			b.Fatalf("abort benchmark because error %v occurred.", err)
		}
		_ = m
	}
}
// BenchmarkToMap_2Struct_2Find measures Into with two nested structs + two Finds.
func BenchmarkToMap_2Struct_2Find(b *testing.B) {
	var m map[string]interface{}
	f, err := NewFinder(newFinderTestStructPtr()) // See: finder_test.go
	if err != nil {
		b.Fatalf("NewFinder() returned unexpected error: %v", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m, err = f.Into("FinderTestStruct2", "FinderTestStruct3").Find("String", "Int").ToMap()
		if err != nil {
			b.Fatalf("abort benchmark because error %v occurred.", err)
		}
		_ = m
	}
}
// BenchmarkNewFinderKeys_yml measures loading FinderKeys from a YAML config
// and applying them with FromKeys, resetting the Finder each iteration.
func BenchmarkNewFinderKeys_yml(b *testing.B) {
	f, err := NewFinder(newFinderTestStructPtr()) // See: finder_test.go
	if err != nil {
		b.Fatalf("NewFinder() returned unexpected error: %v", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fks, err := NewFinderKeys("testdata/finder_from_conf", "ex_test1_yml")
		if err != nil {
			b.Fatalf("abort benchmark because error %v occurred.", err)
		}
		_ = f.FromKeys(fks)
		f.Reset()
	}
}
// BenchmarkNewFinderKeys_json measures loading FinderKeys from a JSON config
// and applying them with FromKeys, resetting the Finder each iteration.
func BenchmarkNewFinderKeys_json(b *testing.B) {
	f, err := NewFinder(newFinderTestStructPtr()) // See: finder_test.go
	if err != nil {
		b.Fatalf("NewFinder() returned unexpected error: %v", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fks, err := NewFinderKeys("testdata/finder_from_conf", "ex_test1_json")
		if err != nil {
			b.Fatalf("abort benchmark because error %v occurred.", err)
		}
		_ = f.FromKeys(fks)
		f.Reset()
	}
}
|
package authorization
// PermissionLevel enum for different forum permissions
type PermissionLevel string
const (
	// Admin legends
	Admin PermissionLevel = "AUTH_ADMIN"
	// Moderator chat moderators
	Moderator PermissionLevel = "AUTH_MODERATOR"
	// Standard plebs
	Standard PermissionLevel = "AUTH_STANDARD"
	// LoggedOut supa plebs
	LoggedOut PermissionLevel = "AUTH_LOGGED_OUT"
	// Banned banned plebs
	Banned PermissionLevel = "AUTH_BANNED"
)
// AtLeast tests if a PermissionLevel is at least another PermissionLevel.
// Levels are compared by rank; Banned (and any unrecognized level) ranks
// lowest, so a Banned/unknown minimum is satisfied by every level.
func (permissions PermissionLevel) AtLeast(minimumPermission PermissionLevel) bool {
	rank := func(p PermissionLevel) int {
		switch p {
		case Admin:
			return 4
		case Moderator:
			return 3
		case Standard:
			return 2
		case LoggedOut:
			return 1
		default:
			return 0
		}
	}
	return rank(permissions) >= rank(minimumPermission)
}
|
package main
// person holds basic biometric data for one individual.
// Fixed invalid Go field declarations: the original wrote `int age` etc.
// (type before name), which does not compile; Go requires `name type`.
type person struct {
	name   string
	age    int
	height int
	weight int
}
|
package output
import (
"github.com/afritzler/garden-examiner/cmd/gex/context"
. "github.com/afritzler/garden-examiner/pkg/data"
)
// ElementOutput collects elements into a processing source and exposes the
// (optionally chain-processed) result as an Iterable.
type ElementOutput struct {
	source ProcessingSource // sink that Add/Close feed into
	Elems  Iterable         // source itself, or the chain-processed view of it
}

// NewElementOutput creates an ElementOutput; a nil chain exposes the raw source.
func NewElementOutput(chain ProcessChain) *ElementOutput {
	return (&ElementOutput{}).new(chain)
}

// new initializes the output. Renamed receiver from non-idiomatic `this` to `o`.
func (o *ElementOutput) new(chain ProcessChain) *ElementOutput {
	o.source = NewIncrementalProcessingSource()
	if chain == nil {
		o.Elems = o.source
	} else {
		o.Elems = Process(o.source).Asynchronously().Apply(chain)
	}
	return o
}

// Add feeds one element into the source; it never fails.
func (o *ElementOutput) Add(ctx *context.Context, e interface{}) error {
	o.source.Add(e)
	return nil
}

// Close marks the source as complete; it never fails.
func (o *ElementOutput) Close(ctx *context.Context) error {
	o.source.Close()
	return nil
}

// Out is a no-op; subtypes presumably render the collected elements — TODO confirm.
func (o *ElementOutput) Out(ctx *context.Context) {
}
|
package main
import (
"bufio"
"compress/gzip"
"flag"
"fmt"
"io"
"log"
"os"
"strconv"
"github.com/tsunami42/influxdb/models"
"github.com/tsunami42/influxdb/pkg/escape"
"github.com/tsunami42/influxdb/tsdb/engine/tsm1"
)
// Command-line configuration, populated in init().
var (
	tsmPath  string // path of the TSM file to export (-p)
	compress bool   // gzip the output stream (-c)
	outPath  string // NOTE(review): never registered as a flag nor read — verify intent
	db       string // database name emitted in the DML context header (-db)
	rp       string // retention policy emitted in the DML context header (-rp)
	limit    int    // maximum number of index keys to export (-l)
)
// init registers the command-line flags. outPath has no flag here; output
// always goes to stdout (see write()).
func init() {
	flag.StringVar(&tsmPath, "p", "a.tsm", "path for tsm file")
	flag.BoolVar(&compress, "c", false, "Compress the output in gzip")
	flag.StringVar(&db, "db", "telegraf", "Database of tsm file")
	flag.StringVar(&rp, "rp", "default", "RP of tsm file")
	flag.IntVar(&limit, "l", 100, "Limit keys to be read from index")
}
// main parses the flags and runs the export, aborting on any error.
func main() {
	flag.Parse()
	err := write()
	if err != nil {
		log.Fatal(err)
	}
}
// write streams the TSM export to stdout through a 1 MiB buffered writer,
// optionally gzip-compressed. Close/Flush errors are now propagated instead
// of being silently dropped by defers, so truncated output is reported.
func write() error {
	// Because calling (*os.File).Write is relatively expensive,
	// and we don't *need* to sync to disk on every written line of export,
	// use a sized buffered writer so that we only sync the file every megabyte.
	bw := bufio.NewWriterSize(os.Stdout, 1024*1024)
	var w io.Writer = bw
	var gzw *gzip.Writer
	if compress {
		gzw = gzip.NewWriter(w)
		w = gzw
	}
	if err := writeTsmFile(w, tsmPath); err != nil {
		return err
	}
	// Close the gzip stream (writes the footer) before flushing the buffer.
	if gzw != nil {
		if err := gzw.Close(); err != nil {
			return err
		}
	}
	return bw.Flush()
}
// writeTsmFile dumps the series in the TSM file at tsmFilePath to w as line
// protocol preceded by DML context headers. At most `limit` keys are read;
// an unreadable file or key is logged and skipped rather than fatal.
func writeTsmFile(w io.Writer, tsmFilePath string) error {
	f, err := os.Open(tsmFilePath)
	if err != nil {
		return err
	}
	defer f.Close()
	r, err := tsm1.NewTSMReader(f)
	if err != nil {
		log.Printf("unable to read %s, skipping: %s\n", tsmFilePath, err.Error())
		return nil
	}
	defer r.Close()
	fmt.Fprintln(w, "# DML")
	fmt.Fprintf(w, "# CONTEXT-DATABASE:%s\n", db)
	fmt.Fprintf(w, "# CONTEXT-RETENTION-POLICY:%s\n", rp)
	for i := 0; i < r.KeyCount(); i++ {
		// The -l flag promises at most `limit` keys; the previous `i > limit`
		// check was off by one and exported limit+1 keys.
		if i >= limit {
			break
		}
		key, _ := r.KeyAt(i)
		values, err := r.ReadAll(string(key))
		if err != nil {
			log.Printf("unable to read key %q in %s, skipping: %s\n", string(key), tsmFilePath, err.Error())
			continue
		}
		measurement, field := tsm1.SeriesAndFieldFromCompositeKey(key)
		field = escape.Bytes(field)
		if err := writeValues(w, measurement, string(field), values); err != nil {
			// An error from writeValues indicates an IO error, which should be returned.
			return err
		}
	}
	return nil
}
// writeValues writes one line-protocol line per value to w, in the form
// "<series_key> <field>=<value> <timestamp>\n". A single buffer is reused
// across values by re-slicing to the constant prefix, so no per-value
// allocation occurs on the happy path.
func writeValues(w io.Writer, seriesKey []byte, field string, values []tsm1.Value) error {
	buf := []byte(string(seriesKey) + " " + field + "=")
	prefixLen := len(buf)
	for _, value := range values {
		ts := value.UnixNano()
		// Re-slice buf to be "<series_key> <field>=".
		buf = buf[:prefixLen]
		// Append the correct representation of the value.
		switch v := value.Value().(type) {
		case float64:
			buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
		case int64:
			// Line protocol marks integers with a trailing 'i'.
			buf = strconv.AppendInt(buf, v, 10)
			buf = append(buf, 'i')
		case bool:
			buf = strconv.AppendBool(buf, v)
		case string:
			buf = append(buf, '"')
			buf = append(buf, models.EscapeStringField(v)...)
			buf = append(buf, '"')
		default:
			// This shouldn't be possible, but we'll format it anyway.
			buf = append(buf, fmt.Sprintf("%v", v)...)
		}
		// Now buf has "<series_key> <field>=<value>".
		// Append the timestamp and a newline, then write it.
		buf = append(buf, ' ')
		buf = strconv.AppendInt(buf, ts, 10)
		buf = append(buf, '\n')
		if _, err := w.Write(buf); err != nil {
			// Underlying IO error needs to be returned.
			return err
		}
	}
	return nil
}
|
package schedulecontracts
import (
"context"
"github.com/adamluzsi/frameless/internal/suites"
"github.com/adamluzsi/frameless/pkg/tasker/schedule"
"github.com/adamluzsi/frameless/ports/crud/crudcontracts"
"github.com/adamluzsi/frameless/ports/guard/guardcontracts"
"github.com/adamluzsi/testcase"
"github.com/adamluzsi/testcase/random"
"testing"
)
// Repository builds a contract test suite for schedule.Repository,
// covering its lock factory (.Locks) and its state store (.States).
// mk constructs a fresh subject per test.
func Repository(mk func(testing.TB) RepositorySubject) suites.Suite {
	s := testcase.NewSpec(nil, testcase.AsSuite("schedule.Repository"))
	s.Context(".Locks", guardcontracts.LockerFactory[schedule.StateID](func(tb testing.TB) guardcontracts.LockerFactorySubject[schedule.StateID] {
		t := testcase.ToT(&tb)
		subject := mk(tb)
		return guardcontracts.LockerFactorySubject[schedule.StateID]{
			LockerFactory: subject.Repository.Locks(),
			MakeContext:   subject.MakeContext,
			// Keys are random strings; uniqueness is presumably sufficient here.
			MakeKey: func() schedule.StateID {
				return schedule.StateID(t.Random.String())
			},
		}
	}).Spec)
	s.Context(".States", stateRepository(func(tb testing.TB) stateRepositorySubject {
		t := testcase.ToT(&tb)
		subject := mk(tb)
		return stateRepositorySubject{
			StateRepository: subject.Repository.States(),
			MakeContext:     subject.MakeContext,
			// Random ID plus a digit suffix, with a random timestamp.
			MakeScheduleState: func() schedule.State {
				return schedule.State{
					ID:        schedule.StateID(t.Random.String() + t.Random.StringNC(5, random.CharsetDigit())),
					Timestamp: t.Random.Time(),
				}
			},
		}
	}).Spec)
	return s.AsSuite()
}
// RepositorySubject bundles the schedule.Repository under test with a
// context factory for the contract suite.
type RepositorySubject struct {
	Repository  schedule.Repository
	MakeContext func() context.Context
}
// stateRepository builds the CRUD contract suite (Create, Update, FindByID,
// DeleteByID) for a schedule.StateRepository.
func stateRepository(mk func(tb testing.TB) stateRepositorySubject) suites.Suite {
	s := testcase.NewSpec(nil, testcase.AsSuite("schedule.StateRepository"))
	testcase.RunSuite(s,
		crudcontracts.Creator[schedule.State, schedule.StateID](func(tb testing.TB) crudcontracts.CreatorSubject[schedule.State, schedule.StateID] {
			sub := mk(tb)
			return crudcontracts.CreatorSubject[schedule.State, schedule.StateID]{
				Resource:    sub.StateRepository,
				MakeContext: sub.MakeContext,
				MakeEntity:  sub.MakeScheduleState,
				// ID reuse/recreation is explicitly not part of the contract.
				SupportIDReuse:  false,
				SupportRecreate: false,
			}
		}),
		crudcontracts.Updater[schedule.State, schedule.StateID](func(tb testing.TB) crudcontracts.UpdaterSubject[schedule.State, schedule.StateID] {
			sub := mk(tb)
			return crudcontracts.UpdaterSubject[schedule.State, schedule.StateID]{
				Resource:    sub.StateRepository,
				MakeContext: sub.MakeContext,
				MakeEntity:  sub.MakeScheduleState,
				// Only the Timestamp is mutated; the ID must stay stable.
				ChangeEntity: func(ptr *schedule.State) {
					ptr.Timestamp = testcase.ToT(&tb).Random.Time()
				},
			}
		}),
		crudcontracts.ByIDFinder[schedule.State, schedule.StateID](func(tb testing.TB) crudcontracts.ByIDFinderSubject[schedule.State, schedule.StateID] {
			sub := mk(tb)
			return crudcontracts.ByIDFinderSubject[schedule.State, schedule.StateID]{
				Resource:    sub.StateRepository,
				MakeContext: sub.MakeContext,
				MakeEntity:  sub.MakeScheduleState,
			}
		}),
		crudcontracts.ByIDDeleter[schedule.State, schedule.StateID](func(tb testing.TB) crudcontracts.ByIDDeleterSubject[schedule.State, schedule.StateID] {
			sub := mk(tb)
			return crudcontracts.ByIDDeleterSubject[schedule.State, schedule.StateID]{
				Resource:    sub.StateRepository,
				MakeContext: sub.MakeContext,
				MakeEntity:  sub.MakeScheduleState,
			}
		}),
	)
	return s.AsSuite()
}
// stateRepositorySubject bundles a StateRepository under test with factories
// for contexts and example schedule.State entities.
type stateRepositorySubject struct {
	StateRepository   schedule.StateRepository
	MakeContext       func() context.Context
	MakeScheduleState func() schedule.State
}
|
package department
import (
"github.com/gin-gonic/gin"
//"net/http"
//"fmt"
"go-antd-admin/utils/result"
"go-antd-admin/utils/e"
"go-antd-admin/models"
"strconv"
//"go-antd-admin/middleware/jwt"
)
// Index is a placeholder handler that returns a fixed greeting with HTTP 200.
func Index(c *gin.Context) {
	c.String(200, "Hello World2")
}
// departmentModel is the shared Department model instance used by all handlers in this file.
var departmentModel = new(models.Department)
// @Summary 注册用户
// @Produce json
// @Param name query string true "name"
// @Param pwd query string true "password"
// @Success 200 {object} result.Response
// @Failure 500 {object} result.Response
// @Router /register [get]
// 注册信息
// DepartmentInfo is the form payload for creating a department.
// Both fields are required by the binding; note that binding:"required"
// rejects a zero ParentID, so a root department cannot use parent_id=0.
type DepartmentInfo struct {
	Name     string `form:"name" binding:"required"`
	ParentID int    `form:"parent_id" binding:"required"`
}
// CreateDepartment binds a DepartmentInfo form and stores a new department.
// Responds with SUCCESS on creation, ERROR_CREATE_MENU on a model failure,
// or INVALID_PARAMS when binding fails.
func CreateDepartment(c *gin.Context) {
	var dataIn DepartmentInfo
	if c.ShouldBind(&dataIn) != nil {
		result.Error(c, e.INVALID_PARAMS)
		return
	}
	data := models.Department{Name: dataIn.Name, ParentID: dataIn.ParentID}
	if departmentModel.AddDepartment(data) == nil {
		result.Success(c, e.GetMsg(e.SUCCESS))
	} else {
		result.Error(c, e.ERROR_CREATE_MENU)
	}
}
// GetDepartment looks up a department by the integer `id` query parameter.
func GetDepartment(c *gin.Context) {
	id, convErr := strconv.Atoi(c.Query("id"))
	if convErr != nil {
		result.Error(c, e.INVALID_PARAMS)
		return
	}
	data, err := departmentModel.GetDepartment(id)
	if err != nil {
		// NOTE(review): reuses the create-menu error code — confirm intended.
		result.Error(c, e.ERROR_CREATE_MENU)
		return
	}
	result.SuccessWithData(c, e.GetMsg(e.SUCCESS), data)
}
// DeleteDepartment removes the department identified by the `id` query parameter.
func DeleteDepartment(c *gin.Context) {
	id, convErr := strconv.Atoi(c.Query("id"))
	if convErr != nil {
		result.Error(c, e.INVALID_PARAMS)
		return
	}
	if err := departmentModel.DeleteDepartment(uint(id)); err != nil {
		result.Error(c, e.ERROR_CREATE_MENU)
		return
	}
	result.Success(c, e.GetMsg(e.SUCCESS))
}
// type TreeList struct {
// models.Department
// Routes []*TreeList `json:"routes"`
// }
// DepartmentManage is the response shape returned by EditDepartment.
// ID has no json tag, so it serializes as "ID" — presumably unintended; verify.
type DepartmentManage struct {
	ID    uint
	Key   string `json:"key"`
	Title string `json:"title"`
}
// TreeListManage is a department tree node for management views; Children is
// omitted from JSON when empty, producing leaf nodes without a children key.
type TreeListManage struct {
	ID       uint              `json:"id"`
	Key      string            `json:"key"`
	Title    string            `json:"title"`
	ParentID int               `json:"parent_id"`
	Children []*TreeListManage `json:"children,omitempty"`
}
// func getSubMenus(id int)[]*TreeList{
// menus,_:=menuModel.GetSubMenus(id)
// //fmt.Println(menus)
// treeList := []*TreeList{}
// for _,v:=range menus {
// if int(v.ID)!=id {
// child := getSubMenus(int(v.ID))
// node := &TreeList{
// Menu:v,
// }
// node.Routes = child
// treeList = append(treeList, node)
// }
// }
// return treeList
// }
// getSubDepartmentsManage recursively builds the management tree rooted under
// the department with the given id. The node whose own ID equals id is skipped
// to avoid self-recursion. Lookup errors are deliberately ignored (best effort).
func getSubDepartmentsManage(id int) []*TreeListManage {
	subs, _ := departmentModel.GetSubDepartments(id)
	nodes := []*TreeListManage{}
	for _, d := range subs {
		if int(d.ID) == id {
			continue
		}
		nodes = append(nodes, &TreeListManage{
			ID:       d.ID,
			Key:      strconv.Itoa(int(d.ID)),
			Title:    d.Name,
			ParentID: d.ParentID,
			Children: getSubDepartmentsManage(int(d.ID)),
		})
	}
	return nodes
}
// type Menus struct{
// models.Menu
// }
// func GetMenus(c *gin.Context) {
// id,err2:=strconv.Atoi(c.Query("id"))
// if err2==nil {
// treeList:=getSubMenus(id)
// result.SuccessWithData(c,e.GetMsg(e.SUCCESS),treeList)
// }else{
// result.Error(c,e.INVALID_PARAMS)
// }
// }
// GetDepartmentsManage returns the department subtree under the `id` query parameter.
func GetDepartmentsManage(c *gin.Context) {
	id, err := strconv.Atoi(c.Query("id"))
	if err != nil {
		result.Error(c, e.INVALID_PARAMS)
		return
	}
	result.SuccessWithData(c, e.GetMsg(e.SUCCESS), getSubDepartmentsManage(id))
}
// GetAllDepartments returns the whole department tree, rooted at the
// department with ID 1. The root lookup error is intentionally ignored.
func GetAllDepartments(c *gin.Context) {
	department, _ := departmentModel.GetDepartment(1)
	root := &TreeListManage{
		ID:       department.ID,
		Key:      strconv.Itoa(int(department.ID)),
		Title:    department.Name,
		Children: getSubDepartmentsManage(1),
	}
	result.SuccessWithData(c, e.GetMsg(e.SUCCESS), []*TreeListManage{root})
}
// EditDepartmentInfo is the form payload for renaming a department.
// Both fields are required by the binding (so ID 0 is rejected).
type EditDepartmentInfo struct {
	Name string `form:"name" binding:"required"`
	ID   uint   `form:"id" binding:"required"`
}
func EditDepartment(c *gin.Context) {
var info EditDepartmentInfo
if c.ShouldBind(&info) == nil {
depart:=models.Department{Name:info.Name}
depart.ID=info.ID
if depart2,err:=departmentModel.UpdateDepartment(depart);err==nil{
result.SuccessWithData(c,e.GetMsg(e.SUCCESS),DepartmentManage{Key:strconv.Itoa(int(depart.ID)),Title:depart2.Name,})
}else{
result.Error(c,e.ERROR_CREATE_MENU)
}
}else{
result.Error(c,e.INVALID_PARAMS)
}
} |
// Creates a predefined color selection dialog. The user receives the color in the RGB format.
package main
import (
"fmt"
"github.com/matwachich/iup"
)
// main opens a predefined IUP color chooser at screen position (100, 100)
// and, if the user confirmed a selection (ret != 0), shows the chosen RGB
// values in a message box.
func main() {
	iup.Open()
	defer iup.Close()
	if ret, r, g, b := iup.GetColor(100, 100); ret != 0 {
		iup.Message("Color", fmt.Sprintf("RGB = %v %v %v", r, g, b))
	}
}
|
package requests
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
)
// DisableAssignmentsCurrentlyEnabledForGradeExportToSIS Disable all assignments flagged as "post_to_sis", with the option of making it
// specific to a grading period, in a course.
//
// On success, the response will be 204 No Content with an empty body.
//
// On failure, the response will be 400 Bad Request with a body of a specific
// message.
//
// For disabling assignments in a specific grading period
// https://canvas.instructure.com/doc/api/sis_integration.html
//
// Path Parameters:
// # Path.CourseID (Required) The ID of the course.
//
// Form Parameters:
// # Form.GradingPeriodID (Optional) The ID of the grading period.
//
// DisableAssignmentsCurrentlyEnabledForGradeExportToSIS is a request object;
// its method set (GetMethod, GetURLPath, …) is consumed by
// canvasapi.Canvas.SendRequest (see Do below).
type DisableAssignmentsCurrentlyEnabledForGradeExportToSIS struct {
	Path struct {
		CourseID int64 `json:"course_id" url:"course_id,omitempty"` // (Required)
	} `json:"path"`
	Form struct {
		GradingPeriodID int64 `json:"grading_period_id" url:"grading_period_id,omitempty"` // (Optional)
	} `json:"form"`
}
// GetMethod returns the HTTP method used for this request.
func (t *DisableAssignmentsCurrentlyEnabledForGradeExportToSIS) GetMethod() string {
	return "PUT"
}
// GetURLPath builds the request path with the course ID substituted into the
// {course_id} placeholder.
func (t *DisableAssignmentsCurrentlyEnabledForGradeExportToSIS) GetURLPath() string {
	const template = "/sis/courses/{course_id}/disable_post_to_sis"
	return strings.ReplaceAll(template, "{course_id}", fmt.Sprintf("%v", t.Path.CourseID))
}
// GetQuery returns the query string; this request sends no query parameters.
func (t *DisableAssignmentsCurrentlyEnabledForGradeExportToSIS) GetQuery() (string, error) {
	return "", nil
}
// GetBody encodes the form parameters as url.Values for the request body.
func (t *DisableAssignmentsCurrentlyEnabledForGradeExportToSIS) GetBody() (url.Values, error) {
	return query.Values(t.Form)
}
// GetJSON marshals the form parameters as the JSON request body.
func (t *DisableAssignmentsCurrentlyEnabledForGradeExportToSIS) GetJSON() ([]byte, error) {
	j, err := json.Marshal(t.Form)
	if err != nil {
		// Propagate the marshal error; the original returned (nil, nil),
		// making failures indistinguishable from success with an empty body.
		return nil, err
	}
	return j, nil
}
// HasErrors validates the request; there are no validations for this endpoint.
func (t *DisableAssignmentsCurrentlyEnabledForGradeExportToSIS) HasErrors() error {
	return nil
}
// Do executes the request against the given Canvas client. The response body
// is discarded; only the transport/API error is surfaced.
func (t *DisableAssignmentsCurrentlyEnabledForGradeExportToSIS) Do(c *canvasapi.Canvas) error {
	if _, err := c.SendRequest(t); err != nil {
		return err
	}
	return nil
}
|
// Copyright 2018 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package changefeedccl
import (
"context"
gosql "database/sql"
gojson "encoding/json"
"fmt"
"net/url"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/cdctest"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase"
// Imported to allow locality-related table mutations
_ "github.com/cockroachdb/cockroach/pkg/ccl/multiregionccl"
_ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
// testSinkFlushFrequency is the sink flush interval installed for changefeed tests.
var testSinkFlushFrequency = 100 * time.Millisecond
// waitForSchemaChange runs stmt and then blocks until the most recently
// created job reports status 'succeeded'.
func waitForSchemaChange(
	t testing.TB, sqlDB *sqlutils.SQLRunner, stmt string, arguments ...interface{},
) {
	sqlDB.Exec(t, stmt, arguments...)
	var jobID string
	sqlDB.QueryRow(t, "SELECT job_id FROM [SHOW JOBS] ORDER BY created DESC LIMIT 1").Scan(&jobID)
	testutils.SucceedsSoon(t, func() error {
		var status string
		sqlDB.QueryRow(t, "SELECT status FROM [SHOW JOBS] WHERE job_id = $1", jobID).Scan(&status)
		if status == "succeeded" {
			return nil
		}
		return fmt.Errorf("Job %s had status %s, wanted 'succeeded'", jobID, status)
	})
}
// readNextMessages pulls messages from f until numMessages row messages are
// collected, formatting each as "topic: key->value". Resolved/empty messages
// (no key and no value) are skipped. When stripTs is set, the "updated" field
// is removed from the JSON value so payloads compare stably across runs.
func readNextMessages(t testing.TB, f cdctest.TestFeed, numMessages int, stripTs bool) []string {
	t.Helper()
	var actual []string
	var value []byte
	var message map[string]interface{}
	for len(actual) < numMessages {
		m, err := f.Next()
		if log.V(1) {
			if m != nil {
				log.Infof(context.Background(), `msg %s: %s->%s (%s)`, m.Topic, m.Key, m.Value, m.Resolved)
			} else {
				log.Infof(context.Background(), `err %v`, err)
			}
		}
		if err != nil {
			t.Fatal(err)
		} else if m == nil {
			t.Fatal(`expected message`)
		} else if len(m.Key) > 0 || len(m.Value) > 0 {
			if stripTs {
				if err := gojson.Unmarshal(m.Value, &message); err != nil {
					t.Fatalf(`%s: %s`, m.Value, err)
				}
				// Drop the MVCC timestamp so output is deterministic.
				delete(message, "updated")
				value, err = cdctest.ReformatJSON(message)
				if err != nil {
					t.Fatal(err)
				}
			} else {
				value = m.Value
			}
			actual = append(actual, fmt.Sprintf(`%s: %s->%s`, m.Topic, m.Key, value))
		}
	}
	return actual
}
// assertPayloadsBase reads len(expected) messages and compares them to
// expected, order-insensitively (both sides are sorted before comparison).
func assertPayloadsBase(t testing.TB, f cdctest.TestFeed, expected []string, stripTs bool) {
	t.Helper()
	actual := readNextMessages(t, f, len(expected), stripTs)
	sort.Strings(expected)
	sort.Strings(actual)
	if reflect.DeepEqual(expected, actual) {
		return
	}
	t.Fatalf("expected\n %s\ngot\n %s",
		strings.Join(expected, "\n "), strings.Join(actual, "\n "))
}
// assertPayloads asserts the next messages equal expected, keeping timestamps.
func assertPayloads(t testing.TB, f cdctest.TestFeed, expected []string) {
	t.Helper()
	assertPayloadsBase(t, f, expected, false)
}
// assertPayloadsStripTs asserts the next messages equal expected after
// removing the "updated" timestamp field from each value.
func assertPayloadsStripTs(t testing.TB, f cdctest.TestFeed, expected []string) {
	t.Helper()
	assertPayloadsBase(t, f, expected, true)
}
// avroToJSON decodes registry-framed avro bytes and re-encodes them as
// deterministic JSON (sorted object keys). Nil/empty input maps to nil.
func avroToJSON(t testing.TB, reg *testSchemaRegistry, avroBytes []byte) []byte {
	if len(avroBytes) == 0 {
		return nil
	}
	native, err := reg.encodedAvroToNative(avroBytes)
	if err != nil {
		t.Fatal(err)
	}
	// The avro textual format is a more natural fit, but it's non-deterministic
	// because of go's randomized map ordering. Instead, we use gojson.Marshal,
	// which sorts its object keys and so is deterministic.
	json, err := gojson.Marshal(native)
	if err != nil {
		t.Fatal(err)
	}
	return json
}
// assertPayloadsAvro reads len(expected) keyed messages from f, converting
// avro keys/values to deterministic JSON, and compares against expected
// order-insensitively. Messages without a key (e.g. resolved) are skipped.
func assertPayloadsAvro(
	t testing.TB, reg *testSchemaRegistry, f cdctest.TestFeed, expected []string,
) {
	t.Helper()
	var actual []string
	for len(actual) < len(expected) {
		m, err := f.Next()
		if err != nil {
			t.Fatal(err)
		} else if m == nil {
			t.Fatal(`expected message`)
		} else if m.Key != nil {
			key, value := avroToJSON(t, reg, m.Key), avroToJSON(t, reg, m.Value)
			actual = append(actual, fmt.Sprintf(`%s: %s->%s`, m.Topic, key, value))
		}
	}
	// The tests that use this aren't concerned with order, just that these are
	// the next len(expected) messages.
	sort.Strings(expected)
	sort.Strings(actual)
	if !reflect.DeepEqual(expected, actual) {
		t.Fatalf("expected\n %s\ngot\n %s",
			strings.Join(expected, "\n "), strings.Join(actual, "\n "))
	}
}
// assertRegisteredSubjects checks that the schema registry holds exactly the
// expected subjects, ignoring order.
func assertRegisteredSubjects(t testing.TB, reg *testSchemaRegistry, expected []string) {
	t.Helper()
	subjects := make([]string, 0, len(reg.mu.subjects))
	for subject := range reg.mu.subjects {
		subjects = append(subjects, subject)
	}
	sort.Strings(expected)
	sort.Strings(subjects)
	if !reflect.DeepEqual(expected, subjects) {
		t.Fatalf("expected\n %s\ngot\n %s",
			strings.Join(expected, "\n "), strings.Join(subjects, "\n "))
	}
}
// parseTimeToHLC parses a decimal-string timestamp into an HLC timestamp.
func parseTimeToHLC(t testing.TB, s string) hlc.Timestamp {
	t.Helper()
	d, _, err := apd.NewFromString(s)
	if err != nil {
		t.Fatal(err)
	}
	ts, err := tree.DecimalToHLC(d)
	if err != nil {
		t.Fatal(err)
	}
	return ts
}
// expectResolvedTimestamp reads the next message from f and requires it to be
// a resolved-timestamp notification, returning the parsed timestamp.
func expectResolvedTimestamp(t testing.TB, f cdctest.TestFeed) hlc.Timestamp {
	t.Helper()
	m, err := f.Next()
	if err != nil {
		t.Fatal(err)
	} else if m == nil {
		t.Fatal(`expected message`)
	}
	return extractResolvedTimestamp(t, m)
}
// extractResolvedTimestamp parses the resolved timestamp out of a feed
// message, failing the test if the message is a row (has a key) or carries
// no resolved payload.
func extractResolvedTimestamp(t testing.TB, m *cdctest.TestFeedMessage) hlc.Timestamp {
	t.Helper()
	if m.Key != nil {
		t.Fatalf(`unexpected row %s: %s -> %s`, m.Topic, m.Key, m.Value)
	}
	if m.Resolved == nil {
		t.Fatal(`expected a resolved timestamp notification`)
	}
	// Resolved payloads look like {"resolved": "<decimal timestamp>"}.
	var resolvedRaw struct {
		Resolved string `json:"resolved"`
	}
	if err := gojson.Unmarshal(m.Resolved, &resolvedRaw); err != nil {
		t.Fatal(err)
	}
	return parseTimeToHLC(t, resolvedRaw.Resolved)
}
// expectResolvedTimestampAvro reads the next message and requires it to be an
// avro-encoded resolved-timestamp notification, returning the parsed timestamp.
func expectResolvedTimestampAvro(
	t testing.TB, reg *testSchemaRegistry, f cdctest.TestFeed,
) hlc.Timestamp {
	t.Helper()
	m, err := f.Next()
	if err != nil {
		t.Fatal(err)
	} else if m == nil {
		t.Fatal(`expected message`)
	}
	if m.Key != nil {
		key, value := avroToJSON(t, reg, m.Key), avroToJSON(t, reg, m.Value)
		t.Fatalf(`unexpected row %s: %s -> %s`, m.Topic, key, value)
	}
	if m.Resolved == nil {
		t.Fatal(`expected a resolved timestamp notification`)
	}
	resolvedNative, err := reg.encodedAvroToNative(m.Resolved)
	if err != nil {
		t.Fatal(err)
	}
	// The avro union decodes as nested maps: resolved -> {"string": "<ts>"}.
	resolved := resolvedNative.(map[string]interface{})[`resolved`]
	return parseTimeToHLC(t, resolved.(map[string]interface{})[`string`].(string))
}
// sinklessTestWithServerArgs wraps testFn in a test that starts a single-node
// server configured for sinkless (query-based) changefeeds. argsFn, if
// non-nil, can mutate the server args before startup. Cluster settings are
// applied in a fixed order before `CREATE DATABASE d`.
func sinklessTestWithServerArgs(
	argsFn func(args *base.TestServerArgs),
	testFn func(*testing.T, *gosql.DB, cdctest.TestFeedFactory),
) func(*testing.T) {
	return func(t *testing.T) {
		defer changefeedbase.TestingSetDefaultFlushFrequency(testSinkFlushFrequency)()
		ctx := context.Background()
		knobs := base.TestingKnobs{DistSQL: &execinfra.TestingKnobs{Changefeed: &TestingKnobs{}}}
		args := base.TestServerArgs{
			Knobs:       knobs,
			UseDatabase: `d`,
		}
		if argsFn != nil {
			argsFn(&args)
		}
		s, db, _ := serverutils.StartServer(t, args)
		defer s.Stopper().Stop(ctx)
		sqlDB := sqlutils.MakeSQLRunner(db)
		sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = true`)
		// TODO(dan): We currently have to set this to an extremely conservative
		// value because otherwise schema changes become flaky (they don't commit
		// their txn in time, get pushed by closed timestamps, and retry forever).
		// This is more likely when the tests run slower (race builds or inside
		// docker). The conservative value makes our tests take a lot longer,
		// though. Figure out some way to speed this up.
		sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '1s'`)
		// TODO(dan): This is still needed to speed up table_history, that should be
		// moved to RangeFeed as well.
		sqlDB.Exec(t, `SET CLUSTER SETTING changefeed.experimental_poll_interval = '10ms'`)
		// Change a couple of settings related to the vectorized engine in
		// order to ensure that changefeeds work as expected with them (note
		// that we'll still use the row-by-row engine, see #55605).
		sqlDB.Exec(t, `SET CLUSTER SETTING sql.defaults.vectorize=on`)
		sqlDB.Exec(t, `CREATE DATABASE d`)
		if region := serverArgsRegion(args); region != "" {
			sqlDB.Exec(t, fmt.Sprintf(`ALTER DATABASE d PRIMARY REGION "%s"`, region))
		}
		sink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
		defer cleanup()
		f := cdctest.MakeSinklessFeedFactory(s, sink)
		testFn(t, db, f)
	}
}
// sinklessTest is sinklessTestWithServerArgs with default server args.
func sinklessTest(testFn func(*testing.T, *gosql.DB, cdctest.TestFeedFactory)) func(*testing.T) {
	return sinklessTestWithServerArgs(nil, testFn)
}
// enterpriseTest is enterpriseTestWithServerArgs with default server args.
func enterpriseTest(testFn func(*testing.T, *gosql.DB, cdctest.TestFeedFactory)) func(*testing.T) {
	return enterpriseTestWithServerArgs(nil, testFn)
}
// enterpriseTestWithServerArgs wraps testFn in a test that starts a
// single-node server configured for enterprise (job-backed, table-sink)
// changefeeds. A non-blocking flush notification channel is wired into the
// changefeed testing knobs and handed to the table feed factory.
func enterpriseTestWithServerArgs(
	argsFn func(args *base.TestServerArgs),
	testFn func(*testing.T, *gosql.DB, cdctest.TestFeedFactory),
) func(*testing.T) {
	return func(t *testing.T) {
		defer changefeedbase.TestingSetDefaultFlushFrequency(testSinkFlushFrequency)()
		defer jobs.TestingSetAdoptAndCancelIntervals(10*time.Millisecond, 10*time.Millisecond)()
		ctx := context.Background()
		flushCh := make(chan struct{}, 1)
		defer close(flushCh)
		knobs := base.TestingKnobs{DistSQL: &execinfra.TestingKnobs{Changefeed: &TestingKnobs{
			// Signal each sink flush without ever blocking the flusher.
			AfterSinkFlush: func() error {
				select {
				case flushCh <- struct{}{}:
				default:
				}
				return nil
			},
		}}}
		args := base.TestServerArgs{
			UseDatabase: "d",
			Knobs:       knobs,
		}
		if argsFn != nil {
			argsFn(&args)
		}
		s, db, _ := serverutils.StartServer(t, args)
		defer s.Stopper().Stop(ctx)
		sqlDB := sqlutils.MakeSQLRunner(db)
		sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = true`)
		sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '1s'`)
		sqlDB.Exec(t, `SET CLUSTER SETTING changefeed.experimental_poll_interval = '10ms'`)
		sqlDB.Exec(t, `CREATE DATABASE d`)
		if region := serverArgsRegion(args); region != "" {
			sqlDB.Exec(t, fmt.Sprintf(`ALTER DATABASE d PRIMARY REGION "%s"`, region))
		}
		sink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
		defer cleanup()
		f := cdctest.MakeTableFeedFactory(s, db, flushCh, sink)
		testFn(t, db, f)
	}
}
// serverArgsRegion returns the value of the "region" locality tier, or "" if
// the args declare none.
func serverArgsRegion(args base.TestServerArgs) string {
	for _, tier := range args.Locality.Tiers {
		if tier.Key != "region" {
			continue
		}
		return tier.Value
	}
	return ""
}
// cloudStorageTest wraps testFn in a test that starts a single-node server
// with an external IO temp dir and a cloud-storage feed factory. Like the
// enterprise variant, sink flushes are signaled on a non-blocking channel.
func cloudStorageTest(
	testFn func(*testing.T, *gosql.DB, cdctest.TestFeedFactory),
) func(*testing.T) {
	return func(t *testing.T) {
		defer changefeedbase.TestingSetDefaultFlushFrequency(testSinkFlushFrequency)()
		defer jobs.TestingSetAdoptAndCancelIntervals(10*time.Millisecond, 10*time.Millisecond)()
		ctx := context.Background()
		dir, dirCleanupFn := testutils.TempDir(t)
		defer dirCleanupFn()
		flushCh := make(chan struct{}, 1)
		defer close(flushCh)
		knobs := base.TestingKnobs{DistSQL: &execinfra.TestingKnobs{Changefeed: &TestingKnobs{
			AfterSinkFlush: func() error {
				select {
				case flushCh <- struct{}{}:
				default:
				}
				return nil
			},
		}}}
		s, db, _ := serverutils.StartServer(t, base.TestServerArgs{
			UseDatabase:   "d",
			ExternalIODir: dir,
			Knobs:         knobs,
		})
		defer s.Stopper().Stop(ctx)
		sqlDB := sqlutils.MakeSQLRunner(db)
		sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = true`)
		sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '1s'`)
		sqlDB.Exec(t, `SET CLUSTER SETTING changefeed.experimental_poll_interval = '10ms'`)
		sqlDB.Exec(t, `CREATE DATABASE d`)
		f := cdctest.MakeCloudFeedFactory(s, db, dir, flushCh)
		testFn(t, db, f)
	}
}
// feed creates a changefeed via the factory, failing the test on error.
func feed(
	t testing.TB, f cdctest.TestFeedFactory, create string, args ...interface{},
) cdctest.TestFeed {
	t.Helper()
	feed, err := f.Feed(create, args...)
	if err != nil {
		t.Fatal(err)
	}
	return feed
}
// closeFeed closes a test feed, failing the test on error.
func closeFeed(t testing.TB, f cdctest.TestFeed) {
	t.Helper()
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
}
// forceTableGC forces garbage collection of the given table up to the
// server's current clock time, failing the test on error.
func forceTableGC(
	t testing.TB,
	tsi serverutils.TestServerInterface,
	sqlDB *sqlutils.SQLRunner,
	database, table string,
) {
	t.Helper()
	if err := tsi.ForceTableGC(context.Background(), database, table, tsi.Clock().Now()); err != nil {
		t.Fatal(err)
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package utils
import (
"time"
)
// Timeout & interval for verifying audio, ethernet, display, power status when a dock interacts with Chromebook.
const (
AudioTimeout = 30 * time.Second
AudioInterval = 200 * time.Millisecond
EthernetTimeout = 30 * time.Second
EthernetInterval = 200 * time.Millisecond
DisplayTimeout = 30 * time.Second
DisplayInterval = 200 * time.Millisecond
PowerTimeout = 30 * time.Second
PowerInterval = 200 * time.Millisecond
)
// Timeout & polling interval for verifying that windows match certain
// properties.
const (
	WindowTimeout  = 30 * time.Second
	WindowInterval = 200 * time.Millisecond
)
|
/*
* Npcf_SMPolicyControl API
*
* Session Management Policy Control Service © 2019, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
*
* API version: 1.0.4
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// UeInitiatedResourceRequest models a UE-initiated resource request of the
// Npcf_SMPolicyControl API.
type UeInitiatedResourceRequest struct {
	// PccRuleId identifies the affected PCC rule; omitted when empty.
	PccRuleId string `json:"pccRuleId,omitempty"`
	// RuleOp is the requested rule operation; always serialized.
	RuleOp RuleOperation `json:"ruleOp"`
	// Precedence of the rule; omitted when zero.
	Precedence int32 `json:"precedence,omitempty"`
	// PackFiltInfo lists packet filter information; always serialized.
	PackFiltInfo []PacketFilterInfo `json:"packFiltInfo"`
	// ReqQos is the QoS requested by the UE; omitted when empty.
	ReqQos RequestedQos `json:"reqQos,omitempty"`
}
|
package root
import (
"fmt"
"os"
"strconv"
"github.com/calebcase/version/lib/version"
"github.com/inconshreveable/log15"
"github.com/spf13/cobra"
"gopkg.in/src-d/go-git.v4"
)
var (
	// Log is the logger for the CLI.
	Log = log15.New()
	// RepoPath is the path to the repository; overridden by --repopath.
	RepoPath = "."
	// Cmd is the root command for the CLI.
	Cmd = &cobra.Command{
		Use:   "version",
		Short: "autogenerate versions with patch level",
	}
)
// init configures the package logger from the environment and registers
// the persistent --repopath flag on the root command.
func init() {
	// Default to warnings; VERSION_LOG_LEVEL overrides when it parses as a
	// valid log15 level (parse failures silently keep the default).
	lvl := log15.LvlWarn
	lvlStr, lvlProvided := os.LookupEnv("VERSION_LOG_LEVEL")
	if lvlProvided {
		lvlParsed, err := log15.LvlFromString(lvlStr)
		if err == nil {
			lvl = lvlParsed
		}
	}
	// VERSION_LOG_VERBOSITY selects extra caller detail (see SetLogger);
	// unparsable values silently keep verbosity 0.
	var verbosity uint = 0
	verbosityStr, verbosityProvided := os.LookupEnv("VERSION_LOG_VERBOSITY")
	if verbosityProvided {
		verbosityParsed, err := strconv.ParseUint(verbosityStr, 10, 64)
		if err == nil {
			verbosity = uint(verbosityParsed)
		}
	}
	SetLogger(lvl, verbosity, log15.TerminalFormat())
	flags := Cmd.PersistentFlags()
	flags.StringVarP(&RepoPath, "repopath", "r", RepoPath, "base path for the repository")
}
// SetLogger adjusts the logger Log with the given log level, verbosity, and
// format. Increasing verbosity cumulatively adds caller file, caller
// function, and caller stack annotations.
func SetLogger(lvl log15.Lvl, verbosity uint, format log15.Format) {
	handler := log15.LvlFilterHandler(lvl, log15.StreamHandler(os.Stderr, format))
	if verbosity >= 1 {
		handler = log15.CallerFileHandler(handler)
	}
	if verbosity >= 2 {
		handler = log15.CallerFuncHandler(handler)
	}
	if verbosity >= 3 {
		handler = log15.CallerStackHandler("%+v", handler)
	}
	Log.SetHandler(handler)
}
// Repo opens the repository at RepoPath, computes its version with the
// given Versioner, and prints the result to stdout.
func Repo(vr version.Versioner) (err error) {
	repo, err := git.PlainOpen(RepoPath)
	if err != nil {
		return err
	}
	ver, err := version.Repo(repo, vr)
	if err != nil {
		return err
	}
	fmt.Println(ver)
	return nil
}
|
package session
import (
"sync"
"sync/atomic"
"github.com/diamondburned/arikawa/discord"
"github.com/diamondburned/arikawa/state"
)
// Session wraps a Discord state.State with reference counting so multiple
// consumers of the same token can share one gateway connection.
type Session struct {
	*state.State

	// id is the snowflake of the account this session authenticates.
	id discord.Snowflake
	// refs counts active users of the session; manipulated atomically.
	refs uint32
}
var (
	// ids maps tokens to the account snowflake they authenticate.
	ids = make(map[string]discord.Snowflake)
	// sessions maps account snowflakes to their shared session.
	sessions = make(map[discord.Snowflake]*Session)
	// sessionLock guards both ids and sessions.
	sessionLock sync.Mutex
)
// Get returns a shared Session for the given token, opening a new gateway
// connection if none exists yet. The returned session already holds one
// reference on behalf of the caller; release it with Unref.
func Get(token string) (*Session, error) {
	sessionLock.Lock()
	defer sessionLock.Unlock()

	// Fast path: reuse an already-open session for this token.
	if id, ok := ids[token]; ok {
		if s, ok := sessions[id]; ok {
			s.Ref()
			return s, nil
		}
	}

	// Renamed from `discord` to avoid shadowing the imported package.
	st, err := state.New(token)
	if err != nil {
		return nil, err
	}
	if err := st.Open(); err != nil {
		return nil, err
	}

	session := &Session{
		State: st,
		// Start at one reference so the caller's eventual Unref balances;
		// starting at zero made the first Unref underflow the counter and
		// the session was never closed.
		refs: 1,
	}

	me, err := st.Me()
	if err != nil {
		// Close the freshly opened connection so it does not leak.
		_ = st.Close()
		return nil, err
	}

	session.id = me.ID
	ids[token] = session.id
	sessions[session.id] = session
	return session, nil
}
// Ref atomically adds one reference to the session.
func (s *Session) Ref() {
	atomic.AddUint32(&s.refs, 1)
}
// Unref releases one reference. When the last reference is dropped, the
// session is removed from both caches and its connection closed.
func (s *Session) Unref() error {
	// Adding ^uint32(0) atomically decrements the counter.
	if atomic.AddUint32(&s.refs, ^uint32(0)) == 0 {
		sessionLock.Lock()
		defer sessionLock.Unlock()

		// Delete by the snowflake cached in Get (s.id) rather than
		// s.Ready.User.ID, so the key always matches the one used to store
		// the session. Also drop any token entries that still point at it,
		// otherwise Get would find a stale id and reopen under it.
		delete(sessions, s.id)
		for token, id := range ids {
			if id == s.id {
				delete(ids, token)
			}
		}
		return s.Close()
	}
	return nil
}
// Close closes the underlying state connection directly, bypassing the
// reference count; shared users should call Unref instead.
func (s *Session) Close() error {
	return s.State.Close()
}
|
package util
import (
"bufio"
"fmt"
"io"
"os"
)
// Csv scans CSV content from a file or an in-memory body into a header
// description (Head) and row maps (Data).
type Csv struct {
	Body     string   // raw CSV content, used when File is empty
	Split    string   // field separator passed to ToFields
	File     string   // path of the CSV file to scan
	Tmp      string   // optional path to spool a normalized copy to
	Filter   []string // substrings stripped from header names
	Fh       bool     // true when the first row is a header row
	Limit    int      // maximum number of rows kept in Data
	Offset   int      // data rows skipped before collecting
	Head     []P      // column descriptors ("o" id like c0, "n" name, "type")
	Data     []P      // collected rows keyed by the column "o" id
	Err      error    // first error encountered during scanning
	LockHead bool     // true when Head was supplied; type inference is skipped
}
// Scan reads the CSV source (File, or Body spooled to a temp file) record
// by record, building this.Head from the supplied head or from the first
// row, and filling this.Data subject to Offset/Limit. It returns the
// number of records consumed; errors are recorded in this.Err.
func (this *Csv) Scan(head []P) (count int) {
	if this.Limit < 1 {
		this.Limit = 1
	}
	this.Data = nil
	this.Err = nil
	this.Head = []P{}
	if !IsEmpty(head) {
		// An explicit header locks column types and keeps only columns
		// that carry an "o" id.
		this.LockHead = true
		for _, v := range head {
			if !IsEmpty(v["o"]) {
				this.Head = append(this.Head, v)
			}
		}
		if !IsEmpty(this.Tmp) {
			this.saveTmpHead(head)
		}
	}
	if !IsEmpty(this.File) {
		f, err := os.Open(this.File)
		if err != nil {
			this.Err = err
			return
		}
		// Close only after a successful open; the original deferred Close
		// before the error check, calling Close on a nil file handle.
		defer f.Close()
		buf := bufio.NewReader(f)
		cols := []string{}
		half := false
		line := ""
		for {
			seg, err := buf.ReadString('\n')
			if err != nil {
				if err == io.EOF {
					return
				}
				this.Err = err
				return
			}
			// Accumulate physical lines until the record is complete
			// (quoted fields may span lines).
			if !IsCsvEnd(seg, half) {
				half = true
				line += Trim(seg)
				continue
			}
			half = false
			line += Trim(seg)
			if len(cols) == 0 {
				// First complete record: derive column descriptors unless
				// an explicit header was given.
				cols = ToFields(Trim(line), this.Split)
				if IsEmpty(head) {
					for i, col := range cols {
						if this.Filter != nil {
							col = Replace(col, this.Filter, "")
						}
						p := P{"o": JoinStr("c", i), "n": col}
						if !this.Fh {
							// No header row: fall back to the positional id.
							p["n"] = p["o"]
						}
						this.Head = append(this.Head, p)
					}
				}
			}
			if count == 0 {
				// The first record is data only when there is no header row.
				if !this.Fh {
					this.scanData(count, line)
				}
			} else {
				this.scanData(count, line)
			}
			line = ""
			count++
		}
	} else {
		// No file given: spool Body to a content-addressed temp file and
		// rescan from it.
		md5 := Md5(this.Body)
		tmpdir := "/data/tmp/"
		tmp := tmpdir + md5
		if !FileExists(tmp) {
			Mkdir(tmpdir)
			WriteFile(tmp, []byte(this.Body))
		}
		this.File = tmp
		this.Scan(head)
	}
	return
}
// scanData parses one complete CSV record into a row map and appends it to
// Data, subject to Offset/Limit and the ROW_LIMIT_MAX cap. When Tmp is
// set, the normalized record is also appended to the spool file.
func (this *Csv) scanData(count int, line string) {
	data := ToFields(Trim(line), this.Split)
	if len(data) == len(this.Head) {
		p := P{}
		for i, v := range this.Head {
			data[i] = Trim(data[i])
			k := ToString(v["o"])
			// Also mirror the latest cell value into the header descriptor.
			v[k] = data[i]
			this.setType(data, i, v)
			p[k] = data[i]
		}
		if count >= this.Offset && len(this.Data) < this.Limit && len(this.Data) < ROW_LIMIT_MAX {
			this.Data = append(this.Data, p)
		}
	} else {
		// Field count mismatch with the header: log and drop the row.
		Error("scanData", count, len(data), len(this.Head), JsonEncode(data))
	}
	if !IsEmpty(this.Tmp) {
		AppendFile(this.Tmp, this.ToLine(data)+"\n")
	}
}
// ToLine joins row into a single comma-separated record.
// The trailing separator is stripped whenever the line is non-empty; the
// previous len(line) > 1 check left a lone "," behind when the row was a
// single empty field.
func (this *Csv) ToLine(row []string) string {
	line := ""
	for _, v := range row {
		line += v + ","
	}
	if len(line) > 0 {
		line = line[:len(line)-1]
	}
	return line
}
// saveTmpHead initializes the temp spool file: with a header row (Fh) it
// writes the column names, otherwise it just truncates the file.
func (this *Csv) saveTmpHead(head []P) {
	row := []string{}
	for _, h := range head {
		// Prefer the display name "n"; fall back to the column id "o".
		v := ToString(h["n"], ToString(h["o"]))
		row = append(row, v)
	}
	if this.Fh {
		WriteFile(this.Tmp, []byte(this.ToLine(row)+"\n"))
	} else {
		WriteFile(this.Tmp, []byte(""))
	}
}
// setType infers and applies a column type ("number", "date", or "string")
// for cell i, normalizing the cell value in row accordingly. Once a column
// is marked "string" it stays string; a supplied (locked) header skips
// inference entirely.
func (this *Csv) setType(row []string, i int, p P) {
	if p["type"] == "string" {
		return
	}
	v := row[i]
	if !this.LockHead {
		if IsInt(v) {
			if p["type"] == nil {
				p["type"] = "number"
			}
		} else if IsFloat(v) {
			if p["type"] == nil {
				p["type"] = "number"
			}
		} else if IsDate(v) {
			if p["type"] == nil {
				p["type"] = "date"
			}
		} else {
			// Empty cells do not demote an already-typed column.
			if IsEmpty(v) && p["type"] != nil {
				return
			}
			p["type"] = "string"
		}
	}
	// Normalize the cell according to the (possibly just inferred) type.
	switch ToString(p["type"]) {
	case "number":
		row[i] = ToString(ToFloat(v))
	case "date":
		row[i], _ = ToDate(v)
	}
}
// Cut extracts the given columns (named like "c0", "C1", ...) from File
// into a sibling ".cut.csv" file via the external cut(1) command and
// returns the destination path.
// NOTE(review): the shell command is built by string interpolation from
// File; if the path can contain untrusted input this is a command
// injection risk — confirm callers sanitize it.
func (this *Csv) Cut(cols []string) (dst string) {
	str := ""
	for _, v := range cols {
		// Convert the zero-based column name to cut's one-based index.
		str = JoinStr(str, ToInt(Replace(v, []string{"C", "c"}, ""))+1, ",")
	}
	if len(str) > 0 {
		str = str[0 : len(str)-1]
	}
	dst = Replace(this.File, []string{".csv"}, ".cut.csv")
	cmd := fmt.Sprintf("cut -d, -f%v %v > %v", str, this.File, dst)
	Exec(cmd)
	return
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package jsonload
import (
"bytes"
"context"
"crypto/sha1"
gosql "database/sql"
"encoding/binary"
"fmt"
"hash"
"math"
"math/rand"
"strings"
"sync/atomic"
"github.com/cockroachdb/cockroach/pkg/util/json"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/workload"
"github.com/cockroachdb/cockroach/pkg/workload/histogram"
"github.com/cockroachdb/errors"
"github.com/spf13/pflag"
)
// Table schemas for the json workload: plain, with an inverted index on v,
// and with a primary key computed from the document's "key" field.
const (
	jsonSchema                  = `(k BIGINT NOT NULL PRIMARY KEY, v JSONB NOT NULL)`
	jsonSchemaWithInvertedIndex = `(k BIGINT NOT NULL PRIMARY KEY, v JSONB NOT NULL, INVERTED INDEX (v))`
	jsonSchemaWithComputed      = `(k BIGINT AS (v->>'key')::BIGINT STORED PRIMARY KEY, v JSONB NOT NULL)`
)
// jsonLoad is a workload generator that reads and writes random JSON
// documents; fields mirror the command-line flags registered in its Meta.
type jsonLoad struct {
	flags     workload.Flags
	connFlags *workload.ConnFlags

	batchSize      int   // rows per SQL statement
	cycleLength    int64 // number of keys repeatedly accessed by each writer
	readPercent    int   // percent (0-100) of operations that are reads
	writeSeq, seed int64 // initial write sequence and key hash seed
	sequential     bool  // pick keys sequentially instead of hashed
	splits         int   // pre-splits to perform before normal operation
	complexity     int   // complexity of generated JSON data
	inverted       bool  // include an inverted index
	computed       bool  // use a computed primary key
}
// init registers the json workload with the workload registry.
func init() {
	workload.Register(jsonLoadMeta)
}
// jsonLoadMeta describes the json workload and wires up its command-line
// flags; New returns a fresh generator per invocation.
var jsonLoadMeta = workload.Meta{
	Name: `json`,
	Description: `JSON reads and writes to keys spread (by default, uniformly` +
		` at random) across the cluster`,
	Version: `1.0.0`,
	New: func() workload.Generator {
		g := &jsonLoad{}
		g.flags.FlagSet = pflag.NewFlagSet(`json`, pflag.ContinueOnError)
		g.flags.Meta = map[string]workload.FlagMeta{
			`batch`: {RuntimeOnly: true},
		}
		g.flags.IntVar(&g.batchSize, `batch`, 1, `Number of blocks to insert in a single SQL statement`)
		g.flags.Int64Var(&g.cycleLength, `cycle-length`, math.MaxInt64, `Number of keys repeatedly accessed by each writer`)
		g.flags.IntVar(&g.readPercent, `read-percent`, 0, `Percent (0-100) of operations that are reads of existing keys`)
		g.flags.Int64Var(&g.writeSeq, `write-seq`, 0, `Initial write sequence value.`)
		g.flags.Int64Var(&g.seed, `seed`, 1, `Key hash seed.`)
		g.flags.BoolVar(&g.sequential, `sequential`, false, `Pick keys sequentially instead of randomly`)
		g.flags.IntVar(&g.splits, `splits`, 0, `Number of splits to perform before starting normal operations`)
		g.flags.IntVar(&g.complexity, `complexity`, 20, `Complexity of generated JSON data`)
		g.flags.BoolVar(&g.inverted, `inverted`, false, `Whether to include an inverted index`)
		g.flags.BoolVar(&g.computed, `computed`, false, `Whether to use a computed primary key`)
		g.connFlags = workload.NewConnFlags(&g.flags)
		return g
	},
}
// Meta implements the Generator interface; it returns the static workload
// metadata.
func (*jsonLoad) Meta() workload.Meta { return jsonLoadMeta }
// Flags implements the Flagser interface; it exposes the flags registered
// in jsonLoadMeta.New.
func (w *jsonLoad) Flags() workload.Flags { return w.flags }
// Hooks implements the Hookser interface. The validate hook rejects the
// incompatible combination of a computed primary key and an inverted index.
func (w *jsonLoad) Hooks() workload.Hooks {
	validate := func() error {
		if w.computed && w.inverted {
			return errors.Errorf("computed and inverted cannot be used together")
		}
		return nil
	}
	return workload.Hooks{Validate: validate}
}
// Tables implements the Generator interface: the single table `j`, with a
// schema selected by the inverted/computed flags (inverted wins when both
// are set) and optional pre-split points derived from hashed keys.
func (w *jsonLoad) Tables() []workload.Table {
	schema := jsonSchema
	switch {
	case w.inverted:
		schema = jsonSchemaWithInvertedIndex
	case w.computed:
		schema = jsonSchemaWithComputed
	}
	splitFn := func(splitIdx int) []interface{} {
		rng := rand.New(rand.NewSource(w.seed + int64(splitIdx)))
		g := newHashGenerator(&sequence{config: w, val: w.writeSeq})
		return []interface{}{int(g.hash(rng.Int63()))}
	}
	return []workload.Table{{
		Name:   `j`,
		Schema: schema,
		Splits: workload.Tuples(w.splits, splitFn),
	}}
}
// Ops implements the Opser interface. It prepares batched read and write
// statements once and returns one worker function per configured
// connection.
func (w *jsonLoad) Ops(
	ctx context.Context, urls []string, reg *histogram.Registry,
) (workload.QueryLoad, error) {
	sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls)
	if err != nil {
		return workload.QueryLoad{}, err
	}
	db, err := gosql.Open(`cockroach`, strings.Join(urls, ` `))
	if err != nil {
		return workload.QueryLoad{}, err
	}
	// Allow a maximum of concurrency+1 connections to the database.
	db.SetMaxOpenConns(w.connFlags.Concurrency + 1)
	db.SetMaxIdleConns(w.connFlags.Concurrency + 1)
	// Build `SELECT k, v FROM j WHERE k IN ($1, ..., $batch)`.
	var buf bytes.Buffer
	buf.WriteString(`SELECT k, v FROM j WHERE k IN (`)
	for i := 0; i < w.batchSize; i++ {
		if i > 0 {
			buf.WriteString(", ")
		}
		fmt.Fprintf(&buf, `$%d`, i+1)
	}
	buf.WriteString(`)`)
	readStmt, err := db.Prepare(buf.String())
	if err != nil {
		return workload.QueryLoad{}, err
	}
	buf.Reset()
	// Build the batched UPSERT; with a computed primary key only the JSON
	// value is supplied, otherwise (k, v) pairs are.
	if w.computed {
		buf.WriteString(`UPSERT INTO j (v) VALUES`)
	} else {
		buf.WriteString(`UPSERT INTO j (k, v) VALUES`)
	}
	for i := 0; i < w.batchSize; i++ {
		j := i * 2
		if i > 0 {
			buf.WriteString(", ")
		}
		if w.computed {
			fmt.Fprintf(&buf, ` ($%d)`, i+1)
		} else {
			fmt.Fprintf(&buf, ` ($%d, $%d)`, j+1, j+2)
		}
	}
	writeStmt, err := db.Prepare(buf.String())
	if err != nil {
		return workload.QueryLoad{}, err
	}
	ql := workload.QueryLoad{SQLDatabase: sqlDatabase}
	// One worker per connection; each worker gets its own generator over a
	// shared configuration so the keyspace is consistent.
	for i := 0; i < w.connFlags.Concurrency; i++ {
		op := jsonOp{
			config:    w,
			hists:     reg.GetHandle(),
			db:        db,
			readStmt:  readStmt,
			writeStmt: writeStmt,
		}
		seq := &sequence{config: w, val: w.writeSeq}
		if w.sequential {
			op.g = newSequentialGenerator(seq)
		} else {
			op.g = newHashGenerator(seq)
		}
		ql.WorkerFns = append(ql.WorkerFns, op.run)
	}
	return ql, nil
}
// jsonOp is a single worker: it owns the shared prepared statements and a
// key generator, and issues one operation per run call.
type jsonOp struct {
	config    *jsonLoad
	hists     *histogram.Histograms
	db        *gosql.DB
	readStmt  *gosql.Stmt
	writeStmt *gosql.Stmt
	g         keyGenerator
}
// run performs one operation: with probability readPercent a batched point
// read, otherwise a batched upsert of random JSON documents. Latency is
// recorded in the per-worker histograms.
func (o *jsonOp) run(ctx context.Context) error {
	if o.g.rand().Intn(100) < o.config.readPercent {
		args := make([]interface{}, o.config.batchSize)
		for i := 0; i < o.config.batchSize; i++ {
			args[i] = o.g.readKey()
		}
		start := timeutil.Now()
		rows, err := o.readStmt.Query(args...)
		if err != nil {
			return err
		}
		// Always release the rows; relying solely on Next() returning
		// false to auto-close leaks the connection if draining stops
		// early.
		defer rows.Close()
		// Drain the result set; the workload only measures latency.
		for rows.Next() {
		}
		elapsed := timeutil.Since(start)
		o.hists.Get(`read`).Record(elapsed)
		return rows.Err()
	}
	// Write path: one arg per column per row — (k, v), or just v when the
	// primary key is computed from the document.
	argCount := 2
	if o.config.computed {
		argCount = 1
	}
	args := make([]interface{}, argCount*o.config.batchSize)
	for i := 0; i < o.config.batchSize*argCount; i += argCount {
		j := i
		if !o.config.computed {
			args[j] = o.g.writeKey()
			j++
		}
		js, err := json.Random(o.config.complexity, o.g.rand())
		if err != nil {
			return err
		}
		if o.config.computed {
			// Wrap the random document so it carries the key the computed
			// column extracts.
			builder := json.NewObjectBuilder(2)
			builder.Add("key", json.FromInt64(o.g.writeKey()))
			builder.Add("data", js)
			js = builder.Build()
		}
		args[j] = js.String()
	}
	start := timeutil.Now()
	_, err := o.writeStmt.Exec(args...)
	elapsed := timeutil.Since(start)
	o.hists.Get(`write`).Record(elapsed)
	return err
}
// sequence is a shared, atomically advanced counter over the configured
// cycle length; it coordinates which key indexes have been written.
type sequence struct {
	config *jsonLoad
	val    int64
}

// write reserves and returns the next key index to write.
func (s *sequence) write() int64 {
	return (atomic.AddInt64(&s.val, 1) - 1) % s.config.cycleLength
}

// read returns the last key index that has been written. Note that the returned
// index might not actually have been written yet, so a read operation cannot
// require that the key is present.
func (s *sequence) read() int64 {
	return atomic.LoadInt64(&s.val) % s.config.cycleLength
}
// keyGenerator generates read and write keys. Read keys may not yet exist and
// write keys may already exist.
type keyGenerator interface {
	// writeKey returns the next key to write.
	writeKey() int64
	// readKey returns a key to read; it may not have been written yet.
	readKey() int64
	// rand exposes the generator's random source.
	rand() *rand.Rand
}
// hashGenerator spreads sequential indexes across the keyspace by hashing
// them (SHA-1 of index plus seed).
type hashGenerator struct {
	seq    *sequence
	random *rand.Rand
	hasher hash.Hash
	buf    [sha1.Size]byte
}

// newHashGenerator returns a hashGenerator seeded from the workload config.
func newHashGenerator(seq *sequence) *hashGenerator {
	return &hashGenerator{
		seq:    seq,
		random: rand.New(rand.NewSource(seq.config.seed)),
		hasher: sha1.New(),
	}
}

// hash maps v to a pseudo-random key: SHA-1 of (v, seed), reinterpreting
// the first 8 bytes of the digest as an int64.
func (g *hashGenerator) hash(v int64) int64 {
	binary.BigEndian.PutUint64(g.buf[:8], uint64(v))
	binary.BigEndian.PutUint64(g.buf[8:16], uint64(g.seq.config.seed))
	g.hasher.Reset()
	_, _ = g.hasher.Write(g.buf[:16])
	// Sum writes the digest into buf without allocating.
	g.hasher.Sum(g.buf[:0])
	return int64(binary.BigEndian.Uint64(g.buf[:8]))
}

// writeKey hashes the next sequence index.
func (g *hashGenerator) writeKey() int64 {
	return g.hash(g.seq.write())
}

// readKey hashes a uniformly random already-written index (0 when nothing
// has been written yet).
func (g *hashGenerator) readKey() int64 {
	v := g.seq.read()
	if v == 0 {
		return 0
	}
	return g.hash(g.random.Int63n(v))
}

// rand returns the generator's random source.
func (g *hashGenerator) rand() *rand.Rand {
	return g.random
}
// sequentialGenerator hands out keys in sequence-index order, without
// hashing.
type sequentialGenerator struct {
	seq    *sequence
	random *rand.Rand
}

// newSequentialGenerator returns a sequentialGenerator seeded from the
// workload config.
func newSequentialGenerator(seq *sequence) *sequentialGenerator {
	return &sequentialGenerator{
		seq:    seq,
		random: rand.New(rand.NewSource(seq.config.seed)),
	}
}

// writeKey returns the next sequence index directly.
func (g *sequentialGenerator) writeKey() int64 {
	return g.seq.write()
}

// readKey returns a uniformly random already-written index (0 when nothing
// has been written yet).
func (g *sequentialGenerator) readKey() int64 {
	v := g.seq.read()
	if v == 0 {
		return 0
	}
	return g.random.Int63n(v)
}

// rand returns the generator's random source.
func (g *sequentialGenerator) rand() *rand.Rand {
	return g.random
}
|
package Median_of_Two_Sorted_Arrays
/*
m
n
i = (1+m)/2
i+j = (n+m)/2
j=(n+m)/2-i
j>=0
n+m >= 2m
n>=m
left | right
1,2,3...i | i+1,.....m
1,2,3...j | j+1,.....n
*/
// findMedianSortedArrays returns the median of the union of two sorted
// slices by binary-searching the partition point in the shorter slice.
// Runs in O(log(min(m, n))).
func findMedianSortedArrays(nums1 []int, nums2 []int) float64 {
	// Keep a as the shorter slice so the search space is minimal.
	a, b := nums1, nums2
	if len(a) > len(b) {
		a, b = b, a
	}
	m, n := len(a), len(b)
	lo, hi := 0, m
	half := (m + n + 1) / 2
	for lo <= hi {
		i := (lo + hi) / 2
		j := half - i
		switch {
		case i < hi && b[j-1] > a[i]:
			// Partition in a is too far left.
			lo = i + 1
		case i > lo && a[i-1] > b[j]:
			// Partition in a is too far right.
			hi = i - 1
		default:
			// Perfect partition: everything left of it <= everything right.
			var left int
			switch {
			case i == 0:
				left = b[j-1]
			case j == 0:
				left = a[i-1]
			default:
				left = a[i-1]
				if b[j-1] > left {
					left = b[j-1]
				}
			}
			if (m+n)%2 == 1 {
				return float64(left)
			}
			var right int
			switch {
			case i == m:
				right = b[j]
			case j == n:
				right = a[i]
			default:
				right = b[j]
				if a[i] < right {
					right = a[i]
				}
			}
			return float64(left+right) / 2.0
		}
	}
	return 0.0
}
// Min returns the smaller of a and b.
func Min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// Max returns the larger of a and b.
func Max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
// See License for license information.
package main
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"encoding/json"
"io"
"time"
"github.com/pkg/errors"
)
// authTokenTTL is how long an issued auth token stays valid.
const authTokenTTL = 15 * time.Minute

// AuthToken ties a Mattermost user to a short-lived secret with an expiry.
type AuthToken struct {
	MattermostUserID string    `json:"mattermost_user_id,omitempty"`
	Secret           string    `json:"secret,omitempty"`
	Expires          time.Time `json:"expires,omitempty"`
}
// NewEncodedAuthToken builds an AuthToken for the given user and secret,
// valid for authTokenTTL, and returns it AES-GCM encrypted and
// base64url-encoded. Any failure is wrapped with a common message.
func (p *Plugin) NewEncodedAuthToken(mattermostUserID, secret string) (returnToken string, returnErr error) {
	defer func() {
		if returnErr != nil {
			returnErr = errors.WithMessage(returnErr, "failed to create auth token")
		}
	}()

	key, err := p.secretsStore.EnsureAuthTokenEncryptSecret()
	if err != nil {
		return "", err
	}

	token := AuthToken{
		MattermostUserID: mattermostUserID,
		Secret:           secret,
		Expires:          time.Now().Add(authTokenTTL),
	}
	payload, err := json.Marshal(token)
	if err != nil {
		return "", err
	}

	sealed, err := encrypt(payload, key)
	if err != nil {
		return "", err
	}
	return encode(sealed), nil
}
// ParseAuthToken decodes, decrypts, and unmarshals an encoded auth token,
// returning its user ID and secret. Expired tokens are rejected; any
// failure is wrapped with a common message.
func (p *Plugin) ParseAuthToken(encoded string) (mattermostUserID, tokenSecret string, returnErr error) {
	defer func() {
		if returnErr != nil {
			returnErr = errors.WithMessage(returnErr, "failed to parse auth token")
		}
	}()

	key, err := p.secretsStore.EnsureAuthTokenEncryptSecret()
	if err != nil {
		return "", "", err
	}
	raw, err := decode(encoded)
	if err != nil {
		return "", "", err
	}
	payload, err := decrypt(raw, key)
	if err != nil {
		return "", "", err
	}
	var t AuthToken
	if err := json.Unmarshal(payload, &t); err != nil {
		return "", "", err
	}
	if t.Expires.Before(time.Now()) {
		return "", "", errors.New("expired token")
	}
	return t.MattermostUserID, t.Secret, nil
}
// encode returns the base64url (padded) encoding of encrypted.
func encode(encrypted []byte) string {
	return base64.URLEncoding.EncodeToString(encrypted)
}
// encrypt seals plain with AES-GCM under secret, returning nonce||ciphertext.
// An empty secret disables encryption and returns plain unchanged.
func encrypt(plain, secret []byte) ([]byte, error) {
	if len(secret) == 0 {
		return plain, nil
	}
	block, err := aes.NewCipher(secret)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	// Reserve capacity so Seal appends in place after the random nonce.
	nonce := make([]byte, gcm.NonceSize(), gcm.NonceSize()+len(plain)+gcm.Overhead())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}
	return gcm.Seal(nonce, nonce, plain, nil), nil
}
// decode reverses encode, returning the raw bytes of a base64url string.
func decode(encoded string) ([]byte, error) {
	return base64.URLEncoding.DecodeString(encoded)
}
// decrypt opens an AES-GCM token of the form nonce||ciphertext under
// secret. An empty secret disables decryption and returns the input
// unchanged.
func decrypt(encrypted, secret []byte) ([]byte, error) {
	if len(secret) == 0 {
		return encrypted, nil
	}
	block, err := aes.NewCipher(secret)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	ns := gcm.NonceSize()
	if len(encrypted) < ns {
		return nil, errors.New("token too short")
	}
	return gcm.Open(nil, encrypted[:ns], encrypted[ns:], nil)
}
|
// The MIT License (MIT)
//
// Copyright (c) 2018 xgfone
// Copyright (c) 2017 LabStack
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package ship
import (
"bytes"
"encoding/json"
"encoding/xml"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
//////////////////////////////////////////////////////////////////////////////
// testBindOkay binds the request body r (declared as ctype) into a user
// and asserts the expected field values; any bind error fails the test.
func testBindOkay(t *testing.T, r io.Reader, ctype string) {
	req := httptest.NewRequest(http.MethodPost, "/", r)
	rec := httptest.NewRecorder()
	ctx := New().NewContext(req, rec)
	req.Header.Set(HeaderContentType, ctype)
	u := new(user)
	err := ctx.Bind(u)
	if err == nil {
		assert.Equal(t, 1, u.ID)
		assert.Equal(t, "Jon Snow", u.Name)
	} else {
		t.Fail()
	}
}
// testBindError binds the request body r (declared as ctype) into a user
// and asserts that the resulting error has the expected type; only the
// error's type is checked, not its message.
func testBindError(t *testing.T, r io.Reader, ctype string, expectedInternal error) {
	req := httptest.NewRequest(http.MethodPost, "/", r)
	rec := httptest.NewRecorder()
	ctx := New().NewContext(req, rec)
	req.Header.Set(HeaderContentType, ctype)
	u := new(user)
	err := ctx.Bind(u)
	assert.IsType(t, expectedInternal, err)
}
type (
	// bindTestStruct covers every scalar kind plus pointer variants,
	// unexported and absent fields, and UnmarshalBind implementors.
	bindTestStruct struct {
		I       int
		PtrI    *int
		I8      int8
		PtrI8   *int8
		I16     int16
		PtrI16  *int16
		I32     int32
		PtrI32  *int32
		I64     int64
		PtrI64  *int64
		UI      uint
		PtrUI   *uint
		UI8     uint8
		PtrUI8  *uint8
		UI16    uint16
		PtrUI16 *uint16
		UI32    uint32
		PtrUI32 *uint32
		UI64    uint64
		PtrUI64 *uint64
		B       bool
		PtrB    *bool
		F32     float32
		PtrF32  *float32
		F64     float64
		PtrF64  *float64
		S       string
		PtrS    *string
		cantSet string // unexported: a binder cannot set it
		DoesntExist string
		T           Timestamp
		Tptr        *Timestamp
		SA          StringArray
	}
	// Timestamp is a time.Time that implements UnmarshalBind via RFC 3339.
	Timestamp time.Time
	// TA is a slice of Timestamps.
	TA []Timestamp
	// StringArray implements UnmarshalBind from a comma-separated value.
	StringArray []string
	// Struct implements UnmarshalBind by storing the raw source in Foo.
	Struct struct {
		Foo string
	}
)
// user is the canonical bind target used across the tests; the tags cover
// all four supported sources.
type user struct {
	ID   int    `json:"id" xml:"id" form:"id" query:"id"`
	Name string `json:"name" xml:"name" form:"name" query:"name"`
}
// Test fixtures: the same user encoded in several formats, plus malformed
// payloads exercising the error paths.
const (
	userJSON                    = `{"id":1,"name":"Jon Snow"}`
	userXML                     = `<user><id>1</id><name>Jon Snow</name></user>`
	userForm                    = `id=1&name=Jon Snow`
	invalidContent              = "invalid content"
	userJSONInvalidType         = `{"id":"1","name":"Jon Snow"}`
	userXMLConvertNumberError   = `<user><id>Number one</id><name>Jon Snow</name></user>`
	userXMLUnsupportedTypeError = `<user><>Number one</><name>Jon Snow</name></user>`
)
// UnmarshalBind parses src as an RFC 3339 timestamp. Note that *t is
// assigned even when parsing fails (the zero time).
func (t *Timestamp) UnmarshalBind(src string) error {
	ts, err := time.Parse(time.RFC3339, src)
	*t = Timestamp(ts)
	return err
}
// UnmarshalBind splits src on commas into the array; it never fails.
func (a *StringArray) UnmarshalBind(src string) error {
	*a = StringArray(strings.Split(src, ","))
	return nil
}
// UnmarshalBind stores the raw source string in Foo; it never fails.
func (s *Struct) UnmarshalBind(src string) error {
	*s = Struct{
		Foo: src,
	}
	return nil
}
// TestBindJSON covers the JSON happy path plus syntax and type errors.
func TestBindJSON(t *testing.T) {
	testBindOkay(t, strings.NewReader(userJSON), MIMEApplicationJSON)
	testBindError(t, strings.NewReader(invalidContent), MIMEApplicationJSON,
		&json.SyntaxError{})
	testBindError(t, strings.NewReader(userJSONInvalidType),
		MIMEApplicationJSON, &json.UnmarshalTypeError{})
}
// TestBindXML covers the XML happy path and error cases for both the
// application/xml and text/xml content types.
func TestBindXML(t *testing.T) {
	testBindOkay(t, strings.NewReader(userXML), MIMEApplicationXML)
	testBindError(t, strings.NewReader(invalidContent), MIMEApplicationXML, ErrMissingContentType)
	testBindError(t, strings.NewReader(userXMLConvertNumberError), MIMEApplicationXML, &strconv.NumError{})
	testBindError(t, strings.NewReader(userXMLUnsupportedTypeError), MIMEApplicationXML, &xml.SyntaxError{})
	testBindOkay(t, strings.NewReader(userXML), MIMETextXML)
	testBindError(t, strings.NewReader(invalidContent), MIMETextXML, ErrMissingContentType)
	testBindError(t, strings.NewReader(userXMLConvertNumberError), MIMETextXML, &strconv.NumError{})
	testBindError(t, strings.NewReader(userXMLUnsupportedTypeError), MIMETextXML, &xml.SyntaxError{})
}
// TestBindForm covers the form happy path and asserts that binding into a
// non-struct target (a slice) fails.
func TestBindForm(t *testing.T) {
	testBindOkay(t, strings.NewReader(userForm), MIMEApplicationForm)
	req := httptest.NewRequest(http.MethodPost, "/", strings.NewReader(userForm))
	rec := httptest.NewRecorder()
	ctx := New().NewContext(req, rec)
	req.Header.Set(HeaderContentType, MIMEApplicationForm)
	err := ctx.Bind(&[]struct{ Field string }{})
	if err == nil {
		t.Fail()
	}
}
// TestBindQueryParams binds exact-case query parameters into a user.
func TestBindQueryParams(t *testing.T) {
	req := httptest.NewRequest(http.MethodGet, "/?id=1&name=Jon+Snow", nil)
	rec := httptest.NewRecorder()
	ctx := New().NewContext(req, rec)
	u := new(user)
	err := ctx.BindQuery(u)
	if err == nil {
		assert.Equal(t, 1, u.ID)
		assert.Equal(t, "Jon Snow", u.Name)
	} else {
		t.Fail()
	}
}
// TestBindQueryParamsCaseInsensitive binds upper-cased query parameter
// names into the lower-cased tags.
func TestBindQueryParamsCaseInsensitive(t *testing.T) {
	req := httptest.NewRequest(http.MethodGet, "/?ID=1&NAME=Jon+Snow", nil)
	rec := httptest.NewRecorder()
	ctx := New().NewContext(req, rec)
	u := new(user)
	err := ctx.BindQuery(u)
	if err == nil {
		assert.Equal(t, 1, u.ID)
		assert.Equal(t, "Jon Snow", u.Name)
	} else {
		t.Fail()
	}
}
// TestBindQueryParamsCaseSensitivePrioritized checks that an exact-case
// query parameter wins over its differently-cased duplicate.
func TestBindQueryParamsCaseSensitivePrioritized(t *testing.T) {
	req := httptest.NewRequest(http.MethodGet, "/?id=1&ID=2&NAME=Jon+Snow&name=Jon+Doe", nil)
	rec := httptest.NewRecorder()
	ctx := New().NewContext(req, rec)
	u := new(user)
	err := ctx.BindQuery(u)
	if err == nil {
		assert.Equal(t, 1, u.ID)
		assert.Equal(t, "Jon Doe", u.Name)
	} else {
		t.Fail()
	}
}
// TestBindUnmarshalBind binds query parameters into fields whose types
// implement UnmarshalBind (value, slice, custom array, and struct forms).
func TestBindUnmarshalBind(t *testing.T) {
	req := httptest.NewRequest(http.MethodGet,
		"/?ts=2016-12-06T19:09:05Z&sa=one,two,three&ta=2016-12-06T19:09:05Z&ta=2016-12-06T19:09:05Z&ST=baz",
		nil)
	rec := httptest.NewRecorder()
	ctx := New().NewContext(req, rec)
	result := struct {
		T  Timestamp   `query:"ts"`
		TA []Timestamp `query:"ta"`
		SA StringArray `query:"sa"`
		ST Struct
	}{}
	err := ctx.Bind(&result)
	ts := Timestamp(time.Date(2016, 12, 6, 19, 9, 5, 0, time.UTC))
	if err == nil {
		assert.Equal(t, ts, result.T)
		assert.Equal(t, StringArray([]string{"one", "two", "three"}), result.SA)
		assert.Equal(t, []Timestamp{ts, ts}, result.TA)
		assert.Equal(t, Struct{"baz"}, result.ST)
	} else {
		// Previously a Bind error let the test pass silently; fail like
		// the sibling Bind tests do.
		t.Fail()
	}
}
// TestBindUnmarshalBindPtr binds a query parameter into a pointer field
// whose element type implements UnmarshalBind.
func TestBindUnmarshalBindPtr(t *testing.T) {
	req := httptest.NewRequest(http.MethodGet, "/?ts=2016-12-06T19:09:05Z", nil)
	rec := httptest.NewRecorder()
	ctx := New().NewContext(req, rec)
	result := struct {
		Tptr *Timestamp `query:"ts"`
	}{}
	err := ctx.BindQuery(&result)
	if err == nil {
		assert.Equal(t, Timestamp(time.Date(2016, 12, 6, 19, 9, 5, 0, time.UTC)), *result.Tptr)
	} else {
		t.Fail()
	}
}
// TestBindMultipartForm binds a multipart/form-data body into a user.
func TestBindMultipartForm(t *testing.T) {
	body := new(bytes.Buffer)
	mw := multipart.NewWriter(body)
	mw.WriteField("id", "1")
	mw.WriteField("name", "Jon Snow")
	mw.Close()
	testBindOkay(t, body, mw.FormDataContentType())
}
// TestBindUnsupportedMediaType asserts that invalid JSON content yields a
// JSON syntax error.
func TestBindUnsupportedMediaType(t *testing.T) {
	testBindError(t, strings.NewReader(invalidContent), MIMEApplicationJSON,
		&json.SyntaxError{})
}
// TestBindUnmarshalTypeError asserts the exact error message produced when
// a JSON string is bound into an int field.
func TestBindUnmarshalTypeError(t *testing.T) {
	body := bytes.NewBufferString(`{ "id": "text" }`)
	req := httptest.NewRequest(http.MethodPost, "/", body)
	req.Header.Set(HeaderContentType, MIMEApplicationJSON)
	rec := httptest.NewRecorder()
	ctx := New().NewContext(req, rec)
	u := new(user)
	err := ctx.Bind(u)
	assert.Equal(t, "json: cannot unmarshal string into Go struct field user.id of type int", err.Error())
}
|
package list
import (
"context"
"encoding/json"
"fmt"
"sort"
"github.com/loft-sh/devspace/cmd/flags"
"github.com/loft-sh/devspace/pkg/util/factory"
"github.com/loft-sh/devspace/pkg/util/log"
"github.com/loft-sh/devspace/pkg/util/message"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// varsCmd holds the flags for `devspace list vars`.
type varsCmd struct {
	*flags.GlobalFlags

	// Output selects the output format: "", "keyvalue", or "json".
	Output string
}
// newVarsCmd creates the `devspace list vars` subcommand and registers its
// --output flag.
func newVarsCmd(f factory.Factory, globalFlags *flags.GlobalFlags) *cobra.Command {
	cmd := &varsCmd{GlobalFlags: globalFlags}

	varsCmd := &cobra.Command{
		Use:   "vars",
		Short: "Lists the vars in the active config",
		Long: `
#######################################################
############### devspace list vars ####################
#######################################################
Lists the defined vars in the devspace config with their
values
#######################################################
	`,
		Args: cobra.NoArgs,
		RunE: func(cobraCmd *cobra.Command, args []string) error {
			return cmd.RunListVars(f, cobraCmd, args)
		}}

	varsCmd.Flags().StringVarP(&cmd.Output, "output", "o", "", "The output format of the command. Can be either empty, keyvalue or json")
	return varsCmd
}
// RunListVars runs the list vars command logic: it loads the active config
// and prints its variables as a table, key=value lines, or JSON depending
// on the --output flag.
func (cmd *varsCmd) RunListVars(f factory.Factory, cobraCmd *cobra.Command, args []string) error {
	logger := f.GetLog()
	// Set config root
	configLoader, err := f.NewConfigLoader(cmd.ConfigPath)
	if err != nil {
		return err
	}
	configExists, err := configLoader.SetDevSpaceRoot(logger)
	if err != nil {
		return err
	}
	if !configExists {
		return errors.New(message.ConfigNotFound)
	}

	// Fill variables config
	config, err := configLoader.Load(context.Background(), nil, cmd.ToConfigOptions(), logger)
	if err != nil {
		return err
	}

	switch cmd.Output {
	case "":
		// Specify the table column names
		headerColumnNames := []string{
			"Variable",
			"Value",
		}
		varRow := make([][]string, 0, len(config.Variables()))
		for name, value := range config.Variables() {
			varRow = append(varRow, []string{
				name,
				fmt.Sprintf("%v", value),
			})
		}

		// No variable found
		if len(varRow) == 0 {
			logger.Info("No variables found")
			return nil
		}

		// Sort rows by variable name exactly once; the previous code
		// redundantly sorted them twice with the same comparator.
		sort.Slice(varRow, func(i, j int) bool {
			return varRow[i][0] < varRow[j][0]
		})
		log.PrintTable(logger, headerColumnNames, varRow)
	case "keyvalue":
		for name, value := range config.Variables() {
			fmt.Printf("%s=%v\n", name, value)
		}
	case "json":
		out, err := json.MarshalIndent(config.Variables(), "", " ")
		if err != nil {
			return err
		}
		fmt.Print(string(out))
	default:
		return errors.Errorf("unsupported value for flag --output: %s", cmd.Output)
	}
	return nil
}
|
package base
import (
"github.com/xuperchain/xupercore/kernel/common/xcontext"
cctx "github.com/xuperchain/xupercore/kernel/consensus/context"
)
// ConsensusImplInterface defines the interface a consensus instance must
// implement; concrete consensus implementations in bcs provide it.
type ConsensusImplInterface interface {
	// CompeteMaster returns whether this node is the miner and whether a
	// block sync is needed first.
	CompeteMaster(height int64) (bool, bool, error)
	// CheckMinerMatch checks whether the block is valid.
	CheckMinerMatch(ctx xcontext.XContext, block cctx.BlockInterface) (bool, error)
	// ProcessBeforeMiner runs before mining starts; it returns a truncate
	// target (if trimming is needed), the consensus storage to write, and
	// an error.
	ProcessBeforeMiner(timestamp int64) ([]byte, []byte, error)
	// CalculateBlock performs the consensus work needed while mining, e.g.
	// producing the existence proof for PoW.
	CalculateBlock(block cctx.BlockInterface) error
	// ProcessConfirmBlock handles processing after a block is confirmed.
	ProcessConfirmBlock(block cctx.BlockInterface) error
	// GetConsensusStatus returns the chain's consensus status.
	GetConsensusStatus() (ConsensusStatus, error)
	// Stop suspends the consensus instance. Note: if the instance finds the
	// bound block structure invalid, it stops itself and panics.
	Stop() error
	// Start starts the consensus instance.
	Start() error
	// ParseConsensusStorage parses the consensus-specific storage a
	// consensus keeps inside the block; highlighted here as an interface.
	ParseConsensusStorage(block cctx.BlockInterface) (interface{}, error)
}
// The tdpos query interface stays unchanged.
// queryConsensusStatus
// ConsensusStatus describes the status a consensus instance must report;
// each concrete consensus implements these accessors.
type ConsensusStatus interface {
	// GetVersion returns the consensus version number.
	GetVersion() int64
	// GetConsensusBeginInfo returns the start height of this pluggable
	// consensus item.
	GetConsensusBeginInfo() int64
	// GetStepConsensusIndex returns the item's index within the consensus
	// slice.
	GetStepConsensusIndex() int
	// GetConsensusName returns the consensus type name.
	GetConsensusName() string
	// GetCurrentTerm returns the current state-machine term.
	GetCurrentTerm() int64
	// GetCurrentValidatorsInfo returns information about current miners.
	GetCurrentValidatorsInfo() []byte
}
|
package main
import "fmt"
// This is the fan-in pattern, where two or more channels combine into one
// single channel.
// fanindriver merges two counting channels and prints the first ten values
// received.
// NOTE(review): counts is defined elsewhere in this package — presumably a
// generator returning <-chan string; confirm against the full file.
func fanindriver() {
	ck := fanin(counts("amy"), counts("rose"))
	// amychan := counts("amy")
	// rosechan := counts("rose")
	for i := 0; i < 10; i++ {
		fmt.Println(<-ck)
	}
}
// fanin merges two string channels into one: values arriving on either
// input are forwarded to the returned channel in arrival order. The
// forwarding goroutines run forever (the inputs are never expected to
// close).
func fanin(c, k <-chan string) <-chan string {
	merged := make(chan string)
	forward := func(src <-chan string) {
		for {
			merged <- <-src
		}
	}
	go forward(c)
	go forward(k)
	return merged
}
/*
rose: 0
rose: 1
amy: 0
amy: 1
amy: 2
amy: 3
rose: 2
amy: 4
rose: 3
rose: 4
*/
|
package main
import (
"fmt"
)
// START OMIT
// theMine is the sample input: a mix of rock and ore.
var theMine = []string{"rock", "ore", "ore", "rock", "ore"}
// finder scans the mine, logging every item it sees, and returns only the
// pieces of ore it found.
func finder(mine []string) []string {
	found := make([]string, 0, len(mine))
	for _, item := range mine {
		fmt.Printf("from mine: %v\n", item)
		if item == "ore" {
			found = append(found, item)
		}
	}
	return found
}
// miner converts each piece of found ore into a mined-ore label, numbered by
// its position in the input slice.
func miner(foundOre []string) []string {
	mined := make([]string, len(foundOre))
	for i := range foundOre {
		mined[i] = fmt.Sprintf("minedOre%d", i)
	}
	return mined
}
func main() {
foundOre := finder(theMine)
minedOre := miner(foundOre)
fmt.Println(minedOre)
}
// END OMIT
|
/*
* Wodby API Client
*
* Wodby Developer Documentation https://wodby.com/docs/dev
*
* API version: 3.0.18
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package client
// ResponseTaskApp is the API response payload pairing an application with the
// task operating on it.
type ResponseTaskApp struct {
	App *App `json:"app"`
	Task *Task `json:"task"`
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexecproj
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
// TestDefaultCmpProjOps exercises the default (datum-backed) comparison
// projection operators on expressions without a specialized vectorized
// implementation: ILIKE with both column and constant patterns, and
// IS DISTINCT FROM with column and tuple-constant operands. Each case also
// pins down NULL propagation in the output column.
func TestDefaultCmpProjOps(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	defer evalCtx.Stop(ctx)
	flowCtx := &execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg: &execinfra.ServerConfig{
			Settings: st,
		},
	}
	testCases := []struct {
		cmpExpr      string
		inputTypes   []*types.T
		inputTuples  colexectestutils.Tuples
		outputTuples colexectestutils.Tuples
	}{
		{
			cmpExpr:    "@1 ILIKE @2",
			inputTypes: []*types.T{types.String, types.String},
			inputTuples: colexectestutils.Tuples{
				{"abc", "ABC"},
				{"a42", "A%"},
				{nil, "%A"},
				{"abc", "A%b"},
			},
			outputTuples: colexectestutils.Tuples{
				{"abc", "ABC", true},
				{"a42", "A%", true},
				{nil, "%A", nil},
				{"abc", "A%b", false},
			},
		},
		{
			cmpExpr:    "@1 ILIKE 'A%'",
			inputTypes: []*types.T{types.String},
			inputTuples: colexectestutils.Tuples{
				{"abc"},
				{"a42"},
				{nil},
				{"def"},
			},
			outputTuples: colexectestutils.Tuples{
				{"abc", true},
				{"a42", true},
				{nil, nil},
				{"def", false},
			},
		},
		{
			cmpExpr:    "@1 IS DISTINCT FROM @2",
			inputTypes: []*types.T{types.String, types.String},
			inputTuples: colexectestutils.Tuples{
				{"abc", "abc"},
				{nil, nil},
				{"abc", "ab"},
			},
			outputTuples: colexectestutils.Tuples{
				{"abc", "abc", false},
				{nil, nil, false},
				{"abc", "ab", true},
			},
		},
		{
			// Tuple-vs-column comparison; note NULL IS DISTINCT FROM a
			// non-NULL tuple is true.
			cmpExpr:    "(1, 2) IS DISTINCT FROM @1",
			inputTypes: []*types.T{types.MakeTuple([]*types.T{types.Int, types.Int})},
			inputTuples: colexectestutils.Tuples{
				{"(1, NULL)"},
				{nil},
				{"(1, 2)"},
			},
			outputTuples: colexectestutils.Tuples{
				{"(1, NULL)", true},
				{nil, true},
				{"(1, 2)", false},
			},
		},
	}
	for _, c := range testCases {
		t.Run(c.cmpExpr, func(t *testing.T) {
			colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{c.inputTuples}, [][]*types.T{c.inputTypes}, c.outputTuples, colexectestutils.OrderedVerifier,
				func(input []colexecop.Operator) (colexecop.Operator, error) {
					return colexectestutils.CreateTestProjectingOperator(
						ctx, flowCtx, input[0], c.inputTypes,
						c.cmpExpr, false /* canFallbackToRowexec */, testMemAcc,
					)
				})
		})
	}
}
// BenchmarkDefaultCmpProjOp measures the default comparison projection
// operator (IS DISTINCT FROM) across all combinations of selection-vector
// usage and NULL presence in the input.
func BenchmarkDefaultCmpProjOp(b *testing.B) {
	defer log.Scope(b).Close(b)
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	defer evalCtx.Stop(ctx)
	flowCtx := &execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg: &execinfra.ServerConfig{
			Settings: st,
		},
	}
	for _, useSel := range []bool{false, true} {
		for _, hasNulls := range []bool{false, true} {
			inputTypes := []*types.T{types.String, types.String}
			name := fmt.Sprintf("IS DISTINCT FROM/useSel=%t/hasNulls=%t", useSel, hasNulls)
			benchmarkProjOp(b, name, func(source *colexecop.RepeatableBatchSource) (colexecop.Operator, error) {
				return colexectestutils.CreateTestProjectingOperator(
					ctx, flowCtx, source, inputTypes,
					"@1 IS DISTINCT FROM @2", false /* canFallbackToRowexec */, testMemAcc,
				)
			}, inputTypes, useSel, hasNulls)
		}
	}
}
|
package tracer
import (
"fmt"
"testing"
)
// tracer is the package-level Tracer shared by every test in this file. The
// tests mutate its state (On/Off/Detailed/Reset), so they are order-sensitive.
var tracer *Tracer = New()

// noop is an intentionally empty placeholder; it is not currently referenced.
func noop(t *testing.T) {
}

// use deeper call to demo recursive calls
// deeper emits one scoped trace per level, recursing n times at the given
// starting depth.
func deeper(depth int, n int) {
	defer tracer.ScopedTrace(fmt.Sprintf("depth %4d %4d", depth, n))()
	if n > 0 {
		deeper(depth, n-1)
	}
}
// recursive_trace demonstrates mixed recursion under tracing: for odd n it
// recurses with n-1 and then drives deeper from the tracer's current depth;
// for even positive n it recurses with n-2.
func recursive_trace(n int) {
	defer tracer.ScopedTrace(fmt.Sprintf(">>%d<<", n))()
	if n > 0 {
		if n%2 == 1 {
			recursive_trace(n - 1)
			deeper(tracer.depth, n+1)
		} else {
			recursive_trace(n - 2)
		}
	}
}
// TestTracerRecurseChain drives a short recursion with the whole fluent chain
// (Detailed → On → ScopedTrace) evaluated inside a single deferred expression.
func TestTracerRecurseChain(t *testing.T) {
	fmt.Println()
	defer tracer.Detailed(true).On().ScopedTrace()()
	if tracer != nil {
		recursive_trace(1)
	}
}

// TestTracerRecurse traces a deeper recursion with whatever detail setting is
// currently active on the shared tracer.
func TestTracerRecurse(t *testing.T) {
	fmt.Println()
	defer tracer.ScopedTrace()()
	if tracer != nil {
		recursive_trace(3)
	}
}

// TestTracerRecurseDetail enables detailed output before tracing the same
// recursion as TestTracerRecurse.
func TestTracerRecurseDetail(t *testing.T) {
	tracer.Detailed(true)
	fmt.Println()
	defer tracer.ScopedTrace()()
	if tracer != nil {
		recursive_trace(3)
	}
}

// TestTracerOff verifies that a scoped trace can be taken while the tracer is
// switched off.
func TestTracerOff(t *testing.T) {
	fmt.Println()
	if tracer != nil {
		tracer.Off()
		defer tracer.ScopedTrace()()
	}
}

// TestTracerOn re-enables the tracer and takes a single shallow trace.
func TestTracerOn(t *testing.T) {
	fmt.Println()
	tracer.On()
	defer tracer.ScopedTrace()()
	deeper(tracer.depth, 0)
}

// TestTracerDetailed exercises detailed mode with a single shallow trace.
func TestTracerDetailed(t *testing.T) {
	fmt.Println()
	tracer.Detailed(true)
	tracer.On()
	defer tracer.ScopedTrace()()
	deeper(tracer.depth, 0)
}

// TestTracerOnOffTracer toggles the tracer on and off around scoped traces.
// Note: the defers created inside the bare {} blocks run at function return,
// not at the end of the block.
func TestTracerOnOffTracer(t *testing.T) {
	fmt.Println()
	tracer.Reset()
	tracer.On()
	{
		defer tracer.ScopedTrace("scoped", "in", "braces")()
		tracer.Reset()
	}
	defer tracer.ScopedTrace("scoped", "by", "func()")()
	tracer.Reset()
	tracer.Off()
	{
		defer tracer.ScopedTrace("scoped", "in", "braces")()
		tracer.Reset()
	}
	tracer.Off()
}
// TestTracerOnOffOnDetailTracer stacks several scoped traces (with detailed
// output on); all the defers unwind in reverse order at function return.
func TestTracerOnOffOnDetailTracer(t *testing.T) {
	fmt.Println()
	tracer.Reset()
	tracer.On()
	tracer.Detailed(true)
	defer tracer.ScopedTrace("scoped", "fnctn", "braces")()
	{
		defer tracer.ScopedTrace("scoped", "in", "braces")()
	}
	defer tracer.ScopedTrace("scoped", "by", "func()")()
	{
		defer tracer.ScopedTrace("scoped", "in", "braces")()
	}
}

// TestTracerOnOffOnTracer is the same defer-stacking scenario without
// toggling detailed mode.
func TestTracerOnOffOnTracer(t *testing.T) {
	fmt.Println()
	tracer.Reset()
	tracer.On()
	defer tracer.ScopedTrace("scoped", "fnctn", "braces")()
	{
		defer tracer.ScopedTrace("scoped", "in", "braces")()
	}
	defer tracer.ScopedTrace("scoped", "by", "func()")()
	{
		defer tracer.ScopedTrace("scoped", "in", "braces")()
	}
}

// TraceFuncScope wraps a single scoped trace in its own function so the defer
// fires as soon as the call returns.
func TraceFuncScope(i int) {
	defer tracer.ScopedTrace("i", i)()
}

// TestTraceLoopFuncScope traces five iterations via TraceFuncScope, so each
// iteration's trace closes immediately.
func TestTraceLoopFuncScope(t *testing.T) {
	fmt.Println()
	tracer.Reset()
	tracer.Detailed(true).On()
	for i := 0; i < 5; i++ {
		TraceFuncScope(i + 1)
	}
}

// TestTraceLoopScope contrasts function-scoped traces with defers created
// directly in a loop: the middle loop's deferred closers only run when the
// test function returns (in reverse order), not per iteration.
func TestTraceLoopScope(t *testing.T) {
	fmt.Println()
	tracer.Reset()
	tracer.Detailed(true).On()
	for i := 0; i < 5; i++ {
		TraceFuncScope(i + 1)
	}
	for i := 0; i < 5; i++ {
		defer tracer.ScopedTrace("i", i+1)()
	}
	for i := 0; i < 5; i++ {
		TraceFuncScope(i + 1)
	}
}
|
package monitor
import (
"yunion.io/x/jsonutils"
"yunion.io/x/onecloud/pkg/mcclient/options"
)
type AlertRecordListOptions struct {
options.BaseListOptions
AlertId string `help:"id of alert"`
Level string `help:"alert level"`
State string `help:"alert state"`
ResTypes []string `json:"res_types"`
Alerting bool `json:"alerting"`
}
func (o *AlertRecordListOptions) Params() (jsonutils.JSONObject, error) {
return options.ListStructToParams(o)
}
type AlertRecordShowOptions struct {
ID string `help:"ID of Metric " json:"-"`
}
func (o *AlertRecordShowOptions) Params() (jsonutils.JSONObject, error) {
return options.StructToParams(o)
}
func (o *AlertRecordShowOptions) GetId() string {
return o.ID
}
type AlertRecordTotalOptions struct {
ID string `help:"total-alert" json:"-"`
options.BaseListOptions
}
func (o *AlertRecordTotalOptions) Params() (jsonutils.JSONObject, error) {
return options.ListStructToParams(o)
}
func (o *AlertRecordTotalOptions) GetId() string {
return o.ID
}
|
package tasker
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
)
// Manifest describes a task package — identity, runtime settings and
// free-form configuration — as stored in the package's .tasker file.
type Manifest struct {
	Name string `json:"name"`
	Version string `json:"version"`
	RunAs string `json:"run_as"`
	LogFile string `json:"log_file"`
	Readme string `json:"readme"`
	Env map[string]string `json:"environment"`
	Config map[string]interface{} `json:"config"`
}
// Filesystem layout used by Tasker; everything lives under /data/tasker.
const (
	// MANIFEST_NAME is the manifest filename expected at a task's root.
	MANIFEST_NAME = ".tasker"
	// TAR_PATH is where packaged task tarballs are stored.
	TAR_PATH = "/data/tasker/tars/"
	// INTERNAL_PATH holds Tasker's private bookkeeping files.
	INTERNAL_PATH = "/data/tasker/.internal"
	// DAEMON_PID_FILE records the PID of the running daemon.
	DAEMON_PID_FILE = "/data/tasker/.internal/PID"
)
var (
	// debug toggles verbose diagnostic output; read via DebugPrintf.
	// Fixed typo in the flag help text ("bebug" -> "debug").
	debug = flag.Bool("debug", false, "Whether or not debug is on")
)
// getManifestBytes reads the raw manifest file (MANIFEST_NAME) at the root of
// the given task directory.
func getManifestBytes(taskPath string) ([]byte, error) {
	return ioutil.ReadFile(fmt.Sprintf("%s/%s", taskPath, MANIFEST_NAME))
}
// getInput reads a single whitespace-delimited token from stdin. The Scanln
// error is deliberately ignored, so an empty line yields "".
func getInput() string {
	var response string
	fmt.Scanln(&response)
	return response
}
// Setup prepares Tasker's on-disk directory layout, exiting the process via
// Fatalize on any failure.
// NOTE(review): shelling out to `mkdir -p` only works where a mkdir binary
// exists; os.MkdirAll would be the portable equivalent — confirm before
// changing, since these calls are this file's only use of os/exec.
func Setup() {
	DebugPrintf("Setting up Tasker...")
	Fatalize(exec.Command("mkdir", "-p", TAR_PATH).Run())
	Fatalize(exec.Command("mkdir", "-p", INTERNAL_PATH).Run())
}
// Fatalize prints the error and terminates the process with a non-zero exit
// code; it is a no-op for a nil error.
func Fatalize(err error) {
	if err == nil {
		return
	}
	fmt.Printf("Encountered an error: %s\n", err)
	os.Exit(1)
}
// DebugPrintf prints a formatted message only when the -debug flag is set.
func DebugPrintf(format string, a ...interface{}) {
	if *debug {
		// Forward the variadic args with `a...`; the previous bare `a`
		// passed the whole slice as a single operand, so format verbs were
		// filled with "[...]" instead of the individual values.
		fmt.Printf(format, a...)
	}
}
// TellDaemon writes command to the daemon's stdin via /proc/<pid>/fd/0, using
// the PID recorded in DAEMON_PID_FILE. It exits the process when the daemon
// cannot be reached (no PID file or no such proc entry).
// NOTE(review): the WriteFile error is ignored, so a failed write is silent —
// confirm whether that is intentional.
func TellDaemon(command string) {
	pid, err := ioutil.ReadFile(DAEMON_PID_FILE)
	if err != nil {
		fmt.Printf("Could not communicate with daemon... Exiting.")
		os.Exit(1)
	}
	stdinPath := fmt.Sprintf("/proc/%s/fd/0", string(pid))
	if _, err = os.Stat(stdinPath); os.IsNotExist(err) {
		fmt.Printf("Could not communicate with daemon... Exiting.")
		os.Exit(1)
	}
	ioutil.WriteFile(stdinPath, []byte(command), 0777)
}
// GetManifest reads and unmarshals the manifest for the task at taskPath. It
// returns nil (after a debug log) when the file is missing or is not valid
// JSON.
func GetManifest(taskPath string) *Manifest {
	manifestBytes, err := getManifestBytes(taskPath)
	if err != nil {
		DebugPrintf("Error while getting manifest bytes: %s\n", err)
		return nil
	}
	manifest := &Manifest{}
	err = json.Unmarshal(manifestBytes, manifest)
	if err != nil {
		DebugPrintf("Error while unmarshaling manifest: %s\n", err)
		return nil
	}
	return manifest
}
// GetOrCreateManifest loads the manifest for taskPath. If none is valid, it
// asks the user whether one should be created; any non-affirmative answer
// terminates the process.
func GetOrCreateManifest(taskPath string) *Manifest {
	manifest := GetManifest(taskPath)
	if manifest == nil {
		fmt.Printf("The task you specified does not have a valid manifest\n")
		fmt.Printf("Do you want us to make one for you? [y/N]\n")
		switch getInput() {
		case "y", "yes", "yea", "Y", "YES", "SURE", "YEAH", "YEA":
			// Affirmative: fall through to manifest creation below.
		default:
			// The original placed os.Exit(0) unreachably after a `break`
			// inside the affirmative case; a negative answer must abort
			// instead of silently continuing.
			os.Exit(0)
		}
		// TODO Create manifest
	}
	return manifest
}
|
package query
import (
"bytes"
"strconv"
"gophr.pm/gocql/gocql@3ac1aabebaf2705c6f695d4ef2c25ab6239e88b3"
)
// columnValueAssignment represents a value assignment for a specific column
// of a row. When parameterized is true the value is bound as a '?' query
// parameter; otherwise it is written into the CQL verbatim.
type columnValueAssignment struct {
	column string
	value string
	parameterized bool
}
// UpdateQueryBuilder constructs an UPDATE query.
type UpdateQueryBuilder struct {
	valueAssignments []columnValueAssignment
	conditions []*Condition
	table string
}
// Update starts constructing an UPDATE query against the given table.
func Update(table string) *UpdateQueryBuilder {
	return &UpdateQueryBuilder{
		table: table,
	}
}
// Set adds a parameterized column assignment to the update query and returns
// the builder for chaining.
func (qb *UpdateQueryBuilder) Set(column string, value string) *UpdateQueryBuilder {
	assignment := columnValueAssignment{
		column:        column,
		value:         value,
		parameterized: true,
	}
	qb.valueAssignments = append(qb.valueAssignments, assignment)
	return qb
}
// Increment increases the value of a counter column by the given amount. The
// assignment is rendered inline (e.g. "col=col+2") rather than parameterized.
func (qb *UpdateQueryBuilder) Increment(column string, amount int) *UpdateQueryBuilder {
	expr := column + "+" + strconv.Itoa(amount)
	qb.valueAssignments = append(qb.valueAssignments, columnValueAssignment{
		column:        column,
		value:         expr,
		parameterized: false,
	})
	return qb
}
// Where adds a condition to which all of the updated rows should adhere.
func (qb *UpdateQueryBuilder) Where(condition *Condition) *UpdateQueryBuilder {
	qb.conditions = append(qb.conditions, condition)
	return qb
}
// And is an alias for UpdateQueryBuilder.Where(condition), provided for
// readable chaining of multiple conditions.
func (qb *UpdateQueryBuilder) And(condition *Condition) *UpdateQueryBuilder {
	return qb.Where(condition)
}
// Create serializes the query and binds it to the given session. Parameters
// are collected in placeholder order: SET-clause values first, then
// WHERE-clause condition parameters.
func (qb *UpdateQueryBuilder) Create(session *gocql.Session) *gocql.Query {
	var (
		buffer bytes.Buffer
		parameters []interface{}
	)
	buffer.WriteString("update ")
	buffer.WriteString(DBKeyspaceName)
	buffer.WriteByte('.')
	buffer.WriteString(qb.table)
	buffer.WriteString(" set ")
	for i, valueAssignment := range qb.valueAssignments {
		if i > 0 {
			buffer.WriteByte(',')
		}
		buffer.WriteString(valueAssignment.column)
		buffer.WriteByte('=')
		if valueAssignment.parameterized {
			// Parameterized values become '?' and are bound at exec time.
			buffer.WriteByte('?')
			parameters = append(parameters, valueAssignment.value)
		} else {
			// Inline values (e.g. counter increments) are written verbatim.
			buffer.WriteString(valueAssignment.value)
		}
	}
	if qb.conditions != nil {
		buffer.WriteString(" where ")
		for i, cond := range qb.conditions {
			if i > 0 {
				buffer.WriteString(" and ")
			}
			if cond.hasParameter {
				parameters = append(parameters, cond.parameter)
			}
			buffer.WriteString(cond.expression)
		}
	}
	return session.Query(buffer.String(), parameters...)
}
|
/*
Copyright 2019 The xridge kubestone contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"io/ioutil"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/xridge/kubestone/api/v1alpha1"
batchv1 "k8s.io/api/batch/v1"
"k8s.io/apimachinery/pkg/types"
)
const (
	// fioCrBaseDir is the root of the fio sample CRs (kustomize base plus
	// overlays) exercised by this suite.
	fioCrBaseDir = samplesDir + "/fio"
)
// This suite builds every fio custom resource found under the samples
// directory (base plus each overlay), creates it in the e2e namespace, and
// verifies the resulting benchmark job completes successfully.
var _ = Describe("end to end test", func() {
	fioCrDirs := []string{fioCrBaseDir + "/base"}
	// Overlay directories are discovered at suite-construction time; a
	// missing overlays dir is a hard failure.
	fioOverlayContents, err := ioutil.ReadDir(fioCrBaseDir + "/overlays")
	if err != nil {
		Fail("Didn't find any fio CRs under " + fioCrBaseDir)
	}
	for _, fioOverlayContent := range fioOverlayContents {
		if fioOverlayContent.IsDir() {
			fioCrDirs = append(fioCrDirs, fioCrBaseDir+"/overlays/"+fioOverlayContent.Name())
		}
	}
	Describe("creating fio job from multiple CRs", func() {
		for _, fioCrDir := range fioCrDirs {
			splits := strings.Split(fioCrDir, "/")
			dirName := splits[len(splits)-1]
			// Derive a unique CR name per directory so runs don't collide.
			crName := "fio-" + strings.ReplaceAll(dirName, "_", "-")
			Context("when creating from cr", func() {
				It("should create fio-sample cr", func() {
					// Rename the sample CR on the fly before creating it.
					_, _, err := run(`bash -c "` +
						"kustomize build " + fioCrDir + " | " +
						"sed 's/name: fio-sample/name: " + crName + "/' | " +
						"kubectl create -n " + e2eNamespaceFio + ` -f -"`)
					Expect(err).To(BeNil())
				})
			})
			Context("the created job", func() {
				It("should finish in a pre-defined time", func() {
					timeout := 90
					cr := &v1alpha1.Fio{}
					// TODO: find the respective objects via the CR owner reference
					namespacedName := types.NamespacedName{
						Namespace: e2eNamespaceFio,
						Name: crName,
					}
					Eventually(func() bool {
						if err := client.Get(ctx, namespacedName, cr); err != nil {
							Fail("Unable to get fio CR: " + err.Error())
						}
						return !cr.Status.Running && cr.Status.Completed
					}, timeout).Should(BeTrue())
				})
				It("Should leave a successful job", func() {
					job := &batchv1.Job{}
					namespacedName := types.NamespacedName{
						Namespace: e2eNamespaceFio,
						Name: crName,
					}
					Expect(client.Get(ctx, namespacedName, job)).To(Succeed())
					Expect(job.Status.Succeeded).To(Equal(int32(1)))
				})
			})
		}
	})
})
|
package webapi
import (
"time"
"github.com/decred/dcrd/dcrec"
"github.com/decred/dcrd/dcrutil/v3"
"github.com/decred/dcrd/txscript/v3"
"github.com/decred/vspd/database"
"github.com/decred/vspd/rpc"
"github.com/gin-gonic/gin"
)
// payFee is the handler for "POST /payfee". It validates a fee-payment
// request (voting key, vote choices, fee transaction), checks that the fee tx
// pays enough to the ticket's expected fee address, records the details on
// the ticket, and broadcasts the fee tx if the ticket is already confirmed.
func payFee(c *gin.Context) {
	// Get values which have been added to context by middleware.
	ticket := c.MustGet("Ticket").(database.Ticket)
	knownTicket := c.MustGet("KnownTicket").(bool)
	dcrdClient := c.MustGet("DcrdClient").(*rpc.DcrdRPC)
	if cfg.VspClosed {
		sendError(errVspClosed, c)
		return
	}
	if !knownTicket {
		log.Warnf("Unknown ticket from %s", c.ClientIP())
		sendError(errUnknownTicket, c)
		return
	}
	var payFeeRequest PayFeeRequest
	if err := c.ShouldBindJSON(&payFeeRequest); err != nil {
		log.Warnf("Bad payfee request from %s: %v", c.ClientIP(), err)
		sendErrorWithMsg(err.Error(), errBadRequest, c)
		return
	}
	// Respond early if we already have the fee tx for this ticket.
	if ticket.FeeTxStatus == database.FeeReceieved ||
		ticket.FeeTxStatus == database.FeeBroadcast ||
		ticket.FeeTxStatus == database.FeeConfirmed {
		log.Warnf("Fee tx already received from %s: ticketHash=%s", c.ClientIP(), ticket.Hash)
		sendError(errFeeAlreadyReceived, c)
		return
	}
	// Get ticket details.
	rawTicket, err := dcrdClient.GetRawTransaction(ticket.Hash)
	if err != nil {
		log.Errorf("Could not retrieve tx %s for %s: %v", ticket.Hash, c.ClientIP(), err)
		sendError(errInternalError, c)
		return
	}
	// Ensure this ticket is eligible to vote at some point in the future.
	canVote, err := dcrdClient.CanTicketVote(rawTicket, ticket.Hash, cfg.NetParams)
	if err != nil {
		log.Errorf("canTicketVote error: %v", err)
		sendError(errInternalError, c)
		return
	}
	if !canVote {
		log.Warnf("Unvotable ticket %s from %s", ticket.Hash, c.ClientIP())
		sendError(errTicketCannotVote, c)
		return
	}
	// Respond early if the fee for this ticket is expired.
	if ticket.FeeExpired() {
		log.Warnf("Expired payfee request from %s", c.ClientIP())
		sendError(errFeeExpired, c)
		return
	}
	// Validate VotingKey.
	votingKey := payFeeRequest.VotingKey
	votingWIF, err := dcrutil.DecodeWIF(votingKey, cfg.NetParams.PrivateKeyID)
	if err != nil {
		log.Warnf("Failed to decode WIF: %v", err)
		sendError(errInvalidPrivKey, c)
		return
	}
	// Validate VoteChoices.
	voteChoices := payFeeRequest.VoteChoices
	err = isValidVoteChoices(cfg.NetParams, currentVoteVersion(cfg.NetParams), voteChoices)
	if err != nil {
		log.Warnf("Invalid votechoices from %s: %v", c.ClientIP(), err)
		sendErrorWithMsg(err.Error(), errInvalidVoteChoices, c)
		return
	}
	// Validate FeeTx.
	feeTx, err := decodeTransaction(payFeeRequest.FeeTx)
	if err != nil {
		log.Warnf("Failed to decode tx: %v", err)
		sendError(errInvalidFeeTx, c)
		return
	}
	// Loop through transaction outputs until we find one which pays to the
	// expected fee address. Record how much is being paid to the fee address.
	var feePaid dcrutil.Amount
	const scriptVersion = 0
findAddress:
	for _, txOut := range feeTx.TxOut {
		if txOut.Version != scriptVersion {
			sendErrorWithMsg("invalid script version", errInvalidFeeTx, c)
			return
		}
		_, addresses, _, err := txscript.ExtractPkScriptAddrs(scriptVersion,
			txOut.PkScript, cfg.NetParams)
		if err != nil {
			log.Errorf("Extract PK error: %v", err)
			sendError(errInternalError, c)
			return
		}
		for _, addr := range addresses {
			if addr.Address() == ticket.FeeAddress {
				feePaid = dcrutil.Amount(txOut.Value)
				break findAddress
			}
		}
	}
	if feePaid == 0 {
		log.Warnf("FeeTx for ticket %s did not include any payments for address %s", ticket.Hash, ticket.FeeAddress)
		sendErrorWithMsg("feetx did not include any payments for fee address", errInvalidFeeTx, c)
		return
	}
	wifAddr, err := dcrutil.NewAddressPubKeyHash(dcrutil.Hash160(votingWIF.PubKey()), cfg.NetParams,
		dcrec.STEcdsaSecp256k1)
	if err != nil {
		log.Errorf("NewAddressPubKeyHash: %v", err)
		sendError(errInvalidPrivKey, c)
		return
	}
	// Decode ticket transaction to get its voting address.
	ticketTx, err := decodeTransaction(rawTicket.Hex)
	if err != nil {
		log.Warnf("Failed to decode tx: %v", err)
		sendError(errInternalError, c)
		return
	}
	// Get ticket voting address.
	_, votingAddr, _, err := txscript.ExtractPkScriptAddrs(scriptVersion, ticketTx.TxOut[0].PkScript, cfg.NetParams)
	if err != nil {
		log.Errorf("ExtractPK error: %v", err)
		sendError(errInternalError, c)
		return
	}
	if len(votingAddr) == 0 {
		// Errorf (not Error) so the ticket hash is formatted into the message.
		log.Errorf("No voting address found for ticket %s", ticket.Hash)
		sendError(errInternalError, c)
		return
	}
	// Ensure provided private key will allow us to vote this ticket.
	if votingAddr[0].Address() != wifAddr.Address() {
		log.Warnf("Voting address does not match provided private key: "+
			"votingAddr=%+v, wifAddr=%+v", votingAddr[0], wifAddr)
		sendErrorWithMsg("voting address does not match provided private key",
			errInvalidPrivKey, c)
		return
	}
	minFee := dcrutil.Amount(ticket.FeeAmount)
	if feePaid < minFee {
		log.Warnf("Fee too small from %s: was %v, expected %v", c.ClientIP(),
			feePaid, minFee)
		sendError(errFeeTooSmall, c)
		return
	}
	// At this point we are satisfied that the request is valid and the FeeTx
	// pays sufficient fees to the expected address. Proceed to update the
	// database, and if the ticket is confirmed broadcast the transaction.
	ticket.VotingWIF = votingWIF.String()
	ticket.FeeTxHex = payFeeRequest.FeeTx
	ticket.FeeTxHash = feeTx.TxHash().String()
	ticket.VoteChoices = voteChoices
	ticket.FeeTxStatus = database.FeeReceieved
	err = db.UpdateTicket(ticket)
	if err != nil {
		log.Errorf("InsertTicket failed: %v", err)
		sendError(errInternalError, c)
		return
	}
	log.Debugf("Fee tx received for ticket: minExpectedFee=%v, feePaid=%v, "+
		"ticketHash=%s", minFee, feePaid, ticket.Hash)
	if ticket.Confirmed {
		err = dcrdClient.SendRawTransaction(payFeeRequest.FeeTx)
		if err != nil {
			log.Errorf("SendRawTransaction failed: %v", err)
			ticket.FeeTxStatus = database.FeeError
			err = db.UpdateTicket(ticket)
			if err != nil {
				log.Errorf("UpdateTicket error: %v", err)
			}
			sendErrorWithMsg("could not broadcast fee transaction", errInvalidFeeTx, c)
			return
		}
		ticket.FeeTxStatus = database.FeeBroadcast
		err = db.UpdateTicket(ticket)
		if err != nil {
			log.Errorf("UpdateTicket failed: %v", err)
			sendError(errInternalError, c)
			return
		}
		log.Debugf("Fee tx broadcast for ticket: ticketHash=%s, feeHash=%s", ticket.Hash, ticket.FeeTxHash)
	}
	sendJSONResponse(payFeeResponse{
		Timestamp: time.Now().Unix(),
		Request: payFeeRequest,
	}, c)
}
|
package core
import "fmt"
// ClusterDisabled is the error returned when an operation targets a cluster
// that has not been enabled.
type ClusterDisabled struct {
	Name string
}

// Error implements the error interface.
func (e ClusterDisabled) Error() string {
	return fmt.Sprintf("The cluster is not enabled: %s", e.Name)
}
|
package intset
import (
"bytes"
"fmt"
)
// BitInt32Set is a set of small non-negative integers backed by a bit vector
// of 32-bit words.
type BitInt32Set struct {
	words []uint32
}

// NewBitInt32Set returns an empty set.
func NewBitInt32Set() *BitInt32Set {
	return &BitInt32Set{}
}

// Has reports whether x is a member of the set.
func (s *BitInt32Set) Has(x int) bool {
	word, bit := x/32, uint(x%32)
	if word >= len(s.words) {
		return false
	}
	return s.words[word]&(1<<bit) != 0
}

// Add inserts x into the set, growing the word slice as needed.
func (s *BitInt32Set) Add(x int) {
	word, bit := x/32, uint(x%32)
	for len(s.words) <= word {
		s.words = append(s.words, 0)
	}
	s.words[word] |= 1 << bit
}
// UnionWith adds every element of t to s. Only *BitInt32Set operands are
// supported; any other IntSet implementation is silently ignored.
func (s *BitInt32Set) UnionWith(t IntSet) {
	if t2, ok := t.(*BitInt32Set); ok {
		for i, tword := range t2.words {
			if i < len(s.words) {
				s.words[i] |= tword
			} else {
				// t has more words than s: extend s with t's extra words.
				s.words = append(s.words, tword)
			}
		}
	}
}
// String renders the set as "{e1 e2 ...}" with elements in ascending order.
func (s *BitInt32Set) String() string {
	var out bytes.Buffer
	out.WriteByte('{')
	for wi, w := range s.words {
		if w == 0 {
			continue
		}
		for b := 0; b < 32; b++ {
			if w&(1<<uint(b)) == 0 {
				continue
			}
			// Separate elements with a space once the first one is written.
			if out.Len() > len("{") {
				out.WriteByte(' ')
			}
			fmt.Fprintf(&out, "%d", 32*wi+b)
		}
	}
	out.WriteByte('}')
	return out.String()
}
// popcount32 returns the number of set bits in x using Kernighan's trick:
// x &= x-1 clears the lowest set bit on each pass.
func popcount32(x uint32) int {
	n := 0
	for ; x != 0; x &= x - 1 {
		n++
	}
	return n
}
// Len returns the number of elements in the set.
func (s *BitInt32Set) Len() int {
	total := 0
	for _, w := range s.words {
		total += popcount32(w)
	}
	return total
}
// Remove deletes x from the set; removing an element that was never added is
// a no-op.
func (s *BitInt32Set) Remove(x int) {
	word, bit := x/32, uint(x%32)
	// Guard against x beyond the allocated words: the original indexed
	// s.words[word] unconditionally and panicked for values that were never
	// Add-ed (while Has handles the same case gracefully).
	if word >= len(s.words) {
		return
	}
	s.words[word] &^= 1 << bit
}
// Clear removes every element while keeping the allocated backing words, so
// a subsequent Add of a previously-seen value does not reallocate.
func (s *BitInt32Set) Clear() {
	for i := range s.words {
		s.words[i] = 0
	}
}
|
package main
// Leetcode 830. (easy)
// largeGroupPositions returns the [start, end] index pairs of every run of at
// least three consecutive equal bytes in s (inputs are lowercase ASCII per
// the problem statement).
func largeGroupPositions(s string) (res [][]int) {
	runLen := 1
	for i := 0; i < len(s); i++ {
		// A run ends at the last byte or where the next byte differs.
		if i == len(s)-1 || s[i] != s[i+1] {
			if runLen >= 3 {
				res = append(res, []int{i - runLen + 1, i})
			}
			runLen = 1
			continue
		}
		runLen++
	}
	return
}
|
package structs
// Platform represents a third party streaming platform.
type Platform struct {
	Id int64 `json:"id,omitempty"` // platform identifier
	Name string `json:"name,omitempty"` // display name
	Color string `json:"color,omitempty"` // brand color
	Images PlatformImages `json:"images,omitempty"` // associated image assets
}
|
package sparsemat
import (
"encoding/json"
"math/rand"
"reflect"
"sort"
"strconv"
"testing"
)
// TestCSRMat verifies CSRMat construction from row-major data — including
// empty and nil data, which must yield an all-zero matrix — by checking every
// element through At.
func TestCSRMat(t *testing.T) {
	tests := []struct {
		rows, cols int
		data []int
		expected [][]int
	}{
		{1, 1, []int{1}, [][]int{{1}}},
		{2, 2, []int{1, 0, 0, 1}, [][]int{{1, 0}, {0, 1}}},
		{2, 2, []int{}, [][]int{{0, 0}, {0, 0}}},
		{2, 2, nil, [][]int{{0, 0}, {0, 0}}},
		{4, 4, []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, [][]int{{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}},
		{4, 4, []int{1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1}, [][]int{{1, 1, 1, 1}, {1, 0, 0, 1}, {1, 0, 0, 1}, {1, 1, 1, 1}}},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			var m SparseMat
			if test.data != nil {
				m = CSRMat(test.rows, test.cols, test.data...)
			} else {
				// nil data exercises the no-variadic-args constructor path.
				m = CSRMat(test.rows, test.cols)
			}
			for i := 0; i < test.rows; i++ {
				for j := 0; j < test.cols; j++ {
					expected := test.expected[i][j]
					actual := m.At(i, j)
					if actual != expected {
						t.Fatalf("expected %v but found %v", expected, actual)
					}
				}
			}
		})
	}
}
// TestCSRMatCopy checks that CSRMatCopy produces a matrix equal to its
// source.
func TestCSRMatCopy(t *testing.T) {
	tests := []struct {
		mat SparseMat
	}{
		//{CSRMat(5, 5)},
		{CSRIdentity(5)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			actual := CSRMatCopy(test.mat)
			if !actual.Equals(test.mat) {
				t.Fatalf("expected \n%v\n but found \n%v\n", test.mat, actual)
			}
		})
	}
}
// BenchmarkCSRMatCopy measures CSRMatCopy on random matrices of increasing
// width; the random fill happens before ResetTimer so only the copy is timed.
func BenchmarkCSRMatCopy(b *testing.B) {
	benchmarks := []struct {
		rows, cols int
	}{
		{100, 10},
		{100, 100},
		{100, 1000},
		{100, 10000},
	}
	for r, bm := range benchmarks {
		b.Run(strconv.Itoa(r), func(b *testing.B) {
			m := randomMatrix(bm.rows, bm.cols)
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				CSRMatCopy(m)
			}
		})
	}
}
// randomMatrix builds a rows x cols matrix where each cell is independently
// set to 1 with probability 1/2 (uses the unseeded global rand source).
func randomMatrix(rows, cols int) SparseMat {
	m := CSRMat(rows, cols)
	//make random data
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			if rand.Intn(2) == 1 {
				m.Set(r, c, 1)
			}
		}
	}
	return m
}
// TestCSRMatrix_Dim verifies Dims for both a plain matrix and a slice view.
func TestCSRMatrix_Dim(t *testing.T) {
	tests := []struct {
		m SparseMat
		expectedRows int
		expectedCols int
	}{
		{CSRMat(5, 5), 5, 5},
		{CSRMat(5, 5).Slice(1, 1, 4, 4), 4, 4},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			rows, cols := test.m.Dims()
			if rows != test.expectedRows {
				t.Fatalf("expected %v but found %v", test.expectedRows, rows)
			}
			if cols != test.expectedCols {
				t.Fatalf("expected %v but found %v", test.expectedCols, cols)
			}
		})
	}
}
// TestCSRMatrix_Slice checks sub-matrix views, including a transposed slice.
func TestCSRMatrix_Slice(t *testing.T) {
	tests := []struct {
		sliced SparseMat
		expected SparseMat
	}{
		{CSRMat(2, 2, 1, 0, 0, 1).Slice(0, 0, 2, 1), CSRMat(2, 1, 1, 0)},
		{CSRIdentity(8).Slice(3, 0, 4, 4), CSRMat(4, 4, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)},
		{CSRIdentity(8).Slice(3, 0, 4, 4).T(), CSRMat(4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			if !test.sliced.Equals(test.expected) {
				t.Fatalf("expected equality between %v and %v", test.sliced, test.expected)
			}
		})
	}
}
// TestCSRMatrix_Equals covers equality across transposes, mismatched
// dimensions, a nil operand, and an asymmetric matrix vs its transpose.
func TestCSRMatrix_Equals(t *testing.T) {
	tests := []struct {
		input1, input2 SparseMat
		expected bool
	}{
		{CSRIdentity(3), CSRIdentity(3), true},
		{CSRIdentity(3).T(), CSRIdentity(3), true},
		{CSRIdentity(4), CSRIdentity(3), false},
		{CSRIdentity(4), nil, false},
		{CSRMat(3, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0).T(), CSRMat(3, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0), false},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			actual := test.input1.Equals(test.input2)
			if actual != test.expected {
				t.Fatalf("expected %v but found %v", test.expected, actual)
			}
		})
	}
}
// TestCSRIdentity checks that CSRIdentity builds the expected unit matrix.
func TestCSRIdentity(t *testing.T) {
	tests := []struct {
		ident SparseMat
		expected SparseMat
	}{
		{CSRIdentity(3), CSRMat(3, 3, 1, 0, 0, 0, 1, 0, 0, 0, 1)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			if !test.ident.Equals(test.expected) {
				t.Fatalf("expected equality")
			}
		})
	}
}
// TestCSRMatrix_At spot-checks element access for set and unset positions.
func TestCSRMatrix_At(t *testing.T) {
	tests := []struct {
		input SparseMat
		i, j int
		expected int
	}{
		{CSRIdentity(3), 0, 0, 1},
		{CSRIdentity(3), 0, 1, 0},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			actual := test.input.At(test.i, test.j)
			if actual != test.expected {
				t.Fatalf("expected %v at (%v,%v) but found %v", test.expected, test.i, test.j, actual)
			}
		})
	}
}
// TestCSRMatrix_Mul verifies GF(2)-style matrix multiplication (note the
// first case: an even number of overlapping 1s cancels to 0), including
// identity products and a transposed operand.
func TestCSRMatrix_Mul(t *testing.T) {
	tests := []struct {
		m1, m2, result, expected SparseMat
	}{
		{CSRMat(1, 4, 1, 0, 1, 0), CSRMat(4, 1, 1, 0, 1, 0), CSRMat(1, 1), CSRMat(1, 1, 0)},
		{CSRMat(1, 4, 1, 0, 1, 0), CSRMat(4, 1, 1, 0, 0, 0), CSRMat(1, 1), CSRMat(1, 1, 1)},
		{CSRMat(1, 4, 1, 1, 1, 1), CSRMat(4, 1, 1, 1, 1, 0), CSRMat(1, 1), CSRMat(1, 1, 1)},
		{CSRIdentity(3), CSRIdentity(3), CSRMat(3, 3), CSRIdentity(3)},
		{CSRIdentity(3), CSRMat(3, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0), CSRMat(3, 3), CSRMat(3, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0)},
		{CSRMat(3, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0), CSRIdentity(3), CSRMat(3, 3), CSRMat(3, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0)},
		{CSRMat(4, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1).T(), CSRIdentity(4), CSRMat(3, 4), CSRMat(4, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1).T()},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.result.Mul(test.m1, test.m2)
			if !test.result.Equals(test.expected) {
				t.Fatalf("expected %v but found %v", test.expected, test.result)
			}
		})
	}
}
// TestCSRMatrix_Zeroize checks that Zeroize clears every element. (Also fixes
// the "expcted" typo in the failure message.)
func TestCSRMatrix_Zeroize(t *testing.T) {
	tests := []struct {
		original SparseMat
		expected SparseMat
	}{
		{CSRIdentity(3), CSRMat(3, 3)},
		{CSRMat(3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1), CSRMat(3, 3)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.original.Zeroize()
			if !test.original.Equals(test.expected) {
				t.Fatalf("expected \n%v\n but found \n%v\n", test.expected, test.original)
			}
		})
	}
}
// TestCSRMatrix_ZeroizeRange checks that ZeroizeRange clears only the
// requested sub-rectangle. (Also fixes the "expcted" typo in the failure
// message.)
func TestCSRMatrix_ZeroizeRange(t *testing.T) {
	tests := []struct {
		original SparseMat
		i, j, rows, cols int
		expected SparseMat
	}{
		{CSRMat(4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 1, 1, 2, 2, CSRMat(4, 4, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.original.ZeroizeRange(test.i, test.j, test.rows, test.cols)
			if !test.original.Equals(test.expected) {
				t.Fatalf("expected \n%v\n but found \n%v\n", test.expected, test.original)
			}
		})
	}
}
// TestCSRMatrix_T checks transposition of square and rectangular matrices.
// (Also fixes the "expcted" typo in the failure message.)
func TestCSRMatrix_T(t *testing.T) {
	tests := []struct {
		original SparseMat
		expected SparseMat
	}{
		{CSRMat(3, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0), CSRMat(3, 3, 0, 0, 0, 1, 1, 0, 1, 1, 0)},
		{CSRMat(4, 2, 0, 1, 0, 0, 0, 0, 1, 0), CSRMat(2, 4, 0, 0, 0, 1, 1, 0, 0, 0)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			if !test.original.T().Equals(test.expected) {
				t.Fatalf("expected \n%v\n but found \n%v\n", test.expected, test.original.T())
			}
		})
	}
}
// TestCSRMatrix_Add checks XOR-style addition: I+I cancels to zero, and
// adding zero is the identity. (Also fixes the "expcted" typo in the failure
// message.)
func TestCSRMatrix_Add(t *testing.T) {
	tests := []struct {
		a, b, result SparseMat
		expected SparseMat
	}{
		{CSRIdentity(3), CSRIdentity(3), CSRMat(3, 3), CSRMat(3, 3)},
		{CSRIdentity(3), CSRMat(3, 3), CSRMat(3, 3), CSRIdentity(3)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.result.Add(test.a, test.b)
			if !test.result.Equals(test.expected) {
				t.Fatalf("expected \n%v\n but found \n%v\n", test.expected, test.result)
			}
		})
	}
}
// TestCSRMatrix_Add2 adds a matrix into a slice view in place and verifies
// the slice result.
// NOTE(review): the expectedOriginal field is populated but never asserted —
// confirm whether a check on the backing matrix was intended.
func TestCSRMatrix_Add2(t *testing.T) {
	tests := []struct {
		original SparseMat
		i, j, rows, cols int
		addToSlice SparseMat
		expectedOriginal SparseMat
		expectedSlice SparseMat
	}{
		{
			CSRMat(5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
			1, 1, 3, 3,
			CSRIdentity(3),
			CSRMat(5, 5, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1),
			CSRMat(3, 3, 0, 1, 1, 1, 0, 1, 1, 1, 0),
		},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			sl := test.original.Slice(test.i, test.j, test.rows, test.cols)
			sl.Add(sl, test.addToSlice)
			if !sl.Equals(test.expectedSlice) {
				t.Fatalf("expected \n%v\n but found \n%v\n", test.expectedSlice, sl)
			}
		})
	}
}
// TestCSRMatrix_Column checks that Column extracts the expected vector,
// including from a transposed slice view.
func TestCSRMatrix_Column(t *testing.T) {
	cases := []struct {
		m        SparseMat
		j        int //column
		expected SparseVector
	}{
		{CSRIdentity(3), 1, CSRVec(3, 0, 1, 0)},
		{CSRIdentity(3), 0, CSRVec(3, 1, 0, 0)},
		{CSRMat(4, 4, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0).Slice(1, 1, 2, 2).T(), 0, CSRVec(2, 0, 1)},
	}
	for idx, tc := range cases {
		t.Run(strconv.Itoa(idx), func(t *testing.T) {
			got := tc.m.Column(tc.j)
			if got.Equals(tc.expected) {
				return
			}
			t.Fatalf("expected %v but found %v", tc.expected, got)
		})
	}
}
// TestCSRMatrix_Row checks that Row extracts the expected vector,
// including from a transposed slice view.
func TestCSRMatrix_Row(t *testing.T) {
	cases := []struct {
		m        SparseMat
		i        int //row index
		expected SparseVector
	}{
		{CSRIdentity(3), 1, CSRVec(3, 0, 1, 0)},
		{CSRIdentity(3), 0, CSRVec(3, 1, 0, 0)},
		{CSRMat(4, 4, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0).Slice(1, 1, 2, 2).T(), 1, CSRVec(2, 1, 0)},
	}
	for idx, tc := range cases {
		t.Run(strconv.Itoa(idx), func(t *testing.T) {
			got := tc.m.Row(tc.i)
			if got.Equals(tc.expected) {
				return
			}
			t.Fatalf("expected %v but found %v", tc.expected, got)
		})
	}
}
// TestCSRMatrix_SetColumn verifies that SetColumn overwrites a column
// in place, including with a vector taken from another matrix.
func TestCSRMatrix_SetColumn(t *testing.T) {
	tests := []struct {
		m        SparseMat
		j        int //column to change
		vec      SparseVector
		expected SparseMat
	}{
		{CSRIdentity(3), 0, CSRVec(3, 0, 1, 0), CSRMat(3, 3, 0, 0, 0, 1, 1, 0, 0, 0, 1)},
		{CSRIdentity(3), 1, CSRIdentity(3).Column(2), CSRMat(3, 3, 1, 0, 0, 0, 0, 0, 0, 1, 1)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.m.SetColumn(test.j, test.vec)
			if !test.m.Equals(test.expected) {
				// Fixed failure-message typo: "expcted" -> "expected".
				t.Fatalf("expected \n%v\n but found \n%v\n", test.expected, test.m)
			}
		})
	}
}
// TestCSRMatrix_SetRow verifies that SetRow overwrites a row in place,
// covering first/last rows and non-square shapes.
func TestCSRMatrix_SetRow(t *testing.T) {
	tests := []struct {
		m        SparseMat
		i        int //row to change
		vec      SparseVector
		expected SparseMat
	}{
		{CSRIdentity(3), 0, CSRVec(3, 0, 1, 0), CSRMat(3, 3, 0, 1, 0, 0, 1, 0, 0, 0, 1)},
		{CSRIdentity(3), 1, CSRIdentity(3).Row(2), CSRMat(3, 3, 1, 0, 0, 0, 0, 1, 0, 0, 1)},
		{CSRMat(4, 3), 0, CSRVec(3, 1, 1, 1), CSRMat(4, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0)},
		{CSRMat(3, 4), 0, CSRVec(4, 1, 1, 1, 1), CSRMat(3, 4, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)},
		{CSRMat(4, 3), 3, CSRVec(3, 1, 1, 1), CSRMat(4, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)},
		{CSRMat(3, 4), 2, CSRVec(4, 1, 1, 1, 1), CSRMat(3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.m.SetRow(test.i, test.vec)
			if !test.m.Equals(test.expected) {
				// Fixed failure-message typo: "expcted" -> "expected".
				t.Fatalf("expected \n%v\n but found \n%v\n", test.expected, test.m)
			}
		})
	}
}
// TestCSRMatrix_Row_SetRow builds a random matrix from a reference map,
// extracts one row, rewrites that row (keeping the reference map in
// sync), writes it back with SetRow, and checks the matrix matches a
// matrix rebuilt from the reference map.
func TestCSRMatrix_Row_SetRow(t *testing.T) {
	rows, cols := 25, 25
	data := make(map[int]map[int]int)
	//make random data
	for r := 0; r < rows; r++ {
		data[r] = make(map[int]int)
		for c := 0; c < cols; c++ {
			if rand.Intn(2) == 1 {
				data[r][c] = 1
			}
		}
	}
	//create a matrix from it
	m := CSRMat(rows, cols)
	for r, cs := range data {
		for c := range cs {
			m.Set(r, c, 1)
		}
	}
	//grab a row and make some changes
	row := m.Row(rows / 2)
	// Reset the reference entry for the middle row; it is regenerated
	// below in lockstep with the mutations applied to `row`.
	data[rows/2] = make(map[int]int)
	for c := 0; c < cols; c++ {
		if rand.Intn(2) == 1 {
			data[rows/2][c] = 1
			row.Set(c, 1)
		} else {
			row.Set(c, 0)
		}
	}
	m.SetRow(rows/2, row)
	//create expected matrix
	expected := CSRMat(rows, cols)
	for r, cs := range data {
		for c := range cs {
			expected.Set(r, c, 1)
		}
	}
	if !expected.Equals(m) {
		t.Fatalf("expected %v but found %v", expected, m)
	}
}
// TestCSRMatrix_Row_SetRow2 performs many random row-add/SetRow cycles
// and then checks row consistency.
//
// NOTE(review): ra1 and ra2 come from two calls to NonzeroArray on the
// same row, with ra1 sorted in between — this only asserts that
// NonzeroArray returns already-sorted indices (and, if it returns the
// same backing slice each call, the check is vacuous); TODO confirm
// the intent.
func TestCSRMatrix_Row_SetRow2(t *testing.T) {
	rows, cols := 2000, 10
	swaps := 10000
	m := randomMatrix(rows, cols)
	for i := 0; i < swaps; i++ {
		r1 := rand.Intn(rows)
		r2 := rand.Intn(rows)
		row1 := m.Row(r1)
		row2 := m.Row(r2)
		row1.Add(row1, row2) //make a change
		m.SetRow(r1, row1)
		row := m.Row(r1)
		ra1 := row.NonzeroArray()
		sort.Ints(ra1)
		ra2 := row.NonzeroArray()
		if !reflect.DeepEqual(ra1, ra2) {
			t.Fatalf("expected \n%v\n but found \n%v\n on row %v", ra2, ra1, r1)
		}
	}
}
// TestCSRMatrix_SetMatrix verifies that SetMatrix copies a source
// matrix into the destination at the given row/column offsets.
func TestCSRMatrix_SetMatrix(t *testing.T) {
	tests := []struct {
		dest             SparseMat
		source           SparseMat
		iOffset, jOffset int
		expected         SparseMat
	}{
		{CSRMat(3, 3), CSRIdentity(3), 0, 0, CSRIdentity(3)},
		{CSRMat(4, 4), CSRMat(2, 2, 1, 1, 1, 1), 1, 1, CSRMat(4, 4, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0)},
		{CSRMat(4, 4), CSRMat(2, 2, 0, 1, 0, 0).T(), 1, 1, CSRMat(4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0)},
		{CSRIdentity(4), CSRMat(2, 2, 1, 1, 1, 1), 1, 1, CSRMat(4, 4, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1)},
		{CSRMat(5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), CSRIdentity(3), 1, 1, CSRMat(5, 5, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.dest.SetMatrix(test.source, test.iOffset, test.jOffset)
			if !test.dest.Equals(test.expected) {
				// Fixed failure-message typo: "expcted" -> "expected".
				t.Fatalf("expected \n%v\n but found \n%v\n", test.expected, test.dest)
			}
		})
	}
}
// TestCSRMatrix_SetMatrix2 checks SetMatrix applied through a slice
// view of a larger matrix.
//
// NOTE(review): expectedOriginal is populated but never asserted
// against test.original — presumably the slice is a view and the parent
// matrix should change too; TODO confirm and assert it.
func TestCSRMatrix_SetMatrix2(t *testing.T) {
	tests := []struct {
		original         SparseMat
		i, j, rows, cols int
		source           SparseMat
		iOffset, jOffset int
		expectedOriginal SparseMat
		expectedSlice    SparseMat
	}{
		{CSRMat(5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
			1, 1, 3, 3,
			CSRIdentity(2),
			1, 1,
			CSRMat(5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1),
			CSRMat(3, 3, 1, 1, 1, 1, 1, 0, 1, 0, 1)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			sl := test.original.Slice(test.i, test.j, test.rows, test.cols)
			sl.SetMatrix(test.source, test.iOffset, test.jOffset)
			if !sl.Equals(test.expectedSlice) {
				t.Fatalf("expected \n%v\n but found \n%v\n", test.expectedSlice, sl)
			}
		})
	}
}
// TestCSRMatrix_JSON round-trips a matrix through JSON marshalling and
// checks the decoded value equals the original.
func TestCSRMatrix_JSON(t *testing.T) {
	original := CSRIdentity(3)
	encoded, err := json.Marshal(original)
	if err != nil {
		t.Fatalf("expected no error found:%v", err)
	}
	var decoded CSRMatrix
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		t.Fatalf("expected no error found:%v", err)
	}
	if !original.Equals(&decoded) {
		t.Fatalf("expected %v but found %v", original, decoded)
	}
}
// TestCSRMatrix_And verifies element-wise AND written into a
// destination matrix; both operand orders are covered.
func TestCSRMatrix_And(t *testing.T) {
	cases := []struct {
		x, y, result, expected SparseMat
	}{
		{CSRMat(2, 2, 0, 1, 0, 1), CSRMat(2, 2, 0, 0, 1, 1), CSRMat(2, 2), CSRMat(2, 2, 0, 0, 0, 1)},
		{CSRMat(2, 2, 0, 0, 1, 1), CSRMat(2, 2, 0, 1, 0, 1), CSRMat(2, 2), CSRMat(2, 2, 0, 0, 0, 1)},
	}
	for idx, tc := range cases {
		t.Run(strconv.Itoa(idx), func(t *testing.T) {
			tc.result.And(tc.x, tc.y)
			if tc.result.Equals(tc.expected) {
				return
			}
			t.Fatalf("expected %v but found %v", tc.expected, tc.result)
		})
	}
}
// TestCSRMatrix_Or verifies element-wise OR written into a destination
// matrix; both operand orders are covered.
func TestCSRMatrix_Or(t *testing.T) {
	cases := []struct {
		x, y, result, expected SparseMat
	}{
		{CSRMat(2, 2, 0, 1, 0, 1), CSRMat(2, 2, 0, 0, 1, 1), CSRMat(2, 2), CSRMat(2, 2, 0, 1, 1, 1)},
		{CSRMat(2, 2, 0, 0, 1, 1), CSRMat(2, 2, 0, 1, 0, 1), CSRMat(2, 2), CSRMat(2, 2, 0, 1, 1, 1)},
	}
	for idx, tc := range cases {
		t.Run(strconv.Itoa(idx), func(t *testing.T) {
			tc.result.Or(tc.x, tc.y)
			if tc.result.Equals(tc.expected) {
				return
			}
			t.Fatalf("expected %v but found %v", tc.expected, tc.result)
		})
	}
}
// TestCSRMatrix_XOr verifies element-wise XOR written into a
// destination matrix; both operand orders are covered.
func TestCSRMatrix_XOr(t *testing.T) {
	cases := []struct {
		x, y, result, expected SparseMat
	}{
		{CSRMat(2, 2, 0, 1, 0, 1), CSRMat(2, 2, 0, 0, 1, 1), CSRMat(2, 2), CSRMat(2, 2, 0, 1, 1, 0)},
		{CSRMat(2, 2, 0, 0, 1, 1), CSRMat(2, 2, 0, 1, 0, 1), CSRMat(2, 2), CSRMat(2, 2, 0, 1, 1, 0)},
	}
	for idx, tc := range cases {
		t.Run(strconv.Itoa(idx), func(t *testing.T) {
			tc.result.XOr(tc.x, tc.y)
			if tc.result.Equals(tc.expected) {
				return
			}
			t.Fatalf("expected %v but found %v", tc.expected, tc.result)
		})
	}
}
// TestCSRMatrix_Negate verifies in-place bit flipping of every element.
func TestCSRMatrix_Negate(t *testing.T) {
	cases := []struct {
		x, expected SparseMat
	}{
		{CSRMat(2, 2, 0, 1, 0, 1), CSRMat(2, 2, 1, 0, 1, 0)},
		{CSRMat(2, 2, 0, 1, 1, 0), CSRMat(2, 2, 1, 0, 0, 1)},
	}
	for idx, tc := range cases {
		t.Run(strconv.Itoa(idx), func(t *testing.T) {
			tc.x.Negate()
			if tc.x.Equals(tc.expected) {
				return
			}
			t.Fatalf("expected %v but found %v", tc.expected, tc.x)
		})
	}
}
// TestCSRMatrix_SwapRows verifies in-place row swapping: identity
// self-swap, swaps between empty/dense rows, and non-square shapes.
func TestCSRMatrix_SwapRows(t *testing.T) {
	tests := []struct {
		input    SparseMat
		a, b     int
		expected SparseMat
	}{
		{CSRIdentity(4), 0, 0, CSRMat(4, 4, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)},
		{CSRIdentity(4), 1, 3, CSRMat(4, 4, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0)},
		{CSRMat(4, 5, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1), 2, 3, CSRMat(4, 5, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0)},
		{CSRMat(5, 4, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1), 0, 1, CSRMat(5, 4, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1)},
		{CSRMat(5, 4, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1), 1, 2, CSRMat(5, 4, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1)},
		{CSRMat(5, 4, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0), 4, 0, CSRMat(5, 4, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1)},
		{CSRMat(5, 4, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0), 0, 1, CSRMat(5, 4, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0)},
		{CSRMat(5, 4, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1), 0, 2, CSRMat(5, 4, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.input.SwapRows(test.a, test.b)
			if !test.input.Equals(test.expected) {
				t.Fatalf("after rowswap(%v <> %v) expected \n%v\n but found \n%v\n", test.a, test.b, test.expected, test.input)
			}
		})
	}
}
func TestCSRMatrix_SwapRows2(t *testing.T) {
input := CSRMat(5, 5,
1, 1, 1, 1, 1,
0, 1, 0, 0, 0,
0, 0, 0, 1, 1,
0, 0, 0, 0, 0,
1, 1, 1, 0, 0)
expected := CSRMat(5, 5,
0, 1, 0, 1, 0,
0, 0, 0, 0, 0,
0, 1, 0, 1, 0,
0, 0, 0, 0, 1,
1, 1, 1, 1, 1,
)
actual := CSRMatCopy(input)
actual.SwapRows(0, 4)
actual.SwapColumns(1, 4)
actual.SwapRows(1, 3)
r1 := actual.Row(0)
r1.Add(r1, actual.Row(4))
actual.SetRow(0, r1)
if !actual.Equals(expected) {
t.Errorf("expected \n%v\n but found \n%v\n", expected, actual)
}
}
// BenchmarkCSRMatrix_SwapRows measures row swapping on matrices with
// different densities; each pair of swaps restores the input state.
func BenchmarkCSRMatrix_SwapRows(b *testing.B) {
	sparseTop := CSRMat(5, 4, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1)
	denseTop := CSRMat(5, 4, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0)
	denseEnds := CSRMat(5, 4, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sparseTop.SwapRows(0, 1)
		sparseTop.SwapRows(0, 1)
		denseTop.SwapRows(0, 1)
		denseTop.SwapRows(0, 1)
		denseEnds.SwapRows(0, 4)
		denseEnds.SwapRows(0, 4)
	}
}
// TestCSRMatrix_SwapColumns verifies in-place column swapping across
// many sparsity patterns, including single-row matrices and swaps
// involving empty columns.
func TestCSRMatrix_SwapColumns(t *testing.T) {
	tests := []struct {
		input    SparseMat
		a, b     int
		expected SparseMat
	}{
		{CSRIdentity(4), 1, 3, CSRMat(4, 4, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0)},
		{CSRMat(3, 3, 1, 0, 1, 0, 0, 1, 0, 1, 1), 0, 2, CSRMat(3, 3, 1, 0, 1, 1, 0, 0, 1, 1, 0)},
		{CSRMat(3, 3, 1, 0, 1, 0, 0, 1, 0, 1, 1), 0, 1, CSRMat(3, 3, 0, 1, 1, 0, 0, 1, 1, 0, 1)},
		{CSRMat(1, 6, 1, 1, 0, 0, 1, 1), 0, 1, CSRMat(1, 6, 1, 1, 0, 0, 1, 1)},
		{CSRMat(1, 6, 1, 1, 0, 0, 1, 1), 0, 2, CSRMat(1, 6, 0, 1, 1, 0, 1, 1)},
		{CSRMat(1, 6, 1, 1, 0, 0, 1, 1), 0, 4, CSRMat(1, 6, 1, 1, 0, 0, 1, 1)},
		{CSRMat(1, 6, 1, 1, 0, 0, 1, 1), 1, 2, CSRMat(1, 6, 1, 0, 1, 0, 1, 1)},
		{CSRMat(1, 6, 1, 1, 0, 0, 1, 1), 1, 3, CSRMat(1, 6, 1, 0, 0, 1, 1, 1)},
		{CSRMat(1, 6, 1, 1, 0, 0, 1, 1), 1, 4, CSRMat(1, 6, 1, 1, 0, 0, 1, 1)},
		{CSRMat(1, 6, 1, 1, 0, 0, 1, 1), 2, 3, CSRMat(1, 6, 1, 1, 0, 0, 1, 1)},
		{CSRMat(1, 6, 1, 1, 0, 0, 1, 1), 2, 4, CSRMat(1, 6, 1, 1, 1, 0, 0, 1)},
		{CSRMat(1, 6, 1, 1, 0, 0, 1, 1), 2, 5, CSRMat(1, 6, 1, 1, 1, 0, 1, 0)},
		{CSRMat(1, 6, 0, 0, 1, 0, 0, 0), 2, 5, CSRMat(1, 6, 0, 0, 0, 0, 0, 1)},
		{CSRMat(1, 6, 0, 0, 0, 0, 0, 1), 2, 5, CSRMat(1, 6, 0, 0, 1, 0, 0, 0)},
		{CSRMat(1, 6, 0, 0, 0, 0, 0, 0), 2, 5, CSRMat(1, 6, 0, 0, 0, 0, 0, 0)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.input.SwapColumns(test.a, test.b)
			if !test.input.Equals(test.expected) {
				t.Fatalf("expected %v but found %v", test.expected, test.input)
			}
		})
	}
}
// TestCSRMatrix_SwapColumns_random swaps every ordered pair of columns
// on a copy of a random matrix and checks both swapped columns match
// the originals.
func TestCSRMatrix_SwapColumns_random(t *testing.T) {
	rows, cols := 10_000, 16
	mat := randomMatrix(rows, cols)
	for left := 0; left < cols; left++ {
		for right := left; right < cols; right++ {
			swapped := CSRMatCopy(mat)
			swapped.SwapColumns(left, right)
			if !mat.Column(left).Equals(swapped.Column(right)) {
				t.Fatalf("expected column %v and %v to be equal for \n%v", left, right, mat)
			}
			if !mat.Column(right).Equals(swapped.Column(left)) {
				t.Fatalf("expected column %v and %v to be equal for \n%v", left, right, mat)
			}
		}
	}
}
// BenchmarkCSRMatrix_SwapColumns measures column swapping; each pair of
// swaps restores the matrix to its starting state.
func BenchmarkCSRMatrix_SwapColumns(b *testing.B) {
	checker := CSRMat(5, 5, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		checker.SwapColumns(2, 3)
		checker.SwapColumns(2, 3)
	}
}
// TestCSRMatrix_AddRows verifies AddRows(i1, i2, dest): the XOR of rows
// i1 and i2 is stored into row dest (dest may equal i1 or i2).
func TestCSRMatrix_AddRows(t *testing.T) {
	tests := []struct {
		input        SparseMat
		i1, i2, dest int
		expected     SparseMat
	}{
		{CSRIdentity(3), 0, 1, 2, CSRMat(3, 3, 1, 0, 0, 0, 1, 0, 1, 1, 0)},
		{CSRMat(4, 5, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1), 1, 2, 1, CSRMat(4, 5, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1)},
		{CSRMat(4, 5, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, 2, 0, CSRMat(4, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)},
		{CSRMat(4, 5, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 1, 2, 1, CSRMat(4, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)},
		{CSRMat(4, 5, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 1, 2, 1, CSRMat(4, 5, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)},
	}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			test.input.AddRows(test.i1, test.i2, test.dest)
			if !test.input.Equals(test.expected) {
				t.Fatalf("expected %v but found %v", test.expected, test.input)
			}
		})
	}
}
// BenchmarkCSRMatrix_AddRows measures repeated row additions into
// varying destination rows.
func BenchmarkCSRMatrix_AddRows(b *testing.B) {
	m := CSRMat(5, 4, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for dest := 1; dest < 5; dest++ {
			m.AddRows(0, dest, dest)
			m.AddRows(4, dest, 4)
			m.AddRows(4, dest, 4)
		}
	}
}
// TestCSRMatFromVec checks that a vector converted to a one-row matrix
// round-trips through Row(0).
func TestCSRMatFromVec(t *testing.T) {
	cases := []struct {
		vec SparseVector
	}{
		{CSRVec(5, 1, 0, 1, 0, 1)},
	}
	for idx, tc := range cases {
		t.Run(strconv.Itoa(idx), func(t *testing.T) {
			got := CSRMatFromVec(tc.vec).Row(0)
			if got.Equals(tc.vec) {
				return
			}
			t.Fatalf("expected %v but found %v", tc.vec, got)
		})
	}
}
// TestCSRMatrix_Set builds an identity matrix one Set call at a time
// and compares it against CSRIdentity.
func TestCSRMatrix_Set(t *testing.T) {
	built := CSRMat(5, 5)
	for d := 0; d < 5; d++ {
		built.Set(d, d, 1)
	}
	want := CSRIdentity(5)
	if !want.Equals(built) {
		t.Fatalf("expected %v but found %v", want, built)
	}
}
// BenchmarkCSRMatrix_T measures transposition of a random 10x10 matrix.
func BenchmarkCSRMatrix_T(b *testing.B) {
	cells := make([]int, 100)
	for i := range cells {
		cells[i] = rand.Intn(2)
	}
	m := CSRMat(10, 10, cells...)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.T()
	}
}
|
package main
// main is intentionally empty; the file exists to host the symmetric
// tree solution below.
func main() {
}
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// isSymmetric reports whether the tree rooted at root is a mirror image
// of itself around its center. An empty tree is symmetric.
func isSymmetric(root *TreeNode) bool {
	// Guard against a nil root: the original dereferenced root
	// unconditionally and panicked on an empty tree.
	if root == nil {
		return true
	}
	return areSymmetricNodes(root.Left, root.Right)
}

// areSymmetricNodes reports whether two subtrees are mirror images of
// each other (equal values, with left/right children swapped).
func areSymmetricNodes(lSubTree *TreeNode, rSubTree *TreeNode) bool {
	if lSubTree == nil && rSubTree == nil {
		return true
	}
	if lSubTree == nil || rSubTree == nil || lSubTree.Val != rSubTree.Val {
		return false
	}
	return areSymmetricNodes(lSubTree.Left, rSubTree.Right) && areSymmetricNodes(lSubTree.Right, rSubTree.Left)
}
|
//
// Copyright 2020 IBM Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package tkn
import (
"crypto/md5"
"fmt"
"io/ioutil"
"regexp"
"strings"
"github.com/tapestry-pipelines/pkg/utils"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
)
// Tekton resource kinds accepted by parseK8sYaml.
const (
	task string = "Task"
	taskrun string = "TaskRun"
	pipeline string = "Pipeline"
	triggerbinding string = "TriggerBinding"
)

// parsedTknObjects groups the Tekton objects parsed from one manifest
// file together with the file's path and an MD5 fingerprint of its raw
// contents (used for change detection, not security).
type parsedTknObjects struct {
	ManifestFilepath string
	ManifestFilehash string
	TknObjects []tknObject
}

// tknObject is a single accepted YAML document: its Kubernetes Kind and
// the raw YAML bytes of the document.
type tknObject struct {
	GroupKind string
	RuntimeObject []byte
}
// getTknResources parses a single manifest file into Tekton objects.
// Non-YAML files produce an empty result, and read failures are
// deliberately swallowed (best-effort scan); on success the returned
// slice holds one entry for the file with its MD5 hash and parsed
// Tekton documents.
func getTknResources(file string) []parsedTknObjects {
	parsedObjs := []parsedTknObjects{}
	if utils.IsYAMLFile(file) {
		if filebuf, err := ioutil.ReadFile(file); err == nil {
			p := parsedTknObjects{}
			p.ManifestFilepath = file
			// MD5 is used only as a content fingerprint here.
			p.ManifestFilehash = fmt.Sprintf("%x", md5.Sum(filebuf))
			p.TknObjects = parseK8sYaml(filebuf)
			parsedObjs = append(parsedObjs, p)
		}
	}
	return parsedObjs
}
// parseK8sYaml splits a multi-document YAML buffer on the "---"
// separator and returns the documents whose Kind is exactly one of the
// accepted Tekton types (Task, TaskRun, Pipeline, TriggerBinding).
// Documents that fail to decode are skipped silently.
func parseK8sYaml(fileR []byte) []tknObject {
	dObjs := []tknObject{}
	// Anchor the pattern with ^...$: without anchors a Kind such as
	// "ClusterTask" or "PipelineRun" matched on a substring and was
	// wrongly accepted.
	acceptedK8sTypes := regexp.MustCompile(fmt.Sprintf("^(%s|%s|%s|%s)$",
		task, pipeline, taskrun, triggerbinding))
	fileAsString := string(fileR[:])
	sepYamlfiles := strings.Split(fileAsString, "---")
	for _, f := range sepYamlfiles {
		if strings.TrimSpace(f) == "" {
			// ignore empty documents between separators
			continue
		}
		decode := scheme.Codecs.UniversalDeserializer().Decode
		_, groupVersionKind, err := decode([]byte(f), nil, nil)
		if err != nil {
			continue
		}
		if acceptedK8sTypes.MatchString(groupVersionKind.Kind) {
			d := tknObject{}
			d.GroupKind = groupVersionKind.Kind
			d.RuntimeObject = []byte(f)
			dObjs = append(dObjs, d)
		}
	}
	return dObjs
}
|
package main
import "fmt"
/*
Slice operator s[low:high] yields the half-open range [low, high).
Rule: 0 <= low <= high <= cap(original slice) — low may exceed len(s)
as long as it stays within cap, as demonstrated below.
*/
// main demonstrates re-slicing beyond len but within cap: base has
// len 3 / cap 9, so base[4:8] is legal and yields len 4 / cap 5.
func main() {
	base := make([]int, 3, 9)
	fmt.Println(len(base), cap(base))
	tail := base[4:8]
	fmt.Println(len(tail), cap(tail))
}
|
package utils
import (
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/Al-un/alun-api/pkg/communication"
	"github.com/joho/godotenv"
)
// AlunEmailSender is a convenient interface to send an email from a specific
// no-reply Alun email. Implementations: AlunEmail (real SMTP-backed
// sending) and DummyEmail (no-op for tests).
type AlunEmailSender interface {
	SendNoReplyEmail(to []string, subject string, templateName string, emailData interface{}) error
}
// AlunEmail is the default production implementation of AlunEmailSender.
type AlunEmail struct {
	// Account holds the SMTP credentials/host used to send.
	Account communication.EmailConfiguration
	// Sender is the "From" identity, e.g. "Al-un.fr <no-reply@al-un.fr>".
	Sender string
	// TemplateFolder is the directory containing the HTML templates.
	TemplateFolder string
}
// DummyEmail prevents from sending real email and does nothing; useful
// in tests and local development.
type DummyEmail struct {
}
const (
	// defaultSender is used when EnvVarEmailSender is not set.
	defaultSender = "Al-un.fr <no-reply@al-un.fr>"
	// EmailTemplateUserRegistration when sending email for new user
	EmailTemplateUserRegistration = "user_registration"
	// EmailTemplateUserPwdReset when user is requesting a password reset
	EmailTemplateUserPwdReset = "user_pwd-reset"
)

var (
	// alunEmail is the lazily-initialised singleton returned by GetAlunEmail.
	alunEmail *AlunEmail
)
// SendNoReplyEmail sends an email from a no-reply account.
//
// The returned error is only for telling the calling method that something went
// wrong. Parent method is not expected to tell the error content to the client
// and error handling must be done by checking the logs.
//
// If templateName does not end up with `.html`, it is automatically appended
func (ae AlunEmail) SendNoReplyEmail(to []string, subject string, templateName string, emailData interface{}) error {
	// Automatic appending of `.html`. strings.HasSuffix also handles
	// template names shorter than five characters, on which the previous
	// templateName[len(templateName)-5:] slice expression panicked.
	if !strings.HasSuffix(templateName, ".html") {
		templateName = templateName + ".html"
	}
	// Build HTML email
	email, err := communication.NewEmailHTMLMessage(
		ae.Sender,
		to,
		subject,
		filepath.Join(ae.TemplateFolder, templateName),
		emailData,
	)
	if err != nil {
		utilsLogger.Info("Error when loading template %s: %v", templateName, err)
		return err
	}
	// Send
	err = ae.Account.Send(email)
	if err != nil {
		utilsLogger.Info("Error when sending email of template %s to %v: %v", templateName, to, err)
		return err
	}
	// All good
	return nil
}
// SendNoReplyEmail does nothing and always reports success; it exists so
// DummyEmail satisfies AlunEmailSender.
func (de DummyEmail) SendNoReplyEmail(to []string, subject string, templateName string, emailData interface{}) error {
	// Do nothing
	return nil
}
// GetAlunEmail loads the AlunEmail singleton, reading SMTP credentials
// and sender/template configuration from the environment on first use.
// An unparsable port is fatal.
func GetAlunEmail() *AlunEmail {
	if alunEmail == nil {
		// A missing .env file is tolerated: configuration may come from
		// real environment variables. The original silently discarded
		// this error in an empty branch; log it instead.
		if err := godotenv.Load(); err != nil {
			utilsLogger.Info("No .env file loaded: %v", err)
		}
		// Email account configuration
		accountUser := os.Getenv(EnvVarEmailUsername)
		accountPassword := os.Getenv(EnvVarEmailPassword)
		accountServer := os.Getenv(EnvVarEmailHost)
		accountPortText := os.Getenv(EnvVarEmailPort)
		accountPort, err := strconv.Atoi(accountPortText)
		if err != nil {
			utilsLogger.Fatal(2, "Error when parsing EmailServerPort <%s>: %v",
				accountPortText, err.Error())
		}
		// Extra email configuration: fall back to the default sender.
		sender := os.Getenv(EnvVarEmailSender)
		if sender == "" {
			sender = defaultSender
		}
		// Template configuration, resolved relative to the working directory.
		cwd, _ := os.Getwd()
		templateFolder := filepath.Join(cwd, "alun/utils/email_templates/")
		alunEmail = &AlunEmail{
			Account: communication.EmailConfiguration{
				Username: accountUser,
				Password: accountPassword,
				Host:     accountServer,
				Port:     accountPort,
			},
			Sender:         sender,
			TemplateFolder: templateFolder,
		}
	}
	return alunEmail
}
// GetDummyEmail generates a DummyEmail and keep the GetXXX singleton syntax
// to align with GetAlunEmail. A fresh value is returned each call; the
// type is stateless so this is equivalent to a singleton.
func GetDummyEmail() *DummyEmail {
	return &DummyEmail{}
}
|
package config
import (
"io/ioutil"
"regexp"
logger "github.com/sirupsen/logrus"
"github.com/smallfish/simpleyaml"
)
// Handlers wraps a parsed YAML configuration document and provides
// typed accessors over it. Populate it with ReadYaml before use.
type Handlers struct {
	yaml *simpleyaml.Yaml
}
// ReadYaml loads and parses the YAML config file at filename into h.
// Any read or parse failure is fatal: the process exits via the logger.
func (h *Handlers) ReadYaml(filename string) {
	source, err := ioutil.ReadFile(filename)
	if err != nil {
		// Include the underlying error so the failure cause is visible.
		logger.Fatalf("ERROR: reading config file failed => %s: %v", filename, err)
	}
	yaml, err := simpleyaml.NewYaml(source)
	if err != nil {
		// Fixed inconsistent separator typo ("+>" -> "=>") and added err.
		logger.Fatalf("ERROR: parsing config file failed => %s: %v", filename, err)
	}
	h.yaml = yaml
}
// getYamlValue walks the parsed YAML tree following args, where each
// element is either an int (sequence index) or a string (mapping key).
// Any other element type is fatal.
func (h *Handlers) getYamlValue(args []interface{}) *simpleyaml.Yaml {
	vYaml := h.yaml
	for _, arg := range args {
		// A type switch with a bound variable replaces the original
		// assert-then-check pattern, whose inner `ok` tests could never
		// fail inside their matching case branches.
		switch v := arg.(type) {
		case int:
			vYaml = vYaml.GetIndex(v)
		case string:
			vYaml = vYaml.Get(v)
		default:
			logger.Fatalf("ERROR: missing parameter in yaml => %#v", args)
		}
	}
	return vYaml
}
// GetPathInMuxFormat resolves a path string from the YAML tree and
// rewrites ":name" URL parameters into gorilla/mux "{name}" syntax.
// A missing path is fatal.
func (h *Handlers) GetPathInMuxFormat(args ...interface{}) string {
	node := h.getYamlValue(args)
	rawPath, err := node.String()
	if err != nil {
		logger.Fatalf("ERROR: path not found in yaml => %#v", args)
	}
	paramPattern := regexp.MustCompile("/\\:([a-zA-Z][a-zA-Z0-9]*)")
	return paramPattern.ReplaceAllString(rawPath, "/{$1}")
}
// GetYamlValueStr resolves the YAML node at args and returns it as a
// string; a missing or non-string node is fatal.
func (h *Handlers) GetYamlValueStr(args ...interface{}) string {
	node := h.getYamlValue(args)
	str, err := node.String()
	if err == nil {
		return str
	}
	logger.Fatalf("ERROR: missing parameter in yaml => %#v", args)
	return "" // unreachable: Fatalf exits
}
// GetYamlValue exposes getYamlValue publicly: it resolves and returns
// the raw YAML node at the path described by args.
func (h *Handlers) GetYamlValue(args ...interface{}) *simpleyaml.Yaml {
	return h.getYamlValue(args)
}
// NewConfig returns an empty Handlers; call ReadYaml on it to load a file.
func NewConfig() *Handlers {
	return &Handlers{}
}
|
package main
import (
"projects/DesignPatternsByGo/structuralPatterns/composite"
"fmt"
)
// main builds a small composite tree — a root with one leaf plus a
// nested composite holding two leaves — runs it, removes the nested
// composite, and runs it again.
func main() {
	root := composite.NewComponent(func() {
		fmt.Println("My name is:" + "root")
	}, true).(*composite.Composite)

	root.Add(composite.NewComponent(func() {
		fmt.Println("I'm Leaf.")
	}, false).(composite.Component))

	branch := composite.NewComponent(func() {
		fmt.Println("I'm Composite2.")
	}, true).(*composite.Composite)
	branch.Add(composite.NewComponent(func() {
		fmt.Println("I'm Leaf2.")
	}, false).(composite.Component))
	branch.Add(composite.NewComponent(func() {
		fmt.Println("I'm Leaf3.")
	}, false).(composite.Component))

	branchHandle := root.Add(branch)
	root.Operation()
	root.Remove(branchHandle)
	root.Operation()
}
|
package main
import (
"fmt"
)
// main exercises search over several slices, looking for the value 5
// in each and printing the resulting index (-1 when absent).
func main() {
	inputs := [][]int{
		{2, 3, 4, 5, 6},
		{2, 3, 5, 7, 11, 13, 7, 19, 23},
		{0, 10, 20, 30, 40, 50},
		{3, 4, 34, 45, 56, 67},
	}
	for _, s := range inputs {
		fmt.Println("Looking for", 5, "in:", s)
		fmt.Printf("%d\n", search(s, 5))
	}
}
// search returns the index of the element e in the
// sorted slice s. If e does not exist in s search returns -1.
func search(s []int, e int) int {
	// Classic half-open binary search. The original had three defects:
	// the first midpoint was computed as `left + right/2` (operator
	// precedence bug — parentheses missing), an empty slice panicked on
	// the s[left] bounds probe, and the final else/return were
	// unreachable.
	left, right := 0, len(s)-1
	for left <= right {
		mid := (left + right) / 2
		switch {
		case s[mid] < e:
			left = mid + 1
		case s[mid] > e:
			right = mid - 1
		default:
			return mid
		}
	}
	return -1
}
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workflow
import (
"errors"
"github.com/koderover/zadig/lib/microservice/aslan/config"
"github.com/koderover/zadig/lib/microservice/aslan/core/common/dao/models/task"
commonrepo "github.com/koderover/zadig/lib/microservice/aslan/core/common/dao/repo"
"github.com/koderover/zadig/lib/tool/xlog"
)
// Queue wraps the persistent pipeline task queue collection and the
// logger used to report storage errors.
type Queue struct {
	pqColl *commonrepo.QueueColl
	log    *xlog.Logger
}
// NewPipelineQueue constructs a Queue backed by the shared queue
// collection, logging through the supplied logger.
func NewPipelineQueue(log *xlog.Logger) *Queue {
	q := &Queue{log: log}
	q.pqColl = commonrepo.NewQueueColl()
	return q
}
// List returns every queued entry converted to a Task. A listing error
// is logged and treated as an empty queue.
func (q *Queue) List() []*task.Task {
	queues, err := q.pqColl.List(new(commonrepo.ListQueueOption))
	if err != nil {
		q.log.Errorf("pqColl.List error: %v", err)
	}
	result := make([]*task.Task, 0, len(queues))
	for _, item := range queues {
		result = append(result, ConvertQueueToTask(item))
	}
	return result
}
// Push enqueues a task. If a task for the same pipeline already exists
// and concurrent runs are not allowed (multi=false), the new task is
// marked blocked. Tasks otherwise default to the "created" status.
func (q *Queue) Push(pt *task.Task) error {
	if pt == nil {
		return errors.New("nil task")
	}
	if !pt.MultiRun {
		opt := &commonrepo.ListQueueOption{
			PipelineName: pt.PipelineName,
		}
		// Any entry for this pipeline already in the queue blocks the
		// newcomer; listing errors fall through to a plain create.
		tasks, err := q.pqColl.List(opt)
		if err == nil && len(tasks) > 0 {
			q.log.Infof("blocked task received: %v %v %v", pt.CreateTime, pt.TaskID, pt.PipelineName)
			pt.Status = config.StatusBlocked
		}
	}
	if err := q.pqColl.Create(ConvertTaskToQueue(pt)); err != nil {
		q.log.Errorf("pqColl.Create error: %v", err)
		return err
	}
	return nil
}
// NextWaitingTask returns the first waiting task that has not yet been
// assigned to an agent, or an error when none is available.
func (q *Queue) NextWaitingTask() (*task.Task, error) {
	waiting, err := q.pqColl.List(&commonrepo.ListQueueOption{Status: config.StatusWaiting})
	if err != nil {
		return nil, err
	}
	for _, candidate := range waiting {
		if candidate.AgentID == "" {
			return ConvertQueueToTask(candidate), nil
		}
	}
	return nil, errors.New("no waiting task found")
}
//func (q *Queue) NextBlockedTask() (*task.Task, error) {
// opt := &commonrepo.ListQueueOption{
// Status: config.StatusBlocked,
// }
//
// tasks, err := q.pqColl.List(opt)
// if err != nil || len(tasks) == 0 {
// return nil, errors.New("no blocked task found")
// }
// return tasks[0], nil
//}
// BlockedTaskQueue returns every blocked task, or an error when there
// are none (or the listing itself fails).
func (q *Queue) BlockedTaskQueue() ([]*task.Task, error) {
	blocked, err := q.pqColl.List(&commonrepo.ListQueueOption{Status: config.StatusBlocked})
	if err != nil || len(blocked) == 0 {
		return nil, errors.New("no blocked task found")
	}
	result := make([]*task.Task, 0, len(blocked))
	for _, item := range blocked {
		result = append(result, ConvertQueueToTask(item))
	}
	return result, nil
}
// UpdateAgent assigns the agent identified by agentID to the queue
// entry keyed by (taskID, pipelineName, createTime).
func (q *Queue) UpdateAgent(taskID int64, pipelineName string, createTime int64, agentID string) error {
	return q.pqColl.UpdateAgent(taskID, pipelineName, createTime, agentID)
}
// Update persists the task's queue entry and reports success as a bool;
// the underlying error, if any, is discarded.
func (q *Queue) Update(task *task.Task) bool {
	return q.pqColl.Update(ConvertTaskToQueue(task)) == nil
}
// Remove deletes the task's queue entry, logging any failure before
// returning it.
func (q *Queue) Remove(task *task.Task) error {
	err := q.pqColl.Delete(ConvertTaskToQueue(task))
	if err != nil {
		q.log.Errorf("pqColl.Delete error: %v", err)
	}
	return err
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/errorutil"
"github.com/cockroachdb/errors"
)
// scatterNode is the plan node for ALTER TABLE/INDEX ... SCATTER; all
// run-time state lives in run.
type scatterNode struct {
	optColumnsSlot
	run scatterRun
}
// Scatter moves ranges to random stores
// (`ALTER TABLE/INDEX ... SCATTER ...` statement)
// Privileges: INSERT on table.
func (p *planner) Scatter(ctx context.Context, n *tree.Scatter) (planNode, error) {
	if !p.ExecCfg().Codec.ForSystemTenant() {
		return nil, errorutil.UnsupportedWithMultiTenancy(54255)
	}
	tableDesc, index, err := p.getTableAndIndex(ctx, &n.TableOrIndex, privilege.INSERT)
	if err != nil {
		return nil, err
	}
	var span roachpb.Span
	if n.From == nil {
		// No FROM/TO specified; the span is the entire table/index.
		span = tableDesc.IndexSpan(p.ExecCfg().Codec, index.ID)
	} else {
		switch {
		case len(n.From) == 0:
			return nil, errors.Errorf("no columns in SCATTER FROM expression")
		case len(n.From) > len(index.ColumnIDs):
			return nil, errors.Errorf("too many columns in SCATTER FROM expression")
		case len(n.To) == 0:
			return nil, errors.Errorf("no columns in SCATTER TO expression")
		case len(n.To) > len(index.ColumnIDs):
			return nil, errors.Errorf("too many columns in SCATTER TO expression")
		}
		// Calculate the desired types for the select statement:
		// - column values; it is OK if the select statement returns fewer columns
		// (the relevant prefix is used).
		desiredTypes := make([]*types.T, len(index.ColumnIDs))
		for i, colID := range index.ColumnIDs {
			c, err := tableDesc.FindColumnWithID(colID)
			if err != nil {
				return nil, err
			}
			desiredTypes[i] = c.GetType()
		}
		fromVals := make([]tree.Datum, len(n.From))
		for i, expr := range n.From {
			typedExpr, err := p.analyzeExpr(
				ctx, expr, nil, tree.IndexedVarHelper{}, desiredTypes[i], true, "SCATTER",
			)
			if err != nil {
				return nil, err
			}
			fromVals[i], err = typedExpr.Eval(p.EvalContext())
			if err != nil {
				return nil, err
			}
		}
		// BUGFIX: size toVals by len(n.To), not len(n.From); when TO had
		// more columns than FROM the loop below indexed past the end.
		toVals := make([]tree.Datum, len(n.To))
		for i, expr := range n.To {
			typedExpr, err := p.analyzeExpr(
				ctx, expr, nil, tree.IndexedVarHelper{}, desiredTypes[i], true, "SCATTER",
			)
			if err != nil {
				return nil, err
			}
			toVals[i], err = typedExpr.Eval(p.EvalContext())
			if err != nil {
				return nil, err
			}
		}
		span.Key, err = getRowKey(p.ExecCfg().Codec, tableDesc, index, fromVals)
		if err != nil {
			return nil, err
		}
		span.EndKey, err = getRowKey(p.ExecCfg().Codec, tableDesc, index, toVals)
		if err != nil {
			return nil, err
		}
		// Tolerate reversing FROM and TO; this can be useful for descending
		// indexes.
		if cmp := span.Key.Compare(span.EndKey); cmp > 0 {
			span.Key, span.EndKey = span.EndKey, span.Key
		} else if cmp == 0 {
			// Key==EndKey is invalid, so special-case when the user's FROM and
			// TO are the same tuple.
			span.EndKey = span.EndKey.Next()
		}
	}
	return &scatterNode{
		run: scatterRun{
			span: span,
		},
	}, nil
}
// scatterRun contains the run-time state of scatterNode during local execution.
type scatterRun struct {
	// span is the key range submitted to AdminScatter.
	span roachpb.Span
	// rangeIdx is the cursor into ranges; it starts at -1 so the first
	// Next() lands on index 0.
	rangeIdx int
	// ranges holds the spans of the ranges touched by the scatter.
	ranges []roachpb.Span
}
// startExec issues the AdminScatter request (with lease randomization)
// over the node's span and records the resulting range spans so that
// Next/Values can iterate over them.
func (n *scatterNode) startExec(params runParams) error {
	db := params.p.ExecCfg().DB
	req := &roachpb.AdminScatterRequest{
		RequestHeader:   roachpb.RequestHeader{Key: n.run.span.Key, EndKey: n.run.span.EndKey},
		RandomizeLeases: true,
	}
	res, pErr := kv.SendWrapped(params.ctx, db.NonTransactionalSender(), req)
	if pErr != nil {
		return pErr.GoError()
	}
	scatterRes := res.(*roachpb.AdminScatterResponse)
	// Start the cursor one before the first row: Next() pre-increments.
	n.run.rangeIdx = -1
	n.run.ranges = make([]roachpb.Span, len(scatterRes.RangeInfos))
	for i, rangeInfo := range scatterRes.RangeInfos {
		n.run.ranges[i] = roachpb.Span{
			Key:    rangeInfo.Desc.StartKey.AsRawKey(),
			EndKey: rangeInfo.Desc.EndKey.AsRawKey(),
		}
	}
	return nil
}
// Next advances the cursor to the following scattered range; it reports
// false once every range has been produced.
func (n *scatterNode) Next(params runParams) (bool, error) {
	n.run.rangeIdx++
	return n.run.rangeIdx < len(n.run.ranges), nil
}
// Values returns the current range's start key as raw bytes plus its
// pretty-printed form.
func (n *scatterNode) Values() tree.Datums {
	r := n.run.ranges[n.run.rangeIdx]
	return tree.Datums{
		tree.NewDBytes(tree.DBytes(r.Key)),
		tree.NewDString(keys.PrettyPrint(nil /* valDirs */, r.Key)),
	}
}
func (*scatterNode) Close(ctx context.Context) {}
|
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: jqmp (jaqueramaphan@gmail.com)
package rpc
import (
"net"
"testing"
"github.com/cockroachdb/cockroach/proto"
"github.com/cockroachdb/cockroach/testutils"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/stop"
)
// checkUpdateMatches asserts that updatedAddr(old, new) succeeds and that the
// resulting address string equals expAddrString.
func checkUpdateMatches(t *testing.T, network, oldAddrString, newAddrString, expAddrString string) {
	oldAddr := util.MakeUnresolvedAddr(network, oldAddrString)
	newAddr := util.MakeUnresolvedAddr(network, newAddrString)
	expAddr := util.MakeUnresolvedAddr(network, expAddrString)

	retAddr, err := updatedAddr(oldAddr, newAddr)
	if err != nil {
		t.Fatalf("updatedAddr failed on %v, %v: %v", oldAddr, newAddr, err)
	}
	if got := retAddr.String(); got != expAddrString {
		t.Fatalf("updatedAddr(%v, %v) was %s; expected %s", oldAddr, newAddr, retAddr, expAddr)
	}
}
// checkUpdateFails asserts that updatedAddr(old, new) returns an error.
func checkUpdateFails(t *testing.T, network, oldAddrString, newAddrString string) {
	oldAddr := util.MakeUnresolvedAddr(network, oldAddrString)
	newAddr := util.MakeUnresolvedAddr(network, newAddrString)
	if retAddr, err := updatedAddr(oldAddr, newAddr); err == nil {
		t.Fatalf("updatedAddr(%v, %v) should have failed; instead returned %v", oldAddr, newAddr, retAddr)
	}
}
// TestUpdatedAddr exercises updatedAddr across the TCP network variants and
// unix sockets.
func TestUpdatedAddr(t *testing.T) {
	defer leaktest.AfterTest(t)
	tcpNetworks := []string{"tcp", "tcp4", "tcp6"}
	for _, network := range tcpNetworks {
		checkUpdateMatches(t, network, "localhost:0", "127.0.0.1:1234", "localhost:1234")
		checkUpdateMatches(t, network, "localhost:1234", "127.0.0.1:1234", "localhost:1234")
		// This case emits a warning, but doesn't fail.
		checkUpdateMatches(t, network, "localhost:1234", "127.0.0.1:1235", "localhost:1235")
	}
	checkUpdateMatches(t, "unix", "address", "address", "address")
	checkUpdateFails(t, "unix", "address", "anotheraddress")
}
// TestDuplicateRegistration checks that registering the same public method
// name twice on a server succeeds the first time and fails the second.
func TestDuplicateRegistration(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	s := NewServer(util.CreateTestAddr("tcp"), NewNodeTestContext(nil, stopper))
	hb := &Heartbeat{}
	const method = "Foo.Bar"
	if err := s.RegisterPublic(method, hb.Ping, &proto.PingRequest{}); err != nil {
		t.Fatalf("unexpected failure on first registration: %s", err)
	}
	if err := s.RegisterPublic(method, hb.Ping, &proto.PingRequest{}); err == nil {
		t.Fatalf("unexpected success on second registration")
	}
}
// TestUnregisteredMethod verifies that invoking an unknown RPC method fails
// cleanly and leaves the connection usable for subsequent requests.
func TestUnregisteredMethod(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeContext := NewNodeTestContext(nil, stopper)
	s := createAndStartNewServer(t, nodeContext)
	addrs := []net.Addr{s.Addr()}
	opts := Options{N: 1}
	// Sending an invalid method fails cleanly, but leaves the connection
	// in a valid state.
	_, err := sendRPC(opts, addrs, nodeContext, "Foo.Bar",
		&proto.PingRequest{}, &proto.PingResponse{})
	if !testutils.IsError(err, ".*rpc: couldn't find method: Foo.Bar") {
		t.Fatalf("expected 'couldn't find method' but got %s", err)
	}
	// The same connection must still serve a valid request.
	if _, err := sendPing(opts, addrs, nodeContext); err != nil {
		t.Fatalf("unexpected failure sending ping after unknown request: %s", err)
	}
}
|
package container
import "fmt"
// printArray demonstrates that Go arrays are value types: it takes a pointer
// so the mutation of element 0 below is visible to the caller, then prints
// every index/value pair.
func printArray(arr *[5]int) {
	arr[0] = 100
	for i := 0; i < len(arr); i++ {
		fmt.Println(i, arr[i])
	}
}
// main demonstrates array declaration forms ([n]T, literal, [...]T, 2-D) and
// in-place mutation through a pointer via printArray.
func main() {
	var arr1 [5]int
	arr2 := [3]int{3, 5, 6}
	arr3 := [...]int{2, 4, 5, 6, 7}
	var grid [4][5]int

	fmt.Println(arr1, arr2, arr3)
	fmt.Println(grid)

	fmt.Println("printArray(arr1)")
	printArray(&arr1)
	fmt.Println("printArray(arr3)")
	printArray(&arr3)

	// Both arrays now carry the mutation made inside printArray.
	fmt.Println("arr1 and arr3")
	fmt.Println(arr1, arr3)
}
|
package main
import (
"testing"
)
// TestFindValEffect verifies that findValEffect splits a "value:effect"
// string into its two components without error.
func TestFindValEffect(test *testing.T) {
	const input = "test:test"
	wantValue, wantEffect := "test", "test"
	test.Log("testing findValEffect")
	value, effect, err := findValEffect(input)
	switch {
	case err != nil:
		test.Errorf("%v", err)
	case value != wantValue:
		test.Errorf("Expected %+v got %+v", wantValue, value)
	case effect != wantEffect:
		test.Errorf("Expected %+v got %+v", wantEffect, effect)
	}
}
|
package server
import "github.com/go-kit/kit/log"
// Logger fazzkit logger option: bundles a go-kit logger with identifying
// metadata. How the Namespace/Subsystem/Action/Domain fields are consumed is
// not visible here; presumably they label emitted log entries — confirm at
// call sites.
type Logger struct {
	Logger    log.Logger // underlying go-kit logger implementation
	Namespace string
	Subsystem string
	Action    string
	Domain    string
}
|
package v1
import (
"fmt"
"github.com/gin-gonic/gin"
"go.rock.com/rock-platform/rock/server/clients/k8s"
"go.rock.com/rock-platform/rock/server/database/api"
"go.rock.com/rock-platform/rock/server/utils"
"k8s.io/api/core/v1"
"net/http"
"strconv"
"strings"
"time"
)
// NodeLabel is one kubernetes node label flattened into a key/value pair for
// the JSON API response.
type NodeLabel struct {
	Key   string `json:"key" example:"beta.kubernetes.io/os"`
	Value string `json:"value" example:"linux"`
}
// NodeAnnotation is one kubernetes node annotation flattened into a
// key/value pair for the JSON API response.
type NodeAnnotation struct {
	Key   string `json:"key" example:"flannel.alpha.coreos.com/backend-type"`
	Value string `json:"value" example:"host-gw"`
}
// ClusterNodeResp is the API response shape for one node of a specific
// cluster, flattened from the kubernetes v1.Node object (see formatNodeResp).
type ClusterNodeResp struct {
	Name                    string            `json:"name" example:"kubernetes-master1"`
	UID                     string            `json:"uid" example:"3550d3f1-51b4-41e7-ba65-83d029f31e2b"`
	Labels                  []*NodeLabel      `json:"labels"`
	Annotations             []*NodeAnnotation `json:"annotations"`
	PodCIDR                 string            `json:"pod_cidr" example:"10.244.0.0/24"`
	Unschedulable           bool              `json:"unschedulable" example:"false"`
	KernelVersion           string            `json:"kernel_version" example:"4.18.0-193.6.3.el8_2.x86_64"`
	OSImage                 string            `json:"os_image" example:"CentOS Linux 8 (Core)"`
	OS                      string            `json:"os" example:"linux"`
	Architecture            string            `json:"architecture" example:"amd64"`
	ContainerRunTimeVersion string            `json:"container_run_time_version" example:"docker://19.3.4"`
	InternalIP              string            `json:"internal_ip" example:"10.10.10.10"` // taken from Status.Addresses
	Hostname                string            `json:"hostname" example:"kubernetes-master1"` // taken from Status.Addresses
	CreatedAt               time.Time         `json:"created_at" example:"2021-02-13T18:12:05+08:00"`
}
// NodeReq binds the URI parameters identifying a node: the cluster's database
// id plus the kubernetes node name.
type NodeReq struct {
	Id   int64  `json:"id" uri:"id" binding:"required,min=1" example:"1"`
	Name string `json:"name" uri:"name" binding:"required" example:"kubernetes-master1"`
}
// GlobalNodeResp is the API response shape for a node in the cross-cluster
// listing; compared to ClusterNodeResp it additionally carries the owning
// cluster's name and id (recovered from the console.cluster.* annotations,
// see formatGlobalNodeResp).
type GlobalNodeResp struct {
	Name                    string            `json:"name" example:"kubernetes-master1"`
	ClusterName             string            `json:"cluster_name,omitempty" binding:"required" example:"devops"`
	ClusterId               int               `json:"cluster_id,omitempty" binding:"required" example:"1"`
	UID                     string            `json:"uid" example:"3550d3f1-51b4-41e7-ba65-83d029f31e2b"`
	Labels                  []*NodeLabel      `json:"labels"`
	Annotations             []*NodeAnnotation `json:"annotations"`
	PodCIDR                 string            `json:"pod_cidr" example:"10.244.0.0/24"`
	Unschedulable           bool              `json:"unschedulable" example:"false"`
	KernelVersion           string            `json:"kernel_version" example:"4.18.0-193.6.3.el8_2.x86_64"`
	OSImage                 string            `json:"os_image" example:"CentOS Linux 8 (Core)"`
	OS                      string            `json:"os" example:"linux"`
	Architecture            string            `json:"architecture" example:"amd64"`
	ContainerRunTimeVersion string            `json:"container_run_time_version" example:"docker://19.3.4"`
	InternalIP              string            `json:"internal_ip" example:"10.10.10.10"`
	Hostname                string            `json:"hostname" example:"kubernetes-master1"`
	CreatedAt               time.Time         `json:"created_at" example:"2021-02-13T18:12:05+08:00"`
}
// @Summary Get specific cluster's all nodes
// @Description api for get specific cluster's all nodes
// @Tags CLUSTER
// @Accept json
// @Produce json
// @Param id path integer true "Cluster ID"
// @Success 200 {array} v1.ClusterNodeResp "StatusOK"
// @Failure 400 {object} utils.HTTPError "StatusBadRequest"
// @Failure 404 {object} utils.HTTPError "StatusNotFound"
// @Failure 500 {object} utils.HTTPError "StatusInternalServerError"
// @Router /v1/clusters/{id}/nodes [get]
func (c *Controller) GetClusterNodes(ctx *gin.Context) {
	// Errors are surfaced by panicking, as in the sibling handlers;
	// presumably recovered by upstream middleware — confirm.
	must := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	var idReq IdReq // cluster id bound from the URI
	must(ctx.ShouldBindUri(&idReq))
	cluster, err := api.GetClusterById(idReq.Id)
	must(err)
	nodeList, err := k8s.GetClusterNodes(cluster.Config)
	must(err)
	nodes, err := formatNodesResp(nodeList.Items)
	must(err)
	c.Logger.Infof("Get specific cluster all nodes by cluster id:%v", idReq.Id)
	ctx.JSON(http.StatusOK, nodes)
}
// formatNodesResp converts a list of kubernetes nodes into the API response
// shape, delegating per-node conversion to formatNodeResp.
//
// It returns a pointer to the slice to preserve the existing interface.
func formatNodesResp(nodeList []v1.Node) (*[]ClusterNodeResp, error) {
	// Pre-size the result and iterate by index: v1.Node is a very large
	// struct, so ranging by value would copy every element.
	nodesResp := make([]ClusterNodeResp, 0, len(nodeList))
	for i := range nodeList {
		nodeResp, err := formatNodeResp(&nodeList[i])
		if err != nil {
			return nil, err
		}
		nodesResp = append(nodesResp, *nodeResp)
	}
	return &nodesResp, nil
}
// formatNodeResp converts a single kubernetes node into the ClusterNodeResp
// API shape: scalar metadata is copied over, the label/annotation maps are
// flattened into key/value pair lists, and the internal IP and hostname are
// extracted from the node's address list.
//
// The error return is kept for interface symmetry with the callers; this
// implementation never fails.
func formatNodeResp(node *v1.Node) (*ClusterNodeResp, error) {
	resp := &ClusterNodeResp{
		Name:                    node.Name,
		UID:                     string(node.UID),
		PodCIDR:                 node.Spec.PodCIDR,
		Unschedulable:           node.Spec.Unschedulable,
		KernelVersion:           node.Status.NodeInfo.KernelVersion,
		OSImage:                 node.Status.NodeInfo.OSImage,
		OS:                      node.Status.NodeInfo.OperatingSystem,
		Architecture:            node.Status.NodeInfo.Architecture,
		ContainerRunTimeVersion: node.Status.NodeInfo.ContainerRuntimeVersion,
		CreatedAt:               node.CreationTimestamp.Time,
	}
	// Use the typed NodeAddressType constants from k8s.io/api/core/v1
	// instead of bare strings.
	for _, addr := range node.Status.Addresses {
		switch addr.Type {
		case v1.NodeInternalIP:
			resp.InternalIP = addr.Address
		case v1.NodeHostName:
			resp.Hostname = addr.Address
		}
	}
	// The slices are always initialized (never nil) so they serialize as []
	// rather than null when the node has no labels/annotations.
	labels := make([]*NodeLabel, 0, len(node.ObjectMeta.Labels))
	for key, value := range node.ObjectMeta.Labels {
		labels = append(labels, &NodeLabel{Key: key, Value: value})
	}
	resp.Labels = labels
	annotations := make([]*NodeAnnotation, 0, len(node.ObjectMeta.Annotations))
	for key, value := range node.ObjectMeta.Annotations {
		annotations = append(annotations, &NodeAnnotation{Key: key, Value: value})
	}
	resp.Annotations = annotations
	return resp, nil
}
// formatGlobalNodesResp converts nodes gathered from every cluster into the
// global API response shape, delegating to formatGlobalNodeResp per node.
func formatGlobalNodesResp(nodes []v1.Node) ([]*GlobalNodeResp, error) {
	// Pre-size the result and iterate by index: v1.Node is a very large
	// struct, so ranging by value would copy every element.
	clusterNodes := make([]*GlobalNodeResp, 0, len(nodes))
	for i := range nodes {
		clusterNode, err := formatGlobalNodeResp(&nodes[i])
		if err != nil {
			return nil, err
		}
		clusterNodes = append(clusterNodes, clusterNode)
	}
	return clusterNodes, nil
}
// formatGlobalNodeResp converts a single kubernetes node into the global API
// response shape. It expects GetGlobalNodes to have stashed the owning
// cluster's name and id in the "console.cluster.*" annotations; those
// bookkeeping annotations are consumed here and excluded from the response.
//
// Returns a 400 rock error when the stashed cluster id is not an integer.
func formatGlobalNodeResp(node *v1.Node) (*GlobalNodeResp, error) {
	resp := &GlobalNodeResp{
		Name:                    node.Name,
		UID:                     string(node.UID),
		PodCIDR:                 node.Spec.PodCIDR,
		Unschedulable:           node.Spec.Unschedulable,
		KernelVersion:           node.Status.NodeInfo.KernelVersion,
		OSImage:                 node.Status.NodeInfo.OSImage,
		OS:                      node.Status.NodeInfo.OperatingSystem,
		Architecture:            node.Status.NodeInfo.Architecture,
		ContainerRunTimeVersion: node.Status.NodeInfo.ContainerRuntimeVersion,
		CreatedAt:               node.CreationTimestamp.Time,
	}
	// Recover the cluster identity stashed by GetGlobalNodes (single map
	// lookup instead of repeating it in the error path).
	rawClusterId := node.Annotations["console.cluster.id"]
	clusterId, err := strconv.Atoi(rawClusterId)
	if err != nil {
		return nil, utils.NewRockError(400, 40000023, fmt.Sprintf("cluster id %s can't be converted int", rawClusterId))
	}
	resp.ClusterName = node.Annotations["console.cluster.name"]
	resp.ClusterId = clusterId
	// Typed NodeAddressType constants instead of bare strings.
	for _, addr := range node.Status.Addresses {
		switch addr.Type {
		case v1.NodeInternalIP:
			resp.InternalIP = addr.Address
		case v1.NodeHostName:
			resp.Hostname = addr.Address
		}
	}
	// Always-initialized slices serialize as [] rather than null.
	labels := make([]*NodeLabel, 0, len(node.Labels))
	for key, value := range node.Labels {
		labels = append(labels, &NodeLabel{Key: key, Value: value})
	}
	resp.Labels = labels
	annotations := []*NodeAnnotation{}
	for key, value := range node.ObjectMeta.Annotations {
		// Skip the console.cluster.* bookkeeping annotations injected by
		// GetGlobalNodes; they are not real node annotations.
		if strings.HasPrefix(key, "console.cluster") {
			continue
		}
		annotations = append(annotations, &NodeAnnotation{Key: key, Value: value})
	}
	resp.Annotations = annotations
	return resp, nil
}
// @Summary Get a specific cluster node
// @Description api for get a specific cluster node
// @Tags CLUSTER
// @Accept json
// @Produce json
// @Param id path integer true "Cluster ID"
// @Param name path string true "Node name"
// @Success 200 {object} v1.ClusterNodeResp "StatusOK"
// @Failure 400 {object} utils.HTTPError "StatusBadRequest"
// @Failure 404 {object} utils.HTTPError "StatusNotFound"
// @Failure 500 {object} utils.HTTPError "StatusInternalServerError"
// @Router /v1/clusters/{id}/nodes/{name} [get]
func (c *Controller) GetClusterNode(ctx *gin.Context) {
	// Errors are surfaced by panicking, as in the sibling handlers.
	must := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	var nodeReq NodeReq // cluster id + kubernetes node name from the URI
	must(ctx.ShouldBindUri(&nodeReq))
	cluster, err := api.GetClusterById(nodeReq.Id)
	must(err)
	node, err := k8s.GetClusterNode(cluster.Config, nodeReq.Name)
	must(err)
	resp, err := formatNodeResp(node)
	must(err)
	c.Logger.Infof("Get specific cluster node by cluster id(%v) and node name(%v)", nodeReq.Id, node.Name)
	ctx.JSON(http.StatusOK, resp)
}
// @Summary Get cluster's all nodes
// @Description api for get cluster's all nodes
// @Tags NODE
// @Accept json
// @Produce json
// @Success 200 {object} v1.GlobalNodeResp "StatusOK"
// @Failure 400 {object} utils.HTTPError "StatusBadRequest"
// @Failure 404 {object} utils.HTTPError "StatusNotFound"
// @Failure 500 {object} utils.HTTPError "StatusInternalServerError"
// @Router /v1/nodes [get]
func (c *Controller) GetGlobalNodes(ctx *gin.Context) {
	clusters, err := api.GetClustersWithoutPagination()
	if err != nil {
		panic(err)
	}
	// Keep the non-nil literal so an empty result encodes as [] rather than null.
	nodes := []v1.Node{}
	for _, cluster := range clusters {
		nodeList, err := k8s.GetClusterNodes(cluster.Config) // fetch the cluster's node list via its admin.conf
		if err != nil {
			// Best effort: a single unreachable cluster must not fail the whole listing.
			c.Logger.Warnf("Get cluster(%v)'s node failed, please check it", cluster.Name)
			continue
		}
		// Stash cluster.Name and cluster.Id on each node's annotations so the
		// per-node formatter can recover them later. Iterate by index rather
		// than by value: writing to a nil map panics, and initializing the
		// map on a range-value copy would not propagate back into the slice.
		for i := range nodeList.Items {
			if nodeList.Items[i].Annotations == nil {
				nodeList.Items[i].Annotations = map[string]string{}
			}
			nodeList.Items[i].Annotations["console.cluster.name"] = cluster.Name               // e.g. 10.151.3.99-devops-env
			nodeList.Items[i].Annotations["console.cluster.id"] = strconv.Itoa(int(cluster.Id)) // numeric cluster id
		}
		nodes = append(nodes, nodeList.Items...)
	}
	resp, err := formatGlobalNodesResp(nodes)
	if err != nil {
		panic(err)
	}
	c.Logger.Infof("Get all nodes, the nodes length is %v", len(resp))
	ctx.JSON(http.StatusOK, resp)
}
|
package persist
import (
"fmt"
"testing"
"github.com/bww/godb/test"
"github.com/bww/godb/uuid"
)
import (
"github.com/stretchr/testify/assert"
)
// TestCRUD exercises the full store/fetch/delete lifecycle of an entity,
// including cascading to its foreign relation and transience checks.
func TestCRUD(t *testing.T) {
	cxt := test.DB()
	if !assert.NotNil(t, cxt) {
		return
	}

	// Formats an error for use as an assertion message.
	errMsg := func(e error) string { return fmt.Sprintf("%v", e) }

	entities := &entityPersister{New(cxt)}
	foreigns := &foreignPersister{New(cxt)}

	foreign := &foreignTester{Value: "Foreign value"}
	err := foreigns.StoreTesterEntity(foreign, 0, nil)
	assert.Nil(t, err, errMsg(err))

	entity := &entityTester{Id: "", Name: "This is the name", Foreign: foreign, Named: &namedInlineTester{true, "Named inline struct B"}}
	entity.Inline.A = "Anonymous inline struct A"
	entity.Inline.B = 998877

	// With an empty id the entity must be transient.
	trans, err := entities.IsTransient(entity, cxt)
	if assert.Nil(t, err, errMsg(err)) {
		assert.Equal(t, true, trans)
	}

	// Assigning an id does not persist it; it is still transient.
	entity.Id = uuid.New().String()
	trans, err = entities.IsTransient(entity, cxt)
	if assert.Nil(t, err, errMsg(err)) {
		assert.Equal(t, true, trans)
	}

	// Store with cascade and read it back; the copies must be equal.
	err = entities.StoreTesterEntity(entity, StoreOptionCascade, nil)
	assert.Nil(t, err, errMsg(err))
	fetched, err := entities.FetchTesterEntity(entity.Id, FetchOptionCascade, nil)
	if assert.Nil(t, err, errMsg(err)) {
		assert.Equal(t, entity, fetched)
	}

	// Once stored, the entity is no longer transient.
	trans, err = entities.IsTransient(entity, cxt)
	if assert.Nil(t, err, errMsg(err)) {
		assert.Equal(t, false, trans)
	}

	// A ranged fetch should yield exactly our one entity.
	all, err := entities.FetchTesterEntities(Range{0, 100}, FetchOptionCascade, nil)
	if assert.Nil(t, err, errMsg(err)) {
		assert.Equal(t, []*entityTester{entity}, all)
	}

	// Delete it and confirm it reads as transient again.
	err = entities.DeleteTesterEntity(entity, 0, nil)
	assert.Nil(t, err, errMsg(err))
	trans, err = entities.IsTransient(entity, cxt)
	if assert.Nil(t, err, errMsg(err)) {
		assert.Equal(t, true, trans)
	}
}
// TestFetchOne stores n entities one at a time and verifies each can be
// fetched back by id as an exact copy.
func TestFetchOne(t *testing.T) {
	cxt := test.DB()
	// Guard against a missing test database, as TestCRUD does, so the test
	// fails cleanly instead of dereferencing a nil context.
	if !assert.NotNil(t, cxt) {
		return
	}
	pe := &entityPersister{New(cxt)}
	n := 100
	// Start from an empty table so earlier tests cannot interfere.
	_, err := cxt.Exec(fmt.Sprintf("DELETE FROM %s", table))
	if !assert.Nil(t, err, fmt.Sprintf("%v", err)) {
		return
	}
	for i := 0; i < n; i++ {
		e := &entityTester{Name: fmt.Sprintf("%04d This is the name", i), Named: &namedInlineTester{true, fmt.Sprintf("Named inline struct B #%d", i)}}
		e.Inline.A = fmt.Sprintf("Anonymous inline struct A #%d", i)
		e.Inline.B = i
		err := pe.StoreTesterEntity(e, StoreOptionCascade, nil)
		assert.Nil(t, err, fmt.Sprintf("%v", err))
		// Round-trip: the fetched entity must equal what was stored.
		r, err := pe.FetchTesterEntity(e.Id, FetchOptionCascade, nil)
		if assert.Nil(t, err, fmt.Sprintf("%v", err)) {
			assert.Equal(t, e, r)
		}
	}
}
// TestFetchMany stores n entities and verifies a single ranged fetch
// returns all of them.
func TestFetchMany(t *testing.T) {
	cxt := test.DB()
	// Guard against a missing test database (consistent with TestCRUD).
	if !assert.NotNil(t, cxt) {
		return
	}
	pe := &entityPersister{New(cxt)}
	n := 100
	// Start from an empty table so earlier tests cannot interfere.
	_, err := cxt.Exec(fmt.Sprintf("DELETE FROM %s", table))
	if !assert.Nil(t, err, fmt.Sprintf("%v", err)) {
		return
	}
	check := make([]*entityTester, n)
	for i := 0; i < n; i++ {
		e := &entityTester{Name: fmt.Sprintf("%04d This is the name", i), Named: &namedInlineTester{true, fmt.Sprintf("Named inline struct B #%d", i)}}
		e.Inline.A = fmt.Sprintf("Anonymous inline struct A #%d", i)
		e.Inline.B = i
		err := pe.StoreTesterEntity(e, StoreOptionCascade, nil)
		assert.Nil(t, err, fmt.Sprintf("%v", err))
		check[i] = e
	}
	// A single ranged fetch must return exactly the stored entities.
	a, err := pe.FetchTesterEntities(Range{0, n}, FetchOptionCascade, nil)
	if assert.Nil(t, err, fmt.Sprintf("%v", err)) {
		assert.Equal(t, check, a)
	}
}
// TestFetchIter stores n entities and verifies that iterating over them
// yields every stored entity exactly once.
func TestFetchIter(t *testing.T) {
	cxt := test.DB()
	// Guard against a missing test database (consistent with TestCRUD).
	if !assert.NotNil(t, cxt) {
		return
	}
	pe := &entityPersister{New(cxt)}
	n := 1000
	// Start from an empty table so earlier tests cannot interfere.
	_, err := cxt.Exec(fmt.Sprintf("DELETE FROM %s", table))
	if !assert.Nil(t, err, fmt.Sprintf("%v", err)) {
		return
	}
	check := make([]*entityTester, n)
	for i := 0; i < n; i++ {
		e := &entityTester{Name: fmt.Sprintf("%04d This is the name", i), Named: &namedInlineTester{true, fmt.Sprintf("Named inline struct B #%d", i)}}
		e.Inline.A = fmt.Sprintf("Anonymous inline struct A #%d", i)
		e.Inline.B = i
		err := pe.StoreTesterEntity(e, StoreOptionCascade, nil)
		assert.Nil(t, err, fmt.Sprintf("%v", err))
		check[i] = e
	}
	it, err := pe.IterTesterEntities(FetchOptionCascade, nil)
	if assert.Nil(t, err, fmt.Sprintf("%v", err)) {
		i := 0
		for ; it.Next(); i++ {
			x := &entityTester{}
			err = it.Scan(x)
			if assert.Nil(t, err, fmt.Sprintf("%v", err)) {
				assert.Equal(t, check[i], x)
			}
		}
		// Every stored entity must have been produced by the iterator.
		assert.Equal(t, n, i)
	}
}
|
package logic
import (
"github.com/stretchr/testify/assert"
"testing"
)
func Test_TagIncluePriorierThanExcludeOk(t *testing.T) {
assert := assert.New(t)
a := Activity{
IncludeTag: []Tag{1000101, 2000101},
ExcludeTag: []Tag{1000101, 2000101},
}
tag := []Tag{1000101, 2000101}
assert.True(a.TagOK(tag))
}
func Test_TagNormalInlucdeOk(t *testing.T) {
assert := assert.New(t)
a := Activity{
IncludeTag: []Tag{1000101, 2000101},
}
tag := []Tag{1000101, 2000101}
assert.True(a.TagOK(tag))
tag = []Tag{1000101}
assert.False(a.TagOK(tag))
tag = []Tag{}
assert.False(a.TagOK(tag))
tag = []Tag{2000101}
assert.False(a.TagOK(tag))
tag = []Tag{3000101}
assert.False(a.TagOK(tag))
tag = []Tag{1001000101, 20012000101}
assert.True(a.TagOK(tag))
}
func Test_TagNormalExlucdeOk(t *testing.T) {
assert := assert.New(t)
a := Activity{
ExcludeTag: []Tag{3000101, 4000101},
}
tag := []Tag{3000101, 4000101}
assert.False(a.TagOK(tag))
tag = []Tag{3000101}
assert.False(a.TagOK(tag))
tag = []Tag{4000101}
assert.False(a.TagOK(tag))
tag = []Tag{1004000101}
assert.False(a.TagOK(tag))
tag = []Tag{2000101}
assert.True(a.TagOK(tag))
}
func Test_TagNormalExlucdeAndIncludeOk(t *testing.T) {
assert := assert.New(t)
a := Activity{
IncludeTag: []Tag{1000101, 2000101},
ExcludeTag: []Tag{3000101, 4000101},
}
tag := []Tag{3000101, 4000101}
assert.False(a.TagOK(tag))
tag = []Tag{1000101, 2000101, 3000101}
assert.True(a.TagOK(tag))
tag = []Tag{1000101, 2000101, 4000101}
assert.True(a.TagOK(tag))
tag = []Tag{10001000101, 2000101, 4000101}
assert.True(a.TagOK(tag))
tag = []Tag{4000101}
assert.False(a.TagOK(tag))
}
func Test_Status(t *testing.T) {
assert := assert.New(t)
a := Activity{
filtered: false,
}
assert.True(a.Filtered())
a.Status = 1
assert.False(a.Filtered())
a.Status = 2
assert.True(a.Filtered())
a.filtered = true
assert.True(a.Filtered())
a.filtered = true
a.Status = 2
assert.True(a.Filtered())
}
|
package channel
import (
"regexp"
"errors"
)
// Subscribe Operation Topics
var (
	// SubscribeReceiveLightMeasurementRegex matches the channel of the
	// ReceiveLightMeasurement subscribe operation and captures its single
	// parameter segment (the streetlight id) as group 1.
	SubscribeReceiveLightMeasurementRegex = regexp.MustCompile("smartylighting/streetlights/1/0/event/(.+)/lighting/measured")
)
// Parse populates the fields of a ReceiveLightMeasurementParams instance with values
// extracted from a channel
func (params *ReceiveLightMeasurementParams) Parse(ch string) error {
match := SubscribeReceiveLightMeasurementRegex.FindStringSubmatch(ch)
if len(match) < 2 {
return errors.New("channel did not match expected format: " + ch)
}
// Map the struct fields to the order they will appear in the topic
fields := []*string{¶ms.StreetlightId}
for i, field := range fields {
// Populate params fields - skipping the first index of 'match' as
// captured groups start at index=1
*field = match[i+1]
}
return nil
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.