text stringlengths 11 4.05M |
|---|
package main
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"strings"
)
// Question pairs a quiz prompt with its expected answer, as loaded from
// questions.txt (one "question,answer" pair per line).
type Question struct {
	question string // prompt shown to the user
	answer   string // expected answer; spaces are stripped at load time
}
// main drives a small quiz: it loads question/answer pairs from
// questions.txt, prompts on stdin for each answer, and prints the score.
func main() {
	score := 0
	questions := getFileDetials()
	for _, q := range questions {
		fmt.Print("\n", q.question)
		// Compare the user's line directly against the stored answer
		// (which had its spaces stripped at load time).
		if getInputDetails() == q.answer {
			score++
			fmt.Println("correct ✅")
		} else {
			fmt.Println("wrong 👎")
		}
	}
	fmt.Println("this is your final score: ", score, "/", len(questions))
}
// getFileDetials loads question/answer pairs from questions.txt.
// Each line is "question,answer"; spaces in the answer are removed so user
// input can be compared verbatim. Blank or malformed lines (for example the
// empty line produced by a trailing newline) are skipped — the previous
// version panicked with an index-out-of-range on such lines.
func getFileDetials() []Question {
	fileName := "questions.txt"
	// ioutil.ReadFile kept (rather than os.ReadFile) so the file's existing
	// io/ioutil import does not become unused.
	data, err := ioutil.ReadFile(fileName)
	if err != nil {
		fmt.Println("an error occured ", err, " 😵💫")
		return nil
	}
	lines := strings.Split(string(data), "\n")
	allQuestions := make([]Question, 0, len(lines))
	for _, line := range lines {
		parts := strings.Split(line, ",")
		if len(parts) < 2 {
			continue // no comma: blank or malformed line, skip it
		}
		allQuestions = append(allQuestions, Question{
			question: parts[0],
			answer:   strings.ReplaceAll(parts[1], " ", ""),
		})
	}
	return allQuestions
}
// getInputDetails reads one line from stdin and returns it without its
// trailing newline. TrimRight also strips the '\r' left behind by
// Windows-style "\r\n" line endings, which TrimSuffix("\n") alone missed
// and which made every comparison fail on Windows terminals.
func getInputDetails() string {
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("\nEnter text: ")
	text, _ := reader.ReadString('\n') // on EOF we still use what was read
	return strings.TrimRight(text, "\r\n")
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/5/31 7:19 下午
# @File : decode_string.go
# @Description :
# @Attention :
*/
package v2
import (
"strconv"
"strings"
)
// decodeString expands a run-length-encoded string where "k[sub]" means sub
// repeated k times (nesting allowed), e.g. "3[a2[c]]" -> "accaccacc".
// Stack-based: literals and digits are pushed; on ']' the bracketed segment
// and its (possibly multi-digit) count are popped and the expansion pushed
// back. Fixes the previous digit test `c <= '9'`, which wrongly treated
// every byte below '0' (punctuation, '[', etc.) as part of the count, and
// avoids the extra reversal pass by slicing the stack directly.
func decodeString(s string) string {
	stack := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		if s[i] != ']' {
			stack = append(stack, s[i])
			continue
		}
		// Pop the segment back to its matching '['.
		j := len(stack) - 1
		for stack[j] != '[' {
			j--
		}
		segment := string(stack[j+1:])
		stack = stack[:j] // also drops the '[' itself
		// Pop the repeat count sitting immediately before the '['.
		k := len(stack)
		for k > 0 && stack[k-1] >= '0' && stack[k-1] <= '9' {
			k--
		}
		times, _ := strconv.Atoi(string(stack[k:]))
		stack = stack[:k]
		stack = append(stack, strings.Repeat(segment, times)...)
	}
	return string(stack)
}
// reverseStack reverses s in place and returns the result as a string.
// Note: the caller's slice is mutated. The previous version rebuilt the
// string with `r += string(v)` in a loop, which is O(n²); a single
// string(s) conversion after the in-place reverse is linear.
func reverseStack(s []byte) string {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
	return string(s)
}
|
package goSolution
// isTargetValid classifies target for the backwards reduction:
// it returns -1 as soon as an element below 1 is seen (unreachable state),
// 1 as soon as an element above 1 is seen (more reduction needed), and
// 0 when every element equals 1 (done). Scanning order matters: the first
// offending element decides the result.
func isTargetValid(target []int) int {
	for _, n := range target {
		switch {
		case n < 1:
			return -1
		case n > 1:
			return 1
		}
	}
	return 0
}
// isPossible reports whether target can be produced from an all-ones array
// by repeatedly replacing one element with the sum of the whole array
// (this appears to be the "Construct Target Array With Multiple Sums"
// problem — worked backwards from target to all-ones; confirm against the
// external sum/max helpers, which are defined elsewhere).
func isPossible(target []int) bool {
	n := len(target)
	if n == 0 {
		return false
	}
	var t int
	// Keep reducing while some element is still > 1 (isTargetValid == 1).
	// If an element drops below 1 (== -1) the loop exits and we fail below.
	for t = isTargetValid(target); t > 0; t = isTargetValid(target) {
		s := sum(target)
		m := max(target...)
		for i, v := range target {
			if v == m {
				// k is the sum of all elements except the maximum. Going
				// backwards, the max must shrink, so k must be non-zero and
				// strictly smaller than the max itself.
				k := s - m
				if k >= target[i] || k == 0 {
					return false
				}
				// Collapse many backward steps at once: repeatedly
				// subtracting k is a modulo; the "-1 ... +1" maps the
				// result into [1, k] instead of [0, k-1].
				target[i] = (target[i] - 1) % k + 1
				break
			}
		}
	}
	// t == 0 means every element reduced exactly to 1.
	return t == 0
}
package options
import "testing"
// Test_RepoOverride verifies that Repo() returns exactly the owner/repo
// pair stored on GlobalOptions.
func Test_RepoOverride(t *testing.T) {
	opts := GlobalOptions{
		owner: "heaths",
		repo:  "gh-label",
	}
	owner, repo := opts.Repo()
	if owner != "heaths" || repo != "gh-label" {
		t.Errorf(`RepoOverride() = (%s, %s); want: ("heaths", "gh-label")`, owner, repo)
	}
}
// Test_parseRepoOverride covers parseRepoOverride across the supported
// forms of an "owner/repo" override: placeholder defaults when nothing is
// supplied, the GH_REPO environment fallback, and the malformed-argument
// error cases.
func Test_parseRepoOverride(t *testing.T) {
	// args bundles the CLI argument and the mocked environment for a case.
	type args struct {
		args string
		env  map[string]string
	}
	// want is the owner/repo pair the parse should leave on the options.
	type want struct {
		owner string
		repo  string
	}
	tests := []struct {
		name  string
		args  args
		want  want
		wantE bool // true when parseRepoOverride should return an error
	}{
		{
			// no argument and no environment: placeholder defaults
			name: "empty",
			want: want{
				owner: ":owner",
				repo:  ":repo",
			},
		},
		{
			// no argument, but GH_REPO set in the mocked environment
			name: "from environment",
			args: args{
				env: map[string]string{
					"GH_REPO": "heaths/gh-label",
				},
			},
			want: want{
				owner: "heaths",
				repo:  "gh-label",
			},
		},
		{
			name: "too few slashes",
			args: args{
				args: "heaths",
			},
			wantE: true,
		},
		{
			name: "too many slashes",
			args: args{
				args: "github.com/heaths/gh-label",
			},
			wantE: true,
		},
		{
			name: "empty parts",
			args: args{
				args: "/",
			},
			wantE: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// mockStore supplies the fake environment to GlobalOptions.
			opts := GlobalOptions{
				keys: &mockStore{
					env: tt.args.env,
				},
			}
			if err := opts.parseRepoOverride(tt.args.args); (err != nil) != tt.wantE {
				t.Errorf("parseRepoOverride() = %v, wantE %v", err, tt.wantE)
				return
			}
			if opts.owner != tt.want.owner {
				t.Errorf("parseRepoOverride() owner = %q, want %q", opts.owner, tt.want.owner)
				return
			}
			if opts.repo != tt.want.repo {
				t.Errorf("parseRepoOverride() repo = %q, want %q", opts.repo, tt.want.repo)
			}
		})
	}
}
// mockStore is a test double for the key store, backed by a plain map.
type mockStore struct {
	env map[string]string
}

// get returns the value stored under key, or "" when the key is absent
// (reads from a nil map are safe and also yield "").
func (m *mockStore) get(key string) string {
	value := m.env[key]
	return value
}
|
package envoyconfig
import (
"context"
"encoding/base64"
"os"
"path/filepath"
"testing"
"time"
envoy_config_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/volatiletech/null/v9"
"google.golang.org/protobuf/types/known/wrapperspb"
"github.com/pomerium/pomerium/config"
"github.com/pomerium/pomerium/config/envoyconfig/filemgr"
"github.com/pomerium/pomerium/internal/testutil"
"github.com/pomerium/pomerium/pkg/cryptutil"
)
// Test_buildPolicyTransportSocket exercises upstream transport-socket
// generation for the per-policy TLS settings: SNI selection (host,
// tls_server_name, tls_upstream_server_name), skip-verify, custom and
// combined CAs, client certificates, and renegotiation.
//
// The expected JSON previously repeated the identical cipher-suite and
// ECDH-curve block in all eight cases (~300 duplicated lines); it is now
// rendered once by expectedTLS with only the varying pieces parameterized.
// AssertProtoJSONEqual parses the JSON, so whitespace is not significant.
func Test_buildPolicyTransportSocket(t *testing.T) {
	ctx := context.Background()
	cacheDir, _ := os.UserCacheDir()
	customCA := filepath.Join(cacheDir, "pomerium", "envoy", "files", "custom-ca-32484c314b584447463735303142374c31414145374650305a525539554938594d524855353757313942494d473847535231.pem")
	b := New("local-grpc", "local-http", "local-metrics", filemgr.NewManager(), nil)
	rootCABytes, _ := getCombinedCertificateAuthority(&config.Config{Options: &config.Options{}})
	rootCA := b.filemgr.BytesDataSource("ca.pem", rootCABytes).GetFilename()
	o1 := config.NewDefaultOptions()
	o2 := config.NewDefaultOptions()
	o2.CA = base64.StdEncoding.EncodeToString([]byte{0, 0, 0, 0})
	combinedCABytes, _ := getCombinedCertificateAuthority(&config.Config{Options: &config.Options{CA: o2.CA}})
	combinedCA := b.filemgr.BytesDataSource("ca.pem", combinedCABytes).GetFilename()

	// expectedTLS renders the expected transport-socket JSON. The three
	// "extra" fragments are spliced in verbatim: topExtra and commonExtra
	// must end with a comma (or be empty); validationExtra must start with
	// one (or be empty).
	expectedTLS := func(sni, trustedCA, topExtra, commonExtra, validationExtra string) string {
		return `{
			"name": "tls",
			"typedConfig": {
				"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
				` + topExtra + `"commonTlsContext": {
					"alpnProtocols": ["h2", "http/1.1"],
					"tlsParams": {
						"cipherSuites": [
							"ECDHE-ECDSA-AES256-GCM-SHA384",
							"ECDHE-RSA-AES256-GCM-SHA384",
							"ECDHE-ECDSA-AES128-GCM-SHA256",
							"ECDHE-RSA-AES128-GCM-SHA256",
							"ECDHE-ECDSA-CHACHA20-POLY1305",
							"ECDHE-RSA-CHACHA20-POLY1305",
							"ECDHE-ECDSA-AES128-SHA",
							"ECDHE-RSA-AES128-SHA",
							"AES128-GCM-SHA256",
							"AES128-SHA",
							"ECDHE-ECDSA-AES256-SHA",
							"ECDHE-RSA-AES256-SHA",
							"AES256-GCM-SHA384",
							"AES256-SHA"
						],
						"ecdhCurves": [
							"X25519",
							"P-256",
							"P-384",
							"P-521"
						]
					},
					` + commonExtra + `"validationContext": {
						"matchTypedSubjectAltNames": [{
							"sanType": "DNS",
							"matcher": {
								"exact": "` + sni + `"
							}
						}],
						"trustedCa": {
							"filename": "` + trustedCA + `"
						}` + validationExtra + `
					}
				},
				"sni": "` + sni + `"
			}
		}`
	}

	t.Run("insecure", func(t *testing.T) {
		ts, err := b.buildPolicyTransportSocket(ctx, &config.Config{Options: o1}, &config.Policy{
			To: mustParseWeightedURLs(t, "http://example.com"),
		}, *mustParseURL(t, "http://example.com"))
		require.NoError(t, err)
		// plain HTTP upstreams get no TLS transport socket at all
		assert.Nil(t, ts)
	})
	t.Run("host as sni", func(t *testing.T) {
		ts, err := b.buildPolicyTransportSocket(ctx, &config.Config{Options: o1}, &config.Policy{
			To: mustParseWeightedURLs(t, "https://example.com"),
		}, *mustParseURL(t, "https://example.com"))
		require.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, expectedTLS("example.com", rootCA, "", "", ""), ts)
	})
	t.Run("tls_server_name as sni", func(t *testing.T) {
		ts, err := b.buildPolicyTransportSocket(ctx, &config.Config{Options: o1}, &config.Policy{
			To:            mustParseWeightedURLs(t, "https://example.com"),
			TLSServerName: "use-this-name.example.com",
		}, *mustParseURL(t, "https://example.com"))
		require.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, expectedTLS("use-this-name.example.com", rootCA, "", "", ""), ts)
	})
	t.Run("tls_upstream_server_name as sni", func(t *testing.T) {
		ts, err := b.buildPolicyTransportSocket(ctx, &config.Config{Options: o1}, &config.Policy{
			To:                    mustParseWeightedURLs(t, "https://example.com"),
			TLSUpstreamServerName: "use-this-name.example.com",
		}, *mustParseURL(t, "https://example.com"))
		require.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, expectedTLS("use-this-name.example.com", rootCA, "", "", ""), ts)
	})
	t.Run("tls_skip_verify", func(t *testing.T) {
		ts, err := b.buildPolicyTransportSocket(ctx, &config.Config{Options: o1}, &config.Policy{
			To:            mustParseWeightedURLs(t, "https://example.com"),
			TLSSkipVerify: true,
		}, *mustParseURL(t, "https://example.com"))
		require.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, expectedTLS("example.com", rootCA, "", "", `,
						"trustChainVerification": "ACCEPT_UNTRUSTED"`), ts)
	})
	t.Run("custom ca", func(t *testing.T) {
		ts, err := b.buildPolicyTransportSocket(ctx, &config.Config{Options: o1}, &config.Policy{
			To:          mustParseWeightedURLs(t, "https://example.com"),
			TLSCustomCA: base64.StdEncoding.EncodeToString([]byte{0, 0, 0, 0}),
		}, *mustParseURL(t, "https://example.com"))
		require.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, expectedTLS("example.com", customCA, "", "", ""), ts)
	})
	t.Run("options custom ca", func(t *testing.T) {
		ts, err := b.buildPolicyTransportSocket(ctx, &config.Config{Options: o2}, &config.Policy{
			To: mustParseWeightedURLs(t, "https://example.com"),
		}, *mustParseURL(t, "https://example.com"))
		require.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, expectedTLS("example.com", combinedCA, "", "", ""), ts)
	})
	t.Run("client certificate", func(t *testing.T) {
		clientCert, _ := cryptutil.CertificateFromBase64(aExampleComCert, aExampleComKey)
		ts, err := b.buildPolicyTransportSocket(ctx, &config.Config{Options: o1}, &config.Policy{
			To:                mustParseWeightedURLs(t, "https://example.com"),
			ClientCertificate: clientCert,
		}, *mustParseURL(t, "https://example.com"))
		require.NoError(t, err)
		certsJSON := `"tlsCertificates": [{
						"certificateChain":{
							"filename": "` + filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-crt-354e49305a5a39414a545530374e58454e48334148524c4e324258463837364355564c4e4532464b54355139495547514a38.pem") + `"
						},
						"privateKey": {
							"filename": "` + filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-key-3350415a38414e4e4a4655424e55393430474147324651433949384e485341334b5157364f424b4c5856365a545937383735.pem") + `"
						}
					}],
					`
		testutil.AssertProtoJSONEqual(t, expectedTLS("example.com", rootCA, "", certsJSON, ""), ts)
	})
	t.Run("allow renegotiation", func(t *testing.T) {
		ts, err := b.buildPolicyTransportSocket(ctx, &config.Config{Options: o1}, &config.Policy{
			To:                            mustParseWeightedURLs(t, "https://example.com"),
			TLSUpstreamAllowRenegotiation: true,
		}, *mustParseURL(t, "https://example.com"))
		require.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, expectedTLS("example.com", rootCA, `"allowRenegotiation": true,
				`, "", ""), ts)
	})
}
// Test_buildCluster exercises cluster generation: discovery-type selection
// (STRICT_DNS for hostnames vs STATIC for IPs), HTTP/2 protocol options,
// per-endpoint TLS transport sockets, load-balancing weights, localhost
// rewriting, and outlier-detection passthrough.
//
// The expected JSON previously repeated the HTTP/2 options block in all six
// cases and the full TLS transport socket twice in the "secure" case; those
// fragments are now defined once and spliced in. AssertProtoJSONEqual
// parses the JSON, so whitespace is not significant.
func Test_buildCluster(t *testing.T) {
	ctx := context.Background()
	b := New("local-grpc", "local-http", "local-metrics", filemgr.NewManager(), nil)
	rootCABytes, _ := getCombinedCertificateAuthority(&config.Config{Options: &config.Options{}})
	rootCA := b.filemgr.BytesDataSource("ca.pem", rootCABytes).GetFilename()
	o1 := config.NewDefaultOptions()

	const http2OptionsJSON = `"typedExtensionProtocolOptions": {
			"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
				"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
				"explicitHttpConfig": {
					"http2ProtocolOptions": {
						"allowConnect": true,
						"initialConnectionWindowSize": 1048576,
						"initialStreamWindowSize": 65536,
						"maxConcurrentStreams": 100
					}
				}
			}
		}`
	tlsSocketJSON := `{
			"name": "tls",
			"typedConfig": {
				"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
				"commonTlsContext": {
					"alpnProtocols": ["h2", "http/1.1"],
					"tlsParams": {
						"cipherSuites": [
							"ECDHE-ECDSA-AES256-GCM-SHA384",
							"ECDHE-RSA-AES256-GCM-SHA384",
							"ECDHE-ECDSA-AES128-GCM-SHA256",
							"ECDHE-RSA-AES128-GCM-SHA256",
							"ECDHE-ECDSA-CHACHA20-POLY1305",
							"ECDHE-RSA-CHACHA20-POLY1305",
							"ECDHE-ECDSA-AES128-SHA",
							"ECDHE-RSA-AES128-SHA",
							"AES128-GCM-SHA256",
							"AES128-SHA",
							"ECDHE-ECDSA-AES256-SHA",
							"ECDHE-RSA-AES256-SHA",
							"AES256-GCM-SHA384",
							"AES256-SHA"
						],
						"ecdhCurves": [
							"X25519",
							"P-256",
							"P-384",
							"P-521"
						]
					},
					"validationContext": {
						"matchTypedSubjectAltNames": [{
							"sanType": "DNS",
							"matcher": {
								"exact": "example.com"
							}
						}],
						"trustedCa": {
							"filename": "` + rootCA + `"
						}
					}
				},
				"sni": "example.com"
			}
		}`

	t.Run("insecure", func(t *testing.T) {
		endpoints, err := b.buildPolicyEndpoints(ctx, &config.Config{Options: o1}, &config.Policy{
			To: mustParseWeightedURLs(t, "http://example.com", "http://1.2.3.4"),
		})
		require.NoError(t, err)
		cluster := newDefaultEnvoyClusterConfig()
		cluster.DnsLookupFamily = envoy_config_cluster_v3.Cluster_V4_ONLY
		err = b.buildCluster(cluster, "example", endpoints, upstreamProtocolHTTP2)
		require.NoErrorf(t, err, "cluster %+v", cluster)
		testutil.AssertProtoJSONEqual(t, `
		{
			"name": "example",
			"type": "STRICT_DNS",
			"connectTimeout": "10s",
			"respectDnsTtl": true,
			"dnsLookupFamily": "V4_ONLY",
			"perConnectionBufferLimitBytes": 32768,
			`+http2OptionsJSON+`,
			"loadAssignment": {
				"clusterName": "example",
				"endpoints": [{
					"lbEndpoints": [{
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "example.com",
									"portValue": 80
								}
							}
						}
					}, {
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "1.2.3.4",
									"portValue": 80
								}
							}
						}
					}]
				}]
			}
		}
		`, cluster)
	})
	t.Run("secure", func(t *testing.T) {
		endpoints, err := b.buildPolicyEndpoints(ctx, &config.Config{Options: o1}, &config.Policy{
			To: mustParseWeightedURLs(t,
				"https://example.com",
				"https://example.com",
			),
		})
		require.NoError(t, err)
		cluster := newDefaultEnvoyClusterConfig()
		err = b.buildCluster(cluster, "example", endpoints, upstreamProtocolHTTP2)
		require.NoErrorf(t, err, "cluster %+v", cluster)
		// both endpoints carry the same transport-socket-match metadata
		lbMetadataJSON := `"metadata": {
							"filterMetadata": {
								"envoy.transport_socket_match": {
									"` + endpoints[0].TransportSocketName() + `": true
								}
							}
						}`
		testutil.AssertProtoJSONEqual(t, `
		{
			"name": "example",
			"type": "STRICT_DNS",
			"connectTimeout": "10s",
			"respectDnsTtl": true,
			"perConnectionBufferLimitBytes": 32768,
			"transportSocketMatches": [{
				"name": "`+endpoints[0].TransportSocketName()+`",
				"match": {
					"`+endpoints[0].TransportSocketName()+`": true
				},
				"transportSocket": `+tlsSocketJSON+`
			}],
			"transportSocket": `+tlsSocketJSON+`,
			`+http2OptionsJSON+`,
			"dnsLookupFamily": "V4_PREFERRED",
			"loadAssignment": {
				"clusterName": "example",
				"endpoints": [{
					"lbEndpoints": [{
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "example.com",
									"portValue": 443
								}
							}
						},
						`+lbMetadataJSON+`
					},{
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "example.com",
									"portValue": 443
								}
							}
						},
						`+lbMetadataJSON+`
					}]
				}]
			}
		}
		`, cluster)
	})
	t.Run("ip addresses", func(t *testing.T) {
		endpoints, err := b.buildPolicyEndpoints(ctx, &config.Config{Options: o1}, &config.Policy{
			To: mustParseWeightedURLs(t, "http://127.0.0.1", "http://127.0.0.2"),
		})
		require.NoError(t, err)
		cluster := newDefaultEnvoyClusterConfig()
		err = b.buildCluster(cluster, "example", endpoints, upstreamProtocolHTTP2)
		require.NoErrorf(t, err, "cluster %+v", cluster)
		testutil.AssertProtoJSONEqual(t, `
		{
			"name": "example",
			"type": "STATIC",
			"connectTimeout": "10s",
			"respectDnsTtl": true,
			"perConnectionBufferLimitBytes": 32768,
			`+http2OptionsJSON+`,
			"dnsLookupFamily": "V4_PREFERRED",
			"loadAssignment": {
				"clusterName": "example",
				"endpoints": [{
					"lbEndpoints": [{
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "127.0.0.1",
									"portValue": 80
								}
							}
						}
					},{
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "127.0.0.2",
									"portValue": 80
								}
							}
						}
					}]
				}]
			}
		}
		`, cluster)
	})
	t.Run("weights", func(t *testing.T) {
		endpoints, err := b.buildPolicyEndpoints(ctx, &config.Config{Options: o1}, &config.Policy{
			To: mustParseWeightedURLs(t, "http://127.0.0.1:8080,1", "http://127.0.0.2,2"),
		})
		require.NoError(t, err)
		cluster := newDefaultEnvoyClusterConfig()
		err = b.buildCluster(cluster, "example", endpoints, upstreamProtocolHTTP2)
		require.NoErrorf(t, err, "cluster %+v", cluster)
		testutil.AssertProtoJSONEqual(t, `
		{
			"name": "example",
			"type": "STATIC",
			"connectTimeout": "10s",
			"respectDnsTtl": true,
			"perConnectionBufferLimitBytes": 32768,
			`+http2OptionsJSON+`,
			"dnsLookupFamily": "V4_PREFERRED",
			"loadAssignment": {
				"clusterName": "example",
				"endpoints": [{
					"lbEndpoints": [{
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "127.0.0.1",
									"portValue": 8080
								}
							}
						},
						"loadBalancingWeight": 1
					},{
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "127.0.0.2",
									"portValue": 80
								}
							}
						},
						"loadBalancingWeight": 2
					}]
				}]
			}
		}
		`, cluster)
	})
	t.Run("localhost", func(t *testing.T) {
		endpoints, err := b.buildPolicyEndpoints(ctx, &config.Config{Options: o1}, &config.Policy{
			To: mustParseWeightedURLs(t, "http://localhost"),
		})
		require.NoError(t, err)
		cluster := newDefaultEnvoyClusterConfig()
		err = b.buildCluster(cluster, "example", endpoints, upstreamProtocolHTTP2)
		require.NoErrorf(t, err, "cluster %+v", cluster)
		testutil.AssertProtoJSONEqual(t, `
		{
			"name": "example",
			"type": "STATIC",
			"connectTimeout": "10s",
			"respectDnsTtl": true,
			"perConnectionBufferLimitBytes": 32768,
			`+http2OptionsJSON+`,
			"dnsLookupFamily": "V4_PREFERRED",
			"loadAssignment": {
				"clusterName": "example",
				"endpoints": [{
					"lbEndpoints": [{
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "127.0.0.1",
									"portValue": 80
								}
							}
						}
					}]
				}]
			}
		}
		`, cluster)
	})
	t.Run("outlier", func(t *testing.T) {
		endpoints, err := b.buildPolicyEndpoints(ctx, &config.Config{Options: o1}, &config.Policy{
			To: mustParseWeightedURLs(t, "http://example.com"),
		})
		require.NoError(t, err)
		cluster := newDefaultEnvoyClusterConfig()
		cluster.DnsLookupFamily = envoy_config_cluster_v3.Cluster_V4_ONLY
		cluster.OutlierDetection = &envoy_config_cluster_v3.OutlierDetection{
			EnforcingConsecutive_5Xx:       wrapperspb.UInt32(17),
			SplitExternalLocalOriginErrors: true,
		}
		err = b.buildCluster(cluster, "example", endpoints, upstreamProtocolHTTP2)
		require.NoErrorf(t, err, "cluster %+v", cluster)
		testutil.AssertProtoJSONEqual(t, `
		{
			"name": "example",
			"type": "STRICT_DNS",
			"connectTimeout": "10s",
			"respectDnsTtl": true,
			"perConnectionBufferLimitBytes": 32768,
			`+http2OptionsJSON+`,
			"dnsLookupFamily": "V4_ONLY",
			"outlierDetection": {
				"enforcingConsecutive5xx": 17,
				"splitExternalLocalOriginErrors": true
			},
			"loadAssignment": {
				"clusterName": "example",
				"endpoints": [{
					"lbEndpoints": [{
						"endpoint": {
							"address": {
								"socketAddress": {
									"address": "example.com",
									"portValue": 80
								}
							}
						}
					}]
				}]
			}
		}
		`, cluster)
	})
}
// Test_validateClusters checks that validateClusters rejects duplicate
// cluster names and accepts unique ones.
func Test_validateClusters(t *testing.T) {
	type c []*envoy_config_cluster_v3.Cluster
	for _, tc := range []struct {
		clusters    c
		expectError bool
	}{
		{c{{Name: "one"}, {Name: "one"}}, true},
		{c{{Name: "one"}, {Name: "two"}}, false},
	} {
		err := validateClusters(tc.clusters)
		if tc.expectError {
			assert.Error(t, err, "%#v", tc.clusters)
		} else {
			assert.NoError(t, err, "%#v", tc.clusters)
		}
	}
}
// Test_bindConfig verifies the UpstreamBindConfig on generated clusters:
// absent by default, freebind when requested, and carrying an explicit
// source address when one is configured.
func Test_bindConfig(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	b := New("local-grpc", "local-http", "local-metrics", filemgr.NewManager(), nil)
	// every case uses the same from/to policy
	newPolicy := func() *config.Policy {
		return &config.Policy{
			From: "https://from.example.com",
			To:   mustParseWeightedURLs(t, "https://to.example.com"),
		}
	}
	t.Run("no bind config", func(t *testing.T) {
		cluster, err := b.buildPolicyCluster(ctx, &config.Config{Options: &config.Options{}}, newPolicy())
		assert.NoError(t, err)
		assert.Nil(t, cluster.UpstreamBindConfig)
	})
	t.Run("freebind", func(t *testing.T) {
		cfg := &config.Config{Options: &config.Options{
			EnvoyBindConfigFreebind: null.BoolFrom(true),
		}}
		cluster, err := b.buildPolicyCluster(ctx, cfg, newPolicy())
		assert.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, `
		{
			"freebind": true,
			"sourceAddress": {
				"address": "0.0.0.0",
				"portValue": 0
			}
		}
		`, cluster.UpstreamBindConfig)
	})
	t.Run("source address", func(t *testing.T) {
		cfg := &config.Config{Options: &config.Options{
			EnvoyBindConfigSourceAddress: "192.168.0.1",
		}}
		cluster, err := b.buildPolicyCluster(ctx, cfg, newPolicy())
		assert.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, `
		{
			"sourceAddress": {
				"address": "192.168.0.1",
				"portValue": 0
			}
		}
		`, cluster.UpstreamBindConfig)
	})
}
// mustParseWeightedURLs parses urls into weighted URLs, failing the test
// immediately on any parse error.
func mustParseWeightedURLs(t *testing.T, urls ...string) []config.WeightedURL {
	weighted, err := config.ParseWeightedUrls(urls...)
	require.NoError(t, err)
	return weighted
}
|
package message
import (
"bytes"
"github.com/stretchr/testify/assert"
"testing"
)
// Test_Checksum verifies CalculateChecksums against known RIPEMD-160 and
// SHA-256 digests of "foobar".
func Test_Checksum(t *testing.T) {
	input := bytes.NewBufferString("foobar")
	sums, err := CalculateChecksums(input)
	assert.NoError(t, err)
	expected := map[string]string{
		"ripemd160": "a06e327ea7388c18e4740e350ed4e60f2e04fc41",
		"sha256":    "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2",
	}
	for alg, want := range expected {
		assert.Equal(t, want, sums[alg])
	}
}
|
package main
import (
. "codewizards"
)
// MyStrategy is a (currently empty) CodeWizards strategy implementation.
type MyStrategy struct{}

// New returns a fresh MyStrategy; presumably invoked once by the game
// runner to obtain the Strategy instance — confirm against the runner.
func New() Strategy {
	return &MyStrategy{}
}

// Move is called for every game tick to fill in the wizard's move.
// It is intentionally empty: this is the starter template.
func (s *MyStrategy) Move(me *Wizard, world *World, game *Game, move *Move) {
	// put your code here
}
|
package main
import (
"fmt"
"sync"
)
// main demonstrates balanced RWMutex usage. The original acquired two read
// locks but released only one before calling Lock(); a writer blocks until
// ALL readers release, so the program deadlocked (fatal "all goroutines
// are asleep"). Every RLock is now paired with an RUnlock, and the write
// lock is released too.
func main() {
	rw := sync.RWMutex{}
	rw.RLock()
	rw.RLock() // read locks are shared: a second RLock succeeds
	rw.RUnlock()
	rw.RUnlock() // both readers must release before a writer may proceed
	rw.Lock()
	fmt.Println(2222)
	rw.Unlock()
}
|
/*
Inspired by this video by Ben Eater. This challenge forms a pair with Decode USB packets.
The USB 2.0 protocol uses, at a low level, a line code called non-return-to-zero encoding (specifically, a variant called NRZI), in which a stream of bits is encoded into a stream of two electrical level states J and K. Encoding works as follows:
Start with some initial state, which is either J or K
For each bit to be encoded:
If the bit is a 0, switch the state from J to K or from K to J
If the bit is a 1, maintain the same state as before
and output that state
For this challenge, we will assume the encoding starts in state J.
However, it's not that simple: an additional process called bit stuffing takes place, which is designed to make it easier to detect if the signal has dropped out.
After 6 consecutive 1s are read, an extra meaningless 0 bit is processed to ensure the signal never stays in the same state (J or K) for too long.
The full USB specification is available here on the USB website, or mirrored (and easier to access) here, but I'd caution you before reading it because it's extremely long and the pertinent information is hard to pin down.
Task
Given a non-empty binary string as input, encode the string using the USB implementation of NRZI described above, and output a string of Js or Ks.
Test-cases
Input Output
=====================================
1 J
0 K
1000101 JKJKKJJ
00000000 KJKJKJKJ
11111111 JJJJJJKKK
001111100 KJJJJJJKJ
0011111100 KJJJJJJJKJK
00111111100 KJJJJJJJKKJK
0011111111111100 KJJJJJJJKKKKKKKJKJ
0011011000111001 KJJJKKKJKJJJJKJJ
01101000011101000111010001110000011100110011101000101111001011110111011101110111011101110010111001111001011011110111010101110100011101010110001001100101001011100110001101101111011011010010111101110111011000010111010001100011011010000011111101110110001111010110010001010001011101110011010001110111001110010101011101100111010110000110001101010001 KKKJJKJKJJJJKKJKJJJJKKJKJJJJKJKJKKKKJKKKJKKKKJJKJKKJJJJJKJJKKKKKJJJJKKKKJJJJKKKKJJJJKKKKJKKJJJJKJJJJJKJJKKKJJJJJKKKKJJKKJJJJKKJKJJJJKKJJKKKJKJJKJJJKJJKKJKKJJJJKJJJKJKKKJJJKKKKKJJJKKKJJKJJKKKKKJJJJKKKKJJJKJKJJKKKKJJKJKKKJKJJJKKKJJKJKJKKKKKKKJKKKKJJJKJKKKKKJJKKKJKKJKJJKKJKJJKKKKJJJJKJJJKKJKJJJJKKKKJKKKKJKKJJKKJJJJKKKJKKKKJJKKKJKJKKKJKJJJKKJJKJKK
Rules
The input should be a string or array of 1s and 0s or of trues and falses
The output should be a string or array, with the two states J and K being represented by any two distinct values
You may use any standard I/O method
Standard loopholes are forbidden
This is code-golf, so the shortest code in bytes wins
*/
package main
import (
"bytes"
)
// main runs the NRZI encoder against the challenge's published test
// vectors (see the header comment above for the encoding rules) and
// panics via assert on any mismatch.
func main() {
	// single bits and basic alternation
	assert(encode("1") == "J")
	assert(encode("0") == "K")
	assert(encode("1000101") == "JKJKKJJ")
	assert(encode("00000000") == "KJKJKJKJ")
	// cases that exercise bit stuffing: a stuffed 0 is inserted after six
	// consecutive 1s, so outputs can be longer than inputs
	assert(encode("11111111") == "JJJJJJKKK")
	assert(encode("001111100") == "KJJJJJJKJ")
	assert(encode("0011111100") == "KJJJJJJJKJK")
	assert(encode("00111111100") == "KJJJJJJJKKJK")
	assert(encode("0011111111111100") == "KJJJJJJJKKKKKKKJKJ")
	assert(encode("0011011000111001") == "KJJJKKKJKJJJJKJJ")
	// long vector from the challenge statement
	assert(encode("01101000011101000111010001110000011100110011101000101111001011110111011101110111011101110010111001111001011011110111010101110100011101010110001001100101001011100110001101101111011011010010111101110111011000010111010001100011011010000011111101110110001111010110010001010001011101110011010001110111001110010101011101100111010110000110001101010001") == "KKKJJKJKJJJJKKJKJJJJKKJKJJJJKJKJKKKKJKKKJKKKKJJKJKKJJJJJKJJKKKKKJJJJKKKKJJJJKKKKJJJJKKKKJKKJJJJKJJJJJKJJKKKJJJJJKKKKJJKKJJJJKKJKJJJJKKJJKKKJKJJKJJJKJJKKJKKJJJJKJJJKJKKKJJJKKKKKJJJKKKJJKJJKKKKKJJJJKKKKJJJKJKJJKKKKJJKJKKKJKJJJKKKJJKJKJKKKKKKKJKKKKJJJKJKKKKKJJKKKJKKJKJJKKJKJJKKKKJJJJKJJJKKJKJJJJKKKKJKKKKJKKJJKKJJJJKKKJKKKKJJKKKJKJKKKJKJJJKKJJKJKK")
}
// assert panics when the condition does not hold; main uses it to check
// the encoder against the reference vectors.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// encode transcodes a binary string into the J/K differential alphabet:
// the first symbol maps 1→J / 0→K, a 0 in the input toggles the current
// output symbol while a 1 repeats it, and after six consecutive repeats an
// extra toggled symbol is stuffed in to break the run.
func encode(s string) string {
	if len(s) == 0 {
		return ""
	}
	var out bytes.Buffer
	// emit maps a bit to its display letter; any other byte passes
	// through verbatim, mirroring the original helper's default case.
	emit := func(b byte) {
		switch b {
		case '0':
			out.WriteByte('K')
		case '1':
			out.WriteByte('J')
		default:
			out.WriteByte(b)
		}
	}
	// toggle flips between the '0' and '1' characters, leaving anything
	// else unchanged.
	toggle := func(b byte) byte {
		switch b {
		case '0':
			return '1'
		case '1':
			return '0'
		}
		return b
	}
	cur := s[0]
	run := 0
	if s[0] == '1' {
		run = 1
	}
	emit(cur)
	for i := 1; i < len(s); i++ {
		if s[i] == '0' {
			cur = toggle(cur)
			run = 0
		}
		emit(cur)
		if s[i] == '1' {
			run++
		}
		if run >= 6 {
			// Six repeats in a row: stuff one toggled symbol.
			cur = toggle(cur)
			run = 0
			emit(cur)
		}
	}
	return out.String()
}
func output(w *bytes.Buffer, c byte) byte {
v := c
switch c {
case '0':
v = 'K'
case '1':
v = 'J'
}
w.WriteByte(v)
return c
}
// flip toggles between the '0' and '1' bit characters; any other byte is
// returned unchanged.
func flip(c byte) byte {
	if c == '0' {
		return '1'
	}
	if c == '1' {
		return '0'
	}
	return c
}
// one returns 1 for the '1' bit character and 0 for everything else.
func one(c byte) int {
	if c != '1' {
		return 0
	}
	return 1
}
|
package embedded
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
"github.com/coreos/etcd/etcdmain"
"github.com/pkg/errors"
"github.com/rancher/rancher/pkg/hyperkube"
"github.com/rancher/rancher/pkg/k8scheck"
"github.com/rancher/rancher/pkg/librke"
"github.com/rancher/rancher/pkg/rkecerts"
"github.com/rancher/rke/services"
"github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"k8s.io/client-go/tools/clientcmd"
)
// copyProcesses lists the RKE plan processes that are copied out of the
// generated plan and run in-process (see getProcesses and Run).
var (
	copyProcesses = []string{
		services.KubeAPIContainerName,
		services.KubeControllerContainerName,
		services.EtcdContainerName,
	}
)
// Run generates a local RKE plan, stages its certificates, and launches
// the embedded control-plane processes on errgroup goroutines. etcd is
// started first and must report healthy (checkEtcd) before the remaining
// Kubernetes components are launched.
//
// It returns the errgroup context (cancelled once any component exits),
// the kubeconfig contents for the embedded cluster, and any setup error.
func Run(ctx context.Context) (context.Context, string, error) {
	rkeConfig, err := localConfig()
	if err != nil {
		return ctx, "", err
	}
	// Write the certificates to disk and keep the parsed bundle for the
	// etcd health check and kubeconfig below.
	bundle, err := rkecerts.Stage(rkeConfig)
	if err != nil {
		return ctx, "", err
	}
	plan, err := librke.New().GeneratePlan(ctx, rkeConfig, nil)
	if err != nil {
		return ctx, "", err
	}
	processes := getProcesses(plan)
	eg, resultCtx := errgroup.WithContext(ctx)
	// etcd first; the other components need a healthy etcd to start.
	eg.Go(runProcessFunc(ctx, "etcd", processes["etcd"], runEtcd))
	if err := checkEtcd(bundle); err != nil {
		return ctx, "", errors.Wrap(err, "waiting on etcd")
	}
	for name, process := range processes {
		runFn := func(ctx context.Context, args []string) {
			runK8s(ctx, bundle.KubeConfig(), args)
		}
		if name == "etcd" {
			// Already running.
			continue
		}
		eg.Go(runProcessFunc(ctx, name, process, runFn))
	}
	return resultCtx, bundle.KubeConfig(), nil
}
// checkEtcd blocks until the local etcd answers its /health endpoint with
// 200 OK, using the kube-ca root and the kube-etcd client certificate
// from the staged bundle for mutual TLS.
//
// NOTE(review): this retries every second forever and observes no context
// cancellation — confirm callers rely on this blocking behaviour.
func checkEtcd(bundle *rkecerts.Bundle) error {
	certPool := x509.NewCertPool()
	certPool.AddCert(bundle.Certs()["kube-ca"].Certificate)
	ht := &http.Transport{
		TLSClientConfig: &tls.Config{
			RootCAs: certPool,
			Certificates: []tls.Certificate{
				{
					Certificate: [][]byte{
						bundle.Certs()["kube-etcd-127-0-0-1"].Certificate.Raw,
					},
					PrivateKey: bundle.Certs()["kube-etcd-127-0-0-1"].Key,
				}},
		},
	}
	client := http.Client{
		Transport: ht,
	}
	defer ht.CloseIdleConnections()
	for i := 0; ; i++ {
		resp, err := client.Get("https://localhost:2379/health")
		if err != nil {
			// Stay quiet for the first couple of attempts to avoid log
			// noise during normal startup.
			if i > 1 {
				logrus.Infof("Waiting on etcd startup: %v", err)
			}
			time.Sleep(time.Second)
			continue
		}
		// Drain and close the body so the transport can reuse the
		// connection on the next attempt.
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			if i > 1 {
				logrus.Infof("Waiting on etcd startup: status %d", resp.StatusCode)
			}
			time.Sleep(time.Second)
			continue
		}
		break
	}
	return nil
}
// runProcessFunc adapts runProcess to the errgroup API: the returned
// closure blocks while the process runs and always yields an error when
// it stops, so the shared errgroup context is cancelled whenever any
// component exits.
func runProcessFunc(ctx context.Context, name string, process v3.Process, f func(context.Context, []string)) func() error {
	return func() error {
		runProcess(ctx, name, process, f)
		return fmt.Errorf("%s exited", name)
	}
}
func setEnv(env []string) {
for _, part := range env {
parts := strings.SplitN(part, "=", 2)
if len(parts) == 1 {
os.Setenv(parts[0], "")
} else {
os.Setenv(parts[0], parts[1])
}
}
}
// runEtcd runs the embedded etcd server in-process. etcdmain reads its
// configuration from os.Args, so the generated args replace the process
// arguments before Main is invoked; Main is not expected to return.
func runEtcd(ctx context.Context, args []string) {
	os.Args = args
	logrus.Info("Running ", strings.Join(args, " "))
	etcdmain.Main()
	logrus.Errorf("etcd exited")
}
// runK8s runs a single Kubernetes component (kube-apiserver,
// kube-controller-manager, or kube-scheduler) in-process via hyperkube.
// Components other than the API server first wait for the API server to
// become reachable through the provided kubeconfig. Verbosity is reduced
// unless debug logging is enabled.
func runK8s(ctx context.Context, kubeConfig string, args []string) {
	if logrus.GetLevel() != logrus.DebugLevel {
		args = append(args, "-v=1")
	}
	args = append(args, "--logtostderr=false")
	args = append(args, "--alsologtostderr=false")
	if args[0] != "kube-apiserver" {
		restConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfig)
		if err != nil {
			logrus.Errorf("Failed to build client: %v", err)
			return
		}
		// Fixed copy-paste: this failure is about waiting for the API
		// server, not about building the client config above.
		if err := k8scheck.Wait(ctx, *restConfig); err != nil {
			logrus.Errorf("Failed waiting for Kubernetes API: %v", err)
			return
		}
	}
	if args[0] == "kube-controller-manager" {
		// Resource-quota and service controllers are handled elsewhere.
		args = append(args, "--controllers", "*", "--controllers", "-resourcequota", "--controllers", "-service")
	}
	hk := hyperkube.HyperKube{
		Name: "hyperkube",
		Long: "This is an all-in-one binary that can run any of the various Kubernetes servers.",
	}
	hk.AddServer(hyperkube.NewKubeAPIServer())
	hk.AddServer(hyperkube.NewKubeControllerManager())
	hk.AddServer(hyperkube.NewScheduler())
	logrus.Info("Running ", strings.Join(args, " "))
	if err := hk.Run(args, ctx.Done()); err != nil {
		logrus.Errorf("%s exited with error: %v", args[0], err)
	}
}
// runProcess prepares the environment and argument vector described by
// plan process p and invokes f with them. The current process environment
// is extended (and possibly overridden) by the plan's env entries.
func runProcess(ctx context.Context, name string, p v3.Process, f func(context.Context, []string)) {
	env := append([]string{}, os.Environ()...)
	env = append(env, p.Env...)
	args := append([]string{}, p.Command...)
	args = append(args, p.Args...)
	// Drop everything before the first flag (the container binary path)
	// and substitute the component name as argv[0].
	for i, part := range args {
		if strings.HasPrefix(part, "-") {
			args = append([]string{name}, args[i:]...)
			break
		}
	}
	setEnv(env)
	f(ctx, args)
}
// getProcesses extracts the processes named in copyProcesses from the
// first node of the generated plan.
// NOTE(review): plan.Nodes[0] is indexed unconditionally and panics on a
// plan with no nodes — confirm the generated plan always has one node.
func getProcesses(plan v3.RKEPlan) map[string]v3.Process {
	processes := map[string]v3.Process{}
	for _, name := range copyProcesses {
		processes[name] = plan.Nodes[0].Processes[name]
	}
	return processes
}
|
/*
Copyright © 2019 Cabins <kong_lingcun@163.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/cabins/cdnjs/util"
"github.com/spf13/cobra"
)
// searchCmd represents the search command: each positional argument is
// looked up via util.SearchWithName and the matches are printed as a
// table, limited to --number rows.
var searchCmd = &cobra.Command{
	Use:     "search",
	Aliases: []string{"s", "sr"},
	Short:   "search lib with name",
	Long:    `search lib with name.`,
	Run: func(cmd *cobra.Command, args []string) {
		// The flag error is ignored; the flag is registered in init below.
		number, _ := cmd.Flags().GetInt("number")
		for _, v := range args {
			util.SearchWithName(v).PrintAsTable(number)
		}
	},
}

// init attaches the search command to the root command and registers its
// --number/-n flag (default 15 results).
func init() {
	rootCmd.AddCommand(searchCmd)
	searchCmd.Flags().IntP("number", "n", 15, "result numbers")
}
|
package protocol
import ()
// ServerRestartPacket carries the transaction id and serial number parsed
// from a server-restart message.
type ServerRestartPacket struct {
	Tid    uint64
	Serial uint16
}

// Serialize encodes the packet for the wire.
// NOTE(review): not implemented — this always returns nil.
func (p *ServerRestartPacket) Serialize() []byte {
	return nil
}

// ParseServerRestart decodes a ServerRestartPacket from the common packet
// header in buffer; the other header fields are discarded.
func ParseServerRestart(buffer []byte) *ServerRestartPacket {
	_, _, _, tid, serial := ParseHeader(buffer)
	return &ServerRestartPacket{
		Tid:    tid,
		Serial: serial,
	}
}
|
package main
import "fmt"
// main demonstrates a map whose values are string slices by printing the
// slice stored under key 10.
func main() {
	grades := map[int][]string{
		10: {"abc", "xyz", "qwer"},
	}
	fmt.Println(grades[10])
}
|
package msp
import (
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/secp256k1"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/sha3"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/sw"
"github.com/HNB-ECO/HNB-Blockchain/HNB/common"
"crypto/ecdsa"
"crypto/elliptic"
"encoding/json"
"errors"
"fmt"
"math/big"
// "github.com/HNB-ECO/HNB-Blockchain/HNB/config"
)
// Sentinel errors shared by the signer implementations.
var (
	ErrInvalidChainId = errors.New("invalid chain id for signer")
	ErrInvalidSig     = errors.New("invalid transaction v, r, s values")
)

// Small big.Int constants reused across signature calculations.
var (
	Big1   = big.NewInt(1)
	Big2   = big.NewInt(2)
	Big3   = big.NewInt(3)
	Big0   = big.NewInt(0)
	Big32  = big.NewInt(32)
	Big256 = big.NewInt(256)
	Big257 = big.NewInt(257)
)

// secp256k1 curve order N and N/2; s values above N/2 are rejected under
// homestead rules (see ValidateSignatureValues) to prevent malleability.
var (
	secp256k1N, _  = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
	secp256k1halfN = new(big.Int).Div(secp256k1N, big.NewInt(2))
)

// hnbSigner caches the lazily constructed package-wide signer (GetSigner).
var hnbSigner *HNBSigner
// GetSigner returns the package-wide HNB signer, creating it on first use
// with a chain id derived from the common.HNB identifier bytes.
// NOTE(review): this lazy initialisation is not goroutine-safe; two
// concurrent first calls could build two signers — confirm init is
// single-threaded or guard with sync.Once.
func GetSigner() Signer {
	if hnbSigner != nil {
		return hnbSigner
	}
	chainID := new(big.Int)
	chainID.SetBytes([]byte(common.HNB))
	hnbSigner = NewHNBSigner(chainID)
	return hnbSigner
}
// SignTx signs the transaction using the given signer and private key:
// the signer-specific hash is computed, signed with the node key (via
// SignWithHash), and the resulting R/S/V values are stored on tx.
func SignTx(tx *common.Transaction, s Signer) (*common.Transaction, error) {
	//txMarshal, _ := json.Marshal(tx)
	hash := s.Hash(tx)
	sig, err := SignWithHash(hash.GetBytes())
	if err != nil {
		return nil, err
	}
	err = WithSignature(tx, s, sig)
	if err != nil {
		return nil, err
	}
	return tx, nil
}

// WithSignature splits the raw 65-byte signature into R, S, V using the
// signer and writes them onto the transaction.
func WithSignature(tx *common.Transaction, signer Signer, sig []byte) error {
	r, s, v, err := signer.SignatureValues(tx, sig)
	if err != nil {
		return err
	}
	tx.R = r
	tx.S = s
	tx.V = v
	return nil
}

// Sender recovers the sender address of tx with the given signer.
func Sender(signer Signer, tx *common.Transaction) (common.Address, error) {
	// TODO: add a cache — recomputing the signature recovery on every
	// call keeps TPS low.
	addr, err := signer.Sender(tx)
	if err != nil {
		return common.Address{}, err
	}
	return addr, nil
}
// Signer encapsulates transaction signature handling: recovering the
// sender, deriving R/S/V from a raw signature, and producing the hash
// that is signed.
type Signer interface {
	// Sender recovers the sender address from the transaction's signature.
	Sender(tx *common.Transaction) (common.Address, error)
	// SignatureValues splits a raw 65-byte signature into R, S, V.
	SignatureValues(tx *common.Transaction, sig []byte) (r, s, v *big.Int, err error)
	// Hash returns the hash that is signed for the transaction.
	Hash(tx *common.Transaction) common.Hash
	// Equal reports whether the given signer is equivalent to this one.
	Equal(Signer) bool
}

// HNBSigner implements Signer using the EIP155 replay-protection rules,
// parameterised by chain id; chainIdMul caches chainId*2 for the V math.
type HNBSigner struct {
	chainId, chainIdMul *big.Int
}
// NewHNBSigner builds an HNBSigner for the given chain id. A nil chain id
// is treated as zero so the returned signer is always usable.
func NewHNBSigner(chainId *big.Int) *HNBSigner {
	id := chainId
	if id == nil {
		id = new(big.Int)
	}
	signer := &HNBSigner{chainId: id}
	signer.chainIdMul = new(big.Int).Mul(id, big.NewInt(2))
	return signer
}
// Equal reports whether s2 is an HNBSigner configured with the same
// chain id.
func (s HNBSigner) Equal(s2 Signer) bool {
	other, ok := s2.(HNBSigner)
	if !ok {
		return false
	}
	return other.chainId.Cmp(s.chainId) == 0
}
// big8 is the constant 8 used when stripping EIP155 replay protection
// from V (see HNBSigner.Sender).
var big8 = big.NewInt(8)

// Protected reports whether tx carries an EIP155 replay-protected V.
func Protected(tx *common.Transaction) bool {
	return isProtectedV(tx.V)
}
func isProtectedV(V *big.Int) bool {
if V.BitLen() <= 8 {
v := V.Uint64()
return v != 27 && v != 28
}
// anything not 27 or 28 is considered protected
return true
}
// Sender recovers the sender of an EIP155-protected transaction; legacy
// (unprotected) transactions fall back to the Homestead rules. The chain
// id contribution (chainId*2 + 8) is stripped from V before recovery.
func (s HNBSigner) Sender(tx *common.Transaction) (common.Address, error) {
	//if strings.Compare(tx.ContractName, string(s.chainId.Bytes())) != 0 {
	//	return common.Address{}, ErrInvalidChainId
	//}
	if !Protected(tx) {
		return HomesteadSigner{}.Sender(tx)
	}
	V := new(big.Int).Sub(tx.V, s.chainIdMul)
	V.Sub(V, big8)
	return recoverPlain(s.Hash(tx), tx.R, tx.S, V, true)
}

// SignatureValues splits sig into R, S, V. With a non-zero chain id the
// recovery id is re-encoded as id + 35 + chainId*2 per EIP155.
func (s HNBSigner) SignatureValues(tx *common.Transaction, sig []byte) (R, S, V *big.Int, err error) {
	R, S, V, err = HomesteadSigner{}.SignatureValues(tx, sig)
	if err != nil {
		return nil, nil, nil, err
	}
	if s.chainId.Sign() != 0 {
		V = big.NewInt(int64(sig[64] + 35))
		V.Add(V, s.chainIdMul)
	}
	return R, S, V, nil
}
// signHash hashes an arbitrary value by JSON-marshalling it and feeding
// the bytes through Keccak-256, writing the digest into a common.Hash.
// NOTE(review): the json.Marshal error is discarded — an unmarshalable
// value would silently hash empty bytes; confirm inputs are always
// marshalable.
func signHash(x interface{}) (h common.Hash) {
	hw := sha3.NewKeccak256()
	m, _ := json.Marshal(x)
	hw.Write(m)
	hw.Sum(h[:0])
	return h
}
// Hash returns the hash to be signed by the sender.
// It does not uniquely identify the transaction: only the contract name,
// sender, payload and nonce are folded into the JSON-derived digest.
func (s HNBSigner) Hash(tx *common.Transaction) (h common.Hash) {
	// if config.Config.RunMode == "dev"{
	return signHash([]interface{}{
		tx.ContractName,
		tx.From,
		tx.Payload,
		tx.NonceValue,
	})
	//}
	//return signHash([]interface{}{
	//	tx.ContractName,
	//	tx.From,
	//	tx.Payload,
	//	0,
	//})
}
// HomesteadSigner implements Signer using the homestead rules: Frontier
// behaviour plus the high-S malleability check during recovery.
type HomesteadSigner struct{ FrontierSigner }

// Equal reports whether s2 is also a HomesteadSigner.
func (s HomesteadSigner) Equal(s2 Signer) bool {
	_, ok := s2.(HomesteadSigner)
	return ok
}

// SignatureValues returns signature values. This signature
// needs to be in the [R || S || V] format where V is 0 or 1.
func (hs HomesteadSigner) SignatureValues(tx *common.Transaction, sig []byte) (r, s, v *big.Int, err error) {
	return hs.FrontierSigner.SignatureValues(tx, sig)
}

// Sender recovers the sender address, enforcing the homestead low-S rule.
func (hs HomesteadSigner) Sender(tx *common.Transaction) (common.Address, error) {
	return recoverPlain(hs.Hash(tx), tx.R, tx.S, tx.V, true)
}
// FrontierSigner implements the original (Frontier) signing rules with
// no replay protection and no low-S requirement.
type FrontierSigner struct{}

// Equal reports whether s2 is also a FrontierSigner.
func (s FrontierSigner) Equal(s2 Signer) bool {
	_, ok := s2.(FrontierSigner)
	return ok
}
// SignatureValues returns signature values. This signature
// needs to be in the [R || S || V] format where V is 0 or 1; V is stored
// as recovery_id + 27.
// NOTE(review): a signature that is not exactly 65 bytes panics instead
// of returning an error — confirm all callers guarantee the length.
func (fs FrontierSigner) SignatureValues(tx *common.Transaction, sig []byte) (r, s, v *big.Int, err error) {
	if len(sig) != 65 {
		panic(fmt.Sprintf("wrong size for signature: got %d, want 65", len(sig)))
	}
	r = new(big.Int).SetBytes(sig[:32])
	s = new(big.Int).SetBytes(sig[32:64])
	v = new(big.Int).SetBytes([]byte{sig[64] + 27})
	return r, s, v, nil
}
// Hash returns the hash to be signed by the sender.
// It does not uniquely identify the transaction; the digest covers the
// contract name, sender, payload and nonce only.
func (fs FrontierSigner) Hash(tx *common.Transaction) common.Hash {
	//if config.Config.RunMode == "dev"{
	return signHash([]interface{}{
		tx.ContractName,
		tx.From,
		tx.Payload,
		tx.NonceValue,
	})
	//}
	//
	//return signHash([]interface{}{
	//	tx.ContractName,
	//	tx.From,
	//	tx.Payload,
	//	0,
	//})
}

// Sender recovers the sender address without the homestead low-S check.
func (fs FrontierSigner) Sender(tx *common.Transaction) (common.Address, error) {
	return recoverPlain(fs.Hash(tx), tx.R, tx.S, tx.V, false)
}
// ValidateSignatureValues reports whether v, r, s form a valid secp256k1
// signature triple: the recovery id must be 0 or 1 and r, s must lie in
// [1, N). Under homestead rules, high-S values (> N/2) are additionally
// rejected to rule out signature malleability.
func ValidateSignatureValues(v byte, r, s *big.Int, homestead bool) bool {
	if v != 0 && v != 1 {
		return false
	}
	if r.Cmp(Big1) < 0 || s.Cmp(Big1) < 0 {
		return false
	}
	// reject upper range of s values (ECDSA malleability)
	// see discussion in secp256k1/libsecp256k1/include/secp256k1.h
	if homestead && s.Cmp(secp256k1halfN) > 0 {
		return false
	}
	// Frontier: allow s to be in full N range.
	return r.Cmp(secp256k1N) < 0 && s.Cmp(secp256k1N) < 0
}
// Ecrecover recovers the uncompressed public key that produced sig over
// hash.
func Ecrecover(hash, sig []byte) ([]byte, error) {
	return secp256k1.RecoverPubkey(hash, sig)
}

// Keccak256 computes the Keccak-256 digest of the concatenation of data.
func Keccak256(data ...[]byte) []byte {
	d := sha3.NewKeccak256()
	for _, b := range data {
		d.Write(b)
	}
	return d.Sum(nil)
}
// recoverPlain recovers the sender address from the signature components.
// V is expected in 27/28 form here: it is normalised to the 0/1 recovery
// id, validated, reassembled into a 65-byte [R || S || V] signature, and
// the address is taken as the last 20 bytes of the Keccak-256 of the
// recovered public key (without the 0x04 prefix byte).
func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (common.Address, error) {
	if Vb.BitLen() > 8 {
		return common.Address{}, ErrInvalidSig
	}
	V := byte(Vb.Uint64() - 27)
	if !ValidateSignatureValues(V, R, S, homestead) {
		return common.Address{}, ErrInvalidSig
	}
	// encode the signature in uncompressed format
	r, s := R.Bytes(), S.Bytes()
	sig := make([]byte, 65)
	copy(sig[32-len(r):32], r)
	copy(sig[64-len(s):64], s)
	sig[64] = V
	// recover the public key from the signature
	pub, err := Ecrecover(sighash[:], sig)
	if err != nil {
		return common.Address{}, err
	}
	// An uncompressed secp256k1 public key must carry the 0x04 prefix.
	if len(pub) == 0 || pub[0] != 4 {
		return common.Address{}, errors.New("invalid public key")
	}
	var addr common.Address
	copy(addr[:], Keccak256(pub[1:])[12:])
	return addr, nil
}
// FromECDSAPub marshals an ECDSA public key into the uncompressed SEC1
// form on the secp256k1 curve; nil components yield nil.
func FromECDSAPub(pub *ecdsa.PublicKey) []byte {
	if pub == nil || pub.X == nil || pub.Y == nil {
		return nil
	}
	return elliptic.Marshal(secp256k1.S256(), pub.X, pub.Y)
}

// BytesToAddress converts a byte slice into an address via
// common.Address.SetBytes.
func BytesToAddress(b []byte) common.Address {
	var a common.Address
	a.SetBytes(b)
	return a
}

// AccountPubkeyToAddress derives the node account's address from the
// package key pair: Keccak-256 of the uncompressed public key (prefix
// stripped), last 20 bytes.
// NOTE(review): keyPair is declared elsewhere in this package; this
// assumes it is initialised before the first call.
func AccountPubkeyToAddress() common.Address {
	key := keyPair.PubKey.(*sw.Ecdsa256K1PublicKey).PubKey
	pubBytes := FromECDSAPub(key)
	return BytesToAddress(Keccak256(pubBytes[1:])[12:])
}

// AccountPubkeyToAddress1 performs the same derivation as
// AccountPubkeyToAddress for a caller-supplied public key.
func AccountPubkeyToAddress1(pubkey bccsp.Key) common.Address {
	key := pubkey.(*sw.Ecdsa256K1PublicKey).PubKey
	pubBytes := FromECDSAPub(key)
	return BytesToAddress(Keccak256(pubBytes[1:])[12:])
}
|
package main
import (
"net"
"fmt"
"encoding/gob"
)
// main starts the demo server and client concurrently, then blocks on
// Scanln so the process stays alive until the user presses enter.
func main() {
	go server()
	go client()
	var pause int
	fmt.Scanln(&pause)
}
// server accepts TCP connections on port 9999 and hands each one to
// handleConnection on its own goroutine.
func server() {
	listener, err := net.Listen("tcp", ":9999") // listen on port
	if err != nil {
		fmt.Println(err)
		return
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			fmt.Println(err)
			continue
		}
		go handleConnection(conn)
	}
}
func handleConnection(c net.Conn){
var msg string
err :=gob.NewDecoder(c).Decode(&msg) //receive msg
if err !=nil{
fmt.Println(err)
}else{
fmt.Println("Received",msg)
}
c.Close()
}
// client dials the local demo server and sends one gob-encoded greeting.
func client() {
	conn, err := net.Dial("tcp", "127.0.0.1:9999")
	if err != nil {
		fmt.Println(err)
		return
	}
	msg := "Hello World"
	fmt.Println("sending", msg)
	if err := gob.NewEncoder(conn).Encode(msg); err != nil {
		fmt.Println(err)
	}
	conn.Close()
}
package main
import (
"github.com/micro/go-micro"
"github.com/micro/go-plugins/registry/etcdv3"
"github.com/odom11/playground_micro/api"
"github.com/odom11/playground_micro/toc"
)
// main registers a "tic" service against the etcd registry and wires the
// Toc handler with a client targeting the "toc" service.
// NOTE(review): neither service.Run() nor tic.Run() is invoked, so the
// process exits right after registration — confirm this is intentional
// for this playground.
func main() {
	registry := etcdv3.NewRegistry()
	service := micro.NewService(
		micro.Name("tic"),
		micro.Version("latest"),
		micro.Registry(registry),
	)
	// A second service instance used only to obtain a client.
	tic := micro.NewService()
	tic.Init()
	service.Init()
	api.RegisterTocHandler(service.Server(), toc.New(api.NewTicService("toc", tic.Client())))
}
|
package global
import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/jackc/pgx/v4/pgxpool"
"log"
"os"
)
// Dbpool is the shared PostgreSQL connection pool.
var Dbpool *pgxpool.Pool

// Sess is the shared AWS session.
var Sess *session.Session

// Uploader is the shared S3 upload manager.
var Uploader *s3manager.Uploader

// Shared loggers and the file they write to.
// NOTE(review): all of these are assigned elsewhere during startup and
// are nil until initialised.
var (
	InfoLogger    *log.Logger
	WarningLogger *log.Logger
	ErrorLogger   *log.Logger
	File          *os.File
)
|
//divide-and-conquer 利用分治法查找最大,最小值
package main
import "fmt"
// GetMaxAndMin returns the maximum and minimum of arr using the
// divide-and-conquer pairing trick: elements are first compared within a
// pair, then only the larger is tested against max and the smaller
// against min, needing about 3n/2 comparisons instead of 2n.
//
// An empty or nil slice yields (0, 0). Fixes over the previous version:
// the maximum of the first pair is no longer skipped (e.g. []int{7, 3}
// now correctly reports max 7), an empty non-nil slice no longer panics
// on arr[0], and the input slice is no longer reordered in place.
func GetMaxAndMin(arr []int) (max, min int) {
	if len(arr) == 0 {
		// Covers both nil (old behaviour) and the empty slice, which
		// previously panicked.
		return 0, 0
	}
	// Seed the bounds from the first element, or from the first pair
	// when one exists.
	max, min = arr[0], arr[0]
	i := 1
	if len(arr) >= 2 {
		if arr[1] > max {
			max = arr[1]
		} else {
			min = arr[1]
		}
		i = 2
	}
	// Walk the remainder two at a time: order the pair locally, then
	// compare the smaller against min and the larger against max.
	for ; i+1 < len(arr); i += 2 {
		lo, hi := arr[i], arr[i+1]
		if lo > hi {
			lo, hi = hi, lo
		}
		if lo < min {
			min = lo
		}
		if hi > max {
			max = hi
		}
	}
	// A single trailing element is checked against both bounds.
	if i < len(arr) {
		if arr[i] > max {
			max = arr[i]
		}
		if arr[i] < min {
			min = arr[i]
		}
	}
	return max, min
}
func main() {
arr := []int{7,3,19,40,4,7,1}
max,min := GetMaxAndMin(arr)
fmt.Println("分治法")
fmt.Println(max, min)
} |
// Copyright (c) 2020 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wallet
import (
"testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcwallet/waddrmgr"
)
// TestComputeInputScript checks that the wallet can create the full
// witness script for a witness output, covering both the native P2WKH
// (BIP0084) and nested P2WKH (BIP0049) key scopes.
func TestComputeInputScript(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name              string
		scope             waddrmgr.KeyScope
		expectedScriptLen int
	}{{
		// Native segwit spends carry no sigScript at all.
		name:              "BIP084 P2WKH",
		scope:             waddrmgr.KeyScopeBIP0084,
		expectedScriptLen: 0,
	}, {
		// Nested segwit spends are expected to carry a 23-byte sigScript.
		name:              "BIP049 nested P2WKH",
		scope:             waddrmgr.KeyScopeBIP0049Plus,
		expectedScriptLen: 23,
	}}
	w, cleanup := testWallet(t)
	defer cleanup()
	for _, tc := range testCases {
		tc := tc // capture per-iteration value for the subtest closure
		t.Run(tc.name, func(t *testing.T) {
			runTestCase(t, w, tc.scope, tc.expectedScriptLen)
		})
	}
}
// runTestCase funds the wallet with a UTXO paying to a fresh address in
// the given key scope, builds a transaction spending that UTXO back to
// the same script, and checks that ComputeInputScript yields a
// two-element witness plus a sigScript of the expected length that
// together validate.
func runTestCase(t *testing.T, w *Wallet, scope waddrmgr.KeyScope,
	scriptLen int) {
	// Create an address we can use to send some coins to.
	addr, err := w.CurrentAddress(0, scope)
	if err != nil {
		// Fixed: previously logged addr instead of the actual error.
		t.Fatalf("unable to get current address: %v", err)
	}
	p2shAddr, err := txscript.PayToAddrScript(addr)
	if err != nil {
		t.Fatalf("unable to convert wallet address to p2sh: %v", err)
	}
	// Add an output paying to the wallet's address to the database.
	utxOut := wire.NewTxOut(100000, p2shAddr)
	incomingTx := &wire.MsgTx{
		TxIn:  []*wire.TxIn{{}},
		TxOut: []*wire.TxOut{utxOut},
	}
	addUtxo(t, w, incomingTx)
	// Create a transaction that spends the UTXO created above and spends to
	// the same address again.
	prevOut := wire.OutPoint{
		Hash:  incomingTx.TxHash(),
		Index: 0,
	}
	outgoingTx := &wire.MsgTx{
		TxIn: []*wire.TxIn{{
			PreviousOutPoint: prevOut,
		}},
		TxOut: []*wire.TxOut{utxOut},
	}
	fetcher := txscript.NewCannedPrevOutputFetcher(
		utxOut.PkScript, utxOut.Value,
	)
	sigHashes := txscript.NewTxSigHashes(outgoingTx, fetcher)
	// Compute the input script to spend the UTXO now.
	witness, script, err := w.ComputeInputScript(
		outgoingTx, utxOut, 0, sigHashes, txscript.SigHashAll, nil,
	)
	if err != nil {
		t.Fatalf("error computing input script: %v", err)
	}
	if len(script) != scriptLen {
		t.Fatalf("unexpected script length, got %d wanted %d",
			len(script), scriptLen)
	}
	if len(witness) != 2 {
		t.Fatalf("unexpected witness stack length, got %d, wanted %d",
			len(witness), 2)
	}
	// Finally verify that the created witness is valid.
	outgoingTx.TxIn[0].Witness = witness
	outgoingTx.TxIn[0].SignatureScript = script
	err = validateMsgTx(
		outgoingTx, [][]byte{utxOut.PkScript}, []btcutil.Amount{100000},
	)
	if err != nil {
		t.Fatalf("error validating tx: %v", err)
	}
}
|
package logic
import (
"unsafe"
"hub000.xindong.com/rookie/rookie-framework/log"
)
//TODO:This is just a demo, need to be modified in the future.

// MemoryWrapper is a decorator around a LogicBlock that logs the size of
// the argument list before delegating each call.
type MemoryWrapper struct {
	block LogicBlock
}

// NewMemoryWrapper creates a new logic memory decorator wrapping block.
func NewMemoryWrapper(block LogicBlock) MemoryWrapper {
	decorator := MemoryWrapper{block: block}
	return decorator
}
// Call0 logs the argument size and delegates to the wrapped block.
// NOTE(review): unsafe.Sizeof(args) is the size of the slice header (a
// constant), not of the data the arguments occupy — if the intent is the
// input size, this should likely be len(args) or a deep size instead.
func (h MemoryWrapper) Call0(f func(args ...interface{}) error, args ...interface{}) (err error) {
	mylog.Info("Input size is ", unsafe.Sizeof(args))
	err = h.block.Call0(f, args...)
	return
}

// Call1 logs the argument size and delegates; see the note on Call0
// about unsafe.Sizeof reporting only the slice-header size.
func (h MemoryWrapper) Call1(f func(args ...interface{}) (interface{}, error), args ...interface{}) (ret interface{}, err error) {
	mylog.Info("Input size is ", unsafe.Sizeof(args))
	ret, err = h.block.Call1(f, args...)
	return
}

// CallN logs the argument size and delegates; see the note on Call0.
func (h MemoryWrapper) CallN(f func(args ...interface{}) ([]interface{}, error), args ...interface{}) (ret []interface{}, err error) {
	mylog.Info("Input size is ", unsafe.Sizeof(args))
	ret, err = h.block.CallN(f, args...)
	return
}
|
package server
// CreateProductFormRequest is the JSON payload for creating a product.
type CreateProductFormRequest struct {
	Name     string `json:"name"`
	Price    int    `json:"price"`
	ImageURL string `json:"imageurl"`
}

// Validate checks the request fields and returns a map from field name to
// a list of validation messages. Fields that pass are absent from the
// map, so an empty map means the request is valid.
func (r CreateProductFormRequest) Validate() map[string]interface{} {
	errs := make(map[string]interface{})
	if r.Name == "" {
		errs["name"] = []string{"Cannot be blank"}
	}
	if r.Price == 0 {
		errs["price"] = []string{"Cannot be blank"}
	}
	if r.ImageURL == "" {
		errs["imageurl"] = []string{"Cannot be blank"}
	}
	return errs
}
// UpdateProductFormRequest is the JSON payload for partially updating a
// product. Fields use pointers with omitempty — presumably a nil field
// means "leave unchanged"; confirm against the update handler.
type UpdateProductFormRequest struct {
	Name     *string `json:"name,omitempty"`
	Price    *int    `json:"price,omitempty"`
	ImageURL *string `json:"imageurl,omitempty"`
}
|
package bridge
import (
"errors"
"flag"
"fmt"
log "github.com/sirupsen/logrus"
"net/http"
"net/http/httputil"
"net/url"
"os"
"time"
"github.com/elazarl/go-bindata-assetfs"
"github.com/facebookgo/inject"
"github.com/stellar/gateway/bridge/config"
"github.com/stellar/gateway/bridge/gui"
"github.com/stellar/gateway/bridge/handlers"
"github.com/stellar/gateway/db"
"github.com/stellar/gateway/db/drivers/mysql"
"github.com/stellar/gateway/db/drivers/postgres"
"github.com/stellar/gateway/horizon"
"github.com/stellar/gateway/listener"
"github.com/stellar/gateway/server"
"github.com/stellar/gateway/submitter"
"github.com/stellar/go/clients/federation"
"github.com/stellar/go/clients/stellartoml"
"github.com/zenazn/goji/graceful"
"github.com/zenazn/goji/web"
"github.com/zenazn/goji/web/middleware"
)
// App is the application object, holding the parsed configuration and the
// fully injected request handler (see NewApp).
type App struct {
	config         config.Config
	requestHandler handlers.RequestHandler
}
// NewApp constructs a new App instance from the provided config.
//
// Depending on the flags this may instead run a one-shot mode: migrateFlag
// applies pending DB migrations and exits; versionFlag prints the version
// and exits. Otherwise it wires the DB driver, transaction submitter,
// payment listener, and the dependency-injection graph used by the
// request handler.
func NewApp(config config.Config, migrateFlag bool, versionFlag bool, version string) (app *App, err error) {
	var g inject.Graph
	var driver db.Driver
	switch config.Database.Type {
	case "mysql":
		driver = &mysql.Driver{}
	case "postgres":
		driver = &postgres.Driver{}
	case "":
		// Allow to start gateway server with a single endpoint: /payment
		break
	default:
		return nil, fmt.Errorf("%s database has no driver", config.Database.Type)
	}
	var entityManager db.EntityManagerInterface
	var repository db.Repository
	if driver != nil {
		err = driver.Init(config.Database.URL)
		if err != nil {
			err = fmt.Errorf("Cannot connect to a DB: %s", err)
			return
		}
		entityManager = db.NewEntityManager(driver)
		repository = db.NewRepository(driver)
	}
	if migrateFlag {
		if driver == nil {
			log.Fatal("No database driver.")
			return // NOTE(review): unreachable — log.Fatal exits the process.
		}
		var migrationsApplied int
		migrationsApplied, err = driver.MigrateUp("gateway")
		if err != nil {
			return
		}
		log.Info("Applied migrations: ", migrationsApplied)
		os.Exit(0)
		return // NOTE(review): unreachable — os.Exit never returns.
	}
	if versionFlag {
		fmt.Printf("Bridge Server Version: %s \n", version)
		os.Exit(0)
		return // NOTE(review): unreachable — os.Exit never returns.
	}
	h := horizon.New(config.Horizon)
	log.Print("Creating and initializing TransactionSubmitter")
	ts := submitter.NewTransactionSubmitter(&h, entityManager, config.NetworkPassphrase, time.Now)
	if err != nil {
		// NOTE(review): dead check — err cannot be non-nil here; possibly
		// meant to check an error from NewTransactionSubmitter.
		return
	}
	log.Print("Initializing Authorizing account")
	if config.Accounts.AuthorizingSeed == "" {
		log.Warning("No accounts.authorizing_seed param. Skipping...")
	} else {
		err = ts.InitAccount(config.Accounts.AuthorizingSeed)
		if err != nil {
			return
		}
	}
	if config.Accounts.BaseSeed == "" {
		log.Warning("No accounts.base_seed param. Skipping...")
	} else {
		log.Print("Initializing Base account")
		err = ts.InitAccount(config.Accounts.BaseSeed)
		if err != nil {
			return
		}
	}
	log.Print("TransactionSubmitter created")
	log.Print("Creating and starting PaymentListener")
	var paymentListener listener.PaymentListener
	if config.Accounts.ReceivingAccountID == "" {
		log.Warning("No accounts.receiving_account_id param. Skipping...")
	} else if config.Callbacks.Receive == "" {
		log.Warning("No callbacks.receive param. Skipping...")
	} else {
		paymentListener, err = listener.NewPaymentListener(&config, entityManager, &h, repository, time.Now)
		if err != nil {
			return
		}
		err = paymentListener.Listen()
		if err != nil {
			return
		}
		log.Print("PaymentListener created")
	}
	if len(config.APIKey) > 0 && len(config.APIKey) < 15 {
		err = errors.New("api-key have to be at least 15 chars long")
		return
	}
	requestHandler := handlers.RequestHandler{}
	// Shared HTTP client with a timeout for federation/stellar.toml calls.
	httpClientWithTimeout := http.Client{
		Timeout: 10 * time.Second,
	}
	stellartomlClient := stellartoml.Client{
		HTTP: &httpClientWithTimeout,
	}
	federationClient := federation.Client{
		HTTP:        &httpClientWithTimeout,
		StellarTOML: &stellartomlClient,
	}
	err = g.Provide(
		&inject.Object{Value: &requestHandler},
		&inject.Object{Value: &config},
		&inject.Object{Value: &stellartomlClient},
		&inject.Object{Value: &federationClient},
		&inject.Object{Value: &h},
		&inject.Object{Value: &repository},
		&inject.Object{Value: driver},
		&inject.Object{Value: &ts},
		&inject.Object{Value: &paymentListener},
		&inject.Object{Value: &httpClientWithTimeout},
	)
	if err != nil {
		log.Fatal("Injector: ", err)
	}
	if err := g.Populate(); err != nil {
		log.Fatal("Injector: ", err)
	}
	app = &App{
		config:         config,
		requestHandler: requestHandler,
	}
	return
}
// Serve starts the server: it builds the goji mux, installs middleware
// (trailing-slash stripping, default headers, optional API-key auth),
// registers the bridge endpoints, mounts the admin GUI (reverse-proxied
// to localhost:3000 in develop mode, embedded go-bindata assets
// otherwise), and blocks in graceful.ListenAndServe.
func (a *App) Serve() {
	portString := fmt.Sprintf(":%d", *a.config.Port)
	flag.Set("bind", portString)
	bridge := web.New()
	bridge.Abandon(middleware.Logger)
	bridge.Use(server.StripTrailingSlashMiddleware())
	bridge.Use(server.HeadersMiddleware())
	if a.config.APIKey != "" {
		bridge.Use(server.APIKeyMiddleware(a.config.APIKey))
	}
	if a.config.Accounts.AuthorizingSeed != "" {
		bridge.Post("/authorize", a.requestHandler.Authorize)
	} else {
		log.Warning("accounts.authorizing_seed not provided. /authorize endpoint will not be available.")
	}
	bridge.Post("/create-keypair", a.requestHandler.CreateKeypair)
	bridge.Post("/builder", a.requestHandler.Builder)
	bridge.Post("/payment", a.requestHandler.Payment)
	bridge.Get("/payment", a.requestHandler.Payment)
	bridge.Post("/reprocess", a.requestHandler.Reprocess)
	bridge.Get("/admin/received-payments", a.requestHandler.AdminReceivedPayments)
	bridge.Get("/admin/received-payments/:id", a.requestHandler.AdminReceivedPayment)
	bridge.Get("/admin/sent-transactions", a.requestHandler.AdminSentTransactions)
	if a.config.Develop {
		// Create a proxy server to localhost:3000 where GUI development server lives.
		staticAdminURL, err := url.Parse("http://localhost:3000")
		if err != nil {
			panic(err)
		}
		bridge.Get("/*", httputil.NewSingleHostReverseProxy(staticAdminURL))
	} else {
		// Load go-bindata files
		fileServerHandler := http.FileServer(
			&assetfs.AssetFS{
				Asset:     gui.Asset,
				AssetDir:  gui.AssetDir,
				AssetInfo: gui.AssetInfo,
			})
		bridge.Get("/admin", func(w http.ResponseWriter, r *http.Request) {
			http.Redirect(w, r, "/admin/", http.StatusPermanentRedirect)
		})
		bridge.Get("/admin/*", http.StripPrefix("/admin/", fileServerHandler))
	}
	err := graceful.ListenAndServe(portString, bridge)
	if err != nil {
		log.Fatal(err)
	}
}
|
package core
import (
"context"
"reflect"
)
// Handle registers handler under name after validating its shape via
// reflection: the handler must be a func taking (context.Context, a
// struct) and returning (T, error). Registration panics on an invalid
// handler or a duplicate name.
func (s *WSServer) Handle(name string, handler interface{}) {
	t := reflect.TypeOf(handler)
	if t.Kind() != reflect.Func {
		panic("error: " + name + " method type not func.")
	}
	if t.NumIn() != 2 || t.NumOut() != 2 {
		panic("error: handler wants 2 input and 2 output parameters.")
	}
	// check input, context and struct
	arg1 := t.In(0)
	// reflect.TypeOf((*context.Context)(nil)).Elem()
	if !isImpl(arg1, reflect.TypeOf((*context.Context)(nil)).Elem()) {
		panic("The first arg must context.Context")
	}
	arg2 := t.In(1)
	if arg2.Kind() != reflect.Struct {
		panic("The second arg must a type of struct.")
	}
	// check output, error
	output2 := t.Out(1)
	if !isImpl(output2, reflect.TypeOf((*error)(nil)).Elem()) {
		panic("The second output must a type of error.")
	}
	// LoadOrStore keeps the store and the duplicate check atomic.
	_, loaded := s.handlers.LoadOrStore(name, reflect.ValueOf(handler))
	if loaded {
		panic("method " + name + " already exists.")
	}
}
func isImpl(t reflect.Type, rt reflect.Type) (b bool) {
defer func() {
if r := recover(); r != nil {
b = false
}
}()
b = t.Implements(rt)
return
}
// HandlerNotExistError reports a lookup of a method name that has no
// registered handler.
type HandlerNotExistError struct {
	method string
}

// Error implements the error interface.
func (e *HandlerNotExistError) Error() string {
	return e.method + " handler not exists."
}
// getHandler returns the reflect.Value of the handler registered under
// method, or a *HandlerNotExistError if none is registered.
func (s *WSServer) getHandler(method string) (reflect.Value, error) {
	handler, ok := s.handlers.Load(method)
	if !ok {
		return reflect.Value{}, &HandlerNotExistError{method}
	}
	return handler.(reflect.Value), nil
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docgen
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestLoad exercises the i18n lookup helpers: the built-in English and
// Chinese tables, the nil-receiver fallback to English, trimming of
// trailing punctuation/whitespace in keys, and LoadI18nData merging extra
// translations fetched from a (mock) HTTP endpoint.
func TestLoad(t *testing.T) {
	svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = fmt.Fprintf(w, `{"Outputs":{"Chinese":"输出"}}`)
	}))
	defer svr.Close()
	time.Sleep(time.Millisecond)
	assert.Equal(t, En.Language(), Language("English"))
	// Unknown keys fall through unchanged.
	assert.Equal(t, En.Get("nihaoha"), "nihaoha")
	assert.Equal(t, En.Get("AlibabaCloud"), "Alibaba Cloud")
	// A nil receiver must behave like the English table.
	var ni *I18n
	assert.Equal(t, ni.Get("AlibabaCloud"), "Alibaba Cloud")
	assert.Equal(t, ni.Get("AlibabaCloud."), "Alibaba Cloud")
	assert.Equal(t, ni.Get("AlibabaCloud。"), "Alibaba Cloud")
	assert.Equal(t, ni.Get("AlibabaCloud。 "), "Alibaba Cloud")
	assert.Equal(t, ni.Get("AlibabaCloud 。 "), "Alibaba Cloud")
	assert.Equal(t, ni.Get("AlibabaCloud \n "), "Alibaba Cloud")
	assert.Equal(t, ni.Get(" A\n "), "A")
	assert.Equal(t, ni.Get(" \n "), "")
	assert.Equal(t, Zh.Language(), Language("Chinese"))
	assert.Equal(t, Zh.Get("nihaoha"), "nihaoha")
	assert.Equal(t, Zh.Get("AlibabaCloud"), "阿里云")
	// Data served by the mock endpoint becomes visible after loading.
	LoadI18nData(svr.URL)
	assert.Equal(t, Zh.Get("Outputs"), "输出")
}
|
package Logger
import (
"fmt"
"os"
"time"
)
// Info prints the provided log line to stdout prefixed with an RFC850
// timestamp and appends the same formatted line to the history file.
func Info(line string) {
	// Renamed from `time`: the old local shadowed the imported time
	// package for the rest of the function body.
	timestamp := time.Now().Format(time.RFC850)
	formattedLine := "[" + timestamp + "] " + line
	fmt.Println(formattedLine)
	appendToFile(formattedLine)
}
// appendToFile appends provided line to the log file ("history.txt").
// Write and close errors are reported on stdout; the handle is always
// closed before returning. openFile exits the process if the file cannot
// be opened or created, so f is never nil here.
func appendToFile(line string) {
	var f *os.File = openFile("history.txt")
	// Print log line into the log file.
	_, err := fmt.Fprintln(f, line)
	if err != nil {
		fmt.Println(err)
		f.Close()
		return
	}
	// Finally close the File.
	err = f.Close()
	if err != nil {
		fmt.Println(err)
		return
	}
}
// openFile opens file or create new one in case it doesn't exists.
func openFile(path string) *os.File {
f, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
f, err = os.Create(path)
if err != nil {
fmt.Println(err)
os.Exit(1)
return nil
}
}
return f
}
|
/*
Copyright 2019 The MayaData Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generic
import (
"testing"
"openebs.io/metac/test/integration/framework"
)
// This will be run only once when go test is invoked against this
// package. All the other Test* functions will be invoked via m.Run
// call.
//
// NOTE:
// framework.TestMain provides setup & teardown features required for
// all the individual testcases to run.
func TestMain(m *testing.M) {
	// Delegate to the framework so the CRD-based Metac environment is set
	// up before, and torn down after, m.Run executes the package's tests.
	framework.TestWithCRDMetac(m.Run)
}
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certs
import (
"crypto/rsa"
"crypto/x509"
"github.com/pkg/errors"
certutil "k8s.io/client-go/util/cert"
"mobingi/ocean/pkg/constants"
pkiutil "mobingi/ocean/pkg/util/pki"
)
// configMutatorsFunc adjusts a certificate config using values only known
// at runtime; mutators run in series just before a cert is generated.
type configMutatorsFunc func(*certutil.Config, *config) error

// cert represents a certificate that will be created for the system to
// function properly.
type cert struct {
	Name     string
	BaseName string
	CAName   string
	// Some attributes will depend on the runtime configuration, only known at runtime.
	// These functions will be run in series, passed both the runtime config and a cert Config.
	configMutators []configMutatorsFunc
	config         certutil.Config
}
// getConfig assembles this cert's certutil.Config by applying every
// registered mutator, in order, against the provided runtime config.
func (c *cert) getConfig(cfg *config) (*certutil.Config, error) {
	for _, mutate := range c.configMutators {
		if err := mutate(&c.config, cfg); err != nil {
			return nil, err
		}
	}
	return &c.config, nil
}
// newCertAndKeyFromCA generates a fresh private key for this cert and has
// the given CA sign the resulting certificate.
func (c *cert) newCertAndKeyFromCA(cfg *config, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {
	spec, err := c.getConfig(cfg)
	if err != nil {
		return nil, nil, err
	}
	privKey, err := newPrivateKey()
	if err != nil {
		return nil, nil, err
	}
	signed, err := newSignedCert(spec, privKey, caCert, caKey)
	if err != nil {
		return nil, nil, err
	}
	return signed, privKey, nil
}
// NOTE(review): "ceretificates" appears to be a misspelled and unused
// duplicate of the "certificates" type declared below — confirm nothing
// else in the package references it, then remove it.
type ceretificates []*cert

// certificateTree represents a one-level-deep tree, mapping a CA to the
// certs that depend on it.
type certificateTree map[*cert]certificates
// createTree generates each CA in the tree plus all leaf certs signed by
// it, returning the PEM-encoded material keyed by its on-disk path.
func (t certificateTree) createTree(cfg *config) (map[string][]byte, error) {
	out := make(map[string][]byte)
	for ca, leaves := range t {
		caSpec, err := ca.getConfig(cfg)
		if err != nil {
			return nil, err
		}
		caCert, caKey, err := newCACertAndKey(caSpec)
		if err != nil {
			return nil, err
		}
		out[pathForCert(ca.BaseName)] = pkiutil.EncodeCertPEM(caCert)
		out[pathForKey(ca.BaseName)] = pkiutil.EncodePrivateKeyPEM(caKey)
		for _, leaf := range leaves {
			leafCert, leafKey, err := leaf.newCertAndKeyFromCA(cfg, caCert, caKey)
			if err != nil {
				return nil, err
			}
			out[pathForCert(leaf.BaseName)] = pkiutil.EncodeCertPEM(leafCert)
			out[pathForKey(leaf.BaseName)] = pkiutil.EncodePrivateKeyPEM(leafKey)
		}
	}
	return out, nil
}
// certificateMap is a flat map of certificates, keyed by Name.
type certificateMap map[string]*cert
// certTree builds a one-level-deep tree from the flat map: certs with an
// empty CAName become CA nodes; every other cert is attached to its CA.
func (m certificateMap) certTree() (certificateTree, error) {
	tree := make(certificateTree)
	for _, crt := range m {
		if crt.CAName == "" {
			// Self-signed root: ensure it exists as a node even with no leaves.
			if _, present := tree[crt]; !present {
				tree[crt] = []*cert{}
			}
			continue
		}
		ca, found := m[crt.CAName]
		if !found {
			return nil, errors.Errorf("certificate %q references unknown CA %q", crt.Name, crt.CAName)
		}
		tree[ca] = append(tree[ca], crt)
	}
	return tree, nil
}
// certificates is the list of certificates this package should create.
type certificates []*cert
// asMap indexes the certificate list by Name.
func (c certificates) asMap() certificateMap {
	m := make(certificateMap, len(c))
	for _, crt := range c {
		m[crt.Name] = crt
	}
	return m
}
// getDefaultCertList returns all of the certificates required to function:
// the Kubernetes root CA and its leaves, the front-proxy pair, and the
// etcd CA with its server/peer/client leaves.
func getDefaultCertList() certificates {
	return certificates{
		&certRootCA,
		&certAPIServer,
		&certKubeletClient,
		// Front Proxy certs
		&certFrontProxyCA,
		&certFrontProxyClient,
		// etcd certs
		&certEtcdCA,
		&certEtcdServer,
		&certEtcdPeer,
		&certEtcdHealthcheck,
		&certEtcdAPIClient,
	}
}
var (
	// certRootCA is the definition of the Kubernetes root CA for the API server and kubelet.
	certRootCA = cert{
		Name:     "ca",
		BaseName: constants.CACertAndKeyBaseName,
		config: certutil.Config{
			CommonName: "kubernetes",
		},
	}
	// certAPIServer is the definition of the cert used to serve the Kubernetes API.
	certAPIServer = cert{
		Name:     "apiserver",
		BaseName: constants.APIServerCertAndKeyBaseName,
		CAName:   "ca",
		config: certutil.Config{
			CommonName: constants.APIServerCertCommonName,
			Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		},
		configMutators: []configMutatorsFunc{
			makeAltNamesMutator(getAPIServerAltNames),
		},
	}
	// certKubeletClient is the definition of the cert used by the API server to access the kubelet.
	certKubeletClient = cert{
		Name:     "apiserver-kubelet-client",
		BaseName: constants.APIServerKubeletClientCertAndKeyBaseName,
		CAName:   "ca",
		config: certutil.Config{
			CommonName:   constants.APIServerKubeletClientCertCommonName,
			Organization: []string{constants.MastersGroup},
			Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		},
	}
	// certFrontProxyCA is the definition of the CA used for the front end proxy.
	certFrontProxyCA = cert{
		Name:     "front-proxy-ca",
		BaseName: constants.FrontProxyCACertAndKeyBaseName,
		config: certutil.Config{
			CommonName: "front-proxy-ca",
		},
	}
	// certFrontProxyClient is the definition of the cert used by the API server to access the front proxy.
	certFrontProxyClient = cert{
		Name:     "front-proxy-client",
		BaseName: constants.FrontProxyClientCertAndKeyBaseName,
		CAName:   "front-proxy-ca",
		config: certutil.Config{
			CommonName: constants.FrontProxyClientCertCommonName,
			Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		},
	}
	// certEtcdCA is the definition of the root CA used by the hosted etcd server.
	certEtcdCA = cert{
		Name:     "etcd-ca",
		BaseName: constants.EtcdCACertAndKeyBaseName,
		config: certutil.Config{
			CommonName: "etcd-ca",
		},
	}
	// certEtcdServer is the definition of the cert used to serve etcd to clients.
	certEtcdServer = cert{
		Name:     "etcd-server",
		BaseName: constants.EtcdServerCertAndKeyBaseName,
		CAName:   "etcd-ca",
		config: certutil.Config{
			// TODO: etcd 3.2 introduced an undocumented requirement for ClientAuth usage on the
			// server cert: https://github.com/coreos/etcd/issues/9785#issuecomment-396715692
			// Once the upstream issue is resolved, this should be returned to only allowing
			// ServerAuth usage.
			Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
		},
		configMutators: []configMutatorsFunc{
			makeAltNamesMutator(getEtcdAltNames),
			setCommonNameToNodeName(),
		},
	}
	// certEtcdPeer is the definition of the cert used by etcd peers to access each other.
	certEtcdPeer = cert{
		Name:     "etcd-peer",
		BaseName: constants.EtcdPeerCertAndKeyBaseName,
		CAName:   "etcd-ca",
		config: certutil.Config{
			Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
		},
		configMutators: []configMutatorsFunc{
			makeAltNamesMutator(getEtcdAltNames),
			setCommonNameToNodeName(),
		},
	}
	// certEtcdHealthcheck is the definition of the cert used by Kubernetes to check the health of the etcd server.
	certEtcdHealthcheck = cert{
		Name:     "etcd-healthcheck-client",
		BaseName: constants.EtcdHealthcheckClientCertAndKeyBaseName,
		CAName:   "etcd-ca",
		config: certutil.Config{
			CommonName:   constants.EtcdHealthcheckClientCertCommonName,
			Organization: []string{constants.MastersGroup},
			Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		},
	}
	// certEtcdAPIClient is the definition of the cert used by the API server to access etcd.
	certEtcdAPIClient = cert{
		Name:     "apiserver-etcd-client",
		BaseName: constants.APIServerEtcdClientCertAndKeyBaseName,
		CAName:   "etcd-ca",
		config: certutil.Config{
			CommonName:   constants.APIServerEtcdClientCertCommonName,
			Organization: []string{constants.MastersGroup},
			Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		},
		/*
			configMutators: []configMutatorsFunc{
				makeAltNamesMutator(getEtcdAltNames),
			},*/
	}
)
// setCommonNameToNodeName returns a mutator that fixes the certificate
// CommonName; currently hard-coded to "etcd".
func setCommonNameToNodeName() configMutatorsFunc {
	return func(cc *certutil.Config, cfg *config) error {
		//TODO cc.CommonName = cfg.NodeRegistration.Name
		cc.CommonName = "etcd"
		return nil
	}
}
|
// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package uuid
import (
"encoding/json"
"fmt"
"testing"
)
// TestSQL groups the database/sql Valuer and Scanner subtests for UUID.
func TestSQL(t *testing.T) {
	t.Run("Value", testSQLValue)
	t.Run("Scan", func(t *testing.T) {
		t.Run("Binary", testSQLScanBinary)
		t.Run("String", testSQLScanString)
		t.Run("Text", testSQLScanText)
		t.Run("Unsupported", testSQLScanUnsupported)
		t.Run("Nil", testSQLScanNil)
	})
}
// testSQLValue checks that Value() yields the canonical string form of the UUID.
func testSQLValue(t *testing.T) {
	v, err := codecTestUUID.Value()
	if err != nil {
		t.Fatal(err)
	}
	got, ok := v.(string)
	if !ok {
		t.Fatalf("Value() returned %T, want string", v)
	}
	if want := codecTestUUID.String(); got != want {
		t.Errorf("Value() == %q, want %q", got, want)
	}
}
// testSQLScanBinary checks scanning from the raw 16-byte representation.
func testSQLScanBinary(t *testing.T) {
	got := UUID{}
	err := got.Scan(codecTestData)
	if err != nil {
		t.Fatal(err)
	}
	if got != codecTestUUID {
		t.Errorf("Scan(%x): got %v, want %v", codecTestData, got, codecTestUUID)
	}
}

// testSQLScanString checks scanning from the canonical string form.
func testSQLScanString(t *testing.T) {
	s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
	got := UUID{}
	err := got.Scan(s)
	if err != nil {
		t.Fatal(err)
	}
	if got != codecTestUUID {
		t.Errorf("Scan(%q): got %v, want %v", s, got, codecTestUUID)
	}
}

// testSQLScanText checks scanning from a []byte holding the textual form.
func testSQLScanText(t *testing.T) {
	text := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	got := UUID{}
	err := got.Scan(text)
	if err != nil {
		t.Fatal(err)
	}
	if got != codecTestUUID {
		t.Errorf("Scan(%q): got %v, want %v", text, got, codecTestUUID)
	}
}
// testSQLScanUnsupported checks that unsupported source types are rejected.
func testSQLScanUnsupported(t *testing.T) {
	unsupported := []interface{}{
		true,
		42,
	}
	for _, v := range unsupported {
		got := UUID{}
		err := got.Scan(v)
		if err == nil {
			t.Errorf("Scan(%T) succeeded, got %v", v, got)
		}
	}
}

// testSQLScanNil checks that scanning SQL NULL into a plain UUID fails.
func testSQLScanNil(t *testing.T) {
	got := UUID{}
	err := got.Scan(nil)
	if err == nil {
		t.Errorf("Scan(nil) succeeded, got %v", got)
	}
}
// TestNullUUID groups the NullUUID Valuer/Scanner and JSON codec subtests.
func TestNullUUID(t *testing.T) {
	t.Run("Value", func(t *testing.T) {
		t.Run("Nil", testNullUUIDValueNil)
		t.Run("Valid", testNullUUIDValueValid)
	})
	t.Run("Scan", func(t *testing.T) {
		t.Run("Nil", testNullUUIDScanNil)
		t.Run("Valid", testNullUUIDScanValid)
		t.Run("UUID", testNullUUIDScanUUID)
	})
	t.Run("MarshalJSON", func(t *testing.T) {
		t.Run("Nil", testNullUUIDMarshalJSONNil)
		t.Run("Null", testNullUUIDMarshalJSONNull)
		t.Run("Valid", testNullUUIDMarshalJSONValid)
	})
	t.Run("UnmarshalJSON", func(t *testing.T) {
		t.Run("Nil", testNullUUIDUnmarshalJSONNil)
		t.Run("Null", testNullUUIDUnmarshalJSONNull)
		t.Run("Valid", testNullUUIDUnmarshalJSONValid)
		t.Run("Malformed", testNullUUIDUnmarshalJSONMalformed)
	})
}
// testNullUUIDValueNil checks that an invalid (null) NullUUID yields a nil
// driver.Value without error.
func testNullUUIDValueNil(t *testing.T) {
	nu := NullUUID{}
	got, err := nu.Value()
	if got != nil {
		t.Errorf("null NullUUID.Value returned non-nil driver.Value")
	}
	if err != nil {
		t.Errorf("null NullUUID.Value returned non-nil error")
	}
}

// testNullUUIDValueValid checks that a valid NullUUID yields its UUID's
// canonical string form.
func testNullUUIDValueValid(t *testing.T) {
	nu := NullUUID{
		Valid: true,
		UUID:  codecTestUUID,
	}
	got, err := nu.Value()
	if err != nil {
		t.Fatal(err)
	}
	s, ok := got.(string)
	if !ok {
		t.Errorf("Value() returned %T, want string", got)
	}
	want := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
	if s != want {
		t.Errorf("%v.Value() == %s, want %s", nu, s, want)
	}
}
// testNullUUIDScanNil checks that scanning SQL NULL clears Valid and the UUID.
func testNullUUIDScanNil(t *testing.T) {
	u := NullUUID{}
	err := u.Scan(nil)
	if err != nil {
		t.Fatal(err)
	}
	if u.Valid {
		t.Error("NullUUID is valid after Scan(nil)")
	}
	if u.UUID != Nil {
		t.Errorf("NullUUID.UUID is %v after Scan(nil) want Nil", u.UUID)
	}
}

// testNullUUIDScanValid checks scanning a canonical UUID string.
func testNullUUIDScanValid(t *testing.T) {
	s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
	u := NullUUID{}
	err := u.Scan(s)
	if err != nil {
		t.Fatal(err)
	}
	if !u.Valid {
		t.Errorf("Valid == false after Scan(%q)", s)
	}
	if u.UUID != codecTestUUID {
		t.Errorf("UUID == %v after Scan(%q), want %v", u.UUID, s, codecTestUUID)
	}
}

// testNullUUIDScanUUID checks scanning from a UUID value directly.
func testNullUUIDScanUUID(t *testing.T) {
	u := NullUUID{}
	err := u.Scan(codecTestUUID)
	if err != nil {
		t.Fatal(err)
	}
	if !u.Valid {
		t.Errorf("Valid == false after scan(%v)", codecTestUUID)
	}
	if u.UUID != codecTestUUID {
		t.Errorf("UUID == %v after Scan(%v), want %v", u.UUID, codecTestUUID, codecTestUUID)
	}
}
// testNullUUIDMarshalJSONNil checks that a valid NullUUID holding the zero
// UUID marshals to the quoted nil-UUID string.
func testNullUUIDMarshalJSONNil(t *testing.T) {
	u := NullUUID{Valid: true}
	data, err := u.MarshalJSON()
	if err != nil {
		t.Fatalf("(%#v).MarshalJSON err want: <nil>, got: %v", u, err)
	}
	dataStr := string(data)
	if dataStr != fmt.Sprintf("%q", Nil) {
		t.Fatalf("(%#v).MarshalJSON value want: %s, got: %s", u, Nil, dataStr)
	}
}

// testNullUUIDMarshalJSONValid checks that a valid NullUUID marshals to its
// quoted canonical string.
func testNullUUIDMarshalJSONValid(t *testing.T) {
	u := NullUUID{
		Valid: true,
		UUID:  codecTestUUID,
	}
	data, err := u.MarshalJSON()
	if err != nil {
		t.Fatalf("(%#v).MarshalJSON err want: <nil>, got: %v", u, err)
	}
	dataStr := string(data)
	if dataStr != fmt.Sprintf("%q", codecTestUUID) {
		t.Fatalf("(%#v).MarshalJSON value want: %s, got: %s", u, codecTestUUID, dataStr)
	}
}

// testNullUUIDMarshalJSONNull checks that an invalid NullUUID marshals to
// the JSON literal null.
func testNullUUIDMarshalJSONNull(t *testing.T) {
	u := NullUUID{}
	data, err := u.MarshalJSON()
	if err != nil {
		t.Fatalf("(%#v).MarshalJSON err want: <nil>, got: %v", u, err)
	}
	dataStr := string(data)
	if dataStr != "null" {
		t.Fatalf("(%#v).MarshalJSON value want: %s, got: %s", u, "null", dataStr)
	}
}
// testNullUUIDUnmarshalJSONNil checks that the quoted zero UUID unmarshals
// as valid with the Nil UUID value.
func testNullUUIDUnmarshalJSONNil(t *testing.T) {
	var u NullUUID
	data := []byte(`"00000000-0000-0000-0000-000000000000"`)
	if err := json.Unmarshal(data, &u); err != nil {
		t.Fatalf("json.Unmarshal err = %v, want <nil>", err)
	}
	if !u.Valid {
		t.Fatalf("u.Valid = false, want true")
	}
	if u.UUID != Nil {
		t.Fatalf("u.UUID = %v, want %v", u.UUID, Nil)
	}
}

// testNullUUIDUnmarshalJSONNull checks that JSON null unmarshals as invalid.
func testNullUUIDUnmarshalJSONNull(t *testing.T) {
	var u NullUUID
	data := []byte(`null`)
	if err := json.Unmarshal(data, &u); err != nil {
		t.Fatalf("json.Unmarshal err = %v, want <nil>", err)
	}
	if u.Valid {
		t.Fatalf("u.Valid = true, want false")
	}
	if u.UUID != Nil {
		t.Fatalf("u.UUID = %v, want %v", u.UUID, Nil)
	}
}
// testNullUUIDUnmarshalJSONValid checks that a quoted valid UUID string
// unmarshals into a valid NullUUID holding that UUID.
func testNullUUIDUnmarshalJSONValid(t *testing.T) {
	var u NullUUID
	data := []byte(`"6ba7b810-9dad-11d1-80b4-00c04fd430c8"`)
	if err := json.Unmarshal(data, &u); err != nil {
		t.Fatalf("json.Unmarshal err = %v, want <nil>", err)
	}
	if !u.Valid {
		t.Fatalf("u.Valid = false, want true")
	}
	if u.UUID != codecTestUUID {
		// Fixed: the failure message previously printed Nil as the wanted
		// value; the comparison above expects codecTestUUID.
		t.Fatalf("u.UUID = %v, want %v", u.UUID, codecTestUUID)
	}
}
// testNullUUIDUnmarshalJSONMalformed checks that non-string, non-null JSON
// input is rejected with an error.
func testNullUUIDUnmarshalJSONMalformed(t *testing.T) {
	var u NullUUID
	data := []byte(`257`)
	if err := json.Unmarshal(data, &u); err == nil {
		t.Fatal("json.Unmarshal err = <nil>, want error")
	}
}
// BenchmarkNullMarshalJSON measures MarshalJSON for both the valid and the
// invalid (null) NullUUID cases.
func BenchmarkNullMarshalJSON(b *testing.B) {
	b.Run("Valid", func(b *testing.B) {
		u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
		if err != nil {
			b.Fatal(err)
		}
		n := NullUUID{UUID: u, Valid: true}
		for i := 0; i < b.N; i++ {
			n.MarshalJSON()
		}
	})
	b.Run("Invalid", func(b *testing.B) {
		n := NullUUID{Valid: false}
		for i := 0; i < b.N; i++ {
			n.MarshalJSON()
		}
	})
}

// BenchmarkNullUnmarshalJSON measures UnmarshalJSON for a quoted UUID
// string and for the JSON literal null.
func BenchmarkNullUnmarshalJSON(b *testing.B) {
	baseUUID, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		b.Fatal(err)
	}
	data, err := json.Marshal(&baseUUID)
	if err != nil {
		b.Fatal(err)
	}
	b.Run("Valid", func(b *testing.B) {
		var u NullUUID
		for i := 0; i < b.N; i++ {
			u.UnmarshalJSON(data)
		}
	})
	b.Run("Invalid", func(b *testing.B) {
		invalid := []byte("null")
		var n NullUUID
		for i := 0; i < b.N; i++ {
			n.UnmarshalJSON(invalid)
		}
	})
}
|
package dir
import (
"fmt"
"strings"
"time"
. "github.com/Pyorot/streams/src/utils"
"github.com/bwmarrin/discordgo"
)
var managed bool    // manage dir (vs treating it as read-only)
var gameName string // (if managed) param for onUpdate
var serverID string // (if managed) param for onUpdate
var manMsgID string // ID of the current managed Discord message ("" = needs creating)
var addCh = make(chan (struct{ k, v string })) // channel connecting manage() and add()
// manage is the worker goroutine: it reads (streamer, user) entries from
// addCh and appends each one to a managed Discord message, creating a
// fresh message when the current one is missing or would exceed the
// 2000-character message cap. Failed entries are retried every 15s.
func manage() {
	var err error
	var p struct{ k, v string } // current entry (pair)
	for {
		// 1: determine input (read in new or retry old)
		if p.k == "" { // p.k is blanked iff success
			p = <-addCh
		} else {
			// back off before retrying the entry that failed last round
			time.Sleep(15 * time.Second)
		}
		manMsgIDCopy := manMsgID // copy for concurrency coherency
		var msg *discordgo.Message
		if manMsgIDCopy != "" {
			// 2.A.1: get managed message
			msg, err = discord.ChannelMessage(channel, manMsgIDCopy)
			if err != nil {
				// NOTE(review): err.Error()[:8] panics if the message is
				// shorter than 8 bytes — confirm discordgo always prefixes
				// REST errors with "HTTP ...", or guard the slice.
				if err.Error()[:8] == "HTTP 404" {
					manMsgID = "" // signals new msg needs to be created
					Log.Insta <- fmt.Sprintf("d | renew - missing")
				} else {
					Log.Insta <- fmt.Sprintf("x | d?: %s", err)
				}
				continue
			}
			// 2.A.1: check edit fits in message (Discord's 2000-char cap)
			if len(msg.Content)+len(p.v)+len(p.k)+2 >= 2000 {
				manMsgID = "" // signals new msg needs to be created
				Log.Insta <- fmt.Sprintf("d | renew - capacity")
				continue
			}
		} else {
			// 2.B: post blank message
			msg, err = discord.ChannelMessageSend(channel, "dir")
			if err != nil {
				Log.Insta <- fmt.Sprintf("x | d+: %s", err)
				continue
			}
			manMsgID, manMsgIDCopy = msg.ID, msg.ID
		}
		// 3: edit new data into message
		text := msg.Content + fmt.Sprintf("\n%s %s", p.v, p.k)
		msg, err = discord.ChannelMessageEdit(channel, manMsgIDCopy, text)
		if err != nil {
			Log.Insta <- fmt.Sprintf("x | d~: %s", err)
			continue
		}
		Log.Insta <- fmt.Sprintf("d | > %s %s", p.v, p.k)
		p.k = "" // ack (got to the end): p is processed
	}
}
// add is the WebSocket PresenceUpdate callback: for every activity that is
// a Twitch stream of the configured game on the configured server, it
// records the streamer → Discord-user mapping, and (when managed) queues
// newly seen pairs for manage() to publish.
func add(s *discordgo.Session, pu *discordgo.PresenceUpdate) {
	for _, act := range pu.Activities {
		if pu.GuildID != serverID ||
			act.Name != "Twitch" ||
			act.Type != discordgo.GameTypeStreaming ||
			act.State != gameName {
			continue
		}
		streamer := act.URL[strings.LastIndex(act.URL, "/")+1:]
		user := pu.User.ID
		lock.Lock()
		isNew := data[streamer] != user // add to dir only if new
		data[streamer] = user           // update internal dir either way
		lock.Unlock()
		if managed && isNew {
			addCh <- struct{ k, v string }{streamer, user}
		}
	}
}
|
package parser
import (
"testing"
)
// Test_ParseMysqlUrl checks that a ClearDB-style mysql:// URL parses
// without error, logging the parsed result for inspection.
func Test_ParseMysqlUrl(t *testing.T) {
	mysqlUrl := "mysql://b08738ff9fff5e:e79a1d81@us-cdbr-iron-east-01.cleardb.net/heroku_e16926abf051efd?reconnect=true"
	if r, e := ParseMysqlUrl(mysqlUrl); e != nil {
		t.Error(e)
	} else {
		t.Log("first test passed")
		t.Log(r)
	}
}
|
package main
import (
"fmt"
"net/http"
"io/ioutil"
"strings"
"github.com/Shopify/sarama"
"log"
)
// handler routes incoming REST calls: /data/{stream} payloads are published
// to Kafka on topic "{tenant}.data.{stream}"; /service/{svc}/{fn} events are
// echoed back to the client; anything else gets a usage message.
func handler(writer http.ResponseWriter, request *http.Request) {
	tenant := tenant(&request.Header)
	path := strings.Split(request.URL.Path, "/")
	defer request.Body.Close()
	body, err := ioutil.ReadAll(request.Body)
	if err != nil {
		http.Error(writer, "unable to read request body", http.StatusBadRequest)
		return
	}
	if path[1] == "data" {
		key := request.Header.Get("KEY")
		streamName := path[2]
		event := DataEvent{streamName, key, body}
		// NOTE(review): a SyncProducer is created (and torn down) for every
		// request; consider constructing one at startup and reusing it.
		producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
		if err != nil {
			log.Fatalln(err)
		}
		defer func() {
			if err := producer.Close(); err != nil {
				log.Fatalln(err)
			}
		}()
		msg := &sarama.ProducerMessage{Topic: tenant + ".data." + event.StreamName, Key: sarama.StringEncoder(event.Key), Value: sarama.StringEncoder(event.Body)}
		partition, offset, err := producer.SendMessage(msg)
		if err != nil {
			log.Printf("FAILED to send message: %s\n", err)
		} else {
			log.Printf("> message sent to partition %d at offset %d\n", partition, offset)
		}
	} else if path[1] == "service" {
		service := path[2]
		function := path[3]
		event := ServiceEvent{service, function, body}
		// Fixed: fmt.Println(writer, event) printed the writer to stdout;
		// Fprintln writes the event to the HTTP response as intended.
		fmt.Fprintln(writer, event)
	} else {
		fmt.Fprintln(writer, "REST endpoint supports only data and service events.")
	}
}
// main registers the catch-all handler and serves HTTP on :8080. If the
// server fails to start (e.g. the port is taken), the error is logged and
// the process exits instead of silently returning.
func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// tenant resolves the tenant for a request. Multi-tenancy is not yet
// implemented, so every caller is treated as "anonymous".
// TODO confirm which header should eventually carry tenant identity.
func tenant(headers *http.Header) string {
	return "anonymous"
}
// Data types

// ServiceEvent describes an invocation of a service function with a payload.
type ServiceEvent struct {
	Service  string
	Function string
	Body     []byte
}

// String renders the event for the HTTP echo / logging output.
func (event ServiceEvent) String() string {
	return fmt.Sprintf("service=%s function=%s body=%s", event.Service, event.Function, event.Body)
}

// DataEvent is a keyed payload destined for a named Kafka-backed stream.
type DataEvent struct {
	StreamName string
	Key        string
	Body       []byte
}
package payments
import (
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/gorilla/mux"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
"github.com/loubard/sfapi/models"
"github.com/loubard/sfapi/sql"
"github.com/stretchr/testify/assert"
)
// TestFetch verifies that fetching a seeded payment by ID returns 200 with
// a JSON body wrapped in a "data" envelope.
func TestFetch(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	sql.Seed(db) // seed fixtures, including the UUID requested below
	req := httptest.NewRequest(
		"GET",
		"http://example.com/v1/payments/4ee3a8d8-ca7b-4290-a52c-dd5b6165ec43",
		nil,
	)
	w := httptest.NewRecorder()
	// Route through mux so the {id} path variable is populated.
	router := mux.NewRouter()
	router.HandleFunc("/v1/payments/{id}", Fetch(db))
	router.ServeHTTP(w, req)
	resp := w.Result()
	body, err := ioutil.ReadAll(resp.Body)
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Contains(t, string(body), "{\"data\":")
}

// TestFetchNotFound verifies that an unknown payment ID yields 404. The
// handler is invoked directly (unseeded DB, no router) since no match exists.
func TestFetchNotFound(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	req := httptest.NewRequest("GET", "http://example.com/v1/payments/not-found", nil)
	w := httptest.NewRecorder()
	Fetch(db)(w, req)
	resp := w.Result()
	assert.Equal(t, http.StatusNotFound, resp.StatusCode)
}
// TestList verifies that listing payments returns 200 with a "data" array
// containing both seeded records.
func TestList(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	sql.Seed(db)
	req := httptest.NewRequest("GET", "http://example.com/v1/payments/", nil)
	w := httptest.NewRecorder()
	router := mux.NewRouter()
	router.HandleFunc("/v1/payments/", List(db))
	router.ServeHTTP(w, req)
	resp := w.Result()
	body, err := ioutil.ReadAll(resp.Body)
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Contains(t, string(body), "{\"data\":[")
	d := &models.ListResponse{}
	// NOTE(review): Unmarshal's error is discarded; the length assertion
	// below would fail on bad JSON anyway, but checking it would be clearer.
	json.Unmarshal(body, d)
	assert.Equal(t, 2, len(*d.Data))
}

// TestDelete verifies that deleting a seeded payment by ID returns 200.
func TestDelete(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	sql.Seed(db)
	req := httptest.NewRequest(
		"DELETE",
		"http://example.com/v1/payments/4ee3a8d8-ca7b-4290-a52c-dd5b6165ec43",
		nil,
	)
	w := httptest.NewRecorder()
	router := mux.NewRouter()
	router.HandleFunc("/v1/payments/{id}", Delete(db))
	router.ServeHTTP(w, req)
	resp := w.Result()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
}
// TestCreate verifies that a well-formed POST creates the payment (201) and
// that the record is persisted.
func TestCreate(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	sql.Seed(db)
	json := `{"payment_id":"42","attributes":{"amount":"100"}}`
	req := httptest.NewRequest(
		"POST",
		"http://example.com/v1/payments/",
		strings.NewReader(json),
	)
	w := httptest.NewRecorder()
	router := mux.NewRouter()
	router.HandleFunc("/v1/payments/", Create(db))
	router.ServeHTTP(w, req)
	resp := w.Result()
	assert.Equal(t, http.StatusCreated, resp.StatusCode)
	// Confirm the payment actually landed in the database.
	p := &models.Payment{}
	db.Where("payments.payment = ?", "42").First(&p)
	assert.Equal(t, "42", p.Payment)
}

// TestCreateBadJSON verifies that syntactically invalid JSON yields 400.
func TestCreateBadJSON(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	sql.Seed(db)
	json := `{`
	req := httptest.NewRequest(
		"POST",
		"http://example.com/v1/payments/",
		strings.NewReader(json),
	)
	w := httptest.NewRecorder()
	router := mux.NewRouter()
	router.HandleFunc("/v1/payments/", Create(db))
	router.ServeHTTP(w, req)
	resp := w.Result()
	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
}

// TestCreateInvalid verifies that well-formed JSON with wrongly-typed
// fields (numeric payment_id) yields 400.
func TestCreateInvalid(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	sql.Seed(db)
	json := `{"payment_id": 42}`
	req := httptest.NewRequest(
		"POST",
		"http://example.com/v1/payments/",
		strings.NewReader(json),
	)
	w := httptest.NewRecorder()
	router := mux.NewRouter()
	router.HandleFunc("/v1/payments/", Create(db))
	router.ServeHTTP(w, req)
	resp := w.Result()
	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
}
// TestUpdate verifies that a PUT against a seeded payment succeeds (the
// handler responds 201) and persists the new payment ID.
func TestUpdate(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	sql.Seed(db)
	json := `{"payment_id":"43","attributes":{"amount":"100"}}`
	req := httptest.NewRequest(
		"PUT",
		"http://example.com/v1/payments/4ee3a8d8-ca7b-4290-a52c-dd5b6165ec43",
		strings.NewReader(json),
	)
	w := httptest.NewRecorder()
	router := mux.NewRouter()
	router.HandleFunc("/v1/payments/{id}", Update(db))
	router.ServeHTTP(w, req)
	resp := w.Result()
	assert.Equal(t, http.StatusCreated, resp.StatusCode)
	// Confirm the update actually landed in the database.
	p := &models.Payment{}
	db.Where("payments.payment = ?", "43").First(&p)
	assert.Equal(t, "43", p.Payment)
}

// TestUpdateNotFound verifies that updating a non-existent payment ID
// yields 400.
func TestUpdateNotFound(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	sql.Seed(db)
	json := `{"payment_id":"42","attributes":{"amount":"100"}}`
	req := httptest.NewRequest(
		"PUT",
		"http://example.com/v1/payments/42",
		strings.NewReader(json),
	)
	w := httptest.NewRecorder()
	router := mux.NewRouter()
	router.HandleFunc("/v1/payments/{id}", Update(db))
	router.ServeHTTP(w, req)
	resp := w.Result()
	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
}

// TestUpdateBadJSON verifies that syntactically invalid JSON yields 400.
func TestUpdateBadJSON(t *testing.T) {
	db, err := gorm.Open("sqlite3", ":memory:")
	assert.NoError(t, err)
	json := `{`
	req := httptest.NewRequest(
		"PUT",
		"http://example.com/v1/payments/42",
		strings.NewReader(json),
	)
	w := httptest.NewRecorder()
	router := mux.NewRouter()
	router.HandleFunc("/v1/payments/{id}", Update(db))
	router.ServeHTTP(w, req)
	resp := w.Result()
	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
}
|
package myheap
// MaxHeap is an array-backed binary max-heap. Slot 0 of the backing array
// is unused; elements occupy indices 1..count.
type MaxHeap struct {
	data     []int // backing array storing the heap
	count    int   // number of elements currently in the heap
	capacity int   // maximum number of elements (fixed at construction)
}
// NewMaxHeap constructs an empty max-heap able to hold cap elements.
// The backing array reserves one extra slot because index 0 is unused.
func NewMaxHeap(cap int) *MaxHeap {
	return &MaxHeap{
		data:     make([]int, cap+1),
		count:    0,
		capacity: cap,
	}
}
// NewMaxHeapHeapify builds a max-heap from an arbitrary array in O(n)
// ("heapify"): laid out as a complete binary tree, every leaf is already a
// trivial max-heap, so it sifts down each non-leaf node starting from the
// last one (index count/2) back to the root.
func NewMaxHeapHeapify(arr []int) *MaxHeap {
	n := len(arr)
	heap := new(MaxHeap)
	heap.data = make([]int, n+1) // index 0 unused; elements start at 1
	heap.capacity = n
	copy(heap.data[1:], arr) // bulk copy instead of an element-by-element loop
	heap.count = n
	for i := heap.count / 2; i >= 1; i-- { // leaves are already max-heaps
		heap.shiftDown(i)
	}
	return heap
}
// Size returns the number of elements currently stored in the heap.
func (h MaxHeap) Size() int {
	return h.count
}

// IsEmpty reports whether the heap contains no elements.
func (h MaxHeap) IsEmpty() bool {
	return h.count == 0
}
// Insert places item in the next free slot and sifts it up to restore the
// heap invariant.
// NOTE(review): when the heap is already full the item is silently dropped —
// confirm callers expect that rather than an error or a resize.
func (h *MaxHeap) Insert(item int) {
	if h.count+1 <= h.capacity {
		h.data[h.count+1] = item
		h.count++
		h.shiftUp(h.count)
	}
}
// ExtractMax removes and returns the largest element (the root). An empty
// heap yields 0, the int zero value, matching the original behavior.
func (h *MaxHeap) ExtractMax() int {
	if h.count == 0 {
		return 0
	}
	top := h.data[1]
	// Swap the last element into the root, shrink, then sift it down.
	h.data[1], h.data[h.count] = h.data[h.count], h.data[1]
	h.count--
	h.shiftDown(1)
	return top
}
// shiftUp bubbles the element at index k toward the root while it is
// larger than its parent (parent of k is k/2 in the 1-based layout).
func (h *MaxHeap) shiftUp(k int) {
	for k > 1 {
		parent := k / 2
		if h.data[parent] >= h.data[k] {
			break
		}
		h.data[k], h.data[parent] = h.data[parent], h.data[k]
		k = parent
	}
}
// shiftDown sifts the element at index k downward: repeatedly swap it with
// the larger of its two children until neither child is greater (used after
// the root has been replaced by the last element).
func (h *MaxHeap) shiftDown(k int) {
	for 2*k <= h.count { // while k has a left child
		j := 2 * k // index that data[k] may swap with this round
		if j+1 <= h.count && h.data[j+1] > h.data[j] {
			j++
		}
		// data[j] is now the larger of data[2*k] and data[2*k+1]
		if h.data[k] > h.data[j] {
			break
		}
		h.data[k], h.data[j] = h.data[j], h.data[k]
		k = j
	}
}
|
// Copyright 2020-2021 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bufcorevalidate
import (
"errors"
"fmt"
"github.com/powerman/buf/internal/pkg/normalpath"
"github.com/powerman/buf/internal/pkg/protodescriptor"
)
// ValidateFileInfoPath validates a single FileInfo path as a root-relative
// proto file path.
func ValidateFileInfoPath(path string) error {
	return protodescriptor.ValidateProtoPath("root relative file path", path)
}
// ValidateFileInfoPaths validates each FileInfo path as a root-relative
// proto file path.
func ValidateFileInfoPaths(paths []string) error {
	return protodescriptor.ValidateProtoPaths("root relative file path", paths)
}
// ValidateFileOrDirPaths validates that the given file or directory paths
// are non-empty, already normalized, and contain no duplicates.
func ValidateFileOrDirPaths(paths []string) error {
	seen := make(map[string]struct{}, len(paths))
	for _, p := range paths {
		if p == "" {
			return errors.New("path is empty")
		}
		normalized, err := normalpath.NormalizeAndValidate(p)
		if err != nil {
			return fmt.Errorf("path had normalization error: %w", err)
		}
		if normalized != p {
			return fmt.Errorf("path %s was not normalized to %s", p, normalized)
		}
		if _, dup := seen[p]; dup {
			return fmt.Errorf("duplicate path: %s", p)
		}
		seen[p] = struct{}{}
	}
	return nil
}
|
package main
import (
"fmt"
"sync"
)
//How to use Mutex
var counter int = 0
func add(a, b int, lock *sync.RWMutex) {
c := a + b
lock.Lock()
counter ++
fmt.Printf("%d : %d + %d = %d\n", counter, a, b, c)
lock.Unlock()
}
func main() {
lock := &sync.RWMutex{}
for i:=0; i<10; i++ {
go add(1,i,lock)
}
for {
// lock.RLock()
c := counter
// lock.RUnlock()
if c >= 10 {
break
}
}
} |
package main
import (
"github.com/jpillora/opts"
"github.com/wxio/tron-go/cmd"
"github.com/wxio/tron-go/tools"
)
var (
	// Version, Date and Commit are injected at build time (e.g. via -ldflags).
	Version string
	Date    string
	Commit  string
)

// root is the top-level CLI command.
type root struct{}

// build groups the antlr build subcommands.
type build struct{}

// adl groups the ADL AST subcommands.
type adl struct{}
// main wires up the tron-go CLI: a root command with a "build" subtree
// (antlr tools) and an "adl" subtree (AST load/build), then parses the
// arguments and runs, exiting fatally on error.
func main() {
	r := root{}
	opts.New(&r).Name("tron-go").
		EmbedGlobalFlagSet().
		Complete().
		Version(Version).
		AddCommand(opts.New(&build{}).
			AddCommand(tools.NewAntlr()).
			AddCommand(tools.NewAntlrs().
				ConfigPath(".antlr.build.json"))).
		AddCommand(opts.New(&adl{}).
			AddCommand(cmd.NewLoadAdlAst()).
			AddCommand(cmd.BuildAdlAst())).
		Parse().
		RunFatal()
}
|
package git
import (
"errors"
"io"
"path"
"reflect"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
)
// TestCopyService checks that CopyService copies every file under the
// service's directory from the source to the destination and reports the
// copied (relative) paths.
func TestCopyService(t *testing.T) {
	s := &mockSource{localPath: "/tmp/testing"}
	files := []string{"service-a/my-system/my-file.yaml", "service-a/my-system/this-file.yaml"}
	for _, f := range files {
		s.addFile(f)
	}
	d := &mockDestination{}
	copied, err := CopyService("service-a", s, d)
	if err != nil {
		t.Fatal(err)
	}
	d.assertFilesWritten(t, files)
	if !reflect.DeepEqual(files, copied) {
		t.Fatalf("failed to copy the files, got %#v, want %#v", copied, files)
	}
}
// mockSource is an in-memory stand-in for a git working copy: it records
// absolute file paths rooted at localPath.
type mockSource struct {
	files     []string
	localPath string
}
// Walk invokes cb for every registered file under filePath, passing the
// absolute path and the path relative to the source's local root.
func (s *mockSource) Walk(filePath string, cb func(string, string) error) error {
	// Ranging over a nil slice is a no-op, so no explicit nil guard is needed.
	prefix := path.Join(s.localPath, filePath)
	for _, f := range s.files {
		// NOTE(review): plain prefix matching also matches sibling dirs that
		// share the prefix (e.g. "service-a" vs "service-ab"); acceptable for
		// a test mock, but confirm tests never rely on that distinction.
		if !strings.HasPrefix(f, prefix) {
			continue
		}
		if err := cb(f, strings.TrimPrefix(f, s.localPath+"/")); err != nil {
			return err
		}
	}
	return nil
}
// addFile registers name (relative to localPath) as a file in the mock source.
func (s *mockSource) addFile(name string) {
	// append handles a nil slice, so no explicit initialization is needed.
	s.files = append(s.files, path.Join(s.localPath, name))
}
// mockDestination records destination paths of copied files for assertions.
type mockDestination struct {
	written []string
}
// CopyFile records dst as written; the file contents are not copied.
func (d *mockDestination) CopyFile(src, dst string) error {
	// append handles a nil slice, so no explicit initialization is needed.
	d.written = append(d.written, dst)
	return nil
}
// WriteFile is unimplemented in this mock; any call fails the operation.
func (d *mockDestination) WriteFile(src io.Reader, dst string) error {
	return errors.New("not implemented just now")
}
func (d *mockDestination) assertFilesWritten(t *testing.T, want []string) {
if diff := cmp.Diff(want, d.written); diff != "" {
t.Fatalf("written files do not match: %s", diff)
}
}
|
package rest
import (
"github.com/golang/protobuf/ptypes"
"github.com/jinmukeji/jiujiantang-services/pkg/rest"
analysispb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/analysis/v1"
"github.com/kataras/iris/v12"
)
// GetV2AnalyzeReportByRecordID fetches the analysis result for the record
// id in the URL, assembles the report modules into an
// AnalysisReportResponse and writes it to the client as JSON.
func (h *v2Handler) GetV2AnalyzeReportByRecordID(ctx iris.Context) {
	// Error deliberately ignored: a missing/invalid param leaves recordID 0.
	recordID, _ := ctx.Params().GetInt("record_id")
	req := new(analysispb.GetAnalyzeResultByRecordIDRequest)
	req.RecordId = int32(recordID)
	req.Cid = rest.GetCidFromContext(ctx)
	resp, err := h.rpcAnalysisSvc.GetAnalyzeResultByRecordID(
		newRPCContext(ctx), req,
	)
	if err != nil {
		// NOTE(review): this branch calls writeError while every later
		// branch calls writeRPCInternalError — confirm that is intentional.
		writeError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	analysisReportResponse := AnalysisReportResponse{
		ReportVersion: resp.GetReportVersion(),
		ReportID:      resp.GetReport().GetRecordId(),
		TransactionID: resp.GetTransactionId(),
	}
	// Modules derived from the engine's analysis result.
	analysisReportContent, err := getAnalysisModules(resp.GetReport().GetModules())
	if err != nil {
		writeRPCInternalError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	// Personal-information module.
	userProfileModule, err := getUserProfileModule(resp.GetReport().GetUserProfile())
	if err != nil {
		writeRPCInternalError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	analysisReportContent.UserProfile = userProfileModule
	// Measurement-context (pulse test) module.
	pulseTestModule, err := getPulseTestModule(resp.GetReport().GetPulseTest())
	if err != nil {
		writeRPCInternalError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	analysisReportContent.PulseTest = pulseTestModule
	// Remark module.
	remarkModule, err := getRemarkModule(resp.GetReport().GetRemark())
	if err != nil {
		writeRPCInternalError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	analysisReportContent.Remark = remarkModule
	// Measurement time; conversion error deliberately ignored (zero time).
	startTime, _ := ptypes.Timestamp(resp.GetReport().GetCreatedTime())
	analysisReportContent.CreatedTime = startTime
	analysisReportResponse.ReportContent = *analysisReportContent
	rest.WriteOkJSON(ctx, analysisReportResponse)
}
|
package socketio
import (
"sync"
)
// "log"
// "runtime/debug"
// BroadcastAdaptor is the adaptor to handle broadcasts.
type BroadcastAdaptor interface {
	// Join causes the socket to join a room.
	Join(room string, socket Socket) error

	// Leave causes the socket to leave a room.
	Leave(room string, socket Socket) error

	// Send will send an event with args to the room. If "ignore" is not
	// nil, the event will be excluded from being sent to "ignore".
	Send(ignore Socket, room, event string, args ...interface{}) error

	// MemCount reports the number of sockets currently joined to room.
	MemCount(room string) int
}
// newBroadcast is the factory used to create the broadcast adaptor; a
// package variable so an alternative adaptor can be swapped in.
var newBroadcast = newBroadcastDefault

//type broadcast map[string]map[string]Socket

// broadcast is the default BroadcastAdaptor: a room-name -> socket-id ->
// Socket map guarded by a mutex.
type broadcast struct {
	_map  map[string]map[string]Socket
	_lock sync.Mutex
}

// newBroadcastDefault returns an empty, ready-to-use broadcast adaptor.
func newBroadcastDefault() BroadcastAdaptor {
	//return make(broadcast)
	return &broadcast{
		_map: make(map[string]map[string]Socket),
	}
}
// Join registers socket in the given room, creating the room's member map
// on first use. It always returns nil.
func (b *broadcast) Join(room string, socket Socket) error {
	b._lock.Lock()
	defer b._lock.Unlock()

	members, exists := b._map[room]
	if !exists {
		members = make(map[string]Socket)
		b._map[room] = members
	}
	members[socket.Id()] = socket
	return nil
}
// Leave removes socket from the room; the room itself is dropped once its
// last member leaves. Leaving an unknown room is a no-op. Always nil.
func (b *broadcast) Leave(room string, socket Socket) error {
	b._lock.Lock()
	defer b._lock.Unlock()

	members, exists := b._map[room]
	if !exists {
		return nil
	}
	delete(members, socket.Id())
	if len(members) == 0 {
		delete(b._map, room)
	}
	return nil
}
// Send emits event(args...) to every socket in room except "ignore".
// The adaptor's mutex is held for the duration of all the Emit calls.
func (b *broadcast) Send(ignore Socket, room, event string, args ...interface{}) error {
	// debug.PrintStack()
	b._lock.Lock()
	defer b._lock.Unlock()
	sockets := b._map[room]
	//log.Println(">>>>>>>>>>>>>>>>room:", room, " count:", len(b._map[room]), &b)
	// log.Println(">>>>>>>>>>>>>>>>room:", room, " count:", len(b._map[room]))
	for id, s := range sockets {
		if ignore != nil && ignore.Id() == id {
			continue
		}
		// NOTE(review): Emit errors are discarded, and Emit runs while
		// _lock is held — confirm Emit cannot block or re-enter this
		// adaptor (Join/Leave/Send) or it would deadlock.
		s.Emit(event, args...)
	}
	return nil
}
// MemCount reports how many sockets are currently joined to room;
// an unknown room counts as zero.
func (b *broadcast) MemCount(room string) int {
	b._lock.Lock()
	count := len(b._map[room])
	b._lock.Unlock()
	return count
}
|
package main
import (
"fmt"
)
/*
Notes on Interfaces:
type bot interface {
// ^interface name
getGreeting (string, int) (string, error)
// ^function name ^list of args ^list of return types
}
*/
// bot is satisfied by any type that can produce a greeting string.
type bot interface {
	getGreeting() string
}

// englishBot and spanishBot satisfy the bot interface implicitly,
// simply by implementing getGreeting.
type englishBot struct{}
type spanishBot struct{}

func main() {
	printGreeting(englishBot{})
	printGreeting(spanishBot{})
}

// printGreeting prints the greeting of any bot implementation.
func printGreeting(b bot) {
	fmt.Println(b.getGreeting())
}

func (englishBot) getGreeting() string {
	return "Hi There!"
}

// The receiver variable is omitted because the method does not use it.
func (spanishBot) getGreeting() string {
	return "Hola!"
}
|
package gocpy
//go:generate go run script/variadic.go
/*
#include <stdio.h>
#include "Python.h"*/
import "C"
// togo converts a *C.PyObject to a *PyObject.
func togo(cobject *C.PyObject) *PyObject {
	return (*PyObject)(cobject)
}

// toc converts a *PyObject back to a *C.PyObject.
func toc(object *PyObject) *C.PyObject {
	return (*C.PyObject)(object)
}
/* Use these when calling C functions that take *FILE arguments. */

// Stderr returns the C standard error stream.
func Stderr() *C.FILE {
	return C.stderr
}

// Stdin returns the C standard input stream.
func Stdin() *C.FILE {
	return C.stdin
}

// Stdout returns the C standard output stream.
func Stdout() *C.FILE {
	return C.stdout
}
|
package graphql_test
import (
"testing"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/gqlerrors"
"github.com/graphql-go/graphql/testutil"
)
// The tests below assert documents that must PASS NoFragmentCyclesRule.

// A single, acyclic fragment spread is valid.
func TestValidate_NoCircularFragmentSpreads_SingleReferenceIsValid(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Dog { ...fragB }
      fragment fragB on Dog { name }
    `)
}

// Spreading the same fragment twice is repetition, not a cycle.
func TestValidate_NoCircularFragmentSpreads_SpreadingTwiceIsNotCircular(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Dog { ...fragB, ...fragB }
      fragment fragB on Dog { name }
    `)
}

// Reaching the same fragment along two acyclic paths is not a cycle.
func TestValidate_NoCircularFragmentSpreads_SpreadingTwiceIndirectlyIsNotCircular(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Dog { ...fragB, ...fragC }
      fragment fragB on Dog { ...fragC }
      fragment fragC on Dog { name }
    `)
}

// Spreads inside inline fragments on abstract types are still acyclic.
func TestValidate_NoCircularFragmentSpreads_DoubleSpreadWithinAbstractTypes(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoFragmentCyclesRule, `
      fragment nameFragment on Pet {
        ... on Dog { name }
        ... on Cat { name }
      }
      fragment spreadsInAnon on Pet {
        ... on Dog { ...nameFragment }
        ... on Cat { ...nameFragment }
      }
    `)
}

// An undefined fragment target must not be reported as a cycle.
func TestValidate_NoCircularFragmentSpreads_DoesNotFalsePositiveOnUnknownFragment(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoFragmentCyclesRule, `
      fragment nameFragment on Pet {
        ...UnknownFragment
      }
    `)
}
// The tests below assert documents that must FAIL NoFragmentCyclesRule,
// checking both the error message and the reported (line, column) pairs.

// A fragment spreading itself through a field is a direct cycle.
func TestValidate_NoCircularFragmentSpreads_SpreadingRecursivelyWithinFieldFails(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Human { relatives { ...fragA } },
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragA" within itself.`, 2, 45),
	})
}

// The simplest direct self-spread.
func TestValidate_NoCircularFragmentSpreads_NoSpreadingItselfDirectly(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Dog { ...fragA }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragA" within itself.`, 2, 31),
	})
}

// A direct self-spread hidden inside an inline fragment is still a cycle.
func TestValidate_NoCircularFragmentSpreads_NoSpreadingItselfDirectlyWithinInlineFragment(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Pet {
        ... on Dog {
          ...fragA
        }
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragA" within itself.`, 4, 11),
	})
}

// A two-fragment cycle; the "via" list names the intermediate fragment.
func TestValidate_NoCircularFragmentSpreads_NoSpreadingItselfIndirectly(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Dog { ...fragB }
      fragment fragB on Dog { ...fragA }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragA" within itself via fragB.`, 2, 31, 3, 31),
	})
}

// The same cycle declared in the opposite order is reported from the
// first fragment encountered.
func TestValidate_NoCircularFragmentSpreads_NoSpreadingItselfIndirectlyReportsOppositeOrder(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragB on Dog { ...fragA }
      fragment fragA on Dog { ...fragB }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragB" within itself via fragA.`, 2, 31, 3, 31),
	})
}

// An indirect cycle through inline fragments.
func TestValidate_NoCircularFragmentSpreads_NoSpreadingItselfIndirectlyWithinInlineFragment(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Pet {
        ... on Dog {
          ...fragB
        }
      }
      fragment fragB on Pet {
        ... on Dog {
          ...fragA
        }
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragA" within itself via fragB.`, 4, 11, 9, 11),
	})
}

// Two overlapping deep cycles produce two separate errors.
func TestValidate_NoCircularFragmentSpreads_NoSpreadingItselfDeeply(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Dog { ...fragB }
      fragment fragB on Dog { ...fragC }
      fragment fragC on Dog { ...fragO }
      fragment fragX on Dog { ...fragY }
      fragment fragY on Dog { ...fragZ }
      fragment fragZ on Dog { ...fragO }
      fragment fragO on Dog { ...fragP }
      fragment fragP on Dog { ...fragA, ...fragX }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragA" within itself via fragB, fragC, fragO, fragP.`,
			2, 31,
			3, 31,
			4, 31,
			8, 31,
			9, 31),
		testutil.RuleError(`Cannot spread fragment "fragO" within itself via fragP, fragX, fragY, fragZ.`,
			8, 31,
			9, 41,
			5, 31,
			6, 31,
			7, 31),
	})
}

// One fragment reachable through two cyclic paths: one error per path.
func TestValidate_NoCircularFragmentSpreads_NoSpreadingItselfDeeplyTwoPaths(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Dog { ...fragB, ...fragC }
      fragment fragB on Dog { ...fragA }
      fragment fragC on Dog { ...fragA }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragA" within itself via fragB.`,
			2, 31,
			3, 31),
		testutil.RuleError(`Cannot spread fragment "fragA" within itself via fragC.`,
			2, 41,
			4, 31),
	})
}

// Same two-path situation but traversed in a different order.
func TestValidate_NoCircularFragmentSpreads_NoSpreadingItselfDeeplyTwoPaths_AltTraverseOrder(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Dog { ...fragC }
      fragment fragB on Dog { ...fragC }
      fragment fragC on Dog { ...fragA, ...fragB }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragA" within itself via fragC.`,
			2, 31,
			4, 31),
		testutil.RuleError(`Cannot spread fragment "fragC" within itself via fragB.`,
			4, 41,
			3, 31),
	})
}

// A direct self-spread plus two indirect cycles: three errors.
func TestValidate_NoCircularFragmentSpreads_NoSpreadingItselfDeeplyAndImmediately(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoFragmentCyclesRule, `
      fragment fragA on Dog { ...fragB }
      fragment fragB on Dog { ...fragB, ...fragC }
      fragment fragC on Dog { ...fragA, ...fragB }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Cannot spread fragment "fragB" within itself.`, 3, 31),
		testutil.RuleError(`Cannot spread fragment "fragA" within itself via fragB, fragC.`,
			2, 31,
			3, 41,
			4, 31),
		testutil.RuleError(`Cannot spread fragment "fragB" within itself via fragC.`,
			3, 41,
			4, 41),
	})
}
|
// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
package controller
import (
"fmt"
"log"
"time"
"github.com/swinslow/peridot-core/internal/jobcontroller"
"github.com/swinslow/peridot-core/pkg/agent"
pbs "github.com/swinslow/peridot-core/pkg/status"
)
// runScheduler is the main "decider" within the Controller.
// It walks through active Jobs and JobSets, decides whether to update them,
// and decides whether to start new Jobs based on the current overall state.
// It holds the controller's writer lock for its entire duration.
func (c *Controller) runScheduler() {
	// grab a writer lock
	c.m.Lock()
	// NOTE(review): leftover debug tracing on stdout (here and in the
	// deferred print below); consider removing or using the project logger.
	fmt.Println("===> ENTERING runScheduler")
	defer c.m.Unlock()
	defer fmt.Println("===> LEAVING runScheduler")

	// first, remove any stopped jobs from the active list, and update
	// corresponding JobSets' statuses
	// (deleting entries while ranging over a map is safe in Go)
	for jobID, job := range c.activeJobs {
		if job.Status.RunStatus == agent.JobRunStatus_STOPPED {
			// tell the job's JobSet to update its own status
			c.updateJobSetStatusForJob(job)
			// and remove the job from the activeJobs map since it's done
			delete(c.activeJobs, jobID)
		}
	}

	// next, remove any stopped jobSets from the active list
	for jobSetID, js := range c.activeJobSets {
		if js.RunStatus == pbs.Status_STOPPED {
			delete(c.activeJobSets, jobSetID)
		}
	}

	// now, see if we're already at capacity for maximum number of running
	// jobs. If we are, return early without checking for new jobs to add.
	if len(c.activeJobs) >= c.maxJobsRunning {
		return
	}

	// we have capacity for new jobs. start walking through the active
	// jobSets, check for ready jobs and add them as we go.
	for _, js := range c.activeJobSets {
		// if this jobset was still in STARTUP status, it's now running
		if js.RunStatus == pbs.Status_STARTUP {
			js.RunStatus = pbs.Status_RUNNING
		}

		readyAgentSteps := c.getReadyStepsForJobSet(js)
		for _, readyAgent := range readyAgentSteps {
			// ready to submit this as a new Job to run
			jobID := c.nextJobID
			c.nextJobID++

			// update corresponding step with job ID, now that we know it
			readyAgent.AgentJobID = jobID
			// and tell this Step that it is now running
			readyAgent.RunStatus = pbs.Status_RUNNING

			// create the Job's configuration
			cfg := c.getJobConfigForStep(readyAgent)

			// create a Job to store data within the controller
			job := &Job{
				JobID:           jobID,
				JobSetID:        readyAgent.JobSetID,
				JobSetStepID:    readyAgent.StepID,
				JobSetStepOrder: readyAgent.StepOrder,
				AgentName:       readyAgent.AgentName,
				Cfg:             *cfg,
				Status: agent.StatusReport{
					RunStatus:    agent.JobRunStatus_STARTUP,
					HealthStatus: agent.JobHealthStatus_OK,
					TimeStarted:  time.Now().Unix(),
				},
			}

			// add it to the main jobs and active jobs maps
			c.jobs[jobID] = job
			c.activeJobs[jobID] = job

			// now, create a JobRequest
			// we do this _after_ adding to main jobs / active jobs maps
			// so that the controller will already know about them, whenever
			// the jobcontroller gets back to us with status updates
			jr := jobcontroller.JobRequest{
				JobID:     jobID,
				AgentName: readyAgent.AgentName,
				Cfg:       *cfg,
			}

			// submit it to the channel
			// NOTE(review): this send happens while holding c.m — confirm
			// the jobcontroller always drains inJobStream, or this blocks
			// with the lock held.
			c.inJobStream <- jr

			// finally, check and see whether we're now at max jobs running
			// and if we are, time to stop
			if len(c.activeJobs) >= c.maxJobsRunning {
				return
			}
		}
	}
}
// updateJobSetStatusForJob updates the status of the JobSet containing the
// given Job, based on the current run and health status of that Job.
// It does not grab a lock, as runScheduler has already grabbed one and
// no other function should be calling updateJobSetStatusForJob.
func (c *Controller) updateJobSetStatusForJob(job *Job) {
	// don't grab a writer lock; runScheduler already has one

	// find the corresponding jobSet
	js, ok := c.jobSets[job.JobSetID]
	if !ok {
		// FIXME this shouldn't happen; job with unknown jobSet ID
		// NOTE(review): log.Fatalf kills the whole process here — confirm
		// this is the intended failure mode rather than an error return.
		log.Fatalf("failed; job ID %d has unknown job set ID %d", job.JobID, job.JobSetID)
	}

	// the _SAME sentinel values mean "leave the current status unchanged"
	newStatus, newHealth := c.determineStepStatuses(js.Steps)
	if newStatus != pbs.Status_STATUS_SAME {
		js.RunStatus = newStatus
	}
	if newHealth != pbs.Health_HEALTH_SAME {
		js.HealthStatus = newHealth
	}
}
// determineStepStatuses takes a slice of steps and walks through it
// (recursively if needed), looking at what the overall RunStatus and
// HealthStatus should now be. It returns the new statuses.
// It also updates the status/health of concurrent steps as needed.
// The sentinel values Status_STATUS_SAME / Health_HEALTH_SAME mean
// "no change from the current value".
func (c *Controller) determineStepStatuses(steps []*Step) (pbs.Status, pbs.Health) {
	allStopped := true
	newStatus := pbs.Status_STATUS_SAME
	newHealth := pbs.Health_HEALTH_SAME

	for _, step := range steps {
		// first, if concurrent, get sub-steps' own status and health
		// so we can update the concurrent step itself
		if step.T == StepTypeConcurrent {
			// run recursively on sub-steps
			subStatus, subHealth := c.determineStepStatuses(step.ConcurrentSteps)
			if subStatus != pbs.Status_STATUS_SAME {
				step.RunStatus = subStatus
			}
			if subHealth != pbs.Health_HEALTH_SAME {
				step.HealthStatus = subHealth
			}
		}

		// if jobset, get the separate jobSet's status and health
		if step.T == StepTypeJobSet {
			subJs, ok := c.jobSets[step.SubJobSetID]
			if !ok {
				// FIXME this shouldn't happen; job with unknown jobSet ID
				log.Fatalf("failed; jobset step %d in jobset %d has unknown subJobSet ID %d", step.StepID, step.JobSetID, step.SubJobSetID)
			}
			step.RunStatus = subJs.RunStatus
			step.HealthStatus = subJs.HealthStatus
		}

		// now, evaluate and bubble upwards for this step
		// if it is still running or in startup, check health but go on
		if step.RunStatus != pbs.Status_STOPPED {
			allStopped = false
		}

		// check and update health
		// note degraded, unless we're already in error state
		if step.HealthStatus == pbs.Health_DEGRADED && newHealth != pbs.Health_ERROR {
			newHealth = pbs.Health_DEGRADED
		}
		// and error health means the overall set of steps will be in error
		// and should also stop
		if step.HealthStatus == pbs.Health_ERROR {
			newStatus = pbs.Status_STOPPED
			newHealth = pbs.Health_ERROR
		}
	}

	// finally, decide what to bubble upwards now that we've looked at
	// all of the steps
	if allStopped {
		newStatus = pbs.Status_STOPPED
	}

	return newStatus, newHealth
}
// getReadyStepsForJobSet takes a JobSet and returns a slice of pointers
// to steps that are ready to run. The returned steps should only include
// steps that can be turned into Jobs, e.g. steps with type "agent".
// If a step with type "jobset" is ready to run, it should not be included
// in the returned steps; instead, a JobSetRequest should be submitted for
// it if one has not yet been submitted.
// If a step with type "concurrent" is ready to run, it should not be included
// in the returned steps; instead, its children (potentially including more
// sub-concurrent steps) should be handled as described above and included
// in the returned steps if they are of type "agent".
func (c *Controller) getReadyStepsForJobSet(js *JobSet) []*Step {
	readyAgentSteps, readyJobSetSteps, problem := retrieveReadySteps(js.Steps)
	if problem {
		// some problem occurred; return and don't provide any ready steps
		return nil
	}

	// create JobSetRequests for each JobSet that is ready
	if c.openForJobSetRequests {
		for _, jsStep := range readyJobSetSteps {
			// get parent JobSet so we can reuse its configs
			parentJobSetID := jsStep.JobSetID
			parentJobSet, ok := c.jobSets[parentJobSetID]
			if !ok {
				// problem finding parent job set; skip this one
				continue
			}
			jsr := JobSetRequest{
				TemplateName:    jsStep.SubJobSetTemplateName,
				Configs:         parentJobSet.Configs,
				ParentJobSetID:  parentJobSetID,
				ParentJobStepID: jsStep.StepID,
			}
			// add directly to pendingJSRs list; don't send through channel
			// because this is the same goroutine that would need to read
			// from that channel
			c.pendingJSRs.PushBack(jsr)
			// and mark this one as submitted
			jsStep.SubJobSetRequestSubmitted = true
		}
	}

	// now, return the agent steps that are ready to run
	return readyAgentSteps
}
|
package urlshort
import (
"net/http"
"gopkg.in/yaml.v2"
)
// option is a single path -> url mapping as it appears in the YAML input.
type option struct {
	Path string
	URL  string
}

// Options is the expected top-level YAML shape: a list of mappings.
type Options []option
// YAMLHandler parses yml (a list of {path, url} entries) and returns a
// handler that redirects known paths with 301 Moved Permanently and
// delegates everything else to fallback. On a YAML parse error the
// fallback's handler func is returned along with the error.
func YAMLHandler(yml []byte, fallback http.Handler) (http.HandlerFunc, error) {
	var parsed Options
	if err := yaml.Unmarshal(yml, &parsed); err != nil {
		return fallback.ServeHTTP, err
	}

	redirects := make(map[string]string, len(parsed))
	for _, entry := range parsed {
		redirects[entry.Path] = entry.URL
	}

	return func(w http.ResponseWriter, r *http.Request) {
		target, known := redirects[r.URL.Path]
		if !known {
			fallback.ServeHTTP(w, r)
			return
		}
		http.Redirect(w, r, target, http.StatusMovedPermanently)
	}, nil
}
|
package main
import (
"fmt"
"reflect"
)
// main demonstrates dynamic method invocation via the reflect package:
// the SayHello method is looked up by name and called with no arguments.
func main() {
	var stu Student
	ref := reflect.ValueOf(stu)
	f := ref.MethodByName("SayHello")
	f.Call([]reflect.Value{})
}

// Student is an empty struct carrying the SayHello method.
type Student struct {
}

// SayHello prints a greeting; it exists to be discovered via reflection.
func (stu Student) SayHello() {
	fmt.Println("hello world") // fixed typo: was "hello workd"
}
|
package main
import (
"encoding/json"
"io"
"errors"
"fmt"
)
// whData holds a GitLab webhook payload decoded only one level deep:
// each top-level key maps to its raw, not-yet-decoded JSON value.
type whData struct {
	data map[string]*json.RawMessage
}

// label is the subset of a GitLab label object this service uses.
type label struct {
	Id    int    `json:"id,int"`
	Title string `json:"title"`
	// ProjectId int `json:"project_id,int"`
	// Description string `json:"description"`
	// Type string `json:"type"` // can be "GroupLabel" or "ProjectLabel"
	// GroupId int `json:"group_id,int"`
}

// project identifies the repository the webhook refers to.
type project struct {
	Id   int    `json:"id,int"`
	Path string `json:"path_with_namespace"`
}

// issue is the subset of issue attributes used here.
type issue struct {
	Id    int    `json:"id,int"`
	Iid   int    `json:"iid,int"`
	Title string `json:"title"`
}

// user carries the username of the person who triggered the hook.
type user struct {
	Username string `json:"username"`
}

// Sentinel errors returned by the methods below.
var jsonError = errors.New("JSON decoding error")
var dataError = errors.New("webhook data error")
// Prepare reads exactly length bytes of the request body and decodes the
// top level of the JSON document into wh.data, leaving the values as raw
// JSON for the accessor methods to decode on demand.
// Returns dataError on a short/failed read and jsonError on bad JSON.
func (wh *whData) Prepare(body io.Reader, length int64) error {
	log(LOG_DEBUG, fmt.Sprintf("Request length - %v bytes", length), nil)
	bodyStr := make([]byte, length)
	readLen, err := io.ReadFull(body, bodyStr)
	if err != nil {
		// io.ReadFull fails on either a transport error or a body shorter
		// than the announced length; both messages are logged.
		log(LOG_ERROR, "Error while reading request data", err)
		log(LOG_ERROR, fmt.Sprintf("Mismatching data length: expected %v got %v", length, readLen), err)
		return dataError
	}
	//log(LOG_DEBUG, "Full request:\n" + string(bodyStr), nil)
	err = json.Unmarshal(bodyStr[:length], &wh.data)
	if err != nil {
		log(LOG_ERROR, "Top level JSON decoding error", err)
		return jsonError
	}
	return nil
}
// GetKind returns the webhook's "object_kind" field, or "" if the field
// is absent or cannot be decoded.
func (wh whData) GetKind() (objectKind string) {
	// Guard against a missing key: the original indexed the map and
	// dereferenced the resulting nil *json.RawMessage, which panics.
	raw, ok := wh.data["object_kind"]
	if !ok || raw == nil {
		log(LOG_ERROR, "Webhook data has no object_kind field", nil)
		return ""
	}
	err := json.Unmarshal(*raw, &objectKind)
	if err != nil {
		log(LOG_ERROR, "JSON error while getting object kind", err)
		return ""
	}
	return
}
// LabelsChanged reports whether the webhook's "changes" object contains a
// "labels" entry, i.e. whether this event modified labels.
func (wh whData) LabelsChanged() bool {
	var changes map[string]*json.RawMessage
	// NOTE(review): wh.data["changes"] is dereferenced without checking
	// the key exists; a payload without "changes" would panic here —
	// confirm callers only invoke this for events carrying "changes".
	err := json.Unmarshal(*wh.data["changes"], &changes)
	if err != nil {
		log(LOG_ERROR, "JSON error while getting list of changes", err)
		return false
	}
	_, exists := changes["labels"]
	return exists
}
// GetLabels decodes the webhook's "labels" array.
// NOTE(review): like the other accessors below, the map entry is
// dereferenced without a presence check; a payload missing the key would
// panic — confirm the callers guarantee the field exists.
func (wh whData) GetLabels() (labels []label, err error) {
	err = json.Unmarshal(*wh.data["labels"], &labels)
	if err != nil {
		log(LOG_ERROR, "JSON error while getting list of labels", err)
		return labels, jsonError
	}
	return labels, nil
}

// GetProject decodes the webhook's "project" object.
func (wh whData) GetProject() (project project, err error) {
	err = json.Unmarshal(*wh.data["project"], &project)
	if err != nil {
		log(LOG_ERROR, "JSON error while getting project details", err)
		return project, jsonError
	}
	return project, nil
}

// GetIssue decodes the webhook's "object_attributes" into an issue.
func (wh whData) GetIssue() (issue issue, err error) {
	err = json.Unmarshal(*wh.data["object_attributes"], &issue)
	if err != nil {
		log(LOG_ERROR, "JSON error while getting issue details", err)
		return issue, jsonError
	}
	return issue, nil
}
// GetUsername returns the username of the user who triggered the webhook,
// or "" if the "user" field is absent or malformed.
func (wh whData) GetUsername() string {
	// Guard against a missing key: the original indexed the map and
	// dereferenced the resulting nil *json.RawMessage, which panics.
	raw, ok := wh.data["user"]
	if !ok || raw == nil {
		log(LOG_ERROR, "Webhook data has no user field", nil)
		return ""
	}
	var usr user
	err := json.Unmarshal(*raw, &usr)
	if err != nil {
		log(LOG_ERROR, "JSON error while getting username of the changer", err)
		return ""
	}
	return usr.Username
}
|
package 二叉树
// TreeNode is a node of a binary tree.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// buildTree reconstructs a binary tree from its preorder and inorder
// traversal sequences (all node values assumed distinct).
func buildTree(preorder []int, inorder []int) *TreeNode {
	// An empty traversal corresponds to an empty subtree.
	if len(preorder) == 0 {
		return nil
	}

	// The first preorder element is the root; its position in the inorder
	// sequence tells how many nodes belong to the left subtree.
	rootVal := preorder[0]
	split := 0
	for i, v := range inorder {
		if v == rootVal {
			split = i
			break
		}
	}

	// Recurse on the corresponding slices of each traversal.
	return &TreeNode{
		Val:   rootVal,
		Left:  buildTree(preorder[1:1+split], inorder[:split]),
		Right: buildTree(preorder[1+split:], inorder[1+split:]),
	}
}
|
package model
import (
"posthis/database"
)
// LikeModel bundles like-related persistence operations.
type LikeModel struct {
	Model
}

// GetLikes returns all likes attached to the post with the given id.
func (LikeModel) GetLikes(id uint) ([]*Like, error) {
	post := &Post{}
	if err := database.DB.Preload("Likes").First(&post, id).Error; err != nil {
		return nil, err
	}
	return post.Likes, nil
}
// CreateLike records that user userId liked post postId, then returns the
// refreshed post detail view. Missing user or post aborts with the DB error.
func (lm LikeModel) CreateLike(userId, postId uint) (*PostDetailVM, error) {
	user := User{}
	post := Post{}
	if err := database.DB.First(&user, userId).Error; err != nil {
		return nil, err
	}
	if err := database.DB.First(&post, postId).Error; err != nil {
		return nil, err
	}
	like := Like{UserID: user.ID, PostID: post.ID}

	//Once it works add everything to the database
	if err := database.DB.Create(&like).Error; err != nil {
		return nil, err
	}
	// NOTE(review): the Association errors are ignored, and the name is
	// "likes" (lowercase) while Preload elsewhere uses "Likes" — confirm
	// the association name matches the model definition.
	database.DB.Model(&user).Association("likes").Append(&like)
	database.DB.Model(&post).Association("likes").Append(&like)

	postModel := PostModel(lm)
	model, err := postModel.GetPost(userId, postId)
	if err != nil {
		return nil, err
	}
	return model, nil
}
// DeleteLike removes the like of user userId on post postId and returns
// the refreshed post detail view. If the like or the post does not exist,
// the DB error is returned unchanged.
func (lm LikeModel) DeleteLike(userId, postId uint) (*PostDetailVM, error) {
	like := Like{}
	post := Post{}
	if err := database.DB.First(&like, "user_id = ? AND post_id = ?", userId, postId).Error; err != nil {
		return nil, err
	}
	if err := database.DB.First(&post, postId).Error; err != nil {
		return nil, err
	}
	if err := database.DB.Delete(&like).Error; err != nil {
		return nil, err
	}

	postModel := PostModel(lm)
	model, err := postModel.GetPost(userId, postId)
	if err != nil {
		return nil, err
	}
	return model, nil
}
|
package main
import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/keys"
"github.com/cosmos/cosmos-sdk/client/lcd"
_ "github.com/cosmos/cosmos-sdk/client/lcd/statik"
"github.com/cosmos/cosmos-sdk/client/rpc"
"github.com/cosmos/cosmos-sdk/client/tx"
"github.com/cosmos/cosmos-sdk/docs/examples/basecoin/app"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/cli"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/version"
authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
auth "github.com/cosmos/cosmos-sdk/x/auth/client/rest"
bankcmd "github.com/cosmos/cosmos-sdk/x/bank/client/cli"
bank "github.com/cosmos/cosmos-sdk/x/bank/client/rest"
ibccmd "github.com/cosmos/cosmos-sdk/x/ibc/client/cli"
slashingcmd "github.com/cosmos/cosmos-sdk/x/slashing/client/cli"
slashing "github.com/cosmos/cosmos-sdk/x/slashing/client/rest"
stakecmd "github.com/cosmos/cosmos-sdk/x/stake/client/cli"
stake "github.com/cosmos/cosmos-sdk/x/stake/client/rest"
)
// Store keys of the modules this CLI queries.
const (
	storeAcc      = "acc"
	storeSlashing = "slashing"
	storeStake    = "stake"
)

// rootCmd is the entry point for this binary
var (
	rootCmd = &cobra.Command{
		Use:   "basecli",
		Short: "Basecoin light-client",
	}
)
// main assembles the basecli command tree (standard rpc/tx commands,
// module query commands, transaction commands, and the LCD/keys/version
// utilities) and executes it.
func main() {
	// disable sorting
	cobra.EnableCommandSorting = false

	// get the codec
	cdc := app.MakeCodec()

	// Setup certain SDK config: basecoin-specific Bech32 prefixes,
	// sealed so they cannot be changed afterwards.
	config := sdk.GetConfig()
	config.SetBech32PrefixForAccount("baseacc", "basepub")
	config.SetBech32PrefixForValidator("baseval", "basevalpub")
	config.SetBech32PrefixForConsensusNode("basecons", "baseconspub")
	config.Seal()

	// TODO: Setup keybase, viper object, etc. to be passed into
	// the below functions and eliminate global vars, like we do
	// with the cdc.

	// add standard rpc, and tx commands
	rootCmd.AddCommand(
		rpc.InitClientCommand(),
		rpc.StatusCommand(),
		client.LineBreak,
		tx.SearchTxCmd(cdc),
		tx.QueryTxCmd(cdc),
		client.LineBreak,
	)

	// add query/post commands (custom to binary)
	rootCmd.AddCommand(
		stakecmd.GetCmdQueryValidator(storeStake, cdc),
		stakecmd.GetCmdQueryValidators(storeStake, cdc),
		stakecmd.GetCmdQueryValidatorUnbondingDelegations(storeStake, cdc),
		stakecmd.GetCmdQueryValidatorRedelegations(storeStake, cdc),
		stakecmd.GetCmdQueryDelegation(storeStake, cdc),
		stakecmd.GetCmdQueryDelegations(storeStake, cdc),
		stakecmd.GetCmdQueryPool(storeStake, cdc),
		stakecmd.GetCmdQueryParams(storeStake, cdc),
		stakecmd.GetCmdQueryUnbondingDelegation(storeStake, cdc),
		stakecmd.GetCmdQueryUnbondingDelegations(storeStake, cdc),
		stakecmd.GetCmdQueryRedelegation(storeStake, cdc),
		stakecmd.GetCmdQueryRedelegations(storeStake, cdc),
		slashingcmd.GetCmdQuerySigningInfo(storeSlashing, cdc),
		stakecmd.GetCmdQueryValidatorDelegations(storeStake, cdc),
		authcmd.GetAccountCmd(storeAcc, cdc),
	)

	// transaction (state-changing) commands
	rootCmd.AddCommand(
		bankcmd.SendTxCmd(cdc),
		ibccmd.IBCTransferCmd(cdc),
		ibccmd.IBCRelayCmd(cdc),
		stakecmd.GetCmdCreateValidator(cdc),
		stakecmd.GetCmdEditValidator(cdc),
		stakecmd.GetCmdDelegate(cdc),
		stakecmd.GetCmdUnbond(storeStake, cdc),
		stakecmd.GetCmdRedelegate(storeStake, cdc),
		slashingcmd.GetCmdUnjail(cdc),
	)

	// add proxy, version and key info
	rootCmd.AddCommand(
		client.LineBreak,
		lcd.ServeCommand(cdc, registerRoutes),
		keys.Commands(),
		client.LineBreak,
		version.VersionCmd,
	)

	// prepare and add flags
	executor := cli.PrepareMainCmd(rootCmd, "BC", app.DefaultCLIHome)
	err := executor.Execute()
	if err != nil {
		// Note: Handle with #870
		panic(err)
	}
}
// registerRoutes mounts the REST endpoints (keys, rpc, tx, auth, bank,
// stake, slashing) on the LCD rest server's mux.
func registerRoutes(rs *lcd.RestServer) {
	keys.RegisterRoutes(rs.Mux, rs.CliCtx.Indent)
	rpc.RegisterRoutes(rs.CliCtx, rs.Mux)
	tx.RegisterRoutes(rs.CliCtx, rs.Mux, rs.Cdc)
	auth.RegisterRoutes(rs.CliCtx, rs.Mux, rs.Cdc, storeAcc)
	bank.RegisterRoutes(rs.CliCtx, rs.Mux, rs.Cdc, rs.KeyBase)
	stake.RegisterRoutes(rs.CliCtx, rs.Mux, rs.Cdc, rs.KeyBase)
	slashing.RegisterRoutes(rs.CliCtx, rs.Mux, rs.Cdc, rs.KeyBase)
}
|
package pubsub
import (
"context"
"fmt"
"math/rand"
"time"
pb "gx/ipfs/QmWL6MKfes1HuSiRUNzGmwy9YyQDwcZF9V1NaA2keYKhtE/go-libp2p-pubsub/pb"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol"
host "gx/ipfs/QmfRHxh8bt4jWLKRhNvR5fn7mFACrQBFLqV4wyoymEExKV/go-libp2p-host"
)
const (
	// GossipSubID is the protocol ID advertised for the gossipsub mesh.
	GossipSubID = protocol.ID("/meshsub/1.0.0")
)

// Tunable gossipsub parameters (package variables so they can be adjusted).
var (
	// overlay parameters: target mesh degree and its low/high watermarks
	GossipSubD   = 6
	GossipSubDlo = 4
	GossipSubDhi = 12

	// gossip parameters: message-cache history window sizes
	GossipSubHistoryLength = 5
	GossipSubHistoryGossip = 3

	// heartbeat interval
	GossipSubHeartbeatInitialDelay = 100 * time.Millisecond
	GossipSubHeartbeatInterval     = 1 * time.Second

	// fanout ttl: how long fanout peer lists survive without publishing
	GossipSubFanoutTTL = 60 * time.Second
)
// NewGossipSub returns a new PubSub object using GossipSubRouter as the
// router, with all router state maps initialized empty.
func NewGossipSub(ctx context.Context, h host.Host, opts ...Option) (*PubSub, error) {
	rt := &GossipSubRouter{
		peers:   make(map[peer.ID]protocol.ID),
		mesh:    make(map[string]map[peer.ID]struct{}),
		fanout:  make(map[string]map[peer.ID]struct{}),
		lastpub: make(map[string]int64),
		gossip:  make(map[peer.ID][]*pb.ControlIHave),
		control: make(map[peer.ID]*pb.ControlMessage),
		mcache:  NewMessageCache(GossipSubHistoryGossip, GossipSubHistoryLength),
	}
	return NewPubSub(ctx, h, rt, opts...)
}
// GossipSubRouter is a router that implements the gossipsub protocol.
// For each topic we have joined, we maintain an overlay through which
// messages flow; this is the mesh map.
// For each topic we publish to without joining, we maintain a list of peers
// to use for injecting our messages in the overlay with stable routes; this
// is the fanout map. Fanout peer lists are expired if we don't publish any
// messages to their topic for GossipSubFanoutTTL.
type GossipSubRouter struct {
	p       *PubSub
	peers   map[peer.ID]protocol.ID         // peer protocols
	mesh    map[string]map[peer.ID]struct{} // topic meshes
	fanout  map[string]map[peer.ID]struct{} // topic fanout
	lastpub map[string]int64                // last publish time for fanout topics
	gossip  map[peer.ID][]*pb.ControlIHave  // pending gossip
	control map[peer.ID]*pb.ControlMessage  // pending control messages
	mcache  *MessageCache
}
// Protocols advertises support for both gossipsub and plain floodsub.
func (gs *GossipSubRouter) Protocols() []protocol.ID {
	return []protocol.ID{GossipSubID, FloodSubID}
}

// Attach binds the router to its PubSub instance and starts the
// heartbeat loop in its own goroutine.
func (gs *GossipSubRouter) Attach(p *PubSub) {
	gs.p = p
	go gs.heartbeatTimer()
}

// AddPeer records the protocol a newly connected peer speaks.
func (gs *GossipSubRouter) AddPeer(p peer.ID, proto protocol.ID) {
	log.Debugf("PEERUP: Add new peer %s using %s", p, proto)
	gs.peers[p] = proto
}

// RemovePeer drops all router state for a disconnected peer: its protocol
// entry, mesh and fanout membership, and pending gossip/control messages.
func (gs *GossipSubRouter) RemovePeer(p peer.ID) {
	log.Debugf("PEERDOWN: Remove disconnected peer %s", p)
	delete(gs.peers, p)
	for _, peers := range gs.mesh {
		delete(peers, p)
	}
	for _, peers := range gs.fanout {
		delete(peers, p)
	}
	delete(gs.gossip, p)
	delete(gs.control, p)
}
// HandleRPC processes the control portion of an incoming RPC (IHAVE,
// IWANT, GRAFT, PRUNE) and, if any response pieces were produced, sends a
// single combined reply to the originating peer.
func (gs *GossipSubRouter) HandleRPC(rpc *RPC) {
	ctl := rpc.GetControl()
	if ctl == nil {
		return
	}

	iwant := gs.handleIHave(rpc.from, ctl) // IWANT requests we will send
	ihave := gs.handleIWant(rpc.from, ctl) // message payloads we will send
	prune := gs.handleGraft(rpc.from, ctl) // PRUNEs for rejected grafts
	gs.handlePrune(rpc.from, ctl)

	if len(iwant) == 0 && len(ihave) == 0 && len(prune) == 0 {
		return
	}

	out := rpcWithControl(ihave, nil, iwant, nil, prune)
	gs.sendRPC(rpc.from, out)
}
// handleIHave processes the IHAVE entries of a control message: for topics
// we are meshed in, it collects the advertised message IDs we have not yet
// seen and answers with a single IWANT request for them (nil if none).
func (gs *GossipSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb.ControlIWant {
	// Collect into a set to deduplicate IDs across IHAVE entries.
	iwant := make(map[string]struct{})
	for _, ihave := range ctl.GetIhave() {
		topic := ihave.GetTopicID()
		_, ok := gs.mesh[topic]
		if !ok {
			// We are not meshed in this topic; ignore its gossip.
			continue
		}

		for _, mid := range ihave.GetMessageIDs() {
			if gs.p.seenMessage(mid) {
				continue
			}
			iwant[mid] = struct{}{}
		}
	}

	if len(iwant) == 0 {
		return nil
	}

	log.Debugf("IHAVE: Asking for %d messages from %s", len(iwant), p)

	iwantlst := make([]string, 0, len(iwant))
	for mid := range iwant {
		iwantlst = append(iwantlst, mid)
	}

	// gofmt -s: the element type is implied by the slice literal, so the
	// redundant &pb.ControlIWant was dropped.
	return []*pb.ControlIWant{{MessageIDs: iwantlst}}
}
// handleIWant answers a peer's IWANT requests with the matching messages
// still present in the message cache; duplicate IDs are served once.
// Returns nil when none of the requested messages are cached.
func (gs *GossipSubRouter) handleIWant(p peer.ID, ctl *pb.ControlMessage) []*pb.Message {
	found := make(map[string]*pb.Message)
	for _, iwant := range ctl.GetIwant() {
		for _, mid := range iwant.GetMessageIDs() {
			if msg, ok := gs.mcache.Get(mid); ok {
				found[mid] = msg
			}
		}
	}
	if len(found) == 0 {
		return nil
	}
	log.Debugf("IWANT: Sending %d messages to %s", len(found), p)
	msgs := make([]*pb.Message, 0, len(found))
	for _, m := range found {
		msgs = append(msgs, m)
	}
	return msgs
}
// handleGraft accepts GRAFTs for topics we are meshed in (adding the peer
// to our mesh and tagging it in the connection manager) and returns PRUNE
// responses for topics we are not.
func (gs *GossipSubRouter) handleGraft(p peer.ID, ctl *pb.ControlMessage) []*pb.ControlPrune {
	var prune []string
	for _, graft := range ctl.GetGraft() {
		topic := graft.GetTopicID()
		peers, ok := gs.mesh[topic]
		if !ok {
			// not meshed in this topic; reject with a PRUNE
			prune = append(prune, topic)
		} else {
			log.Debugf("GRAFT: Add mesh link from %s in %s", p, topic)
			peers[p] = struct{}{}
			gs.tagPeer(p, topic)
		}
	}

	if len(prune) == 0 {
		return nil
	}

	cprune := make([]*pb.ControlPrune, 0, len(prune))
	for _, topic := range prune {
		// copy the loop variable: &topic would otherwise alias a single
		// variable shared across iterations, making every PRUNE carry
		// the last topic (pre-Go 1.22 range semantics)
		topic := topic
		cprune = append(cprune, &pb.ControlPrune{TopicID: &topic})
	}
	return cprune
}
// handlePrune removes our mesh link to a peer that has pruned us from its
// mesh for the given topics.
func (gs *GossipSubRouter) handlePrune(p peer.ID, ctl *pb.ControlMessage) {
	for _, prune := range ctl.GetPrune() {
		topic := prune.GetTopicID()
		peers, ok := gs.mesh[topic]
		if ok {
			log.Debugf("PRUNE: Remove mesh link to %s in %s", p, topic)
			delete(peers, p)
			gs.untagPeer(p, topic)
		}
	}
}
// Publish caches msg (so it can later be served via IWANT/IHAVE) and
// forwards it to: all floodsub peers in its topics, plus the mesh peers
// for joined topics or the fanout peers for topics we merely publish to.
// The message source and its origin peer are never sent back the message.
func (gs *GossipSubRouter) Publish(from peer.ID, msg *pb.Message) {
	gs.mcache.Put(msg)

	tosend := make(map[peer.ID]struct{})
	for _, topic := range msg.GetTopicIDs() {
		// any peers in the topic?
		tmap, ok := gs.p.topics[topic]
		if !ok {
			continue
		}

		// floodsub peers
		for p := range tmap {
			if gs.peers[p] == FloodSubID {
				tosend[p] = struct{}{}
			}
		}

		// gossipsub peers
		gmap, ok := gs.mesh[topic]
		if !ok {
			// we are not in the mesh for topic, use fanout peers
			gmap, ok = gs.fanout[topic]
			if !ok {
				// we don't have any, pick some
				peers := gs.getPeers(topic, GossipSubD, func(peer.ID) bool { return true })
				if len(peers) > 0 {
					gmap = peerListToMap(peers)
					gs.fanout[topic] = gmap
				}
			}
			// record the publish time so heartbeat can expire idle fanout
			gs.lastpub[topic] = time.Now().UnixNano()
		}

		for p := range gmap {
			tosend[p] = struct{}{}
		}
	}

	out := rpcWithMessages(msg)
	for pid := range tosend {
		// don't echo the message to its sender or its origin
		if pid == from || pid == peer.ID(msg.GetFrom()) {
			continue
		}

		gs.sendRPC(pid, out)
	}
}
// Join builds the mesh for topic: if we already have fanout peers (we were
// publishing without being joined) they are promoted to mesh peers,
// otherwise up to GossipSubD gossipsub peers are selected. Each mesh peer
// is sent a GRAFT and tagged in the connection manager.
func (gs *GossipSubRouter) Join(topic string) {
	gmap, ok := gs.mesh[topic]
	if ok {
		// already joined
		return
	}

	log.Debugf("JOIN %s", topic)

	gmap, ok = gs.fanout[topic]
	if ok {
		// promote the fanout set to a mesh
		gs.mesh[topic] = gmap
		delete(gs.fanout, topic)
		delete(gs.lastpub, topic)
	} else {
		peers := gs.getPeers(topic, GossipSubD, func(peer.ID) bool { return true })
		gmap = peerListToMap(peers)
		gs.mesh[topic] = gmap
	}

	for p := range gmap {
		log.Debugf("JOIN: Add mesh link to %s in %s", p, topic)
		gs.sendGraft(p, topic)
		gs.tagPeer(p, topic)
	}
}
// Leave tears down the mesh for topic, sending a PRUNE to every mesh peer
// and untagging them in the connection manager. A no-op if not joined.
func (gs *GossipSubRouter) Leave(topic string) {
	gmap, ok := gs.mesh[topic]
	if !ok {
		return
	}

	log.Debugf("LEAVE %s", topic)

	delete(gs.mesh, topic)

	for p := range gmap {
		log.Debugf("LEAVE: Remove mesh link to %s in %s", p, topic)
		gs.sendPrune(p, topic)
		gs.untagPeer(p, topic)
	}
}
// sendGraft sends a standalone GRAFT control message for topic to p.
func (gs *GossipSubRouter) sendGraft(p peer.ID, topic string) {
	graft := []*pb.ControlGraft{&pb.ControlGraft{TopicID: &topic}}
	out := rpcWithControl(nil, nil, nil, graft, nil)
	gs.sendRPC(p, out)
}
// sendPrune sends a standalone PRUNE control message for topic to p.
func (gs *GossipSubRouter) sendPrune(p peer.ID, topic string) {
	prune := []*pb.ControlPrune{&pb.ControlPrune{TopicID: &topic}}
	out := rpcWithControl(nil, nil, nil, nil, prune)
	gs.sendRPC(p, out)
}
// sendRPC delivers an RPC to p's outbound queue, first piggybacking any
// pending control-message retries and gossip destined for that peer. The
// RPC is copied before mutation so shared RPCs are never modified. If the
// peer's queue is full the RPC is dropped, but its GRAFT/PRUNE content is
// re-queued for retry on a later send or heartbeat.
func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) {
	// do we own the RPC?
	own := false

	// piggyback control message retries
	ctl, ok := gs.control[p]
	if ok {
		// copy before mutating: the caller's RPC may be shared with other peers
		out = copyRPC(out)
		own = true
		gs.piggybackControl(p, out, ctl)
		delete(gs.control, p)
	}

	// piggyback gossip
	ihave, ok := gs.gossip[p]
	if ok {
		if !own {
			out = copyRPC(out)
			own = true
		}
		gs.piggybackGossip(p, out, ihave)
		delete(gs.gossip, p)
	}

	mch, ok := gs.p.peers[p]
	if !ok {
		// peer is gone; nothing to deliver to
		return
	}

	select {
	case mch <- out:
	default:
		log.Infof("dropping message to peer %s: queue full", p)
		// push control messages that need to be retried
		ctl := out.GetControl()
		if ctl != nil {
			gs.pushControl(p, ctl)
		}
	}
}
// heartbeatTimer schedules gs.heartbeat onto the PubSub event loop, first
// after GossipSubHeartbeatInitialDelay and then every
// GossipSubHeartbeatInterval, until the PubSub context is cancelled.
// Running heartbeat via gs.p.eval serializes it with other router events.
func (gs *GossipSubRouter) heartbeatTimer() {
	time.Sleep(GossipSubHeartbeatInitialDelay)
	select {
	case gs.p.eval <- gs.heartbeat:
	case <-gs.p.ctx.Done():
		return
	}

	ticker := time.NewTicker(GossipSubHeartbeatInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			select {
			case gs.p.eval <- gs.heartbeat:
			case <-gs.p.ctx.Done():
				return
			}
		case <-gs.p.ctx.Done():
			return
		}
	}
}
// heartbeat runs one gossipsub maintenance cycle: flush pending
// control/gossip, rebalance every joined mesh to between GossipSubDlo and
// GossipSubDhi peers (targeting GossipSubD), expire idle fanout topics,
// top up fanout sets, emit gossip, send coalesced GRAFT/PRUNE, and advance
// the message cache window. It runs on the PubSub event loop, so router
// state is accessed without locking.
func (gs *GossipSubRouter) heartbeat() {
	defer log.EventBegin(gs.p.ctx, "heartbeat").Done()

	// flush pending control message from retries and gossip
	// that hasn't been piggybacked since the last heartbeat
	gs.flush()

	// peers to GRAFT/PRUNE, accumulated so each peer gets one coalesced RPC
	tograft := make(map[peer.ID][]string)
	toprune := make(map[peer.ID][]string)

	// maintain the mesh for topics we have joined
	for topic, peers := range gs.mesh {
		// do we have enough peers?
		if len(peers) < GossipSubDlo {
			ineed := GossipSubD - len(peers)
			plst := gs.getPeers(topic, ineed, func(p peer.ID) bool {
				// filter our current peers
				_, ok := peers[p]
				return !ok
			})

			for _, p := range plst {
				log.Debugf("HEARTBEAT: Add mesh link to %s in %s", p, topic)
				peers[p] = struct{}{}
				gs.tagPeer(p, topic)
				topics := tograft[p]
				tograft[p] = append(topics, topic)
			}
		}

		// do we have too many peers?
		if len(peers) > GossipSubDhi {
			idontneed := len(peers) - GossipSubD
			plst := peerMapToList(peers)
			// drop a random selection of the surplus
			shufflePeers(plst)

			for _, p := range plst[:idontneed] {
				log.Debugf("HEARTBEAT: Remove mesh link to %s in %s", p, topic)
				delete(peers, p)
				gs.untagPeer(p, topic)
				topics := toprune[p]
				toprune[p] = append(topics, topic)
			}
		}

		gs.emitGossip(topic, peers)
	}

	// expire fanout for topics we haven't published to in a while
	now := time.Now().UnixNano()
	for topic, lastpub := range gs.lastpub {
		if lastpub+int64(GossipSubFanoutTTL) < now {
			delete(gs.fanout, topic)
			delete(gs.lastpub, topic)
		}
	}

	// maintain our fanout for topics we are publishing but we have not joined
	for topic, peers := range gs.fanout {
		// check whether our peers are still in the topic
		for p := range peers {
			_, ok := gs.p.topics[topic][p]
			if !ok {
				delete(peers, p)
			}
		}

		// do we need more peers?
		if len(peers) < GossipSubD {
			ineed := GossipSubD - len(peers)
			plst := gs.getPeers(topic, ineed, func(p peer.ID) bool {
				// filter our current peers
				_, ok := peers[p]
				return !ok
			})

			for _, p := range plst {
				peers[p] = struct{}{}
			}
		}

		gs.emitGossip(topic, peers)
	}

	// send coalesced GRAFT/PRUNE messages (will piggyback gossip)
	gs.sendGraftPrune(tograft, toprune)

	// advance the message history window
	gs.mcache.Shift()
}
// sendGraftPrune sends one coalesced RPC per peer carrying all of its
// pending GRAFTs and (when present) PRUNEs, then sends pure-PRUNE RPCs to
// the remaining peers in toprune. toprune entries consumed alongside a
// graft are removed from the map.
func (gs *GossipSubRouter) sendGraftPrune(tograft, toprune map[peer.ID][]string) {
	for p, topics := range tograft {
		graft := make([]*pb.ControlGraft, 0, len(topics))
		for _, topic := range topics {
			// copy the loop variable; &topic must not alias the single
			// range variable shared across iterations (pre-Go 1.22), or
			// every GRAFT would carry the last topic
			topic := topic
			graft = append(graft, &pb.ControlGraft{TopicID: &topic})
		}

		var prune []*pb.ControlPrune
		pruning, ok := toprune[p]
		if ok {
			delete(toprune, p)
			prune = make([]*pb.ControlPrune, 0, len(pruning))
			for _, topic := range pruning {
				topic := topic // per-iteration copy, see above
				prune = append(prune, &pb.ControlPrune{TopicID: &topic})
			}
		}

		out := rpcWithControl(nil, nil, nil, graft, prune)
		gs.sendRPC(p, out)
	}

	for p, topics := range toprune {
		prune := make([]*pb.ControlPrune, 0, len(topics))
		for _, topic := range topics {
			topic := topic // per-iteration copy, see above
			prune = append(prune, &pb.ControlPrune{TopicID: &topic})
		}

		out := rpcWithControl(nil, nil, nil, nil, prune)
		gs.sendRPC(p, out)
	}
}
// emitGossip queues IHAVE announcements for topic's gossipable message IDs
// to up to GossipSubD random topic peers that are outside the given
// mesh/fanout set. A no-op when the cache has nothing to gossip.
func (gs *GossipSubRouter) emitGossip(topic string, peers map[peer.ID]struct{}) {
	mids := gs.mcache.GetGossipIDs(topic)
	if len(mids) == 0 {
		return
	}

	gpeers := gs.getPeers(topic, GossipSubD, func(peer.ID) bool { return true })
	for _, p := range gpeers {
		// skip mesh peers
		_, ok := peers[p]
		if !ok {
			gs.pushGossip(p, &pb.ControlIHave{TopicID: &topic, MessageIDs: mids})
		}
	}
}
// flush sends out all pending gossip and retried control messages that
// were not piggybacked on other RPCs since the last heartbeat.
func (gs *GossipSubRouter) flush() {
	// send gossip first, which will also piggyback control
	for p, ihave := range gs.gossip {
		delete(gs.gossip, p)
		out := rpcWithControl(nil, ihave, nil, nil, nil)
		gs.sendRPC(p, out)
	}

	// send the remaining control messages
	for p, ctl := range gs.control {
		delete(gs.control, p)
		out := rpcWithControl(nil, nil, nil, ctl.Graft, ctl.Prune)
		gs.sendRPC(p, out)
	}
}
// pushGossip queues an IHAVE for p, to be piggybacked on the next RPC sent
// to that peer or flushed at the next heartbeat.
func (gs *GossipSubRouter) pushGossip(p peer.ID, ihave *pb.ControlIHave) {
	gs.gossip[p] = append(gs.gossip[p], ihave)
}
// piggybackGossip attaches p's pending IHAVE gossip to an outgoing RPC that
// we own (callers copy the RPC before calling this).
// NOTE(review): this assigns rather than appends Ihave, so it would clobber
// any IHAVE already present on out — confirm callers never pass RPCs that
// already carry gossip before reusing elsewhere.
func (gs *GossipSubRouter) piggybackGossip(p peer.ID, out *RPC, ihave []*pb.ControlIHave) {
	ctl := out.GetControl()
	if ctl == nil {
		ctl = &pb.ControlMessage{}
		out.Control = ctl
	}

	ctl.Ihave = ihave
}
// pushControl queues a control message for retry to p. Only GRAFT/PRUNE
// survive; IHAVE/IWANT are stripped because gossip is ephemeral and never
// retried. Note: ctl is mutated in place.
func (gs *GossipSubRouter) pushControl(p peer.ID, ctl *pb.ControlMessage) {
	// remove IHAVE/IWANT from control message, gossip is not retried
	ctl.Ihave = nil
	ctl.Iwant = nil
	if ctl.Graft != nil || ctl.Prune != nil {
		gs.control[p] = ctl
	}
}
// piggybackControl merges a retried control message into out, first
// dropping entries that have become stale: a GRAFT is only still relevant
// if p is currently in our mesh for the topic, a PRUNE only if p is not.
func (gs *GossipSubRouter) piggybackControl(p peer.ID, out *RPC, ctl *pb.ControlMessage) {
	// check control message for staleness first
	var tograft []*pb.ControlGraft
	var toprune []*pb.ControlPrune

	for _, graft := range ctl.GetGraft() {
		topic := graft.GetTopicID()
		peers, ok := gs.mesh[topic]
		if !ok {
			// we left the topic; the GRAFT is stale
			continue
		}
		_, ok = peers[p]
		if ok {
			tograft = append(tograft, graft)
		}
	}

	for _, prune := range ctl.GetPrune() {
		topic := prune.GetTopicID()
		peers, ok := gs.mesh[topic]
		if !ok {
			// we left the topic entirely; the PRUNE still applies
			toprune = append(toprune, prune)
			continue
		}
		_, ok = peers[p]
		if !ok {
			toprune = append(toprune, prune)
		}
	}

	if len(tograft) == 0 && len(toprune) == 0 {
		return
	}

	xctl := out.Control
	if xctl == nil {
		xctl = &pb.ControlMessage{}
		out.Control = xctl
	}

	if len(tograft) > 0 {
		xctl.Graft = append(xctl.Graft, tograft...)
	}
	if len(toprune) > 0 {
		xctl.Prune = append(xctl.Prune, toprune...)
	}
}
// getPeers returns up to count randomly chosen gossipsub-speaking peers in
// topic that pass the filter; count <= 0 means no limit. Floodsub-only
// peers are never selected for meshes or gossip.
func (gs *GossipSubRouter) getPeers(topic string, count int, filter func(peer.ID) bool) []peer.ID {
	tmap, ok := gs.p.topics[topic]
	if !ok {
		return nil
	}

	peers := make([]peer.ID, 0, len(tmap))
	for p := range tmap {
		if gs.peers[p] == GossipSubID && filter(p) {
			peers = append(peers, p)
		}
	}

	shufflePeers(peers)

	if count > 0 && len(peers) > count {
		peers = peers[:count]
	}

	return peers
}
// tagPeer marks p as valuable for topic in the connection manager, making
// it less likely to be dropped under connection pressure.
func (gs *GossipSubRouter) tagPeer(p peer.ID, topic string) {
	tag := topicTag(topic)
	gs.p.host.ConnManager().TagPeer(p, tag, 2)
}
// untagPeer removes the topic protection tag from p in the connection manager.
func (gs *GossipSubRouter) untagPeer(p peer.ID, topic string) {
	tag := topicTag(topic)
	gs.p.host.ConnManager().UntagPeer(p, tag)
}
// topicTag builds the connection-manager tag name for a pubsub topic.
func topicTag(topic string) string {
	return "pubsub:" + topic
}
// peerListToMap converts a peer slice into a set keyed by peer ID.
func peerListToMap(peers []peer.ID) map[peer.ID]struct{} {
	set := make(map[peer.ID]struct{}, len(peers))
	for _, id := range peers {
		set[id] = struct{}{}
	}
	return set
}
// peerMapToList flattens a peer set into a slice (in map iteration order,
// which is unspecified).
func peerMapToList(peers map[peer.ID]struct{}) []peer.ID {
	out := make([]peer.ID, 0, len(peers))
	for id := range peers {
		out = append(out, id)
	}
	return out
}
// shufflePeers randomizes the peer slice in place (Fisher-Yates) using the
// global math/rand source.
func shufflePeers(peers []peer.ID) {
	for i := range peers {
		j := rand.Intn(i + 1)
		peers[i], peers[j] = peers[j], peers[i]
	}
}
|
package realm
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/10gen/realm-cli/internal/utils/api"
"github.com/10gen/realm-cli/internal/utils/flags"
"go.mongodb.org/mongo-driver/bson/primitive"
)
const (
	// Realm admin API endpoint path patterns, all rooted at the app path.
	apiKeysPathPattern      = appPathPattern + "/api_keys"
	pendingUsersPathPattern = appPathPattern + "/user_registrations/pending_users"
	usersPathPattern        = appPathPattern + "/users"
	userPathPattern         = usersPathPattern + "/%s"
	userDisablePathPattern  = userPathPattern + "/disable"
	userEnablePathPattern   = userPathPattern + "/enable"
	userLogoutPathPattern   = userPathPattern + "/logout"

	// query parameter names used when listing users
	usersQueryStatus        = "status"
	usersQueryProviderTypes = "provider_types"
)
// UserState is a Realm app user state
type UserState string

// String returns the user state string
func (us UserState) String() string { return string(us) }

// Type returns the user state type; together with String and Set this
// satisfies the flag value interface so UserState can be a CLI flag.
func (us UserState) Type() string { return flags.TypeString }

// Set validates and sets the user state value; anything other than "",
// "enabled" or "disabled" is rejected with errInvalidUserState.
func (us *UserState) Set(val string) error {
	newUserState := UserState(val)

	if !isValidUserState(newUserState) {
		return errInvalidUserState
	}

	*us = newUserState
	return nil
}
// set of supported user state values
const (
	UserStateNil      UserState = "" // unset; matches any state
	UserStateEnabled  UserState = "enabled"
	UserStateDisabled UserState = "disabled"
)

var (
	// errInvalidUserState is returned by UserState.Set for unsupported values.
	errInvalidUserState = func() error {
		allUserStateTypes := []string{UserStateEnabled.String(), UserStateDisabled.String()}
		return fmt.Errorf("unsupported value, use one of [%s] instead", strings.Join(allUserStateTypes, ", "))
	}()
)

// isValidUserState reports whether us is one of the supported user states.
func isValidUserState(us UserState) bool {
	switch us {
	case
		UserStateNil, // allow state to be optional
		UserStateEnabled,
		UserStateDisabled:
		return true
	}
	return false
}
// APIKey is a Realm app api key
type APIKey struct {
	ID       string `json:"_id"`
	Name     string `json:"name"`
	Disabled bool   `json:"disabled"`
	Key      string `json:"key"`
}

// User is a Realm app user
type User struct {
	ID                     string                 `json:"_id"`
	Identities             []UserIdentity         `json:"identities,omitempty"`
	Type                   string                 `json:"type"`
	Disabled               bool                   `json:"disabled"`
	Data                   map[string]interface{} `json:"data,omitempty"`
	CreationDate           int64                  `json:"creation_date"`
	LastAuthenticationDate int64                  `json:"last_authentication_date"`
}

// UserIdentity is a Realm app user identity (one auth-provider binding of a user)
type UserIdentity struct {
	UID          string                 `json:"id"`
	ProviderType AuthProviderType       `json:"provider_type"`
	ProviderID   primitive.ObjectID     `json:"provider_id"`
	ProviderData map[string]interface{} `json:"provider_data,omitempty"`
}
// AuthProviderType is a Realm app auth provider type
type AuthProviderType string

// set of supported auth provider type values
const (
	AuthProviderTypeEmpty          AuthProviderType = ""
	AuthProviderTypeUserPassword   AuthProviderType = "local-userpass"
	AuthProviderTypeAPIKey         AuthProviderType = "api-key"
	AuthProviderTypeFacebook       AuthProviderType = "oauth2-facebook"
	AuthProviderTypeGoogle         AuthProviderType = "oauth2-google"
	AuthProviderTypeAnonymous      AuthProviderType = "anon-user"
	AuthProviderTypeCustomToken    AuthProviderType = "custom-token"
	AuthProviderTypeApple          AuthProviderType = "oauth2-apple"
	AuthProviderTypeCustomFunction AuthProviderType = "custom-function"
)

// set of supported auth constants
var (
	// ValidAuthProviderTypes lists every concrete provider type;
	// AuthProviderTypeEmpty is deliberately excluded.
	ValidAuthProviderTypes = []AuthProviderType{
		AuthProviderTypeUserPassword,
		AuthProviderTypeAPIKey,
		AuthProviderTypeFacebook,
		AuthProviderTypeGoogle,
		AuthProviderTypeAnonymous,
		AuthProviderTypeCustomToken,
		AuthProviderTypeApple,
		AuthProviderTypeCustomFunction,
	}
)
// String returns the raw auth provider type string.
func (pt AuthProviderType) String() string { return string(pt) }
// authProviderDisplayNames maps each supported auth provider type to its
// human-readable display name.
var authProviderDisplayNames = map[AuthProviderType]string{
	AuthProviderTypeAnonymous:      "Anonymous",
	AuthProviderTypeUserPassword:   "User/Password",
	AuthProviderTypeAPIKey:         "ApiKey",
	AuthProviderTypeApple:          "Apple",
	AuthProviderTypeGoogle:         "Google",
	AuthProviderTypeFacebook:       "Facebook",
	AuthProviderTypeCustomToken:    "Custom JWT",
	AuthProviderTypeCustomFunction: "Custom Function",
}

// Display returns the auth provider type display string; unrecognized (and
// empty) types display as "Unknown".
func (pt AuthProviderType) Display() string {
	if name, ok := authProviderDisplayNames[pt]; ok {
		return name
	}
	return "Unknown"
}
// AuthProviderTypes is a Realm app auth provider type slice
type AuthProviderTypes []AuthProviderType

// NewAuthProviderTypes returns an AuthProviderTypes from the provided strings
func NewAuthProviderTypes(apts ...string) AuthProviderTypes {
	out := make(AuthProviderTypes, 0, len(apts))
	for _, raw := range apts {
		out = append(out, AuthProviderType(raw))
	}
	return out
}
// join concatenates the provider type strings separated by sep.
func (apts AuthProviderTypes) join(sep string) string {
	parts := make([]string, len(apts))
	for i, apt := range apts {
		parts[i] = apt.String()
	}
	return strings.Join(parts, sep)
}
// createAPIKeyRequest is the JSON payload for creating an api key.
type createAPIKeyRequest struct {
	Name string `json:"name"`
}

// CreateAPIKey creates a new api key named apiKeyName for the Realm app and
// returns it (including the secret Key value from the response).
func (c *client) CreateAPIKey(groupID, appID, apiKeyName string) (APIKey, error) {
	res, resErr := c.doJSON(
		http.MethodPost,
		fmt.Sprintf(apiKeysPathPattern, groupID, appID),
		createAPIKeyRequest{apiKeyName},
		api.RequestOptions{},
	)
	if resErr != nil {
		return APIKey{}, resErr
	}
	// close the body on every path: the original returned before its
	// deferred Close on a non-201 status, leaking the connection
	defer res.Body.Close()
	if res.StatusCode != http.StatusCreated {
		return APIKey{}, api.ErrUnexpectedStatusCode{"create api key", res.StatusCode}
	}

	var apiKey APIKey
	if err := json.NewDecoder(res.Body).Decode(&apiKey); err != nil {
		return APIKey{}, err
	}
	return apiKey, nil
}
// createUserRequest is the JSON payload for creating an email/password user.
type createUserRequest struct {
	Email    string `json:"email"`
	Password string `json:"password"`
}

// CreateUser creates a new email/password user for the Realm app.
func (c *client) CreateUser(groupID, appID, email, password string) (User, error) {
	res, resErr := c.doJSON(
		http.MethodPost,
		fmt.Sprintf(usersPathPattern, groupID, appID),
		createUserRequest{email, password},
		api.RequestOptions{},
	)
	if resErr != nil {
		return User{}, resErr
	}
	// close the body on every path: the original returned before its
	// deferred Close on a non-201 status, leaking the connection
	defer res.Body.Close()
	if res.StatusCode != http.StatusCreated {
		return User{}, api.ErrUnexpectedStatusCode{"create user", res.StatusCode}
	}

	var user User
	if err := json.NewDecoder(res.Body).Decode(&user); err != nil {
		return User{}, err
	}
	return user, nil
}
// DeleteUser deletes the specified user from the Realm app.
func (c *client) DeleteUser(groupID, appID, userID string) error {
	res, resErr := c.do(
		http.MethodDelete,
		fmt.Sprintf(userPathPattern, groupID, appID, userID),
		api.RequestOptions{},
	)
	if resErr != nil {
		return resErr
	}
	// the original never closed the response body, leaking the connection
	defer res.Body.Close()
	if res.StatusCode != http.StatusNoContent {
		return api.ErrUnexpectedStatusCode{Action: "delete user", Actual: res.StatusCode}
	}
	return nil
}
// DisableUser disables the specified user, preventing authentication.
func (c *client) DisableUser(groupID, appID, userID string) error {
	res, resErr := c.do(
		http.MethodPut,
		fmt.Sprintf(userDisablePathPattern, groupID, appID, userID),
		api.RequestOptions{},
	)
	if resErr != nil {
		return resErr
	}
	// the original never closed the response body, leaking the connection
	defer res.Body.Close()
	if res.StatusCode != http.StatusNoContent {
		return api.ErrUnexpectedStatusCode{Action: "disable user", Actual: res.StatusCode}
	}
	return nil
}
// EnableUser re-enables a previously disabled user.
func (c *client) EnableUser(groupID, appID, userID string) error {
	res, resErr := c.do(
		http.MethodPut,
		fmt.Sprintf(userEnablePathPattern, groupID, appID, userID),
		api.RequestOptions{},
	)
	if resErr != nil {
		return resErr
	}
	// the original never closed the response body, leaking the connection
	defer res.Body.Close()
	if res.StatusCode != http.StatusNoContent {
		return api.ErrUnexpectedStatusCode{Action: "enable user", Actual: res.StatusCode}
	}
	return nil
}
// UserFilter represents the optional filter parameters available for lists of users
type UserFilter struct {
	IDs       []string           // restrict results to these user ids
	Pending   bool               // list pending registrations instead of confirmed users
	Providers []AuthProviderType // restrict to users with these auth provider identities
	State     UserState          // restrict to enabled/disabled users; empty matches all
}

// FindUsers returns the app users matching the filter. Pending users come
// from a dedicated endpoint; otherwise either a single filtered list call
// is made, or each requested id is fetched individually.
func (c *client) FindUsers(groupID, appID string, filter UserFilter) ([]User, error) {
	if filter.Pending {
		return c.getPendingUsers(groupID, appID, filter.IDs)
	}
	if len(filter.IDs) == 0 {
		return c.getUsers(groupID, appID, filter.State, filter.Providers)
	}
	return c.getUsersByIDs(groupID, appID, filter.IDs, filter.State, filter.Providers)
}
// RevokeUserSessions logs the user out everywhere by invalidating all of
// their sessions.
func (c *client) RevokeUserSessions(groupID, appID, userID string) error {
	res, resErr := c.do(
		http.MethodPut,
		fmt.Sprintf(userLogoutPathPattern, groupID, appID, userID),
		api.RequestOptions{},
	)
	if resErr != nil {
		return resErr
	}
	// the original never closed the response body, leaking the connection
	defer res.Body.Close()
	if res.StatusCode != http.StatusNoContent {
		return api.ErrUnexpectedStatusCode{Action: "revoke user", Actual: res.StatusCode}
	}
	return nil
}
// getPendingUsers lists the app's pending user registrations, optionally
// filtered client-side to the given user ids.
func (c *client) getPendingUsers(groupID, appID string, userIDs []string) ([]User, error) {
	res, resErr := c.do(
		http.MethodGet,
		fmt.Sprintf(pendingUsersPathPattern, groupID, appID),
		api.RequestOptions{},
	)
	if resErr != nil {
		return nil, resErr
	}
	// close on every path; the original leaked the body on non-200 statuses
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return nil, api.ErrUnexpectedStatusCode{"get pending users", res.StatusCode}
	}

	var users []User
	if err := json.NewDecoder(res.Body).Decode(&users); err != nil {
		return nil, err
	}

	if len(userIDs) == 0 {
		// no id filter requested
		return users, nil
	}

	userIDSet := make(map[string]struct{}, len(userIDs))
	for _, userID := range userIDs {
		userIDSet[userID] = struct{}{}
	}

	filtered := make([]User, 0, len(users))
	for _, user := range users {
		if _, ok := userIDSet[user.ID]; !ok {
			continue
		}
		// BUG FIX: the original did `filtered = append(users, user)`,
		// which returned the entire unfiltered list plus a duplicate
		filtered = append(filtered, user)
	}
	return filtered, nil
}
// getUser fetches a single app user by id.
func (c *client) getUser(groupID, appID, userID string) (User, error) {
	res, resErr := c.do(
		http.MethodGet,
		fmt.Sprintf(userPathPattern, groupID, appID, userID),
		api.RequestOptions{},
	)
	if resErr != nil {
		return User{}, resErr
	}
	// close on every path; the original leaked the body on non-200 statuses
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return User{}, api.ErrUnexpectedStatusCode{"get user", res.StatusCode}
	}

	var user User
	if err := json.NewDecoder(res.Body).Decode(&user); err != nil {
		return User{}, err
	}
	return user, nil
}
// getUsers lists the app's users, applying server-side state and provider
// type filters via query parameters when provided.
func (c *client) getUsers(groupID, appID string, userState UserState, authProviderTypes AuthProviderTypes) ([]User, error) {
	options := api.RequestOptions{Query: make(map[string]string)}
	if userState != UserStateNil {
		options.Query[usersQueryStatus] = string(userState)
	}
	if len(authProviderTypes) > 0 {
		options.Query[usersQueryProviderTypes] = authProviderTypes.join(",")
	}

	res, resErr := c.do(http.MethodGet, fmt.Sprintf(usersPathPattern, groupID, appID), options)
	if resErr != nil {
		return nil, resErr
	}
	// close on every path; the original leaked the body on non-200 statuses
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return nil, api.ErrUnexpectedStatusCode{"get users", res.StatusCode}
	}

	var users []User
	if err := json.NewDecoder(res.Body).Decode(&users); err != nil {
		return nil, err
	}
	return users, nil
}
// getUsersByIDs fetches each requested user individually, then filters
// client-side by state and (when given) auth provider type. Any failed
// individual lookup aborts the whole operation.
func (c *client) getUsersByIDs(groupID, appID string, userIDs []string, userState UserState, authProviderTypes []AuthProviderType) ([]User, error) {
	users := make([]User, 0, len(userIDs))
	for _, userID := range userIDs {
		user, err := c.getUser(groupID, appID, userID)
		if err != nil {
			return nil, err
		}
		if userMatchesState(user, userState) {
			users = append(users, user)
		}
	}

	if len(authProviderTypes) == 0 {
		return users, nil
	}

	providers := make(map[AuthProviderType]struct{}, len(authProviderTypes))
	for _, provider := range authProviderTypes {
		providers[provider] = struct{}{}
	}

	filtered := make([]User, 0, len(users))
	for _, user := range users {
		// keep the user if any identity comes from a requested provider
		var matchedProvider bool
		for _, identity := range user.Identities {
			if _, ok := providers[identity.ProviderType]; !ok {
				continue
			}
			matchedProvider = true
			break
		}
		if matchedProvider {
			filtered = append(filtered, user)
		}
	}
	return filtered, nil
}
// userMatchesState reports whether user satisfies the requested state
// filter; an empty (nil) state matches every user.
func userMatchesState(user User, userState UserState) bool {
	switch userState {
	case UserStateEnabled:
		return !user.Disabled
	case UserStateDisabled:
		return user.Disabled
	default:
		return true
	}
}
|
package main
import "fmt"
// 198. House Robber
// You are a professional robber planning to rob houses along a street. Each
// house holds a certain amount of cash; the only constraint is that adjacent
// houses share a linked security system that raises an alarm if two adjacent
// houses are broken into on the same night.
// Given a non-negative integer array of the amount stored in each house,
// compute the maximum you can rob in one night without triggering the alarm.
// Constraints:
// 0 <= nums.length <= 100
// 0 <= nums[i] <= 400
// https://leetcode-cn.com/problems/house-robber/
// main demos the house-robber solvers on sample inputs.
func main() {
	// fmt.Println(rob([]int{1, 2, 3, 1})) // 4
	fmt.Println(rob2([]int{2, 7, 9, 3, 1})) // 12 (= 2 + 9 + 1)
}
// rob solves the house-robber problem with a one-dimensional DP table.
// state[i] is the best loot using only the first i houses, with recurrence
//
//	state[i] = max(nums[i-1] + state[i-2], state[i-1])
//
// i.e. either rob house i-1 (and add the best total two houses back) or
// skip it. Time and space are O(n).
func rob(nums []int) int {
	n := len(nums)
	switch n {
	case 0:
		return 0
	case 1:
		return nums[0]
	}
	state := make([]int, n+1)
	state[1] = nums[0]
	for i := 2; i <= n; i++ {
		take := nums[i-1] + state[i-2]
		skip := state[i-1]
		if take > skip {
			state[i] = take
		} else {
			state[i] = skip
		}
	}
	return state[n]
}
// rob2 is the O(1)-space refinement of rob: instead of a full DP table it
// keeps only the best totals for the previous two positions while scanning
// the houses once.
func rob2(nums []int) (result int) {
	n := len(nums)
	if n == 0 {
		return 0
	}
	if n == 1 {
		return nums[0]
	}
	prev2, prev1 := 0, nums[0] // best totals two houses back and one house back
	for i := 1; i < n; i++ {
		take := nums[i] + prev2
		result = prev1
		if take > prev1 {
			result = take
		}
		prev2, prev1 = prev1, result
	}
	return result
}
// getMax returns the larger of a and b.
func getMax(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
package main
import "fmt"
// main demonstrates basic Go variable declaration, initialization and
// taking a variable's address.
func main() {
	// 1. Declaration: `var name type`; a declared variable must be used.
	// 2. A declared-but-uninitialized variable holds its zero value.
	// 3. Within one {} scope a variable name may only be declared once.
	var a int
	a = 10 // assignment: declare first, then assign
	fmt.Println(a)
	// 4. Multiple variables can be declared at once:
	//var b, c int
	//
	//b,c = 20,30
	//fmt.Println(b,c)
	// Initialization: declare and assign in one statement.
	var b int = 20 // initialized to 20, then reassigned below
	b = 30
	fmt.Println(&b) // prints b's address
	var value float64 =2
	fmt.Println(value)
	// The right-hand side of = may be any expression.
	var sum float64 = 1*9
	fmt.Println(sum)
}
|
// Copyright 2019 Radiation Detection and Imaging (RDI), LLC
// Use of this source code is governed by the BSD 3-clause
// license that can be found in the LICENSE file.
package data
import (
"github.com/proio-org/go-proio"
)
// EventProcessor is a function applied to a single proio event.
type EventProcessor func(*proio.Event)

// EventOp is a pipeline stage that applies EventProcessor to events
// concurrently while preserving their input order on the output channel.
type EventOp struct {
	Description    string
	EventProcessor EventProcessor
	Concurrency    int // max in-flight events; 0 means the package-level default
	MaxEventBuf    int // output buffer size / max completed-but-unwritten events; 0 means the default
}

// GetDescription returns the stage's human-readable description.
func (o EventOp) GetDescription() string {
	return o.Description
}
// Run consumes events from input, processes up to o.Concurrency of them in
// parallel, and emits them on the returned channel in their original input
// order. The output channel is closed once all events are written.
func (o EventOp) Run(input <-chan *proio.Event) <-chan *proio.Event {
	if o.Concurrency == 0 {
		o.Concurrency = *concurrency
	}
	if o.MaxEventBuf == 0 {
		o.MaxEventBuf = *maxEventBuf
	}

	output := make(chan *proio.Event, o.MaxEventBuf)
	go func() {
		defer close(output)

		procEvents := make(map[uint64]*proio.Event) // in-flight events keyed by input index
		doneEvents := make(map[uint64]*proio.Event) // processed events awaiting in-order write
		done := make(chan uint64)                   // worker completion notifications
		ackDone := func() {
			// block until one worker finishes, then move its event
			// from in-flight to done
			index := <-done
			doneEvents[index] = procEvents[index]
			delete(procEvents, index)
		}
		defer close(done)

		nRead := uint64(0)
		nWritten := uint64(0)
		writeOut := func() {
			// emit the run of consecutive completed events starting at nWritten
			for {
				if event, ok := doneEvents[nWritten]; ok {
					output <- event
					delete(doneEvents, nWritten)
					nWritten++
				} else {
					break
				}
			}
		}

		for event := range input {
			go func(event *proio.Event, done chan<- uint64, index uint64) {
				o.EventProcessor(event)
				done <- index
			}(event, done, nRead)
			procEvents[nRead] = event
			nRead++

			// backpressure: wait for completions while too many events are
			// in flight or buffered out of order
			for len(procEvents) >= o.Concurrency || len(doneEvents) >= o.MaxEventBuf {
				ackDone()
				writeOut()
			}
		}
		// drain remaining workers, then flush everything in order
		for len(procEvents) > 0 {
			ackDone()
		}
		writeOut()
	}()

	return output
}
|
package monitor
import (
"context"
"os"
"path/filepath"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
"github.com/containerd/typeurl"
"github.com/crosbymichael/boss/config"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// change is a pending container state transition applied by the monitor.
type change interface {
	apply(context.Context, *containerd.Client) error
}

// stopChange stops a container's task and flags its services as under maintenance.
type stopChange struct {
	container containerd.Container
	m         *Monitor
}
// apply marks the container's services as under maintenance (best effort —
// a registry failure is logged but does not abort the stop) and then kills
// the container's task.
func (s *stopChange) apply(ctx context.Context, client *containerd.Client) error {
	if err := s.m.register.EnableMaintainance(s.container.ID(), "manual stop"); err != nil {
		logrus.WithError(err).Error("setting service maintaince")
	}
	// forward killTask's result directly; the original wrapped it in a
	// redundant `if err != nil { return err }; return nil`
	return killTask(ctx, s.container)
}
// startChange (re)starts a container: it kills any existing task, creates
// a fresh one, wires up networking, and registers the container's services.
type startChange struct {
	container containerd.Container
	m         *Monitor
}
// apply starts the container's task. Network setup failures roll back the
// newly created task; service registration failures are logged but
// non-fatal.
func (s *startChange) apply(ctx context.Context, client *containerd.Client) error {
	// best-effort: tear down any leftover task first; the error is
	// deliberately ignored (there may be no task at all)
	killTask(ctx, s.container)
	config, err := getConfig(ctx, s.container)
	if err != nil {
		return err
	}
	task, err := s.container.NewTask(ctx, cio.NewCreator(cio.WithStdio))
	if err != nil {
		return err
	}
	ip, err := s.m.networks[config.Network].Create(task)
	if err != nil {
		// roll back: remove the task we just created
		if _, derr := task.Delete(ctx, containerd.WithProcessKill); derr != nil {
			logrus.WithError(derr).Error("delete task on failed network setup")
		}
		return err
	}
	if ip != "" {
		logrus.WithField("id", config.ID).WithField("ip", ip).Info("setup network interface")
		for name, srv := range config.Services {
			if err := s.m.register.Register(config.ID, name, ip, srv); err != nil {
				logrus.WithError(err).Error("register service")
			}
		}
	}
	if err := task.Start(ctx); err != nil {
		return err
	}
	// the service is live again; clear the maintenance flag (log-only on failure)
	if err := s.m.register.DisableMaintainance(config.ID); err != nil {
		logrus.WithError(err).Error("disable service maintenance")
	}
	return nil
}
// killTask gracefully stops the container's task: SIGTERM first, escalating
// to SIGKILL (via the goto retry) if the task has not exited within 10
// seconds. A container with no task is not an error; a task that cannot be
// waited on or signalled is deleted outright when possible.
func killTask(ctx context.Context, container containerd.Container) error {
	signal := unix.SIGTERM
	task, err := container.Task(ctx, nil)
	if err == nil {
		wait, err := task.Wait(ctx)
		if err != nil {
			// cannot wait on the task; try deleting it directly
			if _, derr := task.Delete(ctx); derr == nil {
				return nil
			}
			return err
		}
	kill:
		if err := task.Kill(ctx, signal, containerd.WithKillAll); err != nil {
			// signalling failed; fall back to deletion
			if _, derr := task.Delete(ctx); derr == nil {
				return nil
			}
			return err
		}
		select {
		case <-wait:
			if _, err := task.Delete(ctx); err != nil {
				return err
			}
		case <-time.After(10 * time.Second):
			// the task ignored SIGTERM; escalate and signal again
			signal = unix.SIGKILL
			goto kill
		}
	}
	return nil
}
// getConfig extracts the boss container configuration stored as a
// containerd extension on the container.
func getConfig(ctx context.Context, container containerd.Container) (*config.Container, error) {
	info, err := container.Info(ctx)
	if err != nil {
		return nil, err
	}
	d := info.Extensions[config.Extension]
	v, err := typeurl.UnmarshalAny(&d)
	if err != nil {
		return nil, err
	}
	return v.(*config.Container), nil
}
// deleteChange removes a container entirely: its on-disk state, service
// registrations, network, and the container itself with its snapshot.
type deleteChange struct {
	container containerd.Container
	m         *Monitor
}
// apply tears down the container: root dir removal and service/network
// cleanup are best effort (errors logged or ignored), then the container
// and its snapshot are deleted.
func (s *deleteChange) apply(ctx context.Context, client *containerd.Client) error {
	path := filepath.Join(config.Root, s.container.ID())
	if err := os.RemoveAll(path); err != nil {
		// non-fatal: keep tearing down even if the root dir lingers
		logrus.WithError(err).Errorf("delete root dir %s", path)
	}
	config, err := getConfig(ctx, s.container)
	if err != nil {
		return err
	}
	// best-effort cleanup; deregistration/network errors are ignored
	s.m.register.Deregister(s.container.ID())
	s.m.networks[config.Network].Remove(s.container)
	return s.container.Delete(ctx, containerd.WithSnapshotCleanup)
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package yamlcfg
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
cfgutil "github.com/GoogleCloudPlatform/testgrid/config"
"github.com/GoogleCloudPlatform/testgrid/pb/config"
"sigs.k8s.io/yaml"
)
// ReadConfig takes multiple source paths of the following form:
// If a path is a local file, the file is parsed as YAML;
// if a path is a directory, all files and directories within it are parsed.
// Optionally, defaultpath points to a YAML file of default settings used to
// pad out new entries.
// Returns a configuration proto containing the merged data from all sources.
func ReadConfig(paths []string, defaultpath string) (config.Configuration, error) {
	var result config.Configuration
	var defaults DefaultConfiguration
	if defaultpath != "" {
		b, err := ioutil.ReadFile(defaultpath)
		if err != nil {
			return result, fmt.Errorf("failed to read default at %s: %v", defaultpath, err)
		}
		defaults, err = LoadDefaults(b)
		if err != nil {
			return result, fmt.Errorf("failed to deserialize default at %s: %v", defaultpath, err)
		}
	}
	// walk every source path, merging each YAML file found into result
	err := SeekYAMLFiles(paths, func(path string, info os.FileInfo) error {
		// Read YAML file and Update config
		b, err := ioutil.ReadFile(path)
		if err != nil {
			return fmt.Errorf("failed to read %s: %v", path, err)
		}
		if err = Update(&result, b, &defaults); err != nil {
			return fmt.Errorf("failed to merge %s into config: %v", path, err)
		}
		return nil
	})
	return result, err
}
// Update reads the config in yamlData and merges it into cfg, appending its
// test groups, dashboards and dashboard groups. If reconcile is non-nil,
// new test groups and dashboard tabs are padded out with those defaults.
func Update(cfg *config.Configuration, yamlData []byte, reconcile *DefaultConfiguration) error {
	newConfig := &config.Configuration{}
	if err := yaml.Unmarshal(yamlData, newConfig); err != nil {
		return err
	}
	if cfg == nil {
		// NOTE(review): this only replaces the local pointer, so with a nil
		// cfg the merged result is silently discarded; callers must pass a
		// non-nil config to observe the update. Kept to avoid nil panics.
		cfg = &config.Configuration{}
	}

	for _, testgroup := range newConfig.TestGroups {
		if reconcile != nil {
			ReconcileTestGroup(testgroup, reconcile.DefaultTestGroup)
		}
		cfg.TestGroups = append(cfg.TestGroups, testgroup)
	}

	for _, dashboard := range newConfig.Dashboards {
		if reconcile != nil {
			for _, dashboardtab := range dashboard.DashboardTab {
				ReconcileDashboardTab(dashboardtab, reconcile.DefaultDashboardTab)
			}
		}
		cfg.Dashboards = append(cfg.Dashboards, dashboard)
	}

	// dashboard groups need no reconciliation; append them in one call
	// rather than the original element-by-element loop
	cfg.DashboardGroups = append(cfg.DashboardGroups, newConfig.DashboardGroups...)
	return nil
}
// MarshalYAML returns a YAML file representing the parsed configuration.
// Returns an error if the config is nil, fails validation, or cannot be
// encoded.
func MarshalYAML(c *config.Configuration) ([]byte, error) {
	if c == nil {
		return nil, errors.New("got an empty config.Configuration")
	}
	// validate before serializing so we never emit a broken config
	if err := cfgutil.Validate(c); err != nil {
		return nil, err
	}
	bytes, err := yaml.Marshal(c)
	if err != nil {
		return nil, fmt.Errorf("could not write config to yaml: %v", err)
	}
	return bytes, nil
}
// DefaultConfiguration holds the default values used to pad out newly
// parsed test groups and dashboard tabs.
type DefaultConfiguration struct {
	// A default testgroup with default initialization data
	DefaultTestGroup *config.TestGroup `json:"default_test_group,omitempty"`
	// A default dashboard tab with default initialization data
	DefaultDashboardTab *config.DashboardTab `json:"default_dashboard_tab,omitempty"`
}
// MissingFieldError is an error that includes the missing field.
type MissingFieldError struct {
	Field string
}

// Error implements the error interface, naming the absent field.
func (e MissingFieldError) Error() string {
	return "field missing or unset: " + e.Field
}
// ReconcileTestGroup sets unfilled currentTestGroup fields to the corresponding defaultTestGroup value, if present.
// Only zero-valued fields are replaced, so values explicitly set in the group win over defaults.
func ReconcileTestGroup(currentTestGroup *config.TestGroup, defaultTestGroup *config.TestGroup) {
	if currentTestGroup.DaysOfResults == 0 {
		currentTestGroup.DaysOfResults = defaultTestGroup.DaysOfResults
	}
	if currentTestGroup.TestsNamePolicy == config.TestGroup_TESTS_NAME_UNSPECIFIED {
		currentTestGroup.TestsNamePolicy = defaultTestGroup.TestsNamePolicy
	}
	// NOTE: for booleans, an explicit false is indistinguishable from "unset",
	// so a false here is still overwritten by the default.
	if !currentTestGroup.IgnorePending {
		currentTestGroup.IgnorePending = defaultTestGroup.IgnorePending
	}
	if !currentTestGroup.IgnoreSkip {
		currentTestGroup.IgnoreSkip = defaultTestGroup.IgnoreSkip
	}
	if currentTestGroup.ColumnHeader == nil {
		currentTestGroup.ColumnHeader = defaultTestGroup.ColumnHeader
	}
	if currentTestGroup.NumColumnsRecent == 0 {
		currentTestGroup.NumColumnsRecent = defaultTestGroup.NumColumnsRecent
	}
	if currentTestGroup.AlertStaleResultsHours == 0 {
		currentTestGroup.AlertStaleResultsHours = defaultTestGroup.AlertStaleResultsHours
	}
	if currentTestGroup.NumFailuresToAlert == 0 {
		currentTestGroup.NumFailuresToAlert = defaultTestGroup.NumFailuresToAlert
	}
	if currentTestGroup.CodeSearchPath == "" {
		currentTestGroup.CodeSearchPath = defaultTestGroup.CodeSearchPath
	}
	if currentTestGroup.NumPassesToDisableAlert == 0 {
		currentTestGroup.NumPassesToDisableAlert = defaultTestGroup.NumPassesToDisableAlert
	}
	// is_external and user_kubernetes_client should always be true
	currentTestGroup.IsExternal = true
	currentTestGroup.UseKubernetesClient = true
}
// ReconcileDashboardTab sets unfilled currentTab fields to the corresponding defaultTab value, if present.
// Each field is reconciled independently; only zero-valued fields are replaced.
func ReconcileDashboardTab(currentTab *config.DashboardTab, defaultTab *config.DashboardTab) {
	// Scalar settings.
	if currentTab.BugComponent == 0 {
		currentTab.BugComponent = defaultTab.BugComponent
	}
	if currentTab.CodeSearchPath == "" {
		currentTab.CodeSearchPath = defaultTab.CodeSearchPath
	}
	if currentTab.NumColumnsRecent == 0 {
		currentTab.NumColumnsRecent = defaultTab.NumColumnsRecent
	}
	if currentTab.ResultsText == "" {
		currentTab.ResultsText = defaultTab.ResultsText
	}
	// Link templates.
	if currentTab.OpenTestTemplate == nil {
		currentTab.OpenTestTemplate = defaultTab.OpenTestTemplate
	}
	if currentTab.OpenBugTemplate == nil {
		currentTab.OpenBugTemplate = defaultTab.OpenBugTemplate
	}
	if currentTab.FileBugTemplate == nil {
		currentTab.FileBugTemplate = defaultTab.FileBugTemplate
	}
	if currentTab.AttachBugTemplate == nil {
		currentTab.AttachBugTemplate = defaultTab.AttachBugTemplate
	}
	if currentTab.ResultsUrlTemplate == nil {
		currentTab.ResultsUrlTemplate = defaultTab.ResultsUrlTemplate
	}
	if currentTab.CodeSearchUrlTemplate == nil {
		currentTab.CodeSearchUrlTemplate = defaultTab.CodeSearchUrlTemplate
	}
	// Alerting configuration.
	if currentTab.AlertOptions == nil {
		currentTab.AlertOptions = defaultTab.AlertOptions
	}
}
// LoadDefaults reads and validates default settings from YAML.
// Returns an error if the defaultConfig is partially or completely missing.
func LoadDefaults(yamlData []byte) (DefaultConfiguration, error) {
	var result DefaultConfiguration
	err := yaml.Unmarshal(yamlData, &result)
	if err != nil {
		return result, err
	}
	// Both defaults are mandatory; report exactly which one is absent.
	if result.DefaultTestGroup == nil {
		return result, MissingFieldError{"DefaultTestGroup"}
	}
	if result.DefaultDashboardTab == nil {
		return result, MissingFieldError{"DefaultDashboardTab"}
	}
	return result, nil
}
// walks through paths and directories, calling the passed function on each YAML file
// future modifications to what Configurator sees as a "config file" can be made here
//TODO(chases2) Rewrite so that it walks recursively, not lexically.
func SeekYAMLFiles(paths []string, callFunc func(path string, info os.FileInfo) error) error {
for _, path := range paths {
_, err := os.Stat(path)
if err != nil {
return fmt.Errorf("Failed status call on %s: %v", path, err)
}
err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
// A bad file should not stop us from parsing the directory
if err != nil {
return nil
}
// Only YAML files will be
if filepath.Ext(path) != ".yaml" && filepath.Ext(path) != ".yml" {
return nil
}
if info.IsDir() {
return nil
}
return callFunc(path, info)
})
if err != nil {
return fmt.Errorf("Failed to walk through %s: %v", path, err)
}
}
return nil
}
|
package knowledge
import (
"testing"
"github.com/clems4ever/go-graphkb/internal/schema"
"github.com/stretchr/testify/assert"
)
// TestShouldRelateAssets verifies that Relate registers both endpoint assets
// and the relation between them.
func TestShouldRelateAssets(t *testing.T) {
	g := NewGraph()
	binder := NewGraphBinder(g)
	relation := schema.RelationType{
		FromType: "from_type",
		ToType:   "to_type",
		Type:     "rel_type",
	}
	binder.Relate("from", relation, "to")
	assert.Len(t, g.Assets(), 2)
	assert.Len(t, g.Relations(), 1)
	// assert.Equal takes (t, expected, actual); keeping the literal first
	// makes failure messages label expected/actual correctly.
	assert.Equal(t, map[Asset]GraphEntryAction{
		{Type: "from_type", Key: "from"}: GraphEntryAdd,
		{Type: "to_type", Key: "to"}:     GraphEntryAdd,
	}, g.Assets())
}
// TestShouldBindAsset verifies that Bind registers a single asset and no
// relations.
func TestShouldBindAsset(t *testing.T) {
	g := NewGraph()
	binder := NewGraphBinder(g)
	binder.Bind("from", "from_type")
	assert.Len(t, g.Assets(), 1)
	assert.Len(t, g.Relations(), 0)
	// assert.Equal takes (t, expected, actual); keeping the literal first
	// makes failure messages label expected/actual correctly.
	assert.Equal(t, map[Asset]GraphEntryAction{
		{Type: "from_type", Key: "from"}: GraphEntryAdd,
	}, g.Assets())
}
|
package astutils
import (
"fmt"
"github.com/kyleconroy/sqlc/internal/sql/ast"
"github.com/kyleconroy/sqlc/internal/sql/ast/pg"
)
// Visitor is implemented by types that inspect AST nodes during a Walk.
// Visit is invoked for each node; returning nil stops the walk from
// descending into that node's children.
type Visitor interface {
	Visit(ast.Node) Visitor
}
// VisitorFunc adapts a plain function to the Visitor interface.
type VisitorFunc func(ast.Node)

// Visit invokes vf on the node and returns vf, so the walk always
// continues into the node's children.
func (vf VisitorFunc) Visit(node ast.Node) Visitor {
	vf(node)
	return vf
}
func Walk(f Visitor, node ast.Node) {
if f = f.Visit(node); f == nil {
return
}
switch n := node.(type) {
case *ast.AlterTableCmd:
if n.Def != nil {
Walk(f, n.Def)
}
case *ast.AlterTableSetSchemaStmt:
if n.Table != nil {
Walk(f, n.Table)
}
case *ast.AlterTableStmt:
if n.Table != nil {
Walk(f, n.Table)
}
if n.Cmds != nil {
Walk(f, n.Cmds)
}
case *ast.AlterTypeAddValueStmt:
if n.Type != nil {
Walk(f, n.Type)
}
case *ast.AlterTypeRenameValueStmt:
if n.Type != nil {
Walk(f, n.Type)
}
case *ast.ColumnDef:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
case *ast.ColumnRef:
// pass
case *ast.CommentOnColumnStmt:
if n.Table != nil {
Walk(f, n.Table)
}
if n.Col != nil {
Walk(f, n.Col)
}
case *ast.CommentOnSchemaStmt:
if n.Schema != nil {
Walk(f, n.Schema)
}
case *ast.CommentOnTableStmt:
if n.Table != nil {
Walk(f, n.Table)
}
case *ast.CommentOnTypeStmt:
if n.Type != nil {
Walk(f, n.Type)
}
case *ast.CompositeTypeStmt:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
case *ast.CreateEnumStmt:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
if n.Vals != nil {
Walk(f, n.Vals)
}
case *ast.CreateFunctionStmt:
if n.ReturnType != nil {
Walk(f, n.ReturnType)
}
if n.Func != nil {
Walk(f, n.Func)
}
case *ast.CreateSchemaStmt:
// pass
case *ast.CreateTableStmt:
if n.Name != nil {
Walk(f, n.Name)
}
case *ast.DropFunctionStmt:
// pass
case *ast.DropSchemaStmt:
// pass
case *ast.DropTableStmt:
// pass
case *ast.DropTypeStmt:
// pass
case *ast.FuncName:
// pass
case *ast.FuncParam:
if n.Type != nil {
Walk(f, n.Type)
}
if n.DefExpr != nil {
Walk(f, n.DefExpr)
}
case *ast.FuncSpec:
if n.Name != nil {
Walk(f, n.Name)
}
case *ast.List:
for _, item := range n.Items {
Walk(f, item)
}
case *ast.RawStmt:
if n.Stmt != nil {
Walk(f, n.Stmt)
}
case *ast.RenameColumnStmt:
if n.Table != nil {
Walk(f, n.Table)
}
if n.Col != nil {
Walk(f, n.Col)
}
case *ast.RenameTableStmt:
if n.Table != nil {
Walk(f, n.Table)
}
case *ast.ResTarget:
if n.Val != nil {
Walk(f, n.Val)
}
case *ast.SelectStmt:
if n.Fields != nil {
Walk(f, n.Fields)
}
if n.From != nil {
Walk(f, n.From)
}
case *ast.Statement:
if n.Raw != nil {
Walk(f, n.Raw)
}
case *ast.String:
// pass
case *ast.TODO:
// pass
case *ast.TableName:
// pass
case *ast.TypeName:
// pass
case *pg.A_ArrayExpr:
if n.Elements != nil {
Walk(f, n.Elements)
}
case *pg.A_Const:
if n.Val != nil {
Walk(f, n.Val)
}
case *pg.A_Expr:
if n.Name != nil {
Walk(f, n.Name)
}
if n.Lexpr != nil {
Walk(f, n.Lexpr)
}
if n.Rexpr != nil {
Walk(f, n.Rexpr)
}
case *pg.A_Indices:
if n.Lidx != nil {
Walk(f, n.Lidx)
}
if n.Uidx != nil {
Walk(f, n.Uidx)
}
case *pg.A_Indirection:
if n.Arg != nil {
Walk(f, n.Arg)
}
if n.Indirection != nil {
Walk(f, n.Indirection)
}
case *pg.A_Star:
// pass
case *pg.AccessPriv:
if n.Cols != nil {
Walk(f, n.Cols)
}
case *pg.Aggref:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Aggargtypes != nil {
Walk(f, n.Aggargtypes)
}
if n.Aggdirectargs != nil {
Walk(f, n.Aggdirectargs)
}
if n.Args != nil {
Walk(f, n.Args)
}
if n.Aggorder != nil {
Walk(f, n.Aggorder)
}
if n.Aggdistinct != nil {
Walk(f, n.Aggdistinct)
}
if n.Aggfilter != nil {
Walk(f, n.Aggfilter)
}
case *pg.Alias:
if n.Colnames != nil {
Walk(f, n.Colnames)
}
case *pg.AlterCollationStmt:
if n.Collname != nil {
Walk(f, n.Collname)
}
case *pg.AlterDatabaseSetStmt:
if n.Setstmt != nil {
Walk(f, n.Setstmt)
}
case *pg.AlterDatabaseStmt:
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterDefaultPrivilegesStmt:
if n.Options != nil {
Walk(f, n.Options)
}
if n.Action != nil {
Walk(f, n.Action)
}
case *pg.AlterDomainStmt:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
if n.Def != nil {
Walk(f, n.Def)
}
case *pg.AlterEnumStmt:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
case *pg.AlterEventTrigStmt:
// pass
case *pg.AlterExtensionContentsStmt:
if n.Object != nil {
Walk(f, n.Object)
}
case *pg.AlterExtensionStmt:
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterFdwStmt:
if n.FuncOptions != nil {
Walk(f, n.FuncOptions)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterForeignServerStmt:
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterFunctionStmt:
if n.Func != nil {
Walk(f, n.Func)
}
if n.Actions != nil {
Walk(f, n.Actions)
}
case *pg.AlterObjectDependsStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.Object != nil {
Walk(f, n.Object)
}
if n.Extname != nil {
Walk(f, n.Extname)
}
case *pg.AlterObjectSchemaStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.Object != nil {
Walk(f, n.Object)
}
case *pg.AlterOpFamilyStmt:
if n.Opfamilyname != nil {
Walk(f, n.Opfamilyname)
}
if n.Items != nil {
Walk(f, n.Items)
}
case *pg.AlterOperatorStmt:
if n.Opername != nil {
Walk(f, n.Opername)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterOwnerStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.Object != nil {
Walk(f, n.Object)
}
if n.Newowner != nil {
Walk(f, n.Newowner)
}
case *pg.AlterPolicyStmt:
if n.Table != nil {
Walk(f, n.Table)
}
if n.Roles != nil {
Walk(f, n.Roles)
}
if n.Qual != nil {
Walk(f, n.Qual)
}
if n.WithCheck != nil {
Walk(f, n.WithCheck)
}
case *pg.AlterPublicationStmt:
if n.Options != nil {
Walk(f, n.Options)
}
if n.Tables != nil {
Walk(f, n.Tables)
}
case *pg.AlterRoleSetStmt:
if n.Role != nil {
Walk(f, n.Role)
}
if n.Setstmt != nil {
Walk(f, n.Setstmt)
}
case *pg.AlterRoleStmt:
if n.Role != nil {
Walk(f, n.Role)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterSeqStmt:
if n.Sequence != nil {
Walk(f, n.Sequence)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterSubscriptionStmt:
if n.Publication != nil {
Walk(f, n.Publication)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterSystemStmt:
if n.Setstmt != nil {
Walk(f, n.Setstmt)
}
case *pg.AlterTSConfigurationStmt:
if n.Cfgname != nil {
Walk(f, n.Cfgname)
}
if n.Tokentype != nil {
Walk(f, n.Tokentype)
}
if n.Dicts != nil {
Walk(f, n.Dicts)
}
case *pg.AlterTSDictionaryStmt:
if n.Dictname != nil {
Walk(f, n.Dictname)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterTableCmd:
if n.Newowner != nil {
Walk(f, n.Newowner)
}
if n.Def != nil {
Walk(f, n.Def)
}
case *pg.AlterTableMoveAllStmt:
if n.Roles != nil {
Walk(f, n.Roles)
}
case *pg.AlterTableSpaceOptionsStmt:
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlterTableStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.Cmds != nil {
Walk(f, n.Cmds)
}
case *pg.AlterUserMappingStmt:
if n.User != nil {
Walk(f, n.User)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.AlternativeSubPlan:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Subplans != nil {
Walk(f, n.Subplans)
}
case *pg.ArrayCoerceExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.ArrayExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Elements != nil {
Walk(f, n.Elements)
}
case *pg.ArrayRef:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Refupperindexpr != nil {
Walk(f, n.Refupperindexpr)
}
if n.Reflowerindexpr != nil {
Walk(f, n.Reflowerindexpr)
}
if n.Refexpr != nil {
Walk(f, n.Refexpr)
}
if n.Refassgnexpr != nil {
Walk(f, n.Refassgnexpr)
}
case *pg.BitString:
// pass
case *pg.BlockIdData:
// pass
case *pg.BoolExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.BooleanTest:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.CaseExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
if n.Args != nil {
Walk(f, n.Args)
}
if n.Defresult != nil {
Walk(f, n.Defresult)
}
case *pg.CaseTestExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
case *pg.CaseWhen:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Expr != nil {
Walk(f, n.Expr)
}
if n.Result != nil {
Walk(f, n.Result)
}
case *pg.CheckPointStmt:
// pass
case *pg.ClosePortalStmt:
// pass
case *pg.ClusterStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
case *pg.CoalesceExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.CoerceToDomain:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.CoerceToDomainValue:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
case *pg.CoerceViaIO:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.CollateClause:
if n.Arg != nil {
Walk(f, n.Arg)
}
if n.Collname != nil {
Walk(f, n.Collname)
}
case *pg.CollateExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.ColumnDef:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
if n.RawDefault != nil {
Walk(f, n.RawDefault)
}
if n.CookedDefault != nil {
Walk(f, n.CookedDefault)
}
if n.CollClause != nil {
Walk(f, n.CollClause)
}
if n.Constraints != nil {
Walk(f, n.Constraints)
}
if n.Fdwoptions != nil {
Walk(f, n.Fdwoptions)
}
case *pg.ColumnRef:
if n.Fields != nil {
Walk(f, n.Fields)
}
case *pg.CommentStmt:
if n.Object != nil {
Walk(f, n.Object)
}
case *pg.CommonTableExpr:
if n.Aliascolnames != nil {
Walk(f, n.Aliascolnames)
}
if n.Ctequery != nil {
Walk(f, n.Ctequery)
}
if n.Ctecolnames != nil {
Walk(f, n.Ctecolnames)
}
if n.Ctecoltypes != nil {
Walk(f, n.Ctecoltypes)
}
if n.Ctecoltypmods != nil {
Walk(f, n.Ctecoltypmods)
}
if n.Ctecolcollations != nil {
Walk(f, n.Ctecolcollations)
}
case *pg.CompositeTypeStmt:
if n.Typevar != nil {
Walk(f, n.Typevar)
}
if n.Coldeflist != nil {
Walk(f, n.Coldeflist)
}
case *pg.Const:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
case *pg.Constraint:
if n.RawExpr != nil {
Walk(f, n.RawExpr)
}
if n.Keys != nil {
Walk(f, n.Keys)
}
if n.Exclusions != nil {
Walk(f, n.Exclusions)
}
if n.Options != nil {
Walk(f, n.Options)
}
if n.WhereClause != nil {
Walk(f, n.WhereClause)
}
if n.Pktable != nil {
Walk(f, n.Pktable)
}
if n.FkAttrs != nil {
Walk(f, n.FkAttrs)
}
if n.PkAttrs != nil {
Walk(f, n.PkAttrs)
}
if n.OldConpfeqop != nil {
Walk(f, n.OldConpfeqop)
}
case *pg.ConstraintsSetStmt:
if n.Constraints != nil {
Walk(f, n.Constraints)
}
case *pg.ConvertRowtypeExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.CopyStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.Query != nil {
Walk(f, n.Query)
}
if n.Attlist != nil {
Walk(f, n.Attlist)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateAmStmt:
if n.HandlerName != nil {
Walk(f, n.HandlerName)
}
case *pg.CreateCastStmt:
if n.Sourcetype != nil {
Walk(f, n.Sourcetype)
}
if n.Targettype != nil {
Walk(f, n.Targettype)
}
if n.Func != nil {
Walk(f, n.Func)
}
case *pg.CreateConversionStmt:
if n.ConversionName != nil {
Walk(f, n.ConversionName)
}
if n.FuncName != nil {
Walk(f, n.FuncName)
}
case *pg.CreateDomainStmt:
if n.Domainname != nil {
Walk(f, n.Domainname)
}
if n.TypeName != nil {
Walk(f, n.TypeName)
}
if n.CollClause != nil {
Walk(f, n.CollClause)
}
if n.Constraints != nil {
Walk(f, n.Constraints)
}
case *pg.CreateEnumStmt:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
if n.Vals != nil {
Walk(f, n.Vals)
}
case *pg.CreateEventTrigStmt:
if n.Whenclause != nil {
Walk(f, n.Whenclause)
}
if n.Funcname != nil {
Walk(f, n.Funcname)
}
case *pg.CreateExtensionStmt:
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateFdwStmt:
if n.FuncOptions != nil {
Walk(f, n.FuncOptions)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateForeignServerStmt:
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateForeignTableStmt:
if n.Base != nil {
Walk(f, n.Base)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateFunctionStmt:
if n.Funcname != nil {
Walk(f, n.Funcname)
}
if n.Parameters != nil {
Walk(f, n.Parameters)
}
if n.ReturnType != nil {
Walk(f, n.ReturnType)
}
if n.Options != nil {
Walk(f, n.Options)
}
if n.WithClause != nil {
Walk(f, n.WithClause)
}
case *pg.CreateOpClassItem:
if n.Name != nil {
Walk(f, n.Name)
}
if n.OrderFamily != nil {
Walk(f, n.OrderFamily)
}
if n.ClassArgs != nil {
Walk(f, n.ClassArgs)
}
if n.Storedtype != nil {
Walk(f, n.Storedtype)
}
case *pg.CreateOpClassStmt:
if n.Opclassname != nil {
Walk(f, n.Opclassname)
}
if n.Opfamilyname != nil {
Walk(f, n.Opfamilyname)
}
if n.Datatype != nil {
Walk(f, n.Datatype)
}
if n.Items != nil {
Walk(f, n.Items)
}
case *pg.CreateOpFamilyStmt:
if n.Opfamilyname != nil {
Walk(f, n.Opfamilyname)
}
case *pg.CreatePLangStmt:
if n.Plhandler != nil {
Walk(f, n.Plhandler)
}
if n.Plinline != nil {
Walk(f, n.Plinline)
}
if n.Plvalidator != nil {
Walk(f, n.Plvalidator)
}
case *pg.CreatePolicyStmt:
if n.Table != nil {
Walk(f, n.Table)
}
if n.Roles != nil {
Walk(f, n.Roles)
}
if n.Qual != nil {
Walk(f, n.Qual)
}
if n.WithCheck != nil {
Walk(f, n.WithCheck)
}
case *pg.CreatePublicationStmt:
if n.Options != nil {
Walk(f, n.Options)
}
if n.Tables != nil {
Walk(f, n.Tables)
}
case *pg.CreateRangeStmt:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
if n.Params != nil {
Walk(f, n.Params)
}
case *pg.CreateRoleStmt:
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateSchemaStmt:
if n.Authrole != nil {
Walk(f, n.Authrole)
}
if n.SchemaElts != nil {
Walk(f, n.SchemaElts)
}
case *pg.CreateSeqStmt:
if n.Sequence != nil {
Walk(f, n.Sequence)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateStatsStmt:
if n.Defnames != nil {
Walk(f, n.Defnames)
}
if n.StatTypes != nil {
Walk(f, n.StatTypes)
}
if n.Exprs != nil {
Walk(f, n.Exprs)
}
if n.Relations != nil {
Walk(f, n.Relations)
}
case *pg.CreateStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.TableElts != nil {
Walk(f, n.TableElts)
}
if n.InhRelations != nil {
Walk(f, n.InhRelations)
}
if n.Partbound != nil {
Walk(f, n.Partbound)
}
if n.Partspec != nil {
Walk(f, n.Partspec)
}
if n.OfTypename != nil {
Walk(f, n.OfTypename)
}
if n.Constraints != nil {
Walk(f, n.Constraints)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateSubscriptionStmt:
if n.Publication != nil {
Walk(f, n.Publication)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateTableAsStmt:
if n.Query != nil {
Walk(f, n.Query)
}
if n.Into != nil {
Walk(f, n.Into)
}
case *pg.CreateTableSpaceStmt:
if n.Owner != nil {
Walk(f, n.Owner)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreateTransformStmt:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
if n.Fromsql != nil {
Walk(f, n.Fromsql)
}
if n.Tosql != nil {
Walk(f, n.Tosql)
}
case *pg.CreateTrigStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.Funcname != nil {
Walk(f, n.Funcname)
}
if n.Args != nil {
Walk(f, n.Args)
}
if n.Columns != nil {
Walk(f, n.Columns)
}
if n.WhenClause != nil {
Walk(f, n.WhenClause)
}
if n.TransitionRels != nil {
Walk(f, n.TransitionRels)
}
if n.Constrrel != nil {
Walk(f, n.Constrrel)
}
case *pg.CreateUserMappingStmt:
if n.User != nil {
Walk(f, n.User)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CreatedbStmt:
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.CurrentOfExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
case *pg.DeallocateStmt:
// pass
case *pg.DeclareCursorStmt:
if n.Query != nil {
Walk(f, n.Query)
}
case *pg.DefElem:
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.DefineStmt:
if n.Defnames != nil {
Walk(f, n.Defnames)
}
if n.Args != nil {
Walk(f, n.Args)
}
if n.Definition != nil {
Walk(f, n.Definition)
}
case *pg.DeleteStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.UsingClause != nil {
Walk(f, n.UsingClause)
}
if n.WhereClause != nil {
Walk(f, n.WhereClause)
}
if n.ReturningList != nil {
Walk(f, n.ReturningList)
}
if n.WithClause != nil {
Walk(f, n.WithClause)
}
case *pg.DiscardStmt:
// pass
case *pg.DoStmt:
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.DropOwnedStmt:
if n.Roles != nil {
Walk(f, n.Roles)
}
case *pg.DropRoleStmt:
if n.Roles != nil {
Walk(f, n.Roles)
}
case *pg.DropStmt:
if n.Objects != nil {
Walk(f, n.Objects)
}
case *pg.DropSubscriptionStmt:
// pass
case *pg.DropTableSpaceStmt:
// pass
case *pg.DropUserMappingStmt:
if n.User != nil {
Walk(f, n.User)
}
case *pg.DropdbStmt:
// pass
case *pg.ExecuteStmt:
if n.Params != nil {
Walk(f, n.Params)
}
case *pg.ExplainStmt:
if n.Query != nil {
Walk(f, n.Query)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.Expr:
// pass
case *pg.FetchStmt:
// pass
case *pg.FieldSelect:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.FieldStore:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
if n.Newvals != nil {
Walk(f, n.Newvals)
}
if n.Fieldnums != nil {
Walk(f, n.Fieldnums)
}
case *pg.Float:
// pass
case *pg.FromExpr:
if n.Fromlist != nil {
Walk(f, n.Fromlist)
}
if n.Quals != nil {
Walk(f, n.Quals)
}
case *ast.FuncCall:
if n.Func != nil {
Walk(f, n.Func)
}
if n.Funcname != nil {
Walk(f, n.Funcname)
}
if n.Args != nil {
Walk(f, n.Args)
}
if n.AggOrder != nil {
Walk(f, n.AggOrder)
}
if n.AggFilter != nil {
Walk(f, n.AggFilter)
}
if n.Over != nil {
Walk(f, n.Over)
}
case *pg.FuncExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.FunctionParameter:
if n.ArgType != nil {
Walk(f, n.ArgType)
}
if n.Defexpr != nil {
Walk(f, n.Defexpr)
}
case *pg.GrantRoleStmt:
if n.GrantedRoles != nil {
Walk(f, n.GrantedRoles)
}
if n.GranteeRoles != nil {
Walk(f, n.GranteeRoles)
}
if n.Grantor != nil {
Walk(f, n.Grantor)
}
case *pg.GrantStmt:
if n.Objects != nil {
Walk(f, n.Objects)
}
if n.Privileges != nil {
Walk(f, n.Privileges)
}
if n.Grantees != nil {
Walk(f, n.Grantees)
}
case *pg.GroupingFunc:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Args != nil {
Walk(f, n.Args)
}
if n.Refs != nil {
Walk(f, n.Refs)
}
if n.Cols != nil {
Walk(f, n.Cols)
}
case *pg.GroupingSet:
if n.Content != nil {
Walk(f, n.Content)
}
case *pg.ImportForeignSchemaStmt:
if n.TableList != nil {
Walk(f, n.TableList)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.IndexElem:
if n.Expr != nil {
Walk(f, n.Expr)
}
if n.Collation != nil {
Walk(f, n.Collation)
}
if n.Opclass != nil {
Walk(f, n.Opclass)
}
case *pg.IndexStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.IndexParams != nil {
Walk(f, n.IndexParams)
}
if n.Options != nil {
Walk(f, n.Options)
}
if n.WhereClause != nil {
Walk(f, n.WhereClause)
}
if n.ExcludeOpNames != nil {
Walk(f, n.ExcludeOpNames)
}
case *pg.InferClause:
if n.IndexElems != nil {
Walk(f, n.IndexElems)
}
if n.WhereClause != nil {
Walk(f, n.WhereClause)
}
case *pg.InferenceElem:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Expr != nil {
Walk(f, n.Expr)
}
case *pg.InlineCodeBlock:
// pass
case *pg.InsertStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.Cols != nil {
Walk(f, n.Cols)
}
if n.SelectStmt != nil {
Walk(f, n.SelectStmt)
}
if n.OnConflictClause != nil {
Walk(f, n.OnConflictClause)
}
if n.ReturningList != nil {
Walk(f, n.ReturningList)
}
if n.WithClause != nil {
Walk(f, n.WithClause)
}
case *pg.Integer:
// pass
case *pg.IntoClause:
if n.Rel != nil {
Walk(f, n.Rel)
}
if n.ColNames != nil {
Walk(f, n.ColNames)
}
if n.Options != nil {
Walk(f, n.Options)
}
if n.ViewQuery != nil {
Walk(f, n.ViewQuery)
}
case *pg.JoinExpr:
if n.Larg != nil {
Walk(f, n.Larg)
}
if n.Rarg != nil {
Walk(f, n.Rarg)
}
if n.UsingClause != nil {
Walk(f, n.UsingClause)
}
if n.Quals != nil {
Walk(f, n.Quals)
}
if n.Alias != nil {
Walk(f, n.Alias)
}
case *pg.ListenStmt:
// pass
case *pg.LoadStmt:
// pass
case *pg.LockStmt:
if n.Relations != nil {
Walk(f, n.Relations)
}
case *pg.LockingClause:
if n.LockedRels != nil {
Walk(f, n.LockedRels)
}
case *pg.MinMaxExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.MultiAssignRef:
if n.Source != nil {
Walk(f, n.Source)
}
case *pg.NamedArgExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.NextValueExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
case *pg.NotifyStmt:
// pass
case *pg.Null:
// pass
case *pg.NullTest:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.ObjectWithArgs:
if n.Objname != nil {
Walk(f, n.Objname)
}
if n.Objargs != nil {
Walk(f, n.Objargs)
}
case *pg.OnConflictClause:
if n.Infer != nil {
Walk(f, n.Infer)
}
if n.TargetList != nil {
Walk(f, n.TargetList)
}
if n.WhereClause != nil {
Walk(f, n.WhereClause)
}
case *pg.OnConflictExpr:
if n.ArbiterElems != nil {
Walk(f, n.ArbiterElems)
}
if n.ArbiterWhere != nil {
Walk(f, n.ArbiterWhere)
}
if n.OnConflictSet != nil {
Walk(f, n.OnConflictSet)
}
if n.OnConflictWhere != nil {
Walk(f, n.OnConflictWhere)
}
if n.ExclRelTlist != nil {
Walk(f, n.ExclRelTlist)
}
case *pg.OpExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.Param:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
case *pg.ParamExecData:
// pass
case *pg.ParamExternData:
// pass
case *pg.ParamListInfoData:
// pass
case *pg.ParamRef:
// pass
case *pg.PartitionBoundSpec:
if n.Listdatums != nil {
Walk(f, n.Listdatums)
}
if n.Lowerdatums != nil {
Walk(f, n.Lowerdatums)
}
if n.Upperdatums != nil {
Walk(f, n.Upperdatums)
}
case *pg.PartitionCmd:
if n.Name != nil {
Walk(f, n.Name)
}
if n.Bound != nil {
Walk(f, n.Bound)
}
case *pg.PartitionElem:
if n.Expr != nil {
Walk(f, n.Expr)
}
if n.Collation != nil {
Walk(f, n.Collation)
}
if n.Opclass != nil {
Walk(f, n.Opclass)
}
case *pg.PartitionRangeDatum:
if n.Value != nil {
Walk(f, n.Value)
}
case *pg.PartitionSpec:
if n.PartParams != nil {
Walk(f, n.PartParams)
}
case *pg.PrepareStmt:
if n.Argtypes != nil {
Walk(f, n.Argtypes)
}
if n.Query != nil {
Walk(f, n.Query)
}
case *pg.Query:
if n.UtilityStmt != nil {
Walk(f, n.UtilityStmt)
}
if n.CteList != nil {
Walk(f, n.CteList)
}
if n.Rtable != nil {
Walk(f, n.Rtable)
}
if n.Jointree != nil {
Walk(f, n.Jointree)
}
if n.TargetList != nil {
Walk(f, n.TargetList)
}
if n.OnConflict != nil {
Walk(f, n.OnConflict)
}
if n.ReturningList != nil {
Walk(f, n.ReturningList)
}
if n.GroupClause != nil {
Walk(f, n.GroupClause)
}
if n.GroupingSets != nil {
Walk(f, n.GroupingSets)
}
if n.HavingQual != nil {
Walk(f, n.HavingQual)
}
if n.WindowClause != nil {
Walk(f, n.WindowClause)
}
if n.DistinctClause != nil {
Walk(f, n.DistinctClause)
}
if n.SortClause != nil {
Walk(f, n.SortClause)
}
if n.LimitOffset != nil {
Walk(f, n.LimitOffset)
}
if n.LimitCount != nil {
Walk(f, n.LimitCount)
}
if n.RowMarks != nil {
Walk(f, n.RowMarks)
}
if n.SetOperations != nil {
Walk(f, n.SetOperations)
}
if n.ConstraintDeps != nil {
Walk(f, n.ConstraintDeps)
}
if n.WithCheckOptions != nil {
Walk(f, n.WithCheckOptions)
}
case *pg.RangeFunction:
if n.Functions != nil {
Walk(f, n.Functions)
}
if n.Alias != nil {
Walk(f, n.Alias)
}
if n.Coldeflist != nil {
Walk(f, n.Coldeflist)
}
case *pg.RangeSubselect:
if n.Subquery != nil {
Walk(f, n.Subquery)
}
if n.Alias != nil {
Walk(f, n.Alias)
}
case *pg.RangeTableFunc:
if n.Docexpr != nil {
Walk(f, n.Docexpr)
}
if n.Rowexpr != nil {
Walk(f, n.Rowexpr)
}
if n.Namespaces != nil {
Walk(f, n.Namespaces)
}
if n.Columns != nil {
Walk(f, n.Columns)
}
if n.Alias != nil {
Walk(f, n.Alias)
}
case *pg.RangeTableFuncCol:
if n.TypeName != nil {
Walk(f, n.TypeName)
}
if n.Colexpr != nil {
Walk(f, n.Colexpr)
}
if n.Coldefexpr != nil {
Walk(f, n.Coldefexpr)
}
case *pg.RangeTableSample:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.Method != nil {
Walk(f, n.Method)
}
if n.Args != nil {
Walk(f, n.Args)
}
if n.Repeatable != nil {
Walk(f, n.Repeatable)
}
case *pg.RangeTblEntry:
if n.Tablesample != nil {
Walk(f, n.Tablesample)
}
if n.Subquery != nil {
Walk(f, n.Subquery)
}
if n.Joinaliasvars != nil {
Walk(f, n.Joinaliasvars)
}
if n.Functions != nil {
Walk(f, n.Functions)
}
if n.Tablefunc != nil {
Walk(f, n.Tablefunc)
}
if n.ValuesLists != nil {
Walk(f, n.ValuesLists)
}
if n.Coltypes != nil {
Walk(f, n.Coltypes)
}
if n.Coltypmods != nil {
Walk(f, n.Coltypmods)
}
if n.Colcollations != nil {
Walk(f, n.Colcollations)
}
if n.Alias != nil {
Walk(f, n.Alias)
}
if n.Eref != nil {
Walk(f, n.Eref)
}
if n.SecurityQuals != nil {
Walk(f, n.SecurityQuals)
}
case *pg.RangeTblFunction:
if n.Funcexpr != nil {
Walk(f, n.Funcexpr)
}
if n.Funccolnames != nil {
Walk(f, n.Funccolnames)
}
if n.Funccoltypes != nil {
Walk(f, n.Funccoltypes)
}
if n.Funccoltypmods != nil {
Walk(f, n.Funccoltypmods)
}
if n.Funccolcollations != nil {
Walk(f, n.Funccolcollations)
}
case *pg.RangeTblRef:
// pass
case *pg.RangeVar:
if n.Alias != nil {
Walk(f, n.Alias)
}
case *pg.RawStmt:
if n.Stmt != nil {
Walk(f, n.Stmt)
}
case *pg.ReassignOwnedStmt:
if n.Roles != nil {
Walk(f, n.Roles)
}
if n.Newrole != nil {
Walk(f, n.Newrole)
}
case *pg.RefreshMatViewStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
case *pg.ReindexStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
case *pg.RelabelType:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Arg != nil {
Walk(f, n.Arg)
}
case *pg.RenameStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.Object != nil {
Walk(f, n.Object)
}
case *pg.ReplicaIdentityStmt:
// pass
case *pg.ResTarget:
if n.Indirection != nil {
Walk(f, n.Indirection)
}
if n.Val != nil {
Walk(f, n.Val)
}
case *pg.RoleSpec:
// pass
case *pg.RowCompareExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Opnos != nil {
Walk(f, n.Opnos)
}
if n.Opfamilies != nil {
Walk(f, n.Opfamilies)
}
if n.Inputcollids != nil {
Walk(f, n.Inputcollids)
}
if n.Largs != nil {
Walk(f, n.Largs)
}
if n.Rargs != nil {
Walk(f, n.Rargs)
}
case *pg.RowExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Args != nil {
Walk(f, n.Args)
}
if n.Colnames != nil {
Walk(f, n.Colnames)
}
case *pg.RowMarkClause:
// pass
case *pg.RuleStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.WhereClause != nil {
Walk(f, n.WhereClause)
}
if n.Actions != nil {
Walk(f, n.Actions)
}
case *pg.SQLValueFunction:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
case *pg.ScalarArrayOpExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.SecLabelStmt:
if n.Object != nil {
Walk(f, n.Object)
}
case *pg.SelectStmt:
if n.DistinctClause != nil {
Walk(f, n.DistinctClause)
}
if n.IntoClause != nil {
Walk(f, n.IntoClause)
}
if n.TargetList != nil {
Walk(f, n.TargetList)
}
if n.FromClause != nil {
Walk(f, n.FromClause)
}
if n.WhereClause != nil {
Walk(f, n.WhereClause)
}
if n.GroupClause != nil {
Walk(f, n.GroupClause)
}
if n.HavingClause != nil {
Walk(f, n.HavingClause)
}
if n.WindowClause != nil {
Walk(f, n.WindowClause)
}
if n.ValuesLists != nil {
Walk(f, n.ValuesLists)
}
if n.SortClause != nil {
Walk(f, n.SortClause)
}
if n.LimitOffset != nil {
Walk(f, n.LimitOffset)
}
if n.LimitCount != nil {
Walk(f, n.LimitCount)
}
if n.LockingClause != nil {
Walk(f, n.LockingClause)
}
if n.WithClause != nil {
Walk(f, n.WithClause)
}
if n.Larg != nil {
Walk(f, n.Larg)
}
if n.Rarg != nil {
Walk(f, n.Rarg)
}
case *pg.SetOperationStmt:
if n.Larg != nil {
Walk(f, n.Larg)
}
if n.Rarg != nil {
Walk(f, n.Rarg)
}
if n.ColTypes != nil {
Walk(f, n.ColTypes)
}
if n.ColTypmods != nil {
Walk(f, n.ColTypmods)
}
if n.ColCollations != nil {
Walk(f, n.ColCollations)
}
if n.GroupClauses != nil {
Walk(f, n.GroupClauses)
}
case *pg.SetToDefault:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
case *pg.SortBy:
if n.Node != nil {
Walk(f, n.Node)
}
if n.UseOp != nil {
Walk(f, n.UseOp)
}
case *pg.SortGroupClause:
// pass
case *pg.String:
// pass
case *pg.SubLink:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Testexpr != nil {
Walk(f, n.Testexpr)
}
if n.OperName != nil {
Walk(f, n.OperName)
}
if n.Subselect != nil {
Walk(f, n.Subselect)
}
case *pg.SubPlan:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Testexpr != nil {
Walk(f, n.Testexpr)
}
if n.ParamIds != nil {
Walk(f, n.ParamIds)
}
if n.SetParam != nil {
Walk(f, n.SetParam)
}
if n.ParParam != nil {
Walk(f, n.ParParam)
}
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.TableFunc:
if n.NsUris != nil {
Walk(f, n.NsUris)
}
if n.NsNames != nil {
Walk(f, n.NsNames)
}
if n.Docexpr != nil {
Walk(f, n.Docexpr)
}
if n.Rowexpr != nil {
Walk(f, n.Rowexpr)
}
if n.Colnames != nil {
Walk(f, n.Colnames)
}
if n.Coltypes != nil {
Walk(f, n.Coltypes)
}
if n.Coltypmods != nil {
Walk(f, n.Coltypmods)
}
if n.Colcollations != nil {
Walk(f, n.Colcollations)
}
if n.Colexprs != nil {
Walk(f, n.Colexprs)
}
if n.Coldefexprs != nil {
Walk(f, n.Coldefexprs)
}
case *pg.TableLikeClause:
if n.Relation != nil {
Walk(f, n.Relation)
}
case *pg.TableSampleClause:
if n.Args != nil {
Walk(f, n.Args)
}
if n.Repeatable != nil {
Walk(f, n.Repeatable)
}
case *pg.TargetEntry:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Expr != nil {
Walk(f, n.Expr)
}
case *pg.TransactionStmt:
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.TriggerTransition:
// pass
case *pg.TruncateStmt:
if n.Relations != nil {
Walk(f, n.Relations)
}
case *pg.TypeCast:
if n.Arg != nil {
Walk(f, n.Arg)
}
if n.TypeName != nil {
Walk(f, n.TypeName)
}
case *pg.TypeName:
if n.Names != nil {
Walk(f, n.Names)
}
if n.Typmods != nil {
Walk(f, n.Typmods)
}
if n.ArrayBounds != nil {
Walk(f, n.ArrayBounds)
}
case *pg.UnlistenStmt:
// pass
case *pg.UpdateStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.TargetList != nil {
Walk(f, n.TargetList)
}
if n.WhereClause != nil {
Walk(f, n.WhereClause)
}
if n.FromClause != nil {
Walk(f, n.FromClause)
}
if n.ReturningList != nil {
Walk(f, n.ReturningList)
}
if n.WithClause != nil {
Walk(f, n.WithClause)
}
case *pg.VacuumStmt:
if n.Relation != nil {
Walk(f, n.Relation)
}
if n.VaCols != nil {
Walk(f, n.VaCols)
}
case *pg.Var:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
case *pg.VariableSetStmt:
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.VariableShowStmt:
// pass
case *pg.ViewStmt:
if n.View != nil {
Walk(f, n.View)
}
if n.Aliases != nil {
Walk(f, n.Aliases)
}
if n.Query != nil {
Walk(f, n.Query)
}
if n.Options != nil {
Walk(f, n.Options)
}
case *pg.WindowClause:
if n.PartitionClause != nil {
Walk(f, n.PartitionClause)
}
if n.OrderClause != nil {
Walk(f, n.OrderClause)
}
if n.StartOffset != nil {
Walk(f, n.StartOffset)
}
if n.EndOffset != nil {
Walk(f, n.EndOffset)
}
case *ast.WindowDef:
if n.PartitionClause != nil {
Walk(f, n.PartitionClause)
}
if n.OrderClause != nil {
Walk(f, n.OrderClause)
}
if n.StartOffset != nil {
Walk(f, n.StartOffset)
}
if n.EndOffset != nil {
Walk(f, n.EndOffset)
}
case *pg.WindowFunc:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.Args != nil {
Walk(f, n.Args)
}
if n.Aggfilter != nil {
Walk(f, n.Aggfilter)
}
case *pg.WithCheckOption:
if n.Qual != nil {
Walk(f, n.Qual)
}
case *pg.WithClause:
if n.Ctes != nil {
Walk(f, n.Ctes)
}
case *pg.XmlExpr:
if n.Xpr != nil {
Walk(f, n.Xpr)
}
if n.NamedArgs != nil {
Walk(f, n.NamedArgs)
}
if n.ArgNames != nil {
Walk(f, n.ArgNames)
}
if n.Args != nil {
Walk(f, n.Args)
}
case *pg.XmlSerialize:
if n.Expr != nil {
Walk(f, n.Expr)
}
if n.TypeName != nil {
Walk(f, n.TypeName)
}
default:
panic(fmt.Sprintf("walk: unexpected node type %T", n))
}
f.Visit(nil)
}
|
/*
Go Language Raspberry Pi Interface
(c) Copyright David Thorpe 2016-2018
All Rights Reserved
Documentation http://djthorpe.github.io/gopi/
For Licensing and Usage information, please see LICENSE.md
*/
package sensors
import (
// Frameworks
"github.com/djthorpe/gopi"
)
////////////////////////////////////////////////////////////////////////////////
// INTERFACES - ADS1X15 Analog to Digital Convertors
// Note this driver is still in development
// ADS1X15 is the common interface implemented by drivers for the
// ADS1015/ADS1115 family of I2C analog-to-digital converters.
type ADS1X15 interface {
	gopi.Driver

	// Product returns which chip variant this driver is for.
	Product() ADS1X15Product
}

// ADS1015 is the driver interface for the 12-bit ADS1015 variant.
type ADS1015 interface {
	ADS1X15
}

// ADS1115 is the driver interface for the 16-bit ADS1115 variant.
type ADS1115 interface {
	ADS1X15
}

// ADS1X15Product identifies a chip variant in the ADS1X15 family.
type ADS1X15Product uint

// ADS1X15Rate identifies a sampling rate setting; the numeric suffix of
// each constant is the rate in samples per second.
type ADS1X15Rate uint16
////////////////////////////////////////////////////////////////////////////////
// CONSTANTS
// Supported chip variants.
const (
	ADS1X15_PRODUCT_NONE ADS1X15Product = iota
	ADS1X15_PRODUCT_1015 // 12-bit ADC with 4 channels
	ADS1X15_PRODUCT_1115 // 16-bit ADC with 4 channels
	ADS1X15_PRODUCT_MAX = ADS1X15_PRODUCT_1115
)

// Supported sampling rates; the suffix is samples per second.
// NOTE(review): which rates are valid presumably depends on the product
// variant — confirm against the datasheet before enforcing.
const (
	ADS1X15_RATE_NONE ADS1X15Rate = iota
	ADS1X15_RATE_8
	ADS1X15_RATE_16
	ADS1X15_RATE_32
	ADS1X15_RATE_64
	ADS1X15_RATE_128
	ADS1X15_RATE_250
	ADS1X15_RATE_475
	ADS1X15_RATE_490
	ADS1X15_RATE_860
	ADS1X15_RATE_920
	ADS1X15_RATE_1600
	ADS1X15_RATE_2400
	ADS1X15_RATE_3300
	ADS1X15_RATE_MAX = ADS1X15_RATE_3300
)
////////////////////////////////////////////////////////////////////////////////
// STRINGIFY
// String returns the symbolic constant name for the product value, or a
// placeholder marker for values outside the known range.
func (p ADS1X15Product) String() string {
	if p == ADS1X15_PRODUCT_NONE {
		return "ADS1X15_PRODUCT_NONE"
	}
	if p == ADS1X15_PRODUCT_1015 {
		return "ADS1X15_PRODUCT_1015"
	}
	if p == ADS1X15_PRODUCT_1115 {
		return "ADS1X15_PRODUCT_1115"
	}
	return "[?? Invalid ADS1X15Product]"
}
|
package file
import (
"fmt"
. "github.com/rainmyy/easyDB/library/common"
. "github.com/rainmyy/easyDB/library/strategy"
)
/**
*parser ini conf file
*desc:
*[test]
* [..params]
* name:name1
* key:value
* [...params]
* name:name2
* key:value
*/
// ParserIniContent parses the raw bytes of an ini-style configuration
// file into a list of trees. Comment lines (starting with a slash, hash
// or asterisk) are skipped, remaining bytes are tokenised on line breaks
// and blanks, and the tokens are handed to initTreeFunc for assembly.
// It returns an error for nil input or when no tokens remain.
func ParserIniContent(data []byte) ([]*TreeStruct, error) {
	if data == nil {
		return nil, fmt.Errorf("content is nil")
	}
	// Guarantee a trailing line break so the final token is flushed and
	// a trailing comment is terminated.
	if data[len(data)-1] != byte(LineBreak) {
		data = append(data, byte(LineBreak))
	}
	var (
		tokens    [][]byte
		current   = []byte{}
		inComment = false
	)
	for _, b := range data {
		// A slash, hash or asterisk opens a comment running to end of line.
		if b == byte(Slash) || b == byte(Hash) || b == byte(Asterisk) {
			inComment = true
			continue
		}
		if inComment {
			if b == byte(LineBreak) {
				inComment = false
			}
			continue
		}
		// Tokens are delimited by line breaks and blanks.
		if b != byte(LineBreak) && b != byte(Blank) {
			current = append(current, b)
			continue
		}
		if len(current) > 0 {
			tokens = append(tokens, current)
			current = []byte{}
		}
	}
	if len(tokens) == 0 {
		return nil, fmt.Errorf("bytes is empty")
	}
	// Assemble the flat token list into section trees.
	return initTreeFunc(tokens), nil
}
/**
*实现树状结构
*/
// initTreeFunc assembles the tokenised ini lines into trees. Section
// headers (tokens whose leading characters are '[' or '.') open a new
// subtree at a depth equal to the number of leading separators; plain
// key:value tokens become leaf nodes of the current section. It returns
// the children of the synthetic root tree.
func initTreeFunc(bytesList [][]byte) []*TreeStruct {
	currentTree := TreeInstance()
	// Header separators: '[' (91) and '.' (46).
	var segment = []int{int(LeftBracket), int(Period)}
	infunc := InIntSliceSortedFunc(segment)
	var rootTree = currentTree
	// The synthetic root sits at height 1.
	currentTree.SetHight(1)
	for i := 0; i < len(bytesList); i++ {
		bytes := bytesList[i]
		bytesLen := len(bytes)
		if bytesLen == 0 {
			continue
		}
		// tempNum counts separator characters in the token; a non-zero
		// count marks a section header and doubles as its nesting depth
		// ("[a]" -> 1, "[.a]" -> 2, ...).
		tempNum := 0
		for j := 0; j < bytesLen; j++ {
			if infunc(int(bytes[j])) {
				tempNum++
			}
		}
		treeStruct := TreeInstance()
		currentHigh := currentTree.GetHight()
		var nodeStruct *NodeStruct
		if tempNum > 0 && len(bytes) > tempNum {
			// Section header: strip the leading separators and the
			// trailing ']' to leave the bare section name.
			bytes = bytes[tempNum : bytesLen-1]
			nodeStruct = NodeInstance(bytes, []byte{})
			// Climb back toward the root until this header's depth fits
			// under the current tree.
			for tempNum < currentHigh {
				currentTree = currentTree.GetParent()
				currentHigh = currentTree.GetHight()
			}
			// NOTE(review): treeStruct's height is never set here —
			// confirm GetHight's zero-value behavior on child sections.
			treeStruct.SetNode(nodeStruct)
			treeStruct.SetParent(currentTree)
			currentTree.SetChildren(treeStruct)
			currentTree = treeStruct
		} else if tempNum == 0 {
			// Plain key:value token attached to the current section.
			separatorPlace := SlicePlace(byte(Colon), bytes)
			if separatorPlace <= 0 {
				// No colon (or colon in first position): skip the token.
				continue
			}
			key := bytes[0:separatorPlace]
			value := bytes[separatorPlace+1 : bytesLen]
			nodeStruct = NodeInstance(key, value)
			if currentTree == nil {
				continue
			}
			treeStruct.SetNode(nodeStruct)
			treeStruct.SetParent(currentTree)
			currentTree.SetChildren(treeStruct)
		}
	}
	return rootTree.GetChildren()
}
|
package main
import (
"fmt"
"net"
"log"
"io"
"strconv"
"bufio"
"strings"
)
// Client represents one connected chat user.
type Client struct {
	Id string // unique id assigned by IdGenerator
	Conn net.Conn // underlying TCP connection
	MessageChan chan Message // messages for this client's writer loop
}

// Message is a single chat message relayed through the hub.
type Message struct {
	From string // sender's client id
	To string // receiver's client id
	Body string // message text
}

// Hub owns the set of connected clients and the channels used to join,
// leave and relay messages. All map mutation happens in Hub.Run.
type Hub struct {
	Clients map[string]Client // connected clients keyed by id
	JoinChan chan Client // registration requests
	LeaveChan chan Client // deregistration requests
	MessageChan chan Message // messages to fan out
}

// idGenerationChan delivers fresh sequential user ids; fed by IdGenerator.
var idGenerationChan = make(chan string)
// This example sets up a Hub and accepts client connections on port 8000,
// spawning a handler goroutine per connection.
func main() {
	listener, err := net.Listen("tcp", ":8000") // Create a hub on port 8000
	if err != nil {
		log.Fatalln(err.Error())
	}
	defer listener.Close()

	messageHub := &Hub{
		Clients:     make(map[string]Client),
		JoinChan:    make(chan Client),
		LeaveChan:   make(chan Client),
		MessageChan: make(chan Message),
	}

	go IdGenerator()    // Generate User ID routine
	go messageHub.Run() // Spin up hub routine
	fmt.Println("Listening on port 8000")

	// Accept loop: one goroutine per client connection.
	for {
		conn, acceptErr := listener.Accept()
		if acceptErr != nil {
			log.Fatalln(acceptErr.Error())
		}
		go HandleConnection(conn, messageHub)
	}
}
// Run is the hub's event loop: it serializes client joins, leaves and
// message fan-out over the hub's channels. Run it in its own goroutine;
// it never returns.
func (h *Hub) Run() {
	for {
		select {
		case msg := <-h.MessageChan:
			// Snapshot the recipient channels here, inside the Run
			// goroutine, before handing off to the sender goroutine.
			// The previous version ranged over h.Clients inside the
			// spawned goroutine, racing with the map writes in the
			// join/leave cases below (concurrent map read and write).
			recipients := make([]chan Message, 0, len(h.Clients))
			for _, client := range h.Clients {
				recipients = append(recipients, client.MessageChan)
			}
			// Fan out asynchronously so one slow client cannot stall
			// the event loop.
			go func() {
				for _, ch := range recipients {
					ch <- msg
				}
			}()
		case client := <-h.JoinChan:
			h.Clients[client.Id] = client
			fmt.Printf("New client join. User ID: %v\n", client.Id)
		case client := <-h.LeaveChan:
			delete(h.Clients, client.Id)
			fmt.Printf("Client left. User ID: %v\n", client.Id)
		}
	}
}
// HandleConnection runs the per-client protocol on conn: it registers
// the client with the hub, starts a reader goroutine that parses the
// commands "whoami", "whoishere" and "<body>:<ids>" (trailing ":" means
// broadcast), runs a writer loop delivering hub messages addressed to
// this client, and deregisters the client when the connection drops.
func HandleConnection(conn net.Conn, h *Hub) {
	// errMsgs doubles as a disconnect signal: the reader goroutine sends
	// a final value and then closes it (via defer) when the peer is gone.
	errMsgs := make(chan string)
	client := Client{
		MessageChan: make(chan Message),
		Conn: conn,
		Id: <- idGenerationChan,
	}
	io.WriteString(conn, "Tervetuloa!\n")
	h.JoinChan <- client
	// Reader goroutine: one command per input line.
	go func() {
		defer close(errMsgs)
		bufc := bufio.NewReader(conn)
		for {
			line, _, err := bufc.ReadLine()
			if err != nil {
				// Peer closed the connection (or the read failed).
				break
			}
			ln := strings.TrimSpace(string(line))
			if strings.EqualFold(ln, "whoami") { // Handle "whoami" message
				client.Conn.Write([]byte("Your User ID: " + client.Id + "\n"))
			} else if strings.EqualFold(ln, "whoishere") { // Handle "whoishere" message
				otherClients := GetOtherClients(client, h)
				if len(otherClients) == 0 {
					client.Conn.Write([]byte("Beside of you, there is no client connected now.\n"))
				} else {
					idSlice := []string{}
					for _, v := range otherClients {
						idSlice = append(idSlice, v.Id)
						// client.Conn.Write([]byte("ID: " + v.Id+ "\n"))
					}
					client.Conn.Write([]byte("Beside of you, there are other " +
						strconv.Itoa(len(otherClients)) +
						" clients connected in total now. IDs: " +
						strings.Join(idSlice, ", ") + "\n"))
				}
			} else if strings.Contains(ln, ":") { // Handle relay message
				// Relay format: "<body>:<id1>,<id2>,..."; a trailing ":"
				// broadcasts to everyone else.
				arr := strings.Split(ln, ":")
				body := arr[0]
				if strings.HasSuffix(ln, ":") { // User input ends with ":" means broadcasting
					receivers := []string{}
					// NOTE(review): the loop variable shadows the outer
					// `client`, but the range expression is evaluated with
					// the outer value, so behavior is unaffected.
					for _, client := range GetOtherClients(client, h) {
						receivers = append(receivers, client.Id)
					}
					if !ValidateMessage(receivers, body, client.Conn) {
						continue
					}
					for _, to := range GetOtherClients(client, h) {
						h.MessageChan <- Message{client.Id, to.Id, body}
					}
				} else { // User input doesn't end with ":" means send message to specific receivers
					receivers := strings.Split(arr[1], ",")
					if !ValidateMessage(receivers, body, client.Conn) {
						continue
					}
					for _, to := range receivers {
						h.MessageChan <- Message{client.Id, strings.TrimSpace(to), body}
					}
				}
			}
		}
		errMsgs <- "User has left the chat room."
	}()
LOOP:
	// Writer loop: deliver addressed messages until the reader signals exit.
	for {
		select {
		case msg := <-client.MessageChan:
			// The hub fans every message out to all clients; only write
			// the ones addressed to this client.
			if msg.To == client.Id {
				_, err := io.WriteString(conn, msg.From+": "+msg.Body+"\n")
				if err != nil {
					break LOOP
				}
			}
		case _, ok := <-errMsgs:
			// A received value signals the reader is done; the deferred
			// close then yields ok == false and breaks the loop.
			if !ok {
				break LOOP
			}
		}
	}
	conn.Close()
	h.LeaveChan <- client
}
// GetOtherClients returns every client registered in the hub except c.
func GetOtherClients(c Client, h *Hub) []Client {
	others := make([]Client, 0, len(h.Clients))
	for _, candidate := range h.Clients {
		if candidate.Id == c.Id {
			continue
		}
		others = append(others, candidate)
	}
	return others
}
// IdGenerator endlessly feeds idGenerationChan with sequential numeric
// user ids, starting at "0". Run it in its own goroutine.
func IdGenerator() {
	id := uint64(0)
	for {
		idGenerationChan <- strconv.FormatUint(id, 10)
		id++
	}
}
// Validate message, max 255 receivers, 1024 KB message body
func ValidateMessage(receivers []string, body string, conn net.Conn) bool{
if len(receivers) > 255 {
conn.Write([]byte("Maximum amount of receivers is 255. \n"))
return false
}
if len([]byte(body)) > 1024000 {
conn.Write([]byte("Maximum length of message body is 1024 kilobytes. \n"))
return false
}
return true
}
|
package models
// PaperList is the result of the PaperList() function: one page of a
// paginated papers API response.
type PaperList struct {
	Count int64 `json:"count"` // total number of results across all pages
	Next *string `json:"next"` // URL of the next page; nil on the last page
	Previous *string `json:"previous"` // URL of the previous page; nil on the first page
	Results []Paper `json:"results"` // papers on this page
}
// Paper is a single paper record as decoded from the API. Pointer
// fields are nil when the corresponding JSON value is null.
type Paper struct {
	ID string `json:"id"`
	ArxivID *string `json:"arxiv_id"`
	NipsID *string `json:"nips_id"`
	URLAbs string `json:"url_abs"` // abstract page URL
	URLPDF string `json:"url_pdf"` // PDF URL
	Title string `json:"title"`
	Abstract string `json:"abstract"`
	Authors []string `json:"authors"`
	Published YyyyMmDdDashed `json:"published"` // publication date, YYYY-MM-DD
	Conference *string `json:"conference"`
	ConferenceURLAbs *string `json:"conference_url_abs"`
	ConferenceURLPDF *string `json:"conference_url_pdf"`
	Proceeding *string `json:"proceeding"`
}
|
package server
import (
"fmt"
"net/http"
"path/filepath"
"strconv"
"github.com/dimfeld/httptreemux"
"github.com/rkuris/journey/database"
"github.com/rkuris/journey/filenames"
"github.com/rkuris/journey/structure/methods"
"github.com/rkuris/journey/templates"
)
// indexHandler renders the paginated blog index. With no :number
// parameter it serves page 1; non-numeric or <=1 page numbers redirect
// to the front page.
func indexHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	number := params["number"]
	if number == "" {
		// First page: no page number in the URL.
		if err := templates.ShowIndexTemplate(w, r, 1); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	page, err := strconv.Atoi(number)
	if err != nil || page <= 1 {
		http.Redirect(w, r, "/", http.StatusFound)
		return
	}
	if err := templates.ShowIndexTemplate(w, r, page); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// authorHandler renders an author's post listing, the author RSS feed,
// or a later listing page, depending on the :function and :number
// parameters.
func authorHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	slug := params["slug"]
	switch params["function"] {
	case "":
		// First page of the author listing.
		if err := templates.ShowAuthorTemplate(w, r, slug, 1); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	case "rss":
		if err := templates.ShowAuthorRss(w, slug); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	page, err := strconv.Atoi(params["number"])
	if err != nil || page <= 1 {
		// Invalid page numbers fall back to the front page.
		http.Redirect(w, r, "/", http.StatusFound)
		return
	}
	if err := templates.ShowAuthorTemplate(w, r, slug, page); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// tagHandler renders a tag's post listing, the tag RSS feed, or a later
// listing page, depending on the :function and :number parameters.
func tagHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	slug := params["slug"]
	switch params["function"] {
	case "":
		// First page of the tag listing.
		if err := templates.ShowTagTemplate(w, r, slug, 1); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	case "rss":
		if err := templates.ShowTagRss(w, slug); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	page, err := strconv.Atoi(params["number"])
	if err != nil || page <= 1 {
		// Invalid page numbers fall back to the front page.
		http.Redirect(w, r, "/", http.StatusFound)
		return
	}
	if err := templates.ShowTagTemplate(w, r, slug, page); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// postHandler serves a single post by slug. An empty slug redirects to
// the front page; the special slug "rss" serves the index feed.
func postHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	switch slug := params["slug"]; slug {
	case "":
		http.Redirect(w, r, "/", http.StatusFound)
	case "rss":
		if err := templates.ShowIndexRss(w); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	default:
		if err := templates.ShowPostTemplate(w, r, slug); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}
// postEditHandler looks up a post by slug and redirects to its admin
// edit page. An empty slug redirects to the front page.
func postEditHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	slug := params["slug"]
	if slug == "" {
		http.Redirect(w, r, "/", http.StatusFound)
		return
	}
	post, err := database.RetrievePostBySlug(slug)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	target := fmt.Sprintf("/admin#/edit/%d", post.ID)
	http.Redirect(w, r, target, http.StatusTemporaryRedirect)
}
// assetsHandler serves a static asset from the active theme's assets
// directory. The global blog is read-locked while the theme name is read.
func assetsHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	methods.Blog.RLock()
	defer methods.Blog.RUnlock()
	target := filepath.Join(filenames.ThemesFilepath, methods.Blog.ActiveTheme, "assets", params["filepath"])
	http.ServeFile(w, r, target)
}
// imagesHandler serves a file from the images directory. Top-level
// routes like /favicon.ico are registered without a wildcard parameter,
// so in that case the request path itself selects the file.
func imagesHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	path, ok := params["filepath"]
	if !ok {
		// special, top level files like favicon.ico, robots.txt, etc.
		path = r.URL.Path
	}
	http.ServeFile(w, r, filepath.Join(filenames.ImagesFilepath, path))
}
// publicHandler serves a file from the public directory.
func publicHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	target := filepath.Join(filenames.PublicFilepath, params["filepath"])
	http.ServeFile(w, r, target)
}
// InitializeBlog initializes all the non-admin non-post handlers by
// registering the public blog routes on the given router.
func InitializeBlog(router *httptreemux.TreeMux) {
	// For index
	router.GET("/", indexHandler)
	router.GET("/:slug/edit", postEditHandler)
	router.GET("/:slug", postHandler)
	router.GET("/page/:number", indexHandler)
	// For author
	router.GET("/author/:slug", authorHandler)
	router.GET("/author/:slug/:function", authorHandler)
	router.GET("/author/:slug/:function/:number", authorHandler)
	// For tag
	router.GET("/tag/:slug", tagHandler)
	router.GET("/tag/:slug/:function", tagHandler)
	router.GET("/tag/:slug/:function/:number", tagHandler)
	// For serving asset files
	router.GET("/assets/*filepath", assetsHandler)
	// Top-level special files go through imagesHandler without a
	// :filepath parameter (see imagesHandler's fallback).
	router.GET("/favicon.ico", imagesHandler)
	router.GET("/robots.txt", imagesHandler)
	router.GET("/sitemap.xml", imagesHandler)
	router.GET("/images/*filepath", imagesHandler)
	router.GET("/content/images/*filepath", imagesHandler) // This is here to keep compatibility with Ghost
	router.GET("/public/*filepath", publicHandler)
}
|
package xattrsyscall
import (
"syscall"
"unsafe"
)
// Taken from https://golang.org/src/syscall/zsyscall_linux_amd64.go

// _zero provides a valid address to pass to raw syscalls when the
// caller's buffer is empty.
var _zero uintptr

// Do the interface allocations only once for common
// Errno values.
var (
	errEAGAIN error = syscall.EAGAIN
	errEINVAL error = syscall.EINVAL
	errENOENT error = syscall.ENOENT
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime. Errno 0 maps to nil; unrecognized values are
// returned unchanged.
func errnoErr(e syscall.Errno) error {
	if e == 0 {
		return nil
	}
	switch e {
	case syscall.EAGAIN:
		return errEAGAIN
	case syscall.EINVAL:
		return errEINVAL
	case syscall.ENOENT:
		return errENOENT
	default:
		return e
	}
}
// BSD extended attributes are namespaced; this package hardcodes the
// "user" namespace (EXTATTR_NAMESPACE_USER) for every operation.
const EXTATTR_NAMESPACE_USER = 1
// Getxattr reads the extended attribute attr of the file at path into
// dest via the BSD extattr_get_file syscall (user namespace) and
// returns the syscall's result as the attribute size in bytes.
func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = syscall.BytePtrFromString(attr)
	if err != nil {
		return
	}
	// The raw syscall needs a valid data pointer even for an empty
	// buffer; point at _zero in that case.
	var _p2 unsafe.Pointer
	if len(dest) > 0 {
		_p2 = unsafe.Pointer(&dest[0])
	} else {
		_p2 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(EXTATTR_NAMESPACE_USER), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0)
	sz = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// Listxattr lists the extended attributes of the file at path into dest
// via the BSD extattr_list_file syscall (user namespace) and returns the
// syscall's result as the list size in bytes.
func Listxattr(path string, dest []byte) (sz int, err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	// The raw syscall needs a valid data pointer even for an empty
	// buffer; point at _zero in that case.
	var _p1 unsafe.Pointer
	if len(dest) > 0 {
		_p1 = unsafe.Pointer(&dest[0])
	} else {
		_p1 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(EXTATTR_NAMESPACE_USER), uintptr(_p1), uintptr(len(dest)), 0, 0)
	sz = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// Setxattr writes data as the extended attribute attr of the file at
// path via the BSD extattr_set_file syscall (user namespace). flags is
// passed through to the syscall unchanged.
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = syscall.BytePtrFromString(attr)
	if err != nil {
		return
	}
	// The raw syscall needs a valid data pointer even for empty data;
	// point at _zero in that case.
	var _p2 unsafe.Pointer
	if len(data) > 0 {
		_p2 = unsafe.Pointer(&data[0])
	} else {
		_p2 = unsafe.Pointer(&_zero)
	}
	_, _, e1 := syscall.Syscall6(syscall.SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(EXTATTR_NAMESPACE_USER), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// Removexattr deletes the extended attribute attr of the file at path
// via the BSD extattr_delete_file syscall (user namespace).
func Removexattr(path string, attr string) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = syscall.BytePtrFromString(attr)
	if err != nil {
		return
	}
	_, _, e1 := syscall.Syscall(syscall.SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(EXTATTR_NAMESPACE_USER), uintptr(unsafe.Pointer(_p1)))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
|
package responses
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
// TestDecodeAccountsBalancesResponse verifies that a successful
// accounts_balances payload decodes into per-account balance, pending
// and receivable amounts.
func TestDecodeAccountsBalancesResponse(t *testing.T) {
	encoded := "{\"balances\" : {\"nano_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3\": {\"balance\": \"325586539664609129644855132177\",\"pending\": \"2309372032769300000000000000000000\",\"receivable\": \"2309372032769300000000000000000000\"},\"nano_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7\":{\"balance\": \"10000000\",\"pending\": \"0\",\"receivable\": \"0\" }}}"
	var decoded AccountsBalancesResponse
	// The decode error was previously discarded; a malformed fixture
	// would have surfaced as a confusing nil-pointer failure below.
	err := json.Unmarshal([]byte(encoded), &decoded)
	assert.Nil(t, err)
	balances := *decoded.Balances
	assert.Equal(t, "325586539664609129644855132177", balances["nano_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3"].Balance)
	assert.Equal(t, "2309372032769300000000000000000000", balances["nano_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3"].Pending)
	assert.Equal(t, "2309372032769300000000000000000000", balances["nano_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3"].Receivable)
	assert.Equal(t, "10000000", balances["nano_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7"].Balance)
	assert.Equal(t, "0", balances["nano_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7"].Pending)
	assert.Equal(t, "0", balances["nano_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7"].Receivable)
}
// TestDecodeAccountsBalancesResponseError verifies that an RPC error
// payload decodes without populating the Balances map.
func TestDecodeAccountsBalancesResponseError(t *testing.T) {
	encoded := "{\"error\": \"Account not found\"}"
	var decoded AccountsBalancesResponse
	// Check the decode error instead of silently discarding it.
	err := json.Unmarshal([]byte(encoded), &decoded)
	assert.Nil(t, err)
	assert.Nil(t, decoded.Balances)
}
|
package usecases_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
usecases "github.com/vmware-tanzu-labs/git-story/usecases"
)
// Ginkgo spec for usecases.SweepAcceptedStories, driven by the mock git
// repository and Pivotal Tracker reader defined elsewhere in this
// test package.
var _ = Describe("Story Sweeper use case", func() {
	It("should delete all branches that correspond to accepted stories", func() {
		mockGitRepo := &MockGitRepository{branchNames: []string{"main", "some-accepted-story-#123", "some-wip-story-#234"}}
		mockTrackerReader := MockPivotalTrackerReader{}
		result := usecases.SweepAcceptedStories(mockGitRepo, mockTrackerReader)
		// The swept branch must appear in the result map and be the only
		// branch the repository was asked to delete.
		var resultContainsStory bool
		_, resultContainsStory = result["some-accepted-story-#123"]
		Expect(resultContainsStory).To(BeTrue())
		Expect(mockGitRepo.deletedBranches).To(Equal([]string{"some-accepted-story-#123"}))
	})
	It("should return an error if the branch is unable to be deleted", func() {
		// The mock fails deletion only for the branches listed in
		// erroredBranches.
		mockGitRepo := &MockGitRepository{
			branchNames: []string{"main", "some-accepted-story-#123", "some-accepted-story-#789", "some-wip-story-#234"},
			erroredBranches: []string{"some-accepted-story-#123"},
		}
		mockTrackerReader := MockPivotalTrackerReader{}
		results := usecases.SweepAcceptedStories(mockGitRepo, mockTrackerReader)
		Expect(results["some-accepted-story-#123"]).NotTo(BeNil())
		Expect(results["some-accepted-story-#789"]).To(BeNil())
	})
})
|
package cmd
import(
"fmt"
"github.com/spf13/cobra"
"os"
// "strings"
)
// rootCmd is the top-level CLI command; subcommands are attached in Exec.
var rootCmd = &cobra.Command {
	Use: "LearningGo ",
	Short: "My first go project",
	Long: `Just a small CLI application. Read Atom feeds`,
}

// cmdLs is the "ls" subcommand; it delegates to list() to print news items.
var cmdLs = &cobra.Command {
	Use: "ls",
	Short: "List news",
	Long: `List first 5 news`,
	Run: func (cmd *cobra.Command, args []string) {
		list()
	},
}

// cmdDescribe is the "describe" subcommand; it delegates to describe()
// with the required article id argument.
var cmdDescribe = &cobra.Command {
	Use: "describe [id]",
	Short: "Show details for an article",
	Long: `Details for an article`,
	Args: cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		describe(args[0])
	},
}
func Exec() {
rootCmd.AddCommand(cmdLs)
rootCmd.AddCommand(cmdDescribe)
err := rootCmd.Execute()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
} |
package main
import (
"fmt"
"github.com/spf13/cobra"
cmder "github.com/yaegashi/cobra-cmder"
)
// AppSPJobDelete implements the "delete" subcommand of the SP job
// command tree, inheriting shared state from the parent AppSPJob.
type AppSPJobDelete struct {
	*AppSPJob
	// Scope is not referenced in this file — confirm callers set/use it.
	Scope string
}

// AppSPJobDeleteComder returns a cmder.Cmder that builds the delete
// subcommand from the parent AppSPJob.
func (app *AppSPJob) AppSPJobDeleteComder() cmder.Cmder {
	return &AppSPJobDelete{AppSPJob: app}
}
// Cmd constructs the cobra command for "delete". The command is a stub
// whose RunE always reports that deletion is not implemented.
func (app *AppSPJobDelete) Cmd() *cobra.Command {
	return &cobra.Command{
		Use:          "delete",
		Short:        "Delete job (not implemented)",
		RunE:         app.RunE,
		SilenceUsage: true,
	}
}
// RunE is the execution hook for the delete command. Deletion is not
// implemented yet, so it always returns an error.
func (app *AppSPJobDelete) RunE(cmd *cobra.Command, args []string) error {
	// Error strings are lowercase and unpunctuated per Go convention.
	return fmt.Errorf("not implemented")
}
|
package models
import (
"time"
)
// Profile is a user profile record with soft-delete timestamps.
type Profile struct {
	UserID int `json:"user_id"` // owning user's id
	ProfileImage string `json:"profile_image"`
	Bio string `json:"bio"`
	TeamID int `json:"team_id"`
	Settings string `json:"settings"` // serialized settings blob — confirm format with callers
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
	DeletedAt time.Time `json:"deleted_at"` // zero value when not deleted — TODO confirm
}
// Profiles is a placeholder for the profile listing operation; it has
// no behavior yet.
func Profiles() {
}
|
package security
import (
"fmt"
"html/template"
"net/http"
"net/url"
"strings"
)
// ExternalSystemCreatePage returns the HTTP handler for the external
// system creation form. It requires an authenticated session holding
// the c6 or s1 role, renders the per-type connection settings, and on
// POST persists the new system and redirects into the connector flow.
func ExternalSystemCreatePage(t *template.Template, am AccessManager) func(w http.ResponseWriter, r *http.Request) {
	// Page is the view model for the "external_system_create" template.
	type Page struct {
		Session Session
		Title []string
		SystemType string
		Uuid string
		ExternalSystem *ExternalSystem
		Feedback []string
		Config []*ConfSet
		ConnectorLabel string
	}
	return func(w http.ResponseWriter, r *http.Request) {
		session, err := LookupSession(r, am)
		if err != nil {
			ShowError(w, r, t, err, session)
			return
		}
		if !session.IsAuthenticated() {
			http.Redirect(w, r, "/signup", http.StatusTemporaryRedirect)
			return
		}
		// Only sessions with the c6 or s1 role may create systems.
		if !session.HasRole("c6") && !session.HasRole("s1") {
			ShowErrorForbidden(w, r, t, session)
			return
		}
		AddSafeHeaders(w)
		p := &Page{
			Session: session,
			Title: []string{"Create new external system connection", "External Systems"},
			Uuid: r.FormValue(`uuid`),
			SystemType: r.FormValue(`type`),
			ConnectorLabel: r.FormValue(`connector`),
		}
		// Each supported system type declares the connection settings
		// its form must collect.
		if p.SystemType == `Mailchimp` {
			p.Config = append(p.Config, &ConfSet{"Mailchimp API Key", "mailchimp.key", "", "string"})
		}
		if p.SystemType == `Moodle` {
			p.Config = append(p.Config, &ConfSet{"Moodle URL", "moodle.url", "", "string"})
			p.Config = append(p.Config, &ConfSet{"Moodle API Key", "moodle.key", "", "string"})
		}
		if p.SystemType == `Formsite` {
			p.Config = append(p.Config, &ConfSet{"Formsite URL", "formsite.url", "", "string"})
			p.Config = append(p.Config, &ConfSet{"Formsite API Key", "formsite.key", "", "string"})
		}
		if p.SystemType == `GoogleSheets` {
			p.Config = append(p.Config, &ConfSet{"Client Secret", "client.secret", "", "string"})
		}
		if r.Method == "POST" {
			es, feedback, err := createExternalSystemWithFormValues(am, session, r, p.Config)
			if err != nil {
				ShowError(w, r, t, err, session)
				return
			}
			if len(feedback) == 0 && es != nil {
				// Saved with no errors
				if p.SystemType == "Formsite" {
					// Formsite has an extra connector setup step.
					http.Redirect(w, r, "/z/connector/formsite.add1?&external_system_uuid="+es.Uuid(), http.StatusSeeOther)
				} else {
					http.Redirect(w, r, "/z/connectors?add="+url.QueryEscape(r.FormValue("connector"))+"&uuid="+es.Uuid(), http.StatusSeeOther)
				}
				return
			}
			p.Feedback = feedback
		}
		// Not saved, show creation form, with feedback if needed
		Render(r, w, t, "external_system_create", p)
		return
	}
}
// createExternalSystemWithFormValues reads the posted connection
// settings described by conf out of the request and asks the access
// manager to persist a new external system of the posted `type`.
// It returns the created system, any validation feedback, and any
// persistence error. (The previous doc comment was copy-pasted from an
// unrelated account handler.)
func createExternalSystemWithFormValues(am AccessManager, session Session, r *http.Request, conf []*ConfSet) (ExternalSystem, []string, error) {
	var warnings []string
	var config []KeyValue
	etype := r.FormValue(`type`)
	// Collect the non-empty settings the form supplied.
	for _, i := range conf {
		val := strings.TrimSpace(r.FormValue(i.FieldName))
		if val != "" {
			config = append(config, KeyValue{i.FieldName, val})
		}
	}
	// If any validation warnings accumulated, report them without saving.
	if len(warnings) > 0 {
		return nil, warnings, nil
	}
	es, err := am.AddExternalSystem(etype, config, session)
	if err != nil {
		// Do not touch es on failure: AddExternalSystem may return a nil
		// system, and the old debug Printf called es.Uuid() here, which
		// would panic.
		fmt.Printf("external system add failed: %v\n", err)
		return nil, warnings, err
	}
	fmt.Printf("external system add %v %s %v\n", es, es.Uuid(), err)
	return es, warnings, nil
}
// externalSystemCreateTemplate defines the "external_system_create"
// admin page: a form listing the connection settings (Config) required
// by the chosen external system type. Rendered by the handler returned
// from ExternalSystemCreatePage.
var externalSystemCreateTemplate = `
{{define "external_system_create"}}
{{template "admin_header" .}}
<style type="text/css">
#editform table {
	margin-left:auto;
	margin-right:auto;
}
#editform h1 {
	text-align:center;
}
#editform p {
	text-align:center;
	margin-bottom: 2em;
}
#editform input {
	font-size: 1rem;
}
#editform table th {
	vertical-align:top;
}
</style>
<div style="margin-top: -0.7rem"><a class="back" href="/z/connectors?add={{.ConnectorLabel}}">Back</a></div>
{{if .Feedback}}<div class="feedback error">{{if eq 1 (len .Feedback)}}<p>{{index .Feedback 0}}</p>{{else}}<ul>{{range .Feedback}}<li>{{.}}</li>{{end}}</ul>{{end}}</div>{{end}}
<div id="editform">
<h1>Add External System: {{.SystemType}}</h1>
<p>This form is used to establish the connection details of an external system.</p>
<form method="post">
<input type="hidden" name="type" value="{{.SystemType}}"/>
<input type="hidden" name="connector" value="{{.ConnectorLabel}}"/>
<input type="hidden" name="csrf" value="{{.Session.CSRF}}"/>
<table id="course_edit" class="form">
	<tr>
		<th>System Type</th>
		<td>{{.SystemType}}</td>
	</tr>
	<tr><td>&nbsp;</td></tr>
{{range .Config}}
	<tr>
		<th>{{.English}}</th>
		<td><input type="text" name="{{.FieldName}}" value="{{.Value}}"></td>
	</tr>
{{end}}
	<tr><td>&nbsp;</td></tr>
	<tr><td></td><td><input type="submit" value="Create New External System"></td></tr>
</table>
</form>
</div>
{{template "admin_footer" .}}
{{end}}
`
|
package controller
import (
"encoding/json"
"fmt"
"regexp"
"sort"
"strings"
"time"
wfv1 "github.com/argoproj/argo/api/workflow/v1alpha1"
"github.com/argoproj/argo/errors"
workflowclient "github.com/argoproj/argo/workflow/client"
"github.com/argoproj/argo/workflow/common"
log "github.com/sirupsen/logrus"
"github.com/valyala/fasttemplate"
apiv1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// wfOperationCtx is the context for evaluation and operation of a single workflow
type wfOperationCtx struct {
	// wf is the workflow object (a deep copy of the informer's object;
	// never the shared cache instance itself)
	wf *wfv1.Workflow
	// updated indicates whether or not the workflow object itself was updated
	// and needs to be persisted back to kubernetes
	updated bool
	// log is a logrus logging context to correlate logs with a workflow
	log *log.Entry
	// controller reference to workflow controller
	controller *WorkflowController
	// NOTE: eventually we may need to store additional metadata state to
	// understand how to proceed in workflows with more complex control flows.
	// (e.g. workflow failed in step 1 of 3 but has finalizer steps)
}
// wfScope contains the current scope of variables available when iterating steps in a workflow
type wfScope struct {
	// tmpl is the template whose steps are being evaluated
	tmpl *wfv1.Template
	// scope maps variable names to their resolved values
	scope map[string]interface{}
}
// operateWorkflow is the operator logic of a workflow
// It evaluates the current state of the workflow and decides how to proceed down the execution path:
// validate (first pass only), create PVCs, execute the entrypoint template,
// and — once the root node completes — delete PVCs and set the final
// workflow phase. Any status mutation is persisted in the deferred update.
func (wfc *WorkflowController) operateWorkflow(wf *wfv1.Workflow) {
	if wf.ObjectMeta.Labels[common.LabelKeyCompleted] == "true" {
		// can get here if we already added the completed=true label,
		// but we are still draining the controller's workflow channel
		return
	}
	log.Infof("Processing wf: %v", wf.ObjectMeta.SelfLink)
	// NEVER modify objects from the store. It's a read-only, local cache.
	// You can use DeepCopy() to make a deep copy of original object and modify this copy
	// Or create a copy manually for better performance
	woc := wfOperationCtx{
		wf: wf.DeepCopyObject().(*wfv1.Workflow),
		updated: false,
		log: log.WithFields(log.Fields{
			"workflow": wf.ObjectMeta.Name,
			"namespace": wf.ObjectMeta.Namespace,
		}),
		controller: wfc,
	}
	// Persist any status changes made below back to kubernetes on exit.
	defer func() {
		if woc.updated {
			wfClient := workflowclient.NewWorkflowClient(wfc.restClient, wfc.scheme, wf.ObjectMeta.Namespace)
			_, err := wfClient.UpdateWorkflow(woc.wf)
			if err != nil {
				woc.log.Errorf("Error updating %s status: %v", woc.wf.ObjectMeta.SelfLink, err)
			} else {
				woc.log.Infof("Workflow %s updated", woc.wf.ObjectMeta.SelfLink)
			}
		}
	}()
	// Perform one-time workflow validation
	if woc.wf.Status.Phase == "" {
		woc.markWorkflowRunning()
		err := common.ValidateWorkflow(woc.wf)
		if err != nil {
			woc.markWorkflowFailed(fmt.Sprintf("invalid spec: %s", err.Error()))
			return
		}
	}
	err := woc.createPVCs()
	if err != nil {
		woc.log.Errorf("%s error: %+v", wf.ObjectMeta.Name, err)
		woc.markWorkflowError(err, true)
		return
	}
	err = woc.executeTemplate(wf.Spec.Entrypoint, wf.Spec.Arguments, wf.ObjectMeta.Name)
	if err != nil {
		woc.log.Errorf("%s error: %+v", wf.ObjectMeta.Name, err)
	}
	// The root node carries the workflow's name; the workflow is done
	// only when that node has completed.
	node := woc.wf.Status.Nodes[woc.wf.NodeID(wf.ObjectMeta.Name)]
	if !node.Completed() {
		return
	}
	err = woc.deletePVCs()
	if err != nil {
		woc.log.Errorf("%s error: %+v", wf.ObjectMeta.Name, err)
		// Mark the workflow with an error message and return, but intentionally do not
		// markCompletion so that we can retry PVC deletion (TODO: requires resync to be set on the informer)
		// This error phase may be cleared if a subsequent delete attempt is successful.
		woc.markWorkflowError(err, false)
		return
	}
	// TODO: workflow finalizer logic goes here
	// If we get here, the workflow completed, all PVCs were deleted successfully,
	// and finalizers were executed (finalizer feature yet to be implemented).
	// We now need to infer the workflow phase from the node phase.
	switch node.Phase {
	case wfv1.NodeSucceeded, wfv1.NodeSkipped:
		woc.markWorkflowSuccess()
	case wfv1.NodeFailed:
		woc.markWorkflowFailed(node.Message)
	case wfv1.NodeError:
		woc.markWorkflowPhase(wfv1.NodeError, true, node.Message)
	default:
		// NOTE: we should never make it here because if the node was 'Running'
		// we should have returned earlier.
		err = errors.InternalErrorf("Unexpected node phase %s: %+v", wf.ObjectMeta.Name, err)
		woc.markWorkflowError(err, true)
	}
}
// createPVCs creates a PersistentVolumeClaim for each of the workflow's
// volumeClaimTemplates, records the resulting claims in workflow status,
// and is idempotent across repeated operator invocations.
func (woc *wfOperationCtx) createPVCs() error {
	if woc.wf.Status.Phase != wfv1.NodeRunning {
		// Only attempt to create PVCs if workflow transitioned to Running state
		// (e.g. passed validation, or didn't already complete)
		return nil
	}
	if len(woc.wf.Spec.VolumeClaimTemplates) == len(woc.wf.Status.PersistentVolumeClaims) {
		// If we have already created the PVCs, then there is nothing to do.
		// This will also handle the case where workflow has no volumeClaimTemplates.
		return nil
	}
	if len(woc.wf.Status.PersistentVolumeClaims) == 0 {
		woc.wf.Status.PersistentVolumeClaims = make([]apiv1.Volume, len(woc.wf.Spec.VolumeClaimTemplates))
	}
	pvcClient := woc.controller.clientset.CoreV1().PersistentVolumeClaims(woc.wf.ObjectMeta.Namespace)
	t := true
	for i, pvcTmpl := range woc.wf.Spec.VolumeClaimTemplates {
		if pvcTmpl.ObjectMeta.Name == "" {
			return errors.Errorf(errors.CodeBadRequest, "volumeClaimTemplates[%d].metadata.name is required", i)
		}
		// work on a copy so the spec template itself is never mutated
		pvcTmpl = *pvcTmpl.DeepCopy()
		// PVC name will be <workflowname>-<volumeclaimtemplatename>
		refName := pvcTmpl.ObjectMeta.Name
		pvcName := fmt.Sprintf("%s-%s", woc.wf.ObjectMeta.Name, pvcTmpl.ObjectMeta.Name)
		woc.log.Infof("Creating pvc %s", pvcName)
		pvcTmpl.ObjectMeta.Name = pvcName
		// owner reference ensures the PVC is garbage collected with the workflow
		pvcTmpl.OwnerReferences = []metav1.OwnerReference{
			{
				APIVersion:         wfv1.CRDFullName,
				Kind:               wfv1.CRDKind,
				Name:               woc.wf.ObjectMeta.Name,
				UID:                woc.wf.ObjectMeta.UID,
				BlockOwnerDeletion: &t,
			},
		}
		pvc, err := pvcClient.Create(&pvcTmpl)
		if err != nil {
			woc.markNodeError(woc.wf.ObjectMeta.Name, err)
			return err
		}
		vol := apiv1.Volume{
			Name: refName,
			VolumeSource: apiv1.VolumeSource{
				PersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{
					ClaimName: pvc.ObjectMeta.Name,
				},
			},
		}
		woc.wf.Status.PersistentVolumeClaims[i] = vol
		woc.updated = true
	}
	return nil
}
// deletePVCs attempts to delete all PVCs recorded in workflow status, keeping
// any that failed to delete so a later attempt can retry them. Returns the
// first deletion error encountered (NotFound is treated as success).
func (woc *wfOperationCtx) deletePVCs() error {
	totalPVCs := len(woc.wf.Status.PersistentVolumeClaims)
	if totalPVCs == 0 {
		// PVC list already empty. nothing to do
		return nil
	}
	pvcClient := woc.controller.clientset.CoreV1().PersistentVolumeClaims(woc.wf.ObjectMeta.Namespace)
	newPVClist := make([]apiv1.Volume, 0)
	// Attempt to delete all PVCs. Record first error encountered
	var firstErr error
	for _, pvc := range woc.wf.Status.PersistentVolumeClaims {
		woc.log.Infof("Deleting PVC %s", pvc.PersistentVolumeClaim.ClaimName)
		err := pvcClient.Delete(pvc.PersistentVolumeClaim.ClaimName, nil)
		if err != nil {
			if !apierr.IsNotFound(err) {
				woc.log.Errorf("Failed to delete pvc %s: %v", pvc.PersistentVolumeClaim.ClaimName, err)
				// keep this PVC in the list so deletion is retried next resync
				newPVClist = append(newPVClist, pvc)
				if firstErr == nil {
					firstErr = err
				}
			}
		}
	}
	if len(newPVClist) != totalPVCs {
		// we were successful in deleting one or more PVCs
		woc.log.Infof("Deleted %d/%d PVCs", totalPVCs-len(newPVClist), totalPVCs)
		woc.wf.Status.PersistentVolumeClaims = newPVClist
		woc.updated = true
	}
	return firstErr
}
// executeTemplate resolves templateName, substitutes args into it, and
// dispatches execution based on template kind (container, steps, or script).
// It is re-entrant: completed nodes and already-scheduled containers are no-ops.
func (woc *wfOperationCtx) executeTemplate(templateName string, args wfv1.Arguments, nodeName string) error {
	woc.log.Debugf("Evaluating node %s: template: %s", nodeName, templateName)
	nodeID := woc.wf.NodeID(nodeName)
	node, ok := woc.wf.Status.Nodes[nodeID]
	if ok && node.Completed() {
		woc.log.Debugf("Node %s already completed", nodeName)
		return nil
	}
	tmpl := woc.wf.GetTemplate(templateName)
	if tmpl == nil {
		err := errors.Errorf(errors.CodeBadRequest, "Node %v error: template '%s' undefined", node, templateName)
		woc.markNodeError(nodeName, err)
		return err
	}
	// substitute input arguments into the template
	tmpl, err := common.ProcessArgs(tmpl, args, false)
	if err != nil {
		woc.markNodeError(nodeName, err)
		return err
	}
	if tmpl.Container != nil {
		if ok {
			// There's already a node entry for the container. This means the container was already
			// scheduled (or had a create pod error). Nothing more to do with this node.
			return nil
		}
		// We have not yet created the pod
		return woc.executeContainer(nodeName, tmpl)
	} else if len(tmpl.Steps) > 0 {
		if !ok {
			node = *woc.markNodePhase(nodeName, wfv1.NodeRunning)
			woc.log.Infof("Initialized workflow node %v", node)
		}
		err = woc.executeSteps(nodeName, tmpl)
		// once the step template completes, terminate any daemoned pods it started
		if woc.wf.Status.Nodes[nodeID].Completed() {
			woc.killDeamonedChildren(nodeID)
		}
		return err
	} else if tmpl.Script != nil {
		return woc.executeScript(nodeName, tmpl)
	}
	err = errors.Errorf("Template '%s' missing specification", tmpl.Name)
	woc.markNodeError(nodeName, err)
	return err
}
// markWorkflowPhase is a convenience method to set the phase of the workflow with optional message
// optionally marks the workflow completed, which sets the finishedAt timestamp and completed label
func (woc *wfOperationCtx) markWorkflowPhase(phase wfv1.NodePhase, markCompleted bool, message ...string) {
	if woc.wf.Status.Phase != phase {
		woc.log.Infof("Updated phase %s -> %s", woc.wf.Status.Phase, phase)
		woc.updated = true
		woc.wf.Status.Phase = phase
		if woc.wf.ObjectMeta.Labels == nil {
			woc.wf.ObjectMeta.Labels = make(map[string]string)
		}
		// phase label allows label-selector queries on workflow phase
		woc.wf.ObjectMeta.Labels[common.LabelKeyPhase] = string(phase)
	}
	// set startedAt on the first phase transition of the workflow's life
	if woc.wf.Status.StartedAt.IsZero() {
		woc.updated = true
		woc.wf.Status.StartedAt = metav1.Time{Time: time.Now().UTC()}
	}
	if len(message) > 0 && woc.wf.Status.Message != message[0] {
		woc.log.Infof("Updated message %s -> %s", woc.wf.Status.Message, message[0])
		woc.updated = true
		woc.wf.Status.Message = message[0]
	}
	switch phase {
	case wfv1.NodeSucceeded, wfv1.NodeFailed, wfv1.NodeError:
		if markCompleted {
			// completed label lets the controller skip the workflow on future syncs
			woc.log.Infof("Marking workflow completed")
			woc.wf.Status.FinishedAt = metav1.Time{Time: time.Now().UTC()}
			if woc.wf.ObjectMeta.Labels == nil {
				woc.wf.ObjectMeta.Labels = make(map[string]string)
			}
			woc.wf.ObjectMeta.Labels[common.LabelKeyCompleted] = "true"
			woc.updated = true
		}
	}
}
// markWorkflowRunning transitions the workflow to the Running phase (not completed).
func (woc *wfOperationCtx) markWorkflowRunning() {
	woc.markWorkflowPhase(wfv1.NodeRunning, false)
}
// markWorkflowSuccess marks the workflow Succeeded and completed.
func (woc *wfOperationCtx) markWorkflowSuccess() {
	woc.markWorkflowPhase(wfv1.NodeSucceeded, true)
}
// markWorkflowFailed marks the workflow Failed and completed, with the given message.
func (woc *wfOperationCtx) markWorkflowFailed(message string) {
	woc.markWorkflowPhase(wfv1.NodeFailed, true, message)
}
// markWorkflowError marks the workflow in the Error phase with the error's message,
// optionally marking it completed (false allows retry, e.g. for PVC deletion).
func (woc *wfOperationCtx) markWorkflowError(err error, markCompleted bool) {
	woc.markWorkflowPhase(wfv1.NodeError, markCompleted, err.Error())
}
// markNodePhase marks a node with the given phase, creating the node if necessary and handles timestamps
func (woc *wfOperationCtx) markNodePhase(nodeName string, phase wfv1.NodePhase, message ...string) *wfv1.NodeStatus {
	// lazily initialize the node map on first use
	if woc.wf.Status.Nodes == nil {
		woc.wf.Status.Nodes = make(map[string]wfv1.NodeStatus)
	}
	nodeID := woc.wf.NodeID(nodeName)
	node, ok := woc.wf.Status.Nodes[nodeID]
	if !ok {
		// first time we see this node: record its start time
		node = wfv1.NodeStatus{
			ID:        nodeID,
			Name:      nodeName,
			Phase:     phase,
			StartedAt: metav1.Time{Time: time.Now().UTC()},
		}
	} else {
		node.Phase = phase
	}
	if len(message) > 0 {
		node.Message = message[0]
	}
	// stamp finishedAt once, when the node first reaches a completed phase
	if node.Completed() && node.FinishedAt.IsZero() {
		node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
	}
	woc.wf.Status.Nodes[nodeID] = node
	woc.updated = true
	return &node
}
// markNodeError is a convenience method to mark a node with an error and set the message from the error
func (woc *wfOperationCtx) markNodeError(nodeName string, err error) *wfv1.NodeStatus {
	return woc.markNodePhase(nodeName, wfv1.NodeError, err.Error())
}
// executeContainer creates the pod for a container template and marks the
// node Running on success, or Error if pod creation failed.
func (woc *wfOperationCtx) executeContainer(nodeName string, tmpl *wfv1.Template) error {
	err := woc.createWorkflowPod(nodeName, tmpl)
	if err != nil {
		woc.markNodeError(nodeName, err)
		return err
	}
	node := woc.markNodePhase(nodeName, wfv1.NodeRunning)
	woc.log.Infof("Initialized container node %v", node)
	return nil
}
// executeSteps runs the step groups of a steps template sequentially,
// accumulating each completed child's outputs (IP, result, parameters,
// artifacts) into a scope that later groups can reference. Returns early
// (without marking the node) while a step group is still running.
func (woc *wfOperationCtx) executeSteps(nodeName string, tmpl *wfv1.Template) error {
	scope := wfScope{
		tmpl:  tmpl,
		scope: make(map[string]interface{}),
	}
	for i, stepGroup := range tmpl.Steps {
		// step group nodes are named "<parent>[<index>]"
		sgNodeName := fmt.Sprintf("%s[%d]", nodeName, i)
		woc.addChildNode(nodeName, sgNodeName)
		err := woc.executeStepGroup(stepGroup, sgNodeName, &scope)
		if err != nil {
			woc.markNodeError(nodeName, err)
			return err
		}
		sgNodeID := woc.wf.NodeID(sgNodeName)
		if !woc.wf.Status.Nodes[sgNodeID].Completed() {
			// group still in progress; resume on the next operator pass
			woc.log.Infof("Workflow step group node %v not yet completed", woc.wf.Status.Nodes[sgNodeID])
			return nil
		}
		if !woc.wf.Status.Nodes[sgNodeID].Successful() {
			failMessage := fmt.Sprintf("step group %s was unsuccessful", sgNodeName)
			woc.log.Info(failMessage)
			woc.markNodePhase(nodeName, wfv1.NodeFailed, failMessage)
			return nil
		}
		// HACK: need better way to add children to scope
		for _, step := range stepGroup {
			childNodeName := fmt.Sprintf("%s.%s", sgNodeName, step.Name)
			childNodeID := woc.wf.NodeID(childNodeName)
			childNode, ok := woc.wf.Status.Nodes[childNodeID]
			if !ok {
				// This can happen if there was `withItem` expansion
				// it is okay to ignore this because these expanded steps
				// are not easily referenceable by user.
				continue
			}
			if childNode.PodIP != "" {
				key := fmt.Sprintf("steps.%s.ip", step.Name)
				scope.addParamToScope(key, childNode.PodIP)
			}
			if childNode.Outputs != nil {
				if childNode.Outputs.Result != nil {
					key := fmt.Sprintf("steps.%s.outputs.result", step.Name)
					scope.addParamToScope(key, *childNode.Outputs.Result)
				}
				for _, outParam := range childNode.Outputs.Parameters {
					key := fmt.Sprintf("steps.%s.outputs.parameters.%s", step.Name, outParam.Name)
					scope.addParamToScope(key, *outParam.Value)
				}
				for _, outArt := range childNode.Outputs.Artifacts {
					key := fmt.Sprintf("steps.%s.outputs.artifacts.%s", step.Name, outArt.Name)
					scope.addArtifactToScope(key, outArt)
				}
			}
		}
	}
	// all step groups completed successfully
	woc.markNodePhase(nodeName, wfv1.NodeSucceeded)
	return nil
}
// executeStepGroup examines a map of parallel steps and executes them in parallel.
// Handles referencing of variables in scope, expands `withItem` clauses, and evaluates `when` expressions
func (woc *wfOperationCtx) executeStepGroup(stepGroup []wfv1.WorkflowStep, sgNodeName string, scope *wfScope) error {
	nodeID := woc.wf.NodeID(sgNodeName)
	node, ok := woc.wf.Status.Nodes[nodeID]
	if ok && node.Completed() {
		woc.log.Debugf("Step group node %v already marked completed", node)
		return nil
	}
	if !ok {
		node = *woc.markNodePhase(sgNodeName, wfv1.NodeRunning)
		woc.log.Infof("Initializing step group node %v", node)
	}
	// First, resolve any references to outputs from previous steps, and perform substitution
	stepGroup, err := woc.resolveReferences(stepGroup, scope)
	if err != nil {
		woc.markNodeError(sgNodeName, err)
		return err
	}
	// Next, expand the step's withItems (if any)
	stepGroup, err = woc.expandStepGroup(stepGroup)
	if err != nil {
		woc.markNodeError(sgNodeName, err)
		return err
	}
	// Kick off all parallel steps in the group
	for _, step := range stepGroup {
		childNodeName := fmt.Sprintf("%s.%s", sgNodeName, step.Name)
		woc.addChildNode(sgNodeName, childNodeName)
		// Check the step's when clause to decide if it should execute
		proceed, err := shouldExecute(step.When)
		if err != nil {
			// a bad 'when' expression fails both the child and the group
			woc.markNodeError(childNodeName, err)
			woc.markNodeError(sgNodeName, err)
			return err
		}
		if !proceed {
			skipReason := fmt.Sprintf("when '%s' evaluated false", step.When)
			woc.log.Infof("Skipping %s: %s", childNodeName, skipReason)
			woc.markNodePhase(childNodeName, wfv1.NodeSkipped, skipReason)
			continue
		}
		err = woc.executeTemplate(step.Template, step.Arguments, childNodeName)
		if err != nil {
			woc.markNodeError(childNodeName, err)
			woc.markNodeError(sgNodeName, err)
			return err
		}
	}
	// re-read the node: children were added above
	node = woc.wf.Status.Nodes[nodeID]
	// Return if not all children completed
	for _, childNodeID := range node.Children {
		if !woc.wf.Status.Nodes[childNodeID].Completed() {
			return nil
		}
	}
	// All children completed. Determine step group status as a whole
	for _, childNodeID := range node.Children {
		childNode := woc.wf.Status.Nodes[childNodeID]
		if !childNode.Successful() {
			failMessage := fmt.Sprintf("child '%s' failed", childNodeID)
			woc.markNodePhase(sgNodeName, wfv1.NodeFailed, failMessage)
			woc.log.Infof("Step group node %s deemed failed: %s", childNode, failMessage)
			return nil
		}
	}
	woc.markNodePhase(node.Name, wfv1.NodeSucceeded)
	woc.log.Infof("Step group node %v successful", woc.wf.Status.Nodes[nodeID])
	return nil
}
// whenExpression matches "<lhs> == <rhs>" or "<lhs> != <rhs>" (compiled once at package scope).
var whenExpression = regexp.MustCompile("^(.*)(==|!=)(.*)$")

// shouldExecute evaluates an already-substituted 'when' expression and reports
// whether the step should run. An empty expression always executes.
func shouldExecute(when string) (bool, error) {
	if when == "" {
		return true, nil
	}
	match := whenExpression.FindStringSubmatch(when)
	if len(match) == 0 {
		return false, errors.Errorf(errors.CodeBadRequest, "Invalid 'when' expression: %s", when)
	}
	lhs := strings.TrimSpace(match[1])
	rhs := strings.TrimSpace(match[3])
	switch op := match[2]; op {
	case "==":
		return lhs == rhs, nil
	case "!=":
		return lhs != rhs, nil
	default:
		// unreachable: the regexp only admits the two operators above
		return false, errors.Errorf(errors.CodeBadRequest, "Unknown operator: %s", op)
	}
}
// resolveReferences replaces any references to outputs of previous steps, or artifacts in the inputs
// NOTE: by now, input parameters should have been substituted throughout the template, so we only
// are concerned with:
// 1) dereferencing output.parameters from previous steps
// 2) dereferencing output.result from previous steps
// 3) dereferencing artifacts from previous steps
// 4) dereferencing artifacts from inputs
func (woc *wfOperationCtx) resolveReferences(stepGroup []wfv1.WorkflowStep, scope *wfScope) ([]wfv1.WorkflowStep, error) {
	newStepGroup := make([]wfv1.WorkflowStep, len(stepGroup))
	for i, step := range stepGroup {
		// Step 1: replace all parameter scope references in the step
		// TODO: improve this
		// round-trip through JSON so {{...}} templating can be applied to every string field at once
		stepBytes, err := json.Marshal(step)
		if err != nil {
			return nil, errors.InternalWrapError(err)
		}
		// only string-valued scope entries are substitutable as parameters;
		// artifacts in the scope are resolved separately below
		replaceMap := make(map[string]string)
		for key, val := range scope.scope {
			valStr, ok := val.(string)
			if ok {
				replaceMap[key] = valStr
			}
		}
		fstTmpl := fasttemplate.New(string(stepBytes), "{{", "}}")
		newStepStr, err := common.Replace(fstTmpl, replaceMap, true)
		if err != nil {
			return nil, err
		}
		var newStep wfv1.WorkflowStep
		err = json.Unmarshal([]byte(newStepStr), &newStep)
		if err != nil {
			return nil, errors.InternalWrapError(err)
		}
		// Step 2: replace all artifact references
		for j, art := range newStep.Arguments.Artifacts {
			if art.From == "" {
				continue
			}
			resolvedArt, err := scope.resolveArtifact(art.From)
			if err != nil {
				return nil, err
			}
			// keep the argument's own name; only the source is taken from scope
			resolvedArt.Name = art.Name
			newStep.Arguments.Artifacts[j] = *resolvedArt
		}
		newStepGroup[i] = newStep
	}
	return newStepGroup, nil
}
// expandStepGroup looks at each step in a collection of parallel steps, and expands all steps using withItems/withParam.
// Steps without withItems/withParam are passed through unchanged.
func (woc *wfOperationCtx) expandStepGroup(stepGroup []wfv1.WorkflowStep) ([]wfv1.WorkflowStep, error) {
	newStepGroup := make([]wfv1.WorkflowStep, 0)
	for _, step := range stepGroup {
		if len(step.WithItems) == 0 && step.WithParam == "" {
			newStepGroup = append(newStepGroup, step)
			continue
		}
		expandedStep, err := woc.expandStep(step)
		if err != nil {
			return nil, err
		}
		// idiom: variadic append replaces the original element-by-element loop
		newStepGroup = append(newStepGroup, expandedStep...)
	}
	return newStepGroup, nil
}
// expandStep expands a step containing withItems or withParams into multiple parallel steps.
// Each expanded step gets a deterministic name derived from the item value(s), and the
// {{item}} / {{item.KEY}} placeholders in its serialized form are substituted per item.
func (woc *wfOperationCtx) expandStep(step wfv1.WorkflowStep) ([]wfv1.WorkflowStep, error) {
	stepBytes, err := json.Marshal(step)
	if err != nil {
		return nil, errors.InternalWrapError(err)
	}
	fstTmpl := fasttemplate.New(string(stepBytes), "{{", "}}")
	expandedStep := make([]wfv1.WorkflowStep, 0)
	var items []wfv1.Item
	if len(step.WithItems) > 0 {
		items = step.WithItems
	} else if step.WithParam != "" {
		err = json.Unmarshal([]byte(step.WithParam), &items)
		if err != nil {
			// BUGFIX: original message read "value not be parsed"
			return nil, errors.Errorf(errors.CodeBadRequest, "withParam value could not be parsed as a JSON list: %s", step.WithParam)
		}
	} else {
		// this should have been prevented in expandStepGroup()
		return nil, errors.InternalError("expandStep() was called with withItems and withParam empty")
	}
	for i, item := range items {
		replaceMap := make(map[string]string)
		var newStepName string
		switch val := item.(type) {
		case string, int32, int64, float32, float64:
			// scalar item: substitutes {{item}} and names the step "<name>(<value>)"
			replaceMap["item"] = fmt.Sprintf("%v", val)
			newStepName = fmt.Sprintf("%s(%v)", step.Name, val)
		case map[string]interface{}:
			// Handle the case when withItems is a list of maps.
			// vals holds stringified versions of the map items which are incorporated as part of the step name.
			// For example if the item is: {"name": "jesse","group":"developer"}
			// the vals would be: ["name:jesse", "group:developer"]
			// This would eventually be part of the step name (group:developer,name:jesse)
			vals := make([]string, 0)
			for itemKey, itemValIf := range val {
				switch itemVal := itemValIf.(type) {
				case string, int32, int64, float32, float64:
					replaceMap[fmt.Sprintf("item.%s", itemKey)] = fmt.Sprintf("%v", itemVal)
					vals = append(vals, fmt.Sprintf("%s:%s", itemKey, itemVal))
				default:
					return nil, errors.Errorf(errors.CodeBadRequest, "withItems[%d][%s] expected string or number. received: %s", i, itemKey, itemVal)
				}
			}
			// sort the values so that the name is deterministic
			sort.Strings(vals)
			newStepName = fmt.Sprintf("%s(%v)", step.Name, strings.Join(vals, ","))
		default:
			return nil, errors.Errorf(errors.CodeBadRequest, "withItems[%d] expected string, number, or map. received: %s", i, val)
		}
		newStepStr, err := common.Replace(fstTmpl, replaceMap, false)
		if err != nil {
			return nil, err
		}
		var newStep wfv1.WorkflowStep
		err = json.Unmarshal([]byte(newStepStr), &newStep)
		if err != nil {
			return nil, errors.InternalWrapError(err)
		}
		newStep.Name = newStepName
		expandedStep = append(expandedStep, newStep)
	}
	return expandedStep, nil
}
// executeScript creates the pod for a script template and marks the node
// Running on success, or Error if pod creation failed.
func (woc *wfOperationCtx) executeScript(nodeName string, tmpl *wfv1.Template) error {
	err := woc.createWorkflowPod(nodeName, tmpl)
	if err != nil {
		woc.markNodeError(nodeName, err)
		return err
	}
	node := woc.markNodePhase(nodeName, wfv1.NodeRunning)
	// BUGFIX: log previously said "container node" (copy-paste from executeContainer)
	woc.log.Infof("Initialized script node %v", node)
	return nil
}
// addParamToScope records a string-valued parameter under the given scope key.
func (wfs *wfScope) addParamToScope(key, val string) {
	wfs.scope[key] = val
}
// addArtifactToScope records an artifact under the given scope key.
func (wfs *wfScope) addArtifactToScope(key string, artifact wfv1.Artifact) {
	wfs.scope[key] = artifact
}
// resolveVar resolves a "{{...}}" variable reference against the scope.
// "steps.*" keys are looked up directly; anything else is treated as an
// input-artifact reference of the enclosing template.
func (wfs *wfScope) resolveVar(v string) (interface{}, error) {
	v = strings.TrimPrefix(v, "{{")
	v = strings.TrimSuffix(v, "}}")
	if strings.HasPrefix(v, "steps.") {
		val, ok := wfs.scope[v]
		if !ok {
			return nil, errors.Errorf(errors.CodeBadRequest, "Unable to resolve: {{%s}}", v)
		}
		return val, nil
	}
	parts := strings.Split(v, ".")
	// HACK (assuming it is an input artifact)
	// NOTE(review): parts[2] assumes the form "inputs.artifacts.<name>"; a
	// shorter reference would panic here — confirm callers always pass that shape.
	art := wfs.tmpl.Inputs.GetArtifactByName(parts[2])
	if art != nil {
		return *art, nil
	}
	return nil, errors.Errorf(errors.CodeBadRequest, "Unable to resolve input artifact: {{%s}}", v)
}
// resolveParameter resolves a variable reference and requires the result to be
// a string parameter; a non-string (e.g. artifact) value is a bad request.
func (wfs *wfScope) resolveParameter(v string) (string, error) {
	resolved, err := wfs.resolveVar(v)
	if err != nil {
		return "", err
	}
	s, ok := resolved.(string)
	if !ok {
		return "", errors.Errorf(errors.CodeBadRequest, "Variable {{%s}} is not a string", v)
	}
	return s, nil
}
// resolveArtifact resolves a variable reference and requires the result to be
// an artifact; a non-artifact (e.g. string parameter) value is a bad request.
func (wfs *wfScope) resolveArtifact(v string) (*wfv1.Artifact, error) {
	resolved, err := wfs.resolveVar(v)
	if err != nil {
		return nil, err
	}
	art, ok := resolved.(wfv1.Artifact)
	if !ok {
		return nil, errors.Errorf(errors.CodeBadRequest, "Variable {{%s}} is not an artifact", v)
	}
	return &art, nil
}
// addChildNode adds a nodeID as a child to a parent
func (woc *wfOperationCtx) addChildNode(parent string, child string) {
	parentID := woc.wf.NodeID(parent)
	childID := woc.wf.NodeID(child)
	node, ok := woc.wf.Status.Nodes[parentID]
	if !ok {
		// programmer error: parents are always created before their children
		panic(fmt.Sprintf("parent node %s not initialized", parent))
	}
	if node.Children == nil {
		node.Children = make([]string, 0)
	}
	// idempotent: re-adding an existing child is a no-op
	for _, nodeID := range node.Children {
		if childID == nodeID {
			// already exists
			return
		}
	}
	node.Children = append(node.Children, childID)
	woc.wf.Status.Nodes[parentID] = node
	woc.updated = true
}
// killDeamonedChildren kills any grandchildren of a step template node, which have been daemoned.
// We only need to check grandchildren instead of children because the direct children of a step
// template are actually stepGroups, which are nodes that cannot represent actual containers.
// Returns the first error that occurs (if any)
func (woc *wfOperationCtx) killDeamonedChildren(nodeID string) error {
	woc.log.Infof("Checking deamon children of %s", nodeID)
	var firstErr error
	for _, childNodeID := range woc.wf.Status.Nodes[nodeID].Children {
		for _, grandChildID := range woc.wf.Status.Nodes[childNodeID].Children {
			gcNode := woc.wf.Status.Nodes[grandChildID]
			if gcNode.Daemoned == nil || !*gcNode.Daemoned {
				continue
			}
			// best effort: keep going after a failed kill, remembering the first error
			err := common.KillPodContainer(woc.controller.restConfig, woc.wf.ObjectMeta.Namespace, gcNode.ID, common.MainContainerName)
			if err != nil {
				woc.log.Errorf("Failed to kill %s: %+v", gcNode, err)
				if firstErr == nil {
					firstErr = err
				}
			}
		}
	}
	return firstErr
}
|
package models
import (
"github.com/jinzhu/gorm"
)
// Review model represents one user's review of another for a completed engagement.
type Review struct {
	gorm.Model
	Comment string
	Role string // Either Employer or Employee
	// FromUserID is the reviewer; ToUserID is the user being reviewed.
	FromUserID uint
	ToUserID uint
	ReputationData ReputationData
}
|
package main
import (
"encoding/json"
"fmt"
)
// Monster has no JSON tags, so Marshal emits the exported (capitalized) field names.
type Monster struct {
	Name string
	Age int
	Skill string
}
// Hero carries json tags, so Marshal emits lowercase keys.
type Hero struct {
	Name string `json:"name"`
	Age int `json:"age"`
	Skill string `json:"skill"`
}
// main demonstrates json.Marshal on a struct without tags (Monster: capitalized
// keys) versus one with tags (Hero: lowercase keys).
func main() {
	m := Monster{
		Name:  "monster",
		Age:   12,
		Skill: "kill",
	}
	// Marshal returns the JSON encoding as a byte slice.
	mJSON, err := json.Marshal(m)
	if err != nil {
		fmt.Println("jsonMonster error:", err)
	}
	fmt.Println("jsonMonster:", string(mJSON))
	h := Hero{
		Name:  "hero",
		Age:   12,
		Skill: "kill",
	}
	// Same encoding, but the struct tags lowercase the keys.
	hJSON, err := json.Marshal(h)
	if err != nil {
		fmt.Println("jsonHero error:", err)
	}
	fmt.Println("jsonHero:", string(hJSON))
}
|
package operatorlister
import (
"fmt"
"sync"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/client-go/listers/core/v1"
)
// UnionPodLister aggregates per-namespace PodListers behind a single lister
// interface. Access to the lister map is guarded by podLock.
type UnionPodLister struct {
	podListers map[string]corev1.PodLister
	podLock sync.RWMutex
}
// List lists all Pods in the indexer, merging results from every registered
// lister and de-duplicating by pod UID.
func (usl *UnionPodLister) List(selector labels.Selector) (ret []*v1.Pod, err error) {
	usl.podLock.RLock()
	defer usl.podLock.RUnlock()
	unique := map[types.UID]*v1.Pod{}
	for _, lister := range usl.podListers {
		listed, listErr := lister.List(selector)
		if listErr != nil {
			return nil, listErr
		}
		for _, p := range listed {
			unique[p.GetUID()] = p
		}
	}
	for _, p := range unique {
		ret = append(ret, p)
	}
	return ret, nil
}
// Pods returns an object that can list and get Pods.
func (usl *UnionPodLister) Pods(namespace string) corev1.PodNamespaceLister {
	usl.podLock.RLock()
	defer usl.podLock.RUnlock()
	// Check for specific namespace listers
	if sl, ok := usl.podListers[namespace]; ok {
		return sl.Pods(namespace)
	}
	// Check for any namespace-all listers
	if sl, ok := usl.podListers[metav1.NamespaceAll]; ok {
		return sl.Pods(namespace)
	}
	// no lister registered: return a null object rather than nil
	return &NullPodNamespaceLister{}
}
// RegisterPodLister installs (or replaces) the lister serving the given
// namespace, lazily allocating the lister map on first registration.
func (usl *UnionPodLister) RegisterPodLister(namespace string, lister corev1.PodLister) {
	usl.podLock.Lock()
	defer usl.podLock.Unlock()
	if usl.podListers == nil {
		usl.podListers = map[string]corev1.PodLister{}
	}
	usl.podListers[namespace] = lister
}
// RegisterPodLister delegates pod-lister registration to the underlying union lister.
func (l *coreV1Lister) RegisterPodLister(namespace string, lister corev1.PodLister) {
	l.podLister.RegisterPodLister(namespace, lister)
}
// PodLister returns the aggregated pod lister.
func (l *coreV1Lister) PodLister() corev1.PodLister {
	return l.podLister
}
// NullPodNamespaceLister is an implementation of a null PodNamespaceLister. It is
// used to prevent nil pointers when no PodNamespaceLister has been registered for a given
// namespace.
type NullPodNamespaceLister struct {
	corev1.PodNamespaceLister
}
// List returns nil and an error explaining that this is a NullPodNamespaceLister.
func (n *NullPodNamespaceLister) List(selector labels.Selector) (ret []*v1.Pod, err error) {
	return nil, fmt.Errorf("cannot list Pods with a NullPodNamespaceLister")
}
// Get returns nil and an error explaining that this is a NullPodNamespaceLister.
func (n *NullPodNamespaceLister) Get(name string) (*v1.Pod, error) {
	return nil, fmt.Errorf("cannot get Pod with a NullPodNamespaceLister")
}
|
// isValid reports whether s consists of correctly matched and nested brackets
// ()[]{}. Any other character (or any mismatch) makes the string invalid.
//
// BUGFIX: the original used make([]byte, len(s)), pre-filling the stack with
// len(s) zero bytes. Those zeros matched the zero-value lookup of unknown
// closing characters (e.g. isValid("x(") incorrectly returned true), and made
// the auxiliary height counter necessary. The stack now starts empty with
// capacity len(s), and the stack length alone decides validity.
func isValid(s string) bool {
	// maps each closing bracket to its required opener
	pairs := map[byte]byte{
		')': '(',
		'}': '{',
		']': '[',
	}
	stack := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		ch := s[i]
		if ch == '(' || ch == '[' || ch == '{' {
			stack = append(stack, ch)
			continue
		}
		// closing (or unknown) character: top of stack must be its opener
		if len(stack) == 0 || stack[len(stack)-1] != pairs[ch] {
			return false
		}
		stack = stack[:len(stack)-1]
	}
	return len(stack) == 0
}
|
package xpen
import (
"encoding/json"
//log "github.com/cihub/seelog"
)
// User holds a chat participant's identity.
type User struct {
	Nick string
	Email string
}
// Message is a single chat message with its content, timestamp, and sender.
type Message struct {
	Content string
	Time string
	User User
}
// Msg is the protocol envelope exchanged over the websocket.
type Msg struct {
	// Command: "login", "chat", "users" (user list), "init" (fetch history), "logout"
	Command string
	// Message history / batch
	Messages []Message
	// Online user list
	Users []User
	// Sender of the message
	Source User
	// Origin pointer
	Pointer string
	// Target user
	To string
}
// WsData wraps a Msg for websocket transport.
type WsData struct {
	Message Msg
}
// toJson serializes the message envelope to its JSON string form.
func (m Msg) toJson() (string, error) {
	data, err := json.Marshal(m)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// ReadMsg parses a JSON string into a Msg envelope.
// NOTE(review): the json.Unmarshal error is silently discarded — malformed
// input yields a zero-valued Msg. Returning the error would change the
// signature for existing callers; flagging rather than fixing here.
func ReadMsg(str string) Msg {
	var s Msg
	json.Unmarshal([]byte(str), &s)
	return s
}
|
package service
import (
"github.com/keybase/client/go/libkb"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"golang.org/x/net/context"
)
// CancellingProtocol wraps every handler of an RPC protocol so its context is
// registered with the global RPCCanceller for the duration of the call,
// allowing in-flight RPCs to be cancelled externally.
func CancellingProtocol(g *libkb.GlobalContext, prot rpc.Protocol) (res rpc.Protocol) {
	res.Name = prot.Name
	res.WrapError = prot.WrapError
	res.Methods = make(map[string]rpc.ServeHandlerDescription)
	for name, ldesc := range prot.Methods {
		var newDesc rpc.ServeHandlerDescription
		desc := ldesc // per-iteration copy: the closure below must not capture the shared loop variable
		newDesc.MakeArg = desc.MakeArg
		newDesc.MethodType = desc.MethodType
		newDesc.Handler = func(ctx context.Context, arg interface{}) (interface{}, error) {
			var ctxID libkb.RPCCancellerKey
			// register on entry, unregister when the handler returns
			ctx, ctxID = g.RPCCanceller.RegisterContext(ctx)
			defer g.RPCCanceller.UnregisterContext(ctxID)
			return desc.Handler(ctx, arg)
		}
		res.Methods[name] = newDesc
	}
	return res
}
|
package main
import (
"fmt"
"log"
"net/rpc"
"os"
)
// Args is the argument pair sent to the remote Math service.
type Args struct {
	A, B int
}
// Math mirrors the server-side RPC service type (presumably — verify against the server).
type Math int
// Quotient carries a division result. NOTE(review): "Remem" looks like a typo
// for "Rem", but renaming would break gob field matching with the server.
type Quotient struct {
	Quo, Remem int
}
// main dials the Math RPC server given on the command line and invokes
// Math.Multiply with a fixed argument pair, printing the result.
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Usage:", os.Args[0], "server")
		// BUGFIX: previously fell through and panicked indexing os.Args[1]
		os.Exit(1)
	}
	serverAddr := os.Args[1]
	// client, err := rpc.DialHTTP("tcp", serverAddr+":8080")
	client, err := rpc.Dial("tcp", serverAddr+":8080")
	if err != nil {
		log.Fatal("dialing: ", err)
	}
	args := Args{17, 8}
	var reply int
	err = client.Call("Math.Multiply", args, &reply)
	if err != nil {
		log.Fatal("call trouble--", err)
	}
	fmt.Printf("the result is: %d\n", reply)
}
|
package x
// GENERATED BY XO. DO NOT EDIT.
import (
"errors"
"strings"
//"time"
"ms/sun/shared/helper"
"strconv"
"github.com/jmoiron/sqlx"
)
// (shortname .TableNameGo "err" "res" "sqlstr" "db" "XOLog") -}}//(schema .Schema .Table.TableName) -}}// .TableNameGo}}// GroupOrderdUser represents a row from 'sun_chat.group_orderd_user'.
// Manually copy this to project
// GroupOrderdUser__ represents a row from 'sun_chat.group_orderd_user'.
type GroupOrderdUser__ struct {
	OrderId int `json:"OrderId"` // OrderId -
	GroupId int `json:"GroupId"` // GroupId -
	UserId int `json:"UserId"` // UserId -
	// xo fields: _exists means the row is persisted; _deleted means it was removed
	_exists, _deleted bool
}
// Exists determines if the GroupOrderdUser exists in the database.
func (gou *GroupOrderdUser) Exists() bool {
	return gou._exists
}
// Deleted provides information if the GroupOrderdUser has been deleted from the database.
func (gou *GroupOrderdUser) Deleted() bool {
	return gou._deleted
}
// Insert inserts the GroupOrderdUser to the database.
// NOTE(review): unlike Replace/Delete, an Exec failure here is not logged via XOLogErr.
func (gou *GroupOrderdUser) Insert(db XODB) error {
	var err error
	// if already exist, bail
	if gou._exists {
		return errors.New("insert failed: already exists")
	}
	// sql insert query, primary key must be provided
	const sqlstr = `INSERT INTO sun_chat.group_orderd_user (` +
		`OrderId, GroupId, UserId` +
		`) VALUES (` +
		`?, ?, ?` +
		`)`
	// run query
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, gou.OrderId, gou.GroupId, gou.UserId)
	}
	_, err = db.Exec(sqlstr, gou.OrderId, gou.GroupId, gou.UserId)
	if err != nil {
		return err
	}
	// set existence
	gou._exists = true
	OnGroupOrderdUser_AfterInsert(gou)
	return nil
}
// Replace upserts the GroupOrderdUser via REPLACE INTO (insert, or delete+insert on key conflict).
func (gou *GroupOrderdUser) Replace(db XODB) error {
	var err error
	// sql query
	const sqlstr = `REPLACE INTO sun_chat.group_orderd_user (` +
		`OrderId, GroupId, UserId` +
		`) VALUES (` +
		`?, ?, ?` +
		`)`
	// run query
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, gou.OrderId, gou.GroupId, gou.UserId)
	}
	_, err = db.Exec(sqlstr, gou.OrderId, gou.GroupId, gou.UserId)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return err
	}
	gou._exists = true
	OnGroupOrderdUser_AfterInsert(gou)
	return nil
}
// Update updates the GroupOrderdUser in the database.
// NOTE(review): XOLogErr is invoked even when err is nil, and the AfterUpdate
// hook fires regardless of Exec failure — confirm that is intended.
func (gou *GroupOrderdUser) Update(db XODB) error {
	var err error
	// if doesn't exist, bail
	if !gou._exists {
		return errors.New("update failed: does not exist")
	}
	// if deleted, bail
	if gou._deleted {
		return errors.New("update failed: marked for deletion")
	}
	// sql query
	const sqlstr = `UPDATE sun_chat.group_orderd_user SET ` +
		`GroupId = ?, UserId = ?` +
		` WHERE OrderId = ?`
	// run query
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, gou.GroupId, gou.UserId, gou.OrderId)
	}
	_, err = db.Exec(sqlstr, gou.GroupId, gou.UserId, gou.OrderId)
	if LogTableSqlReq.GroupOrderdUser {
		XOLogErr(err)
	}
	OnGroupOrderdUser_AfterUpdate(gou)
	return err
}
// Save saves the GroupOrderdUser to the database: Update when the row already
// exists, otherwise Replace (not Insert).
func (gou *GroupOrderdUser) Save(db XODB) error {
	if gou.Exists() {
		return gou.Update(db)
	}
	return gou.Replace(db)
}
// Delete deletes the GroupOrderdUser from the database.
// Non-existent or already-deleted rows are a silent no-op.
func (gou *GroupOrderdUser) Delete(db XODB) error {
	var err error
	// if doesn't exist, bail
	if !gou._exists {
		return nil
	}
	// if deleted, bail
	if gou._deleted {
		return nil
	}
	// sql query
	const sqlstr = `DELETE FROM sun_chat.group_orderd_user WHERE OrderId = ?`
	// run query
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, gou.OrderId)
	}
	_, err = db.Exec(sqlstr, gou.OrderId)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return err
	}
	// set deleted
	gou._deleted = true
	OnGroupOrderdUser_AfterDelete(gou)
	return nil
}
////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////// Querify gen - ME /////////////////////////////////////////
//.TableNameGo= table name
// _Deleter, _Updater
// orma types
// __GroupOrderdUser_Deleter accumulates WHERE clauses for a DELETE query builder.
type __GroupOrderdUser_Deleter struct {
	wheres []whereClause
	whereSep string // separator between where clauses (" AND " / " OR ")
	dollarIndex int // next placeholder index for $N-style databases
	isMysql bool // true: use "?" placeholders; false: use "$N"
}
// __GroupOrderdUser_Updater accumulates SET columns and WHERE clauses for an UPDATE query builder.
type __GroupOrderdUser_Updater struct {
	wheres []whereClause
	// updates map[string]interface{}
	updates []updateCol
	whereSep string // separator between where clauses (" AND " / " OR ")
	dollarIndex int
	isMysql bool
}
// __GroupOrderdUser_Selector accumulates the pieces of a SELECT
// against sun_chat.group_orderd_user: columns, WHERE clauses,
// ordering, limit and offset.
type __GroupOrderdUser_Selector struct {
	wheres    []whereClause // accumulated WHERE conditions
	selectCol string        // column list for SELECT; defaults to "*"
	whereSep  string        // separator between conditions: " AND " or " OR "
	orderBy   string        //" order by id desc //for ints
	limit     int           // LIMIT value; 0 means no LIMIT clause
	offset    int           // OFFSET value; 0 means no OFFSET clause
	dollarIndex int         // next $N placeholder index (non-MySQL drivers)
	isMysql   bool          // true → "?" placeholders, false → "$N"
}
// NewGroupOrderdUser_Deleter returns a fresh deleter that joins its
// WHERE conditions with AND and uses MySQL-style "?" placeholders.
func NewGroupOrderdUser_Deleter() *__GroupOrderdUser_Deleter {
	return &__GroupOrderdUser_Deleter{whereSep: " AND ", isMysql: true}
}
// NewGroupOrderdUser_Updater returns a fresh updater that joins its
// WHERE conditions with AND and uses MySQL-style "?" placeholders.
func NewGroupOrderdUser_Updater() *__GroupOrderdUser_Updater {
	return &__GroupOrderdUser_Updater{whereSep: " AND ", isMysql: true}
}
// NewGroupOrderdUser_Selector returns a fresh selector that selects
// all columns, joins its WHERE conditions with AND and uses
// MySQL-style "?" placeholders.
func NewGroupOrderdUser_Selector() *__GroupOrderdUser_Selector {
	return &__GroupOrderdUser_Selector{whereSep: " AND ", selectCol: "*", isMysql: true}
}
/*/// mysql or cockroach ? or $1 handlers
func (m *__GroupOrderdUser_Selector)nextDollars(size int) string {
r := DollarsForSqlIn(size,m.dollarIndex,m.isMysql)
m.dollarIndex += size
return r
}
func (m *__GroupOrderdUser_Selector)nextDollar() string {
r := DollarsForSqlIn(1,m.dollarIndex,m.isMysql)
m.dollarIndex += 1
return r
}
*/
/////////////////////////////// Where for all /////////////////////////////
//// for ints all selector updater, deleter
/// mysql or cockroach ? or $1 handlers
// nextDollars returns the placeholder text for size bound parameters
// ("?, ?, ..." on MySQL, "$N, $N+1, ..." otherwise) and advances
// dollarIndex by size.
func (m *__GroupOrderdUser_Deleter) nextDollars(size int) string {
	r := DollarsForSqlIn(size, m.dollarIndex, m.isMysql)
	m.dollarIndex += size
	return r
}

// nextDollar returns the placeholder for a single bound parameter and
// advances dollarIndex by one.
func (m *__GroupOrderdUser_Deleter) nextDollar() string {
	r := DollarsForSqlIn(1, m.dollarIndex, m.isMysql)
	m.dollarIndex += 1
	return r
}
////////ints
// Or switches the separator used between subsequent WHERE clauses
// from the default " AND " to " OR " for this deleter.
func (u *__GroupOrderdUser_Deleter) Or() *__GroupOrderdUser_Deleter {
	u.whereSep = " OR "
	return u
}

// OrderId filters: IN / variadic IN / NOT IN and the =, !=, <, <=, >,
// >= comparisons. Each call appends one whereClause and returns the
// receiver for chaining. NOTE(review): an empty ins slice produces
// "IN()" — invalid SQL; callers are expected to pass at least one id.
func (u *__GroupOrderdUser_Deleter) OrderId_In(ins []int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " OrderId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Deleter) OrderId_Ins(ins ...int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " OrderId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Deleter) OrderId_NotIn(ins []int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " OrderId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (d *__GroupOrderdUser_Deleter) OrderId_Eq(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) OrderId_NotEq(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) OrderId_LT(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) OrderId_LE(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) OrderId_GT(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) OrderId_GE(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// GroupId filters — same shape as the OrderId family above.
func (u *__GroupOrderdUser_Deleter) GroupId_In(ins []int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " GroupId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Deleter) GroupId_Ins(ins ...int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " GroupId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Deleter) GroupId_NotIn(ins []int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " GroupId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (d *__GroupOrderdUser_Deleter) GroupId_Eq(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) GroupId_NotEq(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) GroupId_LT(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) GroupId_LE(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) GroupId_GT(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) GroupId_GE(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// UserId filters — same shape as the OrderId family above.
func (u *__GroupOrderdUser_Deleter) UserId_In(ins []int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " UserId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Deleter) UserId_Ins(ins ...int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " UserId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Deleter) UserId_NotIn(ins []int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " UserId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (d *__GroupOrderdUser_Deleter) UserId_Eq(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) UserId_NotEq(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) UserId_LT(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) UserId_LE(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) UserId_GT(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Deleter) UserId_GE(val int) *__GroupOrderdUser_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
/// mysql or cockroach ? or $1 handlers
// nextDollars returns the placeholder text for size bound parameters
// ("?, ?, ..." on MySQL, "$N, $N+1, ..." otherwise) and advances
// dollarIndex by size.
func (m *__GroupOrderdUser_Updater) nextDollars(size int) string {
	r := DollarsForSqlIn(size, m.dollarIndex, m.isMysql)
	m.dollarIndex += size
	return r
}

// nextDollar returns the placeholder for a single bound parameter and
// advances dollarIndex by one.
func (m *__GroupOrderdUser_Updater) nextDollar() string {
	r := DollarsForSqlIn(1, m.dollarIndex, m.isMysql)
	m.dollarIndex += 1
	return r
}
////////ints
// Or switches the separator used between subsequent WHERE clauses
// from the default " AND " to " OR " for this updater.
func (u *__GroupOrderdUser_Updater) Or() *__GroupOrderdUser_Updater {
	u.whereSep = " OR "
	return u
}

// OrderId filters: IN / variadic IN / NOT IN and the =, !=, <, <=, >,
// >= comparisons. Each call appends one whereClause and returns the
// receiver for chaining. NOTE(review): an empty ins slice produces
// "IN()" — invalid SQL; callers are expected to pass at least one id.
func (u *__GroupOrderdUser_Updater) OrderId_In(ins []int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " OrderId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Updater) OrderId_Ins(ins ...int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " OrderId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Updater) OrderId_NotIn(ins []int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " OrderId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (d *__GroupOrderdUser_Updater) OrderId_Eq(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) OrderId_NotEq(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) OrderId_LT(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) OrderId_LE(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) OrderId_GT(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) OrderId_GE(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// GroupId filters — same shape as the OrderId family above.
func (u *__GroupOrderdUser_Updater) GroupId_In(ins []int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " GroupId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Updater) GroupId_Ins(ins ...int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " GroupId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Updater) GroupId_NotIn(ins []int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " GroupId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (d *__GroupOrderdUser_Updater) GroupId_Eq(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) GroupId_NotEq(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) GroupId_LT(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) GroupId_LE(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) GroupId_GT(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) GroupId_GE(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// UserId filters — same shape as the OrderId family above.
func (u *__GroupOrderdUser_Updater) UserId_In(ins []int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " UserId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Updater) UserId_Ins(ins ...int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " UserId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Updater) UserId_NotIn(ins []int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " UserId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (d *__GroupOrderdUser_Updater) UserId_Eq(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) UserId_NotEq(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) UserId_LT(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) UserId_LE(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) UserId_GT(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Updater) UserId_GE(val int) *__GroupOrderdUser_Updater {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
/// mysql or cockroach ? or $1 handlers
// nextDollars returns the placeholder text for size bound parameters
// ("?, ?, ..." on MySQL, "$N, $N+1, ..." otherwise) and advances
// dollarIndex by size.
func (m *__GroupOrderdUser_Selector) nextDollars(size int) string {
	r := DollarsForSqlIn(size, m.dollarIndex, m.isMysql)
	m.dollarIndex += size
	return r
}

// nextDollar returns the placeholder for a single bound parameter and
// advances dollarIndex by one.
func (m *__GroupOrderdUser_Selector) nextDollar() string {
	r := DollarsForSqlIn(1, m.dollarIndex, m.isMysql)
	m.dollarIndex += 1
	return r
}
////////ints
// Or switches the separator used between subsequent WHERE clauses
// from the default " AND " to " OR " for this selector.
func (u *__GroupOrderdUser_Selector) Or() *__GroupOrderdUser_Selector {
	u.whereSep = " OR "
	return u
}

// OrderId filters: IN / variadic IN / NOT IN and the =, !=, <, <=, >,
// >= comparisons. Each call appends one whereClause and returns the
// receiver for chaining. NOTE(review): an empty ins slice produces
// "IN()" — invalid SQL; callers are expected to pass at least one id.
func (u *__GroupOrderdUser_Selector) OrderId_In(ins []int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " OrderId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Selector) OrderId_Ins(ins ...int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " OrderId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Selector) OrderId_NotIn(ins []int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " OrderId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (d *__GroupOrderdUser_Selector) OrderId_Eq(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) OrderId_NotEq(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) OrderId_LT(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) OrderId_LE(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) OrderId_GT(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) OrderId_GE(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " OrderId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// GroupId filters — same shape as the OrderId family above.
func (u *__GroupOrderdUser_Selector) GroupId_In(ins []int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " GroupId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Selector) GroupId_Ins(ins ...int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " GroupId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Selector) GroupId_NotIn(ins []int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " GroupId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (d *__GroupOrderdUser_Selector) GroupId_Eq(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) GroupId_NotEq(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) GroupId_LT(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) GroupId_LE(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) GroupId_GT(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) GroupId_GE(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " GroupId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

// UserId filters — same shape as the OrderId family above.
func (u *__GroupOrderdUser_Selector) UserId_In(ins []int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " UserId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Selector) UserId_Ins(ins ...int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " UserId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (u *__GroupOrderdUser_Selector) UserId_NotIn(ins []int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " UserId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}
func (d *__GroupOrderdUser_Selector) UserId_Eq(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) UserId_NotEq(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) UserId_LT(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) UserId_LE(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) UserId_GT(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
func (d *__GroupOrderdUser_Selector) UserId_GE(val int) *__GroupOrderdUser_Selector {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " UserId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
///// for strings //copy of above with type int -> string + rm if eq + $ms_str_cond
////////ints
////////ints
////////ints
/// End of wheres for selectors , updators, deletor
/////////////////////////////// Updater /////////////////////////////
//ints
// OrderId queues an "OrderId = ?" assignment for the bulk update.
func (u *__GroupOrderdUser_Updater) OrderId(newVal int) *__GroupOrderdUser_Updater {
	up := updateCol{" OrderId = " + u.nextDollar(), newVal}
	u.updates = append(u.updates, up)
	return u
}

// OrderId_Increment queues "OrderId = OrderId + |count|" for positive
// count and "OrderId = OrderId - |count|" for negative count; zero is
// a no-op.
//
// BUG FIX: the generated code bound the raw negative count to the
// "col- ?" template, producing "col - (-n)" which *added* n instead
// of subtracting it. The bound value is now negated (-count) so the
// SQL subtracts a positive amount, matching the generator's own
// commented intent ("make it positive").
func (u *__GroupOrderdUser_Updater) OrderId_Increment(count int) *__GroupOrderdUser_Updater {
	if count > 0 {
		up := updateCol{" OrderId = OrderId+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		up := updateCol{" OrderId = OrderId- " + u.nextDollar(), -count} // make it positive
		u.updates = append(u.updates, up)
	}
	return u
}

// GroupId queues a "GroupId = ?" assignment for the bulk update.
func (u *__GroupOrderdUser_Updater) GroupId(newVal int) *__GroupOrderdUser_Updater {
	up := updateCol{" GroupId = " + u.nextDollar(), newVal}
	u.updates = append(u.updates, up)
	return u
}

// GroupId_Increment queues an in-place add/subtract of |count| on
// GroupId (see OrderId_Increment for the sign fix).
func (u *__GroupOrderdUser_Updater) GroupId_Increment(count int) *__GroupOrderdUser_Updater {
	if count > 0 {
		up := updateCol{" GroupId = GroupId+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		up := updateCol{" GroupId = GroupId- " + u.nextDollar(), -count} // make it positive
		u.updates = append(u.updates, up)
	}
	return u
}

// UserId queues a "UserId = ?" assignment for the bulk update.
func (u *__GroupOrderdUser_Updater) UserId(newVal int) *__GroupOrderdUser_Updater {
	up := updateCol{" UserId = " + u.nextDollar(), newVal}
	u.updates = append(u.updates, up)
	return u
}

// UserId_Increment queues an in-place add/subtract of |count| on
// UserId (see OrderId_Increment for the sign fix).
func (u *__GroupOrderdUser_Updater) UserId_Increment(count int) *__GroupOrderdUser_Updater {
	if count > 0 {
		up := updateCol{" UserId = UserId+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		up := updateCol{" UserId = UserId- " + u.nextDollar(), -count} // make it positive
		u.updates = append(u.updates, up)
	}
	return u
}
//string
/////////////////////////////////////////////////////////////////////
/////////////////////// Selector ///////////////////////////////////
//Select_* can just be used with: .GetString() , .GetStringSlice(), .GetInt() ..GetIntSlice()
// OrderBy_*/Select_* configure ordering and the projected column.
// Each OrderBy_* call replaces any previously set ordering; each
// Select_* call replaces the projected column list (default "*").
func (u *__GroupOrderdUser_Selector) OrderBy_OrderId_Desc() *__GroupOrderdUser_Selector {
	u.orderBy = " ORDER BY OrderId DESC "
	return u
}
func (u *__GroupOrderdUser_Selector) OrderBy_OrderId_Asc() *__GroupOrderdUser_Selector {
	u.orderBy = " ORDER BY OrderId ASC "
	return u
}
func (u *__GroupOrderdUser_Selector) Select_OrderId() *__GroupOrderdUser_Selector {
	u.selectCol = "OrderId"
	return u
}
func (u *__GroupOrderdUser_Selector) OrderBy_GroupId_Desc() *__GroupOrderdUser_Selector {
	u.orderBy = " ORDER BY GroupId DESC "
	return u
}
func (u *__GroupOrderdUser_Selector) OrderBy_GroupId_Asc() *__GroupOrderdUser_Selector {
	u.orderBy = " ORDER BY GroupId ASC "
	return u
}
func (u *__GroupOrderdUser_Selector) Select_GroupId() *__GroupOrderdUser_Selector {
	u.selectCol = "GroupId"
	return u
}
func (u *__GroupOrderdUser_Selector) OrderBy_UserId_Desc() *__GroupOrderdUser_Selector {
	u.orderBy = " ORDER BY UserId DESC "
	return u
}
func (u *__GroupOrderdUser_Selector) OrderBy_UserId_Asc() *__GroupOrderdUser_Selector {
	u.orderBy = " ORDER BY UserId ASC "
	return u
}
func (u *__GroupOrderdUser_Selector) Select_UserId() *__GroupOrderdUser_Selector {
	u.selectCol = "UserId"
	return u
}
// Limit caps the number of returned rows; 0 (the default) emits no
// LIMIT clause.
func (u *__GroupOrderdUser_Selector) Limit(num int) *__GroupOrderdUser_Selector {
	u.limit = num
	return u
}
// Offset skips the first num rows; 0 (the default) emits no OFFSET
// clause.
func (u *__GroupOrderdUser_Selector) Offset(num int) *__GroupOrderdUser_Selector {
	u.offset = num
	return u
}
// Order_Rand orders rows randomly. RAND() is MySQL-specific, which
// matches the builder's isMysql default.
func (u *__GroupOrderdUser_Selector) Order_Rand() *__GroupOrderdUser_Selector {
	u.orderBy = " ORDER BY RAND() "
	return u
}
///////////////////////// Queryer Selector //////////////////////////////////
// _stoSql assembles the final SELECT statement and its bound
// arguments from the accumulated WHERE clauses, order-by, limit and
// offset.
func (u *__GroupOrderdUser_Selector) _stoSql() (string, []interface{}) {
	sqlWherrs, whereArgs := whereClusesToSql(u.wheres, u.whereSep)
	sqlstr := "SELECT " + u.selectCol + " FROM sun_chat.group_orderd_user"
	if len(strings.Trim(sqlWherrs, " ")) > 0 { // emit WHERE only when conditions were added
		sqlstr += " WHERE " + sqlWherrs
	}
	if u.orderBy != "" {
		sqlstr += u.orderBy
	}
	// limit/offset of 0 mean "not set" and are omitted
	if u.limit != 0 {
		sqlstr += " LIMIT " + strconv.Itoa(u.limit)
	}
	if u.offset != 0 {
		sqlstr += " OFFSET " + strconv.Itoa(u.offset)
	}
	return sqlstr, whereArgs
}
// GetRow runs the built SELECT and scans exactly one row into a
// GroupOrderdUser. The driver returns an error when no row matches
// (sql.ErrNoRows via sqlx.Get). On success the row is marked _exists
// and the load-one hook fires.
func (u *__GroupOrderdUser_Selector) GetRow(db *sqlx.DB) (*GroupOrderdUser, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, whereArgs)
	}
	row := &GroupOrderdUser{}
	//by Sqlx
	err = db.Get(row, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return nil, err
	}
	row._exists = true
	OnGroupOrderdUser_LoadOne(row)
	return row, nil
}
// GetRows runs the built SELECT and scans every matching row.
// db.Unsafe() is used so columns without a matching struct field are
// tolerated. Each returned row is flagged _exists and the load-many
// hook fires. (Cleanup: removed a dead commented-out duplicate of the
// _exists loop left behind by the generator.)
func (u *__GroupOrderdUser_Selector) GetRows(db *sqlx.DB) ([]*GroupOrderdUser, error) {
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, whereArgs)
	}
	var rows []*GroupOrderdUser
	//by Sqlx
	err := db.Unsafe().Select(&rows, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return nil, err
	}
	for i := 0; i < len(rows); i++ {
		rows[i]._exists = true
	}
	OnGroupOrderdUser_LoadMany(rows)
	return rows, nil
}
//dep use GetRows()
// GetRows2 behaves like GetRows but returns the rows by value instead
// of by pointer. Deprecated in favor of GetRows. (Cleanup: removed a
// dead commented-out duplicate of the _exists loop left behind by the
// generator.)
func (u *__GroupOrderdUser_Selector) GetRows2(db *sqlx.DB) ([]GroupOrderdUser, error) {
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, whereArgs)
	}
	var rows []*GroupOrderdUser
	//by Sqlx
	err := db.Unsafe().Select(&rows, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return nil, err
	}
	for i := 0; i < len(rows); i++ {
		rows[i]._exists = true
	}
	OnGroupOrderdUser_LoadMany(rows)
	// copy pointers out into a value slice for the by-value return type
	rows2 := make([]GroupOrderdUser, len(rows))
	for i := 0; i < len(rows); i++ {
		rows2[i] = *rows[i]
	}
	return rows2, nil
}
// GetString runs the built SELECT and scans a single scalar result
// into a string. Intended for use after Select_* has narrowed the
// projection to one column.
func (u *__GroupOrderdUser_Selector) GetString(db *sqlx.DB) (string, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, whereArgs)
	}
	var res string
	//by Sqlx
	err = db.Get(&res, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return "", err
	}
	return res, nil
}
// GetStringSlice executes the selector query and collects the single
// selected column of every row into a []string.
func (u *__GroupOrderdUser_Selector) GetStringSlice(db *sqlx.DB) ([]string, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, whereArgs)
	}
	var rows []string
	//by Sqlx
	err = db.Select(&rows, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return nil, err
	}
	return rows, nil
}
// GetIntSlice executes the selector query and collects the single
// selected column of every row into an []int.
func (u *__GroupOrderdUser_Selector) GetIntSlice(db *sqlx.DB) ([]int, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, whereArgs)
	}
	var rows []int
	//by Sqlx
	err = db.Select(&rows, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return nil, err
	}
	return rows, nil
}
// GetInt executes the selector query and scans the single selected column
// of the first row into an int (useful for COUNT/aggregate selectors).
func (u *__GroupOrderdUser_Selector) GetInt(db *sqlx.DB) (int, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, whereArgs)
	}
	var res int
	//by Sqlx
	err = db.Get(&res, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return 0, err
	}
	return res, nil
}
///////////////////////// Queryer Update Delete //////////////////////////////////

// Update builds and executes an UPDATE statement against
// sun_chat.group_orderd_user from the accumulated SET pairs and WHERE
// clauses, returning the number of affected rows.
func (u *__GroupOrderdUser_Updater) Update(db XODB) (int, error) {
	var err error
	var updateArgs []interface{}
	var sqlUpdateArr []string
	/*for up, newVal := range u.updates {
		sqlUpdateArr = append(sqlUpdateArr, up)
		updateArgs = append(updateArgs, newVal)
	}*/
	for _, up := range u.updates {
		sqlUpdateArr = append(sqlUpdateArr, up.col)
		updateArgs = append(updateArgs, up.val)
	}
	sqlUpdate := strings.Join(sqlUpdateArr, ",")
	sqlWherrs, whereArgs := whereClusesToSql(u.wheres, u.whereSep)
	// placeholder order in the final query: SET args first, then WHERE args
	var allArgs []interface{}
	allArgs = append(allArgs, updateArgs...)
	allArgs = append(allArgs, whereArgs...)
	sqlstr := `UPDATE sun_chat.group_orderd_user SET ` + sqlUpdate
	// skip the WHERE clause entirely when no conditions were supplied
	if len(strings.Trim(sqlWherrs, " ")) > 0 { //2 for safty
		sqlstr += " WHERE " + sqlWherrs
	}
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, allArgs)
	}
	res, err := db.Exec(sqlstr, allArgs...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return 0, err
	}
	num, err := res.RowsAffected()
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return 0, err
	}
	return int(num), nil
}
// Delete builds and executes a DELETE statement against
// sun_chat.group_orderd_user from the accumulated WHERE clauses and
// returns the number of deleted rows.
//
// NOTE(review): unlike Update, there is no guard for an empty wheres
// list — with no conditions the query becomes "DELETE ... WHERE " which
// is invalid SQL. Confirm callers always supply at least one condition.
func (d *__GroupOrderdUser_Deleter) Delete(db XODB) (int, error) {
	var err error
	var wheresArr []string
	for _, w := range d.wheres {
		wheresArr = append(wheresArr, w.condition)
	}
	wheresStr := strings.Join(wheresArr, d.whereSep)
	// collect the bind arguments in the same order as the conditions
	var args []interface{}
	for _, w := range d.wheres {
		args = append(args, w.args...)
	}
	sqlstr := "DELETE FROM sun_chat.group_orderd_user WHERE " + wheresStr
	// run query
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, args)
	}
	res, err := db.Exec(sqlstr, args...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return 0, err
	}
	// retrieve id
	num, err := res.RowsAffected()
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return 0, err
	}
	return int(num), nil
}
///////////////////////// Mass insert - replace for GroupOrderdUser ////////////////

// MassInsert_GroupOrderdUser inserts all rows into
// sun_chat.group_orderd_user with a single multi-row INSERT statement.
// Returns an error when rows is empty or the statement fails; the input
// slice is not modified.
func MassInsert_GroupOrderdUser(rows []GroupOrderdUser, db XODB) error {
	if len(rows) == 0 {
		return errors.New("rows slice should not be empty - inserted nothing")
	}
	var err error
	ln := len(rows)
	// one placeholder group per row, 3 columns each
	insVals := helper.SqlManyDollars(3, ln, true)
	// sql query
	sqlstr := "INSERT INTO sun_chat.group_orderd_user (" +
		"OrderId, GroupId, UserId" +
		") VALUES " + insVals
	// flatten the rows into the bind-argument slice.
	// Fixed: capacity was ln*5 ("5 fields") but only 3 fields are bound
	// per row, so ln*3 is the correct pre-size.
	vals := make([]interface{}, 0, ln*3) // 3 fields per row
	for _, row := range rows {
		vals = append(vals, row.OrderId)
		vals = append(vals, row.GroupId)
		vals = append(vals, row.UserId)
	}
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, " MassInsert len = ", ln, vals)
	}
	_, err = db.Exec(sqlstr, vals...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return err
	}
	return nil
}
// MassReplace_GroupOrderdUser replaces all rows in
// sun_chat.group_orderd_user with a single multi-row REPLACE statement.
// Returns an error when rows is empty or the statement fails; the input
// slice is not modified.
func MassReplace_GroupOrderdUser(rows []GroupOrderdUser, db XODB) error {
	if len(rows) == 0 {
		return errors.New("rows slice should not be empty - inserted nothing")
	}
	var err error
	ln := len(rows)
	// one placeholder group per row, 3 columns each
	insVals := helper.SqlManyDollars(3, ln, true)
	// sql query
	sqlstr := "REPLACE INTO sun_chat.group_orderd_user (" +
		"OrderId, GroupId, UserId" +
		") VALUES " + insVals
	// flatten the rows into the bind-argument slice.
	// Fixed: capacity was ln*5 ("5 fields") but only 3 fields are bound
	// per row, so ln*3 is the correct pre-size.
	vals := make([]interface{}, 0, ln*3) // 3 fields per row
	for _, row := range rows {
		vals = append(vals, row.OrderId)
		vals = append(vals, row.GroupId)
		vals = append(vals, row.UserId)
	}
	if LogTableSqlReq.GroupOrderdUser {
		XOLog(sqlstr, " MassReplace len = ", ln, vals)
	}
	_, err = db.Exec(sqlstr, vals...)
	if err != nil {
		if LogTableSqlReq.GroupOrderdUser {
			XOLogErr(err)
		}
		return err
	}
	return nil
}
//////////////////// Play ///////////////////////////////
//
//
//
|
package Problem0093
import (
"fmt"
)
// restoreIpAddresses returns every valid IPv4 address that can be formed
// by inserting three dots into s (LeetCode 93). A DFS picks the end of
// each of the first three segments; the fourth segment is whatever
// remains. Inputs shorter than 4 or longer than 12 characters can never
// split into four 1-3 character segments, so they are rejected up front.
func restoreIpAddresses(s string) []string {
	n := len(s)
	if n < 4 || n > 12 {
		return []string{}
	}
	res := []string{}
	// combination holds the four candidate segments being built
	combination := make([]string, 4)
	var dfs func(int, int)
	// dfs fills combination[idx:] using s[begin:]
	dfs = func(idx, begin int) {
		if idx == 3 {
			// last segment: must consume the entire remainder
			temp := s[begin:]
			if isOK(temp) {
				combination[3] = temp
				res = append(res, IP(combination))
			}
			return
		}
		// 剩余 IP 段,最多需要的宽度 => max width the remaining segments can cover
		maxRemain := 3 * (3 - idx)
		// each remaining segment needs at least 1 char, hence end <= n-(3-idx)
		for end := begin + 1; end <= n-(3-idx); end++ {
			if end+maxRemain < n {
				// remaining segments can't absorb the leftover chars:
				// this segment is too short, try a longer one
				continue
			}
			if end-begin > 3 {
				// segment longer than 3 digits can never be valid
				break
			}
			temp := s[begin:end]
			if isOK(temp) {
				combination[idx] = temp
				dfs(idx+1, end)
			}
		}
	}
	dfs(0, 0)
	return res
}
// IP joins the four segments of s into dotted-quad IPv4 notation.
func IP(s []string) string {
	return s[0] + "." + s[1] + "." + s[2] + "." + s[3]
}
// isOK reports whether s is a valid IPv4 segment text.
// Callers guarantee len(s) <= 3.
func isOK(s string) bool {
	// a multi-character segment must not start with '0' ("0" alone is fine)
	if len(s) > 1 && s[0] == '0' {
		return false
	}
	// one or two digits are at most 99, always in range
	if len(s) <= 2 {
		return true
	}
	// exactly three characters: only 1xx, 20x-24x and 250-255 qualify
	switch {
	case s[0] == '1':
		return true
	case s[0] != '2':
		return false
	case '0' <= s[1] && s[1] <= '4':
		return true
	case s[1] == '5' && '0' <= s[2] && s[2] <= '5':
		return true
	default:
		return false
	}
}
|
package calendar
import (
"strconv"
"time"
"github.com/kudrykv/latex-yearly-planner/app/components/hyper"
)
// DayTime embeds time.Time so calendar-specific rendering helpers can be
// attached while keeping the full time.Time API available.
type DayTime struct {
	time.Time
}

// AddDate shifts the date by the given offsets and keeps the DayTime wrapper.
func (d DayTime) AddDate(years, months, days int) DayTime {
	shifted := d.Time.AddDate(years, months, days)
	return DayTime{Time: shifted}
}
// Link renders this day as a hyperlink whose target is the day's
// RefText anchor and whose text is the day-of-month number.
func (d DayTime) Link() string {
	return hyper.Link(d.RefText(), strconv.Itoa(d.Day()))
}
// SquareLink renders the day number inside a small bordered LaTeX tabular
// cell, hyperlinked to the day's RefText anchor.
func (d DayTime) SquareLink() string {
	ref := d.RefText()
	day := strconv.Itoa(d.Day())
	return `\hyperlink{` + ref + `}{\begin{tabular}{@{}p{5mm}@{}|}\hfil{}` + day + `\\ \hline\end{tabular}}`
}
// SelectedCell renders the day number as a highlighted (white-on-black)
// LaTeX table cell, used to mark the currently selected day.
func (d DayTime) SelectedCell() string {
	return `\cellcolor{black}{\textcolor{white}{` + strconv.Itoa(d.Day()) + `}}`
}
// RefText returns the day's unique hyperlink anchor name — the RFC 3339
// rendering of the underlying time.
func (d DayTime) RefText() string {
	return d.Format(time.RFC3339)
}
// FormatHour renders the hour in 12-hour form ("3 PM") when ampm is true,
// otherwise in zero-padded 24-hour form ("15").
func (d DayTime) FormatHour(ampm bool) string {
	if !ampm {
		return d.Format("15")
	}
	return d.Format("3 PM")
}
|
package main
import (
"encoding/json"
"fmt"
)
// 结构体标签
// 定义一个Student体,使用结构体标签
// Student2 demonstrates struct tags: each `json:"..."` tag sets the key
// used when the field is (un)marshalled with encoding/json.
type Student2 struct {
	Id     string `json:"id"` // tag makes the JSON key lowercase "id"
	Gender string `json:"gender"`
	Name   string `json:"name"`
	Sno    string `json:"sno"`
}
// main demonstrates JSON round-tripping of a tagged struct: marshal a
// Student2 to JSON, then unmarshal a JSON string back into one.
func main() {
	var s1 = Student2{
		Id:     "12",
		Gender: "男",
		Name:   "李四",
		Sno:    "s001",
	}
	// struct -> JSON (keys follow the struct tags)
	jsonByte, _ := json.Marshal(s1)
	jsonStr := string(jsonByte)
	fmt.Println(jsonStr)
	// JSON string -> struct; note the capitalized keys still match
	// because encoding/json field matching is case-insensitive
	var str = `{"Id":"12","Gender":"男","Name":"李四","Sno":"s001"}`
	var s2 = Student2{}
	// Unmarshal takes the raw bytes and a pointer to the destination
	err := json.Unmarshal([]byte(str), &s2)
	if err != nil {
		fmt.Printf("转换失败 \n")
	} else {
		fmt.Printf("%#v \n", s2)
	}
}
|
package board
import (
"image"
"strings"
"testing"
"testutil"
)
// TestDirectionNames checks Direction.String() for single, combined,
// empty, and out-of-range direction values.
func TestDirectionNames(t *testing.T) {
	testCases := map[Direction]string{
		None:    "None",
		S:       "S",
		N | W:   "NW",
		N | E | S | W: "NESW",
		// a value above the mask must render as "(illegal)"
		Direction(directionMask) << 1: "(illegal)"}
	for dir, name := range testCases {
		if dir.String() != name {
			t.Errorf("Name of direction %d is %q, expected %q",
				uint8(dir), dir.String(), name)
		}
	}
}
// dirArrayEquals reports whether a1 and a2 contain the same directions
// in the same order.
func dirArrayEquals(a1, a2 []Direction) bool {
	if len(a1) != len(a2) {
		return false
	}
	for i, d := range a1 {
		if d != a2[i] {
			return false
		}
	}
	return true
}
// TestDirectionDecomposing checks Direction.Decompose() splits a combined
// direction into its single-direction components in N, E, S, W order.
func TestDirectionDecomposing(t *testing.T) {
	testCases := map[Direction][]Direction{
		None:  {},
		N:     {N},
		S:     {S},
		E:     {E},
		W:     {W},
		S | E: {E, S},
		N | E | S | W: {N, E, S, W},
	}
	for dir, expected := range testCases {
		decomposition := dir.Decompose()
		if !dirArrayEquals(decomposition, expected) {
			t.Errorf("Decomposition of %v is %v, expected %v",
				dir, decomposition, expected)
		}
	}
}
// dirNegationTests pairs a direction with its expected negation
// (test[0].Negate() should equal test[1]).
var dirNegationTests [][]Direction = [][]Direction{
	{None, N | E | S | W}, {N, E | S | W}, {W | S, N | E},
}
// TestDirectionNegation checks Direction.Negate() against the pairs in
// dirNegationTests.
func TestDirectionNegation(t *testing.T) {
	for _, test := range dirNegationTests {
		neg := test[0].Negate()
		if neg != test[1] {
			t.Errorf("Negation of %v is %v, expected %v",
				test[0], neg, test[1])
		}
	}
}
// performFieldValueTests exercises a Field's direction setters/getters for
// a given visited-flag value, verifying that SetDirection overwrites,
// AddDirection ORs in, and the visited flag neither leaks into nor is
// clobbered by the direction bits.
func performFieldValueTests(t *testing.T, f *Field, visited bool) {
	f.setVisited(visited)
	f.SetDirection(N)
	if f.Direction() != N {
		t.Errorf("Direction set to %v, expected N", f.Direction())
	}
	f.SetDirection(S)
	if f.Direction() != S {
		t.Errorf("Direction set to %v, expected S", f.Direction())
	}
	f.AddDirection(W)
	if f.Direction() != S|W {
		t.Errorf("Direction set to %v, expected %v", f.Direction(), S|W)
	}
	if f.visited() != visited {
		t.Errorf("Visited flag is %v, expected %v", f.visited(), visited)
	}
	if f.Direction() != S|W {
		t.Errorf("Direction after changing visited flag is %v, expected %v",
			f.Direction(), S|W)
	}
}
// TestFieldValues checks a zero Field starts unvisited with no direction,
// then runs the field value battery with the visited flag both off and on.
func TestFieldValues(t *testing.T) {
	var f Field
	if f.Direction() != None {
		t.Errorf("Initial direction is %v, expected None", f.Direction())
	}
	if f.visited() {
		t.Errorf("Field is initially visited")
	}
	performFieldValueTests(t, &f, false)
	performFieldValueTests(t, &f, true)
}
// TestCreatingBoard checks that a freshly created board has all fields
// unvisited with no direction, the entrance at the top-left corner and
// the exit at the bottom-right corner.
func TestCreatingBoard(t *testing.T) {
	const width, height = 3, 2
	board := New(width, height)
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			field := board.At(x, y)
			if field.Direction() != None {
				t.Errorf("Direction of (%d, %d) is %v", x, y,
					field.Direction())
			}
			if field.visited() {
				t.Errorf("Field (%d, %d) is visited", x, y)
			}
		}
	}
	if !board.Entrance().Eq(image.Pt(0, 0)) {
		t.Errorf("Entrance is %v, expected %v", board.Entrance(), image.Pt(0, 0))
	}
	expectedExit := image.Pt(width-1, height-1)
	if !board.Exit().Eq(expectedExit) {
		t.Errorf("Exit is %v, expected %v", board.Exit(), expectedExit)
	}
}
// walkingTest describes one Walk() scenario: the maze to walk, whether to
// walk in solve mode, and the expected per-field visit matrix.
type walkingTest struct {
	Board       boardImpl
	Solve       bool
	VisitMatrix [][]bool
}
// walkingTests is the table for TestWalking. Each entry's ASCII diagram
// shows the maze: '*' marks the entrance, 'x' the exit, '#' a field the
// walk never reaches. Field direction bits describe which walls are open.
var walkingTests []walkingTest = []walkingTest{
	// + +
	// |X|
	// + +
	{
		Board: boardImpl{
			fields:   [][]Field{{Field(N | S)}},
			entrance: image.Pt(0, 0),
			exit:     image.Pt(0, 0),
		},
		VisitMatrix: [][]bool{{true}},
	},
	// +-+-+
	// |   |
	// + + +
	// |*|x|
	// + + +
	{
		Board: boardImpl{
			fields: [][]Field{
				{Field(E | S), Field(W | S)},
				{Field(N | S), Field(N | S)},
			},
			entrance: image.Pt(0, 1),
			exit:     image.Pt(1, 1),
		},
		VisitMatrix: [][]bool{{true, true}, {true, true}},
	},
	// +-+-+
	//   * |
	// +-+ +
	// |#|x
	// +-+-+
	{
		Board: boardImpl{
			fields: [][]Field{
				{Field(E | W), Field(W | S)},
				{Field(None), Field(N | E)},
			},
			entrance: image.Pt(0, 0),
			exit:     image.Pt(1, 1),
		},
		VisitMatrix: [][]bool{{true, true}, {false, true}},
	},
	// +-+-+
	//   * x
	// +-+-+
	// |# #|
	// +-+-+
	{
		Board: boardImpl{
			fields: [][]Field{
				{Field(E | W), Field(E | W)},
				{Field(E), Field(W)},
			},
			entrance: image.Pt(0, 0),
			exit:     image.Pt(1, 0),
		},
		VisitMatrix: [][]bool{{true, true}, {false, false}},
	},
	// + +
	// |*|
	// + +
	// |x|
	// + +
	{
		Board: boardImpl{
			fields: [][]Field{
				{Field(N | S)},
				{Field(N | S)},
			},
			entrance: image.Pt(0, 0),
			exit:     image.Pt(0, 1),
		},
		VisitMatrix: [][]bool{{true}, {true}},
	},
	// +-+-+
	//   x *
	// +-+-+
	{
		Board: boardImpl{
			fields: [][]Field{
				{Field(E | W), Field(E | W)},
			},
			entrance: image.Pt(1, 0),
			exit:     image.Pt(0, 0),
		},
		VisitMatrix: [][]bool{{true, true}},
	},
	// +-+-+-+
	// |     |
	// + + + +
	// |x|*| |
	// + + +-+
	{
		Board: boardImpl{
			fields: [][]Field{
				{Field(E | S), Field(E | S | W), Field(S | W)},
				{Field(N | S), Field(N | S), Field(N)},
			},
			entrance: image.Pt(1, 1),
			exit:     image.Pt(0, 1),
		},
		VisitMatrix: [][]bool{{true, true, true}, {true, true, true}},
	},
	// +-+-+
	//   *|x
	// +-+-+ (solve)
	{
		Board: boardImpl{
			fields:   [][]Field{{Field(W), Field(E)}},
			entrance: image.Pt(0, 0),
			exit:     image.Pt(1, 0),
		},
		Solve:       true,
		VisitMatrix: [][]bool{{false, false}},
	},
	// +-+-+
	//   * x
	// +-+-+ (solve)
	{
		Board: boardImpl{
			fields:   [][]Field{{Field(E | W), Field(E | W)}},
			entrance: image.Pt(0, 0),
			exit:     image.Pt(1, 0),
		},
		Solve:       true,
		VisitMatrix: [][]bool{{true, true}},
	},
	// + + +-+
	// |x|*| |
	// + + + |
	// |     |
	// +-+-+-+ (solve)
	{
		Board: boardImpl{
			fields: [][]Field{
				{Field(N | S), Field(N | S), Field(S)},
				{Field(N | E), Field(N | E | W), Field(N | W)},
			},
			entrance: image.Pt(1, 0),
			exit:     image.Pt(0, 0),
		},
		Solve: true,
		VisitMatrix: [][]bool{
			{true, true, false},
			{true, true, false},
		},
	},
}
// TestWalking runs Board.Walk over every walkingTests entry, after first
// validating the fixture, and compares the resulting visit matrix to the
// expected one.
func TestWalking(t *testing.T) {
	for i, test := range walkingTests {
		if !test.Board.Validate() {
			t.Fatalf("Test %d is broken:\n%v", i, &test.Board)
		}
		// renamed from "error": a local named "error" shadows the
		// predeclared error type for the rest of the function
		visitMatrix, err := test.Board.Walk(test.Solve)
		if err != nil {
			t.Errorf("Error in test %d: %v", i, err)
			continue
		}
		if visitMatrix == nil {
			t.Errorf("Visit matrix for test %d is nil. "+
				"Something terrible has happened", i)
			continue
		}
		if !testutil.MatricesEqual(visitMatrix, test.VisitMatrix) {
			t.Errorf("Visit matrix for test %d is %v, expected %v",
				i, visitMatrix, test.VisitMatrix)
		}
	}
}
// TestWalkingFallsOffBoard checks that walking a maze whose open walls
// lead outside the grid fails with an error naming the out-of-bounds
// point (1, -1).
func TestWalkingFallsOffBoard(t *testing.T) {
	// +-+ +-+
	//   *
	// +-+-+-+
	board := boardImpl{
		fields: [][]Field{
			{Field(E | W), Field(N | E | W), Field(E | W)},
		},
		entrance: image.Pt(0, 0),
		exit:     image.Pt(2, 0),
	}
	if !board.Validate() {
		t.Fatal("Test is broken")
	}
	// renamed from "error": avoid shadowing the predeclared error type
	_, walkErr := board.Walk(false)
	expectedPointStr := image.Pt(1, -1).String()
	if walkErr == nil || !strings.Contains(walkErr.String(), expectedPointStr) {
		t.Errorf("Error is %q, expected to contain %s", walkErr, expectedPointStr)
	}
}
// validationTest pairs a maze field layout with whether Validate()
// should accept it (walls must agree between neighboring fields).
type validationTest struct {
	Fields [][]Field
	Ok     bool
}
// validationTests is the table for TestValidation; diagrams show each
// candidate maze, including ones with mismatched walls that must fail.
var validationTests []validationTest = []validationTest{
	// +-++-+
	// |  | |
	// + ++-+
	// + ++-+
	// |  || |
	// +-++-+
	{
		Fields: [][]Field{
			{Field(E | S), Field(W)},
			{Field(N), Field(None)},
		},
		Ok: true,
	},
	// +-++-+
	// | | | |
	// +-++-+
	{
		Fields: [][]Field{
			{Field(None), Field(W)},
		},
		Ok: false,
	},
	// +-+
	// | |
	// +-+
	// + +
	// | |
	// +-+
	{
		Fields: [][]Field{
			{Field(None)},
			{Field(N)},
		},
		Ok: false,
	},
}
// TestValidation runs Board.Validate over every validationTests entry.
func TestValidation(t *testing.T) {
	for i, test := range validationTests {
		board := boardImpl{fields: test.Fields}
		validated := board.Validate()
		if validated != test.Ok {
			t.Errorf("Validation %d resulted in %v, expected %v",
				i, validated, test.Ok)
		}
	}
}
// complexityTest pairs a maze field layout with its expected
// Complexity() score.
type complexityTest struct {
	Fields     [][]Field
	Complexity int
}
// complexityTests is the table for TestComplexity; diagrams show the
// maze shapes whose complexity is being scored.
var complexityTests []complexityTest = []complexityTest{
	// +-+-+-+
	// | | | |
	// + + + +
	// | | | |
	// +-+-+-+
	{
		Fields: [][]Field{
			{Field(E | S), Field(W | S), Field(S)},
			{Field(N), Field(E | N), Field(W | N)},
		},
		Complexity: 0,
	},
	// +-+-+-+-+-+
	// | | |     |
	// + + + + +-+
	// |   |     |
	// +-+ +-+-+-+
	// |         |
	// +---------+
	{
		Fields: [][]Field{
			{Field(S), Field(S), Field(S | E), Field(S | E | W), Field(W)},
			{Field(N | E), Field(N | E | S | W), Field(N | W), Field(N | E), Field(W)},
			{Field(E), Field(N | E | W), Field(E | W), Field(E | W), Field(W)},
		},
		Complexity: 3,
	},
}
// TestComplexity validates each fixture, then compares Board.Complexity
// against the expected score.
func TestComplexity(t *testing.T) {
	for i, test := range complexityTests {
		board := boardImpl{fields: test.Fields}
		if !board.Validate() {
			t.Fatalf("Test %d is broken:\n%v", i, &board)
		}
		complexity := board.Complexity()
		if complexity != test.Complexity {
			t.Errorf("Complexity of test %d is %d, expected %d",
				i, complexity, test.Complexity)
		}
	}
}
|
package twch
import (
"fmt"
)
// Blocks provides access to the Twitch "blocks" API endpoints through
// the shared client.
type Blocks struct {
	client *Client
}

// listBlocks is the wire shape of a block-list response; the embedded
// listLinks carries pagination links.
type listBlocks struct {
	Blocks []Block `json:"blocks"`
	listLinks
}

// Block is a single blocked-user record as returned by the API.
type Block struct {
	ID        *int    `json:"_id"`
	UpdatedAt *string `json:"updated_at"`
	User      *User   `json:"user"`
}
// ListBlocks fetches the list of users blocked by the given login,
// honoring the optional paging options. Returns the blocks, the raw
// response, and any request/decoding error.
func (b *Blocks) ListBlocks(login string, opts *ListOptions) (blocks []Block, resp *Response, err error) {
	url := fmt.Sprintf("users/%s/blocks", login)
	u, err := appendOptions(url, opts)
	if err != nil {
		return
	}
	req, err := b.client.NewRequest("GET", u)
	if err != nil {
		return
	}
	r := new(listBlocks)
	resp, err = b.client.Do(req, r)
	if err != nil {
		return
	}
	blocks = r.Blocks
	return
}
// AddBlock adds a block to the passed authenticated user. `user` is the current user,
// `target` is the account to block. A successful block returns the new block object.
// This method requires OAuth authentication with the required `user_blocks_edit` scope
func (b *Blocks) AddBlock(user, target string) (block *Block, resp *Response, err error) {
	endpoint := fmt.Sprintf("users/%s/blocks/%s", user, target)
	req, err := b.client.NewRequest("PUT", endpoint)
	if err != nil {
		return
	}
	// decode the newly created block straight into the named result
	block = new(Block)
	resp, err = b.client.Do(req, block)
	return
}
// RemoveBlock deletes a block from the passed authenticated user. `user` is the current user,
// `target` is the account to block. A 404 error will be returned if the block did not exist
// for the given user.
func (b *Blocks) RemoveBlock(user, target string) (err error) {
	endpoint := fmt.Sprintf("users/%s/blocks/%s", user, target)
	req, err := b.client.NewRequest("DELETE", endpoint)
	if err == nil {
		// no response body expected for a delete
		_, err = b.client.Do(req, nil)
	}
	return
}
|
package main
import (
"fmt"
)
// printDeezerPlaylists prints the title of every playlist of the current
// Deezer user. `d` is a package-level client declared elsewhere —
// presumably already authenticated (TODO confirm). Errors abort the
// listing after being printed.
func printDeezerPlaylists() {
	playlists, err := d.Client.GetCurrentUserPlaylists()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, playlist := range playlists {
		fmt.Println(playlist.Title)
	}
}
// printDeezerLovedTracks finds the user's "loved tracks" playlist(s) and
// prints "title - artist - album" for each track in them. Uses the same
// package-level client `d` as printDeezerPlaylists; errors abort early.
func printDeezerLovedTracks() {
	playlists, err := d.Client.GetCurrentUserPlaylists()
	if err != nil {
		fmt.Printf("%#v\n", err)
		return
	}
	for _, playlist := range playlists {
		// only the special loved-tracks playlist(s) are expanded
		if playlist.IsLovedTrack {
			pl, err := d.Client.GetPlaylist(playlist.ID)
			if err != nil {
				fmt.Println(err)
				return
			}
			for _, track := range pl.Tracks {
				fmt.Printf("%s - %s - %s\n", track.Title, track.Artist.Name, track.Album.Title)
			}
		}
	}
}
|
// interfaces
package main
import "fmt"
// speaker is an interface with a single speak method; any type that
// implements speak satisfies it implicitly.
type speaker interface {
	speak()
}
// ingles represents an English speaker.
type ingles struct {
}
// spanish represents a Spanish speaker.
type spanish struct {
}
// Implement the speaker interface for both structs using value receivers:
// each prints its greeting ("Hello world" / "Hola mundo").
func (ingles) speak() {
	fmt.Println("Hello world")
}
func (spanish) speak() {
	fmt.Println("Hola mundo")
}
// main demonstrates interface values: the same speaker variable holds an
// English and then a Spanish implementation, and decirHola accepts any
// speaker (including pointer values).
func main() {
	// renamed from "speaker": the local previously shadowed the speaker
	// interface type itself, which is confusing and hides the type name
	var talker speaker
	var i ingles
	talker = i
	talker.speak()
	// reuse the same interface variable with the Spanish implementation
	var s spanish
	talker = s
	talker.speak()
	// pointers to the structs also satisfy the value-receiver interface
	decirHola(new(ingles))
	decirHola(&spanish{})
}
// decirHola abstracts the speak call behind the speaker interface.
func decirHola(sp speaker) {
	sp.speak()
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webservice
import (
restfulspec "github.com/emicklei/go-restful-openapi/v2"
restful "github.com/emicklei/go-restful/v3"
apis "github.com/oam-dev/kubevela/pkg/apiserver/rest/apis/v1"
"github.com/oam-dev/kubevela/pkg/apiserver/rest/usecase"
"github.com/oam-dev/kubevela/pkg/apiserver/rest/utils"
"github.com/oam-dev/kubevela/pkg/apiserver/rest/utils/bcode"
)
// ClusterWebService exposes the cluster-management REST endpoints; all
// business logic is delegated to the injected ClusterUsecase.
type ClusterWebService struct {
	clusterUsecase usecase.ClusterUsecase
}

// NewClusterWebService builds a ClusterWebService around the given usecase.
func NewClusterWebService(clusterUsecase usecase.ClusterUsecase) *ClusterWebService {
	return &ClusterWebService{clusterUsecase: clusterUsecase}
}
// GetWebService declares the /clusters route table: CRUD on clusters,
// namespace creation inside a cluster, and the cloud-cluster
// list/connect/create/creation-status endpoints. Each route documents its
// request/response types for the generated OpenAPI spec.
func (c *ClusterWebService) GetWebService() *restful.WebService {
	ws := new(restful.WebService)
	ws.Path(versionPrefix+"/clusters").
		Consumes(restful.MIME_XML, restful.MIME_JSON).
		Produces(restful.MIME_JSON, restful.MIME_XML).
		Doc("api for cluster manage")
	tags := []string{"cluster"}
	ws.Route(ws.GET("/").To(c.listKubeClusters).
		Doc("list all clusters").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.QueryParameter("query", "Fuzzy search based on name or description").DataType("string")).
		Param(ws.QueryParameter("page", "Page for paging").DataType("int").DefaultValue("0")).
		Param(ws.QueryParameter("pageSize", "PageSize for paging").DataType("int").DefaultValue("20")).
		Returns(200, "", apis.ListClusterResponse{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.ListClusterResponse{}).Do(returns200, returns500))
	ws.Route(ws.POST("/").To(c.createKubeCluster).
		Doc("create cluster").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Reads(&apis.CreateClusterRequest{}).
		Returns(200, "", apis.ClusterBase{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.ClusterBase{}))
	ws.Route(ws.GET("/{clusterName}").To(c.getKubeCluster).
		Doc("detail cluster info").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("clusterName", "identifier of the cluster").DataType("string")).
		Returns(200, "", apis.DetailClusterResponse{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.DetailClusterResponse{}))
	ws.Route(ws.PUT("/{clusterName}").To(c.modifyKubeCluster).
		Doc("modify cluster").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("clusterName", "identifier of the cluster").DataType("string")).
		Reads(apis.CreateClusterRequest{}).
		Returns(200, "", apis.ClusterBase{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.ClusterBase{}))
	ws.Route(ws.DELETE("/{clusterName}").To(c.deleteKubeCluster).
		Doc("delete cluster").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("clusterName", "identifier of the cluster").DataType("string")).
		Returns(200, "", apis.ClusterBase{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.ClusterBase{}))
	ws.Route(ws.POST("/{clusterName}/namespaces").To(c.createNamespace).
		Doc("create namespace in cluster").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("clusterName", "name of the target cluster").DataType("string")).
		Reads(apis.CreateClusterNamespaceRequest{}).
		Returns(200, "", apis.CreateClusterNamespaceResponse{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.CreateClusterNamespaceResponse{}))
	// cloud-cluster endpoints: list is a POST because it carries access keys
	ws.Route(ws.POST("/cloud-clusters/{provider}").To(c.listCloudClusters).
		Doc("list cloud clusters").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("provider", "identifier of the cloud provider").DataType("string")).
		Param(ws.QueryParameter("page", "Page for paging").DataType("int").DefaultValue("0")).
		Param(ws.QueryParameter("pageSize", "PageSize for paging").DataType("int").DefaultValue("20")).
		Reads(apis.AccessKeyRequest{}).
		Returns(200, "", apis.ListCloudClusterResponse{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.ListCloudClusterResponse{}))
	ws.Route(ws.POST("/cloud-clusters/{provider}/connect").To(c.connectCloudCluster).
		Doc("create cluster from cloud cluster").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("provider", "identifier of the cloud provider").DataType("string")).
		Reads(apis.ConnectCloudClusterRequest{}).
		Returns(200, "", apis.ClusterBase{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.ClusterBase{}))
	ws.Route(ws.POST("/cloud-clusters/{provider}/create").To(c.createCloudCluster).
		Doc("create cloud cluster").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("provider", "identifier of the cloud provider").DataType("string").Required(true)).
		Reads(apis.CreateCloudClusterRequest{}).
		Returns(200, "", apis.CreateCloudClusterResponse{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.CreateCloudClusterResponse{}))
	ws.Route(ws.GET("/cloud-clusters/{provider}/creation/{cloudClusterName}").To(c.getCloudClusterCreationStatus).
		Doc("check cloud cluster create status").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("provider", "identifier of the cloud provider").DataType("string")).
		Param(ws.PathParameter("cloudClusterName", "identifier for cloud cluster which is creating").DataType("string")).
		Returns(200, "", apis.CreateCloudClusterResponse{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.CreateCloudClusterResponse{}))
	ws.Route(ws.GET("/cloud-clusters/{provider}/creation").To(c.listCloudClusterCreation).
		Doc("list cloud cluster creation").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("provider", "identifier of the cloud provider").DataType("string")).
		Returns(200, "", apis.ListCloudClusterCreationResponse{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.ListCloudClusterCreationResponse{}))
	ws.Route(ws.DELETE("/cloud-clusters/{provider}/creation/{cloudClusterName}").To(c.deleteCloudClusterCreation).
		Doc("delete cloud cluster creation").
		Metadata(restfulspec.KeyOpenAPITags, tags).
		Param(ws.PathParameter("provider", "identifier of the cloud provider").DataType("string")).
		Param(ws.PathParameter("cloudClusterName", "identifier for cloud cluster which is creating").DataType("string")).
		Returns(200, "", apis.CreateCloudClusterResponse{}).
		Returns(400, "", bcode.Bcode{}).
		Writes(apis.CreateCloudClusterResponse{}))
	return ws
}
// listKubeClusters handles GET /clusters: paged, optionally filtered
// listing of known clusters.
func (c *ClusterWebService) listKubeClusters(req *restful.Request, res *restful.Response) {
	query := req.QueryParameter("query")
	page, pageSize, err := utils.ExtractPagingParams(req, minPageSize, maxPageSize)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Call the usecase layer code
	clusters, err := c.clusterUsecase.ListKubeClusters(req.Request.Context(), query, page, pageSize)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(clusters); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// createKubeCluster handles POST /clusters: validates the request body
// and registers a new cluster.
func (c *ClusterWebService) createKubeCluster(req *restful.Request, res *restful.Response) {
	// Verify the validity of parameters
	var createReq apis.CreateClusterRequest
	if err := req.ReadEntity(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := validate.Struct(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Call the usecase layer code
	clusterBase, err := c.clusterUsecase.CreateKubeCluster(req.Request.Context(), createReq)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(clusterBase); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// getKubeCluster handles GET /clusters/{clusterName}: returns detailed
// info for one cluster.
func (c *ClusterWebService) getKubeCluster(req *restful.Request, res *restful.Response) {
	clusterName := req.PathParameter("clusterName")
	// Call the usecase layer code
	clusterDetail, err := c.clusterUsecase.GetKubeCluster(req.Request.Context(), clusterName)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(clusterDetail); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// modifyKubeCluster handles PUT /clusters/{clusterName}: validates the
// body (same shape as create) and updates the named cluster.
func (c *ClusterWebService) modifyKubeCluster(req *restful.Request, res *restful.Response) {
	// Verify the validity of parameters
	var createReq apis.CreateClusterRequest
	if err := req.ReadEntity(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := validate.Struct(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	clusterName := req.PathParameter("clusterName")
	// Call the usecase layer code
	clusterBase, err := c.clusterUsecase.ModifyKubeCluster(req.Request.Context(), createReq, clusterName)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(clusterBase); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// deleteKubeCluster handles DELETE /clusters/{clusterName}: removes the
// named cluster and echoes its base info back.
func (c *ClusterWebService) deleteKubeCluster(req *restful.Request, res *restful.Response) {
	clusterName := req.PathParameter("clusterName")
	// Call the usecase layer code
	clusterBase, err := c.clusterUsecase.DeleteKubeCluster(req.Request.Context(), clusterName)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(clusterBase); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// createNamespace handles POST /clusters/{clusterName}/namespaces:
// validates the body and creates a namespace in the target cluster.
func (c *ClusterWebService) createNamespace(req *restful.Request, res *restful.Response) {
	clusterName := req.PathParameter("clusterName")
	// Verify the validity of parameters
	var createReq apis.CreateClusterNamespaceRequest
	if err := req.ReadEntity(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := validate.Struct(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Call the usecase layer code
	resp, err := c.clusterUsecase.CreateClusterNamespace(req.Request.Context(), clusterName, createReq)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(resp); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// listCloudClusters handles POST /clusters/cloud-clusters/{provider}:
// lists clusters at the cloud provider using the supplied access keys
// (POST so credentials travel in the body, with paging in the query).
func (c *ClusterWebService) listCloudClusters(req *restful.Request, res *restful.Response) {
	provider := req.PathParameter("provider")
	page, pageSize, err := utils.ExtractPagingParams(req, minPageSize, maxPageSize)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Verify the validity of parameters
	var accessKeyRequest apis.AccessKeyRequest
	if err := req.ReadEntity(&accessKeyRequest); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := validate.Struct(&accessKeyRequest); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Call the usecase layer code
	clustersResp, err := c.clusterUsecase.ListCloudClusters(req.Request.Context(), provider, accessKeyRequest, page, pageSize)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(clustersResp); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// connectCloudCluster handles POST
// /clusters/cloud-clusters/{provider}/connect: registers an existing
// cloud cluster with the platform.
func (c *ClusterWebService) connectCloudCluster(req *restful.Request, res *restful.Response) {
	provider := req.PathParameter("provider")
	// Verify the validity of parameters
	var connectReq apis.ConnectCloudClusterRequest
	if err := req.ReadEntity(&connectReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := validate.Struct(&connectReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Call the usecase layer code
	cluster, err := c.clusterUsecase.ConnectCloudCluster(req.Request.Context(), provider, connectReq)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(cluster); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// createCloudCluster handles POST
// /clusters/cloud-clusters/{provider}/create: provisions a brand-new
// cluster at the cloud provider.
func (c *ClusterWebService) createCloudCluster(req *restful.Request, res *restful.Response) {
	provider := req.PathParameter("provider")
	// Verify the validity of parameters
	var createReq apis.CreateCloudClusterRequest
	if err := req.ReadEntity(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := validate.Struct(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Call the usecase layer code
	resp, err := c.clusterUsecase.CreateCloudCluster(req.Request.Context(), provider, createReq)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(resp); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// getCloudClusterCreationStatus handles GET
// /clusters/cloud-clusters/{provider}/creation/{cloudClusterName}:
// reports progress of an in-flight cloud cluster creation.
func (c *ClusterWebService) getCloudClusterCreationStatus(req *restful.Request, res *restful.Response) {
	provider := req.PathParameter("provider")
	cloudClusterName := req.PathParameter("cloudClusterName")
	// Call the usecase layer code
	resp, err := c.clusterUsecase.GetCloudClusterCreationStatus(req.Request.Context(), provider, cloudClusterName)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(resp); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// listCloudClusterCreation lists all pending/known cluster creations for the
// given cloud provider.
func (c *ClusterWebService) listCloudClusterCreation(req *restful.Request, res *restful.Response) {
	provider := req.PathParameter("provider")
	creations, err := c.clusterUsecase.ListCloudClusterCreation(req.Request.Context(), provider)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(creations); err != nil {
		bcode.ReturnError(req, res, err)
	}
}
// deleteCloudClusterCreation cancels/removes the named cloud-cluster creation
// at the given provider and writes the usecase result back to the client.
func (c *ClusterWebService) deleteCloudClusterCreation(req *restful.Request, res *restful.Response) {
	provider := req.PathParameter("provider")
	clusterName := req.PathParameter("cloudClusterName")
	deleted, err := c.clusterUsecase.DeleteCloudClusterCreation(req.Request.Context(), provider, clusterName)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(deleted); err != nil {
		bcode.ReturnError(req, res, err)
	}
}
|
package main
import (
"fmt"
"path/filepath"
"os"
)
// main demonstrates filepath.Abs and os.Stat.
// Fixes vs. original: the os.Stat error is no longer silently discarded,
// and the Stat output lines end with a newline so they no longer run
// together on one line.
func main() {
	fmt.Printf("hello, world\n")
	// Absolute path of the current working directory ("" resolves to ".").
	p, e := filepath.Abs("")
	fmt.Printf("%v\n", p)
	fmt.Printf("%v\n", e)
	stat, err := os.Stat("parent")
	if err != nil {
		// Report the failure instead of ignoring it.
		fmt.Printf("Stat error: %v\n", err)
		return
	}
	fmt.Printf("Stat name: %s\n", stat.Name())
	fmt.Printf("Stat isdir: %v\n", stat.IsDir())
}
|
package main
import (
"fmt"
"math"
)
// main demonstrates basic integer and float operators.
// Fix vs. original: c and d are already float64, so the redundant
// float64(...) conversions around math.Max are removed.
func main() {
	a := 3
	b := 2
	fmt.Println("Sum = ", a+b)
	fmt.Println("Sub = ", a-b)
	// NOTE: integer division — 3/2 prints 1 (truncated), which appears to
	// be the point of the demo.
	fmt.Println("Div = ", a/b)
	fmt.Println("Mul = ", a*b)
	fmt.Println("Mod = ", a%b)
	fmt.Println("AND => ", a&b)
	fmt.Println("OR => ", a|b)
	c := 3.0
	d := 2.0
	fmt.Println("Bigger =>", math.Max(c, d))
}
|
package authenticate
import (
"testing"
"github.com/pomerium/pomerium/config"
)
// newTestOptions builds a minimal, valid config.Options fixture for the
// authenticate-service tests: insecure server, Google provider, dummy client
// credentials, a base64 cookie secret and a base64-encoded EC private signing
// key. It fails the test immediately if the fixture does not validate.
func newTestOptions(t *testing.T) *config.Options {
	opts := config.NewDefaultOptions()
	opts.AuthenticateURLString = "https://authenticate.example"
	opts.AuthorizeURLString = "https://authorize.example"
	opts.InsecureServer = true
	opts.ClientID = "client-id"
	opts.Provider = "google"
	// Test-only secrets; not real credentials.
	opts.ClientSecret = "OromP1gurwGWjQPYb1nNgSxtbVB5NnLzX6z5WOKr0Yw="
	opts.CookieSecret = "OromP1gurwGWjQPYb1nNgSxtbVB5NnLzX6z5WOKr0Yw="
	opts.SigningKey = "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUJlMFRxbXJkSXBZWE03c3pSRERWYndXOS83RWJHVWhTdFFJalhsVHNXM1BvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFb0xaRDI2bEdYREhRQmhhZkdlbEVmRDdlNmYzaURjWVJPVjdUbFlIdHF1Y1BFL2hId2dmYQpNY3FBUEZsRmpueUpySXJhYTFlQ2xZRTJ6UktTQk5kNXBRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="
	// Sanity-check the fixture itself so individual tests can assume validity.
	err := opts.Validate()
	if err != nil {
		t.Fatal(err)
	}
	return opts
}
// TestOptions_Validate exercises ValidateOptions against a matrix of mutated
// fixtures: a known-good baseline plus variants that each break one field.
func TestOptions_Validate(t *testing.T) {
	good := newTestOptions(t)
	badScheme := newTestOptions(t)
	badScheme.AuthenticateURLString = "BAD_SCHEME://"
	emptyClientID := newTestOptions(t)
	emptyClientID.ClientID = ""
	emptyClientSecret := newTestOptions(t)
	emptyClientSecret.ClientSecret = ""
	invalidCookieSecret := newTestOptions(t)
	invalidCookieSecret.CookieSecret = "OromP1gurwGWjQPYb1nNgSxtbVB5NnLzX6z5WOKr0Yw^"
	shortCookieLength := newTestOptions(t)
	shortCookieLength.CookieSecret = "gN3xnvfsAwfCXxnJorGLKUG4l2wC8sS8nfLMhcStPg=="
	badSharedKey := newTestOptions(t)
	badSharedKey.Services = "authenticate"
	badSharedKey.SharedKey = ""
	badAuthenticateURL := newTestOptions(t)
	badAuthenticateURL.AuthenticateURLString = "BAD_URL"
	badCallbackPath := newTestOptions(t)
	badCallbackPath.AuthenticateCallbackPath = ""
	// NOTE(review): badScheme, emptyClientID, emptyClientSecret and
	// badAuthenticateURL are built but never added to the table below —
	// likely leftovers or missing cases; confirm intent.
	tests := []struct {
		name string
		o *config.Options
		wantErr bool
	}{
		{"minimum options", good, false},
		{"nil options", &config.Options{}, true},
		{"invalid cookie secret", invalidCookieSecret, true},
		{"short cookie secret", shortCookieLength, true},
		{"no shared secret", badSharedKey, true},
		{"empty callback path", badCallbackPath, true},
	}
	for _, tt := range tests {
		tt := tt // capture range variable for the parallel-safe subtest closure
		t.Run(tt.name, func(t *testing.T) {
			if err := ValidateOptions(tt.o); (err != nil) != tt.wantErr {
				t.Errorf("Options.Validate() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestNew verifies that constructing the authenticate service from a config
// succeeds for valid options/signing keys and fails for invalid ones.
func TestNew(t *testing.T) {
	good := newTestOptions(t)
	good.CookieName = "A"
	badRedirectURL := newTestOptions(t)
	badRedirectURL.AuthenticateURLString = "BAD URL"
	badRedirectURL.CookieName = "B"
	badProvider := newTestOptions(t)
	badProvider.Provider = ""
	badProvider.CookieName = "C"
	badGRPCConn := newTestOptions(t)
	badGRPCConn.CookieName = "D"
	badGRPCConn.DataBrokerURLString = "BAD"
	emptyProviderURL := newTestOptions(t)
	emptyProviderURL.Provider = "oidc"
	emptyProviderURL.ProviderURL = ""
	// goodSigningKey is a base64 EC private key; badSigningKey is a base64
	// certificate (not a private key), which must be rejected.
	goodSigningKey := newTestOptions(t)
	goodSigningKey.SigningKey = "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpCMFZkbko1VjEvbVlpYUlIWHhnd2Q0Yzd5YWRTeXMxb3Y0bzA1b0F3ekdvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVUc1eENQMEpUVDFINklvbDhqS3VUSVBWTE0wNENnVzlQbEV5cE5SbVdsb29LRVhSOUhUMwpPYnp6aktZaWN6YjArMUt3VjJmTVRFMTh1dy82MXJVQ0JBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="
	badSigningKey := newTestOptions(t)
	badSigningKey.SigningKey = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJFakNCdWdJSkFNWUdtVzhpYWd1TU1Bb0dDQ3FHU000OUJBTUNNQkV4RHpBTkJnTlZCQU1NQm5WdWRYTmwKWkRBZ0Z3MHlNREExTWpJeU1EUTFNalJhR0E4ME56VTRNRFF4T1RJd05EVXlORm93RVRFUE1BMEdBMVVFQXd3RwpkVzUxYzJWa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVVHNXhDUDBKVFQxSDZJb2w4akt1ClRJUFZMTTA0Q2dXOVBsRXlwTlJtV2xvb0tFWFI5SFQzT2J6empLWWljemIwKzFLd1YyZk1URTE4dXcvNjFyVUMKQkRBS0JnZ3Foa2pPUFFRREFnTkhBREJFQWlBSFFDUFh2WG5oeHlDTGNhZ3N3eWt4RUM1NFV5RmdyUVJVRmVCYwpPUzVCSFFJZ1Y3T2FXY2pMeHdsRlIrWDZTQ2daZDI5bXBtOVZKNnpXQURhWGdEN3FURW89Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
	// NOTE(review): badProvider, badGRPCConn and emptyProviderURL are built
	// but never referenced in the table below — confirm whether cases are
	// missing.
	tests := []struct {
		name string
		opts *config.Options
		// want *Authenticate
		wantErr bool
	}{
		{"good", good, false},
		{"empty opts", &config.Options{}, true},
		{"fails to validate", badRedirectURL, true},
		{"good signing key", goodSigningKey, false},
		{"bad signing key", badSigningKey, true},
	}
	for _, tt := range tests {
		tt := tt // capture range variable for the subtest closure
		t.Run(tt.name, func(t *testing.T) {
			_, err := New(&config.Config{Options: tt.opts})
			if (err != nil) != tt.wantErr {
				t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
		})
	}
}
|
package main
import (
adventutilities "AdventOfCode/utils"
"log"
"strconv"
"strings"
)
// splitStringIntoJuicyBits parses one AoC day-2 input line of the form
// "<min>-<max> <char>: <password>" (e.g. "1-3 a: abcde") and returns the two
// numbers, the policy character and the password.
func splitStringIntoJuicyBits(passwAndPolicy string) (minOcc int, maxOcc int, charToMatch string, password string) {
	// The policy is the leading "<min>-<max>" token.
	bounds := strings.Split(strings.Split(passwAndPolicy, " ")[0], "-")
	var err error
	minOcc, err = strconv.Atoi(bounds[0])
	adventutilities.Check(err)
	maxOcc, err = strconv.Atoi(bounds[1])
	adventutilities.Check(err)
	// The policy character sits immediately before the colon.
	colon := strings.Index(passwAndPolicy, ":")
	charToMatch = passwAndPolicy[colon-1 : colon]
	// The password is everything after the last space.
	password = passwAndPolicy[strings.LastIndex(passwAndPolicy, " ")+1:]
	return minOcc, maxOcc, charToMatch, password
}
// abidesByPolicyPuzzle1 reports whether the policy character occurs between
// minOcc and maxOcc times (inclusive) in password.
func abidesByPolicyPuzzle1(password string, minOcc int, maxOcc int, charToMatch string) (ok bool) {
	occurrences := strings.Count(password, charToMatch)
	if occurrences < minOcc {
		return false
	}
	return occurrences <= maxOcc
}
// abidesByPolicyPuzzle2 reports whether exactly one of the two 1-based
// positions in password holds the policy character (AoC day-2 part 2 rule:
// Toboggan Corporate Policies have no concept of "index zero").
// Fixes vs. original: positions outside the password no longer panic on the
// rune slice (they now simply report false), and the two mirrored if-blocks
// are collapsed into a single XOR.
func abidesByPolicyPuzzle2(password string, pos1 int, pos2 int, charToMatch string) (ok bool) {
	runes := []rune(strings.TrimSpace(password))
	// Guard: out-of-range positions cannot match.
	if pos1 < 1 || pos2 < 1 || pos1 > len(runes) || pos2 > len(runes) {
		return false
	}
	matches1 := string(runes[pos1-1]) == charToMatch
	matches2 := string(runes[pos2-1]) == charToMatch
	// Exactly one position must match.
	return matches1 != matches2
}
// solvePuzzle1 counts the input lines whose password satisfies the
// occurrence-range policy (part 1).
func solvePuzzle1(passwords []string) (numValidPasswords int) {
	for _, entry := range passwords {
		lo, hi, ch, pwd := splitStringIntoJuicyBits(entry)
		if abidesByPolicyPuzzle1(pwd, lo, hi, ch) {
			numValidPasswords++
		}
	}
	return numValidPasswords
}
// solvePuzzle2 counts the input lines whose password satisfies the
// exactly-one-position policy (part 2); the two policy numbers are
// reinterpreted as 1-based positions.
func solvePuzzle2(passwords []string) (numValidPasswords int) {
	for _, entry := range passwords {
		pos1, pos2, ch, pwd := splitStringIntoJuicyBits(entry)
		if abidesByPolicyPuzzle2(pwd, pos1, pos2, ch) {
			numValidPasswords++
		}
	}
	return numValidPasswords
}
// main reads the day-2 input file and prints the answers for both puzzles.
func main() {
	lines, err := adventutilities.ReadStringsFromFile("data/inputs_02_12.txt")
	adventutilities.Check(err)
	log.Println("got a file with ", len(lines), " passwords to check")
	log.Println("numValid passwords puzzle 1:", solvePuzzle1(lines))
	log.Println("numValid passwords puzzle 2:", solvePuzzle2(lines))
}
|
package main
import (
"fmt"
"runtime"
"sync"
"time"
)
const limit = 1e6
// main benchmarks three ways of summing [0, limit): a plain loop, a
// shared-slice worker pool and a channel-based fan-in, printing each result
// and its wall-clock cost.
// Fix vs. original: time.Now().Sub(t) replaced by the idiomatic
// time.Since(t) (identical value; flagged by staticcheck S1012).
func main() {
	t1 := time.Now()
	sum1 := loopSum()
	fmt.Println("sum1 is: ", sum1)
	fmt.Printf("cost time %d ns\n", time.Since(t1))
	t2 := time.Now()
	sum2 := ConcurrentSum()
	fmt.Println("sum2 is: ", sum2)
	fmt.Printf("cost time %d ns\n", time.Since(t2))
	t3 := time.Now()
	sum3 := ChannelSum()
	fmt.Println("sum3 is: ", sum3)
	fmt.Printf("cost time %d ns\n", time.Since(t3))
}
// ConcurrentSum sums the integers in [0, limit) by splitting the range into
// GOMAXPROCS contiguous chunks, one goroutine per chunk, each writing only
// its own slot of sums (no locking needed).
// Fix vs. original: when limit is not divisible by n, the original dropped
// the trailing limit%n values and returned a wrong total on most CPU counts;
// the last chunk now absorbs the remainder.
func ConcurrentSum() int {
	n := runtime.GOMAXPROCS(0)
	sums := make([]int, n)
	var wg sync.WaitGroup
	chunk := limit / n
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			start := chunk * i
			end := start + chunk
			if i == n-1 {
				end = limit // last chunk takes the remainder when limit%n != 0
			}
			for j := start; j < end; j++ {
				sums[i] += j
			}
		}(i)
	}
	wg.Wait()
	total := 0
	for _, s := range sums {
		total += s
	}
	return total
}
// loopSum is the serial baseline: the sum of all integers in [0, limit).
func loopSum() int {
	total := 0
	for i := 0; i < limit; i++ {
		total += i
	}
	return total
}
// ChannelSum sums the integers in [0, limit) with GOMAXPROCS workers, each
// summing one contiguous chunk and sending its partial result on an
// unbuffered channel that the caller drains.
// Fix vs. original: as in ConcurrentSum, the trailing limit%n values were
// silently skipped when limit is not divisible by n; the last worker now
// takes the remainder.
func ChannelSum() int {
	n := runtime.GOMAXPROCS(0)
	ch := make(chan int)
	chunk := limit / n
	for i := 0; i < n; i++ {
		go func(i int, r chan int) {
			sum := 0
			start := chunk * i
			end := start + chunk
			if i == n-1 {
				end = limit // last worker takes the remainder when limit%n != 0
			}
			for j := start; j < end; j++ {
				sum += j
			}
			r <- sum
		}(i, ch)
	}
	rs := 0
	for i := 0; i < n; i++ {
		rs += <-ch
	}
	return rs
}
|
/*
* Copyright © 2018-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adabas
import (
"fmt"
"github.com/SoftwareAG/adabas-go-api/adatypes"
)
// Adabas field name length maximum (two-character field names).
const fieldNameLength = 2

// Field identifier used in the FDT call: indexes into fieldIdentifiers below
// to obtain the one-byte record-type code of an FDT entry.
type fieldIdentifier uint

const (
	fieldIdentifierField fieldIdentifier = iota // plain field ('F')
	fieldIdentifierSub // sub descriptor ('S')
	fieldIdentifierSuper // super descriptor ('T')
	fieldIdentifierPhonetic // phonetic descriptor ('P')
	fieldIdentifierCollation // collation descriptor ('C')
	fieldIdentifierReferential // referential integrity ('R')
	fieldIdentifierHyperexit // hyper exit descriptor ('H')
)

// fieldIdentifiers holds the wire codes, in the same order as the constants above.
var fieldIdentifiers = []byte{'F', 'S', 'T', 'P', 'C', 'R', 'H'}

// code returns the one-byte wire code for this field identifier.
func (fdt fieldIdentifier) code() byte {
	return byte(fieldIdentifiers[fdt])
}
// Adabas FDT call fdt options used for getting field information.
// NOTE: these constants are bit POSITIONS (iota values), not masks — callers
// build the mask with (1 << constant), see e.g. createFieldType.
type option byte

const (
	fdtFlagOption2NC option = iota // (1<<0) /* 0x01 Not counted field (SQL) */
	fdtFlagOption2NN // (1<<1) /* 0x02 Field must not contain NULL */
	fdtFlagOption2LB // (1<<2) /* 0x04 4 bytes inclusive length for var.len fields */
	fdtFlagOption2LA // (1<<3) /* 0x08 Long Alpha (up to 16 K)*/
	fdtFlagOption2UNUSED // (1<<4) /* 0x10 Unused */
	fdtFlagOption2HF // (1<<5) /* 0x20 HF option active */
	fdtFlagOption2NV // (1<<6) /* 0x40 No conversion over net-work */
	fdtFlagOption2NB // (1<<7) /* 0x80 NB option active */
)

const (
	fdtFlagOption1UQ option = iota // (1<<0) /* 0x01 UQ option (unique descriptor) */
	fdtFlagOption1SB // (1<<1) /* 0x02 Field is sub descriptor */
	fdtFlagOption1PH // (1<<2) /* 0x04 Field is phonetic descriptor */
	fdtFlagOption1PE // (1<<3) /* 0x08 PE (Group-level) */
	fdtFlagOption1NU // (1<<4) /* 0x10 NU option (zero-suppression) */
	fdtFlagOption1MU // (1<<5) /* 0x20 Multiple value field */
	fdtFlagOption1FI // (1<<6) /* 0x40 FI option (fixed length) */
	fdtFlagOption1DE // (1<<7) /* 0x80 Field is descriptor */
)

// Mainframe variant of option byte 1: same flags, reversed bit order.
const (
	fdtFlagMfOption1DE option = iota // (1<<0) /* 0x01 descriptor */
	fdtFlagMfOption1FI // (1<<1) /* 0x02 FI option (fixed length) */
	fdtFlagMfOption1MU // (1<<2) /* 0x04 Multiple value field */
	fdtFlagMfOption1NU // (1<<3) /* 0x08 NU option (zero-suppression) */
	fdtFlagMfOption1PE // (1<<4) /* 0x10 PE (Group-level) */
	fdtFlagMfOption1PH // (1<<5) /* 0x20 Field is phonetic descriptor */
	fdtFlagMfOption1SB // (1<<6) /* 0x40 Field is sub descriptor */
	fdtFlagMfOption1UQ // (1<<7) /* 0x80 UQ option (unique descriptor) */
)

// Names of the FDT layout fields referenced when parsing the LA call result.
const (
	fdtIdentifier = "FieldIdentifier"
	fdtLength = "fdtLength"
	fdtStrLevel = "fdtStrLevel"
	fdtFlag = "fdtFlag"
	fdtCount = "fdtCount"
	fdtTime = "fdtTime"
)

// iv returns the option's bit position as a plain int (for use in shift
// expressions built from slices of positions).
func (cc option) iv() int {
	return int(cc)
}
// FDT field entry structures: one parent-field reference of a super/sub
// descriptor (name plus from/to byte range).
var fdtFieldEntry = []adatypes.IAdaType{
	adatypes.NewTypeWithLength(adatypes.FieldTypeString, "fieldName", 2),
	adatypes.NewType(adatypes.FieldTypeUInt2, "fieldFrom"),
	adatypes.NewType(adatypes.FieldTypeUInt2, "fieldTo"),
}

// FDT hyper field entry structures: one parent-field name of a hyper exit.
var fdtHyperFieldEntry = []adatypes.IAdaType{
	adatypes.NewTypeWithLength(adatypes.FieldTypeString, "fieldName", 2),
}

// FDT main field structures. The trailing index comments are referenced by
// the fdtCondition matrix below, which selects which of these entries apply
// to each FDT record type.
var fdt = []adatypes.IAdaType{
	adatypes.NewType(adatypes.FieldTypeCharacter, fdtIdentifier), // 0
	adatypes.NewType(adatypes.FieldTypeLength, "FieldDefLength"), // 1
	adatypes.NewTypeWithLength(adatypes.FieldTypeString, "fieldName", fieldNameLength), // 2
	adatypes.NewType(adatypes.FieldTypeCharacter, "fieldFormat"), // 3
	adatypes.NewType(adatypes.FieldTypeUByte, "fieldOption"), // 4
	adatypes.NewType(adatypes.FieldTypeUByte, "fieldOption2"), // 5
	adatypes.NewType(adatypes.FieldTypeUByte, "fieldLevel"),
	adatypes.NewType(adatypes.FieldTypeUByte, "fieldEditMask"),
	adatypes.NewType(adatypes.FieldTypeUByte, "fieldSubOption"),
	adatypes.NewType(adatypes.FieldTypeUByte, "fieldSYfunction"),
	adatypes.NewType(adatypes.FieldTypeUByte, "fieldDeactivate"), // 10
	adatypes.NewType(adatypes.FieldTypeUInt4, "fieldLength"),
	adatypes.NewType(adatypes.FieldTypeUInt2, "superLength"),
	adatypes.NewType(adatypes.FieldTypeUByte, "superOption2"),
	adatypes.NewStructureList(adatypes.FieldTypeStructure, "superList", adatypes.OccByte, fdtFieldEntry),
	adatypes.NewType(adatypes.FieldTypeUInt2, "subLength"), // 15
	adatypes.NewType(adatypes.FieldTypeUByte, "subOption2"),
	adatypes.NewTypeWithLength(adatypes.FieldTypeFiller, "FILL1", 2),
	adatypes.NewTypeWithLength(adatypes.FieldTypeString, "parentName", 2),
	adatypes.NewType(adatypes.FieldTypeUInt2, "subFrom"),
	adatypes.NewType(adatypes.FieldTypeUInt2, "subTo"), // 20
	adatypes.NewType(adatypes.FieldTypeUInt2, "colLength"),
	adatypes.NewType(adatypes.FieldTypeString, "colParentName"),
	adatypes.NewType(adatypes.FieldTypeUInt2, "colInternalLength"),
	adatypes.NewType(adatypes.FieldTypeUByte, "colOption2"),
	adatypes.NewTypeWithFlag(adatypes.FieldTypeString, "colAttribute", adatypes.FlagOptionLengthNotIncluded), // 25
	adatypes.NewType(adatypes.FieldTypeUInt2, "hyperLength"),
	adatypes.NewType(adatypes.FieldTypeUByte, "hyperFExit"),
	adatypes.NewType(adatypes.FieldTypeUByte, "hyperOption2"),
	adatypes.NewTypeWithLength(adatypes.FieldTypeFiller, "FILL2", 1),
	adatypes.NewStructureList(adatypes.FieldTypeStructure, "hyperList",
		adatypes.OccByte, fdtHyperFieldEntry), // 30
	adatypes.NewType(adatypes.FieldTypeUInt4, "refFile"),
	adatypes.NewTypeWithLength(adatypes.FieldTypeString, "refPrimaryKey", fieldNameLength),
	adatypes.NewTypeWithLength(adatypes.FieldTypeString, "refForeignKey", fieldNameLength),
	adatypes.NewType(adatypes.FieldTypeUByte, "refType"),
	adatypes.NewType(adatypes.FieldTypeUByte, "refUpdateAction"), // 35
	adatypes.NewType(adatypes.FieldTypeUByte, "refDeleteAction"),
}

// FDT condition matrix defining various parts of the field types needed:
// maps each record-type code to the indexes (into fdt above) that are present
// for that record type.
var fdtCondition = map[byte][]byte{
	fieldIdentifierField.code(): {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
	fieldIdentifierSuper.code(): {1, 2, 3, 4, 12, 13, 14},
	fieldIdentifierSub.code(): {1, 2, 3, 4, 12, 13, 14},
	// fieldIdentifierSub.code(): []byte{1, 2, 3, 4, 15, 16, 17, 18, 19, 20},
	fieldIdentifierPhonetic.code(): {1, 2, 3, 4, 12, 17, 18},
	fieldIdentifierCollation.code(): {1, 2, 3, 4, 12, 18, 23, 24, 25},
	fieldIdentifierHyperexit.code(): {1, 2, 3, 4, 26, 27, 28, 29, 30},
	fieldIdentifierReferential.code(): {1, 2, 31, 32, 33, 34, 35, 36},
}

// FDT general main level layout for the Adabas LA call; the conditional
// structure at the end dispatches on the record-type byte via fdtCondition.
var fdtGeneralLayout = []adatypes.IAdaType{
	adatypes.NewType(adatypes.FieldTypeUInt4, fdtLength),
	adatypes.NewType(adatypes.FieldTypeByte, fdtStrLevel),
	adatypes.NewType(adatypes.FieldTypeUByte, fdtFlag),
	adatypes.NewType(adatypes.FieldTypeUInt2, fdtCount),
	adatypes.NewType(adatypes.FieldTypeUInt8, fdtTime),
	adatypes.NewStructureCondition(adatypes.FieldTypeStructure, "fdt", fdt, adatypes.NewFieldCondition(1, 0, fdtCondition)),
}
// Create used definition to read FDT.
// NOTE(review): the name misspells "Definition"; kept as-is because renaming
// would break existing callers.
func createFdtDefintion() *adatypes.Definition {
	return adatypes.NewDefinitionWithTypes(fdtGeneralLayout)
}
// Traverser to count fields
// func traverserFieldDefinitionCreator(adaValue adatypes.IAdaValue, level int, x interface{}) bool {
// number := x.(*int)
// (*number)++
// return true
// }
// Create field definition table definition used to parse the Adabas LA call,
// getting the Adabas file definition out of the FDT
// createFieldDefinitionTable walks the parsed FDT entries and rebuilds the
// file's field tree as an adatypes.Definition. For each FDT record it creates
// the matching IAdaType (plain field, super/sub, phonetic, collation, hyper
// exit or referential), then uses the entry's level together with a stack of
// open structure types to attach it at the right depth.
func createFieldDefinitionTable(fdtDef *adatypes.Definition) (definition *adatypes.Definition, err error) {
	definition = adatypes.NewDefinition()
	fdtSearch := fdtDef.Search("fdt")
	fdt := fdtSearch.(*adatypes.StructureValue)
	nrFdtEntries := len(fdt.Elements)
	stack := adatypes.NewStack()
	definition.FileTime = fdtDef.Search(fdtTime)
	// lastStruct is the structure currently being filled (innermost open group).
	var lastStruct adatypes.IAdaType
	// FDT element indexes are 1-based.
	for index := 1; index < nrFdtEntries+1; index++ {
		value := fdt.Get(fdtIdentifier, index)
		var fieldType adatypes.IAdaType
		// Dispatch on the record-type byte; a zero byte is skipped.
		switch value.Value().(byte) {
		case 0:
			break
		case fieldIdentifierField.code():
			fieldType, err = createFieldType(fdt, index)
			if err != nil {
				return
			}
			adatypes.Central.Log.Debugf("Found normal field %s level=%d fieldType=%v", fieldType.Name(), fieldType.Level(), fieldType.Type())
		case fieldIdentifierSub.code(), fieldIdentifierSuper.code():
			adatypes.Central.Log.Debugf("Found Super/Sub field %c", value.Value().(byte))
			fieldType, err = createSubSuperDescriptorType(fdt, index)
			if err != nil {
				return
			}
		case fieldIdentifierPhonetic.code():
			adatypes.Central.Log.Debugf("Found Super/Sub field %c", value.Value().(byte))
			fieldType, err = createPhoneticType(fdt, index)
			if err != nil {
				return
			}
		case fieldIdentifierCollation.code():
			adatypes.Central.Log.Debugf("Found Collation field %c", value.Value().(byte))
			fieldType, err = createCollationType(fdt, index)
			if err != nil {
				return
			}
		case fieldIdentifierHyperexit.code():
			adatypes.Central.Log.Debugf("Found HyperExit field %c", value.Value().(byte))
			fieldType, err = createHyperExitType(fdt, index)
			if err != nil {
				return
			}
		case fieldIdentifierReferential.code():
			adatypes.Central.Log.Debugf("Found Referential field %c", value.Value().(byte))
			fieldType, err = createReferential(fdt, index)
			if err != nil {
				return
			}
		default:
			// Unknown record type: report and abort with a generic error.
			fmt.Printf("Not implemented already >%c<\n", value.Value().(byte))
			err = adatypes.NewGenericError(11, value.Value().(byte), value.Value().(byte))
			return
		}
		if fieldType != nil {
			// Attach the new type: pop open structures until one is found at
			// exactly one level above the new type, else append at top level.
			for {
				if lastStruct != nil {
					if lastStruct.Level() == fieldType.Level()-1 {
						if adatypes.Central.IsDebugLevel() {
							adatypes.Central.Log.Debugf("Append to structure %s add %s %d", lastStruct.Name(), fieldType.String(), fieldType.Level())
						}
						lastStruct.(*adatypes.StructureType).AddField(fieldType)
						break
					} else {
						popElement, _ := stack.Pop()
						if popElement == nil {
							// Stack exhausted: this type belongs to the main level.
							lastStruct = nil
							definition.AppendType(fieldType)
							if adatypes.Central.IsDebugLevel() {
								adatypes.Central.Log.Debugf("%s append to main %v", fieldType.Name(), fieldType.Type())
							}
							break
						} else {
							if adatypes.Central.IsDebugLevel() {
								adatypes.Central.Log.Debugf("Pop from Stack %v", popElement)
							}
							lastStruct = popElement.(adatypes.IAdaType)
							if adatypes.Central.IsDebugLevel() {
								adatypes.Central.Log.Debugf("Level equal last=%s %d current=%s %d",
									lastStruct.String(), lastStruct.Level(), fieldType.String(), fieldType.Level())
							}
						}
					}
				} else {
					adatypes.Central.Log.Debugf("Append to main %d %s", fieldType.Level(), fieldType.Name())
					definition.AppendType(fieldType)
					break
				}
			}
			// A new structure (group/period group, but not an MU container)
			// becomes the open structure for subsequent lower-level fields.
			if fieldType.IsStructure() && fieldType.Type() != adatypes.FieldTypeMultiplefield {
				lastStruct = fieldType
				if adatypes.Central.IsDebugLevel() {
					adatypes.Central.Log.Debugf("Pop to MU Stack %v", lastStruct)
				}
				stack.Push(lastStruct)
			}
			if adatypes.Central.IsDebugLevel() {
				adatypes.Central.Log.Debugf("Current structure %v", lastStruct)
			}
			definition.Register(fieldType)
		}
		adatypes.Central.Log.Debugf("Field type DONE")
	}
	definition.InitReferences()
	return
}
// create a common field type for a field: builds the IAdaType for a plain
// field entry ('F') at the given FDT index — a period group, a plain group,
// an MU (multiple-value) container wrapping the element type, or a scalar
// type chosen from the Adabas format character and length.
func createFieldType(fdt *adatypes.StructureValue, index int) (fieldType adatypes.IAdaType, err error) {
	name := string(fdt.Get("fieldName", index).Value().([]byte))
	length := fdt.Get("fieldLength", index).Value().(uint32)
	fdtFormat := fdt.Get("fieldFormat", index).Value().(byte)
	option := fdt.Get("fieldOption", index).Value().(uint8)
	option2 := fdt.Get("fieldOption2", index).Value().(uint8)
	level := fdt.Get("fieldLevel", index).Value().(uint8)
	sysf := fdt.Get("fieldSYfunction", index).Value().(uint8)
	editMask := fdt.Get("fieldEditMask", index).Value().(uint8)
	subOption := fdt.Get("fieldSubOption", index).Value().(uint8)
	adatypes.Central.Log.Debugf("Create field type %s check option=%v check containing %v", name, option, fdtFlagOption1PE)
	// Check if field is period element (PE flag on a level-1 entry).
	if level == 1 && option&(1<<fdtFlagOption1PE) > 0 {
		adatypes.Central.Log.Debugf("%s is PE", name)
		fieldType = adatypes.NewStructureEmpty(adatypes.FieldTypePeriodGroup, name, adatypes.OccUInt2, level)
	} else {
		// Normal field, check format character to pick the concrete type.
		var id adatypes.FieldType
		switch fdtFormat {
		case 'A':
			// Alpha: LA/LB flags select the long variants.
			switch {
			case option2&(1<<fdtFlagOption2LA) > 0:
				id = adatypes.FieldTypeLAString
			case option2&(1<<fdtFlagOption2LB) > 0:
				id = adatypes.FieldTypeLBString
			default:
				id = adatypes.FieldTypeString
			}
		case 'W':
			// Wide (Unicode): same LA/LB variant selection as alpha.
			switch {
			case option2&(1<<fdtFlagOption2LA) > 0:
				id = adatypes.FieldTypeLAUnicode
			case option2&(1<<fdtFlagOption2LB) > 0:
				id = adatypes.FieldTypeLBUnicode
			default:
				id = adatypes.FieldTypeUnicode
			}
		case 'P':
			id = adatypes.FieldTypePacked
		case 'U':
			id = adatypes.FieldTypeUnpacked
		case 'B':
			// Binary: unsigned integer sized by length.
			id = evaluateIntegerValue(true, length)
		case 'F':
			// Fixed point: signed integer sized by length.
			id = evaluateIntegerValue(false, length)
		case 'G':
			// Floating point: only 4- and 8-byte lengths are valid.
			// NOTE(review): both lengths map to FieldTypeFloat — confirm
			// whether length 8 should map to a double type instead.
			switch length {
			case 4:
				id = adatypes.FieldTypeFloat
			case 8:
				id = adatypes.FieldTypeFloat
			default:
				err = adatypes.NewGenericError(12, length)
				return
			}
		case ' ':
			// Blank format: plain group node; no scalar type is needed.
			adatypes.Central.Log.Debugf("%s created as Group", name)
			fieldType = adatypes.NewStructureEmpty(adatypes.FieldTypeGroup, name, adatypes.OccSingle, level)
			return
		default:
			err = adatypes.NewGenericError(13, fdtFormat)
			return
		}
		// flag option check: MU fields are wrapped in a structure container.
		adatypes.Central.Log.Debugf("Id=%d name=%s length=%d format=%c", id, name, length, fdtFormat)
		if (option & (1 << fdtFlagOption1MU)) > 0 {
			newType := adatypes.NewTypeWithLength(id, name, length)
			evaluateOption(newType, option, option2)
			newType.SysField = sysf
			newType.EditMask = editMask
			newType.SubOption = subOption
			newType.AddFlag(adatypes.FlagOptionAtomicFB)
			adatypes.Central.Log.Debugf("%s created as MU on top of the field MU=%v %p", name, newType.HasFlagSet(adatypes.FlagOptionAtomicFB), newType)
			fieldTypes := []adatypes.IAdaType{newType}
			fieldType = adatypes.NewStructureList(adatypes.FieldTypeMultiplefield, name, adatypes.OccUInt2, fieldTypes)
			adatypes.Central.Log.Debugf("%s MU structure %d -> %p", fieldType.Name(), fieldType.(*adatypes.StructureType).NrFields(), fieldType)
		} else {
			newType := adatypes.NewTypeWithLength(id, name, length)
			adatypes.Central.Log.Debugf("%s created as normal field %p", name, newType)
			evaluateOption(newType, option, option2)
			newType.SysField = sysf
			newType.EditMask = editMask
			newType.SubOption = subOption
			fieldType = newType
		}
	}
	fieldType.SetLevel(level)
	return
}
// Create Super-/Sub- Descriptor types: builds a super/sub descriptor from the
// FDT entry, adding one sub-entry (parent name, from, to) per element of the
// entry's superList.
func createSubSuperDescriptorType(fdt *adatypes.StructureValue, index int) (fieldType adatypes.IAdaType, err error) {
	name := string(fdt.Get("fieldName", index).Value().([]byte))
	superList := fdt.Get("superList", index).(*adatypes.StructureValue)
	fdtFormat := fdt.Get("fieldFormat", index).Value().(byte)
	option := fdt.Get("fieldOption", index).Value().(byte)
	superType := adatypes.NewSuperType(name, option)
	superType.FdtFormat = fdtFormat
	for _, sub := range superList.Elements {
		// Values[0..2] = parent field name, from-byte, to-byte.
		superType.AddSubEntry(string(sub.Values[0].Value().([]byte)), sub.Values[1].Value().(uint16), sub.Values[2].Value().(uint16))
	}
	fieldType = superType
	return
}
// create phonetic type: builds a phonetic descriptor (name, descriptor length
// and the parent field it derives from) from the FDT entry.
func createPhoneticType(fdt *adatypes.StructureValue, index int) (fieldType adatypes.IAdaType, err error) {
	name := string(fdt.Get("fieldName", index).Value().([]byte))
	descriptorLength := fdt.Get("superLength", index).Value().(uint16)
	parentName := string(fdt.Get("parentName", index).Value().([]byte))
	fieldType = adatypes.NewPhoneticType(name, descriptorLength, parentName)
	return
}
// create collation descriptor type: builds a collation descriptor from the
// FDT entry and translates its option bytes into FieldOption flags.
func createCollationType(fdt *adatypes.StructureValue, index int) (fieldType adatypes.IAdaType, err error) {
	name := string(fdt.Get("fieldName", index).Value().([]byte))
	length := fdt.Get("superLength", index).Value().(uint16)
	parentName := string(fdt.Get("parentName", index).Value().([]byte))
	colAttribute := string(fdt.Get("colAttribute", index).Value().([]byte))
	adatypes.Central.Log.Debugf("Collation attribute : %s", colAttribute)
	collType := adatypes.NewCollationType(name, length, parentName, colAttribute)
	option := fdt.Get("fieldOption", index).Value().(uint8)
	adatypes.Central.Log.Debugf("Option %d", option)
	// Masks 0x1/0x8/0x10/0x20 map to UQ/PE/NU/MU respectively.
	flags := []byte{0x1, 0x8, 0x10, 0x20}
	optionFlags := []adatypes.FieldOption{adatypes.FieldOptionUQ, adatypes.FieldOptionPE,
		adatypes.FieldOptionNU, adatypes.FieldOptionMU}
	for index, f := range flags {
		if (option & f) > 0 {
			collType.AddOption(optionFlags[index])
		}
	}
	// NOTE(review): the following three options are added when the bit is
	// CLEAR (== 0), the inverse of the loop above — confirm this inversion
	// is intentional.
	if (option & 0x3) == 0 {
		collType.AddOption(adatypes.FieldOptionHE)
	}
	option2 := fdt.Get("colOption2", index).Value().(uint8)
	if (option2 & 0x4) == 0 {
		collType.AddOption(adatypes.FieldOptionLA)
	}
	if (option2 & 0x8) == 0 {
		collType.AddOption(adatypes.FieldOptionLB)
	}
	if (option2 & 0x80) == 0 {
		collType.AddOption(adatypes.FieldOptionColExit)
	}
	fieldType = collType
	return
}
// create hyperexit type: builds a hyper-exit descriptor (exit number, format,
// length and the list of parent fields it is computed from) and translates
// the option byte into FieldOption flags.
func createHyperExitType(fdt *adatypes.StructureValue, index int) (fieldType adatypes.IAdaType, err error) {
	name := string(fdt.Get("fieldName", index).Value().([]byte))
	length := fdt.Get("hyperLength", index).Value().(uint16)
	fdtFormat := fdt.Get("fieldFormat", index).Value().(byte)
	nr := fdt.Get("hyperFExit", index).Value().(uint8)
	hyperList := fdt.Get("hyperList", index).(*adatypes.StructureValue)
	var parentFieldNames []string
	for _, hyper := range hyperList.Elements {
		parentFieldNames = append(parentFieldNames, string(hyper.Values[0].Value().([]byte)))
	}
	hyperType := adatypes.NewHyperExitType(name, uint32(length), fdtFormat, nr, parentFieldNames)
	option := fdt.Get("fieldOption", index).Value().(uint8)
	// Masks 0x1/0x4/0x8/0x10/0x20 map to UQ/HE/PE/NU/MU respectively.
	flags := []byte{0x1, 0x4, 0x8, 0x10, 0x20}
	optionFlags := []adatypes.FieldOption{adatypes.FieldOptionUQ, adatypes.FieldOptionHE, adatypes.FieldOptionPE,
		adatypes.FieldOptionNU, adatypes.FieldOptionMU}
	for index, f := range flags {
		if (option & f) > 0 {
			hyperType.AddOption(optionFlags[index])
		}
	}
	fieldType = hyperType
	return
}
// create referential integrity: builds a referential-constraint entry
// (referenced file, primary/foreign key field names and the constraint's
// type and update/delete actions) from the FDT entry.
func createReferential(fdt *adatypes.StructureValue, index int) (fieldType adatypes.IAdaType, err error) {
	name := string(fdt.Get("fieldName", index).Value().([]byte))
	refFile := fdt.Get("refFile", index).Value().(uint32)
	var keys [2]string
	keys[0] = string(fdt.Get("refPrimaryKey", index).Value().([]byte))
	keys[1] = string(fdt.Get("refForeignKey", index).Value().([]byte))
	refType := fdt.Get("refType", index).Value().(uint8)
	refUpdateAction := fdt.Get("refUpdateAction", index).Value().(uint8)
	refDeleteAction := fdt.Get("refDeleteAction", index).Value().(uint8)
	referentialType := adatypes.NewReferentialType(name, refFile, keys,
		refType, refUpdateAction, refDeleteAction)
	fieldType = referentialType
	return
}
// evaluateIntegerValue maps an integer field's byte length to the matching
// field type; binary selects the unsigned variant. Lengths other than
// 1, 2, 4 and 8 fall back to a raw byte array.
func evaluateIntegerValue(binary bool, length uint32) adatypes.FieldType {
	if binary {
		switch length {
		case 1:
			return adatypes.FieldTypeUByte
		case 2:
			return adatypes.FieldTypeUInt2
		case 4:
			return adatypes.FieldTypeUInt4
		case 8:
			return adatypes.FieldTypeUInt8
		}
		return adatypes.FieldTypeByteArray
	}
	switch length {
	case 1:
		return adatypes.FieldTypeByte
	case 2:
		return adatypes.FieldTypeInt2
	case 4:
		return adatypes.FieldTypeInt4
	case 8:
		return adatypes.FieldTypeInt8
	}
	return adatypes.FieldTypeByteArray
}
// Evaluate option for a field types: translate the two FDT option bytes into
// FieldOption flags on the given type. The flags/flags2 slices hold bit
// POSITIONS (see the option constants above); the mask is built per entry
// with (1 << position).
func evaluateOption(fieldType *adatypes.AdaType, option uint8, option2 uint8) {
	flags := [...]int{fdtFlagOption1UQ.iv(), fdtFlagOption1NU.iv(), fdtFlagOption1FI.iv(), fdtFlagOption1DE.iv(), fdtFlagOption1MU.iv()}
	optionFlags := []adatypes.FieldOption{adatypes.FieldOptionUQ, adatypes.FieldOptionNU, adatypes.FieldOptionFI, adatypes.FieldOptionDE, adatypes.FieldOptionMU}
	flags2 := [...]int{fdtFlagOption2NC.iv(), fdtFlagOption2NN.iv(), fdtFlagOption2HF.iv(), fdtFlagOption2NV.iv(), fdtFlagOption2NB.iv()}
	optionFlags2 := []adatypes.FieldOption{adatypes.FieldOptionNC, adatypes.FieldOptionNN, adatypes.FieldOptionHF, adatypes.FieldOptionNV, adatypes.FieldOptionNB}
	adatypes.Central.Log.Debugf("Evaluate Options %x", option)
	for i := 0; i < len(flags); i++ {
		if (option & (1 << uint32(flags[i]))) > 0 {
			adatypes.Central.Log.Debugf("%s Option %d", fieldType.String(), i)
			fieldType.AddOption(optionFlags[i])
		}
	}
	adatypes.Central.Log.Debugf("Evaluate Options2 %v", option2)
	for i := 0; i < len(flags2); i++ {
		if (option2 & (1 << uint32(flags2[i]))) > 0 {
			adatypes.Central.Log.Debugf("%s Option2 %d", fieldType.String(), i)
			fieldType.AddOption(optionFlags2[i])
		}
	}
}
|
package handlers
import (
"fama/core"
"fama/numbers/ports"
"github.com/gin-gonic/gin"
"net/http"
)
// init registers the handler constructor with the application's dependency
// injector so the handler is built with its NumbersManager dependency.
func init() {
	err := core.Injector.Provide(newNumbersHandler)
	core.CheckInjection(err, "newNumbersHandler")
}
// NumbersHandler exposes HTTP endpoints for number operations, delegating
// the actual work to a NumbersManager.
type NumbersHandler struct {
	manager ports.NumbersManager
}

// newNumbersHandler constructs a NumbersHandler; invoked by the injector
// (see init), which supplies the manager.
func newNumbersHandler(manager ports.NumbersManager) *NumbersHandler {
	return &NumbersHandler{
		manager: manager,
	}
}
// ToWords handles the number-to-words endpoint: it parses the request,
// delegates the conversion to the manager, and writes a JSON response
// (400 for a malformed request, 422 for a conversion failure).
func (h *NumbersHandler) ToWords(c *gin.Context) {
	request, parseErr := newNumberToWordsRequest(c)
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, parseErr)
		return
	}
	words, convErr := h.manager.ToWords(request.Number, request.Lang)
	if convErr != nil {
		generateError(c, http.StatusUnprocessableEntity, convErr)
		return
	}
	c.JSON(http.StatusOK, newNumberToWordsResponse(StatusOK, request.Lang, words))
}
|
package main
import (
"fmt"
"sort"
)
// https://leetcode-cn.com/problems/reverse-pairs/
// 493. 翻转对 | Reverse Pairs
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Solution
//
// 离散化 + 树状数组
//
// 这里注意将原数据*2,以方便处理.
// 离散化2倍后的元素后,将原数据范围缩小到50000内,再从右至左遍历原数组,找到比当前元素小的最大元素
// 位置 idx ,再利用树状数组统计 idx 处的计数.
//
// 复杂度分析:
// * 时间: O(N*lgN)
// * 空间: O(N)
// reversePairs counts pairs (i, j) with i < j and nums[i] > 2*nums[j]
// (LeetCode 493) via discretization plus a Binary Indexed Tree, as described
// in the solution comment above: every value is doubled before
// discretization so the comparison against 2*nums[j] becomes a rank lookup.
// Time O(N*lgN), space O(N).
func reversePairs(nums []int) int {
	N := len(nums)
	// discretize and delete duplicated values; hash maps doubled value -> 1-based rank
	ordered, hash, n := discretizeNums(nums)
	// Binary Indexed Trees: [1,n]
	NN := n + 1
	tr := make([]int, NN)
	// add increments the count at BIT index i.
	add := func(i int) {
		for ; i < NN; i += i & -i {
			tr[i]++
		}
	}
	// sum returns the prefix count over BIT indexes [1, i].
	sum := func(i int) (sum int) {
		for ; i > 0; i -= i & -i {
			sum += tr[i]
		}
		return sum
	}
	res := 0
	// Sweep right-to-left: for each i, count already-seen j (j > i) whose
	// doubled value 2*nums[j] is strictly below nums[i], then record 2*nums[i].
	for i := N - 1; i >= 0; i-- {
		// l = number of distinct doubled values strictly less than nums[i].
		l := sort.SearchInts(ordered, nums[i])
		res += sum(l)
		add(hash[nums[i]*2])
	}
	return res
}
// discretizeNums doubles every input value, deduplicates the results and
// returns them sorted, together with a map from doubled value to its
// 1-based rank (BIT index) and the number of distinct values.
func discretizeNums(nums []int) ([]int, map[int]int, int) {
	rank := make(map[int]int, len(nums))
	for _, v := range nums {
		rank[v*2] = 0 // value filled in after sorting
	}
	ordered := make([]int, 0, len(rank))
	for k := range rank {
		ordered = append(ordered, k)
	}
	sort.Ints(ordered)
	// BIT positions start at 1, so rank is index+1.
	for i, v := range ordered {
		rank[v] = i + 1
	}
	return ordered, rank, len(ordered)
}
//------------------------------------------------------------------------------
// main
// main runs reversePairs on a handful of fixed cases and prints the results.
func main() {
	cases := [][]int{
		{1, 3, 2, 3, 1},
		{2, 4, 3, 5, 1},
		{-6, -3},
		{6, 3},
	}
	for i, nums := range cases {
		fmt.Println("## case", i)
		// solve
		fmt.Println(reversePairs(nums))
	}
}
|
package GC
import (
"bytes"
"encoding/binary"
cmp "github.com/mortim-portim/GraphEng/compression"
)
//
//.d8888. db db d8b db .o88b. db db .d8b. d8888b.
//88' YP `8b d8' 888o 88 d8P Y8 88 88 d8' `8b 88 `8D
//`8bo. `8bd8' 88V8o 88 8P Y8 8P 88ooo88 88oobY'
// `Y8b. 88 88 V8o88 8b `8b d8' 88~~~88 88`8b
//db 8D 88 88 V888 Y8b d8 `8bd8' 88 88 88 `88.
//`8888Y' YP VP V8P `Y88P' YP YP YP 88 YD
//
//Syncronized Variable
// SyncVar is a variable whose value is synchronized between peers as a
// raw byte payload.  Implementations track a dirty flag so that only
// changed values need to be transmitted.
type SyncVar interface {
	// IsDirty reports whether the value changed since it was last sent
	// to every registered client.
	IsDirty() bool
	//MakeDirty()
	// GetData serializes the current value.
	GetData() []byte
	// SetData deserializes a received payload and clears the dirty flag.
	SetData([]byte)
	// Type returns the registered type byte of this implementation.
	Type() byte
	// IsRegisteredTo records how many clients this variable syncs to.
	IsRegisteredTo(int)
}
// RegisteredSyncVarTypes maps a SyncVar type byte to a factory that
// produces a fresh, zero-valued instance of that type.
var RegisteredSyncVarTypes map[byte](func() SyncVar)

// RegisterSyncVar registers factory under the type byte idx.
//
// Fixed: writing to a nil map panics, so calling this before
// InitSyncVarStandardTypes crashed; the map is now created lazily.
// NOTE(review): InitSyncVarStandardTypes still replaces the whole map,
// discarding registrations made before it runs — confirm call order.
func RegisterSyncVar(idx byte, factory func() SyncVar) {
	if RegisteredSyncVarTypes == nil {
		RegisteredSyncVarTypes = make(map[byte](func() SyncVar))
	}
	RegisteredSyncVarTypes[idx] = factory
}
func InitSyncVarStandardTypes() {
RegisteredSyncVarTypes = make(map[byte](func() SyncVar))
RegisteredSyncVarTypes[INT64SYNCED] = func() SyncVar { return CreateSyncInt64(0) }
RegisteredSyncVarTypes[FLOAT64SYNCED] = func() SyncVar { return CreateSyncFloat64(0) }
RegisteredSyncVarTypes[STRINGSYNCED] = func() SyncVar { return CreateSyncString("") }
RegisteredSyncVarTypes[INT16SYNCED] = func() SyncVar { return CreateSyncInt16(0) }
RegisteredSyncVarTypes[BOOLSYNCED] = func() SyncVar { return CreateSyncBool(false) }
RegisteredSyncVarTypes[BYTESYNCED] = func() SyncVar { return CreateSyncByte(0) }
RegisteredSyncVarTypes[BYTECOORDSYNCED] = func() SyncVar { return CreateSyncByteCoord(0, 0) }
RegisteredSyncVarTypes[UINT16SYNCED] = func() SyncVar { return CreateSyncUInt16(0) }
RegisteredSyncVarTypes[CHANNELSYNCED] = func() SyncVar { return CreateSyncChannel() }
}
// GetSyncVarOfType creates a fresh SyncVar via the factory registered
// for type byte t.
// NOTE(review): panics if t was never registered (calls a nil factory) —
// callers must only pass known type bytes.
func GetSyncVarOfType(t byte) SyncVar {
	return RegisteredSyncVarTypes[t]()
}
// GetBasicSyncVar returns a BasicSyncVar that expects one registered
// client and has not delivered any update yet.
func GetBasicSyncVar() BasicSyncVar {
	return BasicSyncVar{Registered: 1, Updated: 0}
}

// BasicSyncVar tracks how many clients a variable is registered to and
// how many of them have already fetched the current value.
type BasicSyncVar struct {
	Registered, Updated int
}

// IsRegisteredTo records the number of clients this variable syncs to.
func (b *BasicSyncVar) IsRegisteredTo(count int) {
	b.Registered = count
}

// AllUpdated reports whether every registered client got the value.
func (b *BasicSyncVar) AllUpdated() bool {
	return b.Updated >= b.Registered
}

// UpdatedPP counts one more client as having received the value.
func (b *BasicSyncVar) UpdatedPP() {
	b.Updated++
}

// ResetUpdated marks the current value as not yet delivered to anyone.
func (b *BasicSyncVar) ResetUpdated() {
	b.Updated = 0
}
// +-+-+-+-+-+-+-+-+-+
// |S|y|n|c|I|n|t|6|4|
// +-+-+-+-+-+-+-+-+-+
// SyncInt64 is a SyncVar carrying an int64 payload.
type SyncInt64 struct {
	BasicSyncVar
	variable int64
	dirty    bool
}

// SetInt stores i; a changed value marks the variable dirty and resets
// the per-client update counter.
func (s *SyncInt64) SetInt(i int64) {
	if i == s.variable {
		return
	}
	s.variable = i
	s.dirty = true
	s.ResetUpdated()
}

// GetInt returns the current value.
func (s *SyncInt64) GetInt() int64 {
	return s.variable
}

// IsDirty reports whether the value still has to be transmitted.
func (s *SyncInt64) IsDirty() bool {
	return s.dirty
}

// MakeDirty forces retransmission of the current value.
func (s *SyncInt64) MakeDirty() {
	s.dirty = true
}

// GetData serializes the value as 8 little-endian bytes; once every
// registered client has fetched it, the dirty flag is cleared.
func (s *SyncInt64) GetData() []byte {
	s.UpdatedPP()
	if s.AllUpdated() {
		s.dirty = false
	}
	data := make([]byte, 8)
	binary.LittleEndian.PutUint64(data, uint64(s.variable))
	return data
}

// SetData decodes 8 little-endian bytes and clears the dirty flag.
func (s *SyncInt64) SetData(raw []byte) {
	s.dirty = false
	binary.Read(bytes.NewBuffer(raw), binary.LittleEndian, &s.variable)
}

// Type returns the registered type byte for int64 variables.
func (s *SyncInt64) Type() byte {
	return INT64SYNCED
}

// CreateSyncInt64 builds a SyncInt64 holding variable, initially dirty.
func CreateSyncInt64(variable int64) *SyncInt64 {
	return &SyncInt64{GetBasicSyncVar(), variable, true}
}
// +-+-+-+-+-+-+-+-+-+
// |S|y|n|c|I|n|t|1|6|
// +-+-+-+-+-+-+-+-+-+
// SyncInt16 is a SyncVar carrying an int16 payload.
type SyncInt16 struct {
	BasicSyncVar
	variable int16
	dirty    bool
}

// SetInt stores i; a changed value marks the variable dirty and resets
// the per-client update counter.
func (s *SyncInt16) SetInt(i int16) {
	if i == s.variable {
		return
	}
	s.variable = i
	s.dirty = true
	s.ResetUpdated()
}

// GetInt returns the current value.
func (s *SyncInt16) GetInt() int16 {
	return s.variable
}

// IsDirty reports whether the value still has to be transmitted.
func (s *SyncInt16) IsDirty() bool {
	return s.dirty
}

// MakeDirty forces retransmission of the current value.
func (s *SyncInt16) MakeDirty() {
	s.dirty = true
}

// GetData serializes the value via the compression helpers; once every
// registered client has fetched it, the dirty flag is cleared.
func (s *SyncInt16) GetData() []byte {
	s.UpdatedPP()
	if s.AllUpdated() {
		s.dirty = false
	}
	return cmp.Int16ToBytes(s.variable)
}

// SetData decodes the payload and clears the dirty flag.
func (s *SyncInt16) SetData(raw []byte) {
	s.dirty = false
	s.variable = cmp.BytesToInt16(raw)
}

// Type returns the registered type byte for int16 variables.
func (s *SyncInt16) Type() byte {
	return INT16SYNCED
}

// CreateSyncInt16 builds a SyncInt16 holding variable, initially dirty.
func CreateSyncInt16(variable int16) *SyncInt16 {
	return &SyncInt16{GetBasicSyncVar(), variable, true}
}
// +-+-+-+-+-+-+-+-+-+-+
// |S|y|n|c|U|I|n|t|1|6|
// +-+-+-+-+-+-+-+-+-+-+
// SyncUInt16 is a SyncVar carrying a uint16 payload.
type SyncUInt16 struct {
	BasicSyncVar
	variable uint16
	dirty    bool
}

// SetInt stores i; a changed value marks the variable dirty and resets
// the per-client update counter.
func (s *SyncUInt16) SetInt(i uint16) {
	if i == s.variable {
		return
	}
	s.variable = i
	s.dirty = true
	s.ResetUpdated()
}

// GetInt returns the current value.
func (s *SyncUInt16) GetInt() uint16 {
	return s.variable
}

// IsDirty reports whether the value still has to be transmitted.
func (s *SyncUInt16) IsDirty() bool {
	return s.dirty
}

// MakeDirty forces retransmission of the current value.
func (s *SyncUInt16) MakeDirty() {
	s.dirty = true
}

// GetData serializes the value via the compression helpers; once every
// registered client has fetched it, the dirty flag is cleared.
func (s *SyncUInt16) GetData() []byte {
	s.UpdatedPP()
	if s.AllUpdated() {
		s.dirty = false
	}
	return cmp.UInt16ToBytes(s.variable)
}

// Clean drops the dirty flag without transmitting anything.
func (s *SyncUInt16) Clean() {
	s.dirty = false
}

// SetData decodes the payload and clears the dirty flag.
func (s *SyncUInt16) SetData(raw []byte) {
	s.dirty = false
	s.variable = cmp.BytesToUInt16(raw)
}

// Type returns the registered type byte for uint16 variables.
func (s *SyncUInt16) Type() byte {
	return UINT16SYNCED
}

// CreateSyncUInt16 builds a SyncUInt16 holding variable, initially dirty.
func CreateSyncUInt16(variable uint16) *SyncUInt16 {
	return &SyncUInt16{GetBasicSyncVar(), variable, true}
}
// +-+-+-+-+-+-+-+-+
// |S|y|n|c|B|o|o|l|
// +-+-+-+-+-+-+-+-+
// SyncBool is a SyncVar carrying a bool payload.
type SyncBool struct {
	BasicSyncVar
	variable bool
	dirty    bool
}

// SetBool stores i; a changed value marks the variable dirty and resets
// the per-client update counter.
func (s *SyncBool) SetBool(i bool) {
	if i == s.variable {
		return
	}
	s.variable = i
	s.dirty = true
	s.ResetUpdated()
}

// GetBool returns the current value.
func (s *SyncBool) GetBool() bool {
	return s.variable
}

// IsDirty reports whether the value still has to be transmitted.
func (s *SyncBool) IsDirty() bool {
	return s.dirty
}

// MakeDirty forces retransmission of the current value.
func (s *SyncBool) MakeDirty() {
	s.dirty = true
}

// GetData serializes the bool as a single byte; once every registered
// client has fetched it, the dirty flag is cleared.
func (s *SyncBool) GetData() []byte {
	s.UpdatedPP()
	if s.AllUpdated() {
		s.dirty = false
	}
	return []byte{cmp.BoolToByte(s.variable)}
}

// SetData decodes the first payload byte and clears the dirty flag.
func (s *SyncBool) SetData(raw []byte) {
	s.dirty = false
	s.variable = cmp.ByteToBool(raw[0])
}

// Type returns the registered type byte for bool variables.
func (s *SyncBool) Type() byte {
	return BOOLSYNCED
}

// CreateSyncBool builds a SyncBool holding variable, initially dirty.
func CreateSyncBool(variable bool) *SyncBool {
	return &SyncBool{GetBasicSyncVar(), variable, true}
}
// +-+-+-+-+-+-+-+-+
// |S|y|n|c|B|y|t|e|
// +-+-+-+-+-+-+-+-+
// SyncByte is a SyncVar carrying a single byte payload.
type SyncByte struct {
	BasicSyncVar
	variable byte
	dirty    bool
}

// SetByte stores i; a changed value marks the variable dirty and resets
// the per-client update counter.
func (s *SyncByte) SetByte(i byte) {
	if i == s.variable {
		return
	}
	s.variable = i
	s.dirty = true
	s.ResetUpdated()
}

// GetByte returns the current value.
func (s *SyncByte) GetByte() byte {
	return s.variable
}

// IsDirty reports whether the value still has to be transmitted.
func (s *SyncByte) IsDirty() bool {
	return s.dirty
}

// MakeDirty forces retransmission of the current value.
func (s *SyncByte) MakeDirty() {
	s.dirty = true
}

// GetData serializes the value as a one-byte slice; once every
// registered client has fetched it, the dirty flag is cleared.
func (s *SyncByte) GetData() []byte {
	s.UpdatedPP()
	if s.AllUpdated() {
		s.dirty = false
	}
	return []byte{s.variable}
}

// SetData takes the first payload byte and clears the dirty flag.
func (s *SyncByte) SetData(raw []byte) {
	s.dirty = false
	s.variable = raw[0]
}

// Type returns the registered type byte for byte variables.
func (s *SyncByte) Type() byte {
	return BYTESYNCED
}

// CreateSyncByte builds a SyncByte holding variable, initially dirty.
func CreateSyncByte(variable byte) *SyncByte {
	return &SyncByte{GetBasicSyncVar(), variable, true}
}
// +-+-+-+-+-+-+-+-+-+-+-+-+-+
// |S|y|n|c|B|y|t|e|C|o|o|r|d|
// +-+-+-+-+-+-+-+-+-+-+-+-+-+
// SyncByteCoord is a SyncVar carrying a small signed 2D coordinate,
// each component within an int8.
type SyncByteCoord struct {
	BasicSyncVar
	x, y  int8
	dirty bool
}

// Move shifts the coordinate by (dx, dy).
func (s *SyncByteCoord) Move(dx, dy int8) {
	s.Set(s.x+dx, s.y+dy)
}

// Set stores (x, y); a changed coordinate marks the variable dirty and
// resets the per-client update counter.
func (s *SyncByteCoord) Set(x, y int8) {
	if x == s.x && y == s.y {
		return
	}
	s.x = x
	s.y = y
	s.dirty = true
	s.ResetUpdated()
}

// Get returns the current coordinate.
func (s *SyncByteCoord) Get() (int8, int8) {
	return s.x, s.y
}

// IsDirty reports whether the coordinate still has to be transmitted.
func (s *SyncByteCoord) IsDirty() bool {
	return s.dirty
}

// MakeDirty forces retransmission of the current coordinate.
func (s *SyncByteCoord) MakeDirty() {
	s.dirty = true
}

// GetData serializes the coordinate as two bytes, each component shifted
// by +128 to fit an unsigned byte; once every registered client has
// fetched it, the dirty flag is cleared.
func (s *SyncByteCoord) GetData() []byte {
	s.UpdatedPP()
	if s.AllUpdated() {
		s.dirty = false
	}
	return []byte{byte(int(s.x) + 128), byte(int(s.y) + 128)}
}

// SetData decodes two shifted bytes and clears the dirty flag.
func (s *SyncByteCoord) SetData(data []byte) {
	s.dirty = false
	s.x = int8(int(data[0]) - 128)
	s.y = int8(int(data[1]) - 128)
}

// Type returns the registered type byte for byte-coordinate variables.
func (s *SyncByteCoord) Type() byte {
	return BYTECOORDSYNCED
}

// CreateSyncByteCoord builds a SyncByteCoord at (x, y), initially dirty.
func CreateSyncByteCoord(x, y int8) *SyncByteCoord {
	return &SyncByteCoord{GetBasicSyncVar(), x, y, true}
}
// +-+-+-+-+-+-+-+-+-+-+-+
// |S|y|n|c|F|l|o|a|t|6|4|
// +-+-+-+-+-+-+-+-+-+-+-+
// SyncFloat64 is a SyncVar carrying a float64 payload.
type SyncFloat64 struct {
	BasicSyncVar
	variable float64
	dirty    bool
}

// SetFloat stores i; a changed value marks the variable dirty and resets
// the per-client update counter.
func (s *SyncFloat64) SetFloat(i float64) {
	if i == s.variable {
		return
	}
	s.variable = i
	s.dirty = true
	s.ResetUpdated()
}

// GetFloat returns the current value.
func (s *SyncFloat64) GetFloat() float64 {
	return s.variable
}

// IsDirty reports whether the value still has to be transmitted.
func (s *SyncFloat64) IsDirty() bool {
	return s.dirty
}

// MakeDirty forces retransmission of the current value.
func (s *SyncFloat64) MakeDirty() {
	s.dirty = true
}

// GetData serializes the value as 8 little-endian bytes; once every
// registered client has fetched it, the dirty flag is cleared.
func (s *SyncFloat64) GetData() []byte {
	s.UpdatedPP()
	if s.AllUpdated() {
		s.dirty = false
	}
	var out bytes.Buffer
	binary.Write(&out, binary.LittleEndian, s.variable)
	return out.Bytes()
}

// SetData decodes 8 little-endian bytes and clears the dirty flag.
func (s *SyncFloat64) SetData(raw []byte) {
	s.dirty = false
	binary.Read(bytes.NewBuffer(raw), binary.LittleEndian, &s.variable)
}

// Type returns the registered type byte for float64 variables.
func (s *SyncFloat64) Type() byte {
	return FLOAT64SYNCED
}

// CreateSyncFloat64 builds a SyncFloat64 holding variable, initially dirty.
func CreateSyncFloat64(variable float64) *SyncFloat64 {
	return &SyncFloat64{GetBasicSyncVar(), variable, true}
}
// +-+-+-+-+-+-+-+-+-+-+
// |S|y|n|c|S|t|r|i|n|g|
// +-+-+-+-+-+-+-+-+-+-+
// SyncString is a SyncVar carrying a string payload.
type SyncString struct {
	BasicSyncVar
	variable string
	dirty    bool
}

// Clear resets the string to the empty string.
//
// Fixed: the previous implementation emptied the value without marking
// the variable dirty or resetting the update counter, so a cleared
// string was never synchronized to clients. It now goes through
// SetString, which handles both.
func (sv *SyncString) Clear() {
	sv.SetString("")
}

// SetBs stores the byte slice as a string value.
func (sv *SyncString) SetBs(bs []byte) {
	sv.SetString(string(bs))
}

// GetBs returns the current value as a byte slice.
func (sv *SyncString) GetBs() []byte {
	return []byte(sv.variable)
}

// SetString stores i; a changed value marks the variable dirty and
// resets the per-client update counter.
func (sv *SyncString) SetString(i string) {
	if sv.variable != i {
		sv.variable = i
		sv.dirty = true
		sv.ResetUpdated()
	}
}

// GetString returns the current value.
func (sv *SyncString) GetString() string {
	return sv.variable
}

// IsDirty reports whether the value still has to be transmitted.
func (sv *SyncString) IsDirty() bool {
	return sv.dirty
}

// MakeDirty forces retransmission of the current value.
func (sv *SyncString) MakeDirty() {
	sv.dirty = true
}

// GetData serializes the string as raw bytes; once every registered
// client has fetched it, the dirty flag is cleared.
func (sv *SyncString) GetData() []byte {
	sv.UpdatedPP()
	if sv.AllUpdated() {
		sv.dirty = false
	}
	return []byte(sv.variable)
}

// SetData stores the payload as the new string value and clears the
// dirty flag.
func (sv *SyncString) SetData(variable []byte) {
	sv.dirty = false
	sv.variable = string(variable)
}

// Type returns the registered type byte for string variables.
func (sv *SyncString) Type() byte {
	return STRINGSYNCED
}

// CreateSyncString builds a SyncString holding variable, initially dirty.
func CreateSyncString(variable string) *SyncString {
	return &SyncString{GetBasicSyncVar(), variable, true}
}
//!DEPRECATED!
//func CopySyncVar(sv SyncVar) SyncVar {
// wasDirty := sv.IsDirty()
// svc := GetSyncVarOfType(sv.Type())
// svc.SetData(sv.GetData())
// if wasDirty {
// sv.MakeDirty()
// svc.MakeDirty()
// }
// return svc
//}
//func CopySyncVars(count int, svs ...SyncVar) (svsL [][]SyncVar) {
// svsL = make([][]SyncVar, count)
// copy(svsL[0], svs)
// for i := 1; i < count; i ++ {
// svsL[i] = make([]SyncVar, len(svs))
// for i2, sv := range(svs) {
// svsL[i][i2] = CopySyncVar(sv)
// }
// }
// return
//}
//TODO
/**
// +-+-+-+-+-+-+-+-+-+
// |S|y|n|c|S|t|a|c|k|
// +-+-+-+-+-+-+-+-+-+
type SyncStack struct {
bytes [][]byte
dirty bool
}
func (sv *SyncString) SetString(i string) {
sv.variable = i
sv.dirty = true
}
func (sv *SyncString) GetString() string {
return sv.variable
}
func (sv *SyncString) IsDirty() bool {
return sv.dirty
}
func (sv *SyncString) GetData() []byte {
sv.dirty = false
return []byte(sv.variable)
}
func (sv *SyncString) SetData(variable []byte) {
sv.dirty = false
sv.variable = string(variable)
}
func (sv *SyncString) Type() byte {
return STRINGSYNCED
}
func CreateSyncString(variable string) *SyncString {
return &SyncString{variable, true}
}
**/
|
// Copyright © 2020 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package immediate is a submitter that immediately submits requests received.
package immediate
import (
"context"
eth2client "github.com/attestantio/go-eth2-client"
"github.com/attestantio/vouch/services/metrics"
nullmetrics "github.com/attestantio/vouch/services/metrics/null"
"github.com/pkg/errors"
"github.com/rs/zerolog"
)
// parameters holds the configurable options for the immediate submitter
// service; mandatory fields are validated by parseAndCheckParameters.
type parameters struct {
	logLevel                              zerolog.Level
	clientMonitor                         metrics.ClientMonitor
	beaconBlockSubmitter                  eth2client.BeaconBlockSubmitter
	attestationsSubmitter                 eth2client.AttestationsSubmitter
	beaconCommitteeSubscriptionsSubmitter eth2client.BeaconCommitteeSubscriptionsSubmitter
	aggregateAttestationsSubmitter        eth2client.AggregateAttestationsSubmitter
	syncCommitteeMessagesSubmitter        eth2client.SyncCommitteeMessagesSubmitter
	syncCommitteeSubscriptionsSubmitter   eth2client.SyncCommitteeSubscriptionsSubmitter
	syncCommitteeContributionsSubmitter   eth2client.SyncCommitteeContributionsSubmitter
}
// Parameter is the interface for service parameters.
type Parameter interface {
	apply(*parameters)
}

// parameterFunc adapts a plain function to the Parameter interface.
type parameterFunc func(*parameters)

// apply invokes the wrapped function on p.
func (f parameterFunc) apply(p *parameters) {
	f(p)
}
// WithLogLevel sets the log level for the module.
func WithLogLevel(logLevel zerolog.Level) Parameter {
	return parameterFunc(func(p *parameters) {
		p.logLevel = logLevel
	})
}

// WithClientMonitor sets the client monitor.
func WithClientMonitor(clientMonitor metrics.ClientMonitor) Parameter {
	return parameterFunc(func(p *parameters) {
		p.clientMonitor = clientMonitor
	})
}

// WithBeaconBlockSubmitter sets the beacon block submitter.
func WithBeaconBlockSubmitter(submitter eth2client.BeaconBlockSubmitter) Parameter {
	return parameterFunc(func(p *parameters) {
		p.beaconBlockSubmitter = submitter
	})
}

// WithAttestationsSubmitter sets the attestation submitter.
func WithAttestationsSubmitter(submitter eth2client.AttestationsSubmitter) Parameter {
	return parameterFunc(func(p *parameters) {
		p.attestationsSubmitter = submitter
	})
}

// WithSyncCommitteeMessagesSubmitter sets the sync committee messages submitter.
func WithSyncCommitteeMessagesSubmitter(submitter eth2client.SyncCommitteeMessagesSubmitter) Parameter {
	return parameterFunc(func(p *parameters) {
		p.syncCommitteeMessagesSubmitter = submitter
	})
}

// WithSyncCommitteeSubscriptionsSubmitter sets the sync committee subscriptions submitter.
func WithSyncCommitteeSubscriptionsSubmitter(submitter eth2client.SyncCommitteeSubscriptionsSubmitter) Parameter {
	return parameterFunc(func(p *parameters) {
		p.syncCommitteeSubscriptionsSubmitter = submitter
	})
}

// WithSyncCommitteeContributionsSubmitter sets the sync committee contributions submitter.
func WithSyncCommitteeContributionsSubmitter(submitter eth2client.SyncCommitteeContributionsSubmitter) Parameter {
	return parameterFunc(func(p *parameters) {
		p.syncCommitteeContributionsSubmitter = submitter
	})
}

// WithBeaconCommitteeSubscriptionsSubmitter sets the attestation subnet subscriptions submitter.
func WithBeaconCommitteeSubscriptionsSubmitter(submitter eth2client.BeaconCommitteeSubscriptionsSubmitter) Parameter {
	return parameterFunc(func(p *parameters) {
		p.beaconCommitteeSubscriptionsSubmitter = submitter
	})
}

// WithAggregateAttestationsSubmitter sets the aggregate attestation submitter.
func WithAggregateAttestationsSubmitter(submitter eth2client.AggregateAttestationsSubmitter) Parameter {
	return parameterFunc(func(p *parameters) {
		p.aggregateAttestationsSubmitter = submitter
	})
}
// parseAndCheckParameters parses and checks parameters to ensure that mandatory parameters are present and correct.
func parseAndCheckParameters(params ...Parameter) (*parameters, error) {
parameters := parameters{
logLevel: zerolog.GlobalLevel(),
clientMonitor: nullmetrics.New(context.Background()),
}
for _, p := range params {
if params != nil {
p.apply(¶meters)
}
}
if parameters.clientMonitor == nil {
return nil, errors.New("no client monitor specified")
}
if parameters.beaconBlockSubmitter == nil {
return nil, errors.New("no beacon block submitter specified")
}
if parameters.attestationsSubmitter == nil {
return nil, errors.New("no attestations submitter specified")
}
if parameters.syncCommitteeMessagesSubmitter == nil {
return nil, errors.New("no sync committee messages submitter specified")
}
if parameters.syncCommitteeSubscriptionsSubmitter == nil {
return nil, errors.New("no sync committee subscriptions submitter specified")
}
if parameters.syncCommitteeContributionsSubmitter == nil {
return nil, errors.New("no sync committee contributions submitter specified")
}
if parameters.beaconCommitteeSubscriptionsSubmitter == nil {
return nil, errors.New("no beacon committee subscriptions submitter specified")
}
if parameters.aggregateAttestationsSubmitter == nil {
return nil, errors.New("no aggregate attestations submitter specified")
}
return ¶meters, nil
}
|
package opp
import "testing"
func TestMemProFile(t*testing.T){
MemProFile()
} |
package solver
import (
"context"
"math/rand"
"strconv"
"testing"
)
// BenchmarkInput is a deterministic, randomly generated problem instance
// shared by the benchmarks below: 256 variables, each with a ~10% chance
// of being mandatory, ~15% chance of depending on up to 6 other
// variables, and ~5% chance of conflicting with up to 3 others.
//
// NOTE(review): the fixed seed makes the instance reproducible; the
// exact sequence of rnd calls must not be reordered or the generated
// input changes.
var BenchmarkInput = func() []Variable {
	const (
		length      = 256
		seed        = 9
		pMandatory  = .1
		pDependency = .15
		nDependency = 6
		pConflict   = .05
		nConflict   = 3
	)
	rnd := rand.New(rand.NewSource(seed))
	// id maps an index to its string Identifier.
	id := func(i int) Identifier {
		return Identifier(strconv.Itoa(i))
	}
	// variable builds the i-th test variable with random constraints.
	variable := func(i int) TestVariable {
		var c []Constraint
		if rnd.Float64() < pMandatory {
			c = append(c, Mandatory())
		}
		if rnd.Float64() < pDependency {
			n := rnd.Intn(nDependency-1) + 1
			var d []Identifier
			for x := 0; x < n; x++ {
				// Pick a dependency other than i itself.
				y := i
				for y == i {
					y = rnd.Intn(length)
				}
				d = append(d, id(y))
			}
			c = append(c, Dependency(d...))
		}
		if rnd.Float64() < pConflict {
			n := rnd.Intn(nConflict-1) + 1
			for x := 0; x < n; x++ {
				// Pick a conflict other than i itself.
				y := i
				for y == i {
					y = rnd.Intn(length)
				}
				c = append(c, Conflict(id(y)))
			}
		}
		return TestVariable{
			identifier:  id(i),
			constraints: c,
		}
	}
	result := make([]Variable, length)
	for i := range result {
		result[i] = variable(i)
	}
	return result
}()
// BenchmarkSolve measures constructing a solver over BenchmarkInput and
// solving it once per iteration.
func BenchmarkSolve(b *testing.B) {
	for iter := 0; iter < b.N; iter++ {
		solver, err := New(WithInput(BenchmarkInput))
		if err != nil {
			b.Fatalf("failed to initialize solver: %s", err)
		}
		solver.Solve(context.Background())
	}
}
// BenchmarkNewInput measures only solver construction over BenchmarkInput.
func BenchmarkNewInput(b *testing.B) {
	for iter := 0; iter < b.N; iter++ {
		if _, err := New(WithInput(BenchmarkInput)); err != nil {
			b.Fatalf("failed to initialize solver: %s", err)
		}
	}
}
|
/*
Given num as input, return an array with all primes up to num included.
Alternative Text
Examples
eratosthenes(1) ➞ []
eratosthenes(10) ➞ [2, 3, 5, 7]
eratosthenes(20) ➞ [2, 3, 5, 7, 11, 13, 17, 19]
eratosthenes(0) ➞ []
Notes
Check the Resources tab for info on the meaning of "Eratosthenes".
Try solving this challenge using Eratosthenes sieve.
*/
package main
import (
"math"
"reflect"
)
// main checks eratosthenes against a table of known prime lists.
func main() {
	cases := []struct {
		n    uint
		want []uint
	}{
		{10, []uint{2, 3, 5, 7}},
		{0, []uint{}},
		{1, []uint{}},
		{20, []uint{2, 3, 5, 7, 11, 13, 17, 19}},
		{1000, []uint{2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997}},
	}
	for _, c := range cases {
		test(c.n, c.want)
	}
}
// test asserts that eratosthenes(n) equals r; an empty expectation only
// requires an empty result (nil or zero-length).
func test(n uint, r []uint) {
	got := eratosthenes(n)
	if len(r) == 0 {
		assert(len(got) == 0)
		return
	}
	assert(reflect.DeepEqual(got, r))
}
// assert panics when x is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// eratosthenes returns all primes up to and including n, using the
// sieve of Eratosthenes.
//
// Fixed: the sieve was allocated with length n and iterated with
// exclusive bounds (j < n, i < n), which silently excluded n itself —
// eratosthenes(7) returned [2 3 5] instead of [2 3 5 7], although the
// spec says "up to num included". The sieve is now n+1 wide with
// inclusive bounds. Marking also starts at i*i instead of i+i, since
// smaller multiples were already marked by smaller factors.
func eratosthenes(n uint) []uint {
	if n < 2 {
		return nil
	}
	// Only factors up to sqrt(n) need to be sieved.
	l := uint(math.Sqrt(float64(n)))
	p := make([]bool, n+1) // p[i] == true means i is composite
	for i := uint(2); i <= l; i++ {
		for j := i * i; j <= n; j += i {
			p[j] = true
		}
	}
	var r []uint
	for i := uint(2); i <= n; i++ {
		if !p[i] {
			r = append(r, i)
		}
	}
	return r
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.