text stringlengths 11 4.05M |
|---|
package moxxiConf
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"testing"
"text/template"
"github.com/stretchr/testify/assert"
)
// TestStaticHandler verifies that StaticHandler serves the exact bytes of the
// configured response file, on repeated requests.
func TestStaticHandler(t *testing.T) {
	// test setup: write the expected payload to a temp file the handler will serve
	expected := []byte(`this is the response I expect to recieve`)
	file, err := ioutil.TempFile(os.TempDir(), "moxxi_test_")
	assert.Nil(t, err, "could not open temp file for writing - %v", err)
	defer os.Remove(file.Name()) // remove the fixture when the test ends

	_, err = file.Write(expected)
	// was a copy-pasted "could no open" message; this assertion is about writing
	assert.Nil(t, err, "could not write to temp file - %v", err)
	// close so the handler reads a fully flushed file, and so we don't leak the fd
	assert.Nil(t, file.Close(), "could not close temp file")

	server := httptest.NewServer(StaticHandler(HandlerConfig{resFile: file.Name()},
		log.New(ioutil.Discard, "", log.LstdFlags)))
	defer server.Close()

	for i := 0; i < 10; i++ {
		resp, err := http.Get(server.URL)
		assert.Nil(t, err, "got a bad response from the server - %v", err)
		if err != nil {
			// resp is nil on error; skip instead of dereferencing it
			continue
		}
		actual, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close() // close each body so the client can reuse connections
		assert.Nil(t, err, "got an error reading the body of the response - %v", err)
		assert.Equal(t, expected, actual, "test #%d - got a different response than expected", i)
	}
}
// TestFormHandler_POST exercises FormHandler with a table of form posts,
// checking both the HTTP response and the proxy config file written for
// successful requests.
func TestFormHandler_POST(t *testing.T) {
	// test setup
	testConfig := HandlerConfig{
		baseURL:      "test.com",
		confPath:     os.TempDir(),
		confExt:      ".testout",
		exclude:      []string{"a", "b", "c"},
		subdomainLen: 8,
	}
	confTemplVal := "{{.IntHost}} {{.IntIP}} {{.IntPort}} {{.Encrypted}}"
	confTemplVal += " {{ range .StripHeaders }}{{.}} {{end}}"
	testConfig.confTempl = template.Must(template.New("testing").Parse(confTemplVal))
	resTemplVal := "{{range .}} {{ .ExtHost }} {{ end }}"
	testConfig.resTempl = template.Must(template.New("testing").Parse(resTemplVal))

	server := httptest.NewServer(FormHandler(testConfig,
		log.New(ioutil.Discard, "", log.LstdFlags)))
	defer server.Close()

	// element types elided from the map literals (go vet: redundant type)
	var testData = []struct {
		reqParams map[string][]string
		resCode   int
		fileOut   string
	}{
		{
			reqParams: map[string][]string{
				"host":   {"proxied.com"},
				"ip":     {"10.10.10.10"},
				"port":   {"80"},
				"tls":    {"true"},
				"header": {"KeepAlive", "b", "c"},
			},
			resCode: 200,
			fileOut: `proxied.com 10.10.10.10 80 true KeepAlive b c `,
		}, {
			reqParams: map[string][]string{
				"ip":     {"10.10.10.10"},
				"port":   {"80"},
				"tls":    {"true"},
				"header": {"KeepAlive", "b", "c"},
			},
			resCode: http.StatusPreconditionFailed,
			fileOut: "no provided hostname\n",
		}, {
			reqParams: map[string][]string{
				"host":   {"proxied.com"},
				"port":   {"80"},
				"tls":    {"true"},
				"header": {"KeepAlive", "b", "c"},
			},
			resCode: http.StatusPreconditionFailed,
			fileOut: "no provided IP\n",
		}, {
			// no "port" given, yet the expected file contains 80 — presumably
			// the handler defaults the port to 80; confirm against FormHandler.
			reqParams: map[string][]string{
				"host":   {"proxied.com"},
				"ip":     {"10.10.10.10"},
				"tls":    {"true"},
				"header": {"KeepAlive", "b", "c"},
			},
			resCode: 200,
			fileOut: `proxied.com 10.10.10.10 80 true KeepAlive b c `,
		}, {
			reqParams: map[string][]string{
				"host":   {".com"},
				"ip":     {"10.potato10.10.10"},
				"port":   {"80"},
				"tls":    {"true"},
				"header": {"KeepAlive", "b", "c"},
			},
			resCode: http.StatusPreconditionFailed,
			fileOut: "bad hostname provided [.com]\n",
		},
	}

	for id, test := range testData {
		params := url.Values(test.reqParams)
		resp, err := http.PostForm(server.URL, params)
		assert.NoError(t, err, "test %d - got an error I should not have when running the request", id)
		if err != nil {
			continue
		}
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close() // was commented out before; close so connections are reused
		assert.Nil(t, err, "test %d - problem reading response - %v", id, err)
		assert.Equal(t, test.resCode, resp.StatusCode,
			"test %d - got the wrong response code", id)
		if resp.StatusCode == 200 {
			// on success the body is the generated subdomain; read back the
			// config file the handler should have written for it
			proxyOut, err := ioutil.ReadFile(
				fmt.Sprintf("%s/%s%s",
					testConfig.confPath,
					bytes.TrimSpace(body),
					testConfig.confExt))
			assert.Nil(t, err, "test %d - problem reading file - %v", id, err)
			assert.Equal(t, test.fileOut, string(proxyOut),
				"test %d - wrong data written to the file", id)
		} else {
			// expected value first, actual second (testify argument order)
			assert.Equal(t, test.fileOut, string(body),
				"test %d - response and expected response did not match", id)
		}
	}
}
// TestJSONHandler_POST exercises JSONHandler with JSON request bodies and
// verifies every config file named in the response was written with the
// expected contents.
func TestJSONHandler_POST(t *testing.T) {
	// test setup
	testConfig := HandlerConfig{
		baseURL:      "test.com",
		confPath:     os.TempDir(),
		confExt:      ".testout",
		exclude:      []string{"a", "b", "c"},
		subdomainLen: 8,
	}
	confTemplVal := "{{.IntHost}} {{.IntIP}} {{.IntPort}} {{.Encrypted}}"
	confTemplVal += " {{ range .StripHeaders }}{{.}} {{end}}"
	testConfig.confTempl = template.Must(template.New("testing").Parse(confTemplVal))
	// NOTE(review): the trailing '"' after the "end" define looks accidental —
	// confirm it does not leak into rendered output.
	resTemplVal := `{{ define "start" }}{{ end }}
{{define "body" }}{{ .ExtHost }}
{{ end }}
{{ define "end" }}{{ end }}"`
	testConfig.resTempl = template.Must(template.New("testing").Parse(resTemplVal))

	server := httptest.NewServer(JSONHandler(testConfig,
		log.New(ioutil.Discard, "", log.LstdFlags)))
	defer server.Close()

	var testData = []struct {
		reqBody string
		resCode int
		fileOut []string
	}{
		{
			reqBody: `{ "IntHost": "proxied.com", "IntIP": "10.10.10.10", "IntPort": 80,
	"Encrypted": true, "StripHeaders": [ "KeepAlive", "b", "c" ]}`,
			resCode: 200,
			fileOut: []string{
				`proxied.com 10.10.10.10 80 true KeepAlive b c `,
			},
		},
	}

	for id, test := range testData {
		resp, err := http.Post(server.URL, "application/json", strings.NewReader(test.reqBody))
		assert.NoError(t, err, "test %d - got an error I should not have when running the request", id)
		if err != nil {
			continue
		}
		assert.Equal(t, test.resCode, resp.StatusCode,
			"test %d - got the wrong response code", id)
		if resp.StatusCode == 200 {
			// remaining holds the expected file contents not yet matched
			remaining := test.fileOut
			s := bufio.NewScanner(resp.Body)
			for s.Scan() {
				fileName := strings.TrimSpace(s.Text())
				if fileName == "" {
					continue
				}
				contents, err := ioutil.ReadFile(
					fmt.Sprintf("%s/%s%s",
						testConfig.confPath,
						fileName,
						testConfig.confExt))
				assert.NoError(t, err, "test %d - problem reading file [%s] - %v", id, fileName, err)
				// remove the first matching expectation; append handles the
				// last-element case too, so no special-casing is needed
				found := false
				for i := range remaining {
					if remaining[i] == string(contents) {
						remaining = append(remaining[:i], remaining[i+1:]...)
						found = true
						break
					}
				}
				if !found {
					// assert.Fail does not apply format verbs to its message,
					// so format it explicitly
					assert.Fail(t, fmt.Sprintf(
						"test %d - response not expected - opened file [%s]", id, fileName))
				}
			}
			// the scanner can fail silently; surface any read error
			assert.NoError(t, s.Err(), "test %d - error scanning response body", id)
			if len(remaining) > 0 {
				assert.Fail(t, fmt.Sprintf(
					"test %d - had results left over that were not found\n%v", id, remaining))
			}
		}
		resp.Body.Close()
	}
}
|
package utils
import (
"errors"
"github.com/wenzhenxi/gorsa"
)
// RSA key pair used by the helpers below.
//
// NOTE(review): a hard-coded private key checked into source is acceptable
// only for demos/tests; a real deployment should load keys from configuration
// or a secret store. The PEM markers use "公钥"/"私钥" (Chinese for "public
// key"/"private key") instead of the standard "PUBLIC KEY"/"PRIVATE KEY" —
// presumably the gorsa package accepts these non-standard markers; confirm.
var publicKey = `-----BEGIN 公钥-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANL378k3RiZHWx5AfJqdH9xRNBmD9wGD2iRe41HdTNF8RUhNnHit5NpMNtGL0NPTSSpPjjI1kJfVorRvaQerUgkCAwEAAQ==
-----END 公钥-----
`

var privateKey = `-----BEGIN 私钥-----
MIIBUwIBADANBgkqhkiG9w0BAQEFAASCAT0wggE5AgEAAkEA0vfvyTdGJkdbHkB8mp0f3FE0GYP3AYPaJF7jUd1M0XxFSE2ceK3k2kw20YvQ09NJKk+OMjWQl9WitG9pB6tSCQIDAQABAkA2SimBrWC2/wvauBuYqjCFwLvYiRYqZKThUS3MZlebXJiLB+Ue/gUifAAKIg1avttUZsHBHrop4qfJCwAI0+YRAiEA+W3NK/RaXtnRqmoUUkb59zsZUBLpvZgQPfj1MhyHDz0CIQDYhsAhPJ3mgS64NbUZmGWuuNKp5coY2GIj/zYDMJp6vQIgUueLFXv/eZ1ekgz2Oi67MNCk5jeTF2BurZqNLR3MSmUCIFT3Q6uHMtsB9Eha4u7hS31tj1UWE+D+ADzp59MGnoftAiBeHT7gDMuqeJHPL4b+kC+gzV4FGTfhR9q3tTbklZkD2A==
-----END 私钥-----
`
// RsaPriDecode decrypts str with the package private key.
// (Original comment: 私钥解密 — "private-key decryption".)
func RsaPriDecode(str string) (value string, err error) {
	// the previous `if err != nil { return }; return` pair was a no-op —
	// both branches returned the same named values
	return gorsa.PriKeyDecrypt(str, privateKey)
}
// RsaPubDecode decrypts str with the package public key.
// (Original comment: 公钥解密 — "public-key decryption".)
func RsaPubDecode(str string) (value string, err error) {
	// the error check before return was redundant; both paths returned the
	// same named results
	return gorsa.PublicDecrypt(str, publicKey)
}
// RsaPriEncode encrypts str with the package private key.
// (Original comment: 私钥加密 — "private-key encryption".)
func RsaPriEncode(str string) (value string, err error) {
	// simplified: the old error check just fell through to the same return
	return gorsa.PriKeyEncrypt(str, privateKey)
}
// RsaPubEncode encrypts str with the package public key.
// (Original comment: 公钥加密 — "public-key encryption".)
func RsaPubEncode(str string) (value string, err error) {
	// simplified: the old error check just fell through to the same return
	return gorsa.PublicEncrypt(str, publicKey)
}
// ApplyPubEPriD is a round-trip self-check: encrypt a known plaintext with
// the public key and confirm the private key recovers it.
// (Original comment: 公钥加密私钥解密 — "public-key encrypt, private-key decrypt".)
func ApplyPubEPriD() error {
	const plaintext = `hello world`
	ciphertext, err := gorsa.PublicEncrypt(plaintext, publicKey)
	if err != nil {
		return err
	}
	recovered, err := gorsa.PriKeyDecrypt(ciphertext, privateKey)
	if err != nil {
		return err
	}
	if string(recovered) != plaintext {
		// 解密失败 = "decryption failed"; message kept as-is for callers
		return errors.New(`解密失败`)
	}
	return nil
}
// ApplyPriEPubD is a round-trip self-check: encrypt a known plaintext with
// the private key and confirm the public key recovers it.
// (Original comment: 公钥解密私钥加密 — "private-key encrypt, public-key decrypt".)
func ApplyPriEPubD() error {
	const plaintext = `hello world`
	ciphertext, err := gorsa.PriKeyEncrypt(plaintext, privateKey)
	if err != nil {
		return err
	}
	recovered, err := gorsa.PublicDecrypt(ciphertext, publicKey)
	if err != nil {
		return err
	}
	if string(recovered) != plaintext {
		// 解密失败 = "decryption failed"; message kept as-is for callers
		return errors.New(`解密失败`)
	}
	return nil
}
|
package cmd
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"time"
"github.com/hoisie/mustache"
"github.com/mitchellh/go-homedir"
)
const (
	appName        = "rest-client"          // application name (not referenced in this chunk — TODO confirm use)
	defaultEnvFile = "rest-client.env.json" // default environments file name (not referenced in this chunk — TODO confirm use)
	httpFileExt    = ".http"                // file extension matched by listReqFiles
	reqNamePrefix  = "#:name"               // line prefix that starts a new request in a .http file
	reqDescPrefix  = "#:desc"               // line prefix carrying a request's description
)
var (
	// global options (presumably bound to CLI flags outside this chunk — confirm)
	cfgFile   string   // path to the app config file
	envsFile  string   // path to the environments JSON file
	httpFiles []string // explicit .http request files to load
	verbose   bool     // enable verbose output

	// exec command options
	envName  string   // environment whose variables are substituted into requests
	reqNames []string // names of the requests to execute

	// http methods supported; isReq only recognizes a line as a request
	// when its first token is one of these
	validMethods = []string{
		http.MethodGet,
		http.MethodHead,
		http.MethodPost,
		http.MethodPut,
		http.MethodPatch,
		http.MethodDelete,
		http.MethodConnect,
		http.MethodOptions,
		http.MethodTrace,
	}
)
// Envs is a mapping from environment name to its variables.
// Note this is a type alias (=), not a defined type, so plain maps convert freely.
type Envs = map[string]Vars

// Vars is a mapping from environment variable name to its value.
type Vars = map[string]string
// parseEnvs decodes an environments JSON document from r.
// Returns an error if the stream cannot be read or is not valid JSON for
// the Envs shape.
func parseEnvs(r io.Reader) (Envs, error) {
	// decode straight from the stream rather than buffering the whole
	// payload with ReadAll + Unmarshal
	var envs Envs
	if err := json.NewDecoder(r).Decode(&envs); err != nil {
		return nil, err
	}
	return envs, nil
}
// Req is data read from a request in an http requests file.
type Req struct {
	File    string   // source file the request was parsed from
	Name    string   // request name, taken from the "#:name" line
	Desc    string   // request description, taken from the "#:desc" line
	Method  string   // HTTP method (GET, POST, ...)
	URL     string   // target URL; may contain {{mustache}} variables expanded at execution
	Headers []string // raw "Key: Value" header lines
	Body    []string // raw body lines, joined with newlines when executed
}
// Res is data returned from executing a request.
type Res struct {
	Req Req            // the request that produced this response
	Res *http.Response // the raw response; its body is consumed/closed by renderResponse
}
// String renders the request in roughly the same layout as the source
// .http file: metadata lines, the method/URL line, headers, and body.
func (r Req) String() string {
	var b strings.Builder
	fmt.Fprintf(&b, "File: %s\n", r.File)
	fmt.Fprintf(&b, "Name: %s\n", r.Name)
	fmt.Fprintf(&b, "Desc: %s\n\n", r.Desc)
	fmt.Fprintf(&b, "%s %s\n", r.Method, r.URL)
	for _, h := range r.Headers {
		b.WriteString(h)
		b.WriteString("\n")
	}
	if len(r.Body) > 0 {
		b.WriteString("\n")
		for _, bodyLine := range r.Body {
			b.WriteString(bodyLine)
			b.WriteString("\n")
		}
	}
	return b.String()
}
// Execute executes the http request and returns a response.
//
// Mustache variables from env are expanded in the URL, headers, and body
// before the request is sent. The client uses a fixed 10s timeout.
func (r Req) Execute(env map[string]string) (*Res, error) {
	// Build URL.
	url := expandString(r.URL, env)

	// Build body.
	var sb strings.Builder
	for _, line := range r.Body {
		sb.WriteString(expandString(line, env))
		sb.WriteString("\n")
	}

	// Create request. The error MUST be checked here: previously it was only
	// checked inside the header loop, so a bad method/URL with no headers
	// passed a nil request to client.Do (and with headers, panicked on
	// request.Header.Set).
	request, err := http.NewRequest(r.Method, url, strings.NewReader(sb.String()))
	if err != nil {
		return nil, err
	}

	// Add request headers.
	for _, line := range r.Headers {
		line = expandString(line, env)
		// SplitN keeps colons inside the value intact (e.g. "Referer: http://x");
		// plain Split(line, ":") truncated such values.
		parts := strings.SplitN(line, ":", 2)
		if len(parts) != 2 {
			// previously this indexed parts[1] unconditionally and panicked
			return nil, fmt.Errorf("malformed header line %q", line)
		}
		request.Header.Set(strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]))
	}

	// Execute request and return response.
	client := http.Client{
		Timeout: 10 * time.Second,
	}
	response, err := client.Do(request)
	if err != nil {
		return nil, err
	}

	// Success!
	return &Res{Req: r, Res: response}, nil
}
// newReq builds a Req with just its name and source file populated; the
// remaining fields are filled in as the file is parsed.
func newReq(name string, file string) *Req {
	req := Req{Name: name, File: file}
	return &req
}
// loadReqs parses requests from the given file paths. With no paths it
// defaults to every *.http file under the working directory.
// Paths may start with "~", which is expanded to the home directory.
func loadReqs(paths []string) ([]*Req, error) {
	if len(paths) == 0 {
		// Default to looking in all *.http files in the working directory.
		cwd, _ := os.Getwd()
		paths, _ = listReqFiles(cwd)
	}
	allReqs := make([]*Req, 0)
	for _, path := range paths {
		// per-file helper so each file is closed as soon as it is parsed;
		// the old `defer f.Close()` inside this loop held every descriptor
		// open until the whole function returned
		reqs, err := loadReqFile(path)
		if err != nil {
			return nil, err
		}
		allReqs = append(allReqs, reqs...)
	}
	return allReqs, nil
}

// loadReqFile opens, parses, and closes a single requests file.
func loadReqFile(path string) ([]*Req, error) {
	path, err := homedir.Expand(path)
	if err != nil {
		return nil, err
	}
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return parseReqs(f, path)
}
// parseReqs reads an .http file line by line and builds Req objects.
//
// A request starts at a "#:name <name>" line; "#:desc ..." sets its
// description; a "<METHOD> <url>" line sets method and URL; "Key: Value"
// lines become headers; any other "#" line terminates the request; all
// remaining lines belong to the body. A trailing request without a "###"
// terminator is still returned.
func parseReqs(r io.Reader, path string) ([]*Req, error) {
	reqs := make([]*Req, 0)
	var req *Req
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, reqNamePrefix) && req == nil {
			// Start building a new Req object. Guard the split: a bare
			// "#:name" line used to panic with index out of range.
			parts := strings.Split(line, " ")
			if len(parts) < 2 {
				continue
			}
			req = newReq(parts[1], path)
		} else if strings.HasPrefix(line, reqDescPrefix) && req != nil {
			// Update Req object with description.
			line = strings.TrimPrefix(line, reqDescPrefix)
			req.Desc = strings.TrimSpace(line)
		} else if isReq(line) && req != nil {
			// Update Req object with URL method and path.
			parts := strings.Split(line, " ")
			req.Method = parts[0]
			req.URL = parts[1]
		} else if len(strings.Split(line, ":")) == 2 && req != nil && req.Method != "" {
			// Update Req object with header.
			req.Headers = append(req.Headers, line)
		} else if line == "" {
			// Skip blank lines.
			continue
		} else if strings.HasPrefix(line, "#") && req != nil {
			// Finish building new Req object and add to Reqs to return.
			reqs = append(reqs, req)
			req = nil
		} else if req != nil {
			// Update Request body with line.
			req.Body = append(req.Body, line)
		}
	}
	if err := scanner.Err(); err != nil {
		// Previously log.Fatal'd, killing the whole process from a parsing
		// helper that already returns an error; log and surface it instead.
		log.Printf("scanning %s: %v", path, err)
		return nil, err
	}
	if req != nil {
		// Handle case where file does not contain "###" finalizer sigil.
		reqs = append(reqs, req)
	}
	return reqs, nil
}
// isReq reports whether line looks like a request line: exactly two
// space-separated tokens whose first token is a supported HTTP method.
func isReq(line string) bool {
	fields := strings.Split(line, " ")
	if len(fields) != 2 {
		return false
	}
	for _, m := range validMethods {
		if fields[0] == m {
			return true
		}
	}
	return false
}
// filterReqs returns only the requests whose Name appears in names,
// preserving their original order.
func filterReqs(reqs []*Req, names []string) []*Req {
	keep := make([]*Req, 0)
	for _, r := range reqs {
		if !containsString(names, r.Name) {
			continue
		}
		keep = append(keep, r)
	}
	return keep
}
// execReqs executes each request in order with the given environment.
// On the first failure it returns the responses collected so far together
// with the error.
func execReqs(reqs []*Req, env map[string]string) ([]*Res, error) {
	responses := make([]*Res, 0, len(reqs))
	for _, r := range reqs {
		res, err := r.Execute(env)
		if err != nil {
			return responses, err
		}
		responses = append(responses, res)
	}
	return responses, nil
}
// containsString reports whether s is an element of ss.
func containsString(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
// expandString substitutes {{mustache}} variables in s from env.
func expandString(s string, env map[string]string) string {
	rendered := mustache.Render(s, env)
	return rendered
}
// renderResponses renders each response, separating consecutive responses
// with a "###" divider (none after the last).
func renderResponses(responses []*Res) string {
	var out strings.Builder
	last := len(responses) - 1
	for i, res := range responses {
		out.WriteString(renderResponse(res))
		if i != last {
			out.WriteString("\n###\n\n")
		}
	}
	return out.String()
}
// renderResponse renders a single response: status line, sorted headers,
// then the body (if any) after a blank line. The response body is fully
// read and closed here.
//
// NOTE(review): the signature returns only a string, so a body read error
// still panics as before — changing that would change the interface.
func renderResponse(response *Res) string {
	var sb strings.Builder
	sb.WriteString(fmt.Sprintf("%s %s\n", response.Res.Proto, response.Res.Status))
	var headers []string
	for k, v := range response.Res.Header {
		// join multi-valued headers; %s on the []string printed Go slice
		// syntax like "[gzip chunked]" instead of a header value
		headers = append(headers, fmt.Sprintf("%s: %s\n", k, strings.Join(v, ", ")))
	}
	// sort for deterministic output (map iteration order is random)
	sort.Strings(headers)
	for _, header := range headers {
		sb.WriteString(header)
	}
	defer response.Res.Body.Close()
	buf, err := ioutil.ReadAll(response.Res.Body)
	if err != nil {
		panic(err)
	}
	if len(buf) > 0 {
		sb.WriteString(fmt.Sprintf("\n%s", string(buf)))
	}
	return sb.String()
}
func listReqFiles(dir string) ([]string, error) {
var files []string
if err := filepath.Walk(dir, func(path string, f os.FileInfo, _ error) error {
if !f.IsDir() {
if filepath.Ext(path) == httpFileExt {
files = append(files, f.Name())
}
}
return nil
}); err != nil {
return files, err
}
return files, nil
} |
package helm
import (
"github.com/devspace-cloud/devspace/pkg/devspace/config/generated"
"github.com/devspace-cloud/devspace/pkg/devspace/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/devspace/deploy/deployer"
"github.com/devspace-cloud/devspace/pkg/devspace/helm"
helmtypes "github.com/devspace-cloud/devspace/pkg/devspace/helm/types"
helmv2 "github.com/devspace-cloud/devspace/pkg/devspace/helm/v2cli"
"github.com/devspace-cloud/devspace/pkg/devspace/kubectl"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/pkg/errors"
)
// DevSpaceChartConfig is the config that holds the devspace chart information
// for the built-in component chart. New substitutes this chart when a
// deployment opts in via Helm.ComponentChart.
var DevSpaceChartConfig = &latest.ChartConfig{
	Name:    "component-chart",
	Version: "0.1.4",
	RepoURL: "https://charts.devspace.sh",
}
// DeployConfig holds the information necessary to deploy via helm
type DeployConfig struct {
	// Public because we can switch them to fake clients for testing
	Kube             kubectl.Client           // kubernetes client (namespace source for tiller)
	Helm             helmtypes.Client         // helm client; created lazily in Delete when nil
	TillerNamespace  string                   // namespace tiller runs in (relevant for helm v2)
	DeploymentConfig *latest.DeploymentConfig // the deployment this client manages
	Log              log.Logger               // logger for deployment output

	config *latest.Config // full devspace config, set by New
}
// New creates a new helm deployment client.
//
// The tiller namespace defaults to the kube client's namespace and may be
// overridden by the deployment's helm config. When the deployment opts into
// the component chart, its chart config is swapped for DevSpaceChartConfig
// (note: this mutates the passed deployConfig).
func New(config *latest.Config, helmClient helmtypes.Client, kubeClient kubectl.Client, deployConfig *latest.DeploymentConfig, log log.Logger) (deployer.Interface, error) {
	tillerNamespace := ""
	if kubeClient != nil {
		tillerNamespace = kubeClient.Namespace()
		if deployConfig.Helm.TillerNamespace != "" {
			tillerNamespace = deployConfig.Helm.TillerNamespace
		}
	}

	// Exchange chart (booleans compared directly; "== true" was a
	// staticcheck S1002 violation)
	if deployConfig.Helm.ComponentChart != nil && *deployConfig.Helm.ComponentChart {
		deployConfig.Helm.Chart = DevSpaceChartConfig
	}

	return &DeployConfig{
		Kube:             kubeClient,
		Helm:             helmClient,
		TillerNamespace:  tillerNamespace,
		DeploymentConfig: deployConfig,
		Log:              log,
		config:           config,
	}, nil
}
// Delete deletes the release and removes the deployment from the cache.
//
// For the helm v2 engine there is nothing to delete when tiller was never
// deployed, so that case returns nil early. A helm client is created lazily
// if one was not injected.
func (d *DeployConfig) Delete(cache *generated.CacheConfig) error {
	// Delete with helm engine (S1002: compare booleans directly instead of
	// "== true" / "== false")
	if d.DeploymentConfig.Helm.V2 {
		if !helmv2.IsTillerDeployed(d.config, d.Kube, d.TillerNamespace) {
			return nil
		}
	}

	if d.Helm == nil {
		var err error
		// Get HelmClient
		d.Helm, err = helm.NewClient(d.config, d.DeploymentConfig, d.Kube, d.TillerNamespace, false, false, d.Log)
		if err != nil {
			return errors.Wrap(err, "new helm client")
		}
	}

	if err := d.Helm.DeleteRelease(d.DeploymentConfig.Name, d.DeploymentConfig.Namespace, d.DeploymentConfig.Helm); err != nil {
		return err
	}

	// Delete from cache
	delete(cache.Deployments, d.DeploymentConfig.Helm.Chart.Name)
	return nil
}
|
package main
import (
"fmt"
"os"
"github.com/vlad-belogrudov/gopl/pkg/space"
)
// main brushes the word given as the first CLI argument and prints the
// result; with no argument it prints usage to stderr and exits non-zero.
func main() {
	args := os.Args
	if len(args) < 2 {
		fmt.Fprintln(os.Stderr, `need word to brush, e.g "hello bye end"`)
		os.Exit(1)
	}
	brushed := space.Brush([]byte(args[1]))
	fmt.Println(string(brushed))
}
|
package website
import (
"classes/oop/blog/post"
"fmt"
)
// website is the internal representation: an ordered collection of posts.
type website struct {
	p []post.Post // posts in insertion order
}

//Website exports website
type Website website
//New creates website object
//
// args are stringified in order with %s and flushed into a new post each
// time the index is a non-zero multiple of 4.
//
// NOTE(review): flushing at i == 4, 8, ... means the first post receives
// 5 arguments (indices 0-4) and later posts 4 each, and any arguments after
// the last flush are silently dropped — confirm against post.New whether
// uniform groups of 4 (i%4 == 3) were intended.
func New(args ...interface{}) Website {
	var w Website
	wArgs := make([]string, 0)
	for i, v := range args {
		wArgs = append(wArgs, fmt.Sprintf("%s", v))
		if (i != 0) && ((i % 4) == 0) {
			w.p = append(w.p, post.New(wArgs...))
			wArgs = make([]string, 0)
		}
	}
	return w
}
//NewFromPosts creates Website object from post.Post objects
func NewFromPosts(p ...post.Post) Website {
	var w Website
	// append the whole variadic slice at once instead of element-by-element
	// (same result, idiomatic)
	w.p = append(w.p, p...)
	return w
}
//Contents prints blogs
//
// Writes a heading followed by each post's details to stdout, with a blank
// line between entries.
func (w Website) Contents() {
	fmt.Println("Content of Website")
	fmt.Println()
	for _, entry := range w.p {
		entry.Details()
		fmt.Println()
	}
}
|
package form
import (
"fmt"
"log"
"net/url"
"reflect"
"strconv"
"time"
)
const (
	// errArraySize is used when a decoded index would exceed the decoder's
	// configured maximum array size.
	errArraySize = "Array size of '%d' is larger than the maximum currently set on the decoder of '%d'. To increase this limit please see, SetMaxArraySize(size uint)"
	// errMissingStartBracket / errMissingEndBracket report unbalanced
	// brackets in a form key; parseMapData panics with these formats.
	errMissingStartBracket = "Invalid formatting for key '%s' missing '[' bracket"
	errMissingEndBracket   = "Invalid formatting for key '%s' missing ']' bracket"
)
// decoder carries the per-decode state: the parent Decoder's configuration,
// accumulated field errors, the parsed bracket-key index, and the raw values.
type decoder struct {
	d         *Decoder     // parent decoder with settings, caches, and custom type funcs
	errs      DecodeErrors // field errors keyed by namespace; lazily allocated by setError
	dm        dataMap      // alias -> bracketed-key index built by parseMapData
	values    url.Values   // raw form values being decoded
	maxKeyLen int          // longest raw key seen; recursion guard in setFieldByType
	namespace []byte       // namespace buffer (not referenced in this chunk — TODO confirm use)
}
// setError records err for the given namespace, allocating the error map
// on first use.
func (d *decoder) setError(namespace []byte, err error) {
	if d.errs == nil {
		d.errs = make(DecodeErrors)
	}
	key := string(namespace)
	d.errs[key] = err
}
// findAlias returns the recursiveData registered for the alias ns, or nil
// if parseMapData recorded no bracketed keys under that alias.
func (d *decoder) findAlias(ns string) *recursiveData {
	for _, entry := range d.dm {
		if entry.alias == ns {
			return entry
		}
	}
	return nil
}
// parseMapData scans every key in d.values for bracketed segments (e.g.
// "Phone[0]" or "Map[key]") and builds d.dm, an alias -> key-list index used
// later for slice/array/map decoding. It also records the longest raw key in
// d.maxKeyLen (used as a recursion guard in setFieldByType) and panics via
// log.Panicf on unbalanced brackets. Safe to call repeatedly: a non-empty
// d.dm short-circuits.
func (d *decoder) parseMapData() {
	// already parsed
	if len(d.dm) > 0 {
		return
	}
	d.maxKeyLen = 0
	d.dm = d.dm[0:0] // reuse the existing backing array
	var i int
	var idx int // index of the most recent '[' seen
	var l int
	var insideBracket bool
	var rd *recursiveData
	var isNum bool // whether everything inside the current brackets is digits
	for k := range d.values {
		if len(k) > d.maxKeyLen {
			d.maxKeyLen = len(k)
		}
		for i = 0; i < len(k); i++ {
			switch k[i] {
			case '[':
				idx = i
				insideBracket = true
				isNum = true
			case ']':
				if !insideBracket {
					log.Panicf(errMissingStartBracket, k)
				}
				// find or create the recursiveData for the alias (the key
				// text before the bracket), recycling entries when capacity
				// allows
				if rd = d.findAlias(k[:idx]); rd == nil {
					l = len(d.dm) + 1
					if l > cap(d.dm) {
						dm := make(dataMap, l)
						copy(dm, d.dm)
						rd = new(recursiveData)
						dm[len(d.dm)] = rd
						d.dm = dm
					} else {
						l = len(d.dm)
						d.dm = d.dm[:l+1]
						rd = d.dm[l]
						rd.sliceLen = 0
						rd.keys = rd.keys[0:0]
					}
					rd.alias = k[:idx]
				}
				// is map + key
				ke := key{
					ivalue:      -1,
					value:       k[idx+1 : i], // text between the brackets
					searchValue: k[idx : i+1], // brackets included, e.g. "[0]"
				}
				// if key is a number, most likely an array key; keep track
				// just in case this turns out to be an array/slice.
				if isNum {
					// no need to check for error, it will always pass
					// as we have done the checking to ensure
					// the value is a number ahead of time.
					var err error
					ke.ivalue, err = strconv.Atoi(ke.value)
					if err != nil {
						ke.ivalue = -1
					}
					if ke.ivalue > rd.sliceLen {
						rd.sliceLen = ke.ivalue
					}
				}
				rd.keys = append(rd.keys, ke)
				insideBracket = false
			default:
				// checking if not a number, 0-9 is 48-57 in byte, see for yourself fmt.Println('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
				if insideBracket && (k[i] > 57 || k[i] < 48) {
					isNum = false
				}
			}
		}
		// if still inside bracket, that means no ending bracket was ever specified
		if insideBracket {
			log.Panicf(errMissingEndBracket, k)
		}
	}
}
// traverseStruct walks the cached field metadata for typ and tries to decode
// a value into every field of v, building each field's namespace from the
// parent namespace plus the decoder's configured prefix/suffix (no prefix at
// the top level). Anonymous (embedded) fields are additionally tried under
// the parent namespace itself. Returns true if at least one field was set.
func (d *decoder) traverseStruct(v reflect.Value, typ reflect.Type, namespace []byte) (set bool) {
	l := len(namespace)
	first := l == 0
	// anonymous structs will still work for caching as the whole definition is stored
	// including tags
	s, ok := d.d.structCache.Get(typ)
	if !ok {
		s = d.d.structCache.parseStruct(d.d.mode, v, typ, d.d.tagName)
	}
	for _, f := range s.fields {
		// reset to the parent namespace before appending this field's name
		namespace = namespace[:l]
		if f.isAnonymous {
			// embedded field: also try decoding under the parent namespace
			if d.setFieldByType(v.Field(f.idx), namespace, 0) {
				set = true
			}
		}
		if first {
			namespace = append(namespace, f.name...)
		} else {
			namespace = append(namespace, d.d.namespacePrefix...)
			namespace = append(namespace, f.name...)
			namespace = append(namespace, d.d.namespaceSuffix...)
		}
		if d.setFieldByType(v.Field(f.idx), namespace, 0) {
			set = true
		}
	}
	return
}
// setFieldByType decodes the form value(s) registered under namespace into
// current, dispatching on the extracted reflect.Kind. idx selects which
// element of the url.Values slice feeds scalar kinds. Returns true when the
// field (or any nested field) was actually set; conversion failures are
// accumulated via d.setError rather than returned.
func (d *decoder) setFieldByType(current reflect.Value, namespace []byte, idx int) (set bool) {
	var err error
	v, kind := ExtractType(current)
	arr, ok := d.values[string(namespace)]

	// registered custom type functions take precedence over the built-in kinds
	if d.d.customTypeFuncs != nil {
		if ok {
			if cf, ok := d.d.customTypeFuncs[v.Type()]; ok {
				val, err := cf(arr[idx:])
				if err != nil {
					d.setError(namespace, err)
					return
				}
				v.Set(reflect.ValueOf(val))
				set = true
				return
			}
		}
	}
	switch kind {
	case reflect.Interface:
		if !ok || idx == len(arr) {
			return
		}
		v.Set(reflect.ValueOf(arr[idx]))
		set = true
	case reflect.Ptr:
		// allocate the pointee, decode into it, and only assign on success
		newVal := reflect.New(v.Type().Elem())
		if set = d.setFieldByType(newVal.Elem(), namespace, idx); set {
			v.Set(newVal)
		}
	case reflect.String:
		if !ok || idx == len(arr) {
			return
		}
		v.SetString(arr[idx])
		set = true
	case reflect.Uint, reflect.Uint64:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var u64 uint64
		if u64, err = strconv.ParseUint(arr[idx], 10, 64); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetUint(u64)
		set = true
	case reflect.Uint8:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var u64 uint64
		if u64, err = strconv.ParseUint(arr[idx], 10, 8); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetUint(u64)
		set = true
	case reflect.Uint16:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var u64 uint64
		if u64, err = strconv.ParseUint(arr[idx], 10, 16); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetUint(u64)
		set = true
	case reflect.Uint32:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var u64 uint64
		if u64, err = strconv.ParseUint(arr[idx], 10, 32); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetUint(u64)
		set = true
	case reflect.Int, reflect.Int64:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var i64 int64
		if i64, err = strconv.ParseInt(arr[idx], 10, 64); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetInt(i64)
		set = true
	case reflect.Int8:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var i64 int64
		if i64, err = strconv.ParseInt(arr[idx], 10, 8); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetInt(i64)
		set = true
	case reflect.Int16:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var i64 int64
		if i64, err = strconv.ParseInt(arr[idx], 10, 16); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetInt(i64)
		set = true
	case reflect.Int32:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var i64 int64
		if i64, err = strconv.ParseInt(arr[idx], 10, 32); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetInt(i64)
		set = true
	case reflect.Float32:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var f float64
		if f, err = strconv.ParseFloat(arr[idx], 32); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetFloat(f)
		set = true
	case reflect.Float64:
		if !ok || idx == len(arr) || len(arr[idx]) == 0 {
			return
		}
		var f float64
		if f, err = strconv.ParseFloat(arr[idx], 64); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetFloat(f)
		set = true
	case reflect.Bool:
		if !ok || idx == len(arr) {
			return
		}
		var b bool
		if b, err = parseBool(arr[idx]); err != nil {
			d.setError(namespace, fmt.Errorf("Invalid Boolean Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
			return
		}
		v.SetBool(b)
		set = true
	case reflect.Slice:
		d.parseMapData()
		// slice elements could be mixed eg. number and non-numbers Value[0]=[]string{"10"} and Value=[]string{"10","20"}
		if ok && len(arr) > 0 {
			var varr reflect.Value
			var ol int // existing length, when appending to a non-nil slice
			l := len(arr)
			if v.IsNil() {
				varr = reflect.MakeSlice(v.Type(), len(arr), len(arr))
			} else {
				ol = v.Len()
				l += ol
				if v.Cap() <= l {
					varr = reflect.MakeSlice(v.Type(), l, l)
				} else {
					// preserve predefined capacity, possibly for reuse after decoding
					varr = reflect.MakeSlice(v.Type(), l, v.Cap())
				}
				reflect.Copy(varr, v)
			}
			for i := ol; i < l; i++ {
				newVal := reflect.New(v.Type().Elem()).Elem()
				if d.setFieldByType(newVal, namespace, i-ol) {
					set = true
					varr.Index(i).Set(newVal)
				}
			}
			v.Set(varr)
		}
		// maybe it's an numbered array i.e. Phone[0].Number
		if rd := d.findAlias(string(namespace)); rd != nil {
			var varr reflect.Value
			var kv key
			sl := rd.sliceLen + 1
			// checking below for maxArraySize, but if array exists and already
			// has sufficient capacity allocated then we do not check as the code
			// obviously allows a capacity greater than the maxArraySize.
			if v.IsNil() {
				if sl > d.d.maxArraySize {
					d.setError(namespace, fmt.Errorf(errArraySize, sl, d.d.maxArraySize))
					return
				}
				varr = reflect.MakeSlice(v.Type(), sl, sl)
			} else if v.Len() < sl {
				if v.Cap() <= sl {
					if sl > d.d.maxArraySize {
						d.setError(namespace, fmt.Errorf(errArraySize, sl, d.d.maxArraySize))
						return
					}
					varr = reflect.MakeSlice(v.Type(), sl, sl)
				} else {
					varr = reflect.MakeSlice(v.Type(), sl, v.Cap())
				}
				reflect.Copy(varr, v)
			} else {
				varr = v
			}
			for i := 0; i < len(rd.keys); i++ {
				kv = rd.keys[i]
				newVal := reflect.New(varr.Type().Elem()).Elem()
				if kv.ivalue == -1 {
					d.setError(namespace, fmt.Errorf("invalid slice index '%s'", kv.value))
					continue
				}
				// recurse with the bracketed segment appended to the namespace
				if d.setFieldByType(newVal, append(namespace, kv.searchValue...), 0) {
					set = true
					varr.Index(kv.ivalue).Set(newVal)
				}
			}
			if !set {
				return
			}
			v.Set(varr)
		}
	case reflect.Array:
		d.parseMapData()
		// array elements could be mixed eg. number and non-numbers Value[0]=[]string{"10"} and Value=[]string{"10","20"}
		if ok && len(arr) > 0 {
			var varr reflect.Value
			l := len(arr)
			overCapacity := v.Len() < l
			if overCapacity {
				// more values than array capacity, ignore values over capacity as it's possible some would just want
				// to grab the first x number of elements; in the future strict mode logic should return an error
				fmt.Println("warning number of post form array values is larger than array capacity, ignoring overflow values")
			}
			varr = reflect.Indirect(reflect.New(reflect.ArrayOf(v.Len(), v.Type().Elem())))
			reflect.Copy(varr, v)
			if v.Len() < len(arr) {
				l = v.Len()
			}
			for i := 0; i < l; i++ {
				newVal := reflect.New(v.Type().Elem()).Elem()
				if d.setFieldByType(newVal, namespace, i) {
					set = true
					varr.Index(i).Set(newVal)
				}
			}
			v.Set(varr)
		}
		// maybe it's an numbered array i.e. Phone[0].Number
		if rd := d.findAlias(string(namespace)); rd != nil {
			var varr reflect.Value
			var kv key
			overCapacity := rd.sliceLen >= v.Len()
			if overCapacity {
				// more values than array capacity, ignore values over capacity as it's possible some would just want
				// to grab the first x number of elements; in the future strict mode logic should return an error
				fmt.Println("warning number of post form array values is larger than array capacity, ignoring overflow values")
			}
			varr = reflect.Indirect(reflect.New(reflect.ArrayOf(v.Len(), v.Type().Elem())))
			reflect.Copy(varr, v)
			for i := 0; i < len(rd.keys); i++ {
				kv = rd.keys[i]
				if kv.ivalue >= v.Len() {
					// index beyond the fixed array length: skip (overflow)
					continue
				}
				newVal := reflect.New(varr.Type().Elem()).Elem()
				if kv.ivalue == -1 {
					d.setError(namespace, fmt.Errorf("invalid array index '%s'", kv.value))
					continue
				}
				if d.setFieldByType(newVal, append(namespace, kv.searchValue...), 0) {
					set = true
					varr.Index(kv.ivalue).Set(newVal)
				}
			}
			if !set {
				return
			}
			v.Set(varr)
		}
	case reflect.Map:
		var rd *recursiveData
		d.parseMapData()
		// no natural map support so skip directly to dm lookup
		if rd = d.findAlias(string(namespace)); rd == nil {
			return
		}
		var existing bool
		var kv key
		var mp reflect.Value
		var mk reflect.Value
		typ := v.Type()
		if v.IsNil() {
			mp = reflect.MakeMap(typ)
		} else {
			existing = true
			mp = v
		}
		for i := 0; i < len(rd.keys); i++ {
			newVal := reflect.New(typ.Elem()).Elem()
			mk = reflect.New(typ.Key()).Elem()
			kv = rd.keys[i]
			if err := d.getMapKey(kv.value, mk, namespace); err != nil {
				d.setError(namespace, err)
				continue
			}
			if d.setFieldByType(newVal, append(namespace, kv.searchValue...), 0) {
				set = true
				mp.SetMapIndex(mk, newVal)
			}
		}
		// an existing map was mutated in place; only a freshly made map
		// needs to be assigned back
		if !set || existing {
			return
		}
		v.Set(mp)
	case reflect.Struct:
		typ := v.Type()
		// if we get here then no custom time function declared so use RFC3339 by default
		if typ == timeType {
			if !ok || len(arr[idx]) == 0 {
				return
			}
			t, err := time.Parse(time.RFC3339, arr[idx])
			if err != nil {
				d.setError(namespace, err)
			}
			v.Set(reflect.ValueOf(t))
			set = true
			return
		}
		d.parseMapData()
		// we must be recursing infinitely...but that's ok we caught it on the very first overrun.
		if len(namespace) > d.maxKeyLen {
			return
		}
		set = d.traverseStruct(v, typ, namespace)
	}
	return
}
// getMapKey converts the raw map key string into the reflect.Value that will
// be used as the map index. Custom type functions registered on the decoder
// take precedence over the built-in conversions; otherwise the key is parsed
// according to the destination's underlying kind. namespace is used for
// error reporting only.
func (d *decoder) getMapKey(key string, current reflect.Value, namespace []byte) (err error) {
	v, kind := ExtractType(current)
	if d.d.customTypeFuncs != nil {
		if cf, ok := d.d.customTypeFuncs[v.Type()]; ok {
			val, er := cf([]string{key})
			if er != nil {
				err = er
				return
			}
			v.Set(reflect.ValueOf(val))
			return
		}
	}
	switch kind {
	case reflect.Interface:
		// If interface would have been set on the struct before decoding,
		// say to a struct value we would not get here but kind would be struct.
		v.Set(reflect.ValueOf(key))
		return
	case reflect.Ptr:
		newVal := reflect.New(v.Type().Elem())
		if err = d.getMapKey(key, newVal.Elem(), namespace); err == nil {
			v.Set(newVal)
		}
	case reflect.String:
		v.SetString(key)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		// Parse with the destination's exact bit size: this collapses five
		// previously duplicated cases and reports out-of-range values as
		// errors instead of risking a panic in SetUint.
		u64, e := strconv.ParseUint(key, 10, v.Type().Bits())
		if e != nil {
			err = fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
			return
		}
		v.SetUint(u64)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		i64, e := strconv.ParseInt(key, 10, v.Type().Bits())
		if e != nil {
			err = fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
			return
		}
		v.SetInt(i64)
	case reflect.Float32, reflect.Float64:
		f, e := strconv.ParseFloat(key, v.Type().Bits())
		if e != nil {
			err = fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
			return
		}
		v.SetFloat(f)
	case reflect.Bool:
		b, e := parseBool(key)
		if e != nil {
			err = fmt.Errorf("Invalid Boolean Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
			return
		}
		v.SetBool(b)
	default:
		err = fmt.Errorf("Unsupported Map Key '%s', Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
	}
	return
}
|
package shufflearray
// removeIndex deletes the element at position i in place and returns the
// shortened slice; the backing array of nums is reused.
func removeIndex(nums []int, i int) []int {
	copy(nums[i:], nums[i+1:])
	return nums[:len(nums)-1]
}
// insertIndex inserts v at position i, shifting the tail right by one slot,
// and returns the grown slice.
func insertIndex(nums []int, i int, v int) []int {
	grown := append(nums, 0)
	for j := len(grown) - 1; j > i; j-- {
		grown[j] = grown[j-1]
	}
	grown[i] = v
	return grown
}
// shuffle interleaves the two halves of nums = [x1..xn, y1..yn] into
// [x1, y1, x2, y2, ..., xn, yn] and returns the result.
//
// The previous implementation repeatedly called removeIndex/insertIndex,
// each of which shifts O(n) elements, for O(n^2) total work; a single pass
// that appends one element from each half per step is O(n). The returned
// values are identical; nums itself is no longer mutated.
func shuffle(nums []int, n int) []int {
	out := make([]int, 0, 2*n)
	for i := 0; i < n; i++ {
		out = append(out, nums[i], nums[n+i])
	}
	return out
}
package action
import (
"github.com/agiledragon/trans-dsl"
"github.com/agiledragon/trans-dsl/test/context"
)
// StubModifySomething is a stub action for trans-dsl tests: its Exec and
// Rollback mutate the shared StubInfo so tests can observe which was called.
type StubModifySomething struct {
}
// Exec records this stub action's side effect by setting P2 on the shared
// StubInfo carried in transInfo.AppInfo. It never fails.
// (Receiver renamed from the non-idiomatic "this".)
func (s *StubModifySomething) Exec(transInfo *transdsl.TransInfo) error {
	stubInfo := transInfo.AppInfo.(*context.StubInfo)
	stubInfo.P2 = 22
	return nil
}
// Rollback undoes Exec by zeroing P2 on the shared StubInfo.
// (Receiver renamed from the non-idiomatic "this".)
func (s *StubModifySomething) Rollback(transInfo *transdsl.TransInfo) {
	stubInfo := transInfo.AppInfo.(*context.StubInfo)
	stubInfo.P2 = 0
}
|
package main
import "sync"
// main is intentionally empty; this file only hosts findDifference.
func main() {
}
// findDifference returns two lists: values present in nums1 but not nums2,
// and values present in nums2 but not nums1 (each list deduplicated, in
// arbitrary order). The two lists are computed concurrently; each goroutine
// writes only its own slot of the result, so there is no shared mutation.
func findDifference(nums1 []int, nums2 []int) [][]int {
	toSet := func(vals []int) map[int]bool {
		set := make(map[int]bool, len(vals))
		for _, v := range vals {
			set[v] = true
		}
		return set
	}
	set1, set2 := toSet(nums1), toSet(nums2)
	res := make([][]int, 2)
	var wg sync.WaitGroup
	wg.Add(2)
	collect := func(slot int, src, other map[int]bool) {
		defer wg.Done()
		for v := range src {
			if !other[v] {
				res[slot] = append(res[slot], v)
			}
		}
	}
	go collect(0, set1, set2)
	go collect(1, set2, set1)
	wg.Wait()
	return res
}
|
package main
import (
"fmt"
)
// test demonstrates basic Go array behaviour: value semantics, the length
// being part of the type, pointers to arrays, and multidimensional arrays.
func test() {
	// Go arrays are value types, not references.
	// A [2]int defaults to zeroed elements.
	// Arrays of different lengths are distinct types:
	// the length is part of the array type.
	var a [2]int
	//var c [1]int
	// Automatic length from the highest index: 9+1 = 10 elements.
	b := [...]int{9: 1}
	// new returns a pointer to a zeroed array.
	p := new([10]int)
	// Multidimensional: two elements, each itself an array of three ints.
	var d [2][3]int
	fmt.Println(a)
	fmt.Println(b)
	fmt.Println(p)
	fmt.Println(d)
	// Indexing through the array pointer auto-dereferences.
	p[2] = 3
	fmt.Println("After Change")
	fmt.Println(p)
}
// bubble_sort sorts a fixed demo array ascending and prints it before and
// after. NOTE(review): despite the name this is an exchange/selection-style
// sort (it compares a[i] against every later a[j]), not a textbook bubble
// sort that swaps adjacent pairs — confirm the name is intentional.
func bubble_sort() {
	a := [...]int{5, 2, 6, 8, 9}
	fmt.Println("Origen a is ", a)
	len_a := len(a)
	// Variables declared in the for clause are scoped to each iteration.
	for i := 0; i < len_a; i++ {
		for j := i; j < len_a; j++ {
			if a[i] > a[j] {
				temp := a[j]
				a[j] = a[i]
				a[i] = temp
			}
		}
	}
	fmt.Println("Sort a is ", a)
}
// main runs the array demo followed by the sort demo.
func main() {
	test()
	bubble_sort()
}
|
package protocol
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Spec for MaxBidiStreamID / MaxUniStreamID: mapping a number of allowed
// streams to the highest stream ID usable, per stream direction and peer
// perspective (0 means "no streams allowed").
var _ = Describe("Stream ID", func() {
	Context("bidirectional streams", func() {
		It("doesn't allow any", func() {
			Expect(MaxBidiStreamID(0, PerspectiveClient)).To(Equal(StreamID(0)))
			Expect(MaxBidiStreamID(0, PerspectiveServer)).To(Equal(StreamID(0)))
		})
		It("allows one", func() {
			Expect(MaxBidiStreamID(1, PerspectiveClient)).To(Equal(StreamID(1)))
			Expect(MaxBidiStreamID(1, PerspectiveServer)).To(Equal(StreamID(4)))
		})
		It("allows many", func() {
			Expect(MaxBidiStreamID(100, PerspectiveClient)).To(Equal(StreamID(397)))
			Expect(MaxBidiStreamID(100, PerspectiveServer)).To(Equal(StreamID(400)))
		})
	})
	Context("unidirectional streams", func() {
		It("doesn't allow any", func() {
			Expect(MaxUniStreamID(0, PerspectiveClient)).To(Equal(StreamID(0)))
			Expect(MaxUniStreamID(0, PerspectiveServer)).To(Equal(StreamID(0)))
		})
		It("allows one", func() {
			Expect(MaxUniStreamID(1, PerspectiveClient)).To(Equal(StreamID(3)))
			Expect(MaxUniStreamID(1, PerspectiveServer)).To(Equal(StreamID(2)))
		})
		It("allows many", func() {
			Expect(MaxUniStreamID(100, PerspectiveClient)).To(Equal(StreamID(399)))
			Expect(MaxUniStreamID(100, PerspectiveServer)).To(Equal(StreamID(398)))
		})
	})
})
|
// +build !noexit

/*
* Copyright (c) Microsoft Corporation.
* Licensed under the MIT license.
*/
package xcobra
import (
"errors"
"os"
)
// exitWithCode terminates the process when err is non-nil, using the code
// carried by an ErrorWithCode anywhere in the error chain, or 1 otherwise.
// A nil error is a no-op.
func exitWithCode(err error) {
	if err == nil {
		return
	}
	code := 1
	var coded ErrorWithCode
	if errors.As(err, &coded) {
		code = coded.Code
	}
	os.Exit(code)
}
|
package dbops
import (
"database/sql"
_ "github.com/go-sql-driver/mysql"
"time"
)
var (
	// dbConn is the shared MySQL connection pool for this package.
	dbConn *sql.DB
	// err exists only so init can assign both results on one line.
	// NOTE(review): package-level error state is a smell — prefer a local
	// inside init.
	err error
)
// init opens the shared MySQL pool and configures its connection limits,
// panicking if the DSN cannot be parsed.
// NOTE(review): credentials and host are hard-coded in the DSN below — move
// them to configuration/environment before shipping.
func init() {
	dbConn, err = sql.Open("mysql", "root:llc123..@tcp(106.14.146.41:3306)/ynk_cms?charset=utf8&parseTime=True&loc=Local")
	if err != nil {
		panic(err.Error())
	}
	// Close connections automatically once they have been open this long.
	dbConn.SetConnMaxLifetime(100 * time.Second)
	// Cap on concurrently open connections.
	dbConn.SetMaxOpenConns(100)
	// Idle connections kept around for reuse.
	dbConn.SetMaxIdleConns(15)
}
|
package engine
import (
"encoding/json"
"testing"
"time"
. "github.com/mailgun/vulcand/Godeps/_workspace/src/gopkg.in/check.v1"
"github.com/mailgun/vulcand/plugin"
"github.com/mailgun/vulcand/plugin/connlimit"
)
// TestBackend wires the gocheck suites into the standard go test runner.
func TestBackend(t *testing.T) { TestingT(t) }
// BackendSuite groups the engine model (host/frontend/backend) tests.
type BackendSuite struct {
}
var _ = Suite(&BackendSuite{})
// TestHostNew checks that a valid host exposes its name as its id and has a
// non-empty string form.
func (s *BackendSuite) TestHostNew(c *C) {
	h, err := NewHost("localhost", HostSettings{})
	c.Assert(err, IsNil)
	c.Assert(h.Name, Equals, "localhost")
	c.Assert(h.Name, Equals, h.GetId())
	c.Assert(h.String(), Not(Equals), "")
}
// TestHostBad checks that an empty host name is rejected.
func (s *BackendSuite) TestHostBad(c *C) {
	h, err := NewHost("", HostSettings{})
	c.Assert(err, NotNil)
	c.Assert(h, IsNil)
}
// TestFrontendDefaults checks a frontend built with empty settings keeps its
// id and route.
func (s *BackendSuite) TestFrontendDefaults(c *C) {
	f, err := NewHTTPFrontend("f1", "b1", `Path("/home")`, HTTPFrontendSettings{})
	c.Assert(err, IsNil)
	c.Assert(f.GetId(), Equals, "f1")
	c.Assert(f.String(), Not(Equals), "")
	c.Assert(f.Route, Equals, `Path("/home")`)
}
// TestNewFrontendWithOptions checks every HTTPFrontendSettings field
// survives the round trip through NewHTTPFrontend / HTTPSettings.
func (s *BackendSuite) TestNewFrontendWithOptions(c *C) {
	settings := HTTPFrontendSettings{
		Limits: HTTPFrontendLimits{
			MaxMemBodyBytes: 12,
			MaxBodyBytes:    400,
		},
		FailoverPredicate:  "IsNetworkError() && Attempts() <= 1",
		Hostname:           "host1",
		TrustForwardHeader: true,
	}
	f, err := NewHTTPFrontend("f1", "b1", `Path("/home")`, settings)
	c.Assert(err, IsNil)
	c.Assert(f.Id, Equals, "f1")
	o := f.HTTPSettings()
	c.Assert(o.Limits.MaxMemBodyBytes, Equals, int64(12))
	c.Assert(o.Limits.MaxBodyBytes, Equals, int64(400))
	c.Assert(o.FailoverPredicate, NotNil)
	c.Assert(o.TrustForwardHeader, Equals, true)
	c.Assert(o.Hostname, Equals, "host1")
}
// TestFrontendBadParams checks that a malformed route and empty parameters
// are both rejected.
func (s *BackendSuite) TestFrontendBadParams(c *C) {
	// Bad route
	_, err := NewHTTPFrontend("f1", "b1", "/home -- afawf \\~", HTTPFrontendSettings{})
	c.Assert(err, NotNil)
	// Empty params
	_, err = NewHTTPFrontend("", "", "", HTTPFrontendSettings{})
	c.Assert(err, NotNil)
}
// TestFrontendBadOptions checks that an unparseable failover predicate is
// rejected.
func (s *BackendSuite) TestFrontendBadOptions(c *C) {
	settings := []HTTPFrontendSettings{
		HTTPFrontendSettings{
			FailoverPredicate: "bad predicate",
		},
	}
	for _, s := range settings {
		f, err := NewHTTPFrontend("f1", "b", `Path("/home")`, s)
		c.Assert(err, NotNil)
		c.Assert(f, IsNil)
	}
}
// TestBackendNew checks the defaults of a freshly created HTTP backend.
func (s *BackendSuite) TestBackendNew(c *C) {
	b, err := NewHTTPBackend("b1", HTTPBackendSettings{})
	c.Assert(err, IsNil)
	c.Assert(b.Type, Equals, HTTP)
	c.Assert(b.GetId(), Equals, "b1")
	c.Assert(b.String(), Not(Equals), "")
}
// TestNewBackendWithOptions checks that timeout and keep-alive duration
// strings are parsed into time.Durations by TransportSettings.
func (s *BackendSuite) TestNewBackendWithOptions(c *C) {
	options := HTTPBackendSettings{
		Timeouts: HTTPBackendTimeouts{
			Read:         "1s",
			Dial:         "2s",
			TLSHandshake: "3s",
		},
		KeepAlive: HTTPBackendKeepAlive{
			Period:              "4s",
			MaxIdleConnsPerHost: 3,
		},
	}
	b, err := NewHTTPBackend("b1", options)
	c.Assert(err, IsNil)
	c.Assert(b.GetId(), Equals, "b1")
	o, err := b.TransportSettings()
	c.Assert(err, IsNil)
	c.Assert(o.Timeouts.Read, Equals, time.Second)
	c.Assert(o.Timeouts.Dial, Equals, 2*time.Second)
	c.Assert(o.Timeouts.TLSHandshake, Equals, 3*time.Second)
	c.Assert(o.KeepAlive.Period, Equals, 4*time.Second)
	c.Assert(o.KeepAlive.MaxIdleConnsPerHost, Equals, 3)
}
// TestBackendOptionsEq table-tests HTTPBackendSettings.Equals over each
// field that participates in the comparison.
func (s *BackendSuite) TestBackendOptionsEq(c *C) {
	options := []struct {
		a HTTPBackendSettings
		b HTTPBackendSettings
		e bool
	}{
		{HTTPBackendSettings{}, HTTPBackendSettings{}, true},
		{HTTPBackendSettings{Timeouts: HTTPBackendTimeouts{Dial: "1s"}}, HTTPBackendSettings{Timeouts: HTTPBackendTimeouts{Dial: "1s"}}, true},
		{HTTPBackendSettings{Timeouts: HTTPBackendTimeouts{Dial: "2s"}}, HTTPBackendSettings{Timeouts: HTTPBackendTimeouts{Dial: "1s"}}, false},
		{HTTPBackendSettings{Timeouts: HTTPBackendTimeouts{Read: "2s"}}, HTTPBackendSettings{Timeouts: HTTPBackendTimeouts{Read: "1s"}}, false},
		{HTTPBackendSettings{Timeouts: HTTPBackendTimeouts{TLSHandshake: "2s"}}, HTTPBackendSettings{Timeouts: HTTPBackendTimeouts{TLSHandshake: "1s"}}, false},
		{HTTPBackendSettings{KeepAlive: HTTPBackendKeepAlive{Period: "2s"}}, HTTPBackendSettings{KeepAlive: HTTPBackendKeepAlive{Period: "1s"}}, false},
		{HTTPBackendSettings{KeepAlive: HTTPBackendKeepAlive{MaxIdleConnsPerHost: 1}}, HTTPBackendSettings{KeepAlive: HTTPBackendKeepAlive{MaxIdleConnsPerHost: 2}}, false},
	}
	for _, o := range options {
		c.Assert(o.a.Equals(o.b), Equals, o.e)
	}
}
// TestOCSPSettingsEq table-tests OCSPSettings.Equals: period strings are
// compared as durations ("2m0s" == "2m"), unparseable periods never compare
// equal, and responder lists must match element-wise.
func (s *BackendSuite) TestOCSPSettingsEq(c *C) {
	options := []struct {
		a *OCSPSettings
		b *OCSPSettings
		e bool
	}{
		{&OCSPSettings{}, &OCSPSettings{}, true},
		{&OCSPSettings{Period: "2m0s"}, &OCSPSettings{Period: "2m"}, true},
		{&OCSPSettings{Period: "2m0s"}, &OCSPSettings{Period: "3m"}, false},
		{&OCSPSettings{Period: "bla"}, &OCSPSettings{Period: "2m"}, false},
		{&OCSPSettings{Period: "2m"}, &OCSPSettings{Period: "bla"}, false},
		{&OCSPSettings{Enabled: true}, &OCSPSettings{Enabled: false}, false},
		{
			&OCSPSettings{Enabled: true, Responders: []string{"http://a.com", "http://b.com"}},
			&OCSPSettings{Enabled: true, Responders: []string{"http://a.com", "http://b.com"}},
			true,
		},
		{
			&OCSPSettings{Enabled: true, Responders: []string{"http://a.com", "http://b.com"}},
			&OCSPSettings{Enabled: true, Responders: []string{"http://a.com"}},
			false,
		},
		{
			&OCSPSettings{Enabled: true, Responders: []string{"http://a.com", "http://b.com"}},
			&OCSPSettings{Enabled: true, Responders: []string{"http://a.com", "http://c.com"}},
			false,
		},
	}
	for _, o := range options {
		c.Assert(o.a.Equals(o.b), Equals, o.e)
	}
}
// TestNewBackendWithBadOptions checks that an unparseable duration string in
// any timeout/keep-alive field causes backend creation to fail.
func (s *BackendSuite) TestNewBackendWithBadOptions(c *C) {
	options := []HTTPBackendSettings{
		HTTPBackendSettings{
			Timeouts: HTTPBackendTimeouts{
				Read: "1what?",
			},
		},
		HTTPBackendSettings{
			Timeouts: HTTPBackendTimeouts{
				Dial: "1what?",
			},
		},
		HTTPBackendSettings{
			Timeouts: HTTPBackendTimeouts{
				TLSHandshake: "1what?",
			},
		},
		HTTPBackendSettings{
			KeepAlive: HTTPBackendKeepAlive{
				Period: "1what?",
			},
		},
	}
	for _, o := range options {
		b, err := NewHTTPBackend("b1", o)
		c.Assert(err, NotNil)
		c.Assert(b, IsNil)
	}
}
// TestNewServer checks creating a server from a well-formed URL.
func (s *BackendSuite) TestNewServer(c *C) {
	sv, err := NewServer("s1", "http://falhost")
	c.Assert(err, IsNil)
	c.Assert(sv.GetId(), Equals, "s1")
	c.Assert(sv.String(), Not(Equals), "")
}
// TestNewServerBadParams checks that a malformed URL is rejected.
func (s *BackendSuite) TestNewServerBadParams(c *C) {
	_, err := NewServer("s1", "http---")
	c.Assert(err, NotNil)
}
// TestNewListener checks creating a listener with valid parameters.
func (s *BackendSuite) TestNewListener(c *C) {
	_, err := NewListener("id", "http", "tcp", "127.0.0.1:4000", "")
	c.Assert(err, IsNil)
}
// TestNewListenerBadParams checks that an empty address, an empty protocol,
// and an unknown scope are each rejected.
func (s *BackendSuite) TestNewListenerBadParams(c *C) {
	_, err := NewListener("id", "http", "tcp", "", "")
	c.Assert(err, NotNil)
	_, err = NewListener("id", "", "tcp", "127.0.0.1:4000", "")
	c.Assert(err, NotNil)
	_, err = NewListener("id", "http", "tcp", "127.0.0.1:4000", "blabla")
	c.Assert(err, NotNil)
}
// TestFrontendsFromJSON round-trips a frontend list through JSON and back
// via FrontendsFromJSON.
func (s *BackendSuite) TestFrontendsFromJSON(c *C) {
	f, err := NewHTTPFrontend("f1", "b1", `Path("/path")`, HTTPFrontendSettings{})
	c.Assert(err, IsNil)
	fs := []Frontend{*f}
	// Bug fix: the old code first marshaled f alone, ignored that error, and
	// immediately overwrote both results; the dead call is gone and the
	// remaining marshal error is now asserted.
	bytes, err := json.Marshal(map[string]interface{}{"Frontends": fs})
	c.Assert(err, IsNil)
	r := plugin.NewRegistry()
	c.Assert(r.AddSpec(connlimit.GetSpec()), IsNil)
	out, err := FrontendsFromJSON(bytes)
	c.Assert(err, IsNil)
	c.Assert(out, NotNil)
	c.Assert(out, DeepEquals, fs)
}
// MiddlewareFromJSON round-trips a connlimit middleware through JSON.
// NOTE(review): the method name lacks the "Test" prefix, so gocheck never
// discovers or runs it — it has been dead since it was written. Confirm
// intent before renaming (when enabled it may also need the connlimit spec
// registered on the registry to decode successfully).
func (s *BackendSuite) MiddlewareFromJSON(c *C) {
	cl, err := connlimit.NewConnLimit(10, "client.ip")
	c.Assert(err, IsNil)
	m := &Middleware{Id: "c1", Type: "connlimit", Middleware: cl}
	bytes, err := json.Marshal(m)
	c.Assert(err, IsNil)
	out, err := MiddlewareFromJSON(bytes, plugin.NewRegistry().GetSpec)
	c.Assert(err, IsNil)
	c.Assert(out, NotNil)
	c.Assert(out, DeepEquals, m)
}
// TestBackendFromJSON round-trips a backend through JSON.
func (s *BackendSuite) TestBackendFromJSON(c *C) {
	b, err := NewHTTPBackend("b1", HTTPBackendSettings{})
	c.Assert(err, IsNil)
	bytes, err := json.Marshal(b)
	c.Assert(err, IsNil)
	out, err := BackendFromJSON(bytes)
	c.Assert(err, IsNil)
	c.Assert(out, NotNil)
	c.Assert(out, DeepEquals, b)
}
// TestServerFromJSON round-trips a server through JSON.
func (s *BackendSuite) TestServerFromJSON(c *C) {
	e, err := NewServer("sv1", "http://localhost")
	c.Assert(err, IsNil)
	bytes, err := json.Marshal(e)
	c.Assert(err, IsNil)
	out, err := ServerFromJSON(bytes)
	c.Assert(err, IsNil)
	c.Assert(out, NotNil)
	c.Assert(out, DeepEquals, e)
}
|
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// getIntersectionNode returns the node where the two lists intersect, or nil
// when they do not. Two-pointer technique: each cursor walks its own list
// then restarts on the other, so both travel lenA+lenB steps and meet at the
// intersection (or both become nil simultaneously).
func getIntersectionNode(headA, headB *ListNode) *ListNode {
	a, b := headA, headB
	for a != b {
		if a == nil {
			a = headB
		} else {
			a = a.Next
		}
		if b == nil {
			b = headA
		} else {
			b = b.Next
		}
	}
	return a
}
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
func getIntersectionNode(headA, headB *ListNode) *ListNode {
mapping := map[*ListNode] int {}
for headA != nil {
mapping[headA]++
headA = headA.Next
}
for headB != nil {
_, found := mapping[headB]
if found {
break
}
headB = headB.Next
}
return headB
} |
package honeycombio
import (
"context"
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestTriggers exercises the full trigger lifecycle against a live Honeycomb
// dataset: Create, List, Get, Update, Delete, then a Get that must 404.
// The subtests are order-dependent and share the trigger created first.
func TestTriggers(t *testing.T) {
	ctx := context.Background()
	// trigger and err are shared across the ordered subtests below.
	var trigger *Trigger
	var err error
	c := newTestClient(t)
	dataset := testDataset(t)
	t.Run("Create", func(t *testing.T) {
		data := &Trigger{
			Name:        fmt.Sprintf("Test trigger created at %v", time.Now()),
			Description: "Some description",
			Disabled:    true,
			Query: &QuerySpec{
				Breakdowns: nil,
				Calculations: []CalculationSpec{
					{
						Op:     CalculationOpP99,
						Column: StringPtr("duration_ms"),
					},
				},
				Filters: []FilterSpec{
					{
						Column: "column_1",
						Op:     FilterOpExists,
					},
					{
						Column: "column_2",
						Op:     FilterOpContains,
						Value:  "foobar",
					},
				},
				FilterCombination: FilterCombinationOr,
			},
			Frequency: 300,
			Threshold: &TriggerThreshold{
				Op:    TriggerThresholdOpGreaterThan,
				Value: 10000,
			},
			Recipients: []TriggerRecipient{
				{
					Type:   TriggerRecipientTypeEmail,
					Target: "hello@example.com",
				},
				{
					Type:   TriggerRecipientTypeMarker,
					Target: "This marker is created by a trigger",
				},
			},
		}
		trigger, err = c.Triggers.Create(ctx, dataset, data)
		if err != nil {
			t.Fatal(err)
		}
		assert.NotNil(t, trigger.ID)
		// copy IDs before asserting equality
		data.ID = trigger.ID
		for i := range trigger.Recipients {
			data.Recipients[i].ID = trigger.Recipients[i].ID
		}
		// set default time range
		data.Query.TimeRange = IntPtr(300)
		assert.Equal(t, data, trigger)
	})
	t.Run("List", func(t *testing.T) {
		result, err := c.Triggers.List(ctx, dataset)
		assert.NoError(t, err)
		assert.Contains(t, result, *trigger, "could not find newly created trigger with List")
	})
	t.Run("Get", func(t *testing.T) {
		getTrigger, err := c.Triggers.Get(ctx, dataset, trigger.ID)
		assert.NoError(t, err)
		assert.Equal(t, *trigger, *getTrigger)
	})
	t.Run("Update", func(t *testing.T) {
		trigger.Description = "A new description"
		result, err := c.Triggers.Update(ctx, dataset, trigger)
		assert.NoError(t, err)
		assert.Equal(t, trigger, result)
	})
	t.Run("Delete", func(t *testing.T) {
		err = c.Triggers.Delete(ctx, dataset, trigger.ID)
		assert.NoError(t, err)
	})
	t.Run("Get_deletedTrigger", func(t *testing.T) {
		// After Delete, a Get for the same ID is expected to yield ErrNotFound.
		_, err := c.Triggers.Get(ctx, dataset, trigger.ID)
		assert.Equal(t, ErrNotFound, err)
	})
}
// TestMatchesTriggerSubset table-tests the trigger-query validation rules:
// exactly one calculation, no HEATMAP op, and no limit or orders clauses.
func TestMatchesTriggerSubset(t *testing.T) {
	cases := []struct {
		in          QuerySpec
		expectedErr error
	}{
		{
			in: QuerySpec{
				Calculations: []CalculationSpec{
					{
						Op: CalculationOpCount,
					},
				},
			},
			expectedErr: nil,
		},
		{
			in: QuerySpec{
				Calculations: nil,
			},
			expectedErr: errors.New("a trigger query should contain exactly one calculation"),
		},
		{
			in: QuerySpec{
				Calculations: []CalculationSpec{
					{
						Op: CalculationOpHeatmap,
					},
				},
			},
			expectedErr: errors.New("a trigger query may not contain a HEATMAP calculation"),
		},
		{
			in: QuerySpec{
				Calculations: []CalculationSpec{
					{
						Op: CalculationOpCount,
					},
				},
				Limit: IntPtr(100),
			},
			expectedErr: errors.New("limit is not allowed in a trigger query"),
		},
		{
			in: QuerySpec{
				Calculations: []CalculationSpec{
					{
						Op: CalculationOpCount,
					},
				},
				Orders: []OrderSpec{
					{
						Column: StringPtr("duration_ms"),
					},
				},
			},
			expectedErr: errors.New("orders is not allowed in a trigger query"),
		},
	}
	for i, c := range cases {
		err := MatchesTriggerSubset(&c.in)
		assert.Equal(t, c.expectedErr, err, "Test case %d, QuerySpec: %v", i, c.in)
	}
}
|
package zen
import "fmt"
// Show prints a greeting for msg to stdout and always returns nil (the error
// return exists to satisfy error-returning call sites).
func Show(msg string) error {
	// Bug fix: fmt.Println does not interpret format verbs — the old call
	// printed the literal "Hello, %s!" followed by msg as a second operand.
	// Printf formats the message as intended.
	fmt.Printf("Hello, %s!\n", msg)
	return nil
}
|
// +build !darwin

package platform
// start is a no-op on non-darwin platforms (see the build constraint above).
func start(h *CursorHandle) {}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"os"
"strings"
)
var (
	// graphfile is the JSON database of movies and their casts to load.
	graphfile = flag.String("graphfile", "kevinbacon.json", "graph data file")
)
// main loads the movie graph and prints the shortest actor/movie chain from
// the first argument to the second (defaulting to "Kevin Bacon").
func main() {
	flag.Usage = usage
	flag.Parse()
	if flag.NArg() < 1 {
		usage()
	}
	graph, err := load(*graphfile)
	if err != nil {
		log.Fatal(err)
	}
	from := flag.Arg(0)
	to := "Kevin Bacon"
	if flag.NArg() > 1 {
		to = flag.Arg(1)
	}
	paths := bfs(graph, from, to)
	fmt.Println(strings.Join(paths, " -> "))
}
// usage prints command-line help to stderr and exits with status 2.
func usage() {
	fmt.Fprintln(os.Stderr, "usage: from [to]")
	flag.PrintDefaults()
	os.Exit(2)
}
// Graph is an undirected graph of movies and actors with lookup by name.
type Graph struct {
	nodes []*Node
	links map[string]*Node
}
// Node is one movie or actor plus per-search BFS bookkeeping.
type Node struct {
	value string
	edges []*Node
	// searched marks the node visited; parent is its BFS predecessor.
	// Both are cleared by Graph.Reset between searches.
	searched bool
	parent *Node
}
// newgraph returns an empty graph with an initialized lookup map.
func newgraph() *Graph {
	return &Graph{
		links: make(map[string]*Node),
	}
}
// Reset clears the per-search BFS state on every node so the graph can be
// searched again.
func (g *Graph) Reset() {
	for _, node := range g.nodes {
		node.searched = false
		node.parent = nil
	}
}
// AddNode appends node to the graph and indexes it by its value.
// NOTE(review): calling this twice for the same node leaves a duplicate
// entry in g.nodes (the map entry is simply overwritten).
func (g *Graph) AddNode(node *Node) {
	g.nodes = append(g.nodes, node)
	g.links[node.value] = node
}
// GetNode returns the node registered under value, or nil if unknown.
func (g *Graph) GetNode(value string) *Node {
	return g.links[value]
}
// AddEdge links n and neighbor in both directions (an undirected edge).
func (n *Node) AddEdge(neighbor *Node) {
	n.edges = append(n.edges, neighbor)
	neighbor.edges = append(neighbor.edges, n)
}
// load reads the JSON movie database at name and builds the actor/movie
// graph: one node per movie and per actor, with an edge between each movie
// and every member of its cast.
func load(name string) (*Graph, error) {
	data, err := os.ReadFile(name)
	if err != nil {
		return nil, err
	}
	var db struct {
		Movies []struct {
			Title string
			Cast  []string
		}
	}
	if err = json.Unmarshal(data, &db); err != nil {
		return nil, err
	}
	graph := newgraph()
	for _, movie := range db.Movies {
		movienode := &Node{value: movie.Title}
		graph.AddNode(movienode)
		for _, cast := range movie.Cast {
			actornode := graph.GetNode(cast)
			if actornode == nil {
				actornode = &Node{value: cast}
				// Bug fix: register an actor only on first sight. The old
				// code re-added existing actors for every movie, piling
				// duplicate entries into graph.nodes.
				graph.AddNode(actornode)
			}
			movienode.AddEdge(actornode)
		}
	}
	return graph, nil
}
// bfs returns the node values along a shortest path from `from` to `to`
// (inclusive), or nil when either endpoint is unknown or no path exists.
func bfs(graph *Graph, from, to string) []string {
	graph.Reset()
	start := graph.GetNode(from)
	end := graph.GetNode(to)
	if start == nil || end == nil {
		return nil
	}
	start.searched = true
	queue := []*Node{start}
	for ; len(queue) > 0; queue = queue[1:] {
		current := queue[0]
		if current == end {
			break
		}
		for _, neighbor := range current.edges {
			if !neighbor.searched {
				neighbor.searched = true
				neighbor.parent = current
				queue = append(queue, neighbor)
			}
		}
	}
	// Bug fix: if the target was never reached the old code still returned a
	// bogus one-element "path" containing only the destination.
	if end != start && end.parent == nil {
		return nil
	}
	// Walk parent links back to the start, then reverse into path order.
	nodes := []*Node{end}
	for next := end.parent; next != nil; next = next.parent {
		nodes = append(nodes, next)
	}
	paths := []string{}
	for i := len(nodes) - 1; i >= 0; i-- {
		paths = append(paths, nodes[i].value)
	}
	return paths
}
|
package controller
import (
"fmt"
"log"
"net/http"
"github.com/SebastiaanKlippert/go-wkhtmltopdf"
"github.com/gin-gonic/gin"
)
// DownloadPdf 通过html url地址下载pdf附件
func DownloadPdf(c *gin.Context) {
url := c.Query("url")
if url == "" {
c.JSON(http.StatusBadRequest, gin.H{
"msg": "url is empty",
})
}
pdfg, err := wkhtmltopdf.NewPDFGenerator()
if err != nil {
log.Fatal(err)
}
// Set global options
pdfg.Dpi.Set(300)
pdfg.Orientation.Set(wkhtmltopdf.OrientationLandscape)
// pdfg.Grayscale.Set(true)
// Create a new input page from an URL
page := wkhtmltopdf.NewPage(url)
// Set options for this page
// page.FooterRight.Set("[page]")
// page.FooterFontSize.Set(10)
// page.Zoom.Set(0.95)
// Add to document
pdfg.AddPage(page)
// Create PDF document in internal buffer
err = pdfg.Create()
if err != nil {
log.Fatal(err)
}
c.Writer.Header().Add("Content-Type", "application/octet-stream")
c.Writer.Header().Set("Accept-Ranges", "bytes")
c.Writer.Header().Set("Content-Disposition", "attachment; filename="+fmt.Sprintf("%s", "download.pdf")) //文件名
c.Writer.Header().Set("Cache-Control", "must-revalidate, post-check=0, pre-check=0")
c.Writer.Header().Set("Pragma", "no-cache")
c.Writer.Header().Set("Expires", "0")
c.Writer.Write(pdfg.Bytes())
}
|
/*
* Copyright (c) 2018 Jeffrey Walter <jeffreydwalter@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package arlo
// Device types and request URIs for the my.arlo.com hmsweb API.
// Bug fix: BuyUri, CommunityUri, GPLUri and SupportUri had malformed URL
// schemes ("http:/" / "https:/" with a single slash); browsers and HTTP
// clients would not resolve them.
const (
	DeviceTypeArloQ = "arloq"
	DeviceTypeArloBridge = "arlobridge"
	DeviceTypeBasestation = "basestation"
	DeviceTypeCamera = "camera"
	DeviceTypeLights = "lights"
	DeviceTypeSiren = "siren"
	TransIdPrefix = "web"
	BaseUrl = "https://my.arlo.com/hmsweb"
	// TODO: Implement all of the following urls. There are many here I don't have devices for. :/
	ActiveAutomationUri = "/users/devices/automation/active"
	AllMediaUri = "/users/music/all"
	AnalyticFeedbackUri = "/users/library/%s/feedback"
	AssignBetaPlanUri = "/users/assign/smartfeatures"
	AssignDeviceToServicePlanUri = "/users/devices/provision"
	AutomationDefinitionsUri = "/users/automation/definitions?uniqueIds=all"
	AutomationModeUri = "/users/locations/%uniqueId/modes"
	AutomationScheduleUri = "/users/locations/%uniqueId/schedules"
	BuyUri = "http://www.arlo.com/en-us/products/default.aspx?utm_source=app_desktop&p=all&cid=app"
	CameraOrderUri = "/users/devices/v2/displayOrder"
	CancelPlanUri = "/users/payment/plans/%paymentId/cancel"
	CancelQuotationUri = "/users/payment/quotations/%paymentId/cancel/v1"
	CapabilitiesUri = "/resources/capabilities/en/en_%t|i1000.json?t=%s"
	ChangeMobileOffersUri = "/users/payment/offers/dataplans/change/v5"
	ChangePlanUri = "/users/%paymentId/changeplan"
	CheckAccountUri = "/checkAccountUsage"
	CheckEmailUri = "/checkEmailUsage"
	ClaimDeviceUri = "/users/devices/claimDevice"
	CommunityUri = "http://www.arlo.com/community?utm_source=app_desktop&locale=en"
	ConfirmUserIdUri = "/users/resend/confirm/email"
	CountryCodesUri = "/static/countrycodes"
	CreatePaymentAccountUri = "/users/payment/accounts"
	CreatePlanUri = "/users/payment/plans/%paymentId"
	DeleteAccountUri = "/users/closeAccount"
	DeviceFeaturesUri = "/users/devices/updateFeatures/feature/%uniqueId"
	DeviceProvisioningUri = "/users/devices/states"
	DeviceStatisticsUri = "/users/devices/%uniqueId/data/statistics?t=%s"
	DeviceSupportUri = "/devicesupport"
	DeviceSupportV2Uri = "/devicesupport/v2"
	DeviceUri = "/users/devices/%deviceId"
	DeviceZoneUri = "/users/devices/%uniqueId/activityzones/%zoneId"
	DeviceZonesUri = "/users/devices/%uniqueId/activityzones"
	DevicesUpdateFeaturesUri = "/users/devices/updateFeatures/feature"
	DevicesUri = "/users/devices/?t=%s"
	DonateRecordUri = "/users/library/%uniqueId/donate"
	EditAutomationModeUri = "/users/locations/%uniqueId/modes/%mode/devices/%i"
	EditUri = "/users/media"
	EmergencyCallDetailsUri = "/users/emergency/%emergencyId/call"
	EmergencyLocationSaveUri = "/users/emergency/locations/%emergencyId"
	EmergencyLocationsUri = "/users/emergency/locations"
	EventsUri = "/users/devices/%uniqueId/events?fromDate=%s&toDate=%s"
	FavoriteUri = "/users/library/favorite"
	FieldLengthsUri = "/static/fieldLengths"
	FriendsDeleteUri = "/users/friends/remove"
	FriendsUri = "/users/friends"
	FullFrameSnapshotUri = "/users/devices/fullFrameSnapshot"
	GPLUri = "https://vzs3-prod-common.s3.amazonaws.com/license/GPLv1.html"
	HtmlChangeOffersUri = "/users/payment/offers/html/v5/change"
	HtmlOffersUri = "/users/payment/offers/html/v5"
	HtmlPrelimQuotationUri = "/users/payment/offers/quotation/html/v5"
	HtmlQuotationUri = "/users/payment/confirmation/%paymentId"
	LibFeedbackUri = "/library/feedback"
	LibraryStateUri = "/users/library/state/v1"
	LocateDevicesUri = "/locateDevice?discoveryToken=%s"
	LocationByZipUri = "/users/payment/postalcodelookup"
	LocationUri = "/users/locations"
	LoginUri = "/login"
	LoginV2Uri = "/login/v2"
	LogoutUri = "/logout"
	MetadataUri = "/users/library/metadata/v2"
	MigrateZonesUri = "/users/devices/%uniqueId/activityzones/migrate"
	MobileOffersUri = "/users/payment/offers/dataplans/v5"
	ModifyBillingUri = "/users/payment/billing/%paymentId"
	NotifyResponsesPushServiceUri = "/client/subscribe?token=%s"
	NotifyUri = "/users/devices/notify/%s"
	OffersDetailsUri = "/users/payment/offersdetail"
	OffersDvrChangeUri = "/users/payment/offers/arloq/html/v5/change"
	OffersDvrUri = "/users/payment/offers/arloq/html/v5"
	OffersUri = "/users/payment/offers/v3"
	PaymentBillingUri = "/users/payment/billing/%paymentId"
	PaymentRenewUri = "/users/payment/autoRenew/%paymentId"
	PaymentTermsLinkUri = "/paymentTermsAndConditions?t=%s"
	PlacemeterUri = ""
	PlaylistMetaUri = "/users/devices/%uniqueId/playlist/metadata"
	PlaylistUri = "/users/devices/%s/playlist?fromDate=%s&toDate=%s"
	PolicyUri = "/policy/v1/?t=%s"
	PreferencesUri = "/users/preferences"
	ProfileUri = "/users/profile"
	PttNotifyUri = "/users/devices/notify/%parentId"
	PttUri = "/users/devices/%s/pushtotalk"
	RMAValidationUri = "/users/devices/%restrictedDevice/apvalidation"
	RecordingsUri = "/users/library"
	RecycleUri = "/users/library/recycle"
	RegisterUserUri = "/register"
	RemoveDeviceUri = "/users/devices/v2/removeDevice"
	RenameDeviceUri = "/users/devices/v2/renameDevice"
	RenewPlanUri = "/users/payment/plans/%paymentId/renew"
	RenewQuotationUri = "/users/payment/quotations/%?/renew"
	RequestPasswordResetUri = "/requestPasswordReset"
	ResetCountUri = "/users/library/reset?uniqueId=%s"
	ResetPasswordUri = "/resetPassword"
	RestartDeviceUri = "/users/devices/restart"
	SSORegisterUri = "/ssoregister"
	SecretQuestionsUri = "/static/secretquestions"
	ServicePlanUri = "/users/serviceLevel/v3"
	SessionUri = "/users/session"
	SetAutomationModeUri = "/users/locations/%uniqueId/modes/%mode"
	ShareUri = "/users/library/share"
	SmartAlertsUri = "/users/devices/%uniqueId/smartalerts"
	SmartConfigUri = "/user/smarthome/config"
	StartRecordUri = "/users/devices/startRecord"
	StartStreamUri = "/users/devices/startStream"
	StatesCodesUri = "/static/usstatescodes"
	StopRecordUri = "/users/devices/stopRecord"
	StopStreamUri = "/users/devices/stopStream"
	StorageQuotaUri = "/users/quota"
	SupportUri = "http://www.arlo.com/support?utm_source=app_desktop&cc=en"
	TakeSnapshotUri = "/users/devices/takeSnapshot"
	TempUnitUri = "/users/devices/%uniqueId/tempUnit"
	TermsLinkUri = "/termsAndConditionsLink?t=%s"
	TermsUri = "/termsAndConditions/?t=%s"
	TimeZonesUri = "/static/timezones"
	UnsubscribeUri = "/client/unsubscribe"
	UpdateNameUri = "/user"
	UpdatePasswordUri = "/users/changePassword"
	UpdateUserIdUri = "/users/changeEmail"
	UserFrameSnapshotUri = "/users/devices/userSnapshot"
	UsersEmailsUri = "/users/emails"
	ValidateCouponUri = "/users/payment/coupondetails"
	ValidateResetUri = "/validatePasswordReset/%?"
	WakeupUri = "/users/devices/wakeup/%deviceId?t=%s"
)
|
package tools_test
import (
"testing"
"payment/internal/tools"
"github.com/stretchr/testify/assert"
)
// TestIsExistSlice table-tests tools.IsExistSlice membership lookups.
func TestIsExistSlice(t *testing.T) {
	var (
		// payload pairs each input value with its expected membership result.
		// (Field renamed: the old "got" actually held the input, not a result.)
		payload = []struct {
			want bool
			in   string
		}{
			{true, "test1"},
			{false, "test66"},
		}
		slice = []string{
			"test1", "test2", "test3",
		}
	)
	t.Run("Test on get value is exist in string slice", func(t *testing.T) {
		for _, item := range payload {
			assert.Equal(t, item.want, tools.IsExistSlice(item.in, slice))
		}
	})
}
// TestStringToInt table-tests tools.StringToInt, including the fallback to 0
// for unparseable input.
func TestStringToInt(t *testing.T) {
	var payload = []struct {
		want int
		in   string
	}{
		{10, "10"},
		{0, "fail"},
	}
	// Bug fix: the subtest label was copy-pasted from TestIsExistSlice and
	// described the wrong behaviour in test output.
	t.Run("Test on convert string to int", func(t *testing.T) {
		for _, item := range payload {
			assert.Equal(t, item.want, tools.StringToInt(item.in))
		}
	})
}
// TestReverseSign table-tests tools.ReverseSign for both signs.
func TestReverseSign(t *testing.T) {
	var payload = []struct {
		want float64
		in   float64
	}{
		{-1, 1},
		{1, -1},
	}
	// Bug fix: the subtest label was copy-pasted from TestIsExistSlice and
	// described the wrong behaviour in test output.
	t.Run("Test on reverse float sign", func(t *testing.T) {
		for _, item := range payload {
			assert.Equal(t, item.want, tools.ReverseSign(item.in))
		}
	})
}
|
package main
import (
"fmt"
)
// teste is a named integer type used to demonstrate Go's strict typing.
type teste int
// x holds the named type; y receives its explicitly converted int value.
var x teste
var y int
// main shows that a named type and its underlying type are distinct types
// that require an explicit conversion between them.
func main() {
	fmt.Printf("tipo:%T\nvalor:%v\n", x, x)
	x = 42
	fmt.Printf("x:%d\n", x)
	// Explicit conversion is required even though teste's underlying type is int.
	y = int(x)
	fmt.Printf("valor de y:%v\ntipo de y:%T\n", y, y)
}
|
package main
import (
"bytes"
"crypto/tls"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
"github.com/gorilla/mux"
)
var (
	// bind is the address this proxy itself listens on.
	bind = flag.String(
		"bind",
		"127.0.0.1:8080",
		"Listening address for incoming requests",
	)
	// concourseUrl is the upstream Concourse instance requests are forwarded to.
	// Bug fix: the help text misspelled "request" as "reqeuest".
	concourseUrl = flag.String(
		"concourse-url",
		"http://localhost:8888",
		"Concourse URL to forward the request",
	)
	// token is passed through as the Bearer token on forwarded requests.
	token = flag.String(
		"token",
		"",
		"Bearer oAuth authorization token",
	)
	// insecure disables TLS certificate verification on the upstream call.
	insecure = flag.Bool(
		"insecure",
		false,
		"Disable ssl security verification",
	)
)
// From carries the base64-encoded serialized request as a version reference.
type From struct {
	Ref string `json:"ref"`
}
// Version is the Concourse resource "version" wrapper posted to the check endpoint.
type Version struct {
	From From `json:"from"`
}
// Request is a serializable snapshot of the incoming HTTP request that gets
// forwarded to Concourse.
type Request struct {
	// NOTE(review): the tag maps Method to JSON key "request" — this looks
	// like a copy/paste slip for "method", but it is the wire format; confirm
	// against the consuming resource before changing it.
	Method string `json:"request,omitempty"`
	Path string `json:"path,omitempty"`
	Proto string `json:"proto,omitempty"`
	Headers [][]string `json:"headers,omitempty"`
	Body []byte `json:"body,omitempty"`
	Token string `json:"token,omitempty"`
}
func header(r *http.Request, key string) (string, bool) {
if r.Header == nil {
return "", false
}
if candidate := r.Header[key]; len(candidate) > 0 {
return candidate[0], true
}
return "", false
}
func handleHealth(resp http.ResponseWriter, _ *http.Request) {
resp.WriteHeader(http.StatusOK)
}
// handleProxy serializes the incoming webhook request (method, path,
// headers, body) plus the configured bearer token, base64-encodes it as
// a version ref, and POSTs it to the Concourse resource check endpoint
// identified by the {team}/{pipeline}/{resource} route variables. The
// upstream status code is mirrored back to the caller.
func handleProxy(w http.ResponseWriter, r *http.Request) {
	proc := time.Now()
	params := mux.Vars(r)
	var request Request
	request.Method = r.Method
	request.Path = r.URL.String()
	request.Proto = r.Proto
	request.Token = *token
	for name, headers := range r.Header {
		name = strings.ToLower(name)
		for _, value := range headers {
			request.Headers = append(request.Headers, []string{name, value})
		}
	}
	var err error
	request.Body, err = ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	ref, err := json.Marshal(request)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	encoded, err := json.Marshal(Version{From{base64.StdEncoding.EncodeToString(ref)}})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	u, err := url.Parse(*concourseUrl)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	u.Path = fmt.Sprintf(
		"/api/v1/teams/%v/pipelines/%v/resources/%v/check",
		params["team"],
		params["pipeline"],
		params["resource"],
	)
	// NOTE(review): mutating http.DefaultTransport per request is racy when
	// handlers run concurrently; consider configuring TLS once at startup.
	http.DefaultTransport.(*http.Transport).TLSClientConfig =
		&tls.Config{InsecureSkipVerify: *insecure}
	req, err := http.NewRequest("POST", u.String(), bytes.NewBuffer(encoded))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %v", *token))
	req.Header.Set("Content-Type", "application/json")
	c := &http.Client{}
	resp, err := c.Do(req)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		w.WriteHeader(resp.StatusCode)
		return
	}
	// Fix: log the INBOUND client address. Previously this inspected the
	// outbound request (req), whose RemoteAddr is always empty and which
	// never carries the client's X-Forwarded-For header.
	addr := r.RemoteAddr
	if ip, found := header(r, "X-Forwarded-For"); found {
		addr = ip
	}
	log.Printf("[%s] %.3f %d %s %s",
		addr,
		time.Since(proc).Seconds(), // idiomatic form of time.Now().Sub(proc)
		http.StatusOK,
		req.Method, // outbound call being reported: kept as before
		req.URL,
	)
	w.WriteHeader(http.StatusOK)
}
// main parses flags, wires up the health and webhook routes, and serves
// until the listener fails.
func main() {
	flag.Parse()

	router := mux.NewRouter()
	router.HandleFunc("/health", handleHealth).Methods("GET")
	router.HandleFunc("/hook/{team:.+}/{pipeline:.+}/{resource:.+}", handleProxy).Methods("POST")

	log.Printf("[service] listening on %s", *bind)
	if err := http.ListenAndServe(*bind, router); err != nil {
		log.Fatal(err)
	}
}
|
package problem0166
import (
"strconv"
)
// fractionToDecimal renders numerator/denominator as a decimal string,
// wrapping the repeating portion of the fraction, if any, in
// parentheses (e.g. 2/3 -> "0.(6)", 1/6 -> "0.1(6)").
//
// Long division: each remainder determines all following digits, so the
// first repeated remainder marks the start of the repeating cycle. The
// map records the output position at which each remainder was first
// seen.
func fractionToDecimal(numerator int, denominator int) string {
	// Emit a single leading '-' when exactly one operand is negative
	// (and the value is non-zero), then work with magnitudes.
	sign := ""
	if numerator != 0 && (numerator < 0) != (denominator < 0) {
		sign = "-"
	}
	n, d := numerator, denominator
	if n < 0 {
		n = -n
	}
	if d < 0 {
		d = -d
	}
	out := sign + strconv.Itoa(n/d)
	rem := n % d
	if rem == 0 {
		return out
	}
	out += "."
	seen := map[int]int{rem: len(out)}
	for rem != 0 {
		rem *= 10
		out += strconv.Itoa(rem / d)
		rem %= d
		if pos, ok := seen[rem]; ok {
			// Cycle detected: everything from its first occurrence repeats.
			return out[:pos] + "(" + out[pos:] + ")"
		}
		seen[rem] = len(out)
	}
	return out
}
// abs returns the absolute value of num. (The minimum int has no
// positive counterpart and would overflow.)
func abs(num int) int {
	if num < 0 {
		return -num
	}
	return num
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/storage/beta"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
iamUnstruct "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iam"
)
// Bucket is the unstructured-resource handler for storage beta Buckets;
// its methods delegate to the package-level helpers below.
type Bucket struct{}
// BucketToUnstructured converts a typed beta storage Bucket into the
// generic unstructured.Resource map form. Only non-nil fields are
// emitted; enum values are stringified and nested messages become
// map[string]interface{} objects. (Generated-style code: each field is
// copied mechanically.)
func BucketToUnstructured(r *dclService.Bucket) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "storage",
			Version: "beta",
			Type:    "Bucket",
		},
		Object: make(map[string]interface{}),
	}
	// cors: list of CORS rules, each flattened to a map.
	var rCors []interface{}
	for _, rCorsVal := range r.Cors {
		rCorsObject := make(map[string]interface{})
		if rCorsVal.MaxAgeSeconds != nil {
			rCorsObject["maxAgeSeconds"] = *rCorsVal.MaxAgeSeconds
		}
		var rCorsValMethod []interface{}
		for _, rCorsValMethodVal := range rCorsVal.Method {
			rCorsValMethod = append(rCorsValMethod, rCorsValMethodVal)
		}
		rCorsObject["method"] = rCorsValMethod
		var rCorsValOrigin []interface{}
		for _, rCorsValOriginVal := range rCorsVal.Origin {
			rCorsValOrigin = append(rCorsValOrigin, rCorsValOriginVal)
		}
		rCorsObject["origin"] = rCorsValOrigin
		var rCorsValResponseHeader []interface{}
		for _, rCorsValResponseHeaderVal := range rCorsVal.ResponseHeader {
			rCorsValResponseHeader = append(rCorsValResponseHeader, rCorsValResponseHeaderVal)
		}
		rCorsObject["responseHeader"] = rCorsValResponseHeader
		rCors = append(rCors, rCorsObject)
	}
	u.Object["cors"] = rCors
	// lifecycle: emitted only when set and distinct from the sentinel
	// "empty" value.
	if r.Lifecycle != nil && r.Lifecycle != dclService.EmptyBucketLifecycle {
		rLifecycle := make(map[string]interface{})
		var rLifecycleRule []interface{}
		for _, rLifecycleRuleVal := range r.Lifecycle.Rule {
			rLifecycleRuleObject := make(map[string]interface{})
			if rLifecycleRuleVal.Action != nil && rLifecycleRuleVal.Action != dclService.EmptyBucketLifecycleRuleAction {
				rLifecycleRuleValAction := make(map[string]interface{})
				if rLifecycleRuleVal.Action.StorageClass != nil {
					rLifecycleRuleValAction["storageClass"] = *rLifecycleRuleVal.Action.StorageClass
				}
				if rLifecycleRuleVal.Action.Type != nil {
					rLifecycleRuleValAction["type"] = string(*rLifecycleRuleVal.Action.Type)
				}
				rLifecycleRuleObject["action"] = rLifecycleRuleValAction
			}
			if rLifecycleRuleVal.Condition != nil && rLifecycleRuleVal.Condition != dclService.EmptyBucketLifecycleRuleCondition {
				rLifecycleRuleValCondition := make(map[string]interface{})
				if rLifecycleRuleVal.Condition.Age != nil {
					rLifecycleRuleValCondition["age"] = *rLifecycleRuleVal.Condition.Age
				}
				if rLifecycleRuleVal.Condition.CreatedBefore != nil {
					rLifecycleRuleValCondition["createdBefore"] = *rLifecycleRuleVal.Condition.CreatedBefore
				}
				var rLifecycleRuleValConditionMatchesStorageClass []interface{}
				for _, rLifecycleRuleValConditionMatchesStorageClassVal := range rLifecycleRuleVal.Condition.MatchesStorageClass {
					rLifecycleRuleValConditionMatchesStorageClass = append(rLifecycleRuleValConditionMatchesStorageClass, rLifecycleRuleValConditionMatchesStorageClassVal)
				}
				rLifecycleRuleValCondition["matchesStorageClass"] = rLifecycleRuleValConditionMatchesStorageClass
				if rLifecycleRuleVal.Condition.NumNewerVersions != nil {
					rLifecycleRuleValCondition["numNewerVersions"] = *rLifecycleRuleVal.Condition.NumNewerVersions
				}
				if rLifecycleRuleVal.Condition.WithState != nil {
					rLifecycleRuleValCondition["withState"] = string(*rLifecycleRuleVal.Condition.WithState)
				}
				rLifecycleRuleObject["condition"] = rLifecycleRuleValCondition
			}
			rLifecycleRule = append(rLifecycleRule, rLifecycleRuleObject)
		}
		rLifecycle["rule"] = rLifecycleRule
		u.Object["lifecycle"] = rLifecycle
	}
	if r.Location != nil {
		u.Object["location"] = *r.Location
	}
	if r.Logging != nil && r.Logging != dclService.EmptyBucketLogging {
		rLogging := make(map[string]interface{})
		if r.Logging.LogBucket != nil {
			rLogging["logBucket"] = *r.Logging.LogBucket
		}
		if r.Logging.LogObjectPrefix != nil {
			rLogging["logObjectPrefix"] = *r.Logging.LogObjectPrefix
		}
		u.Object["logging"] = rLogging
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.Project != nil {
		u.Object["project"] = *r.Project
	}
	if r.StorageClass != nil {
		u.Object["storageClass"] = string(*r.StorageClass)
	}
	if r.Versioning != nil && r.Versioning != dclService.EmptyBucketVersioning {
		rVersioning := make(map[string]interface{})
		if r.Versioning.Enabled != nil {
			rVersioning["enabled"] = *r.Versioning.Enabled
		}
		u.Object["versioning"] = rVersioning
	}
	if r.Website != nil && r.Website != dclService.EmptyBucketWebsite {
		rWebsite := make(map[string]interface{})
		if r.Website.MainPageSuffix != nil {
			rWebsite["mainPageSuffix"] = *r.Website.MainPageSuffix
		}
		if r.Website.NotFoundPage != nil {
			rWebsite["notFoundPage"] = *r.Website.NotFoundPage
		}
		u.Object["website"] = rWebsite
	}
	return u
}
// UnstructuredToBucket is the inverse of BucketToUnstructured: it
// rebuilds a typed Bucket from the generic map form, returning an error
// naming the first field whose dynamic type does not match the expected
// shape. Absent keys leave the corresponding field nil. (Generated-style
// code: each field is checked and copied mechanically.)
func UnstructuredToBucket(u *unstructured.Resource) (*dclService.Bucket, error) {
	r := &dclService.Bucket{}
	if _, ok := u.Object["cors"]; ok {
		if s, ok := u.Object["cors"].([]interface{}); ok {
			for _, o := range s {
				if objval, ok := o.(map[string]interface{}); ok {
					var rCors dclService.BucketCors
					if _, ok := objval["maxAgeSeconds"]; ok {
						if i, ok := objval["maxAgeSeconds"].(int64); ok {
							rCors.MaxAgeSeconds = dcl.Int64(i)
						} else {
							return nil, fmt.Errorf("rCors.MaxAgeSeconds: expected int64")
						}
					}
					if _, ok := objval["method"]; ok {
						if s, ok := objval["method"].([]interface{}); ok {
							for _, ss := range s {
								if strval, ok := ss.(string); ok {
									rCors.Method = append(rCors.Method, strval)
								}
							}
						} else {
							return nil, fmt.Errorf("rCors.Method: expected []interface{}")
						}
					}
					if _, ok := objval["origin"]; ok {
						if s, ok := objval["origin"].([]interface{}); ok {
							for _, ss := range s {
								if strval, ok := ss.(string); ok {
									rCors.Origin = append(rCors.Origin, strval)
								}
							}
						} else {
							return nil, fmt.Errorf("rCors.Origin: expected []interface{}")
						}
					}
					if _, ok := objval["responseHeader"]; ok {
						if s, ok := objval["responseHeader"].([]interface{}); ok {
							for _, ss := range s {
								if strval, ok := ss.(string); ok {
									rCors.ResponseHeader = append(rCors.ResponseHeader, strval)
								}
							}
						} else {
							return nil, fmt.Errorf("rCors.ResponseHeader: expected []interface{}")
						}
					}
					r.Cors = append(r.Cors, rCors)
				}
			}
		} else {
			return nil, fmt.Errorf("r.Cors: expected []interface{}")
		}
	}
	if _, ok := u.Object["lifecycle"]; ok {
		if rLifecycle, ok := u.Object["lifecycle"].(map[string]interface{}); ok {
			r.Lifecycle = &dclService.BucketLifecycle{}
			if _, ok := rLifecycle["rule"]; ok {
				if s, ok := rLifecycle["rule"].([]interface{}); ok {
					for _, o := range s {
						if objval, ok := o.(map[string]interface{}); ok {
							var rLifecycleRule dclService.BucketLifecycleRule
							if _, ok := objval["action"]; ok {
								if rLifecycleRuleAction, ok := objval["action"].(map[string]interface{}); ok {
									rLifecycleRule.Action = &dclService.BucketLifecycleRuleAction{}
									if _, ok := rLifecycleRuleAction["storageClass"]; ok {
										if s, ok := rLifecycleRuleAction["storageClass"].(string); ok {
											rLifecycleRule.Action.StorageClass = dcl.String(s)
										} else {
											return nil, fmt.Errorf("rLifecycleRule.Action.StorageClass: expected string")
										}
									}
									if _, ok := rLifecycleRuleAction["type"]; ok {
										if s, ok := rLifecycleRuleAction["type"].(string); ok {
											rLifecycleRule.Action.Type = dclService.BucketLifecycleRuleActionTypeEnumRef(s)
										} else {
											return nil, fmt.Errorf("rLifecycleRule.Action.Type: expected string")
										}
									}
								} else {
									return nil, fmt.Errorf("rLifecycleRule.Action: expected map[string]interface{}")
								}
							}
							if _, ok := objval["condition"]; ok {
								if rLifecycleRuleCondition, ok := objval["condition"].(map[string]interface{}); ok {
									rLifecycleRule.Condition = &dclService.BucketLifecycleRuleCondition{}
									if _, ok := rLifecycleRuleCondition["age"]; ok {
										if i, ok := rLifecycleRuleCondition["age"].(int64); ok {
											rLifecycleRule.Condition.Age = dcl.Int64(i)
										} else {
											return nil, fmt.Errorf("rLifecycleRule.Condition.Age: expected int64")
										}
									}
									if _, ok := rLifecycleRuleCondition["createdBefore"]; ok {
										if s, ok := rLifecycleRuleCondition["createdBefore"].(string); ok {
											rLifecycleRule.Condition.CreatedBefore = dcl.String(s)
										} else {
											return nil, fmt.Errorf("rLifecycleRule.Condition.CreatedBefore: expected string")
										}
									}
									if _, ok := rLifecycleRuleCondition["matchesStorageClass"]; ok {
										if s, ok := rLifecycleRuleCondition["matchesStorageClass"].([]interface{}); ok {
											for _, ss := range s {
												if strval, ok := ss.(string); ok {
													rLifecycleRule.Condition.MatchesStorageClass = append(rLifecycleRule.Condition.MatchesStorageClass, strval)
												}
											}
										} else {
											return nil, fmt.Errorf("rLifecycleRule.Condition.MatchesStorageClass: expected []interface{}")
										}
									}
									if _, ok := rLifecycleRuleCondition["numNewerVersions"]; ok {
										if i, ok := rLifecycleRuleCondition["numNewerVersions"].(int64); ok {
											rLifecycleRule.Condition.NumNewerVersions = dcl.Int64(i)
										} else {
											return nil, fmt.Errorf("rLifecycleRule.Condition.NumNewerVersions: expected int64")
										}
									}
									if _, ok := rLifecycleRuleCondition["withState"]; ok {
										if s, ok := rLifecycleRuleCondition["withState"].(string); ok {
											rLifecycleRule.Condition.WithState = dclService.BucketLifecycleRuleConditionWithStateEnumRef(s)
										} else {
											return nil, fmt.Errorf("rLifecycleRule.Condition.WithState: expected string")
										}
									}
								} else {
									return nil, fmt.Errorf("rLifecycleRule.Condition: expected map[string]interface{}")
								}
							}
							r.Lifecycle.Rule = append(r.Lifecycle.Rule, rLifecycleRule)
						}
					}
				} else {
					return nil, fmt.Errorf("r.Lifecycle.Rule: expected []interface{}")
				}
			}
		} else {
			return nil, fmt.Errorf("r.Lifecycle: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["location"]; ok {
		if s, ok := u.Object["location"].(string); ok {
			r.Location = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Location: expected string")
		}
	}
	if _, ok := u.Object["logging"]; ok {
		if rLogging, ok := u.Object["logging"].(map[string]interface{}); ok {
			r.Logging = &dclService.BucketLogging{}
			if _, ok := rLogging["logBucket"]; ok {
				if s, ok := rLogging["logBucket"].(string); ok {
					r.Logging.LogBucket = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.Logging.LogBucket: expected string")
				}
			}
			if _, ok := rLogging["logObjectPrefix"]; ok {
				if s, ok := rLogging["logObjectPrefix"].(string); ok {
					r.Logging.LogObjectPrefix = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.Logging.LogObjectPrefix: expected string")
				}
			}
		} else {
			return nil, fmt.Errorf("r.Logging: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["project"]; ok {
		if s, ok := u.Object["project"].(string); ok {
			r.Project = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Project: expected string")
		}
	}
	if _, ok := u.Object["storageClass"]; ok {
		if s, ok := u.Object["storageClass"].(string); ok {
			r.StorageClass = dclService.BucketStorageClassEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.StorageClass: expected string")
		}
	}
	if _, ok := u.Object["versioning"]; ok {
		if rVersioning, ok := u.Object["versioning"].(map[string]interface{}); ok {
			r.Versioning = &dclService.BucketVersioning{}
			if _, ok := rVersioning["enabled"]; ok {
				if b, ok := rVersioning["enabled"].(bool); ok {
					r.Versioning.Enabled = dcl.Bool(b)
				} else {
					return nil, fmt.Errorf("r.Versioning.Enabled: expected bool")
				}
			}
		} else {
			return nil, fmt.Errorf("r.Versioning: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["website"]; ok {
		if rWebsite, ok := u.Object["website"].(map[string]interface{}); ok {
			r.Website = &dclService.BucketWebsite{}
			if _, ok := rWebsite["mainPageSuffix"]; ok {
				if s, ok := rWebsite["mainPageSuffix"].(string); ok {
					r.Website.MainPageSuffix = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.Website.MainPageSuffix: expected string")
				}
			}
			if _, ok := rWebsite["notFoundPage"]; ok {
				if s, ok := rWebsite["notFoundPage"].(string); ok {
					r.Website.NotFoundPage = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.Website.NotFoundPage: expected string")
				}
			}
		} else {
			return nil, fmt.Errorf("r.Website: expected map[string]interface{}")
		}
	}
	return r, nil
}
// GetBucket fetches the live state of the Bucket described by u and
// returns it in unstructured form.
func GetBucket(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return nil, err
	}
	client := dclService.NewClient(config)
	res, err = client.GetBucket(ctx, res)
	if err != nil {
		return nil, err
	}
	return BucketToUnstructured(res), nil
}
// ListBucket returns every Bucket in project, following server-side
// pagination until the listing is exhausted.
func ListBucket(ctx context.Context, config *dcl.Config, project string) ([]*unstructured.Resource, error) {
	client := dclService.NewClient(config)
	page, err := client.ListBucket(ctx, project)
	if err != nil {
		return nil, err
	}
	var results []*unstructured.Resource
	for {
		for _, item := range page.Items {
			results = append(results, BucketToUnstructured(item))
		}
		if !page.HasNext() {
			return results, nil
		}
		if err := page.Next(ctx, client); err != nil {
			return nil, err
		}
	}
}
// ApplyBucket creates or updates the Bucket described by u, honoring an
// optional state hint carried in opts, and returns the resulting live
// state in unstructured form.
func ApplyBucket(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return nil, err
	}
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		converted, err := UnstructuredToBucket(hint)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(converted))
	}
	applied, err := dclService.NewClient(config).ApplyBucket(ctx, res, opts...)
	if err != nil {
		return nil, err
	}
	return BucketToUnstructured(applied), nil
}
// BucketHasDiff reports whether applying u would change the live
// Bucket. It performs a dry-run Apply with creation, modification, and
// destruction all blocked; an ApplyInfeasibleError then means a real
// change would have been required.
func BucketHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return false, err
	}
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		converted, err := UnstructuredToBucket(hint)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(converted))
	}
	opts = append(opts,
		dcl.WithLifecycleParam(dcl.BlockDestruction),
		dcl.WithLifecycleParam(dcl.BlockCreation),
		dcl.WithLifecycleParam(dcl.BlockModification))
	if _, err := dclService.NewClient(config).ApplyBucket(ctx, res, opts...); err != nil {
		if _, infeasible := err.(dcl.ApplyInfeasibleError); infeasible {
			return true, nil
		}
		return false, err
	}
	return false, nil
}
// DeleteBucket deletes the Bucket described by u.
func DeleteBucket(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return err
	}
	return dclService.NewClient(config).DeleteBucket(ctx, res)
}
// BucketID returns the canonical identity string of the Bucket in u.
func BucketID(u *unstructured.Resource) (string, error) {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return "", err
	}
	return res.ID()
}
// STV returns the service/type/version triple identifying this resource
// kind in the unstructured registry.
//
// NOTE(review): this positional literal is ("storage", "Bucket", "beta")
// while BucketToUnstructured uses the keyed form Service: "storage",
// Version: "beta", Type: "Bucket". If ServiceTypeVersion declares its
// fields in the order Service, Version, Type, this literal swaps
// Version and Type — confirm against the struct definition.
func (r *Bucket) STV() unstructured.ServiceTypeVersion {
	return unstructured.ServiceTypeVersion{
		"storage",
		"Bucket",
		"beta",
	}
}
// SetPolicyBucket replaces the IAM policy of the Bucket described by u
// with p and returns the stored policy in unstructured form.
func SetPolicyBucket(ctx context.Context, config *dcl.Config, u *unstructured.Resource, p *unstructured.Resource) (*unstructured.Resource, error) {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return nil, err
	}
	policy, err := iamUnstruct.UnstructuredToPolicy(p)
	if err != nil {
		return nil, err
	}
	policy.Resource = res
	updated, err := iam.NewClient(config).SetPolicy(ctx, policy)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(updated), nil
}
// SetPolicyWithEtagBucket replaces the IAM policy of the Bucket
// described by u with p, using the policy's etag for optimistic
// concurrency, and returns the stored policy in unstructured form.
func SetPolicyWithEtagBucket(ctx context.Context, config *dcl.Config, u *unstructured.Resource, p *unstructured.Resource) (*unstructured.Resource, error) {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return nil, err
	}
	policy, err := iamUnstruct.UnstructuredToPolicy(p)
	if err != nil {
		return nil, err
	}
	policy.Resource = res
	updated, err := iam.NewClient(config).SetPolicyWithEtag(ctx, policy)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(updated), nil
}
// GetPolicyBucket fetches the IAM policy of the Bucket described by u.
func GetPolicyBucket(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return nil, err
	}
	policy, err := iam.NewClient(config).GetPolicy(ctx, res)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(policy), nil
}
// SetPolicyMemberBucket adds or updates the IAM member m on the policy
// of the Bucket described by u and returns the resulting policy.
func SetPolicyMemberBucket(ctx context.Context, config *dcl.Config, u *unstructured.Resource, m *unstructured.Resource) (*unstructured.Resource, error) {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return nil, err
	}
	member, err := iamUnstruct.UnstructuredToMember(m)
	if err != nil {
		return nil, err
	}
	member.Resource = res
	policy, err := iam.NewClient(config).SetMember(ctx, member)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(policy), nil
}
// GetPolicyMemberBucket fetches the binding of (role, member) on the
// IAM policy of the Bucket described by u.
func GetPolicyMemberBucket(ctx context.Context, config *dcl.Config, u *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return nil, err
	}
	policyMember, err := iam.NewClient(config).GetMember(ctx, res, role, member)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.MemberToUnstructured(policyMember), nil
}
// DeletePolicyMemberBucket removes the IAM member m from the policy of
// the Bucket described by u.
func DeletePolicyMemberBucket(ctx context.Context, config *dcl.Config, u *unstructured.Resource, m *unstructured.Resource) error {
	res, err := UnstructuredToBucket(u)
	if err != nil {
		return err
	}
	member, err := iamUnstruct.UnstructuredToMember(m)
	if err != nil {
		return err
	}
	member.Resource = res
	return iam.NewClient(config).DeleteMember(ctx, member)
}
// The methods below satisfy the unstructured resource-handler interface
// by delegating one-for-one to the package-level Bucket helpers above.

// SetPolicyMember delegates to SetPolicyMemberBucket.
func (r *Bucket) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyMemberBucket(ctx, config, resource, member)
}

// GetPolicyMember delegates to GetPolicyMemberBucket.
func (r *Bucket) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return GetPolicyMemberBucket(ctx, config, resource, role, member)
}

// DeletePolicyMember delegates to DeletePolicyMemberBucket.
func (r *Bucket) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return DeletePolicyMemberBucket(ctx, config, resource, member)
}

// SetPolicy delegates to SetPolicyBucket.
func (r *Bucket) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyBucket(ctx, config, resource, policy)
}

// SetPolicyWithEtag delegates to SetPolicyWithEtagBucket.
func (r *Bucket) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyWithEtagBucket(ctx, config, resource, policy)
}

// GetPolicy delegates to GetPolicyBucket.
func (r *Bucket) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetPolicyBucket(ctx, config, resource)
}

// Get delegates to GetBucket.
func (r *Bucket) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetBucket(ctx, config, resource)
}

// Apply delegates to ApplyBucket.
func (r *Bucket) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyBucket(ctx, config, resource, opts...)
}

// HasDiff delegates to BucketHasDiff.
func (r *Bucket) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return BucketHasDiff(ctx, config, resource, opts...)
}

// Delete delegates to DeleteBucket.
func (r *Bucket) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteBucket(ctx, config, resource)
}

// ID delegates to BucketID.
func (r *Bucket) ID(resource *unstructured.Resource) (string, error) {
	return BucketID(resource)
}
// init registers the Bucket handler with the unstructured resource
// registry at package load time.
func init() {
	unstructured.Register(&Bucket{})
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/lib/pq"
"github.com/op/go-logging"
"os"
"runtime"
"strconv"
)
// CheckEnvVars checks that all the environment variables required are set, without checking their value. It will panic if one is missing.
func CheckEnvVars() {
envvars := []string{"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_STORAGE_BUCKET_NAME", "REDIS_URL", "DATABASE_URL"}
for _, envvar := range envvars {
if os.Getenv(envvar) == "" {
panic(fmt.Errorf("environment variable `%s` is missing or empty", envvar))
}
}
}
// ConfigureRuntime configures the server runtime, including the number
// of CPUs to use. MAX_CPUS overrides the count; when unset or
// unparsable we fall back to every CPU the runtime reports.
func ConfigureRuntime() {
	// Note that we're using os instead of syscall because we'll be parsing
	// the int anyway, so there is no need to check if the envvar was found.
	useNumCPUs := runtime.NumCPU()
	if parsed, err := strconv.ParseInt(os.Getenv("MAX_CPUS"), 10, 0); err == nil {
		useNumCPUs = int(parsed)
	}
	runtime.GOMAXPROCS(useNumCPUs)
	// Fix: Infof, not Info — go-logging's Info does not interpret the %d
	// format verb, so the count was never substituted into the message.
	log.Infof("Running with %d CPUs.\n", useNumCPUs)
}
// ConfigureLogger configures the default logger (named "gofetch"): a
// colored stderr backend, with the level taken from LOG_LEVEL and
// defaulting to INFO when the variable is unset or invalid.
func ConfigureLogger() {
	// From https://github.com/op/go-logging/blob/master/examples/example.go.
	logFormat := logging.MustStringFormatter("%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level}%{color:reset} %{message}")
	logging.SetBackend(logging.NewBackendFormatter(logging.NewLogBackend(os.Stderr, "", 0), logFormat))
	// Let's grab the log level from the environment, or set it to INFO.
	envlvl := os.Getenv("LOG_LEVEL")
	if envlvl == "" {
		log.Notice("No log level defined in environment. Defaulting to INFO.\n")
		logging.SetLevel(logging.INFO, "")
		return
	}
	lvl, err := logging.LogLevel(envlvl)
	if err != nil {
		lvl = logging.INFO
	}
	// Fix: Noticef, not Notice — the message contains a %s verb that
	// go-logging's Notice does not interpret.
	log.Noticef("Set logging level to %s.\n", lvl)
	logging.SetLevel(lvl, "")
}
// GetDBConn returns a database handle for DATABASE_URL. Note that
// database/sql handles a connection pool by itself, and sql.Open only
// validates its arguments — it does not dial the server.
func GetDBConn() *sql.DB {
	db, err := sql.Open("postgres", os.Getenv("DATABASE_URL"))
	if err != nil {
		// Fix: the old message claimed a failed connection (sql.Open never
		// connects) and stringified the error; %w keeps it inspectable via
		// errors.Is/As.
		panic(fmt.Errorf("could not open database: %w", err))
	}
	return db
}
|
package errors
import (
"encoding/json"
"errors"
"testing"
"github.com/rs/zerolog"
)
// TestError_UnmarshalJSON covers the two basic decode paths: empty
// input must error, and a round-tripped Error must decode cleanly.
func TestError_UnmarshalJSON(t *testing.T) {
	t.Run("no bytes", func(t *testing.T) {
		e := &Error{
			Err:   errors.New("test error"),
			Op:    "testOp",
			Kind:  KindUnexpected,
			Level: zerolog.TraceLevel,
		}
		if err := e.UnmarshalJSON([]byte{}); err == nil {
			t.Errorf("Error.UnmarshalJSON() error = %v, wantErr true", err)
		}
	})
	t.Run("empty fields", func(t *testing.T) {
		e := &Error{
			Err:   errors.New("test error"),
			Op:    "testOp",
			Kind:  KindUnexpected,
			Level: zerolog.TraceLevel,
		}
		// Fix: the Marshal error was silently overwritten before being
		// checked, so a failed round-trip setup went unnoticed.
		p, err := json.Marshal(e)
		if err != nil {
			t.Fatalf("json.Marshal() setup error = %v", err)
		}
		if err := e.UnmarshalJSON(p); err != nil {
			t.Errorf("Error.UnmarshalJSON() error = %v, wantErr false", err)
		}
	})
}
|
package git
import (
"errors"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
log "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
ssh2 "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
)
// GitArtifactDriver is the artifact driver for a git repo.
type GitArtifactDriver struct {
	Username              string // HTTP basic-auth user (paired with Password)
	Password              string // HTTP basic-auth password or token
	SSHPrivateKey         string // PEM-encoded key; checked before user/password in auth()
	InsecureIgnoreHostKey bool   // skip SSH host-key verification when true
}
// auth derives git credentials from the driver configuration. It
// returns a cleanup func (always non-nil), an auth method for go-git,
// and extra environment variables for forked git commands. SSH keys
// take precedence over username/password; with neither configured,
// anonymous access is used.
func (g *GitArtifactDriver) auth() (func(), transport.AuthMethod, []string, error) {
	if g.SSHPrivateKey != "" {
		signer, err := ssh.ParsePrivateKey([]byte(g.SSHPrivateKey))
		if err != nil {
			return nil, nil, nil, err
		}
		// The key is also written to disk so forked `git` (via
		// GIT_SSH_COMMAND) can use it; the cleanup func removes it.
		privateKeyFile, err := ioutil.TempFile("", "id_rsa.")
		if err != nil {
			return nil, nil, nil, err
		}
		err = ioutil.WriteFile(privateKeyFile.Name(), []byte(g.SSHPrivateKey), 0600)
		if err != nil {
			return nil, nil, nil, err
		}
		auth := &ssh2.PublicKeys{User: "git", Signer: signer}
		args := []string{"ssh", "-i", privateKeyFile.Name()}
		// Consolidated: host-key handling was previously spread over three
		// partly redundant InsecureIgnoreHostKey blocks.
		if g.InsecureIgnoreHostKey {
			auth.HostKeyCallback = ssh.InsecureIgnoreHostKey()
			args = append(args, "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null")
		} else {
			// Fix: a dangling "-o" with no value used to be appended here,
			// which makes ssh consume the next word as the option's argument.
			args = append(args, "-o", "StrictHostKeyChecking=yes")
		}
		env := []string{"GIT_SSH_COMMAND=" + strings.Join(args, " ")}
		if g.InsecureIgnoreHostKey {
			env = append(env, "GIT_SSL_NO_VERIFY=true")
		}
		return func() { _ = os.Remove(privateKeyFile.Name()) },
			auth,
			env,
			nil
	}
	if g.Username != "" || g.Password != "" {
		// Write a one-shot GIT_ASKPASS helper that echoes the credentials
		// from the environment; created only if not already present.
		filename, err := filepath.Abs("git-ask-pass.sh")
		if err != nil {
			return nil, nil, nil, err
		}
		_, err = os.Stat(filename)
		if os.IsNotExist(err) {
			err := ioutil.WriteFile(filename, []byte(`#!/bin/sh
case "$1" in
Username*) echo "${GIT_USERNAME}" ;;
Password*) echo "${GIT_PASSWORD}" ;;
esac
`), 0755)
			if err != nil {
				return nil, nil, nil, err
			}
		}
		return func() {},
			&http.BasicAuth{Username: g.Username, Password: g.Password},
			[]string{
				"GIT_ASKPASS=" + filename,
				"GIT_USERNAME=" + g.Username,
				"GIT_PASSWORD=" + g.Password,
			},
			nil
	}
	// No credentials configured: anonymous access.
	return func() {}, nil, nil, nil
}
// Save is unsupported for git output artifacts: a git repo is an input
// source only, so this always returns an error.
func (g *GitArtifactDriver) Save(string, *wfv1.Artifact) error {
	return errors.New("git output artifacts unsupported")
}
// Load clones the artifact's git repo into path, optionally fetches
// additional refspecs, and — when a revision is configured — checks it
// out and updates submodules via forked git commands.
func (g *GitArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string) error {
	closer, auth, env, err := g.auth()
	if err != nil {
		return err
	}
	defer closer()
	repo, err := git.PlainClone(path, false, &git.CloneOptions{
		URL:               inputArtifact.Git.Repo,
		RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
		Auth:              auth,
		Depth:             inputArtifact.Git.GetDepth(),
	})
	if err != nil {
		return err
	}
	if inputArtifact.Git.Fetch != nil {
		refSpecs := make([]config.RefSpec, len(inputArtifact.Git.Fetch))
		for i, spec := range inputArtifact.Git.Fetch {
			refSpecs[i] = config.RefSpec(spec)
		}
		fetchOptions := git.FetchOptions{
			Auth:     auth,
			RefSpecs: refSpecs,
			Depth:    inputArtifact.Git.GetDepth(),
		}
		err = fetchOptions.Validate()
		if err != nil {
			return err
		}
		err = repo.Fetch(&fetchOptions)
		// Despite its name, isAlreadyUpToDateErr is true for real fetch
		// failures and false for the benign "already up-to-date" result.
		if isAlreadyUpToDateErr(err) {
			return err
		}
	}
	if inputArtifact.Git.Revision != "" {
		// We still rely on forking git for checkout, since go-git does not have a reliable
		// way of resolving revisions (e.g. mybranch, HEAD^, v1.2.3)
		log.Infof("Checking out revision %s", inputArtifact.Git.Revision)
		cmd := exec.Command("git", "checkout", inputArtifact.Git.Revision)
		cmd.Dir = path
		cmd.Env = env
		output, err := cmd.Output()
		if err != nil {
			return g.error(err, cmd)
		}
		log.Infof("`%s` stdout:\n%s", cmd.Args, string(output))
		submodulesCmd := exec.Command("git", "submodule", "update", "--init", "--recursive", "--force")
		submodulesCmd.Dir = path
		submodulesCmd.Env = env
		submoduleOutput, err := submodulesCmd.Output()
		if err != nil {
			// Fix: errors and the log line below previously referenced the
			// checkout cmd instead of the submodule command.
			return g.error(err, submodulesCmd)
		}
		log.Infof("`%s` stdout:\n%s", submodulesCmd.Args, string(submoduleOutput))
	}
	return nil
}
// isAlreadyUpToDateErr reports whether err is a real (actionable) fetch
// failure.
//
// NOTE(review): the name is inverted relative to the behavior — it
// returns true for any non-nil error whose message is NOT
// "already up-to-date". Load relies on this to swallow the benign
// up-to-date result, so rename with care.
func isAlreadyUpToDateErr(err error) bool {
	return err != nil && err.Error() != "already up-to-date"
}
// error converts a failed command execution into a concise error: exit
// errors have their captured stderr logged and are reduced to the first
// stderr line; any other error is passed through unchanged.
func (g *GitArtifactDriver) error(err error, cmd *exec.Cmd) error {
	exErr, ok := err.(*exec.ExitError)
	if !ok {
		return err
	}
	stderr := string(exErr.Stderr)
	log.Errorf("`%s` stderr:\n%s", cmd.Args, stderr)
	return errors.New(strings.Split(stderr, "\n")[0])
}
|
// rob returns the maximum sum obtainable from nums without selecting
// two adjacent elements (the "house robber" recurrence:
// best[i] = max(best[i-1], best[i-2]+nums[i])).
//
// Improvements over the previous version: O(1) extra space instead of a
// full dp slice, the redundant final max of the last two dp entries
// (the dp sequence is non-decreasing) is gone, and the sibling max()
// helper is no longer needed.
func rob(nums []int) int {
	switch len(nums) {
	case 0:
		return 0
	case 1:
		return nums[0]
	}
	// prev2/prev1 mirror dp[i-2]/dp[i-1] of the original table.
	prev2 := nums[0]
	prev1 := nums[0]
	if nums[1] > prev1 {
		prev1 = nums[1]
	}
	for i := 2; i < len(nums); i++ {
		take := prev2 + nums[i]
		if prev1 > take {
			take = prev1
		}
		prev2, prev1 = prev1, take
	}
	return prev1
}
// max returns the larger of its two arguments.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
package model
// account describes a single bank account attached to a request.
// NOTE(review): snake_case field names are unidiomatic Go (MixedCaps is
// conventional), but renaming would touch every user in this package.
type account struct {
	account_num      string // account number
	account_currency string // account currency code — presumably ISO 4217; confirm with producer
}

// request groups the document metadata, bank-branch parties, and the
// accounts it covers. All fields are unexported, so only this package
// can populate it.
type request struct {
	inn                  string // taxpayer id of the requester — assumed; confirm
	bank_branch_inn      string // taxpayer id of the bank branch — assumed; confirm
	doc_date             string
	doc_num              string
	bank_branch_director string
	bank_branch_operator string
	authCode             string // note: the only camelCase field — inconsistent with the rest
	accounts             []account
}
|
package pattern
import "fmt"
// WithName holds a display name; embedding it gives a type the PrintStr
// method (composition-over-inheritance demo).
type WithName struct {
	Name string
}

// Country gets Name/PrintStr by embedding WithName.
type Country struct {
	WithName
}

// City gets Name/PrintStr by embedding WithName.
type City struct {
	WithName
}

// PrintStr writes the stored name to stdout on its own line.
func (w WithName) PrintStr() {
	fmt.Println(w.Name)
}
// Shape is a polygon that can report its side count and its area.
// (Area is int-valued here, so fractional areas are not representable.)
type Shape interface {
	Sides() int
	Area() int
}
// Square is a Shape with four equal sides of length Len.
type Square struct {
	Len int
}

// Sides returns the number of sides of a square.
func (s Square) Sides() int {
	return 4
}

// Area returns the surface area of the square.
// Fix: this previously returned the side length itself rather than the
// side length squared.
func (s Square) Area() int {
	return s.Len * s.Len
}
// MapUpCase applies fn to every element of arr and returns the results
// in order. Despite the name, this is a generic map: fn may be any
// string transform, not only upper-casing.
//
// Returns nil for an empty or nil input, preserving the previous
// behavior for callers that compare the result against nil.
func MapUpCase(arr []string, fn func(s string) string) []string {
	if len(arr) == 0 {
		return nil
	}
	// Pre-size the result: the output length is known up front, avoiding
	// repeated growth copies from append.
	out := make([]string, len(arr))
	for i, it := range arr {
		out[i] = fn(it)
	}
	return out
}
|
package relay
import (
"net"
"strconv"
)
// TCPRelayConn is the wrapper of net.Conn used for tcp relay. It
// carries a reference to its owning relay so Close can release the NAT
// entry associated with the connection's remote port.
type TCPRelayConn struct {
	net.Conn
	relay *TCPRelay // owner; its NAT table is updated on Close
}
// Close rewrite the Close method of net.Conn,
// it put the nat port back to nat pool after
// conn closed.
func (conn TCPRelayConn) Close() error {
// get nat port, and put it into nat pool.
addr := conn.RemoteAddr().String()
_, port, _ := net.SplitHostPort(addr)
portInt, _ := strconv.Atoi(port)
conn.relay.NAT.DelRecord(uint16(portInt))
// close the conn.
return conn.Conn.Close()
} |
package subscription
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/google/uuid"
"github.com/imrenagi/go-payment"
"github.com/imrenagi/go-payment/invoice"
)
// New creates empty subscription with valid UUID.
// Defaults: missed payments ignored (TODO: configuration), recharge and
// e-mail notifications enabled, one-week invoice duration, no invoices.
func New() *Subscription {
	s := &Subscription{Number: uuid.New().String()}
	// TODO change this with configuration
	s.MissedPaymentAction = MissedPaymentActionIgnore
	s.Recharge = true
	s.ShouldSendEmail = true
	s.InvoiceDuration = 7 * 24 * time.Hour
	s.Invoices = make([]invoice.Invoice, 0)
	return s
}
// Subscription is object recording the recurring payment
type Subscription struct {
	payment.Model
	// Number is the external identifier (a UUID from New); unique index
	// at the database level.
	Number      string  `json:"number" gorm:"unique_index:subs_number_k"`
	Name        string  `json:"name"`
	Description string  `json:"description" gorm:"type:text"`
	Amount      float64 `json:"amount"`
	UserID      string  `json:"user_id"`
	Currency    string  `json:"currency"`
	// Schedule controls when the subscription starts and charges.
	Schedule Schedule `json:"schedule" gorm:"ForeignKey:SubscriptionID"`
	// TotalReccurence caps how many invoices Save accepts; 0 means
	// unlimited (see Start/Save). Note the field name's spelling drifts
	// from the JSON tag "total_recurrence".
	TotalReccurence     int                 `json:"total_recurrence"`
	InvoiceDuration     time.Duration       `json:"invoice_duration"`
	ShouldSendEmail     bool                `json:"should_send_email"`
	MissedPaymentAction MissedPaymentAction `json:"missed_payment_action"`
	Recharge            bool                `json:"recharge"`
	CardToken           string              `json:"card_token"`
	// GatewayRecurringID and Gateway are filled from the gateway's
	// response in Start.
	GatewayRecurringID string            `json:"gateway_recurring_id"`
	Gateway            string            `json:"gateway"`
	Invoices           []invoice.Invoice `json:"invoices"`
	// ChargeImmediately will create first invoice no matter
	// what the startat value is
	ChargeImmediately  bool   `json:"charge_immediately"`
	LastCreatedInvoice string `json:"last_created_invoice"`
	// Status is excluded from plain JSON; MarshalJSON emits its String().
	Status Status `json:"-"`
}
// MarshalJSON serializes the subscription plus two derived fields:
// the string form of Status and the invoice count as progress.
// The Alias type copies Subscription's fields but not its methods, which
// keeps json.Marshal from recursing back into this MarshalJSON.
func (s *Subscription) MarshalJSON() ([]byte, error) {
	type Alias Subscription
	return json.Marshal(&struct {
		*Alias
		Status             string `json:"status"`
		RecurrenceProgress int    `json:"recurrence_progress"`
	}{
		Alias:              (*Alias)(s),
		Status:             s.Status.String(),
		RecurrenceProgress: s.recurrenceProgress(),
	})
}
// Start will create subscription to the payment gateway and update its
// properties: gateway name, recurring ID, status and last invoice URL are
// copied from the gateway response, and the schedule's next execution is
// computed when more than one charge remains (TotalReccurence 0 = unlimited).
//
// The original dereferenced Schedule.PreviousExecutionAt (copied from
// StartAt, a *time.Time) without a nil check and panicked for
// subscriptions created without a start time; the guard below skips the
// next-execution computation in that case.
func (s *Subscription) Start(ctx context.Context, c creator) error {
	res, err := c.Create(ctx, s)
	if err != nil {
		return err
	}
	s.Gateway = c.Gateway().String()
	s.GatewayRecurringID = res.ID
	s.Status = res.Status
	s.LastCreatedInvoice = res.LastCreatedInvoiceURL
	s.Schedule.PreviousExecutionAt = s.Schedule.StartAt
	if s.Schedule.PreviousExecutionAt != nil &&
		(s.TotalReccurence == 0 || s.TotalReccurence > 1) {
		next := s.Schedule.NextSince(*s.Schedule.PreviousExecutionAt)
		s.Schedule.NextExecutionAt = &next
	}
	return nil
}
// recurrenceProgress reports how many invoices have been recorded so far.
func (s Subscription) recurrenceProgress() int {
	count := len(s.Invoices)
	return count
}
// Pause change the subscription status to paused and stop the schedule.
// Only an active subscription may be paused; anything else fails with
// payment.ErrCantProceed. The gateway-side pause runs first so local
// state only changes on success.
func (s *Subscription) Pause(ctx context.Context, p pauser) error {
	if s.Status != StatusActive {
		return fmt.Errorf("can't pause subscription if it is not in active state: %w", payment.ErrCantProceed)
	}
	err := p.Pause(ctx, s)
	if err != nil {
		return err
	}
	s.Status = StatusPaused
	return nil
}
// Resume moves a paused subscription back to active, recomputing the next
// execution time to account for the time spent paused. Resuming anything
// but a paused subscription fails with payment.ErrCantProceed.
func (s *Subscription) Resume(ctx context.Context, r resumer) error {
	if s.Status != StatusPaused {
		return fmt.Errorf("can't resume subscription if it is not in paused state: %w", payment.ErrCantProceed)
	}
	err := r.Resume(ctx, s)
	if err != nil {
		return err
	}
	s.Schedule.NextExecutionAt = s.Schedule.NextAfterPause()
	s.Status = StatusActive
	return nil
}
// Stop should stop subscription: the gateway-side stop runs first, then
// the schedule is cleared and the status set to stopped. Stopping an
// already-stopped subscription fails with payment.ErrCantProceed.
func (s *Subscription) Stop(ctx context.Context, st stopper) error {
	if s.Status == StatusStop {
		return fmt.Errorf("subscriptions has been stopped: %w", payment.ErrCantProceed)
	}
	err := st.Stop(ctx, s)
	if err != nil {
		return err
	}
	s.Schedule.NextExecutionAt = nil
	s.Status = StatusStop
	return nil
}
// Save stores invoice created for subscription and renew subscription
// schedule. The recurrence cap is checked BEFORE appending, so exactly
// TotalReccurence invoices are accepted (0 = unlimited).
func (s *Subscription) Save(inv *invoice.Invoice) error {
	if s.TotalReccurence != 0 && s.recurrenceProgress() >= s.TotalReccurence {
		return fmt.Errorf("should not accept more invoice since all invoices has been recorded %w", payment.ErrCantProceed)
	}
	inv.SubscriptionID = &s.ID
	s.Invoices = append(s.Invoices, *inv)
	// Roll the schedule forward one interval when another charge is planned.
	if current := s.Schedule.NextExecutionAt; current != nil {
		upcoming := s.Schedule.NextSince(*current)
		s.Schedule.PreviousExecutionAt = current
		s.Schedule.NextExecutionAt = &upcoming
	}
	return nil
}
// NewSchedule create new payment schedule that starts at start and
// repeats every interval units.
func NewSchedule(interval int, unit IntervalUnit, start *time.Time) *Schedule {
	return &Schedule{
		Interval:     interval,
		IntervalUnit: unit,
		StartAt:      start,
	}
}
// Schedule tells when subscription starts and charges
type Schedule struct {
	payment.Model
	SubscriptionID uint64 `json:"-" gorm:"index:schedule_subs_id"`
	// Interval and IntervalUnit together define the gap between charges:
	// Interval * IntervalUnit.Duration() (see NextSince).
	Interval     int          `json:"interval"`
	IntervalUnit IntervalUnit `json:"interval_unit"`
	// Pointer times allow "unset"; NextExecutionAt == nil means no
	// further charge is planned.
	StartAt             *time.Time `json:"start_at"`
	PreviousExecutionAt *time.Time `json:"previous_execution_at"`
	NextExecutionAt     *time.Time `json:"next_execution_at"`
}
// NextSince returns t advanced by one schedule interval
// (Interval * IntervalUnit.Duration()).
func (s *Schedule) NextSince(t time.Time) time.Time {
	step := time.Duration(s.Interval) * s.IntervalUnit.Duration()
	return t.Add(step)
}
// NextAfterPause calculate when the next payment should be executed after
// it is paused: a future NextExecutionAt is kept as-is; an overdue one is
// advanced interval by interval until it lands strictly in the future.
//
// The original handled only After(now) and Before(now); a NextExecutionAt
// exactly equal to now fell through to `return nil`, silently ending the
// schedule. That boundary is now treated as overdue.
// NOTE(review): like the original, this loops forever if the schedule's
// interval duration is zero or negative — confirm upstream validation.
func (s *Schedule) NextAfterPause() *time.Time {
	// if this schedule is only one time, thus no next charge
	if s.NextExecutionAt == nil {
		return nil
	}
	now := time.Now()
	if s.NextExecutionAt.After(now) {
		return s.NextExecutionAt
	}
	next := *s.NextExecutionAt
	for !next.After(now) {
		next = s.NextSince(next)
	}
	return &next
}
|
package api
import (
"net/http"
"time"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/rancher/go-rancher/api"
"github.com/rancher/go-rancher/client"
"github.com/rancher/longhorn-manager/types"
"github.com/rancher/longhorn-manager/util"
)
// ListVolume writes the collection of every volume known to the manager,
// advertising the create endpoint for the "volume" resource type.
func (s *Server) ListVolume(rw http.ResponseWriter, req *http.Request) error {
	apiContext := api.GetApiContext(req)
	volumes, err := s.man.List()
	if err != nil {
		return errors.Wrap(err, "unable to list")
	}
	resp := &client.GenericCollection{
		ResourceType: "volume",
		CreateTypes: map[string]string{
			"volume": apiContext.UrlBuilder.Collection("volume"),
		},
	}
	for _, v := range volumes {
		resp.Data = append(resp.Data, toVolumeResource(v, apiContext))
	}
	apiContext.Write(resp)
	return nil
}
// GetVolume writes the volume named in the URL, or a 404 with an empty
// body when the manager does not know it.
func (s *Server) GetVolume(rw http.ResponseWriter, req *http.Request) error {
	apiContext := api.GetApiContext(req)
	name := mux.Vars(req)["name"]
	volume, err := s.man.Get(name)
	if err != nil {
		return errors.Wrap(err, "unable to get volume")
	}
	if volume == nil {
		rw.WriteHeader(http.StatusNotFound)
		apiContext.Write(&Empty{})
		return nil
	}
	apiContext.Write(toVolumeResource(volume, apiContext))
	return nil
}
// UpdateRecurring replaces the recurring-job schedule of the volume named
// in the URL with the jobs decoded from the request body, then responds
// with the refreshed volume resource.
func (s *Server) UpdateRecurring(rw http.ResponseWriter, req *http.Request) error {
	apiContext := api.GetApiContext(req)
	name := mux.Vars(req)["name"]
	var input RecurringInput
	if err := apiContext.Read(&input); err != nil {
		return errors.Wrap(err, "unable to parse recurring schedule for update")
	}
	// Take the address of each slice element; ranging with `_, job` would
	// alias a single loop variable for every entry.
	jobs := make([]*types.RecurringJob, len(input.Jobs))
	for i := range input.Jobs {
		jobs[i] = &input.Jobs[i]
	}
	if err := s.man.UpdateRecurring(name, jobs); err != nil {
		return errors.Wrap(err, "unable to update volume recurring schedule")
	}
	return s.GetVolume(rw, req)
}
// BgTaskQueue writes the recently finished plus still-queued background
// tasks of the named volume's controller.
func (s *Server) BgTaskQueue(rw http.ResponseWriter, req *http.Request) error {
	apiContext := api.GetApiContext(req)
	name := mux.Vars(req)["name"]
	controller, err := s.man.Controller(name)
	if err != nil {
		return errors.Wrapf(err, "unable to get VolumeBackupOps for volume '%s'", name)
	}
	tasks := append(controller.LatestBgTasks(), controller.BgTaskQueue().List()...)
	apiContext.Write(toBgTaskCollection(tasks))
	return nil
}
// DeleteVolume removes the volume named in the URL via the manager.
func (s *Server) DeleteVolume(rw http.ResponseWriter, req *http.Request) error {
	name := mux.Vars(req)["name"]
	err := s.man.Delete(name)
	if err != nil {
		return errors.Wrap(err, "unable to delete volume")
	}
	return nil
}
// CreateVolume decodes a volume spec from the request body, normalizes it
// via filterCreateVolumeInput, asks the manager to create it, and echoes
// the created resource back to the caller.
func (s *Server) CreateVolume(rw http.ResponseWriter, req *http.Request) error {
	apiContext := api.GetApiContext(req)
	var input Volume
	if err := apiContext.Read(&input); err != nil {
		return err
	}
	info, err := filterCreateVolumeInput(&input)
	if err != nil {
		return errors.Wrap(err, "unable to filter create volume input")
	}
	created, err := s.man.Create(info)
	if err != nil {
		return errors.Wrap(err, "unable to create volume")
	}
	apiContext.Write(toVolumeResource(created, apiContext))
	return nil
}
// filterCreateVolumeInput converts the API-level Volume into the
// manager's VolumeInfo: the human-readable size string is parsed and
// rounded up, and the stale-replica timeout is interpreted as minutes.
func filterCreateVolumeInput(v *Volume) (*types.VolumeInfo, error) {
	size, err := util.ConvertSize(v.Size)
	if err != nil {
		return nil, errors.Wrapf(err, "error converting size '%s'", v.Size)
	}
	info := &types.VolumeInfo{
		Name:                v.Name,
		Size:                util.RoundUpSize(size),
		BaseImage:           v.BaseImage,
		FromBackup:          v.FromBackup,
		NumberOfReplicas:    v.NumberOfReplicas,
		StaleReplicaTimeout: time.Duration(v.StaleReplicaTimeout) * time.Minute,
	}
	return info, nil
}
// AttachVolume attaches the volume named in the URL and responds with its
// refreshed state.
func (s *Server) AttachVolume(rw http.ResponseWriter, req *http.Request) error {
	name := mux.Vars(req)["name"]
	if err := s.man.Attach(name); err != nil {
		return errors.Wrap(err, "unable to attach volume")
	}
	return s.GetVolume(rw, req)
}
// DetachVolume detaches the volume named in the URL and responds with its
// refreshed state.
func (s *Server) DetachVolume(rw http.ResponseWriter, req *http.Request) error {
	name := mux.Vars(req)["name"]
	if err := s.man.Detach(name); err != nil {
		return errors.Wrap(err, "unable to detach volume")
	}
	return s.GetVolume(rw, req)
}
// ReplicaRemove removes the replica named in the request body from the
// volume named in the URL, then responds with the refreshed volume.
//
// Fixes: the decode-failure message was ungrammatical ("error read
// replicaRemoveInput"), and Wrapf was used with no format arguments where
// Wrap is the intended call (flagged by vet's printf checker for
// pkg/errors).
func (s *Server) ReplicaRemove(rw http.ResponseWriter, req *http.Request) error {
	var input ReplicaRemoveInput
	apiContext := api.GetApiContext(req)
	if err := apiContext.Read(&input); err != nil {
		return errors.Wrap(err, "unable to read replicaRemoveInput")
	}
	id := mux.Vars(req)["name"]
	if err := s.man.ReplicaRemove(id, input.Name); err != nil {
		return errors.Wrap(err, "unable to remove replica")
	}
	return s.GetVolume(rw, req)
}
|
/**
*@Author: haoxiongxiao
*@Date: 2019/3/20
*@Description: CREATE GO FILE admin
*/
package admin
import (
"bysj/models"
"bysj/services"
"github.com/kataras/iris"
"github.com/spf13/cast"
)
// OrderController serves the admin order endpoints. Ctx is injected by
// iris per request; Service performs the actual order operations; the
// embedded Common supplies the shared response helpers used below
// (ReturnJson, ReturnSuccess).
type OrderController struct {
	Ctx     iris.Context
	Service *services.OrderService
	Common
}
// NewOrderController wires an OrderController to its backing service.
func NewOrderController() *OrderController {
	ctrl := &OrderController{}
	ctrl.Service = services.NewOrderService()
	return ctrl
}
// PostList decodes paging parameters from the request body into a
// PageResult and asks the service to fill it.
// NOTE(review): a JSON decode error is silently swallowed and a nil
// result returned — confirm this is intentional.
func (c *OrderController) PostList() (result *models.PageResult) {
	if err := c.Ctx.ReadJSON(&result); err != nil {
		return
	}
	c.Service.List(result)
	return
}
// PostUpdate decodes an arbitrary field map from the body and applies it
// through the service. Error codes: 10001 for decode failures, 10002 for
// update failures; success otherwise.
func (c *OrderController) PostUpdate() {
	payload := make(map[string]interface{})
	if err := c.Ctx.ReadJSON(&payload); err != nil {
		c.ReturnJson(10001, cast.ToString(err))
		return
	}
	if err := c.Service.Update(payload); err != nil {
		c.ReturnJson(10002, "更新失败")
		return
	}
	c.ReturnSuccess()
}
// PostDelete decodes a map of ID lists from the body and deletes the
// referenced orders. Error codes: 10001 for decode failures, 10002 for
// delete failures; success otherwise.
func (c *OrderController) PostDelete() {
	payload := make(map[string][]uint)
	if err := c.Ctx.ReadJSON(&payload); err != nil {
		c.ReturnJson(10001, cast.ToString(err))
		return
	}
	if err := c.Service.Delete(payload); err != nil {
		c.ReturnJson(10002, cast.ToString(err))
		return
	}
	c.ReturnSuccess()
}
|
package main
import (
"fmt"
"math"
)
// Converter bundles unit-conversion helpers for distance, time,
// temperature, angle, and mass.
//
// Every method body originally returned an expression of the INPUT named
// type where the OUTPUT named type was declared (e.g. a Feet value where
// Centimeter was the return type), which does not compile in Go; explicit
// conversions fix that throughout. Two formulas were also wrong and are
// corrected below (RadianToDegree, DegreeToRadian).
type Converter struct{}

// Feet measures distance
type Feet float64

// Centimeter measures distance
type Centimeter float64

// Minutes measures time
type Minutes float64

// Seconds measures time
type Seconds float64

// Celsius measures temperature
type Celsius float64

// Farenheit measures temperature
type Farenheit float64

// Radian measures angle
type Radian float64

// Degree measures angle
type Degree float64

// Kilogram measures mass
type Kilogram float64

// Pounds measures mass
type Pounds float64

// FeetToCentimeter converts feet to centimeter (1 cm = 0.0328084 ft).
func (cvr Converter) FeetToCentimeter(c Feet) Centimeter {
	return Centimeter(float64(c) / 0.0328084)
}

// CentimeterToFeet converts Centimeter to Feet
func (cvr Converter) CentimeterToFeet(c Centimeter) Feet {
	return Feet(float64(c) * 0.0328084)
}

// MinutesToSeconds converts Minutes to Seconds
func (cvr Converter) MinutesToSeconds(c Minutes) Seconds {
	return Seconds(float64(c) * 60)
}

// SecondsToMinutes converts Seconds to Minutes
func (cvr Converter) SecondsToMinutes(c Seconds) Minutes {
	return Minutes(float64(c) / 60)
}

// CelsiusToFarenheit converts Celsius to Farenheit
func (cvr Converter) CelsiusToFarenheit(c Celsius) Farenheit {
	return Farenheit(float64(c)*9/5 + 32)
}

// FarenheitToCelsius converts Farenheit to Celsius
func (cvr Converter) FarenheitToCelsius(c Farenheit) Celsius {
	return Celsius((float64(c) - 32) * 5 / 9)
}

// RadianToDegree converts Radian to Degree. The original multiplied by
// math.Pi*180; the correct formula is deg = rad * 180 / Pi.
func (cvr Converter) RadianToDegree(c Radian) Degree {
	return Degree(float64(c) * 180 / math.Pi)
}

// DegreeToRadian converts Degree to Radian. The original divided by
// math.Pi*180; the correct formula is rad = deg * Pi / 180.
func (cvr Converter) DegreeToRadian(c Degree) Radian {
	return Radian(float64(c) * math.Pi / 180)
}

// KilogramToPounds converts Kilogram to Pounds (1 kg = 2.205 lb).
func (cvr Converter) KilogramToPounds(c Kilogram) Pounds {
	return Pounds(float64(c) * 2.205)
}

// PoundsToKilogram converts Pounds to Kilogram
func (cvr Converter) PoundsToKilogram(c Pounds) Kilogram {
	return Kilogram(float64(c) / 2.205)
}
// main prints a sample of every conversion.
//
// The original invoked the converters as method expressions on the type
// (Converter.FeetToCentimeter(10)); a method expression's first argument
// is the receiver, so those calls are missing an argument and do not
// compile. Calling the methods on a Converter value fixes that.
func main() {
	conv := Converter{}
	fmt.Println(conv.FeetToCentimeter(10))
	fmt.Println(conv.CentimeterToFeet(10))
	fmt.Println(conv.MinutesToSeconds(10))
	fmt.Println(conv.SecondsToMinutes(10))
	fmt.Println(conv.CelsiusToFarenheit(10))
	fmt.Println(conv.FarenheitToCelsius(10))
	fmt.Println(conv.RadianToDegree(10))
	fmt.Println(conv.DegreeToRadian(10))
	fmt.Println(conv.KilogramToPounds(10))
	fmt.Println(conv.PoundsToKilogram(10))
}
|
package main
import (
"github.com/julianshen/gopttcrawler"
"log"
)
// main lists the newest articles of the PTT "Beauty" board, then loads
// the previous page and prints each article's content and image URLs.
//
// The original discarded the error from GetArticles, so a failed fetch
// would nil-dereference alist below; it now panics like the second call.
func main() {
	alist, err := gopttcrawler.GetArticles("Beauty", 0)
	if err != nil {
		panic(err)
	}
	for _, a := range alist.Articles {
		log.Println(a.Title)
	}
	nextpage, err := alist.GetFromPreviousPage()
	if err != nil {
		panic(err)
	}
	for _, a := range nextpage.Articles {
		a.Load()
		log.Println(a.Content)
		log.Println(a.GetImageUrls())
	}
}
|
package main
import (
"bufio"
"fmt"
"html"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
"os/exec"
"github.com/bitly/go-simplejson"
"github.com/boltdb/bolt"
)
var bucketName = "BGMReport"
// HandleError aborts the process via log.Fatal when err is non-nil;
// a nil err is a no-op.
func HandleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
// main runs the BGM report web server: it opens a bolt database of OAuth
// tokens, reads the app id/secret from config.txt, and serves two
// endpoints — /report/<username> (renders a report via an external
// script) and /callback (BGM OAuth redirect target).
func main() {
	// Database
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
		return
	}
	defer db.Close()
	// Create bucket
	// NOTE(review): the error returned by db.Update itself is ignored;
	// only the inner CreateBucketIfNotExists error is fatal.
	db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte(bucketName))
		if err != nil {
			log.Fatal(fmt.Errorf("create bucket: %s", err))
		}
		return nil
	})
	// Read config: line 1 = app id, line 2 = secret; extra lines ignored.
	// NOTE(review): configFile is never closed.
	appid := ""
	secretid := ""
	configFile, err := os.Open("config.txt")
	HandleError(err)
	scanner := bufio.NewScanner(configFile)
	i := 0
	for scanner.Scan() {
		switch i {
		case 0:
			appid = scanner.Text()
		case 1:
			secretid = scanner.Text()
		default:
			// NOTE(review): `break` here exits the switch, not the loop —
			// this is a no-op; remaining lines are simply skipped.
			break
		}
		i++
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	log.Printf("Appid: %s, Secretid: %s", appid, secretid)
	// Handler for report: the username is the last URL path segment and is
	// handed to a local script as an argv element (no shell involved).
	// NOTE(review): the username is untrusted input — confirm the script
	// tolerates hostile values.
	ReportHandler := func(w http.ResponseWriter, r *http.Request) {
		pathSlices := strings.Split(html.EscapeString(r.URL.Path), "/")
		username := pathSlices[len(pathSlices)-1]
		q := r.URL.Query()
		year := q.Get("year")
		if year == "" {
			year = "2018"
		}
		// Call python script
		cmd := exec.Command("./bangumi_report.py", "-u", username, "-o", "-q", "-y", year)
		cmd.Dir = ".."
		out, err := cmd.CombinedOutput()
		if err == nil {
			fmt.Fprint(w, string(out))
		} else {
			// fmt.Fprint(w, "Error occurs\n" + err.Error() + "\n" + string(out))
			fmt.Fprint(w, "Error occurs\n" + err.Error())
		}
	}
	// Handler for BGM callback: exchanges the auth code for a token,
	// resolves the user name, stores the token JSON keyed by username,
	// then redirects to the report page.
	CallbackHandler := func(w http.ResponseWriter, r *http.Request) {
		// Get access code
		q := r.URL.Query()
		code := q.Get("code")
		resp, err := http.PostForm("https://bgm.tv/oauth/access_token",
			url.Values{
				"grant_type": {"authorization_code"},
				"client_id": {appid},
				"client_secret": {secretid},
				"code": {code},
				"redirect_uri": {"http://bgm.xiadong.info:8080/callback"},
			})
		HandleError(err)
		body, err := ioutil.ReadAll(resp.Body)
		HandleError(err)
		resp.Body.Close()
		json, err := simplejson.NewJson(body)
		HandleError(err)
		// Modify expire time: store the absolute expiry instant instead of
		// the relative expires_in seconds.
		expireTime := json.Get("expires_in").MustInt64()
		json.Set("expires_in", expireTime+time.Now().Unix())
		uid := fmt.Sprintf("%d", json.Get("user_id").MustInt())
		// Get username
		resp, err = http.Get("https://api.bgm.tv/user/" + uid)
		HandleError(err)
		body, err = ioutil.ReadAll(resp.Body)
		HandleError(err)
		resp.Body.Close()
		userinfo, err := simplejson.NewJson(body)
		HandleError(err)
		username := userinfo.Get("username").MustString()
		// Add to data base
		// NOTE(review): db.Update's returned error is ignored here too.
		db.Update(func(tx *bolt.Tx) error {
			b := tx.Bucket([]byte(bucketName))
			value, err := json.MarshalJSON()
			HandleError(err)
			err = b.Put([]byte(username), value)
			return err
		})
		// Redirect (301 == http.StatusMovedPermanently)
		http.Redirect(w, r, "/report/"+username, 301)
	}
	// Register handlers
	http.HandleFunc("/report/", ReportHandler)
	http.HandleFunc("/callback", CallbackHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
|
// ˅
package main
// ˄
// Command is an interpreter node: Parse consumes one token and delegates
// the rest of parsing to either a Repeat node or an Action node.
// The ˅/˄ pairs are editable-region markers left by the generating tool;
// they must be preserved.
type Command struct {
	// ˅
	// ˄
	node INode
	// ˅
	// ˄
}

// NewCommand returns an empty Command; node is populated by Parse.
func NewCommand() *Command {
	// ˅
	return &Command{}
	// ˄
}

// Parse inspects the current token: "repeat" yields a Repeat child,
// anything else an Action child built from the token; parsing then
// continues inside that child.
func (self *Command) Parse(context *Context) {
	// ˅
	if context.GetToken() == "repeat" {
		self.node = NewRepeat()
	} else {
		self.node = NewAction(context.GetToken())
	}
	self.node.Parse(context)
	// ˄
}

// ToString renders the parsed subtree by delegating to the child node.
func (self *Command) ToString() string {
	// ˅
	return self.node.ToString()
	// ˄
}
// ˅
// ˄
|
package blob
import (
"context"
"gocloud.dev/blob"
"io"
)
// Bucket embeds the original blob.Bucket for providing compatible storage.Storage interface methods
// (Download and Upload below adapt blob.Bucket's reader/writer API).
type Bucket struct{ *blob.Bucket }
// Download provides compatible Download method of the storage.Storage interface.
// The caller must close the returned reader.
//
// On failure a literal nil interface is returned: the original returned
// NewReader's typed-nil *blob.Reader directly, which becomes a NON-nil
// io.ReadCloser interface value and can mislead callers that check the
// reader instead of the error.
func (b *Bucket) Download(ctx context.Context, path string) (io.ReadCloser, error) {
	r, err := b.Bucket.NewReader(ctx, path, nil)
	if err != nil {
		return nil, err
	}
	return r, nil
}
// Upload provides compatible Upload method of the storage.Storage interface.
//
// A gocloud blob.Writer only commits the object when Close returns nil,
// so Close's error must be surfaced: the original deferred Close and
// discarded its error, reporting success for uploads that never landed.
func (b *Bucket) Upload(ctx context.Context, body io.Reader, path string) error {
	w, err := b.Bucket.NewWriter(ctx, path, nil)
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, body); err != nil {
		// Best-effort cleanup; the copy error is the one worth reporting.
		w.Close()
		return err
	}
	return w.Close()
}
|
//
// Package simpleflag is useful for creating command line Go applications.
//
// Limitations
//
// No arguments are managed, only flags.
// The App must have subcommands.
//
// Configuration
//
// App is the main structure of the cli application.
// The App has a list of Commands.
// Each command has a list of Flags.
// Each flag has a flag.Value and comma separated alternatives names.
//
// Flag of type Bool, Int, String and Strings are defined.
// Bool, Int, String have the Passed field, indicating if the flag was
// setted in the command line.
//
// Example
//
// Example of a configuration of simple "myapp" cli application,
// with a single "get" command.
//
// type myappArgs struct {
// config simpleflag.String
// workers simpleflag.Int
// dryrun simpleflag.Bool
// items simpleflag.Strings
// }
//
// args := myappArgs{}
//
// app := &simpleflag.App{
// Name: "myapp",
// Usage: "myapp <command>",
// Commands: []*simpleflag.Command{
// &simpleflag.Command{
// Names: "get,g",
// Usage: "myapp get [options]",
// Flags: []*simpleflag.Flag{
// {Value: &args.config, Names: "c,config"},
// {Value: &args.workers, Names: "w,workers"},
// {Value: &args.dryrun, Names: "n,dryrun,dry-run"},
// {Value: &args.items, Names: "i,items"},
// },
// },
// },
// }
//
// Usage
//
// First App.Parse function parses the arguments list.
//
// Then the App.CommandName method returns the name of the command invoked.
package simpleflag
import (
"flag"
"fmt"
"io"
"os"
"strings"
)
// App is the main structure of a cli application.
// The App has a list of Commands.
// Each command has a list of Flags.
type App struct {
	Name          string // name of the program
	Usage         string // printed as it is, without further manipulations
	Commands      []*Command
	Writer        io.Writer // nil means stderr; use Output() accessor
	ErrorHandling flag.ErrorHandling
	// Command invoked by command line arguments.
	// Set by Parse (if no error is returned).
	invoked *Command
}

// Command represents an application (sub-)command.
type Command struct {
	// Names contains the various names of the command,
	// separated by a comma (",") with no spaces.
	// The first name is the main name of the command.
	// The other names, if present, are aliases.
	Names string
	// Usage string of the Command.
	// It is printed as it is, without further manipulations.
	Usage string
	// Flags of the command.
	Flags []*Flag
}

// A Flag represents the state of a flag.
// All of a flag's aliases share the same underlying Value.
type Flag struct {
	Value flag.Value // value as set
	Names string     // comma separated aliases of the flag
}
// CommandName returns the Name of the command invoked in the command line.
// It returns an empty string in case no command was selected.
func (app *App) CommandName() string {
	cmd := app.invoked
	if cmd == nil {
		return ""
	}
	return cmd.Name()
}
// Name returns the first name of the command.
// It is the main name of the command, returned by App.CommandName().
func (cmd *Command) Name() string {
	parts := strings.SplitN(cmd.Names, ",", 2)
	return parts[0]
}
// Output returns the destination for usage and error messages.
// os.Stderr is returned if output was not set or was set to nil.
func (app *App) Output() io.Writer {
	if w := app.Writer; w != nil {
		return w
	}
	return os.Stderr
}
// findCommandByName looks up the command whose name or alias matches
// name exactly; it returns nil when nothing matches.
func (app *App) findCommandByName(name string) *Command {
	for _, cmd := range app.Commands {
		aliases := strings.Split(cmd.Names, ",")
		for _, candidate := range aliases {
			if candidate == name {
				return cmd
			}
		}
	}
	return nil
}
// usageFailf prints to app.Output a formatted error and usage message and
// returns the error. Mirroring flag.FlagSet semantics, PanicOnError
// panics with the error and ExitOnError exits with status 2 instead of
// returning.
func (app *App) usageFailf(format string, v ...interface{}) error {
	err := fmt.Errorf(format, v...)
	out := app.Output()
	fmt.Fprintln(out, err)
	fmt.Fprintln(out, app.Usage)
	if app.ErrorHandling == flag.PanicOnError {
		panic(err)
	}
	if app.ErrorHandling == flag.ExitOnError {
		os.Exit(2)
	}
	return err
}
// Parse parses flag definitions from the argument list
// which should not include the command name.
// Must be called after all flags in the FlagSet are defined
// and before flags are accessed by the program.
//
// A leading "-" argument means flags were given without a subcommand, in
// which case they are parsed against the app-level usage. Otherwise the
// first argument selects a command; app.invoked is set only on success.
//
// Fix: `arguments == nil || len(arguments) == 0` collapsed to
// `len(arguments) == 0` — len of a nil slice is 0, so the nil test was
// redundant (staticcheck S1009).
func (app *App) Parse(arguments []string) error {
	// reset the requested command
	app.invoked = nil
	if len(arguments) == 0 {
		return app.usageFailf("no arguments")
	}
	out := app.Output()
	initFlagSet := func(name, usage string) *flag.FlagSet {
		fs := flag.NewFlagSet(name, app.ErrorHandling)
		fs.SetOutput(out)
		fs.Usage = func() {
			fmt.Fprintln(out, usage)
		}
		return fs
	}
	cmdName := arguments[0]
	if strings.HasPrefix(cmdName, "-") {
		// TODO: make app like command interface and use (cmd *Command) FlagSet
		fs := initFlagSet("", app.Usage)
		return fs.Parse(arguments)
	}
	cmd := app.findCommandByName(cmdName)
	if cmd == nil {
		return app.usageFailf("unknown command %q", cmdName)
	}
	fs := cmd.FlagSet(out)
	err := fs.Parse(arguments[1:])
	if err == nil {
		// save the requested command
		app.invoked = cmd
	}
	return err
}
// FlagSet returns a *flag.FlagSet based on command Flags.
// Every alias of every simpleflag.Flag is registered against the same
// underlying flag.Value, so setting any alias updates the shared value.
func (cmd *Command) FlagSet(out io.Writer) *flag.FlagSet {
	fs := flag.NewFlagSet(cmd.Name(), flag.ContinueOnError)
	fs.SetOutput(out)
	fs.Usage = func() {
		fmt.Fprintln(fs.Output(), cmd.Usage)
	}
	// populate FlagSet variables with command options
	for _, opt := range cmd.Flags {
		aliases := strings.Split(opt.Names, ",")
		for _, alias := range aliases {
			fs.Var(opt.Value, alias, "")
		}
	}
	return fs
}
|
// Package ciolite is the Golang client library for the Lite Context.IO API
package ciolite
//go:generate mockgen -source ciolite.go -destination ciolite_mock.go -package ciolite
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"hash"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"time"
)
const (
	// DefaultHost is the default host of CIO Lite API
	DefaultHost = "https://api.context.io"
	// DefaultRequestTimeout is the default timeout duration used on HTTP requests
	DefaultRequestTimeout = 120 * time.Second
)
// CioLite struct contains the api key and secret, along with an optional logger,
// and provides convenience functions for accessing all CIO Lite endpoints.
// The zero value is not usable; construct instances with NewCioLite (or
// NewTestCioLiteServer in tests).
type CioLite struct {
	// apiKey/apiSecret authenticate requests; apiSecret also keys the
	// HMAC used by ValidateCallback.
	apiKey    string
	apiSecret string
	Host      string
	// Allow setting your own *http.Client, otherwise default is client with DefaultRequestTimeout
	HTTPClient *http.Client
	// PreRequestHook is a function (mostly for logging) that will be executed
	// before the request is made.
	// Its arguments are:
	// User ID (if present),
	// Account Label (if present),
	// Method (GET/POST/etc),
	// URL,
	// redacted body values.
	PreRequestHook func(string, string, string, string, url.Values)
	// PostRequestShouldRetryHook is a function (mostly for logging) that will be
	// executed after each request is made, and will be called at least once.
	// Its arguments are:
	// request Attempt # (starts at 1),
	// User ID (if present),
	// Account Label (if present),
	// Method (GET/POST/etc),
	// URL,
	// response Status Code,
	// response Payload,
	// time at start of most recent attempt,
	// time at start of all attempts,
	// any error received while attempting this request.
	// The returned boolean is whether this request should be retried or not, which
	// if False then this is the last call of this function, but if True means this
	// function will be called again.
	PostRequestShouldRetryHook func(int, string, string, string, string, int, string, time.Time, time.Time, error) bool
	// ResponseBodyCloseErrorHook is a function (purely for logging) that will
	// execute if there is an error closing the response body.
	ResponseBodyCloseErrorHook func(error)
}
// NewCioLite returns a CIO Lite struct (without a logger) for accessing the CIO Lite API.
// The client targets DefaultHost and uses an http.Client with
// DefaultRequestTimeout.
func NewCioLite(key string, secret string) CioLite {
	cio := CioLite{apiKey: key, apiSecret: secret}
	cio.Host = DefaultHost
	cio.HTTPClient = &http.Client{Timeout: DefaultRequestTimeout}
	return cio
}
// Interface is just to help generate a mocked client, for testing elsewhere.
// mockgen -source=ciolite.go -destination=ciolite_mock.go -package ciolite
// Methods are grouped by the API area they cover; each group mirrors a
// set of CIO Lite endpoints implemented on CioLite.
type Interface interface {
	// Callback validation and status callback URL management.
	ValidateCallback(token string, signature string, timestamp int) bool
	GetStatusCallbackURL() (GetStatusCallbackURLResponse, error)
	CreateStatusCallbackURL(formValues CreateStatusCallbackURLParams) (CreateDeleteStatusCallbackURLResponse, error)
	DeleteStatusCallbackURL() (CreateDeleteStatusCallbackURLResponse, error)
	// Application-level connect tokens.
	GetConnectTokens() ([]GetConnectTokenResponse, error)
	GetConnectToken(token string) (GetConnectTokenResponse, error)
	CreateConnectToken(formValues CreateConnectTokenParams) (CreateConnectTokenResponse, error)
	DeleteConnectToken(token string) (DeleteConnectTokenResponse, error)
	CheckConnectToken(connectToken GetConnectTokenResponse, email string) error
	// Discovery and OAuth providers.
	GetDiscovery(queryValues GetDiscoveryParams) (GetDiscoveryResponse, error)
	GetOAuthProviders() ([]GetOAuthProvidersResponse, error)
	GetOAuthProvider(key string) (GetOAuthProvidersResponse, error)
	CreateOAuthProvider(formValues CreateOAuthProviderParams) (CreateOAuthProviderResponse, error)
	DeleteOAuthProvider(key string) (DeleteOAuthProviderResponse, error)
	// Per-user and per-email-account connect tokens.
	GetUserConnectTokens(userID string) ([]GetConnectTokenResponse, error)
	GetUserConnectToken(userID string, token string) (GetConnectTokenResponse, error)
	CreateUserConnectToken(userID string, formValues CreateConnectTokenParams) (CreateConnectTokenResponse, error)
	DeleteUserConnectToken(userID string, token string) (DeleteConnectTokenResponse, error)
	GetUserEmailAccountConnectTokens(userID string, label string) ([]GetConnectTokenResponse, error)
	GetUserEmailAccountConnectToken(userID string, label string, token string) (GetConnectTokenResponse, error)
	CreateUserEmailAccountConnectToken(userID string, label string, formValues CreateConnectTokenParams) (CreateConnectTokenResponse, error)
	DeleteUserEmailAccountConnectToken(userID string, label string, token string) (DeleteConnectTokenResponse, error)
	// Folder messages: attachments, bodies, flags, headers, raw content.
	GetUserEmailAccountsFolderMessageAttachments(userID string, label string, folder string, messageID string, queryValues EmailAccountFolderDelimiterParam) ([]GetUserEmailAccountsFolderMessageAttachmentsResponse, error)
	GetUserEmailAccountsFolderMessageAttachment(userID string, label string, folder string, messageID string, attachmentID string, queryValues GetUserEmailAccountsFolderMessageAttachmentParam) (GetUserEmailAccountsFolderMessageAttachmentsResponse, error)
	GetUserEmailAccountsFolderMessageBody(userID string, label string, folder string, messageID string, queryValues GetUserEmailAccountsFolderMessageBodyParams) ([]GetUserEmailAccountsFolderMessageBodyResponse, error)
	GetUserEmailAccountsFolderMessageFlags(userID string, label string, folder string, messageID string, queryValues EmailAccountFolderDelimiterParam) (GetUserEmailAccountsFolderMessageFlagsResponse, error)
	GetUserEmailAccountsFolderMessageHeaders(userID string, label string, folder string, messageID string, queryValues GetUserEmailAccountsFolderMessageHeadersParams) (GetUserEmailAccountsFolderMessageHeadersResponse, error)
	GetUserEmailAccountsFolderMessageRaw(userID string, label string, folder string, messageID string, queryValues EmailAccountFolderDelimiterParam) (GetUserEmailAccountsFolderMessageRawResponse, error)
	MarkUserEmailAccountsFolderMessageRead(userID string, label string, folder string, messageID string, formValues EmailAccountFolderDelimiterParam) (UserEmailAccountsFolderMessageReadResponse, error)
	MarkUserEmailAccountsFolderMessageUnRead(userID string, label string, folder string, messageID string, formValues EmailAccountFolderDelimiterParam) (UserEmailAccountsFolderMessageReadResponse, error)
	GetUserEmailAccountsFolderMessages(userID string, label string, folder string, queryValues GetUserEmailAccountsFolderMessageParams) ([]GetUsersEmailAccountFolderMessagesResponse, error)
	GetUserEmailAccountFolderMessage(userID string, label string, folder string, messageID string, queryValues GetUserEmailAccountsFolderMessageParams) (GetUsersEmailAccountFolderMessagesResponse, error)
	MoveUserEmailAccountFolderMessage(userID string, label string, folder string, messageID string, queryValues MoveUserEmailAccountFolderMessageParams) (MoveUserEmailAccountFolderMessageResponse, error)
	// Folder management.
	GetUserEmailAccountsFolders(userID string, label string, queryValues GetUserEmailAccountsFoldersParams) ([]GetUsersEmailAccountFoldersResponse, error)
	GetUserEmailAccountFolder(userID string, label string, folder string, queryValues EmailAccountFolderDelimiterParam) (GetUsersEmailAccountFoldersResponse, error)
	CreateUserEmailAccountFolder(userID string, label string, folder string, formValues EmailAccountFolderDelimiterParam) (CreateEmailAccountFolderResponse, error)
	SafeCreateUserEmailAccountFolder(userID string, label string, folder string, formValues EmailAccountFolderDelimiterParam) (bool, error)
	// Account-wide messages and email-account management.
	GetUserEmailAccountsMessages(userID string, label string, queryValues GetUserEmailAccountsMessageParams) ([]GetUsersEmailAccountMessagesResponse, error)
	GetUserEmailAccountMessage(userID string, label string, messageID string, queryValues GetUserEmailAccountsMessageParams) (GetUsersEmailAccountMessagesResponse, error)
	GetUserEmailAccounts(userID string, queryValues GetUserEmailAccountsParams) ([]GetUsersEmailAccountsResponse, error)
	GetUserEmailAccount(userID string, label string) (GetUsersEmailAccountsResponse, error)
	CreateUserEmailAccount(userID string, formValues CreateUserParams) (CreateEmailAccountResponse, error)
	ModifyUserEmailAccount(userID string, label string, formValues ModifyUserEmailAccountParams) (ModifyEmailAccountResponse, error)
	DeleteUserEmailAccount(userID string, label string) (DeleteEmailAccountResponse, error)
	// Per-user webhooks.
	GetUserWebhooks(userID string) ([]GetUsersWebhooksResponse, error)
	GetUserWebhook(userID string, webhookID string) (GetUsersWebhooksResponse, error)
	CreateUserWebhook(userID string, formValues CreateUserWebhookParams) (CreateUserWebhookResponse, error)
	ModifyUserWebhook(userID string, webhookID string, formValues ModifyUserWebhookParams) (ModifyWebhookResponse, error)
	DeleteUserWebhookAccount(userID string, webhookID string) (DeleteWebhookResponse, error)
	// User management.
	GetUsers(queryValues GetUsersParams) ([]GetUsersResponse, error)
	GetUser(userID string) (GetUsersResponse, error)
	CreateUser(formValues CreateUserParams) (CreateUserResponse, error)
	ModifyUser(userID string, formValues ModifyUserParams) (ModifyUserResponse, error)
	DeleteUser(userID string) (DeleteUserResponse, error)
	// Application-level webhooks.
	GetWebhooks() ([]GetUsersWebhooksResponse, error)
	GetWebhook(webhookID string) (GetUsersWebhooksResponse, error)
	CreateWebhook(formValues CreateUserWebhookParams) (CreateUserWebhookResponse, error)
	ModifyWebhook(webhookID string, formValues ModifyUserWebhookParams) (ModifyWebhookResponse, error)
	DeleteWebhookAccount(webhookID string) (DeleteWebhookResponse, error)
}
// NewTestCioLiteServer is a convenience function that returns a CioLite object
// and a *httptest.Server (which must be closed when done being used).
// The CioLite instance will hit the test server for all requests.
func NewTestCioLiteServer(handler http.Handler) (CioLite, *httptest.Server) {
	server := httptest.NewServer(handler)
	client := CioLite{
		Host:       server.URL,
		HTTPClient: &http.Client{Timeout: 5 * time.Second},
	}
	return client, server
}
// ValidateCallback returns true if this Webhook Callback or User Account Status Callback authenticates
func (cio CioLite) ValidateCallback(token string, signature string, timestamp int) bool {
	// Hash timestamp and token with secret, compare to signature.
	message := strconv.Itoa(timestamp) + token
	hash := hashHmac(sha256.New, message, cio.apiSecret)
	// BUGFIX: use a constant-time comparison (hmac.Equal) instead of ==
	// so the check does not leak timing information about the expected
	// signature. crypto/hmac is already imported for hashHmac.
	return len(hash) > 0 && hmac.Equal([]byte(signature), []byte(hash))
}
// hashHmac returns the hash of a message hashed with the provided hash function, using the provided secret
func hashHmac(hashAlgorithm func() hash.Hash, message string, secret string) string {
h := hmac.New(hashAlgorithm, []byte(secret))
if _, err := h.Write([]byte(message)); err != nil {
panic("hash.Hash unable to write message bytes, with error: " + err.Error())
}
return hex.EncodeToString(h.Sum(nil))
}
|
package dictionary
// import(
// "github.com/Evedel/fortify/src/say"
// )
// ruleOperand parses ttail as the right-hand side of a math expression,
// consuming tokens until a carriage return or the end of the slice. It
// builds a TokenNode tree of spaces, variables/static values, bracketed
// sub-expressions and binary operators.
//
// Returns:
//   resCode - Ok on success, otherwise an error code
//             (MissedRoundBracketClose, MissedRoundBracketOpen,
//             UnexpectedArgument, UndefinedError)
//   stopInd - index of the last token consumed
//   resNode - root of the parsed subtree
//   errmsg  - human-readable error description, prefixed "ruleOperand: "
//
// NOTE(review): the close-bracket scan finds the FIRST RoundBracketClose,
// so nested brackets are not matched by depth — confirm nesting is
// unsupported by the grammar.
func ruleOperand(ttail []Token) (resCode int, stopInd int, resNode TokenNode, errmsg string) {
	errmsg = "ruleOperand: "
	resCode = UndefinedError
	stopInd = 0
	index := 0
	chStopIndx := 0
	rhs := TokenNodeRightHS()
	for index < len(ttail) {
		tokenid := ttail[index].Id
		tokenvalstr := ttail[index].Value
		if tokenid == CarriageReturn {
			// End of line terminates the expression.
			stopInd = index - 1
			resNode = rhs
			resCode = Ok
			return
		} else if tokenid == Space {
			rhs.List = append(rhs.List, TokenNodeSpace())
		} else if tokenid == RoundBracketOpen {
			// Locate the close bracket for the sub-expression.
			iStop := -1
			iTmp := index
			for iTmp < len(ttail) {
				if ttail[iTmp].Id == RoundBracketClose {
					iStop = iTmp
					iTmp = len(ttail)
				}
				iTmp++
			}
			if iStop == -1 {
				resCode = MissedRoundBracketClose
				errmsg += "Missed close round bracket"
				return
			}
			rhs.List = append(rhs.List, TokenNodeRoundBrackets())
			rhsInside := TokenNode{}
			resCode, chStopIndx, rhsInside, errmsg = ruleOperand(ttail[index+1 : iStop])
			// BUGFIX: propagate errors from the recursive parse instead of
			// silently continuing with a partial tree.
			if resCode != Ok {
				return
			}
			index += chStopIndx + 1
			// BUGFIX: attach the inner expression to the bracket node just
			// appended (the last element); the original appended to
			// rhs.List[0].List, which is a different node once any token
			// precedes the bracket.
			rhs.List[len(rhs.List)-1].List = append(rhs.List[len(rhs.List)-1].List, rhsInside)
		} else if (tokenid == Addition) ||
			(tokenid == Substraction) ||
			(tokenid == Multiplication) ||
			(tokenid == Division) {
			// Binary operator: everything parsed so far becomes the left
			// operand; the rest of the tail is parsed recursively as the
			// right operand.
			resNode = TokenNodeRightHS()
			operator := TokenNodeFromToken(ttail[index])
			lhs := TokenNodeRHS2LHS(rhs)
			operator.List = append([]TokenNode{}, lhs)
			resCode, chStopIndx, rhs, errmsg = ruleOperand(ttail[index+1:])
			stopInd = index + chStopIndx + 1
			operator.List = append(operator.List, rhs)
			resNode.List = append(resNode.List, operator)
			return
		} else if tokenid == Word {
			// Either a known variable name or a static literal.
			if _, ok := Variables[tokenvalstr]; ok {
				rhs.List = append(rhs.List, TokenNodeVarId(tokenvalstr))
			} else {
				rhs.List = append(rhs.List, typeStatic(ttail[index]))
			}
		} else if tokenid == RoundBracketClose {
			resCode = MissedRoundBracketOpen
			errmsg += "Missed open round bracket"
			return
		} else {
			resCode = UnexpectedArgument
			errmsg += "Unexpected symbol in math expression: <|" + ttail[index].IdName + "|><|" + tokenvalstr + "|>"
			return
		}
		index += 1
	}
	// Ran out of tokens without a carriage return: accept what was parsed.
	stopInd = index
	resNode = rhs
	resCode = Ok
	return
}
|
package personages
import (
"core/sessions"
"fmt"
"net/http"
"qutils/basehandlers"
"qutils/coder"
)
// CreatePersonageHandler creates a new personage for the authenticated
// account. It requires a valid session and a JSON PersonageRequest body.
func CreatePersonageHandler(resp http.ResponseWriter, req *http.Request) {
	defer req.Body.Close()
	session, ok := sessions.GetSessionByRequest(req)
	if !ok {
		basehandlers.UnauthorizedRequest(resp, req)
		return
	}
	data := PersonageRequest{}
	decodingErr := coder.DecodeJson(req.Body, &data)
	if decodingErr != nil {
		basehandlers.JsonUnmarshallingError(resp, req)
		// BUGFIX: stop here; previously execution fell through and tried to
		// register a personage from an empty/partial request, and wrote a
		// second response after the error response.
		return
	}
	registrationError := registerPersonage(session, data)
	if registrationError != nil {
		// Some error occurred during inserting.
		basehandlers.InternalError(resp, req)
		return
	}
	basehandlers.SuccessResponse(resp, req)
}
// GetOwnPersonagesHandler returns, as JSON, the personages that belong to
// the authenticated account.
func GetOwnPersonagesHandler(resp http.ResponseWriter, req *http.Request) {
	defer req.Body.Close()
	session, authorized := sessions.GetSessionByRequest(req)
	if !authorized {
		basehandlers.UnauthorizedRequest(resp, req)
		return
	}
	fmt.Fprint(resp, coder.EncodeJson(getAccountPersonages(session)))
}
|
package main
import (
"fmt"
polon "github.com/pharrisee/poloniex-api"
)
// main builds an authenticated Poloniex API client and prints it.
// The commented-out code below is a retained websocket-subscription demo.
func main() {
	client := polon.NewWithCredentials("Key goes here", "secret goes here")
	fmt.Println(client)
	// p.Subscribe("ticker")
	// p.Subscribe("USDT_BTC")
	// p.On("ticker", func(m polon.WSTicker) {
	// 	pp.Println(m)
	// }).On("USDT_BTC-trade", func(m polon.WSOrderbook) {
	// 	pp.Println(m)
	// })
	// for _ = range time.Tick(1 * time.Second) {
	// 	fmt.Println(p.Ticker())
	// }
}
|
package controllers
import (
"goapi/models"
"html/template"
"log"
"net/http"
"strconv"
)
// templates caches every HTML template under templates/ at package init;
// template.Must panics at startup if any template fails to parse.
var templates = template.Must(template.ParseGlob("templates/*.html"))
// Index renders the product list page.
func Index(w http.ResponseWriter, r *http.Request) {
	products := models.GetProducts()
	// BUGFIX: the template error was silently discarded; log it so broken
	// templates are visible.
	if err := templates.ExecuteTemplate(w, "Index", products); err != nil {
		log.Println("Erro ao renderizar o template Index:", err)
	}
}
// New renders the product creation form.
func New(w http.ResponseWriter, r *http.Request) {
	// BUGFIX: the template error was silently discarded; log it so broken
	// templates are visible.
	if err := templates.ExecuteTemplate(w, "New", nil); err != nil {
		log.Println("Erro ao renderizar o template New:", err)
	}
}
// Insert handles the product creation form POST: it parses the form
// fields, creates the product and redirects back to the index page.
func Insert(w http.ResponseWriter, r *http.Request) {
	if r.Method == "POST" {
		name := r.FormValue("name")
		description := r.FormValue("description")
		price, err := strconv.ParseFloat(r.FormValue("price"), 64)
		if err != nil {
			// BUGFIX: previously execution continued and inserted a product
			// with a zero price; now we log and abort the insert.
			log.Println("Erro na conversão do preço:", err)
			http.Redirect(w, r, "/", http.StatusSeeOther)
			return
		}
		quantity, err := strconv.Atoi(r.FormValue("quantity"))
		if err != nil {
			log.Println("Erro na conversão da quantidade:", err)
			http.Redirect(w, r, "/", http.StatusSeeOther)
			return
		}
		models.CreateProduct(name, description, price, quantity)
		// BUGFIX: 303 See Other is the correct POST/redirect/GET status;
		// 301 is permanent and may be cached by browsers.
		http.Redirect(w, r, "/", http.StatusSeeOther)
	}
}
// Delete removes the product identified by the "id" query parameter and
// redirects back to the index page.
func Delete(w http.ResponseWriter, r *http.Request) {
	productID := r.URL.Query().Get("id")
	models.DeleteProduct(productID)
	http.Redirect(w, r, "/", http.StatusMovedPermanently)
}
func Edit(w http.ResponseWriter, r *http.Request) {
id := r.URL.Query().Get("id")
product := models.GetProduct(id)
templates.ExecuteTemplate(w, "Edit", product)
}
// Update handles the product edit form POST: it parses the form fields,
// updates the product and redirects back to the index page.
func Update(w http.ResponseWriter, r *http.Request) {
	if r.Method == "POST" {
		id, err := strconv.Atoi(r.FormValue("id"))
		if err != nil {
			// BUGFIX: previously execution continued and updated product 0;
			// now we log and abort the update.
			log.Println("Erro na conversão do ID")
			http.Redirect(w, r, "/", http.StatusSeeOther)
			return
		}
		name := r.FormValue("name")
		description := r.FormValue("description")
		price, err := strconv.ParseFloat(r.FormValue("price"), 64)
		if err != nil {
			log.Println("Erro na conversão do preço")
			http.Redirect(w, r, "/", http.StatusSeeOther)
			return
		}
		quantity, err := strconv.Atoi(r.FormValue("quantity"))
		if err != nil {
			log.Println("Erro na conversão da quantidade")
			http.Redirect(w, r, "/", http.StatusSeeOther)
			return
		}
		models.UpdateProduct(id, name, description, price, quantity)
		// BUGFIX: 303 See Other is the correct POST/redirect/GET status;
		// 301 is permanent and may be cached by browsers.
		http.Redirect(w, r, "/", http.StatusSeeOther)
	}
}
|
package utils
import (
"encoding/json"
"log"
)
// ToJSON serializes o to its JSON string representation. If marshalling
// fails, the error is logged and the empty string is returned.
func ToJSON(o interface{}) string {
	data, err := json.Marshal(o)
	if err != nil {
		log.Print(err)
		return ""
	}
	return string(data)
}
|
//go:build go1.21
package indenthandler
import (
"context"
"fmt"
"io"
"log/slog"
"runtime"
"slices"
"strconv"
"sync"
"time"
)
// !+IndentHandler
// IndentHandler is a slog.Handler that writes each record as indented
// key/value lines, one attribute per line, four spaces per group level.
type IndentHandler struct {
	opts           Options
	preformatted   []byte   // data from WithGroup and WithAttrs
	unopenedGroups []string // groups from WithGroup that haven't been opened
	indentLevel    int      // same as number of opened groups so far
	mu             *sync.Mutex // pointer so copies of the handler share one lock
	out            io.Writer
}
//!-IndentHandler
// Options configures an IndentHandler.
type Options struct {
	// Level reports the minimum level to log.
	// Records with a lower level are discarded.
	// If nil, the Handler uses [slog.LevelInfo].
	Level slog.Leveler
}
// New returns an IndentHandler that writes to out, applying opts when
// non-nil and defaulting the minimum level to slog.LevelInfo.
func New(out io.Writer, opts *Options) *IndentHandler {
	handler := &IndentHandler{out: out, mu: &sync.Mutex{}}
	if opts != nil {
		handler.opts = *opts
	}
	if handler.opts.Level == nil {
		handler.opts.Level = slog.LevelInfo
	}
	return handler
}
// Enabled reports whether records at the given level should be handled.
func (h *IndentHandler) Enabled(ctx context.Context, level slog.Level) bool {
	minLevel := h.opts.Level.Level()
	return level >= minLevel
}
// !+WithGroup
// WithGroup returns a handler that nests subsequent attributes under the
// named group. An empty name returns the receiver unchanged.
func (h *IndentHandler) WithGroup(name string) slog.Handler {
	if name == "" {
		return h
	}
	h2 := *h
	// Copy-on-write: give h2 its own groups slice so h is untouched.
	groups := make([]string, 0, len(h.unopenedGroups)+1)
	groups = append(groups, h.unopenedGroups...)
	h2.unopenedGroups = append(groups, name)
	return &h2
}
//!-WithGroup
// !+WithAttrs
// WithAttrs returns a handler whose records always carry attrs. The
// attributes are pre-formatted once here so Handle doesn't redo the work
// per record; pending groups from WithGroup are opened first.
func (h *IndentHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	if len(attrs) == 0 {
		return h
	}
	h2 := *h
	// Force an append to copy the underlying array.
	pre := slices.Clip(h.preformatted)
	// Add all groups from WithGroup that haven't already been added.
	h2.preformatted = h2.appendUnopenedGroups(pre, h2.indentLevel)
	// Each of those groups increased the indent level by 1.
	h2.indentLevel += len(h2.unopenedGroups)
	// Now all groups have been opened.
	h2.unopenedGroups = nil
	// Pre-format the attributes.
	for _, a := range attrs {
		h2.preformatted = h2.appendAttr(h2.preformatted, a, h2.indentLevel)
	}
	return &h2
}
// appendUnopenedGroups writes each pending group header ("name:\n") to
// buf, indenting one extra level per group, and returns the grown buffer.
func (h *IndentHandler) appendUnopenedGroups(buf []byte, indentLevel int) []byte {
	level := indentLevel
	for _, group := range h.unopenedGroups {
		buf = fmt.Appendf(buf, "%*s%s:\n", level*4, "", group)
		level++
	}
	return buf
}
//!-WithAttrs
// !+Handle
// Handle formats r and writes it to h.out as indented key/value lines,
// terminated by a "---\n" separator. Built-in attributes (time, level,
// source, message) come first, then the preformatted WithAttrs data, then
// the record's own attributes under any still-unopened groups.
func (h *IndentHandler) Handle(ctx context.Context, r slog.Record) error {
	// Use a pooled buffer to avoid an allocation per record.
	bufp := allocBuf()
	buf := *bufp
	defer func() {
		*bufp = buf
		freeBuf(bufp)
	}()
	if !r.Time.IsZero() {
		buf = h.appendAttr(buf, slog.Time(slog.TimeKey, r.Time), 0)
	}
	buf = h.appendAttr(buf, slog.Any(slog.LevelKey, r.Level), 0)
	if r.PC != 0 {
		fs := runtime.CallersFrames([]uintptr{r.PC})
		f, _ := fs.Next()
		// Optimize to minimize allocation.
		srcbufp := allocBuf()
		defer freeBuf(srcbufp)
		*srcbufp = append(*srcbufp, f.File...)
		*srcbufp = append(*srcbufp, ':')
		*srcbufp = strconv.AppendInt(*srcbufp, int64(f.Line), 10)
		buf = h.appendAttr(buf, slog.String(slog.SourceKey, string(*srcbufp)), 0)
	}
	buf = h.appendAttr(buf, slog.String(slog.MessageKey, r.Message), 0)
	// Insert preformatted attributes just after built-in ones.
	buf = append(buf, h.preformatted...)
	if r.NumAttrs() > 0 {
		buf = h.appendUnopenedGroups(buf, h.indentLevel)
		r.Attrs(func(a slog.Attr) bool {
			buf = h.appendAttr(buf, a, h.indentLevel+len(h.unopenedGroups))
			return true
		})
	}
	buf = append(buf, "---\n"...)
	// Serialize writes so concurrent Handle calls don't interleave output.
	h.mu.Lock()
	defer h.mu.Unlock()
	_, err := h.out.Write(buf)
	return err
}
//!-Handle
// appendAttr formats a single attribute onto buf at the given indent
// level (4 spaces per level) and returns the grown buffer. Groups recurse
// one level deeper; empty attrs and empty groups are skipped.
func (h *IndentHandler) appendAttr(buf []byte, a slog.Attr, indentLevel int) []byte {
	// Resolve the Attr's value before doing anything else.
	a.Value = a.Value.Resolve()
	// Ignore empty Attrs.
	if a.Equal(slog.Attr{}) {
		return buf
	}
	// Indent 4 spaces per level.
	buf = fmt.Appendf(buf, "%*s", indentLevel*4, "")
	switch a.Value.Kind() {
	case slog.KindString:
		// Quote string values, to make them easy to parse.
		buf = append(buf, a.Key...)
		buf = append(buf, ": "...)
		buf = strconv.AppendQuote(buf, a.Value.String())
		buf = append(buf, '\n')
	case slog.KindTime:
		// Write times in a standard way, without the monotonic time.
		buf = append(buf, a.Key...)
		buf = append(buf, ": "...)
		buf = a.Value.Time().AppendFormat(buf, time.RFC3339Nano)
		buf = append(buf, '\n')
	case slog.KindGroup:
		attrs := a.Value.Group()
		// Ignore empty groups.
		if len(attrs) == 0 {
			return buf
		}
		// If the key is non-empty, write it out and indent the rest of the attrs.
		// Otherwise, inline the attrs.
		if a.Key != "" {
			buf = fmt.Appendf(buf, "%s:\n", a.Key)
			indentLevel++
		}
		for _, ga := range attrs {
			buf = h.appendAttr(buf, ga, indentLevel)
		}
	default:
		// All other kinds use their default String rendering.
		buf = append(buf, a.Key...)
		buf = append(buf, ": "...)
		buf = append(buf, a.Value.String()...)
		buf = append(buf, '\n')
	}
	return buf
}
// !+pool
// bufPool recycles output buffers across Handle calls; each entry is a
// *[]byte so Put/Get don't allocate an interface box per slice header.
var bufPool = sync.Pool{
	New: func() any {
		b := make([]byte, 0, 1024)
		return &b
	},
}
// allocBuf fetches a reusable byte-slice buffer from the pool.
func allocBuf() *[]byte {
	b := bufPool.Get().(*[]byte)
	return b
}
// freeBuf returns b to the pool after truncating it. Oversized buffers
// are dropped instead, so one huge record can't pin memory forever.
func freeBuf(b *[]byte) {
	const maxBufferSize = 16 << 10
	if cap(*b) > maxBufferSize {
		return
	}
	*b = (*b)[:0]
	bufPool.Put(b)
}
//!-pool
|
package models
// JsonUrl is the type used to parse request/response payloads that carry
// a single URL.
type JsonUrl struct {
	Url string `json:"url"`
}
|
package main
import "fmt"
// main demonstrates map iteration and deletion: it prints the map,
// deletes key "3", then prints it again. Iteration order is random.
func main() {
	myMap := map[string]string{
		"1": "一",
		"2": "二",
		"3": "三",
	}
	fmt.Println("原始地图")
	for key, value := range myMap {
		fmt.Println(key, "首都是", value)
	}
	delete(myMap, "3")
	fmt.Println("=======删除3=======")
	for key, value := range myMap {
		fmt.Println(key, "首都是", value)
	}
}
|
package models
// Unit represents a measurement unit row: localized names, the owning
// quantity and the conversion ratio.
type Unit struct {
	UnitId        int     `json:"unitId,omitempty" db:"UnitId"`
	UnitNamePl    string  `json:"unitNamePl" db:"UnitNamePl"`
	UnitNameEn    string  `json:"unitNameEn" db:"UnitNameEn"`
	QuantityId    int     `json:"quantityId" db:"QuantityId"`
	Ratio         float32 `json:"ratio" db:"Ratio"`
	UnitShortName string  `json:"unitShortName" db:"UnitShortName"`
}

// GetUnit is the read model for a unit joined with its quantity name.
type GetUnit struct {
	UnitId         int     `json:"unitId,omitempty" db:"UnitId"`
	QuantityNamePl string  `json:"QuantityNamePl" db:"QuantityNamePl"`
	UnitNamePl     string  `json:"unitNamePl" db:"UnitNamePl"`
	UnitNameEn     string  `json:"unitNameEn" db:"UnitNameEn"`
	QuantityId     int     `json:"quantityId" db:"QuantityId"`
	Ratio          float32 `json:"ratio" db:"Ratio"`
	UnitShortName  string  `json:"unitShortName" db:"UnitShortName"`
}

// UnitId is a minimal unit projection (id and short name only).
type UnitId struct {
	UnitId        int    `json:"unitId,omitempty" db:"UnitId"`
	UnitShortName string `json:"unitShortName" db:"UnitShortName"`
}
|
import "strconv"
// numDecodings returns the number of ways s can be decoded when letters
// are encoded as "1".."26" (LeetCode 91, Decode Ways). Dynamic
// programming: dp[i] counts decodings of s[:i+1].
func numDecodings(s string) int {
	// valid reports whether sub decodes to a letter: 1..26, no leading zero.
	valid := func(sub string) bool {
		if len(sub) == 2 && sub[0] == '0' {
			return false
		}
		n, _ := strconv.Atoi(sub)
		return n > 0 && n <= 26
	}
	switch len(s) {
	case 0:
		return 0
	case 1:
		if valid(s) {
			return 1
		}
		return 0
	}
	dp := make([]int, len(s))
	if valid(s[:1]) {
		dp[0] = 1
	}
	if valid(s[1:2]) {
		dp[1] = dp[0]
	}
	if valid(s[:2]) {
		dp[1]++
	}
	for i := 2; i < len(s); i++ {
		if valid(s[i : i+1]) {
			dp[i] += dp[i-1]
		}
		if valid(s[i-1 : i+1]) {
			dp[i] += dp[i-2]
		}
	}
	return dp[len(s)-1]
}
// isValid reports whether s decodes to a single letter: its numeric value
// must be 1..26 and a two-digit code may not start with '0'.
func isValid(s string) bool {
	if len(s) == 2 && s[0] == '0' {
		return false
	}
	value, _ := strconv.Atoi(s)
	return value >= 1 && value <= 26
}
|
package nmxutil
import (
"sync"
)
// Bcaster broadcasts a value to every registered listener channel.
// The mutex guards the listener slice, not the channel operations.
type Bcaster struct {
	chs [](chan interface{})
	mtx sync.Mutex
}
// Listen registers and returns a new unbuffered listener channel; it will
// receive the next broadcast value.
func (b *Bcaster) Listen() chan interface{} {
	ch := make(chan interface{})
	b.mtx.Lock()
	b.chs = append(b.chs, ch)
	b.mtx.Unlock()
	return ch
}
// Send delivers val to every channel registered via Listen, then closes
// each channel. The listener slice is snapshotted under the lock, so
// listeners registered during the send are not affected.
//
// NOTE(review): closed channels are not removed from b.chs, so a second
// Send without an intervening Clear would send on closed channels and
// panic — confirm callers only ever use SendAndClear. Each send also
// blocks until the listener receives (channels are unbuffered).
func (b *Bcaster) Send(val interface{}) {
	b.mtx.Lock()
	chs := b.chs
	b.mtx.Unlock()
	for _, ch := range chs {
		ch <- val
		close(ch)
	}
}
// Clear drops every registered listener channel without closing them.
func (b *Bcaster) Clear() {
	b.mtx.Lock()
	b.chs = nil
	b.mtx.Unlock()
}
// SendAndClear broadcasts val to all listeners (closing their channels)
// and then forgets them, leaving the Bcaster ready for new listeners.
func (b *Bcaster) SendAndClear(val interface{}) {
	b.Send(val)
	b.Clear()
}
|
package main
import (
"fmt"
"os"
"os/exec"
"strings"
)
// cmdPs is the "conair ps" subcommand; it lists all conair containers by
// delegating to systemd's machinectl.
var cmdPs = &Command{
	Name:        "ps",
	Description: "List all conair containers",
	Summary:     "List all conair containers",
	Run:         runPs,
}
// runPs executes `machinectl list <args...>` and prints the output.
// It returns 0 on success and 1 when machinectl is missing or fails.
func runPs(args []string) (exit int) {
	path, err := exec.LookPath("machinectl")
	if err != nil {
		fmt.Fprintln(os.Stderr, "machinectl not found.")
		// BUGFIX: previously execution continued with an empty path and the
		// exit code stayed 0; abort with a failure code instead.
		return 1
	}
	args = append([]string{"list"}, args...)
	output, err := exec.Command(path, args...).CombinedOutput()
	if err != nil {
		// BUGFIX: report failure via the exit code (and fix the
		// "machinctl" typo in the message).
		fmt.Fprintf(os.Stderr, "machinectl failed: machinectl %v: %s (%s)", strings.Join(args, " "), output, err)
		return 1
	}
	fmt.Fprintln(os.Stdout, string(output))
	return 0
}
|
/*
Copyright 2017 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Some of the code below came from https://github.com/coreos/etcd-operator
which also has the apache 2.0 license.
*/
// Package rgw to manage a rook object store.
package rgw
import (
"github.com/rook/rook/pkg/operator/k8sutil"
"github.com/rook/rook/pkg/operator/pool"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// schemeGroupVersion is group version used to register these objects
var schemeGroupVersion = schema.GroupVersion{Group: k8sutil.CustomResourceGroup, Version: k8sutil.V1Alpha1}

// ObjectStore is the definition of the object store custom resource
type ObjectStore struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`
	Spec              ObjectStoreSpec `json:"spec"`
}

// ObjectStoreList is the definition of a list of object stores for CRDs (1.7+)
type ObjectStoreList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []ObjectStore `json:"items"`
}

// ObjectstoreList is the definition of a list of object stores for TPRs (pre-1.7);
// note the lowercase "s" distinguishes it from the CRD list type above.
type ObjectstoreList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []ObjectStore `json:"items"`
}
// ObjectStoreSpec represents the desired state of an object store:
// its two backing pools and the rgw gateway deployment settings.
type ObjectStoreSpec struct {
	// The metadata pool settings
	MetadataPool pool.PoolSpec `json:"metadataPool"`
	// The data pool settings
	DataPool pool.PoolSpec `json:"dataPool"`
	// The rgw pod info
	Gateway GatewaySpec `json:"gateway"`
}

// GatewaySpec describes how the rgw pods are deployed and exposed.
type GatewaySpec struct {
	// The port the rgw service will be listening on (http)
	Port int32 `json:"port"`
	// The port the rgw service will be listening on (https)
	SecurePort int32 `json:"securePort"`
	// The number of pods in the rgw replicaset. If "allNodes" is specified, a daemonset is created.
	Instances int32 `json:"instances"`
	// Whether the rgw pods should be started as a daemonset on all nodes
	AllNodes bool `json:"allNodes"`
	// The name of the secret that stores the ssl certificate for secure rgw connections
	SSLCertificateRef string `json:"sslCertificateRef"`
	// The affinity to place the rgw pods (default is to place on any available node)
	Placement k8sutil.Placement `json:"placement"`
	// The resource requirements for the rgw pods
	Resources v1.ResourceRequirements `json:"resources"`
}
|
package matchserver
import (
"context"
"errors"
"log"
"math/rand"
"strconv"
"sync"
"time"
pb "github.com/ekotlikoff/gochess/api"
"github.com/ekotlikoff/gochess/internal/model"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
)
var (
	// PollingDefaultTimeout is the default timeout for http requests
	PollingDefaultTimeout time.Duration = 10 * time.Second
	// matchingServerID is the next id assigned by NewMatchingServer.
	// NOTE(review): incremented without synchronization — confirm servers
	// are only constructed from a single goroutine.
	matchingServerID = 0
)
// NOTE(review): iota runs continuously through this block even though the
// constants alternate between WebsocketResponseType and
// WebsocketRequestType, so the two enumerations share one value space
// (NullT=0, RequestSyncT=1, RequestAsyncT=2, ...). Confirm this is
// intended rather than two independent 0-based enums.
const (
	// NullT is the WS response type for a null response
	NullT = WebsocketResponseType(iota)
	// RequestSyncT is the WS request type for a sync request
	RequestSyncT = WebsocketRequestType(iota)
	// RequestAsyncT is the WS request type for an async request
	RequestAsyncT = WebsocketRequestType(iota)
	// ResponseSyncT is the WS response type for a sync response
	ResponseSyncT = WebsocketResponseType(iota)
	// ResponseAsyncT is the WS response type for an async response
	ResponseAsyncT = WebsocketResponseType(iota)
	// OpponentPlayedMoveT is the WS response type for an opponent's move
	OpponentPlayedMoveT = WebsocketResponseType(iota)
)
type (
	// WebsocketResponseType represents the different type of responses
	// supported over the WS conn
	WebsocketResponseType uint8
	// WebsocketResponse is a struct for a response over the WS conn;
	// only the field matching WebsocketResponseType is meaningful.
	WebsocketResponse struct {
		WebsocketResponseType WebsocketResponseType
		ResponseSync          ResponseSync
		ResponseAsync         ResponseAsync
		OpponentPlayedMove    model.MoveRequest
	}
	// WebsocketRequestType represents the different type of requests supported
	// over the WS conn
	WebsocketRequestType uint8
	// WebsocketRequest is a struct for a request over the WS conn;
	// only the field matching WebsocketRequestType is meaningful.
	WebsocketRequest struct {
		WebsocketRequestType WebsocketRequestType
		RequestSync          model.MoveRequest
		RequestAsync         RequestAsync
	}
	// MatchDetails is a struct for the matched response
	MatchDetails struct {
		Color        model.Color
		OpponentName string
		MaxTimeMs    int64
	}
	// Player is a struct representing a matchserver client, containing channels
	// for communications between the client and the matchserver.
	Player struct {
		name               string
		color              model.Color
		elapsedMs          int64
		requestChanSync    chan model.MoveRequest
		ResponseChanSync   chan ResponseSync
		RequestChanAsync   chan RequestAsync
		ResponseChanAsync  chan ResponseAsync
		OpponentPlayedMove chan model.MoveRequest
		matchStart         chan struct{} // closed when the match begins
		matchStartMutex    sync.RWMutex  // guards matchStart replacement in Reset
		matchMutex         sync.RWMutex  // guards match/searchingForMatch/color reads
		searchingForMatch  bool
		match              *Match
		clientMutex        sync.Mutex // Only one client connected at a time
	}
)
// NewPlayer creates a player with the given name, defaulting to black and
// fully reset for its first match.
func NewPlayer(name string) *Player {
	p := &Player{name: name, color: model.Black}
	p.Reset()
	return p
}
// Name get the player's name
func (player *Player) Name() string {
	return player.name
}
// GetSearchingForMatch reports whether the player is queued for matching.
func (player *Player) GetSearchingForMatch() bool {
	player.matchMutex.RLock()
	searching := player.searchingForMatch
	player.matchMutex.RUnlock()
	return searching
}
// SetSearchingForMatch records whether the player is queued for matching.
func (player *Player) SetSearchingForMatch(searchingForMatch bool) {
	player.matchMutex.Lock()
	player.searchingForMatch = searchingForMatch
	player.matchMutex.Unlock()
}
// GetMatch returns the player's current match (nil when unmatched).
func (player *Player) GetMatch() *Match {
	player.matchMutex.RLock()
	m := player.match
	player.matchMutex.RUnlock()
	return m
}
// SetMatch stores the player's current match.
func (player *Player) SetMatch(match *Match) {
	player.matchMutex.Lock()
	player.match = match
	player.matchMutex.Unlock()
}
// ClientConnectToPlayer ensures that only one client is connected to the player
// at a time (even if two client's have the session token).
// It blocks until any previously connected client disconnects.
func (player *Player) ClientConnectToPlayer() {
	player.clientMutex.Lock()
}
// ClientDisconnectFromPlayer ensures that only one client is connected to the
// player at a time (even if two client's have the session token).
// It must only be called after a matching ClientConnectToPlayer.
func (player *Player) ClientDisconnectFromPlayer() {
	player.clientMutex.Unlock()
}
// MatchedOpponentName returns the name of the opponent in the player's
// current match.
func (player *Player) MatchedOpponentName() string {
	player.matchMutex.RLock()
	defer player.matchMutex.RUnlock()
	if player.color == model.Black {
		return player.GetMatch().PlayerName(model.White)
	}
	return player.GetMatch().PlayerName(model.Black)
}
// MatchMaxTimeMs returns the match's per-player clock budget in
// milliseconds.
func (player *Player) MatchMaxTimeMs() int64 {
	return player.GetMatch().maxTimeMs
}
// Color returns the color the player was assigned for the current match.
func (player *Player) Color() model.Color {
	player.matchMutex.RLock()
	c := player.color
	player.matchMutex.RUnlock()
	return c
}
// WaitForMatchStart blocks until the player's match begins, returning an
// error after a 120 second timeout.
func (player *Player) WaitForMatchStart() error {
	player.matchStartMutex.RLock()
	defer player.matchStartMutex.RUnlock()
	timeout := time.After(120 * time.Second)
	select {
	case <-player.matchStart:
		return nil
	case <-timeout:
		return errors.New("Timeout")
	}
}
// WaitForMatchOver used by client servers to synchronize around match ending.
// It blocks until the player's current match finishes.
func (player *Player) WaitForMatchOver() {
	player.match.waitForMatchOver()
}
// HasMatchStarted blocks until either the player's match starts (true) or
// ctx is cancelled (false).
func (player *Player) HasMatchStarted(ctx context.Context) bool {
	player.matchStartMutex.RLock()
	defer player.matchStartMutex.RUnlock()
	select {
	case <-ctx.Done():
		return false
	case <-player.matchStart:
		return true
	}
}
// MakeMoveWS player (websocket client) make a move. Unlike MakeMove it
// does not wait for the ResponseChanSync result.
func (player *Player) MakeMoveWS(pieceMove model.MoveRequest) {
	player.requestChanSync <- pieceMove
}
// MakeMove submits a move and blocks for the synchronous response,
// returning whether the move was accepted.
func (player *Player) MakeMove(pieceMove model.MoveRequest) bool {
	player.requestChanSync <- pieceMove
	return (<-player.ResponseChanSync).MoveSuccess
}
// GetSyncUpdate returns the opponent's next move, or nil if none arrives
// within PollingDefaultTimeout.
func (player *Player) GetSyncUpdate() *model.MoveRequest {
	timeout := time.After(PollingDefaultTimeout)
	select {
	case update := <-player.OpponentPlayedMove:
		return &update
	case <-timeout:
		return nil
	}
}
// RequestAsync player makes an async request (match, draw offer, resign).
func (player *Player) RequestAsync(requestAsync RequestAsync) {
	player.RequestChanAsync <- requestAsync
}
// GetAsyncUpdate returns the next async response, or nil if none arrives
// within PollingDefaultTimeout.
func (player *Player) GetAsyncUpdate() *ResponseAsync {
	timeout := time.After(PollingDefaultTimeout)
	select {
	case update := <-player.ResponseChanAsync:
		return &update
	case <-timeout:
		return nil
	}
}
// Reset a player for their next match: clears the clock and match,
// replaces every channel, and installs a fresh matchStart signal.
func (player *Player) Reset() {
	// If the player preexisted there may be a client waiting on the opponent's
	// move. Closing the channel unblocks that client.
	if player.OpponentPlayedMove != nil {
		close(player.OpponentPlayedMove)
	}
	player.elapsedMs = 0
	player.SetMatch(nil)
	player.requestChanSync = make(chan model.MoveRequest, 1)
	player.ResponseChanSync = make(chan ResponseSync, 10)
	player.RequestChanAsync = make(chan RequestAsync, 1)
	player.ResponseChanAsync = make(chan ResponseAsync, 1)
	player.OpponentPlayedMove = make(chan model.MoveRequest, 10)
	// Swap in a new matchStart channel under the write lock so readers in
	// WaitForMatchStart/HasMatchStarted never see a torn update.
	player.matchStartMutex.Lock()
	defer player.matchStartMutex.Unlock()
	player.matchStart = make(chan struct{})
}
// startMatch signals that the player's match has begun by closing
// matchStart, then pushes the match details to the client.
// NOTE(review): calling startMatch twice without an intervening Reset
// would panic on the double close — confirm the matching flow guarantees
// exactly one call per Reset.
func (player *Player) startMatch() {
	player.matchStartMutex.RLock()
	defer player.matchStartMutex.RUnlock()
	close(player.matchStart)
	player.ResponseChanAsync <- ResponseAsync{
		Matched: true,
		MatchDetails: MatchDetails{
			Color:        player.Color(),
			OpponentName: player.MatchedOpponentName(),
			MaxTimeMs:    player.MatchMaxTimeMs(),
		},
	}
}
// ResponseSync represents a response to the client related to a move.
type ResponseSync struct {
	MoveSuccess       bool
	ElapsedMs         int
	ElapsedMsOpponent int
}

// RequestAsync represents a request from the client unrelated to a move;
// exactly one of the flags is expected to be set.
type RequestAsync struct {
	Match, RequestToDraw, Resign bool
}

// ResponseAsync represents a response to the client unrelated to a move.
type ResponseAsync struct {
	GameOver, RequestToDraw, Draw, Resignation, Timeout, Matched bool
	Winner                                                       string
	MatchDetails                                                 MatchDetails
}
// MatchingServer handles matching players and carrying out the game.
type MatchingServer struct {
	id                        int
	liveMatches               []*Match // guarded by mutex
	liveMatchesMetric         prometheus.Gauge
	matchingQueueLengthMetric prometheus.Gauge
	mutex                     *sync.Mutex
	matchingPlayers           chan *Player
	pendingMatch              *sync.Mutex // held while a match is half-formed
	botMatchingEnabled        bool
	engineClient              pb.RustChessClient
	engineClientConn          *grpc.ClientConn
	maxMatchingDuration       time.Duration // wait before matching with a bot
}
// NewMatchingServer create a matching server with no engine.
// It registers a per-server queue-length gauge with Prometheus.
// NOTE(review): matchingServerID++ is unsynchronized and
// prometheus.MustRegister panics on duplicate registration — confirm
// servers are only created sequentially from one goroutine.
func NewMatchingServer() MatchingServer {
	matchingServer := MatchingServer{
		id: matchingServerID, mutex: &sync.Mutex{},
		matchingPlayers: make(chan *Player), pendingMatch: &sync.Mutex{},
	}
	matchingServerID++
	matchingQueueLengthMetric := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "gochess",
		Subsystem: "matchserver",
		Name:      "matching_queue_length",
		Help:      "The number of players in the matching queue.",
		ConstLabels: prometheus.Labels{
			"matching_server_id": strconv.Itoa(matchingServer.id),
		},
	})
	prometheus.MustRegister(matchingQueueLengthMetric)
	matchingServer.matchingQueueLengthMetric = matchingQueueLengthMetric
	return matchingServer
}
// NewMatchingServerWithEngine creates a matching server wired to a chess
// engine; after maxMatchingDuration a lone player is matched with a bot.
func NewMatchingServerWithEngine(
	engineAddr string, maxMatchingDuration time.Duration,
	engineConnTimeout time.Duration,
) MatchingServer {
	server := NewMatchingServer()
	server.createEngineClient(engineAddr, engineConnTimeout)
	server.maxMatchingDuration = maxMatchingDuration
	return server
}
// LiveMatches returns the matches currently being played.
func (matchingServer *MatchingServer) LiveMatches() []*Match {
	matchingServer.mutex.Lock()
	defer matchingServer.mutex.Unlock()
	return matchingServer.liveMatches
}
// matchAndPlay is one worker loop: it pairs players arriving on
// matchingPlayers, plays the resulting match to completion, and (when bot
// matching is enabled) falls back to a bot opponent after
// maxMatchingDuration. It never returns.
func (matchingServer *MatchingServer) matchAndPlay(
	matchGenerator MatchGenerator, playServerID int,
) {
	var player1, player2 *Player
	// Create an already-fired timer so the channel read below drains it
	// before the first Reset.
	maxMatchingTimer := time.NewTimer(0)
	<-maxMatchingTimer.C
	// Lock until a full match is found and started, thus avoiding unmatched
	// players stranded across goroutines.
	matchingServer.pendingMatch.Lock()
	for {
		select {
		case player := <-matchingServer.matchingPlayers:
			if player1 == nil {
				player1 = player
				if matchingServer.botMatchingEnabled {
					maxMatchingTimer.Reset(matchingServer.maxMatchingDuration)
				}
			} else if player2 == nil {
				player2 = player
				match := matchGenerator(player1, player2)
				matchingServer.matchingQueueLengthMetric.Sub(2)
				player1.SetMatch(&match)
				player2.SetMatch(&match)
				matchingServer.addMatch(&match)
				// Both players are committed; let other workers pair.
				matchingServer.pendingMatch.Unlock()
				player1.startMatch()
				player2.startMatch()
				matchingServer.liveMatchesMetric.Inc()
				// play blocks until the match is over.
				(&match).play()
				matchingServer.liveMatchesMetric.Dec()
				matchingServer.removeMatch(&match)
				player1, player2 = nil, nil
				matchingServer.pendingMatch.Lock()
			}
		case <-maxMatchingTimer.C:
			// The maxMatchingTimer has fired and we should match player1 with a
			// bot.
			botNames := [5]string{
				"jessica", "cherry", "gumdrop", "roland", "pumpkin",
			}
			botPlayer := NewPlayer(botNames[rand.Intn(len(botNames))] + "bot")
			go matchingServer.engineSession(botPlayer)
			matchingServer.matchingQueueLengthMetric.Inc()
			// Queue the bot like any other player; the next loop iteration
			// pairs it with player1.
			go (func() { matchingServer.matchingPlayers <- botPlayer })()
		}
	}
}
// StartMatchServers using default match generator. It blocks until quit
// receives a value (see StartCustomMatchServers).
func (matchingServer *MatchingServer) StartMatchServers(
	maxConcurrentGames int, quit chan bool,
) {
	matchingServer.StartCustomMatchServers(
		maxConcurrentGames, DefaultMatchGenerator, quit,
	)
}
// StartCustomMatchServers using custom match generator. It registers the
// live-matches gauge, launches maxConcurrentGames matchAndPlay workers,
// then blocks until quit receives a value, finally closing any engine
// connection.
func (matchingServer *MatchingServer) StartCustomMatchServers(
	maxConcurrentGames int, matchGenerator MatchGenerator, quit chan bool,
) {
	matchingServer.liveMatchesMetric = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "gochess",
		Subsystem: "matchserver",
		Name:      "live_matches",
		Help:      "The number of live matches",
		ConstLabels: prometheus.Labels{
			"matching_server_id": strconv.Itoa(matchingServer.id),
		},
	})
	prometheus.MustRegister(matchingServer.liveMatchesMetric)
	log.Printf("Starting %d matchAndPlay threads ...", maxConcurrentGames)
	for i := 0; i < maxConcurrentGames; i++ {
		go matchingServer.matchAndPlay(matchGenerator, i)
	}
	<-quit // Wait to be told to exit.
	if matchingServer.engineClientConn != nil {
		matchingServer.engineClientConn.Close()
	}
}
// removeMatch deletes matchToRemove from the live list using an
// order-insensitive swap-remove; removing the only match resets the list
// to nil (matching the original's nil-when-empty behavior).
func (matchingServer *MatchingServer) removeMatch(matchToRemove *Match) {
	matchingServer.mutex.Lock()
	defer matchingServer.mutex.Unlock()
	live := matchingServer.liveMatches
	for i := range live {
		if live[i] != matchToRemove {
			continue
		}
		if len(live) == 1 {
			matchingServer.liveMatches = nil
		} else {
			live[i] = live[len(live)-1]
			matchingServer.liveMatches = live[:len(live)-1]
		}
		return
	}
}
// addMatch appends match to the live-match list.
func (matchingServer *MatchingServer) addMatch(match *Match) {
	matchingServer.mutex.Lock()
	matchingServer.liveMatches = append(matchingServer.liveMatches, match)
	matchingServer.mutex.Unlock()
}
// MatchPlayer queues the player for matching. The send blocks until a
// matchAndPlay worker receives the player, so the queue-length metric is
// only incremented after hand-off.
func (matchingServer *MatchingServer) MatchPlayer(player *Player) {
	matchingServer.matchingPlayers <- player
	matchingServer.matchingQueueLengthMetric.Inc()
}
|
package swap
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
bin "github.com/gagliardetto/binary"
"github.com/gagliardetto/solana-go"
"github.com/gagliardetto/solana-go/programs/system"
"github.com/gagliardetto/solana-go/programs/token"
"github.com/gagliardetto/solana-go/rpc"
"github.com/gopartyparrot/goparrot-twap/config"
)
// RaydiumSwap executes token swaps against Raydium AMM pools, signing
// transactions with the configured account.
type RaydiumSwap struct {
	clientRPC *rpc.Client
	account   solana.PrivateKey
}
// Swap executes a single swap on the given Raydium AMM pool, spending
// `amount` of fromToken from fromAccount and receiving toToken into
// toAccount.
//
// The minimum output is quoted locally from the pool's current coin/pc
// token balances with the constant-product formula and a fixed 2% slippage
// tolerance. When either side is native SOL, a temporary wrapped-SOL token
// account is created for the duration of the transaction and closed at the
// end (its lamports return to the owner).
//
// Returns the signature of the submitted transaction.
func (s *RaydiumSwap) Swap(
	ctx context.Context,
	pool *config.RaydiumPoolConfig,
	amount uint64,
	fromToken string,
	fromAccount solana.PublicKey,
	toToken string,
	toAccount solana.PublicKey,
) (*solana.Signature, error) {
	// Guard early: a zero amount could previously divide by zero on an
	// empty pool, and would always fail the minimum-output check anyway.
	if amount == 0 {
		return nil, errors.New("swap amount must be greater than zero")
	}
	res, err := s.clientRPC.GetMultipleAccounts(
		ctx,
		solana.MustPublicKeyFromBase58(pool.PoolCoinTokenAccount),
		solana.MustPublicKeyFromBase58(pool.PoolPcTokenAccount),
	)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the RPC returns a nil entry for any account it cannot find;
	// indexing res.Value blindly could panic.
	if res == nil || len(res.Value) < 2 || res.Value[0] == nil || res.Value[1] == nil {
		return nil, errors.New("could not fetch pool token accounts")
	}
	var poolCoinBalance token.Account
	err = bin.NewBinDecoder(res.Value[0].Data.GetBinary()).Decode(&poolCoinBalance)
	if err != nil {
		return nil, err
	}
	var poolPcBalance token.Account
	err = bin.NewBinDecoder(res.Value[1].Data.GetBinary()).Decode(&poolPcBalance)
	if err != nil {
		return nil, err
	}
	// Constant-product quote: out = pc * amount / (coin + amount).
	// BUG FIX: computed in big.Int because pc*amount (and coin+amount) can
	// overflow uint64 for large pools/amounts. The result always fits back
	// into uint64 since out < pc.
	numerator := new(big.Int).Mul(
		new(big.Int).SetUint64(poolPcBalance.Amount),
		new(big.Int).SetUint64(amount),
	)
	denominator := new(big.Int).Add(
		new(big.Int).SetUint64(poolCoinBalance.Amount),
		new(big.Int).SetUint64(amount),
	)
	quote := new(big.Int).Quo(numerator, denominator)
	// Apply the fixed 2% slippage tolerance.
	quote.Mul(quote, big.NewInt(98))
	quote.Quo(quote, big.NewInt(100))
	minimumOutAmount := quote.Uint64()
	if minimumOutAmount == 0 {
		return nil, errors.New("min swap output amount must be greater than zero, try to swap a bigger amount")
	}
	instrs := []solana.Instruction{}
	signers := []solana.PrivateKey{s.account}
	tempAccount := solana.NewWallet()
	// Native SOL cannot be swapped directly: wrap it in a throwaway WSOL
	// token account that lives only for this transaction.
	needWrapSOL := fromToken == config.NativeSOL || toToken == config.NativeSOL
	if needWrapSOL {
		rentCost, err := s.clientRPC.GetMinimumBalanceForRentExemption(
			ctx,
			config.TokenAccountSize,
			rpc.CommitmentConfirmed,
		)
		if err != nil {
			return nil, err
		}
		accountLamports := rentCost
		if fromToken == config.NativeSOL {
			// Swapping from SOL: fund the temp account with the swap amount
			// on top of the rent-exemption minimum.
			accountLamports += amount
		}
		createInst, err := system.NewCreateAccountInstruction(
			accountLamports,
			config.TokenAccountSize,
			solana.TokenProgramID,
			s.account.PublicKey(),
			tempAccount.PublicKey(),
		).ValidateAndBuild()
		if err != nil {
			return nil, err
		}
		instrs = append(instrs, createInst)
		initInst, err := token.NewInitializeAccountInstruction(
			tempAccount.PublicKey(),
			solana.MustPublicKeyFromBase58(config.WrappedSOL),
			s.account.PublicKey(),
			solana.SysVarRentPubkey,
		).ValidateAndBuild()
		if err != nil {
			return nil, err
		}
		instrs = append(instrs, initInst)
		signers = append(signers, tempAccount.PrivateKey)
		// Use this new temp account as from or to.
		if fromToken == config.NativeSOL {
			fromAccount = tempAccount.PublicKey()
		}
		if toToken == config.NativeSOL {
			toAccount = tempAccount.PublicKey()
		}
	}
	instrs = append(instrs, NewRaydiumSwapInstruction(
		amount,
		minimumOutAmount,
		solana.TokenProgramID,
		solana.MustPublicKeyFromBase58(pool.AmmId),
		solana.MustPublicKeyFromBase58(pool.AmmAuthority),
		solana.MustPublicKeyFromBase58(pool.AmmOpenOrders),
		solana.MustPublicKeyFromBase58(pool.AmmTargetOrders),
		solana.MustPublicKeyFromBase58(pool.PoolCoinTokenAccount),
		solana.MustPublicKeyFromBase58(pool.PoolPcTokenAccount),
		solana.MustPublicKeyFromBase58(pool.SerumProgramId),
		solana.MustPublicKeyFromBase58(pool.SerumMarket),
		solana.MustPublicKeyFromBase58(pool.SerumBids),
		solana.MustPublicKeyFromBase58(pool.SerumAsks),
		solana.MustPublicKeyFromBase58(pool.SerumEventQueue),
		solana.MustPublicKeyFromBase58(pool.SerumCoinVaultAccount),
		solana.MustPublicKeyFromBase58(pool.SerumPcVaultAccount),
		solana.MustPublicKeyFromBase58(pool.SerumVaultSigner),
		fromAccount,
		toAccount,
		s.account.PublicKey(),
	))
	if needWrapSOL {
		// Close the temp WSOL account, returning its lamports (rent plus any
		// SOL received from the swap) to the owner.
		closeInst, err := token.NewCloseAccountInstruction(
			tempAccount.PublicKey(),
			s.account.PublicKey(),
			s.account.PublicKey(),
			[]solana.PublicKey{},
		).ValidateAndBuild()
		if err != nil {
			return nil, err
		}
		instrs = append(instrs, closeInst)
	}
	sig, err := ExecuteInstructions(ctx, s.clientRPC, signers, instrs...)
	if err != nil {
		return nil, err
	}
	return sig, nil
}
/** Instructions **/
// RaySwapInstruction is the Raydium AMM "swap" instruction payload.
// Only InAmount and MinimumOutAmount are serialized (after the instruction
// tag, see MarshalWithEncoder); the account list is carried out-of-band.
type RaySwapInstruction struct {
	bin.BaseVariant
	// InAmount is the exact amount of the source token to swap in.
	InAmount uint64
	// MinimumOutAmount is the smallest acceptable output amount (slippage floor).
	MinimumOutAmount uint64
	// AccountMetaSlice holds the accounts the on-chain program expects;
	// the tags exclude it from binary (de)serialization.
	solana.AccountMetaSlice `bin:"-" borsh_skip:"true"`
}
// ProgramID returns the Raydium liquidity pool V4 program address this
// instruction targets.
func (inst *RaySwapInstruction) ProgramID() solana.PublicKey {
	return solana.MustPublicKeyFromBase58(config.RaydiumLiquidityPoolProgramIDV4)
}
// Accounts returns the instruction's account list, read back from the copy
// of the instruction stored in BaseVariant.Impl by NewRaydiumSwapInstruction.
func (inst *RaySwapInstruction) Accounts() (out []*solana.AccountMeta) {
	return inst.Impl.(solana.AccountsGettable).GetAccounts()
}
// Data serializes the instruction into its on-wire byte form using the
// Borsh encoder (which delegates to MarshalWithEncoder).
func (inst *RaySwapInstruction) Data() ([]byte, error) {
	var out bytes.Buffer
	if err := bin.NewBorshEncoder(&out).Encode(inst); err != nil {
		return nil, fmt.Errorf("unable to encode instruction: %w", err)
	}
	return out.Bytes(), nil
}
// MarshalWithEncoder writes the instruction tag followed by the two amount
// fields, all little-endian, in the layout the on-chain program expects.
func (inst *RaySwapInstruction) MarshalWithEncoder(encoder *bin.Encoder) error {
	// Swap instruction is number 9.
	if err := encoder.WriteUint8(9); err != nil {
		return err
	}
	if err := encoder.WriteUint64(inst.InAmount, binary.LittleEndian); err != nil {
		return err
	}
	return encoder.WriteUint64(inst.MinimumOutAmount, binary.LittleEndian)
}
// NewRaydiumSwapInstruction builds a Raydium V4 swap instruction with the
// 18 accounts the program requires, in their mandated order.
func NewRaydiumSwapInstruction(
	// Parameters:
	inAmount uint64,
	minimumOutAmount uint64,
	// Accounts:
	tokenProgram solana.PublicKey,
	ammId solana.PublicKey,
	ammAuthority solana.PublicKey,
	ammOpenOrders solana.PublicKey,
	ammTargetOrders solana.PublicKey,
	poolCoinTokenAccount solana.PublicKey,
	poolPcTokenAccount solana.PublicKey,
	serumProgramId solana.PublicKey,
	serumMarket solana.PublicKey,
	serumBids solana.PublicKey,
	serumAsks solana.PublicKey,
	serumEventQueue solana.PublicKey,
	serumCoinVaultAccount solana.PublicKey,
	serumPcVaultAccount solana.PublicKey,
	serumVaultSigner solana.PublicKey,
	userSourceTokenAccount solana.PublicKey,
	userDestTokenAccount solana.PublicKey,
	userOwner solana.PublicKey,
) *RaySwapInstruction {
	metas := solana.AccountMetaSlice{
		solana.Meta(tokenProgram),
		solana.Meta(ammId).WRITE(),
		solana.Meta(ammAuthority),
		solana.Meta(ammOpenOrders).WRITE(),
		solana.Meta(ammTargetOrders).WRITE(),
		solana.Meta(poolCoinTokenAccount).WRITE(),
		solana.Meta(poolPcTokenAccount).WRITE(),
		solana.Meta(serumProgramId),
		solana.Meta(serumMarket).WRITE(),
		solana.Meta(serumBids).WRITE(),
		solana.Meta(serumAsks).WRITE(),
		solana.Meta(serumEventQueue).WRITE(),
		solana.Meta(serumCoinVaultAccount).WRITE(),
		solana.Meta(serumPcVaultAccount).WRITE(),
		solana.Meta(serumVaultSigner),
		solana.Meta(userSourceTokenAccount).WRITE(),
		solana.Meta(userDestTokenAccount).WRITE(),
		solana.Meta(userOwner).SIGNER(),
	}
	inst := RaySwapInstruction{
		InAmount:         inAmount,
		MinimumOutAmount: minimumOutAmount,
		AccountMetaSlice: metas,
	}
	// Impl stores a copy of the populated instruction so Accounts() can
	// recover the account list through the AccountsGettable interface.
	inst.BaseVariant = bin.BaseVariant{Impl: inst}
	return &inst
}
|
package collection
import (
"math"
"github.com/tidwall/tile38/internal/object"
)
// geodeticDistAlgo returns a box-distance function for geodetic (lon/lat)
// coordinates, measuring metres from center to a rectangle (or to an
// item's own bounding rect when item is true). Input arrays are ordered
// [lon, lat]; the result is scaled by the mean Earth radius.
func geodeticDistAlgo(center [2]float64) (
	algo func(min, max [2]float64, obj *object.Object, item bool) (dist float64),
) {
	const earthRadius = 6371e3
	return func(min, max [2]float64, obj *object.Object, item bool) float64 {
		if item {
			// Use the object's own bounding rectangle instead of the
			// caller-supplied box.
			bounds := obj.Rect()
			min = [2]float64{bounds.Min.X, bounds.Min.Y}
			max = [2]float64{bounds.Max.X, bounds.Max.Y}
		}
		// Swap to (lat, lon) ordering expected by the rect-distance helper.
		return earthRadius * pointRectDistGeodeticDeg(
			center[1], center[0],
			min[1], min[0],
			max[1], max[0],
		)
	}
}
// pointRectDistGeodeticDeg converts its degree arguments to radians and
// delegates to pointRectDistGeodeticRad.
func pointRectDistGeodeticDeg(pLat, pLng, minLat, minLng, maxLat, maxLng float64) float64 {
	toRad := func(deg float64) float64 { return deg * math.Pi / 180 }
	return pointRectDistGeodeticRad(
		toRad(pLat), toRad(pLng),
		toRad(minLat), toRad(minLng),
		toRad(maxLat), toRad(maxLng),
	)
}
// pointRectDistGeodeticRad returns the angular distance, in radians on the
// unit sphere, from the query point (φq, λq) to the geodetic rectangle with
// low corner (φl, λl) and high corner (φh, λh). φ are latitudes, λ are
// longitudes, all in radians. Multiply by a sphere radius to get a length.
func pointRectDistGeodeticRad(φq, λq, φl, λl, φh, λh float64) float64 {
	// Algorithm from:
	// Schubert, E., Zimek, A., & Kriegel, H.-P. (2013).
	// Geodetic Distance Queries on R-Trees for Indexing Geographic Data.
	// Lecture Notes in Computer Science, 146–164.
	// doi:10.1007/978-3-642-40235-7_9
	const (
		twoΠ  = 2 * math.Pi
		halfΠ = math.Pi / 2
	)
	// distance on the unit sphere computed using Haversine formula
	distRad := func(φa, λa, φb, λb float64) float64 {
		if φa == φb && λa == λb {
			return 0
		}
		Δφ := φa - φb
		Δλ := λa - λb
		sinΔφ := math.Sin(Δφ / 2)
		sinΔλ := math.Sin(Δλ / 2)
		cosφa := math.Cos(φa)
		cosφb := math.Cos(φb)
		return 2 * math.Asin(math.Sqrt(sinΔφ*sinΔφ+sinΔλ*sinΔλ*cosφa*cosφb))
	}
	// Simple case, point or invalid rect: treat as point-to-point distance.
	if φl >= φh && λl >= λh {
		return distRad(φl, λl, φq, λq)
	}
	if λl <= λq && λq <= λh {
		// q is between the bounding meridians of r
		// hence, q is north, south or within r
		if φl <= φq && φq <= φh { // Inside
			return 0
		}
		if φq < φl { // South: along a meridian the angular distance is the
			// plain latitude difference.
			return φl - φq
		}
		return φq - φh // North
	}
	// determine if q is closer to the east or west edge of r to select edge for
	// tests below (deltas are normalized into [0, 2π))
	Δλe := λl - λq
	Δλw := λq - λh
	if Δλe < 0 {
		Δλe += twoΠ
	}
	if Δλw < 0 {
		Δλw += twoΠ
	}
	var Δλ float64    // longitudinal distance to closest edge
	var λedge float64 // longitude of closest edge
	if Δλe <= Δλw {
		Δλ = Δλe
		λedge = λl
	} else {
		Δλ = Δλw
		λedge = λh
	}
	sinΔλ, cosΔλ := math.Sincos(Δλ)
	tanφq := math.Tan(φq)
	if Δλ >= halfΠ {
		// If Δλ > 90 degrees (1/2 pi in radians) we're in one of the corners
		// (NW/SW or NE/SE depending on the edge selected). Compare against the
		// center line to decide which case we fall into
		φmid := (φh + φl) / 2
		if tanφq >= math.Tan(φmid)*cosΔλ {
			return distRad(φq, λq, φh, λedge) // North corner
		}
		return distRad(φq, λq, φl, λedge) // South corner
	}
	if tanφq >= math.Tan(φh)*cosΔλ {
		return distRad(φq, λq, φh, λedge) // North corner
	}
	if tanφq <= math.Tan(φl)*cosΔλ {
		return distRad(φq, λq, φl, λedge) // South corner
	}
	// We're to the East or West of the rect, compute distance using cross-track
	// Note that this is a simplification of the cross track distance formula
	// valid since the track in question is a meridian.
	return math.Asin(math.Cos(φq) * sinΔλ)
}
|
package main
import (
"fmt"
)
// TZ is a named integer type used to demonstrate defining methods on a
// non-struct type.
type TZ int

// A is a minimal struct used to demonstrate pointer-receiver mutation.
type A struct {
	name string // set by Print
}
// main demonstrates that Go automatically takes the address of addressable
// values when calling pointer-receiver methods: Print mutates a, and
// Increase mutates tz, with both changes visible afterwards.
func main() {
	var a A
	a.Print()
	fmt.Println(a.name)
	var tz TZ
	tz.Increase(100)
	fmt.Println(tz)
}
// Print overwrites the receiver's name with "122" (visible to the caller
// because of the pointer receiver) and prints "A" to stdout.
func (a *A) Print() {
	a.name = "122"
	fmt.Println("A")
}
// Increase adds num to the receiver in place.
func (tz *TZ) Increase(num int) {
	*tz = *tz + TZ(num)
}
|
package tasks_test
import (
"testing"
"github.com/stretchr/testify/assert"
"go.ua-ecm.com/chaki/tasks"
)
// TestValidate exercises Task.Validate against a schema containing a single
// pattern-constrained string property, covering valid and invalid inputs.
func TestValidate(t *testing.T) {
	assert := assert.New(t)
	task := &tasks.Task{
		Schema: map[string]interface{}{
			"properties": map[string]interface{}{
				"number": map[string]interface{}{
					"title":   "Order Number",
					"type":    "string",
					"pattern": "[0-9]+",
				},
			},
		},
	}
	cases := []struct {
		number string
		valid  bool
	}{
		{"123", true},
		{"abc", false},
		{"", false},
	}
	for _, c := range cases {
		data := map[string]interface{}{
			"number": c.number,
		}
		err := task.Validate(data)
		if !c.valid {
			// Invalid input must surface as a *tasks.ValidationError.
			_, ok := err.(*tasks.ValidationError)
			assert.True(ok, "case %q: expected *ValidationError, got %v", c.number, err)
			// BUG FIX: this was `return`, which aborted the test after the
			// first invalid case and silently skipped the remaining table
			// entries. `continue` runs every case.
			continue
		}
		assert.NoError(err, "case %q should validate", c.number)
	}
}
|
package orm
import (
"database/sql"
"errors"
)
// Find runs query with args against db and scans all resulting rows into
// result, which must be a pointer to a slice (e.g. &[]struct or &[]*struct).
// It returns the number of scanned rows, the query/scan error, and the
// error (if any) from closing the row set — the rows are always closed,
// even when the scan fails.
func Find(db *sql.DB, result interface{}, query string, args ...interface{}) (affectedRows int64, err error, closeErr error) {
	if db == nil {
		return 0, errors.New("db can't be nil"), nil
	}
	rows, queryErr := db.Query(query, args...)
	err = queryErr
	if err == nil {
		affectedRows, err = Scan(rows, result)
	}
	if rows != nil {
		closeErr = rows.Close()
	}
	return affectedRows, err, closeErr
}
|
package discountsrv
import (
"context"
"github.com/amanbolat/furutsu/datastore"
"github.com/amanbolat/furutsu/internal/cart"
"github.com/amanbolat/furutsu/internal/discount"
)
// Service provides discount operations backed by a datastore repository.
type Service struct {
	repo datastore.Repository
}

// NewService returns a Service using repo for persistence.
func NewService(repo datastore.Repository) *Service {
	return &Service{repo: repo}
}
// ApplyDiscounts loads every stored discount, augments the list with the
// usable coupons attached to the cart (non-nil, non-zero percentage, not
// expired), and returns a new cart with all of them applied.
func (s Service) ApplyDiscounts(c cart.Cart, ctx context.Context) (cart.Cart, error) {
	store := datastore.NewDiscountDataStore(s.repo)
	discounts, err := store.ListDiscounts(ctx)
	if err != nil {
		return cart.Cart{}, err
	}
	for _, coupon := range c.Coupons {
		// Skip unusable coupons up front.
		// TODO: may be we should consider to put the check of expiration time
		// into the database because of the different timezones
		if coupon == nil || coupon.GetPercentage() == 0 || coupon.IsExpired() {
			continue
		}
		concrete, ok := coupon.(discount.Coupon)
		if !ok {
			continue
		}
		discounts = append(discounts, discount.Discount{
			Name:    concrete.Name,
			Rule:    concrete.Rule,
			Percent: concrete.Percent,
		})
	}
	return ApplyDiscountsToCart(c, discounts), nil
}
// ApplyDiscountsToCart applies every given discount (including any derived
// from coupons) to the cart's items and returns the resulting cart. Items a
// discount consumes are removed from the working pool so each item is
// claimed by at most one discount set; whatever remains lands in the
// cart's non-discounted set.
func ApplyDiscountsToCart(c cart.Cart, discounts []discount.Discount) cart.Cart {
	remaining := make(map[string]cart.Item, len(c.Items))
	for id, item := range c.Items {
		remaining[id] = item
	}
	sets := make([]cart.ItemsSet, 0, len(discounts))
	for _, d := range discounts {
		set, rest := d.GetDiscountSetFor(remaining)
		if set == nil {
			// This discount matched nothing; the pool is unchanged.
			continue
		}
		remaining = rest
		sets = append(sets, *set)
	}
	c.DiscountSets = sets
	c.NonDiscountSet.Set = make(map[string]int, len(remaining))
	c.NonDiscountSet.DiscountPercent = 0
	for id, item := range remaining {
		c.NonDiscountSet.Set[id] = item.Amount
	}
	return c
}
|
/*
// Authors:
Rajagopalan Ranganathan (rajagopalan.ranganthan@aalto.fi)
Sunil Kumar Mohanty (sunil.mohanty@aalto.fi)
The following source code, has been created for academic purpose to experiment and use a
custom Kubernetes container scheduler logic.
It iterates through the PODs and assigns a Node to POD based on the custom logic
*/
package main
import (
"bytes"
"encoding/json"
"fmt"
"math/rand"
"net/http"
"os/exec"
"time"
)
/*
PODS Json structure
It maps the JSON of POD JSON to a strcuture
*/
// Pods mirrors the JSON produced by `kubectl get pods -o json` (a v1
// PodList). The scheduler only reads Items[].Metadata.Name, the custom
// Items[].Metadata.Labels.Network / .Category labels, and
// Items[].Spec.SchedulerName / .NodeName; the other fields exist so the
// whole document unmarshals cleanly.
type Pods struct {
	APIVersion string `json:"apiVersion"`
	Items      []struct {
		APIVersion string `json:"apiVersion"`
		Kind       string `json:"kind"`
		Metadata   struct {
			Annotations struct {
				KubernetesIoCreatedBy                 string `json:"kubernetes.io/created-by"`
				SchedulerAlphaKubernetesIoCriticalPod string `json:"scheduler.alpha.kubernetes.io/critical-pod"`
			} `json:"annotations"`
			CreationTimestamp time.Time `json:"creationTimestamp"`
			GenerateName      string    `json:"generateName"`
			Labels            struct {
				K8SApp                string `json:"k8s-app"`
				PodTemplateGeneration string `json:"pod-template-generation"`
				// Network and Category are the custom placement labels this
				// scheduler matches against node labels.
				Network  string `json:"network"`
				Category string `json:"category"`
			} `json:"labels"`
			Name            string `json:"name"`
			Namespace       string `json:"namespace"`
			OwnerReferences []struct {
				APIVersion         string `json:"apiVersion"`
				BlockOwnerDeletion bool   `json:"blockOwnerDeletion"`
				Controller         bool   `json:"controller"`
				Kind               string `json:"kind"`
				Name               string `json:"name"`
				UID                string `json:"uid"`
			} `json:"ownerReferences"`
			ResourceVersion string `json:"resourceVersion"`
			SelfLink        string `json:"selfLink"`
			UID             string `json:"uid"`
		} `json:"metadata"`
		Spec struct {
			Containers []struct {
				Args    []string `json:"args"`
				Command []string `json:"command"`
				Env     []struct {
					Name      string `json:"name"`
					ValueFrom struct {
						FieldRef struct {
							APIVersion string `json:"apiVersion"`
							FieldPath  string `json:"fieldPath"`
						} `json:"fieldRef"`
					} `json:"valueFrom"`
				} `json:"env"`
				Image           string `json:"image"`
				ImagePullPolicy string `json:"imagePullPolicy"`
				Name            string `json:"name"`
				Resources       struct {
				} `json:"resources"`
				TerminationMessagePath   string `json:"terminationMessagePath"`
				TerminationMessagePolicy string `json:"terminationMessagePolicy"`
				VolumeMounts             []struct {
					MountPath string `json:"mountPath"`
					Name      string `json:"name"`
					ReadOnly  bool   `json:"readOnly,omitempty"`
				} `json:"volumeMounts"`
			} `json:"containers"`
			DNSPolicy   string `json:"dnsPolicy"`
			HostNetwork bool   `json:"hostNetwork"`
			// NodeName is empty until some scheduler binds the pod.
			NodeName     string `json:"nodeName"`
			NodeSelector struct {
				NodeRoleKubernetesIoMaster string `json:"node-role.kubernetes.io/master"`
			} `json:"nodeSelector"`
			RestartPolicy string `json:"restartPolicy"`
			// SchedulerName selects the responsible scheduler; this program
			// only handles pods whose value is "my-scheduler".
			SchedulerName   string `json:"schedulerName"`
			SecurityContext struct {
			} `json:"securityContext"`
			ServiceAccount                string `json:"serviceAccount"`
			ServiceAccountName            string `json:"serviceAccountName"`
			TerminationGracePeriodSeconds int    `json:"terminationGracePeriodSeconds"`
			Tolerations                   []struct {
				Effect   string `json:"effect,omitempty"`
				Key      string `json:"key"`
				Operator string `json:"operator,omitempty"`
			} `json:"tolerations"`
			Volumes []struct {
				HostPath struct {
					Path string `json:"path"`
				} `json:"hostPath,omitempty"`
				Name   string `json:"name"`
				Secret struct {
					DefaultMode int    `json:"defaultMode"`
					SecretName  string `json:"secretName"`
				} `json:"secret,omitempty"`
			} `json:"volumes"`
		} `json:"spec"`
		Status struct {
			Conditions []struct {
				LastProbeTime      interface{} `json:"lastProbeTime"`
				LastTransitionTime time.Time   `json:"lastTransitionTime"`
				Status             string      `json:"status"`
				Type               string      `json:"type"`
			} `json:"conditions"`
			ContainerStatuses []struct {
				ContainerID string `json:"containerID"`
				Image       string `json:"image"`
				ImageID     string `json:"imageID"`
				LastState   struct {
				} `json:"lastState"`
				Name         string `json:"name"`
				Ready        bool   `json:"ready"`
				RestartCount int    `json:"restartCount"`
				State        struct {
					Running struct {
						StartedAt time.Time `json:"startedAt"`
					} `json:"running"`
				} `json:"state"`
			} `json:"containerStatuses"`
			HostIP    string    `json:"hostIP"`
			Phase     string    `json:"phase"`
			PodIP     string    `json:"podIP"`
			QosClass  string    `json:"qosClass"`
			StartTime time.Time `json:"startTime"`
		} `json:"status"`
	} `json:"items"`
	Kind     string `json:"kind"`
	Metadata struct {
	} `json:"metadata"`
	ResourceVersion string `json:"resourceVersion"`
	SelfLink        string `json:"selfLink"`
}
/*
Nodes Json structure
It maps the JSON of Node JSON to a strcuture
*/
// Nodes mirrors the JSON produced by `kubectl get nodes -o json` (a v1
// NodeList). The scheduler reads Items[].Metadata.Name and the custom
// Items[].Metadata.Labels.Network / .Category placement labels; the rest
// exists so the whole document unmarshals cleanly.
type Nodes struct {
	APIVersion string `json:"apiVersion"`
	Items      []struct {
		APIVersion string `json:"apiVersion"`
		Kind       string `json:"kind"`
		Metadata   struct {
			Annotations struct {
				NodeAlphaKubernetesIoTTL                         string `json:"node.alpha.kubernetes.io/ttl"`
				VolumesKubernetesIoControllerManagedAttachDetach string `json:"volumes.kubernetes.io/controller-managed-attach-detach"`
			} `json:"annotations"`
			CreationTimestamp time.Time `json:"creationTimestamp"`
			Labels            struct {
				BetaKubernetesIoArch string `json:"beta.kubernetes.io/arch"`
				BetaKubernetesIoOs   string `json:"beta.kubernetes.io/os"`
				KubernetesIoHostname string `json:"kubernetes.io/hostname"`
				// Network and Category are matched against the same-named
				// pod labels when selecting a node.
				Network  string `json:"network"`
				Category string `json:"category"`
			} `json:"labels"`
			Name            string `json:"name"`
			Namespace       string `json:"namespace"`
			ResourceVersion string `json:"resourceVersion"`
			SelfLink        string `json:"selfLink"`
			UID             string `json:"uid"`
		} `json:"metadata"`
		Spec struct {
			ExternalID string `json:"externalID"`
		} `json:"spec"`
		Status struct {
			Addresses []struct {
				Address string `json:"address"`
				Type    string `json:"type"`
			} `json:"addresses"`
			Allocatable struct {
				CPU    string `json:"cpu"`
				Memory string `json:"memory"`
				Pods   string `json:"pods"`
			} `json:"allocatable"`
			Capacity struct {
				CPU    string `json:"cpu"`
				Memory string `json:"memory"`
				Pods   string `json:"pods"`
			} `json:"capacity"`
			Conditions []struct {
				LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
				LastTransitionTime time.Time `json:"lastTransitionTime"`
				Message            string    `json:"message"`
				Reason             string    `json:"reason"`
				Status             string    `json:"status"`
				Type               string    `json:"type"`
			} `json:"conditions"`
			DaemonEndpoints struct {
				KubeletEndpoint struct {
					Port int `json:"Port"`
				} `json:"kubeletEndpoint"`
			} `json:"daemonEndpoints"`
			Images []struct {
				Names     []string `json:"names"`
				SizeBytes int      `json:"sizeBytes"`
			} `json:"images"`
			NodeInfo struct {
				Architecture            string `json:"architecture"`
				BootID                  string `json:"bootID"`
				ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
				KernelVersion           string `json:"kernelVersion"`
				KubeProxyVersion        string `json:"kubeProxyVersion"`
				KubeletVersion          string `json:"kubeletVersion"`
				MachineID               string `json:"machineID"`
				OperatingSystem         string `json:"operatingSystem"`
				OsImage                 string `json:"osImage"`
				SystemUUID              string `json:"systemUUID"`
			} `json:"nodeInfo"`
		} `json:"status"`
	} `json:"items"`
	Kind     string `json:"kind"`
	Metadata struct {
	} `json:"metadata"`
	ResourceVersion string `json:"resourceVersion"`
	SelfLink        string `json:"selfLink"`
}
// MyJsonName is a sample JSON-to-struct mapping. NOTE(review): it appears
// unused by the scheduler logic in this file — candidate for removal.
type MyJsonName struct {
	Example struct {
		From struct {
			Json bool `json:"json"`
		} `json:"from"`
	} `json:"example"`
}
/*
Main function
Runs forever: performs one scheduling pass (schedulePods) per second.
*/
func main() {
	for {
		schedulePods()
		time.Sleep(time.Second)
	}
}
/*
@Function name: postbind
@Paramin: podname, nodename string
@return : none, panics during error
@desc: prepares a JSON string with the Node and POD names and sends the binding request to the master.
Binds a POD to a NODE.
*/
func postbind(podname, nodename string) {
	url := fmt.Sprintf("http://localhost:8001/api/v1/namespaces/default/pods/%s/binding", podname)
	fmt.Println("URL:>", url)
	var jsonStr = []byte(`{"apiVersion":"v1", "kind": "Binding", "metadata": {"name": "` + podname + `"}, "target": {"apiVersion": "v1", "kind": "Node", "name": "` + nodename + `"}}`)
	fmt.Println(string(jsonStr))
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
	// BUG FIX: the error from NewRequest was previously ignored; a nil
	// request would have crashed inside client.Do with a confusing trace.
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Custom-Header", "myvalue")
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
/*
@Function name: schedulePods
@Paramin: <none>
@return : none, prints error
@desc: Iterate through the PODs and get POD name, category and network fields and call the assign_node_to_pod function
*/
func schedulePods() {
	app := "kubectl"
	arg0 := "get"
	arg1 := "pods"
	arg2 := "-o"
	arg3 := "json"
	cmd := exec.Command(app, "--server", "localhost:8001", arg0, arg1, arg2, arg3)
	pods_byte, err := cmd.Output()
	if err != nil {
		println(err.Error())
		return
	}
	var pods_json Pods
	// BUG FIX: the unmarshal error was silently discarded; malformed
	// kubectl output would have proceeded with a zero-value Pods struct.
	if err := json.Unmarshal(pods_byte, &pods_json); err != nil {
		println(err.Error())
		return
	}
	for _, elem := range pods_json.Items {
		// Only handle pods assigned to this custom scheduler that do not
		// have a node yet.
		if elem.Spec.SchedulerName == "my-scheduler" && len(elem.Spec.NodeName) == 0 {
			assign_node_to_pod(elem.Metadata.Name, elem.Metadata.Labels.Category, elem.Metadata.Labels.Network)
		}
	}
}
/*
@Function name: assign_node_to_pod
@Paramin: podname, category, network string
@return : none, prints error
@desc: Iterate through the Nodes and select a suitable node for the received POD. Check for the labels Category and network and assign
the Node. A random node is picked among the matching candidates.
*/
func assign_node_to_pod(podname, category, network string) {
	app := "kubectl"
	arg0 := "get"
	arg1 := "nodes"
	arg2 := "-o"
	arg3 := "json"
	nodename := ""
	cmd := exec.Command(app, "--server", "localhost:8001", arg0, arg1, arg2, arg3)
	nodes_byte, err := cmd.Output()
	if err != nil {
		println(err.Error())
		return
	}
	var nodes_json Nodes
	// BUG FIX: the unmarshal error was silently discarded.
	if err := json.Unmarshal(nodes_byte, &nodes_json); err != nil {
		println(err.Error())
		return
	}
	var avl_nodes []string
	for _, elem := range nodes_json.Items {
		// When the pod has no constraint for a label, blank out the node's
		// value so the comparison below treats it as a wildcard match.
		if len(network) == 0 {
			elem.Metadata.Labels.Network = ""
		}
		if len(category) == 0 {
			elem.Metadata.Labels.Category = ""
		}
		if network == elem.Metadata.Labels.Network && category == elem.Metadata.Labels.Category {
			avl_nodes = append(avl_nodes, elem.Metadata.Name)
		}
	}
	// BUG FIX: rand.Intn(0) panics, so bail out before indexing when no
	// node satisfies the pod's constraints (the old length check came too
	// late, after the panicking index expression).
	if len(avl_nodes) == 0 {
		println("no suitable node found for pod " + podname)
		return
	}
	nodename = avl_nodes[rand.Intn(len(avl_nodes))]
	postbind(podname, nodename)
}
|
package observer
import "fmt"
// concretePublisher is the default Publisher implementation: a flat,
// ordered list of subscribed observers.
type concretePublisher struct {
	observers []Observer
}
// Attach pins an Observer to the publisher (implements Publisher interface).
// Observers are notified in attachment order; duplicates are not rejected.
func (p *concretePublisher) Attach(obs Observer) {
	p.observers = append(p.observers, obs)
}
// Notify invokes Update on every attached observer, in attachment order
// (implements Publisher interface).
func (p *concretePublisher) Notify() {
	for i := range p.observers {
		p.observers[i].Update()
	}
}
// Show prints each attached observer to stdout, one per line
// (implements Publisher interface).
func (p *concretePublisher) Show() {
	for i := range p.observers {
		fmt.Println(p.observers[i])
	}
}
// Unpin detaches every occurrence of observer from the Publisher
// (implements Publisher interface).
//
// BUG FIX: the original removed elements while ranging over the slice it
// was mutating; range keeps the starting length, so after each removal the
// loop read shifted/stale entries and could skip or mis-compare adjacent
// matches. Filtering into the slice's own backing array is correct and
// allocation-free.
func (p *concretePublisher) Unpin(observer Observer) {
	kept := p.observers[:0]
	for _, obs := range p.observers {
		if obs != observer {
			kept = append(kept, obs)
		}
	}
	p.observers = kept
}
// NewPublisher returns an empty concretePublisher as a Publisher.
func NewPublisher() Publisher {
	return &concretePublisher{}
}
|
/*
* @lc app=leetcode.cn id=704 lang=golang
*
* [704] 二分查找
*/
// @lc code=start
package main
import "fmt"
// search performs an iterative binary search over the sorted slice nums and
// returns the index of target, or -1 when it is absent. Runs in O(log n).
func search(nums []int, target int) int {
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		// Midpoint written to avoid overflow of lo+hi.
		mid := lo + (hi-lo)/2
		switch {
		case nums[mid] == target:
			return mid
		case nums[mid] < target:
			lo = mid + 1
		default:
			hi = mid - 1
		}
	}
	return -1
}
// @lc code=end
// main exercises search against the sample slice with a range of targets,
// including repeated and absent values, printing each result.
func main() {
	nums := []int{-1, 0, 3, 5, 9, 12}
	for _, target := range []int{-1, 12, 5, -1, 13, -2} {
		fmt.Printf("%v, %d, %d\n", nums, target, search(nums, target))
	}
}
|
package main
import (
"fmt"
)
func main() {
f2(f1())
}
// f1 returns a fixed sample triple (int, string, float32) used to
// demonstrate multi-value forwarding.
func f1() (int, string, float32) {
	const (
		number = 0
		word   = "xzy"
	)
	return number, word, 3.14
}
// f2 prints its three arguments on one line, space-separated. The third
// parameter is an empty interface so any value can be forwarded into it
// (here it receives f1's float32).
func f2(a int, b string, c interface{}) {
	fmt.Println(a, b, c)
}
|
package LeetCode
import "math"
// SortArrayByParityII returns a new slice in which even values occupy even
// indices and odd values occupy odd indices, preserving the relative order
// of the evens and of the odds. Per LeetCode 922 the input contains equal
// counts of even and odd values; if it does not, the write indices run past
// the result and this panics. A itself is not modified.
func SortArrayByParityII(A []int) []int {
	_ = math.Mod // keeps the file-level "math" import referenced after the fix below
	result := make([]int, len(A))
	evenIdx, oddIdx := 0, 1
	for _, v := range A {
		// BUG/IDIOM FIX: parity was computed through float64 math.Mod;
		// the integer remainder operator is exact and avoids the
		// int→float64→int round trip. (Both give a non-zero remainder for
		// negative odd values, so behavior is unchanged.)
		if v%2 == 0 {
			result[evenIdx] = v
			evenIdx += 2
		} else {
			result[oddIdx] = v
			oddIdx += 2
		}
	}
	return result
}
// Copyright (C) 2016 Lukas Lalinsky
// Distributed under the MIT license, see the LICENSE file for details.
package main
import (
"bufio"
"encoding/binary"
"flag"
"fmt"
"github.com/acoustid/go-acoustid/util"
"github.com/pkg/errors"
"io"
"log"
"os"
)
// readBlockIndex loads the segment index file: a packed sequence of
// big-endian uint32 values read until EOF. A truncated trailing value
// surfaces as io.ErrUnexpectedEOF from binary.Read.
func readBlockIndex(name string) ([]uint32, error) {
	file, err := os.Open(name)
	if err != nil {
		return nil, errors.Wrap(err, "open failed")
	}
	defer file.Close()
	var index []uint32
	for {
		var value uint32
		if err := binary.Read(file, binary.BigEndian, &value); err != nil {
			if err == io.EOF {
				// Clean end of file: the whole index has been read.
				return index, nil
			}
			return nil, err
		}
		index = append(index, value)
	}
}
// readData reads fixed-size blocks from the segment data file and prints one
// "term docID" line per item to stdout.
//
// Each block begins with a big-endian uint16 item count followed by
// varint-encoded values. The block's starting term comes from blockIndex;
// for every item after the first, a term delta precedes the docID delta,
// and a non-zero term delta resets the docID accumulator.
// NOTE(review): the final `term>>4` suggests the low 4 bits of the stored
// term carry auxiliary data — confirm against the segment writer.
func readData(name string, blockSize int, blockIndex []uint32) error {
	file, err := os.Open(name)
	if err != nil {
		return errors.Wrap(err, "open failed")
	}
	defer file.Close()
	buf := make([]byte, blockSize)
	output := bufio.NewWriter(os.Stdout)
	for _, term := range blockIndex {
		// Each index entry corresponds to exactly one fixed-size block.
		_, err := io.ReadFull(file, buf)
		if err != nil {
			if err == io.EOF {
				// A short file is corruption, not a clean end.
				err = io.ErrUnexpectedEOF
			}
			return errors.Wrap(err, "read failed")
		}
		numItems := int(binary.BigEndian.Uint16(buf))
		ptr := 2
		var docID uint32
		for j := 0; j < numItems; j++ {
			if j > 0 {
				// Term delta (absent for the first item, whose term comes
				// from the block index).
				delta, n := util.Uvarint32(buf[ptr:])
				if n < 0 {
					return errors.New("error while parsing block data")
				}
				if delta > 0 {
					// New term within the block: docIDs restart from zero.
					docID = 0
				}
				term += delta
				ptr += n
			}
			// docID delta for the current term.
			delta, n := util.Uvarint32(buf[ptr:])
			if n < 0 {
				return errors.New("error while parsing block data")
			}
			docID += delta
			ptr += n
			_, err := fmt.Fprintf(output, "%d %d\n", term>>4, docID)
			if err != nil {
				return errors.New("error while writing output")
			}
		}
	}
	// Flush the buffered writer so all lines actually reach stdout.
	err = output.Flush()
	if err != nil {
		return errors.New("error while writing output")
	}
	return nil
}
// main parses the -d (data file), -i (index file) and -b (block size) flags,
// loads the block index, then dumps every (term, docID) pair from the data
// file to stdout. Exits fatally on missing flags or read errors.
func main() {
	var (
		dataFilename  = flag.String("d", "", "segment data file to dump")
		indexFilename = flag.String("i", "", "segment index file to dump")
		blockSize     = flag.Int("b", 512, "block size")
	)
	flag.Parse()
	if *dataFilename == "" || *indexFilename == "" {
		log.Fatal("no input file")
	}
	blockIndex, err := readBlockIndex(*indexFilename)
	if err != nil {
		log.Fatalf("error while reading index: %v", err)
	}
	err = readData(*dataFilename, *blockSize, blockIndex)
	if err != nil {
		log.Fatalf("error while reading data: %v", err)
	}
}
|
package equinix
import (
"fmt"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const tstL2SellerProfileEnvVar = "TF_ACC_ECX_SELLER_PROFILE_NAME"
// TestAccECXL2SellerProfile is an acceptance test for the
// equinix_ecx_l2_sellerprofile data source. The looked-up profile name can
// be overridden via the TF_ACC_ECX_SELLER_PROFILE_NAME environment
// variable and defaults to "AWS Direct Connect".
func TestAccECXL2SellerProfile(t *testing.T) {
	t.Parallel()
	// NOTE(review): the error from EnvDefaultFunc is intentionally ignored —
	// a default value is always supplied.
	profileName, _ := schema.EnvDefaultFunc(tstL2SellerProfileEnvVar, "AWS Direct Connect")()
	context := map[string]interface{}{
		"resourceName": "tf-aws",
		"name":         profileName,
	}
	resourceName := fmt.Sprintf("data.equinix_ecx_l2_sellerprofile.%s", context["resourceName"].(string))
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccECXL2SellerProfile(context),
				// Assert that every computed attribute gets populated.
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttrSet(resourceName, "uuid"),
					resource.TestCheckResourceAttrSet(resourceName, "description"),
					resource.TestCheckResourceAttrSet(resourceName, "speed_from_api"),
					resource.TestCheckResourceAttrSet(resourceName, "speed_customization_allowed"),
					resource.TestCheckResourceAttrSet(resourceName, "redundancy_required"),
					resource.TestCheckResourceAttrSet(resourceName, "encapsulation"),
					resource.TestCheckResourceAttrSet(resourceName, "organization_name"),
				),
			},
		},
	})
}
// testAccECXL2SellerProfile renders the data-source HCL configuration for
// the given template context (expects keys: resourceName, name).
func testAccECXL2SellerProfile(ctx map[string]interface{}) string {
	return nprintf(`
data "equinix_ecx_l2_sellerprofile" "%{resourceName}" {
name = "%{name}"
}
`, ctx)
}
|
package auth
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01000101 is the XML document wrapper for the ISO 20022
// auth.010.001.01 message (RegulatoryTransactionReportStatus).
type Document01000101 struct {
	XMLName xml.Name                              `xml:"urn:iso:std:iso:20022:tech:xsd:auth.010.001.01 Document"`
	Message *RegulatoryTransactionReportStatusV01 `xml:"RgltryTxRptStsV01"`
}
// AddMessage allocates the document's message payload and returns it so the
// caller can populate it in place.
func (d *Document01000101) AddMessage() *RegulatoryTransactionReportStatusV01 {
	d.Message = new(RegulatoryTransactionReportStatusV01)
	return d.Message
}
// Scope
// A regulator or an intermediary sends the RegulatoryTransactionReportStatus to a reporting institution to provide the status of a RegulatoryTransactionReport previously sent by the reporting institution.
// Usage
// The message definition may be used to provide a status for the entire report or to provide a status at the level of individual transactions within the report. One of the following statuses can be reported:
// - Completed, or,
// - Pending, or,
// - Rejected.
// If the status is rejected, then reason for the rejection must be specified.
type RegulatoryTransactionReportStatusV01 struct {
	// Identification of the RegulatoryTransactionReportStatus document.
	Identification *iso20022.DocumentIdentification8 `xml:"Id"`
	// Identification of the firm that is legally responsible for sending the transaction report.
	ReportingInstitution *iso20022.PartyIdentification23Choice `xml:"RptgInstn"`
	// Provides the status of the entire RegulatoryTransactionReport that was previously sent by the reporting institution.
	ReportStatus *iso20022.ReportStatusAndReason1 `xml:"RptSts"`
	// Provides the status of one or more transactions that were previously sent within a RegulatoryTransactionReport by the reporting institution.
	// Populate via AddIndividualTransactionStatus; one entry per transaction.
	IndividualTransactionStatus []*iso20022.TradeTransactionStatusAndReason1 `xml:"IndvTxSts"`
}
// AddIdentification allocates and returns the document identification,
// ready for the caller to populate.
func (r *RegulatoryTransactionReportStatusV01) AddIdentification() *iso20022.DocumentIdentification8 {
	r.Identification = new(iso20022.DocumentIdentification8)
	return r.Identification
}

// AddReportingInstitution allocates and returns the reporting-institution
// party identification.
func (r *RegulatoryTransactionReportStatusV01) AddReportingInstitution() *iso20022.PartyIdentification23Choice {
	r.ReportingInstitution = new(iso20022.PartyIdentification23Choice)
	return r.ReportingInstitution
}

// AddReportStatus allocates and returns the report-level status.
func (r *RegulatoryTransactionReportStatusV01) AddReportStatus() *iso20022.ReportStatusAndReason1 {
	r.ReportStatus = new(iso20022.ReportStatusAndReason1)
	return r.ReportStatus
}

// AddIndividualTransactionStatus appends a new per-transaction status entry
// and returns it for population.
func (r *RegulatoryTransactionReportStatusV01) AddIndividualTransactionStatus() *iso20022.TradeTransactionStatusAndReason1 {
	newValue := new(iso20022.TradeTransactionStatusAndReason1)
	r.IndividualTransactionStatus = append(r.IndividualTransactionStatus, newValue)
	return newValue
}
|
package service
import (
"github.com/openshift/odo/pkg/odo/util/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Service holds all important information about one service instance
// (kubernetes-style resource with inline TypeMeta/ObjectMeta).
type Service struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              ServiceSpec   `json:"spec,omitempty"`
	Status            ServiceStatus `json:"status,omitempty"`
}

// ServiceSpec holds the selected service class type and plan.
type ServiceSpec struct {
	Type string `json:"type,omitempty"`
	Plan string `json:"plan,omitempty"`
}

// ServiceStatus reports the service's current status string.
type ServiceStatus struct {
	Status string `json:"status,omitempty"`
}

// ServiceClass holds the information regarding a service catalog service class
type ServiceClass struct {
	Name              string
	Bindable          bool
	ShortDescription  string
	LongDescription   string
	Tags              []string
	VersionsAvailable []string
	ServiceBrokerName string
}

// ServicePlanParameter holds the information regarding a service catalog plan parameter
type ServicePlanParameter struct {
	Name                   string `json:"name"`
	Title                  string `json:"title,omitempty"`
	Description            string `json:"description,omitempty"`
	Default                string `json:"default,omitempty"`
	validation.Validatable `json:",inline,omitempty"`
}

// ServiceList is a kubernetes-style list wrapper around Service items.
type ServiceList struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Items             []Service `json:"items"`
}

// ServicePlan holds the information about service catalog plans associated to service classes
// NOTE(review): Parameters uses the unexported servicePlanParameters type
// declared elsewhere in this package — presumably []ServicePlanParameter.
type ServicePlan struct {
	Name        string
	DisplayName string
	Description string
	Parameters  servicePlanParameters
}
|
package controllers_test
import (
"authentication/controllers"
"authentication/router"
"net/http"
"net/http/httptest"
"testing"
"authentication/models"
"bytes"
"encoding/json"
. "github.com/smartystreets/goconvey/convey"
)
// TestLoginRoute exercises POST /v1/login: valid credentials yield a JWT,
// malformed bodies yield 400, and wrong credentials yield 403.
// (The original comment said "TestStatusRoute", which did not match.)
func TestLoginRoute(t *testing.T) {
	db := models.SetupModels()
	var testUser = &models.User{Password: "super", Username: "tomcollins"}
	router := router.SetupRouter(db)
	db.Create(testUser)
	// Remove the fixture user and close the DB once the test finishes.
	defer func() {
		db.Delete(&testUser)
		db.Close()
	}()
	Convey("Creates a new JWT token", t, func() {
		creds, _ := json.Marshal(&controllers.Credentials{Username: "tomcollins", Password: "super"})
		w := httptest.NewRecorder()
		req, _ := http.NewRequest("POST", "/v1/login", bytes.NewBuffer(creds))
		router.ServeHTTP(w, req)
		So(w.Code, ShouldEqual, http.StatusOK)
		So(w.Header().Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
		var respJSON controllers.JWTResponse
		json.Unmarshal([]byte(w.Body.String()), &respJSON)
		So(respJSON.JWT, ShouldNotBeEmpty)
	})
	Convey("With a malformed request", t, func() {
		Convey("With no username", func() {
			withoutUsername, _ := json.Marshal(&controllers.Credentials{Password: "test"})
			w := httptest.NewRecorder()
			req, _ := http.NewRequest("POST", "/v1/login", bytes.NewBuffer(withoutUsername))
			router.ServeHTTP(w, req)
			So(w.Code, ShouldEqual, http.StatusBadRequest)
			So(w.Body.String(), ShouldContainSubstring, "Error:Field validation for 'Username'")
			So(w.Header().Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
		})
		Convey("With no password", func() {
			withoutUsername, _ := json.Marshal(&controllers.Credentials{Username: "test"})
			w := httptest.NewRecorder()
			req, _ := http.NewRequest("POST", "/v1/login", bytes.NewBuffer(withoutUsername))
			router.ServeHTTP(w, req)
			So(w.Code, ShouldEqual, http.StatusBadRequest)
			So(w.Body.String(), ShouldContainSubstring, "Error:Field validation for 'Password'")
			So(w.Header().Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
		})
	})
	Convey("With invalid username and password combination", t, func() {
		Convey("invalid username", func() {
			wrongUsername, _ := json.Marshal(&controllers.Credentials{Username: "wrongo", Password: "super"})
			w := httptest.NewRecorder()
			req, _ := http.NewRequest("POST", "/v1/login", bytes.NewBuffer(wrongUsername))
			router.ServeHTTP(w, req)
			So(w.Code, ShouldEqual, http.StatusForbidden)
			So(w.Body.String(), ShouldContainSubstring, "Invalid username and password combination")
			So(w.Header().Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
		})
		Convey("invalid password", func() {
			wrongPass, _ := json.Marshal(&controllers.Credentials{Username: "tomcollins", Password: "nope"})
			w := httptest.NewRecorder()
			req, _ := http.NewRequest("POST", "/v1/login", bytes.NewBuffer(wrongPass))
			router.ServeHTTP(w, req)
			So(w.Code, ShouldEqual, http.StatusForbidden)
			So(w.Body.String(), ShouldContainSubstring, "Invalid username and password combination")
			So(w.Header().Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
		})
	})
}
|
// Copyright 2016-2021, Pulumi Corporation.
package schema
import (
"regexp"
"testing"
"github.com/stretchr/testify/assert"
jsschema "github.com/lestrrat-go/jsschema"
"github.com/pulumi/pulumi/pkg/v3/codegen"
pschema "github.com/pulumi/pulumi/pkg/v3/codegen/schema"
)
// PropertyTypeSpecTestCase pairs a JSON Schema fragment with the Pulumi
// TypeSpec it is expected to convert into.
type PropertyTypeSpecTestCase struct {
	json     string
	expected pschema.TypeSpec
}
// TestPropertyTypeSpec converts JSON Schema fragments to Pulumi type specs
// and checks oneOf/anyOf collapsing, object-to-ref promotion, and
// $ref resolution against the Definitions table set up below.
func TestPropertyTypeSpec(t *testing.T) {
	cases := []PropertyTypeSpecTestCase{
		// oneOf/anyOf that only vary by "format" collapse to the base type.
		{
			json: `{
			"type": "string",
			"oneOf": [
				{"format": "date-time"},
				{"format": "timestamp"}
			]
		}`,
			expected: pschema.TypeSpec{Type: "string"},
		},
		{
			json: `{
			"type": "string",
			"anyOf": [
				{"format": "date-time"},
				{"format": "timestamp"}
			]
		}`,
			expected: pschema.TypeSpec{Type: "string"},
		},
		{
			json: `{
			"oneOf": [
				{"type": "string", "format": "date-time"},
				{"type": "string", "format": "timestamp"}
			]
		}`,
			expected: pschema.TypeSpec{Type: "string"},
		},
		{
			json: `{
			"anyOf": [
				{"type": "string", "format": "date-time"},
				{"type": "string", "format": "timestamp"}
			]
		}`,
			expected: pschema.TypeSpec{Type: "string"},
		},
		// Genuinely different alternatives stay as a OneOf list.
		{
			json: `{
			"oneOf": [
				{"type": "string"},
				{"type": "number"}
			]
		}`,
			expected: pschema.TypeSpec{
				OneOf: []pschema.TypeSpec{{Type: "string"}, {Type: "number"}},
			},
		},
		{
			json: `{
			"anyOf": [
				{"type": "string"},
				{"type": "number"}
			]
		}`,
			expected: pschema.TypeSpec{
				OneOf: []pschema.TypeSpec{{Type: "string"}, {Type: "number"}},
			},
		},
		// Inline objects are promoted to named types with numbered suffixes.
		{
			json: `{
			"anyOf": [
				{"type": "object", "properties": { "A": { "type": "number" } } },
				{"type": "object", "properties": { "A": { "type": "string" } } }
			]
		}`,
			expected: pschema.TypeSpec{
				OneOf: []pschema.TypeSpec{
					{Ref: "#/types/aws-native::Foo0Properties"},
					{Ref: "#/types/aws-native::Foo1Properties"},
				},
			},
		},
		// A property-less object maps to the untyped Any ref.
		{
			json: `{
			"anyOf": [
				{"type": "object", "properties": { "A": { "type": "number" } } },
				{"type": "object" },
				{"type": "string" }
			]
		}`,
			expected: pschema.TypeSpec{
				OneOf: []pschema.TypeSpec{
					{Ref: "#/types/aws-native::FooProperties"},
					{Ref: "pulumi.json#/Any"},
					{Type: "string"},
				},
			},
		},
		{
			json: `{
			"type": "object",
			"oneOf": [
				{"properties": { "A": { "type": "number" } } },
				{"properties": { "B": { "type": "number" } } }
			]
		}`,
			expected: pschema.TypeSpec{
				OneOf: []pschema.TypeSpec{
					{Ref: "#/types/aws-native::Foo0Properties"},
					{Ref: "#/types/aws-native::Foo1Properties"},
				},
			},
		},
		// $ref targets are resolved through ctx.resourceSpec.Definitions.
		{
			json: `{
			"$ref": "#/definitions/Obj"
		}`,
			expected: pschema.TypeSpec{Ref: "#/types/aws-native::Obj"},
		},
		{
			json: `{
			"$ref": "#/definitions/ObjLike1"
		}`,
			expected: pschema.TypeSpec{Ref: "#/types/aws-native::ObjLike1"},
		},
		{
			json: `{
			"$ref": "#/definitions/ObjLike2"
		}`,
			expected: pschema.TypeSpec{Ref: "#/types/aws-native::ObjLike2"},
		},
		{
			json: `{
			"$ref": "#/definitions/OneOf"
		}`,
			expected: pschema.TypeSpec{
				OneOf: []pschema.TypeSpec{
					{Type: "number"},
					{Type: "string"},
				},
			},
		},
	}
	// Shared conversion context; Definitions supplies the $ref targets used
	// by the cases above.
	ctx := context{
		pkg: &pschema.PackageSpec{
			Types: map[string]pschema.ComplexTypeSpec{},
		},
		visitedTypes: codegen.NewStringSet(),
		metadata: &CloudAPIMetadata{
			Types: map[string]CloudAPIType{},
		},
		resourceSpec: &jsschema.Schema{
			Definitions: map[string]*jsschema.Schema{
				"Obj": &jsschema.Schema{Type: jsschema.PrimitiveTypes{jsschema.ObjectType}},
				"OneOf": &jsschema.Schema{
					OneOf: jsschema.SchemaList{
						&jsschema.Schema{Type: jsschema.PrimitiveTypes{jsschema.NumberType}},
						&jsschema.Schema{Type: jsschema.PrimitiveTypes{jsschema.StringType}},
					},
				},
				"ObjLike1": &jsschema.Schema{
					Properties: map[string]*jsschema.Schema{
						"foo": jsschema.New(),
					},
				},
				"ObjLike2": &jsschema.Schema{
					PatternProperties: map[*regexp.Regexp]*jsschema.Schema{
						regexp.MustCompile(".+"): jsschema.New(),
					},
				},
			},
		},
	}
	for _, tt := range cases {
		schema := jsschema.New()
		err := schema.UnmarshalJSON([]byte(tt.json))
		assert.Nil(t, err)
		actual, err := ctx.propertyTypeSpec("Foo", schema)
		if assert.Nil(t, err) {
			assert.Equal(t, tt.expected, *actual)
		}
	}
}
// TestEnumType verifies that genEnumType registers an enum in the package
// spec, normalizes the type-name casing, and maps each normalized enum value
// name back to its original wire value.
func TestEnumType(t *testing.T) {
	cases := []struct {
		name           string
		schema         *jsschema.Schema
		expectedType   string
		expectedValues map[string]string
	}{
		{
			name: "SomeHTTPEnum",
			schema: &jsschema.Schema{
				Type: jsschema.PrimitiveTypes{jsschema.StringType},
				Enum: []interface{}{"UseHTTP1Thing", "use_HTTP2_thing"},
			},
			expectedType: "aws-native::SomeHttpEnum",
			expectedValues: map[string]string{
				"UseHttp1Thing": "UseHTTP1Thing",
				"UseHttp2Thing": "use_HTTP2_thing",
			},
		},
	}
	for _, tt := range cases {
		ctx := context{
			pkg: &pschema.PackageSpec{
				Types: map[string]pschema.ComplexTypeSpec{},
			},
			metadata: &CloudAPIMetadata{
				Types: map[string]CloudAPIType{},
			},
		}
		out, err := (&ctx).genEnumType(tt.name, tt.schema)
		assert.NoError(t, err)
		assert.Equal(t, "#/types/"+tt.expectedType, out.Ref)
		if assert.Contains(t, ctx.pkg.Types, tt.expectedType) {
			// Plain map index instead of the redundant comma-ok form with a
			// discarded ok value; also avoid shadowing it in the inner loop.
			spec := ctx.pkg.Types[tt.expectedType]
			actualValues := map[string]string{}
			for _, e := range spec.Enum {
				actualValues[e.Name] = e.Value.(string)
			}
			assert.Equal(t, tt.expectedValues, actualValues)
		}
	}
}
// TestModuleName checks casing normalization of the module segment of a
// CloudFormation-style type token.
func TestModuleName(t *testing.T) {
	for input, want := range map[string]string{
		"aws::SomeEC2Thing::SomeDHCPOptions": "SomeEc2Thing",
	} {
		assert.Equal(t, want, moduleName(input))
	}
}
// TestTypeName checks casing normalization of the resource-name segment of a
// CloudFormation-style type token.
func TestTypeName(t *testing.T) {
	for input, want := range map[string]string{
		"aws::FOO::SomeDHCPOptions": "SomeDhcpOptions",
	} {
		assert.Equal(t, want, typeName(input))
	}
}
|
package statistics
import (
"math"
"sync"
"time"
"github.com/matrix-org/dendrite/federationsender/storage"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
"go.uber.org/atomic"
)
// Statistics contains information about all of the remote federated
// hosts that we have interacted with. It is basically a threadsafe
// wrapper.
type Statistics struct {
	DB      storage.Database
	servers map[gomatrixserverlib.ServerName]*ServerStatistics // guarded by mutex
	mutex   sync.RWMutex
	// How many times should we tolerate consecutive failures before we
	// just blacklist the host altogether? The backoff is exponential,
	// so the max time here to attempt is 2**failures seconds.
	FailuresUntilBlacklist uint32
}
// ForServer returns server statistics for the given server name. If it
// does not exist, it will create empty statistics and return those.
func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerStatistics {
	// Fast path: look up under the read lock. Reading from a nil map is
	// legal in Go and simply reports a miss.
	s.mutex.RLock()
	server, found := s.servers[serverName]
	s.mutex.RUnlock()
	if found {
		return server
	}
	// Slow path: take the write lock, then re-check. The original checked
	// s.servers == nil without holding any lock and never re-checked after
	// upgrading, so two concurrent callers could race the map initialisation
	// or overwrite each other's freshly created entry.
	s.mutex.Lock()
	if s.servers == nil {
		s.servers = make(map[gomatrixserverlib.ServerName]*ServerStatistics)
	}
	server, found = s.servers[serverName]
	if !found {
		server = &ServerStatistics{
			statistics: s,
			serverName: serverName,
		}
		s.servers[serverName] = server
	}
	s.mutex.Unlock()
	if !found {
		// Seed the blacklist flag from the database, outside the lock so
		// the DB round-trip does not block other lookups.
		blacklisted, err := s.DB.IsServerBlacklisted(serverName)
		if err != nil {
			logrus.WithError(err).Errorf("Failed to get blacklist entry %q", serverName)
		} else {
			server.blacklisted.Store(blacklisted)
		}
	}
	return server
}
// ServerStatistics contains information about our interactions with a
// remote federated host, e.g. how many times we were successful, how
// many times we failed etc. It also manages the backoff time and black-
// listing a remote host if it remains uncooperative.
type ServerStatistics struct {
	statistics     *Statistics                  // parent wrapper (for DB access and thresholds)
	serverName     gomatrixserverlib.ServerName // which remote host these stats describe
	blacklisted    atomic.Bool                  // is the node blacklisted
	backoffUntil   atomic.Value                 // time.Time to wait until before sending requests
	failCounter    atomic.Uint32                // how many times have we failed?
	successCounter atomic.Uint32                // how many times have we succeeded?
}
// Success updates the server statistics with a new successful
// attempt, which increases the sent counter and resets the idle and
// failure counters. If a host was blacklisted at this point then
// we will unblacklist it.
func (s *ServerStatistics) Success() {
	s.successCounter.Add(1)
	// Any success wipes the consecutive-failure streak.
	s.failCounter.Store(0)
	s.blacklisted.Store(false)
	// Keep the persisted blacklist in sync with the in-memory flag.
	if err := s.statistics.DB.RemoveServerFromBlacklist(s.serverName); err != nil {
		logrus.WithError(err).Errorf("Failed to remove %q from blacklist", s.serverName)
	}
}
// Failure marks a failure and works out when to backoff until. It
// returns true if the worker should give up altogether because of
// too many consecutive failures. At this point the host is marked
// as blacklisted.
func (s *ServerStatistics) Failure() bool {
	// Increase the fail counter.
	failCounter := s.failCounter.Add(1)
	// Check that we haven't failed more times than is acceptable.
	if failCounter >= s.statistics.FailuresUntilBlacklist {
		// We've exceeded the maximum amount of times we're willing
		// to back off, which is probably in the region of hours by
		// now. Mark the host as blacklisted and tell the caller to
		// give up.
		s.blacklisted.Store(true)
		if err := s.statistics.DB.AddServerToBlacklist(s.serverName); err != nil {
			logrus.WithError(err).Errorf("Failed to add %q to blacklist", s.serverName)
		}
		return true
	}
	// We're still under the threshold so work out the exponential
	// backoff based on how many times we have failed already. The
	// worker goroutine will wait until this time before processing
	// anything from the queue.
	// 2**failCounter seconds; bounded in practice by the blacklist
	// threshold check above.
	backoffSeconds := time.Second * time.Duration(math.Exp2(float64(failCounter)))
	s.backoffUntil.Store(
		time.Now().Add(backoffSeconds),
	)
	return false
}
// BackoffDuration returns both a bool stating whether to wait,
// and then if true, a duration to wait for.
func (s *ServerStatistics) BackoffDuration() (bool, time.Duration) {
	backoff, until := false, time.Second
	// backoffUntil holds a time.Time once Failure has stored one; the
	// type assertion fails harmlessly before the first failure.
	if b, ok := s.backoffUntil.Load().(time.Time); ok {
		if b.After(time.Now()) {
			backoff, until = true, time.Until(b)
		}
	}
	return backoff, until
}
// Blacklisted returns true if the server is blacklisted and false
// otherwise.
func (s *ServerStatistics) Blacklisted() bool {
	return s.blacklisted.Load()
}

// SuccessCount returns the number of successful requests. This is
// usually useful in constructing transaction IDs.
func (s *ServerStatistics) SuccessCount() uint32 {
	return s.successCounter.Load()
}
|
package main
import (
"fmt"
)
/**
 * Tower of Hanoi — the classic recursion exercise. The minimum number of
 * moves for n disks is (2^n) - 1.
 * (Like the joke: putting an elephant into a fridge takes only 3 steps —
 * open the door, put the elephant in, close the door.)
 */
// DiskNum is the number of disks to solve for.
const DiskNum = 5

// main solves the puzzle for DiskNum disks and prints the total move count.
func main() {
	hanoi_0(DiskNum, "a", "b", "c")
	fmt.Printf("disk num: %d, min step: %d", DiskNum, times)
}
// hanoi_0 moves n disks from peg a to peg c, using peg b as the auxiliary
// peg, printing every individual move via move.
func hanoi_0(n int, a string, b string, c string) {
	if n < 1 {
		fmt.Println("wrong n")
		// Bug fix: the original fell through after printing, so an invalid
		// n recursed with ever-smaller values until the stack overflowed.
		return
	}
	if n == 1 {
		move(n, a, c)
		return
	}
	hanoi_0(n-1, a, c, b) // move the top n-1 disks from a to b, with c as helper
	move(n, a, c)         // move the largest disk n directly from a to c
	hanoi_0(n-1, b, a, c) // move the n-1 parked disks from b to c, with a as helper
}
// times counts the total number of moves performed so far.
var times int

// move records one disk move from peg `from` to peg `to`: it bumps the
// global counter and prints the move.
func move(disk int, from string, to string) {
	times++ // dropped the stray semicolon; gofmt-clean now
	fmt.Printf("#%d disk %d: %s ---> %s \n", times, disk, from, to)
}
package domain
import (
"time"
"github.com/gofrs/uuid"
)
// TaskService looks up the assets a Task may reference (areas, crops,
// materials, reservoirs) so the task can be validated against them.
type TaskService interface {
	FindAreaByID(uid uuid.UUID) ServiceResult
	FindCropByID(uid uuid.UUID) ServiceResult
	FindMaterialByID(uid uuid.UUID) ServiceResult
	FindReservoirByID(uid uuid.UUID) ServiceResult
}

// ServiceResult is the container for service result
type ServiceResult struct {
	Result interface{}
	Error  error
}
// Task is the event-sourced aggregate for a unit of work. State is mutated
// only by applying events through TrackChange/Transition.
type Task struct {
	UID           uuid.UUID  `json:"uid"`
	Title         string     `json:"title"`
	Description   string     `json:"description"`
	CreatedDate   time.Time  `json:"created_date"`
	DueDate       *time.Time `json:"due_date,omitempty"`
	CompletedDate *time.Time `json:"completed_date"`
	CancelledDate *time.Time `json:"cancelled_date"`
	Priority      string     `json:"priority"`
	Status        string     `json:"status"`
	Domain        string     `json:"domain"`
	DomainDetails TaskDomain `json:"domain_details"`
	Category      string     `json:"category"`
	IsDue         bool       `json:"is_due"`
	AssetID       *uuid.UUID `json:"asset_id"`
	// Events
	Version            int           // presumably the committed event count — confirm against the repository
	UncommittedChanges []interface{} // events applied in memory but not yet persisted
}
// CreateTask validates every input, then constructs a new Task by applying a
// TaskCreated event through the event-sourcing machinery. On any validation
// failure an empty Task and the validation error are returned.
func CreateTask(taskService TaskService, title string, description string, duedate *time.Time, priority string, taskdomain TaskDomain, taskcategory string, assetid *uuid.UUID) (*Task, error) {
	// add validation
	err := validateTaskTitle(title)
	if err != nil {
		return &Task{}, err
	}
	err = validateTaskDescription(description)
	if err != nil {
		return &Task{}, err
	}
	err = validateTaskDueDate(duedate)
	if err != nil {
		return &Task{}, err
	}
	err = validateTaskPriority(priority)
	if err != nil {
		return &Task{}, err
	}
	err = validateTaskCategory(taskcategory)
	if err != nil {
		return &Task{}, err
	}
	err = validateAssetID(taskService, assetid, taskdomain.Code())
	if err != nil {
		return &Task{}, err
	}
	uid, err := uuid.NewV4()
	if err != nil {
		return &Task{}, err
	}
	initial := &Task{}
	// TrackChange both records and applies the event; its error was
	// previously discarded.
	err = initial.TrackChange(taskService, TaskCreated{
		Title:         title,
		UID:           uid,
		Description:   description,
		CreatedDate:   time.Now(),
		DueDate:       duedate,
		Priority:      priority,
		Status:        TaskStatusCreated,
		Domain:        taskdomain.Code(),
		DomainDetails: taskdomain,
		Category:      taskcategory,
		IsDue:         false,
		AssetID:       assetid,
	})
	if err != nil {
		return &Task{}, err
	}
	return initial, nil
}
// ChangeTaskTitle validates the new title and applies a TaskTitleChanged
// event to the task.
func (t *Task) ChangeTaskTitle(taskService TaskService, title string) (*Task, error) {
	if err := validateTaskTitle(title); err != nil {
		return &Task{}, err
	}
	event := TaskTitleChanged{
		UID:   t.UID,
		Title: title,
	}
	// Propagate the (previously ignored) error from applying the event.
	if err := t.TrackChange(taskService, event); err != nil {
		return &Task{}, err
	}
	return t, nil
}
// ChangeTaskDescription validates the new description and applies a
// TaskDescriptionChanged event to the task.
func (t *Task) ChangeTaskDescription(taskService TaskService, description string) (*Task, error) {
	if err := validateTaskDescription(description); err != nil {
		return &Task{}, err
	}
	event := TaskDescriptionChanged{
		UID:         t.UID,
		Description: description,
	}
	// Propagate the (previously ignored) error from applying the event.
	if err := t.TrackChange(taskService, event); err != nil {
		return &Task{}, err
	}
	return t, nil
}
// ChangeTaskDueDate validates the new due date (nil or in the future) and
// applies a TaskDueDateChanged event to the task.
func (t *Task) ChangeTaskDueDate(taskService TaskService, duedate *time.Time) (*Task, error) {
	if err := validateTaskDueDate(duedate); err != nil {
		return &Task{}, err
	}
	event := TaskDueDateChanged{
		UID:     t.UID,
		DueDate: duedate,
	}
	// Propagate the (previously ignored) error from applying the event.
	if err := t.TrackChange(taskService, event); err != nil {
		return &Task{}, err
	}
	return t, nil
}
// ChangeTaskPriority validates the new priority code and applies a
// TaskPriorityChanged event to the task.
func (t *Task) ChangeTaskPriority(taskService TaskService, priority string) (*Task, error) {
	if err := validateTaskPriority(priority); err != nil {
		return &Task{}, err
	}
	event := TaskPriorityChanged{
		UID:      t.UID,
		Priority: priority,
	}
	// Propagate the (previously ignored) error from applying the event.
	if err := t.TrackChange(taskService, event); err != nil {
		return &Task{}, err
	}
	return t, nil
}
// ChangeTaskCategory validates the new category code and applies a
// TaskCategoryChanged event to the task.
func (t *Task) ChangeTaskCategory(taskService TaskService, category string) (*Task, error) {
	if err := validateTaskCategory(category); err != nil {
		return &Task{}, err
	}
	event := TaskCategoryChanged{
		UID:      t.UID,
		Category: category,
	}
	// Propagate the (previously ignored) error from applying the event.
	if err := t.TrackChange(taskService, event); err != nil {
		return &Task{}, err
	}
	return t, nil
}
// ChangeTaskDetails applies a TaskDetailsChanged event carrying the new
// domain-specific details. No validation is performed on the details.
func (t *Task) ChangeTaskDetails(taskService TaskService, details TaskDomain) (*Task, error) {
	event := TaskDetailsChanged{
		UID:           t.UID,
		DomainDetails: details,
	}
	// Propagate the (previously ignored) error from applying the event.
	if err := t.TrackChange(taskService, event); err != nil {
		return &Task{}, err
	}
	return t, nil
}
// SetTaskAsDue marks the task as due by applying a TaskDue event.
// NOTE(review): TrackChange's error is discarded here; the signature cannot
// propagate it (Transition currently never returns one).
func (t *Task) SetTaskAsDue(taskService TaskService) {
	t.TrackChange(taskService, TaskDue{
		UID: t.UID,
	})
}
// CompleteTask marks the task completed now by applying a TaskCompleted event.
func (t *Task) CompleteTask(taskService TaskService) {
	completedTime := time.Now()
	t.TrackChange(taskService, TaskCompleted{
		UID:           t.UID,
		Status:        TaskCompletedCode,
		CompletedDate: &completedTime,
	})
}
// CancelTask marks the task cancelled now by applying a TaskCancelled event.
// (The original comment said "CompleteTask", which was a copy-paste slip.)
func (t *Task) CancelTask(taskService TaskService) {
	cancelledTime := time.Now()
	t.TrackChange(taskService, TaskCancelled{
		UID:           t.UID,
		Status:        TaskCancelledCode,
		CancelledDate: &cancelledTime,
	})
}
// Event Tracking

// TrackChange appends the event to the uncommitted-changes log and
// immediately applies it to the in-memory state via Transition, returning
// any error from the transition.
func (state *Task) TrackChange(taskService TaskService, event interface{}) error {
	state.UncommittedChanges = append(state.UncommittedChanges, event)
	// Collapsed the redundant `if err != nil { return err }; return nil`.
	return state.Transition(taskService, event)
}
// Transition applies a single event to the task's in-memory state. Unknown
// event types are ignored; the error return is reserved for future use and
// is currently always nil.
func (state *Task) Transition(taskService TaskService, event interface{}) error {
	switch e := event.(type) {
	case TaskCreated:
		// Creation seeds every field at once.
		state.Title = e.Title
		state.UID = e.UID
		state.Description = e.Description
		state.CreatedDate = e.CreatedDate
		state.DueDate = e.DueDate
		state.Priority = e.Priority
		state.Status = e.Status
		state.Domain = e.Domain
		state.DomainDetails = e.DomainDetails
		state.Category = e.Category
		state.IsDue = e.IsDue
		state.AssetID = e.AssetID
	case TaskTitleChanged:
		state.Title = e.Title
	case TaskDescriptionChanged:
		state.Description = e.Description
	case TaskDueDateChanged:
		state.DueDate = e.DueDate
	case TaskPriorityChanged:
		state.Priority = e.Priority
	case TaskCategoryChanged:
		state.Category = e.Category
	case TaskDetailsChanged:
		state.DomainDetails = e.DomainDetails
	case TaskCancelled:
		state.CancelledDate = e.CancelledDate
		state.Status = TaskStatusCancelled
	case TaskCompleted:
		state.CompletedDate = e.CompletedDate
		state.Status = TaskStatusCompleted
	case TaskDue:
		state.IsDue = true
	}
	return nil
}
// Validation

// validateTaskTitle rejects an empty task title.
func validateTaskTitle(title string) error {
	if len(title) == 0 {
		return TaskError{TaskErrorTitleEmptyCode}
	}
	return nil
}
// validateTaskDescription rejects an empty task description.
func validateTaskDescription(description string) error {
	if len(description) == 0 {
		return TaskError{TaskErrorDescriptionEmptyCode}
	}
	return nil
}
// validateTaskDueDate accepts a nil due date, and otherwise requires it to
// lie in the future.
func validateTaskDueDate(newdate *time.Time) error {
	if newdate == nil {
		return nil
	}
	if newdate.Before(time.Now()) {
		return TaskError{TaskErrorDueDateInvalidCode}
	}
	return nil
}
// validateTaskPriority requires a non-empty priority that resolves to a
// known priority code.
func validateTaskPriority(priority string) error {
	if priority == "" {
		return TaskError{TaskErrorPriorityEmptyCode}
	}
	if _, err := FindTaskPriorityByCode(priority); err != nil {
		return err
	}
	return nil
}
// validateTaskCategory requires a non-empty category that resolves to a
// known category code.
func validateTaskCategory(taskcategory string) error {
	if taskcategory == "" {
		return TaskError{TaskErrorCategoryEmptyCode}
	}
	if _, err := FindTaskCategoryByCode(taskcategory); err != nil {
		return err
	}
	return nil
}
// validateAssetID checks that, when an asset reference is supplied, the task
// domain is present and the referenced asset exists in that domain's service.
// A nil assetid is always valid.
func validateAssetID(taskService TaskService, assetid *uuid.UUID, taskdomain string) error {
	if assetid == nil {
		return nil
	}
	if taskdomain == "" {
		return TaskError{TaskErrorDomainEmptyCode}
	}
	// Dispatch the existence check to the service for the given domain.
	var result ServiceResult
	switch taskdomain {
	case TaskDomainAreaCode:
		result = taskService.FindAreaByID(*assetid)
	case TaskDomainCropCode:
		result = taskService.FindCropByID(*assetid)
	case TaskDomainInventoryCode:
		result = taskService.FindMaterialByID(*assetid)
	case TaskDomainReservoirCode:
		result = taskService.FindReservoirByID(*assetid)
	default:
		return TaskError{TaskErrorInvalidDomainCode}
	}
	return result.Error
}
|
package main
import (
"github.com/superboy724/wechatmessage/processer"
"github.com/superboy724/wechatmessage/server"
)
// main wires a message processer into an HTTP server on port 80 and runs it.
func main() {
	// Named srv so the local variable no longer shadows the imported
	// `server` package.
	srv := server.NewServer(80)
	p := processer.NewMessageProcesser()
	srv.SetProcesser(p)
	srv.Run()
}
|
// Copyright ©2012 The bíogo Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kmeans_test
import (
"math/rand"
"strings"
"testing"
"github.com/biogo/cluster/cluster"
"github.com/biogo/cluster/kmeans"
"gopkg.in/check.v1"
)
// Test hooks the gocheck suites into the standard testing runner.
func Test(t *testing.T) { check.TestingT(t) }

// S is the gocheck suite type for the kmeans tests.
type S struct{}

func (s *S) TearDownSuite(_ *check.C) { rand.Seed(1) } // Reset the seed for the example test.

var _ = check.Suite(&S{})
var (
	// seq is ten equal-length, evenly spaced features.
	// NOTE: `feats`, referenced by the first two cases below, is defined
	// elsewhere in this package.
	seq = []*Feature{
		{ID: "0", Start: 0, End: 100},
		{ID: "1", Start: 100, End: 200},
		{ID: "2", Start: 200, End: 300},
		{ID: "3", Start: 300, End: 400},
		{ID: "4", Start: 400, End: 500},
		{ID: "5", Start: 500, End: 600},
		{ID: "6", Start: 600, End: 700},
		{ID: "7", Start: 700, End: 800},
		{ID: "8", Start: 800, End: 900},
		{ID: "9", Start: 900, End: 1000},
	}
	// tests pairs each input set and (epsilon, effort) with the expected
	// cluster memberships and sums of squares.
	tests = []struct {
		set      []*Feature
		epsilon  float64
		effort   int
		clusters []cluster.Indices
		// results determined with R
		total  int
		within []float64
	}{
		{
			feats,
			0.15, 5,
			[]cluster.Indices{{0, 1}, {2, 3, 4, 5}, {6, 7}, {8, 9, 10}},
			4747787,
			[]float64{0.5, 15820.75, 2500, 3829.333333333333},
		},
		{
			feats,
			0.1, 5,
			[]cluster.Indices{{8, 9, 10}, {0, 1}, {6}, {2, 3, 4}, {5}, {7}},
			4747787,
			[]float64{3829.333333333333, 0.5, 0, 52, 0, 0},
		},
		{
			seq,
			0.2, 5,
			[]cluster.Indices{{3}, {7}, {9}, {1}, {6}, {0}, {5}, {4}, {8}, {2}},
			1650000,
			[]float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
		},
		{
			seq,
			1, 5,
			[]cluster.Indices{{4, 5}, {2, 3}, {8, 9}, {0, 1}, {6, 7}},
			1650000,
			[]float64{10000, 10000, 10000, 10000, 10000},
		},
	}
)
// Tests

// TestKmeans clusters each fixture set and checks memberships, the total sum
// of squares, and the per-cluster within sums against values computed with R.
func (s *S) TestKmeans(c *check.C) {
	for i, t := range tests {
		rand.Seed(1) // fixed seed so cluster assignment is deterministic
		km, err := ClusterFeatures(t.set, t.epsilon, t.effort)
		c.Assert(err, check.Equals, nil)
		clusters := km.Centers()
		c.Logf("Test %d: epsilon = %.2f effort = %d", i, t.epsilon, t.effort)
		// Log an ASCII sketch of each cluster's members.
		for ci, cl := range clusters {
			c.Logf("Cluster %d:", ci)
			for _, j := range cl.Members() {
				f := t.set[j]
				c.Logf("%2s %s%s",
					f.ID,
					strings.Repeat(" ", f.Start/20),
					strings.Repeat("-", f.Len()/20),
				)
			}
		}
		c.Log()
		for ci, m := range clusters {
			c.Check(m.Members(), check.DeepEquals, t.clusters[ci])
		}
		c.Check(int(km.Total()), check.Equals, t.total)
		c.Check(km.Within(), check.DeepEquals, t.within)
	}
}
// bench is a flat set of 2-D points implementing the value-access interface
// expected by kmeans.New.
type bench [][2]float64

func (b bench) Len() int               { return len(b) }
func (b bench) Values(i int) []float64 { return b[i][:] }
// benchData is a synthetic 10,000-point 2-D data set used by Benchmark.
//
// NOTE(review): each iteration of the outer loop overwrites ALL of b, so only
// the final (x, y, r) triple survives and the data ends up as a single
// Gaussian blob rather than 20 clusters — confirm whether each iteration was
// meant to fill a distinct segment of b instead.
var benchData bench = func() bench {
	b := make(bench, 10000)
	for i := 0; i < 20; i++ {
		x, y := float64(rand.Intn(10000)), float64(rand.Intn(10000))
		r := float64(rand.Intn(200))
		for j := range b {
			b[j] = [2]float64{x + r*rand.NormFloat64(), y + r*rand.NormFloat64()}
		}
	}
	return b
}()
// Benchmark measures repeated k-means clustering of benchData seeded with
// 20 centers; setup is deliberately kept outside the timed loop.
func Benchmark(b *testing.B) {
	km, _ := kmeans.New(benchData)
	km.Seed(20)
	for i := 0; i < b.N; i++ {
		km.Cluster()
	}
	_ = km.Centers() // keep the result live so the loop is not optimized away
}
|
package main
import (
"encoding/json"
"log"
"net/http"
"strings"
"io/ioutil"
"flag"
"path/filepath"
"regexp"
"github.com/julienschmidt/httprouter"
// "github.com/k0kubun/pp"
)
// ConfigDirectory is the directory containing the per-channel/group .log files.
var ConfigDirectory = flag.String("c", ".", "Configuration directory (default .)")

// Message is the JSON envelope returned by the log endpoints.
type Message struct {
	Status   string
	Messages []string
}
// main parses command-line flags (notably -c, the log directory) and starts
// the HTTP server.
func main() {
	// Bug fix: flag.Parse() was never called, so the -c flag defined above
	// always kept its default value.
	flag.Parse()
	StartServer()
}
// getLog reads logFile and returns its contents with real newlines collapsed
// to tab pairs ("\t\t") and literal "\n" escape sequences expanded into real
// newlines.
//
// NOTE(review): log.Fatal terminates the whole server when a log file is
// missing; consider returning an error instead — confirm callers can cope.
func getLog(logFile string) string {
	logstr, err := ioutil.ReadFile(logFile)
	if err != nil {
		log.Fatal("Error opening config: ", err)
	}
	s := strings.Replace(string(logstr), "\n", "\t\t", -1)
	// s is already a string; the redundant string(s) conversion was dropped.
	return strings.Replace(s, "\\n", "\n", -1)
}
// channelList responds with a JSON listing of all "*.log" files in the
// current directory, in the {"Status": "ok", "Channnles": [...]} shape.
func channelList(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	if err := r.ParseForm(); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	fia, err := ioutil.ReadDir(".")
	if err != nil {
		// Bug fix: this error was previously discarded without a check.
		log.Println("Couldn't read log directory:", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	reg := regexp.MustCompile(`\.log$`)
	l := make([]string, 0)
	for _, fi := range fia {
		if name := fi.Name(); reg.MatchString(name) {
			l = append(l, name)
		}
	}
	type JsonRes struct {
		Status string
		// NOTE: the misspelled field name is part of the JSON wire format
		// that clients consume, so it is deliberately left unchanged.
		Channnles []string
	}
	data := JsonRes{
		Status:    "ok",
		Channnles: l,
	}
	t, err := json.Marshal(data)
	if err != nil {
		log.Println("Couldn't marshal hook response:", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// Bug fix: headers must be set before WriteHeader; the original wrote
	// the status first, so the Content-Type header was silently dropped.
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	w.Write(t)
}
// groupLog serves the log of the group named in the URL as a JSON message
// list.
func groupLog(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	if err := r.ParseForm(); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	str := getLog(filepath.Join(*ConfigDirectory, ps.ByName("group")+".log"))
	// Bug fix: do not call WriteHeader(200) here — jsonResp still needs to
	// set Content-Type (headers set after WriteHeader are ignored), and a
	// successful Write implies status 200 anyway.
	jsonResp(w, strings.Split(str, "\t\t"))
}
// channelLog serves the log of the channel named in the URL as a JSON message
// list.
func channelLog(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	if err := r.ParseForm(); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	str := getLog(filepath.Join(*ConfigDirectory, ps.ByName("channel")+".log"))
	// Bug fix: do not call WriteHeader(200) here — jsonResp still needs to
	// set Content-Type (headers set after WriteHeader are ignored), and a
	// successful Write implies status 200 anyway.
	jsonResp(w, strings.Split(str, "\t\t"))
}
// jsonResp writes msg as a JSON Message envelope, dropping the final element
// (presumably the empty string left by splitting on a trailing separator).
func jsonResp(w http.ResponseWriter, msg []string) {
	// Robustness fix: guard the slice so an empty input cannot panic.
	if len(msg) > 0 {
		msg = msg[:len(msg)-1]
	}
	resp := Message{
		Status:   "ok",
		Messages: msg,
	}
	r, err := json.Marshal(resp)
	if err != nil {
		log.Println("Couldn't marshal hook response:", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// Set the header only once we know the response will be written.
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Write(r)
}
// StartServer registers the three read-only log routes and serves them on
// port 3002; it blocks until the listener fails.
func StartServer() {
	router := httprouter.New()
	router.GET("/channel_list", channelList)
	router.GET("/channel/:channel", channelLog)
	router.GET("/group/:group", groupLog)
	log.Printf("Starting HTTP server on %d", 3002)
	log.Fatal(http.ListenAndServe(":3002", router))
}
|
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package textrecord
import (
"bufio"
"errors"
"fmt"
"github.com/streamsets/datacollector-edge/api"
"github.com/streamsets/datacollector-edge/api/fieldtype"
"github.com/streamsets/datacollector-edge/container/recordio"
"io"
)
const (
	// DEFAULT_TEXT_FIELD is the record map key whose value is written out.
	DEFAULT_TEXT_FIELD = "text"
)

// TextWriterFactoryImpl creates writers that serialize records as plain text.
type TextWriterFactoryImpl struct {
	// TODO: Add needed configs
}
// CreateWriter returns a text record writer wrapping the supplied writer.
func (t *TextWriterFactoryImpl) CreateWriter(
	context api.StageContext,
	writer io.Writer,
) (recordio.RecordWriter, error) {
	return newRecordWriter(context, writer), nil
}
// TextWriterImpl writes records through a buffered writer, one text line per
// record.
type TextWriterImpl struct {
	context api.StageContext
	writer  *bufio.Writer
}
// WriteRecord extracts the text field from the record's root value and
// writes it to the underlying writer followed by a newline.
func (textWriter *TextWriterImpl) WriteRecord(r api.Record) error {
	// Bug fix: the error from r.Get() was previously discarded.
	recordValue, err := r.Get()
	if err != nil {
		return err
	}
	textFieldValue, err := textWriter.getTextFieldPathValue(recordValue)
	if err != nil {
		return err
	}
	// Surface write failures to the caller instead of ignoring them.
	_, err = fmt.Fprintln(textWriter.writer, textFieldValue)
	return err
}
// getTextFieldPathValue returns the string value stored under the default
// "text" key of a MAP field. A nil field value yields the empty string; any
// other shape yields an error.
func (textWriter *TextWriterImpl) getTextFieldPathValue(field *api.Field) (string, error) {
	var textFieldValue string
	if field.Value == nil {
		return textFieldValue, nil
	}
	var err error = nil
	switch field.Type {
	case fieldtype.MAP:
		fieldValue := field.Value.(map[string]*api.Field)
		textField := fieldValue[DEFAULT_TEXT_FIELD]
		// Robustness fix: a missing "text" entry previously caused a
		// nil-pointer panic on textField.Type below.
		if textField == nil {
			err = errors.New("Missing Text Field path - " + DEFAULT_TEXT_FIELD)
			return textFieldValue, err
		}
		if textField.Type != fieldtype.STRING {
			err = errors.New("Invalid Field Type for Text Field path - " + textField.Type)
			return textFieldValue, err
		}
		textFieldValue = textField.Value.(string)
		return textFieldValue, err
	default:
		err = errors.New("Unsupported Field Type")
	}
	return textFieldValue, err
}
// Flush delegates to recordio.Flush on the buffered writer.
func (textWriter *TextWriterImpl) Flush() error {
	return recordio.Flush(textWriter.writer)
}

// Close delegates to recordio.Close on the buffered writer.
func (textWriter *TextWriterImpl) Close() error {
	return recordio.Close(textWriter.writer)
}
// newRecordWriter wraps writer in a buffered text record writer bound to the
// given stage context.
func newRecordWriter(context api.StageContext, writer io.Writer) *TextWriterImpl {
	return &TextWriterImpl{
		context: context,
		writer:  bufio.NewWriter(writer),
	}
}
|
package crawl
import "github.com/golang/glog"
// Verbosity levels for glog.V(...), grouped by subsystem (Segment, Line,
// Typing, Http, Log) in three increasing tiers (I, D, V). The blank first
// value skips level 0.
const (
	_ glog.Level = iota
	SegmentI
	LineI
	TypingI
	HttpI
	LogI
	SegmentD
	LineD
	TypingD
	HttpD
	LogD
	SegmentV
	LineV
	TypingV
	HttpV
	LogV
)
|
package atomix
import (
"sync/atomic"
"unsafe"
)
// AddInt is same as [atomic.AddInt32] or [atomic.AddInt64] but for int type.
func AddInt(addr *int, delta int) (new int) {
	const size = unsafe.Sizeof(int(0))
	if size == 8 {
		return int(atomic.AddInt64((*int64)(unsafe.Pointer(addr)), int64(delta)))
	}
	if size == 4 {
		return int(atomic.AddInt32((*int32)(unsafe.Pointer(addr)), int32(delta)))
	}
	panic("atomix: int must be 4 or 8 bytes")
}
// CompareAndSwapInt is same as [atomic.CompareAndSwapInt32] or [atomic.CompareAndSwapInt64] but for int type.
func CompareAndSwapInt(addr *int, old, new int) (swapped bool) {
	const size = unsafe.Sizeof(int(0))
	if size == 8 {
		return atomic.CompareAndSwapInt64((*int64)(unsafe.Pointer(addr)), int64(old), int64(new))
	}
	if size == 4 {
		return atomic.CompareAndSwapInt32((*int32)(unsafe.Pointer(addr)), int32(old), int32(new))
	}
	panic("atomix: int must be 4 or 8 bytes")
}
// LoadInt is same as [atomic.LoadInt32] or [atomic.LoadInt64] but for int type.
func LoadInt(addr *int) (val int) {
	const size = unsafe.Sizeof(int(0))
	if size == 8 {
		return int(atomic.LoadInt64((*int64)(unsafe.Pointer(addr))))
	}
	if size == 4 {
		return int(atomic.LoadInt32((*int32)(unsafe.Pointer(addr))))
	}
	panic("atomix: int must be 4 or 8 bytes")
}
// StoreInt is same as [atomic.StoreInt32] or [atomic.StoreInt64] but for int type.
func StoreInt(addr *int, val int) {
	const size = unsafe.Sizeof(int(0))
	if size == 8 {
		atomic.StoreInt64((*int64)(unsafe.Pointer(addr)), int64(val))
		return
	}
	if size == 4 {
		atomic.StoreInt32((*int32)(unsafe.Pointer(addr)), int32(val))
		return
	}
	panic("atomix: int must be 4 or 8 bytes")
}
// SwapInt is same as [atomic.SwapInt32] or [atomic.SwapInt64] but for int type.
func SwapInt(addr *int, new int) (old int) {
	const size = unsafe.Sizeof(int(0))
	if size == 8 {
		return int(atomic.SwapInt64((*int64)(unsafe.Pointer(addr)), int64(new)))
	}
	if size == 4 {
		return int(atomic.SwapInt32((*int32)(unsafe.Pointer(addr)), int32(new)))
	}
	panic("atomix: int must be 4 or 8 bytes")
}
|
package dao
import (
"fmt"
"github.com/go-redis/redis"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"moriaty.com/cia/cia-supporter/config"
)
/**
 * @author Moriaty (Computer Science, class of '16)
 * @version 1.0
 * @copyright Moriaty. All rights reserved © 2020
 * @date 2020/4/7 11:08
 * @Description DAO initialization: package-level database and cache handles.
 */
var (
	DB  *sqlx.DB      // shared MySQL handle, set by InitMysql
	RDB *redis.Client // shared Redis client, set by InitRedis
)
// InitMysql opens the MySQL connection pool described by mysqlCfg and stores
// it in the package-level DB handle.
func InitMysql(mysqlCfg *config.MysqlConfig) (err error) {
	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?%s",
		mysqlCfg.Username, mysqlCfg.Password, mysqlCfg.Address, mysqlCfg.Database, mysqlCfg.Params)
	// Connect fails on a malformed DSN or an unreachable server.
	if DB, err = sqlx.Connect("mysql", dsn); err != nil {
		return err
	}
	DB.SetMaxOpenConns(mysqlCfg.MaxOpenConn)
	DB.SetMaxIdleConns(mysqlCfg.MaxIdleConn)
	return nil
}
// InitRedis connects the package-level Redis client using redisCfg and
// verifies the connection with a ping.
func InitRedis(redisCfg *config.RedisConfig) error {
	RDB = redis.NewClient(&redis.Options{
		Addr:     redisCfg.Address,
		Password: redisCfg.Password,
	})
	_, err := RDB.Ping().Result()
	return err
}
|
package main
import (
"log"
"net/http"
"os"
"encoding/json"
gp "github.com/jayluxferro/ghanapostgps"
"github.com/gin-gonic/gin"
_ "github.com/heroku/x/hmetrics/onload"
"strings"
)
// params holds the GhanaPostGPS API configuration, populated from
// environment variables in main.
var params gp.Params

// identifier is a substring whose presence in the raw API response marks a
// successful lookup.
const identifier = "CenterLatitude"

// DataResponse mirrors the API's top-level payload.
type DataResponse struct {
	Table []Info
}

// Info describes a single GhanaPostGPS address record.
type Info struct {
	Area            string
	CenterLatitude  float64
	CenterLongitude float64
	District        string
	EastLat         float64
	EastLong        float64
	GPSName         string
	NorthLat        float64
	NorthLong       float64
	PostCode        string
	Region          string
	SouthLat        float64
	SouthLong       float64
	Street          string
	WestLat         float64
	WestLong        float64
}
// unAuthorized rejects the request with a JSON error body.
// NOTE(review): despite the name, this sends 403 Forbidden rather than
// 401 Unauthorized — confirm which status clients expect before changing.
func unAuthorized(c *gin.Context) {
	c.JSON(http.StatusForbidden, gin.H{"error": "unauthorized"})
}
// responseData writes a 200 JSON envelope containing the lookup result and a
// flag indicating whether the address was found.
func responseData(c *gin.Context, found bool, data interface{}) {
	payload := gin.H{"found": found, "data": data}
	c.JSON(http.StatusOK, payload)
}
func addressHandler(c *gin.Context){
var dataResponse DataResponse
isValid, address := gp.IsValidGPAddress(c.PostForm("address"))
//log.Println(isValid, address)
if !isValid {
unAuthorized(c)
return
}
response := gp.GetLocation(address, ¶ms)
if(!strings.Contains(response, identifier)){
responseData(c, false, dataResponse)
return
}
response = convertToJSON(response)
json.Unmarshal([]byte(response), &dataResponse)
responseData(c, true, dataResponse)
}
// convertToJSON normalizes a JSON object string by decoding it into a map and
// re-encoding it. It panics when data is not a valid JSON object.
func convertToJSON(data string) string {
	var parsed map[string]interface{}
	if err := json.Unmarshal([]byte(data), &parsed); err != nil {
		panic(err)
	}
	normalized, err := json.Marshal(parsed)
	if err != nil {
		panic(err)
	}
	return string(normalized)
}
// main wires the upstream API configuration from GPGPS_-prefixed environment
// variables and serves the REST API on $PORT.
func main() {
	const prefix = "GPGPS_"
	env := func(name string) string { return os.Getenv(prefix + name) }
	params = gp.Params{
		AndroidCert:    env("androidCert"),
		AndroidPackage: env("androidPackage"),
		ApiKey:         env("apiKey"),
		ApiURL:         env("apiURL"),
		AsaaseAPI:      env("asaaseAPI"),
		Country:        env("country"),
		CountryName:    env("countryName"),
		Language:       env("language"),
		LanguageCode:   env("languageCode"),
		UUID:           env("uuid"),
	}
	port := os.Getenv("PORT")
	if port == "" {
		log.Fatal("$PORT must be set")
	}
	gin.SetMode(gin.ReleaseMode)
	router := gin.New()
	router.Use(gin.Logger())
	// GETs on the root are pointed at the project documentation.
	router.GET("/", func(c *gin.Context) {
		c.Redirect(http.StatusFound, "https://github.com/jayluxferro/GhanaPostGPS-REST-API")
	})
	// Address lookups are POSTed to the same path.
	router.POST("/", addressHandler)
	router.Run(":" + port)
}
|
package certs
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"crypto/x509/pkix"
"encoding/hex"
"encoding/pem"
"fmt"
"math"
"math/big"
"time"
)
// CertGenerator produces a serving cert/key pair valid until notAfter,
// issued for the given hosts and signed by ca.
type CertGenerator interface {
	Generate(notAfter time.Time, organization string, ca *KeyPair, hosts []string) (*KeyPair, error)
}

// CertGeneratorFunc adapts a plain function to the CertGenerator interface.
type CertGeneratorFunc func(notAfter time.Time, organization string, ca *KeyPair, hosts []string) (*KeyPair, error)

// Generate implements CertGenerator by invoking f itself.
func (f CertGeneratorFunc) Generate(notAfter time.Time, organization string, ca *KeyPair, hosts []string) (*KeyPair, error) {
	return f(notAfter, organization, ca, hosts)
}

// Compile-time check that CreateSignedServingPair satisfies CertGenerator.
var _ CertGenerator = CertGeneratorFunc(CreateSignedServingPair)
// KeyPair stores an x509 certificate and its ECDSA private key
type KeyPair struct {
Cert *x509.Certificate
Priv *ecdsa.PrivateKey
}
// ToPEM returns the PEM encoded cert pair
func (kp *KeyPair) ToPEM() (certPEM []byte, privPEM []byte, err error) {
// PEM encode private key
privDER, err := x509.MarshalECPrivateKey(kp.Priv)
if err != nil {
return
}
privBlock := &pem.Block{
Type: "EC PRIVATE KEY",
Bytes: privDER,
}
privPEM = pem.EncodeToMemory(privBlock)
// PEM encode cert
certBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: kp.Cert.Raw,
}
certPEM = pem.EncodeToMemory(certBlock)
return
}
// GenerateCA generates a self-signed CA cert/key pair that is valid from now
// until notAfter, with a random serial and a CommonName derived from it.
func GenerateCA(notAfter time.Time, organization string) (*KeyPair, error) {
	now := time.Now()
	if notAfter.Before(now) {
		return nil, fmt.Errorf("invalid notAfter: %s before %s", notAfter.String(), now.String())
	}
	// Random serial in [0, MaxInt64).
	serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64))
	if err != nil {
		return nil, err
	}
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	template := &x509.Certificate{
		SerialNumber: serial,
		Subject: pkix.Name{
			CommonName:   fmt.Sprintf("olm-selfsigned-%x", serial),
			Organization: []string{organization},
		},
		NotBefore:             now,
		NotAfter:              notAfter,
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	// Self-signed: the template acts as both subject and issuer.
	der, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
	if err != nil {
		return nil, err
	}
	parsed, err := x509.ParseCertificate(der)
	if err != nil {
		return nil, err
	}
	return &KeyPair{Cert: parsed, Priv: key}, nil
}
// CreateSignedServingPair creates a serving cert/key pair signed by the given
// ca. hosts must be non-empty: hosts[0] becomes the CommonName and every entry
// is added as a DNS SAN.
func CreateSignedServingPair(notAfter time.Time, organization string, ca *KeyPair, hosts []string) (*KeyPair, error) {
	// Guard the hosts[0] access below; previously an empty slice panicked.
	if len(hosts) == 0 {
		return nil, fmt.Errorf("no hosts given for serving certificate")
	}
	notBefore := time.Now()
	if notAfter.Before(notBefore) {
		return nil, fmt.Errorf("invalid notAfter: %s before %s", notAfter.String(), notBefore.String())
	}
	serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64))
	if err != nil {
		return nil, err
	}
	certDetails := &x509.Certificate{
		SerialNumber: serial,
		Subject: pkix.Name{
			CommonName:   hosts[0],
			Organization: []string{organization},
		},
		NotBefore:             notBefore,
		NotAfter:              notAfter,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		DNSNames:              hosts,
	}
	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	certRaw, err := x509.CreateCertificate(rand.Reader, certDetails, ca.Cert, &privateKey.PublicKey, ca.Priv)
	if err != nil {
		return nil, err
	}
	cert, err := x509.ParseCertificate(certRaw)
	if err != nil {
		return nil, err
	}
	return &KeyPair{Cert: cert, Priv: privateKey}, nil
}
// PEMToCert parses the first PEM block of certPEM as an x509 certificate.
// It returns an error when no PEM block is present or the block does not
// contain a valid certificate.
func PEMToCert(certPEM []byte) (*x509.Certificate, error) {
	block, _ := pem.Decode(certPEM)
	if block == nil {
		return nil, fmt.Errorf("cert PEM empty")
	}
	return x509.ParseCertificate(block.Bytes)
}
// VerifyCert checks that the given cert is signed and trusted by the given CA
func VerifyCert(ca, cert *x509.Certificate, host string) error {
roots := x509.NewCertPool()
roots.AddCert(ca)
opts := x509.VerifyOptions{
DNSName: host,
Roots: roots,
}
if _, err := cert.Verify(opts); err != nil {
return err
}
return nil
}
// Active checks if the given cert is within its valid time window
func Active(cert *x509.Certificate) bool {
now := time.Now()
active := now.After(cert.NotBefore) && now.Before(cert.NotAfter)
return active
}
// PEMHash returns a hash of the given PEM encoded cert.
type PEMHash func(certPEM []byte) (hash string)

// PEMSHA256 returns the hex encoded SHA-256 hash of the given PEM encoded cert.
func PEMSHA256(certPEM []byte) (hash string) {
	sum := sha256.Sum256(certPEM)
	return hex.EncodeToString(sum[:])
}
|
package service
import (
"github.com/gorilla/mux"
"github.com/the-gigi/delinkcious/pkg/db_util"
"log"
"net/http"
httptransport "github.com/go-kit/kit/transport/http"
lm "github.com/the-gigi/delinkcious/pkg/link_manager"
sgm "github.com/the-gigi/delinkcious/pkg/social_graph_client"
)
// Run wires the link-manager service together (database store, social-graph
// client, go-kit transport endpoints) and serves its REST API on port 8080.
// Any startup failure terminates the process via log.Fatal.
func Run() {
	// NOTE(review): the DB endpoint is resolved under the name "social_graph"
	// even though this is the link-manager store — confirm this is intentional
	// and not a copy/paste of the social-graph service's setup.
	dbHost, dbPort, err := db_util.GetDbEndpoint("social_graph")
	if err != nil {
		log.Fatal(err)
	}
	store, err := lm.NewDbLinkStore(dbHost, dbPort, "postgres", "postgres")
	if err != nil {
		log.Fatal(err)
	}
	// NOTE(review): the social-graph address is hardcoded to localhost:9090 —
	// presumably fine for local runs; confirm for deployed environments.
	socialGraphClient, err := sgm.NewClient("localhost:9090")
	if err != nil {
		log.Fatal(err)
	}
	svc, err := lm.NewLinkManager(store, socialGraphClient, nil)
	if err != nil {
		log.Fatal(err)
	}
	// One go-kit HTTP server per operation: endpoint + request decoder +
	// shared response encoder.
	getLinksHandler := httptransport.NewServer(
		makeGetLinksEndpoint(svc),
		decodeGetLinksRequest,
		encodeResponse,
	)
	addLinkHandler := httptransport.NewServer(
		makeAddLinkEndpoint(svc),
		decodeAddLinkRequest,
		encodeResponse,
	)
	updateLinkHandler := httptransport.NewServer(
		makeUpdateLinkEndpoint(svc),
		decodeUpdateLinkRequest,
		encodeResponse,
	)
	deleteLinkHandler := httptransport.NewServer(
		makeDeleteLinkEndpoint(svc),
		decodeDeleteLinkRequest,
		encodeResponse,
	)
	// All operations share the /links path, distinguished by HTTP method.
	r := mux.NewRouter()
	r.Methods("GET").Path("/links").Handler(getLinksHandler)
	r.Methods("POST").Path("/links").Handler(addLinkHandler)
	r.Methods("PUT").Path("/links").Handler(updateLinkHandler)
	r.Methods("DELETE").Path("/links").Handler(deleteLinkHandler)
	log.Println("Listening on port 8080...")
	log.Fatal(http.ListenAndServe(":8080", r))
}
|
package main
import "fmt"
// main demonstrates filter by keeping every value greater than 1.
func main() {
	input := []int{1, 4, 6, 7, 8, 9, 10, 11, 17, 21, 23}
	greaterThanOne := func(n int) bool { return n > 1 }
	fmt.Println(filter(input, greaterThanOne))
}
// filter returns the values of numbers for which callback reports true,
// preserving their original order. It returns a nil slice when nothing
// matches.
func filter(numbers []int, callback func(int) bool) []int {
	var kept []int
	for _, value := range numbers {
		if !callback(value) {
			continue
		}
		kept = append(kept, value)
	}
	return kept
}
// [4 6 7 8 9 10 11 17 21 23]
|
package pretty_poly
import "testing"
import "github.com/franela/goblin"
// toMixedRadixTestCase pairs a radix list with the number to convert.
// NOTE(review): this type is not referenced by the test or benchmark below —
// possibly intended for a future table-driven test.
type toMixedRadixTestCase struct {
	bases [ ]int
	num int
}
// BenchmarkToMixedRadix measures toMixedRadix on a fixed five-digit base list.
func BenchmarkToMixedRadix(bench *testing.B) {
	bases := []int{100, 100, 100, 100, 100}
	// Reset (not merely start) the timer so the slice setup above is
	// excluded from the measurement; StartTimer on an already-running
	// timer was a no-op.
	bench.ResetTimer()
	for ith := 0; ith < bench.N; ith++ {
		toMixedRadix(bases, 100)
	}
}
// TestToMixedRadix spot-checks that toMixedRadix enumerates the two-digit
// base-10 space: input i maps to its digits, most-significant first.
func TestToMixedRadix (test *testing.T) {
	var output [ ]float64
	gob := goblin.Goblin(test)
	gob.Describe("toMixedRadix", func ( ) {
		gob.It("enumerates a space", func ( ) {
			for ith := 0; ith < 100; ith++ {
				output = toMixedRadix([]int {10, 10}, ith)
				// Only a few representative points are asserted; the loop
				// still exercises every input in [0, 100).
				switch ith {
				case 0:
					gob.Assert(output[0]).Equal(float64(0))
					gob.Assert(output[1]).Equal(float64(0))
				case 1:
					gob.Assert(output[0]).Equal(float64(0))
					gob.Assert(output[1]).Equal(float64(1))
				case 2:
					gob.Assert(output[0]).Equal(float64(0))
					gob.Assert(output[1]).Equal(float64(2))
				case 99:
					gob.Assert(output[0]).Equal(float64(9))
					gob.Assert(output[1]).Equal(float64(9))
				}
			}
		})
	})
}
|
package nimkv
import (
"errors"
"sync"
"time"
)
// Cacher is the interface implemented by every cache variant in this package.
type Cacher interface {
	// IsItemPresent reports whether an item exists under the key.
	IsItemPresent(string) bool
	// GetItem returns the item stored under the key, or an error.
	GetItem(string) (*cacheItem, error)
	// GetAllItems returns every item currently stored.
	GetAllItems() *cacheItems
	// DeleteItem removes the item stored under the key.
	DeleteItem(string) error
	// SetItemWithExpiry stores a value that expires after the given duration.
	SetItemWithExpiry(string, interface{}, time.Duration)
	// SetItem stores a value without an explicit expiry.
	SetItem(string, interface{})
	// Purge empties the cache.
	Purge()
}
// cacheBase has fields that could be reused across various specialised cache
// implementations. No field to store items is present as the implementation of
// these will vary from cache to cache. For instance, an LRU cache could use a
// doubly linked list for fast eviction, while an LFU cache could use a
// minHeap. cacheBase is also used to load configuration info from config.yaml.
type cacheBase struct {
	Capacity int `yaml:"Capacity"`
	// Type represents the cache's eviction policy. For instance, Type could
	// equal "LRU".
	Type string `yaml:"Type"`
	// TickerPeriod is interpreted as a number of seconds (it is multiplied by
	// time.Second below). If it is 0, expired keys won't be evicted
	// periodically; otherwise it must be >= 30.
	TickerPeriod time.Duration `yaml:"TickerPeriod"`
	rwLock       sync.RWMutex
	// ticker can be used to evict expired keys periodically.
	ticker <-chan time.Time
}

// checkAndSetFields validates the receiver's configuration and initializes the
// eviction ticker. It returns every validation error found, or nil when the
// configuration is valid.
func (c *cacheBase) checkAndSetFields() []error {
	errorList := make([]error, 0, 2)
	if c.Capacity <= 0 {
		errorList = append(errorList, errors.New("Cache Capacity has to be > 0."))
	}
	switch {
	case c.TickerPeriod < 0 || (c.TickerPeriod > 0 && c.TickerPeriod < 30):
		errorList = append(errorList, errors.New("TickerPeriod should either be 0, or be >= 30."))
	case c.TickerPeriod > 0:
		// Only start a ticker when periodic eviction was requested;
		// previously time.Tick was also invoked with a zero period.
		// NOTE: time.Tick's ticker is never garbage collected — acceptable
		// here because the cache lives for the process lifetime.
		c.ticker = time.Tick(c.TickerPeriod * time.Second)
	}
	if len(errorList) > 0 {
		return errorList
	}
	// The zero-value RWMutex is ready to use; no explicit reset is required.
	return nil
}
|
// This file was generated for SObject NamedCredential, API Version v43.0 at 2018-07-30 03:47:20.867565454 -0400 EDT m=+7.210442449
package sobjects
import (
"fmt"
"strings"
)
// NamedCredential mirrors the Salesforce NamedCredential SObject (generated
// for API v43.0; see the file header). Field metadata is carried in `force`
// struct tags.
type NamedCredential struct {
	BaseSObject
	AuthProviderId                            string `force:",omitempty"`
	CalloutOptionsAllowMergeFieldsInBody      bool   `force:",omitempty"`
	CalloutOptionsAllowMergeFieldsInHeader    bool   `force:",omitempty"`
	CalloutOptionsGenerateAuthorizationHeader bool   `force:",omitempty"`
	CreatedById                               string `force:",omitempty"`
	CreatedDate                               string `force:",omitempty"`
	DeveloperName                             string `force:",omitempty"`
	Endpoint                                  string `force:",omitempty"`
	Id                                        string `force:",omitempty"`
	IsDeleted                                 bool   `force:",omitempty"`
	Language                                  string `force:",omitempty"`
	LastModifiedById                          string `force:",omitempty"`
	LastModifiedDate                          string `force:",omitempty"`
	MasterLabel                               string `force:",omitempty"`
	NamespacePrefix                           string `force:",omitempty"`
	PrincipalType                             string `force:",omitempty"`
	SystemModstamp                            string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject.
func (t *NamedCredential) ApiName() string {
	return "NamedCredential"
}
// String renders a human-readable multi-line dump of the record, one field
// per line.
func (t *NamedCredential) String() string {
	builder := strings.Builder{}
	// NOTE(review): t.Name is not declared on NamedCredential; it presumably
	// comes from the embedded BaseSObject — confirm it exists there.
	builder.WriteString(fmt.Sprintf("NamedCredential #%s - %s\n", t.Id, t.Name))
	builder.WriteString(fmt.Sprintf("\tAuthProviderId: %v\n", t.AuthProviderId))
	builder.WriteString(fmt.Sprintf("\tCalloutOptionsAllowMergeFieldsInBody: %v\n", t.CalloutOptionsAllowMergeFieldsInBody))
	builder.WriteString(fmt.Sprintf("\tCalloutOptionsAllowMergeFieldsInHeader: %v\n", t.CalloutOptionsAllowMergeFieldsInHeader))
	builder.WriteString(fmt.Sprintf("\tCalloutOptionsGenerateAuthorizationHeader: %v\n", t.CalloutOptionsGenerateAuthorizationHeader))
	builder.WriteString(fmt.Sprintf("\tCreatedById: %v\n", t.CreatedById))
	builder.WriteString(fmt.Sprintf("\tCreatedDate: %v\n", t.CreatedDate))
	builder.WriteString(fmt.Sprintf("\tDeveloperName: %v\n", t.DeveloperName))
	builder.WriteString(fmt.Sprintf("\tEndpoint: %v\n", t.Endpoint))
	builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id))
	builder.WriteString(fmt.Sprintf("\tIsDeleted: %v\n", t.IsDeleted))
	builder.WriteString(fmt.Sprintf("\tLanguage: %v\n", t.Language))
	builder.WriteString(fmt.Sprintf("\tLastModifiedById: %v\n", t.LastModifiedById))
	builder.WriteString(fmt.Sprintf("\tLastModifiedDate: %v\n", t.LastModifiedDate))
	builder.WriteString(fmt.Sprintf("\tMasterLabel: %v\n", t.MasterLabel))
	builder.WriteString(fmt.Sprintf("\tNamespacePrefix: %v\n", t.NamespacePrefix))
	builder.WriteString(fmt.Sprintf("\tPrincipalType: %v\n", t.PrincipalType))
	builder.WriteString(fmt.Sprintf("\tSystemModstamp: %v\n", t.SystemModstamp))
	return builder.String()
}
// NamedCredentialQueryResponse is the shape of a SOQL query result containing
// NamedCredential records.
type NamedCredentialQueryResponse struct {
	BaseQuery
	Records []NamedCredential `json:"Records" force:"records"`
}
|
package web
import (
"fmt"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/session/v2"
"github.com/google/uuid"
"github.com/iamtraining/forum/entity"
"github.com/iamtraining/forum/store"
"golang.org/x/crypto/bcrypt"
)
// sessions is the package-wide session store.
var sessions *session.Session

// UserHandler bundles the HTTP handlers for user registration, login and
// logout, backed by the given store.
type UserHandler struct {
	store *store.Store
}

// init eagerly creates the session store so handlers can rely on it being
// non-nil.
func init() {
	sessions = session.New()
}
// Register creates a new user account from a JSON body of
// {"username", "password"}. Invalid forms redirect back to /register;
// success stores a bcrypt hash of the password and redirects to /.
func (h *UserHandler) Register(c *fiber.Ctx) error {
	type data struct {
		Username string `json:"username"`
		Password string `json:"password"`
	}
	body := data{}
	err := c.BodyParser(&body)
	if err != nil {
		c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error": "failure while parsing params to a struct",
		})
		return nil
	}
	form := CreateUserForm{
		Username:       body.Username,
		Password:       body.Password,
		IsNotAvailable: false,
	}
	// A successful lookup means the username is taken.
	// NOTE(review): check-then-create is racy; two concurrent registrations
	// of the same name could both pass — confirm the store enforces
	// uniqueness.
	if _, err := h.store.GetUserByUsername(form.Username); err == nil {
		form.IsNotAvailable = true
	}
	if !form.Validate() {
		// Hand the filled form back to the registration page.
		c.Locals("form", form)
		return c.Redirect("/register", fiber.StatusFound)
	}
	// Only the bcrypt hash is persisted, never the plaintext password.
	password, err := bcrypt.GenerateFromPassword([]byte(form.Password), bcrypt.DefaultCost)
	if err != nil {
		c.Status(fiber.StatusInternalServerError)
		return nil
	}
	if err := h.store.Create(&entity.User{
		ID:       uuid.New(),
		Username: form.Username,
		Password: string(password),
	}); err != nil {
		c.Status(fiber.StatusInternalServerError)
		return nil
	}
	return c.Redirect("/", fiber.StatusFound)
	// Dead code below: a previous JSON success response kept for reference.
	/*c.Status(fiber.StatusOK).JSON(fiber.Map{
		"message": "your registration was successful. please log in",
		"username": form.Username,
		"password": form.Password,
	})
	*/
}
// PrepareLogin parses the submitted credentials, verifies them against the
// stored bcrypt hash, and on success stashes the user in locals before
// passing control to the next handler (CommitLogin).
func (h *UserHandler) PrepareLogin(c *fiber.Ctx) error {
	type data struct {
		Username string `json:"username"`
		Password string `json:"password"`
	}
	body := data{}
	err := c.BodyParser(&body)
	if err != nil {
		c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error": "failure while parsing params to a struct",
		})
		return nil
	}
	form := LoginForm{
		Username:             body.Username,
		Password:             body.Password,
		IncorrectCredentials: false,
	}
	user, err := h.store.GetUserByUsername(form.Username)
	if err != nil {
		form.IncorrectCredentials = true
		fmt.Println("invalid credentials")
		// NOTE(review): a 404 for unknown usernames enables user enumeration,
		// and the handler keeps running after writing this response (it may
		// still redirect below) — confirm both behaviors are intended.
		c.Status(fiber.StatusNotFound).JSON(fiber.Map{
			"error": form.Err,
		})
	} else {
		// Constant-time comparison of the stored hash and submitted password.
		pwErr := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(form.Password))
		form.IncorrectCredentials = pwErr != nil
		fmt.Println("valid credentials")
	}
	if !form.Validate() {
		c.Locals("form", form)
		return c.Redirect("/login", fiber.StatusFound)
	}
	// NOTE(review): this logs the whole form, including the plaintext
	// password — consider removing.
	fmt.Println(form)
	c.Locals("user", user)
	return c.Next()
}
// CommitLogin issues the session token for the user that PrepareLogin placed
// in locals, then redirects to the home page.
func (h *UserHandler) CommitLogin(c *fiber.Ctx) error {
	// Defensive comma-ok assertion: the bare assertion previously panicked if
	// this handler ran without PrepareLogin.
	user, ok := c.Locals("user").(entity.User)
	if !ok {
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error":   true,
			"message": "commit login error",
		})
	}
	// SECURITY(review): the signing key is hardcoded; move it to configuration.
	t, err := Login(c, user.ID, []byte("SECRET_KEY"))
	if err != nil {
		// Return immediately: previously the handler fell through and
		// redirected even after reporting this error.
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error":   true,
			"message": "commit login error",
		})
	}
	fmt.Println("login t", t)
	return c.Redirect("/", fiber.StatusFound)
}
// Logout ends the current session via the package-level Logout helper and
// redirects to the home page.
func (h *UserHandler) Logout(c *fiber.Ctx) error {
	Logout(c)
	return c.Redirect("/", fiber.StatusFound)
}
|
// Copyright © 2018 Inanc Gumus
// Learn Go Programming Course
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
//
// For more tutorials : https://learngoprogramming.com
// In-person training : https://www.linkedin.com/in/inancgumus/
// Follow me on twitter: https://twitter.com/inancgumus
package main
// ---------------------------------------------------------
// EXERCISE: Print Your Name and LastName
//
// Print your name and lastname using Printf
//
// EXPECTED OUTPUT
// My name is Inanc and my lastname is Gumus.
//
// BONUS
// Store the formatting specifier (first argument of Printf)
// in a variable.
// Then pass it to printf
// ---------------------------------------------------------
// main is the exercise entry point: print your name and lastname using
// fmt.Printf so it matches the EXPECTED OUTPUT above.
func main() {
	// BONUS: Use a variable for the format specifier
	// fmt.Printf("?", ?, ?)
}
|
package main
import (
_ "bytes"
_ "encoding/json"
"github.com/gorilla/mux"
"log"
"net/http"
)
// RunMux registers every route from the package-level handler map plus two
// static picture directories, then serves them on :8000.
func RunMux() {
	router := mux.NewRouter()
	for pattern, fn := range handler {
		router.HandleFunc(pattern, fn)
	}
	// Serve image files from disk, stripping the URL prefix first.
	serveStatic := func(prefix, dir string) {
		router.PathPrefix(prefix).Handler(http.StripPrefix(prefix, http.FileServer(http.Dir(dir))))
	}
	serveStatic("/schuhe/pic/", "./data/schuhe/pic/")
	serveStatic("/schmuck/pic/", "./data/schmuck/pic/")
	log.Fatal(http.ListenAndServe(":8000", router))
}
// main starts the HTTP server. The commented block below appears to be a
// one-off migration that copied all records into backup.sqlite; kept for
// reference.
func main() {
	RunMux()
	// schuhe := ReadAll()
	// dbPath = "backup.sqlite"
	// for _, schuh := range schuhe {
	// 	Insert(&schuh)
	// }
}
|
package userstory
// Session is a loose key/value bag of per-user session state.
type Session map[string]interface{}
|
package envoyconfig
import (
"bytes"
"context"
"embed"
"encoding/base64"
"os"
"path/filepath"
"testing"
"text/template"
envoy_config_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/pomerium/pomerium/config"
"github.com/pomerium/pomerium/config/envoyconfig/filemgr"
"github.com/pomerium/pomerium/internal/testutil"
"github.com/pomerium/pomerium/pkg/cryptutil"
)
const (
aExampleComCert = `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVQVENDQXFXZ0F3SUJBZ0lSQUlWMDhHSVFYTWRVT0NXV3FocXlGR3N3RFFZSktvWklodmNOQVFFTEJRQXcKY3pFZU1Cd0dBMVVFQ2hNVmJXdGpaWEowSUdSbGRtVnNiM0J0Wlc1MElFTkJNU1F3SWdZRFZRUUxEQnRqWVd4bApZa0J3YjNBdGIzTWdLRU5oYkdWaUlFUnZlSE5sZVNreEt6QXBCZ05WQkFNTUltMXJZMlZ5ZENCallXeGxZa0J3CmIzQXRiM01nS0VOaGJHVmlJRVJ2ZUhObGVTa3dIaGNOTVRrd05qQXhNREF3TURBd1doY05NekF3TlRJeU1qRXoKT0RRMFdqQlBNU2N3SlFZRFZRUUtFeDV0YTJObGNuUWdaR1YyWld4dmNHMWxiblFnWTJWeWRHbG1hV05oZEdVeApKREFpQmdOVkJBc01HMk5oYkdWaVFIQnZjQzF2Y3lBb1EyRnNaV0lnUkc5NGMyVjVLVENDQVNJd0RRWUpLb1pJCmh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTm1HMWFKaXc0L29SMHFqUDMxUjRXeTZkOUVqZHc5K1kyelQKcjBDbGNYTDYxRk11R0YrKzJRclV6Y0VUZlZ2dGM1OXNQa0xkRHNtZ0Y2VlZCOTkyQ3ArWDlicWczWmQwSXZtbApVbjJvdTM5eUNEYnV2Q0E2d1gwbGNHL2JkRDE3TkRrS0poL3g5SDMzU3h4SG5UamlKdFBhbmt1MUI3ajdtRmM5Ck5jNXRyamFvUHBGaFJqMTJ1L0dWajRhWWs3SStpWHRpZHBjZXp2eWNDT0NtQlIwNHkzeWx5Q2sxSWNMTUhWOEEKNXphUFpVck15ZUtnTE1PTGlDSDBPeHhhUzh0Nk5vTjZudDdmOUp1TUxTN2V5SkxkQW05bGg0c092YXBPVklXZgpJQitaYnk5bkQ1dWl4N3V0a3llWTFOeE05SFZhUmZTQzcrejM4TDBWN3lJZlpCNkFLcWNDQXdFQUFhTndNRzR3CkRnWURWUjBQQVFIL0JBUURBZ1dnTUJNR0ExVWRKUVFNTUFvR0NDc0dBUVVGQndNQk1Bd0dBMVVkRXdFQi93UUMKTUFBd0h3WURWUjBqQkJnd0ZvQVVTaG9mWE5rY1hoMnE0d25uV1oyYmNvMjRYRVF3R0FZRFZSMFJCQkV3RDRJTgpZUzVsZUdGdGNHeGxMbU52YlRBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQVA3aHVraThGeG54azRoVnJYUk93Ck51Uy9OUFhmQ3VaVDZWemJYUVUxbWNrZmhweVNDajVRZkFDQzdodVp6Qkp0NEtsUHViWHdRQ25YMFRMSmg1L0cKUzZBWEFXQ3VTSW5jTTZxNGs4MFAzVllWK3hXOS9rdERnTk1FTlNxSjdKR3lqdzBWWHlhOUZwdWd6Q3ZnN290RQo5STcrZTN0cmJnUDBHY3plSml6WTJBMVBWU082MVdKQ1lNQjNDLzcwVE9KMkZTNy82bURPTG9DSVJCY215cW5KClY2Vk5sRDl3Y2xmUWIrZUp0YlY0Vlg2RUY5UEYybUtncUNKT0FKLzBoMHAydTBhZGgzMkJDS2dIMDRSYUtuSS8KUzY1N0MrN1YzVEgzQ1VIVHgrdDRRRll4UEhRL0loQ3pYdUpVeFQzYWtYNEQ1czJkTHp2RnBJMFIzTVBwUE9VQQpUelpSdDI2T3FVNHlUdUFnb0kvZnZMdk55VTNZekF3ZUQ2Mndxc1hiVHAranNFcWpoODUvakpXWnA4RExKK0w3CmhXQW0rSVNKTzhrNWgwR0lIMFllb01heXBJbjRubWVsbHNSM1dvYzZRVTZ4cFFTd3V1NXE0ckJzOUxDWS9kZkwKNkEzMEhlYXVVK2sydGFUVlBMY2FCZm11NDJPa
HMyYzQ0bzNPYnlvVkNDNi8KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=`
aExampleComKey = `LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRRFpodFdpWXNPUDZFZEsKb3o5OVVlRnN1bmZSSTNjUGZtTnMwNjlBcFhGeSt0UlRMaGhmdnRrSzFNM0JFMzFiN1hPZmJENUMzUTdKb0JlbApWUWZmZGdxZmwvVzZvTjJYZENMNXBWSjlxTHQvY2dnMjdyd2dPc0Y5SlhCdjIzUTllelE1Q2lZZjhmUjk5MHNjClI1MDQ0aWJUMnA1THRRZTQrNWhYUFRYT2JhNDJxRDZSWVVZOWRydnhsWStHbUpPeVBvbDdZbmFYSHM3OG5BamcKcGdVZE9NdDhwY2dwTlNIQ3pCMWZBT2MyajJWS3pNbmlvQ3pEaTRnaDlEc2NXa3ZMZWphRGVwN2UzL1NiakMwdQozc2lTM1FKdlpZZUxEcjJxVGxTRm55QWZtVzh2WncrYm9zZTdyWk1ubU5UY1RQUjFXa1gwZ3UvczkvQzlGZThpCkgyUWVnQ3FuQWdNQkFBRUNnZ0VCQUsrclFrLzNyck5EQkgvMFFrdTBtbll5U0p6dkpUR3dBaDlhL01jYVZQcGsKTXFCU000RHZJVnlyNnRZb0pTN2VIbWY3QkhUL0RQZ3JmNjBYZEZvMGUvUFN4ckhIUSswUjcwVHBEQ3RLM3REWAppR2JFZWMwVlpqam95VnFzUWIxOUIvbWdocFY1MHRiL3BQcmJvczdUWkVQbTQ3dUVJUTUwc055VEpDYm5VSy8xCnhla2ZmZ3hMbmZlRUxoaXhDNE1XYjMzWG9GNU5VdWduQ2pUakthUFNNUmpISm9YSFlGWjdZdEdlSEd1aDR2UGwKOU5TM0YxT2l0MWNnQzNCSm1BM28yZmhYbTRGR1FhQzNjYUdXTzE5eHAwRWE1eXQ0RHZOTWp5WlgvSkx1Qko0NQpsZU5jUSs3c3U0dW0vY0hqcFFVenlvZmoydFBIU085QXczWGY0L2lmN0hFQ2dZRUE1SWMzMzVKUUhJVlQwc003CnhkY3haYmppbUE5alBWMDFXSXh0di8zbzFJWm5TUGFocEFuYXVwZGZqRkhKZmJTYlZXaUJTaUZpb2RTR3pIdDgKTlZNTGFyVzVreDl5N1luYXdnZjJuQjc2VG03aFl6L3h5T3AxNXFRbmswVW9DdnQ2MHp6dDl5UE5KQ1pWalFwNgp4cUw4T1c4emNlUGpxZzJBTHRtcVhpNitZRXNDZ1lFQTg2ME5zSHMzNktFZE91Q1o1TXF6NVRLSmVYSzQ5ZkdBCjdxcjM5Sm9RcWYzbEhSSWozUlFlNERkWmQ5NUFXcFRKUEJXdnp6NVROOWdwNHVnb3VGc0tCaG82YWtsUEZTUFIKRkZwWCtGZE56eHJGTlAwZHhydmN0bXU2OW91MFR0QU1jd1hYWFJuR1BuK0xDTnVUUHZndHZTTnRwSEZMb0dzUQorVDFpTjhpWS9aVUNnWUJpMVJQVjdkb1ZxNWVuNCtWYTE0azJlL0lMWDBSRkNxV0NpU0VCMGxhNmF2SUtQUmVFCjhQb1dqbGExUWIzSlRxMkxEMm95M0NOaTU1M3dtMHNKYU1QY1A0RmxYa2wrNzRxYk5ZUnkybmJZS3QzdzVYdTAKcjZtVHVOU2d2VnptK3dHUWo1NCtyczRPWDBIS2dJaStsVWhOc29qbUxXK05ZTTlaODZyWmxvK2c1d0tCZ0VMQQplRXlOSko2c2JCWng2cFo3Vk5hSGhwTm5jdldreDc0WnhiMFM2MWUxL3FwOUNxZ0lXQUR5Q0tkR2tmaCtZN1g2Cjl1TmQzbXdnNGpDUGlvQWVLRnZObVl6K01oVEhjQUlVVVo3dFE1cGxhZnAvRUVZZHRuT2VoV1ArbDFFenV3VlQKWjFEUXU3YnBONHdnb25DUWllOFRJbmoydEZIb2
9vaTBZUkNJK2lnVkFvR0JBSUxaOXd4WDlnMmVNYU9xUFk1dgo5RGxxNFVEZlpaYkprNFZPbmhjR0pWQUNXbmlpNTU0Y1RCSEkxUTdBT0ZQOHRqK3d3YWJBOWRMaUpDdzJzd0E2ClQrdnhiK1NySGxEUnFON3NNRUQ1Z091REo0eHJxRVdLZ3ZkSEsvME9EMC9ZMUFvSCt2aDlJMHVaV0RRNnNLcXcKeFcrbDk0UTZXSW1xYnpDODZsa3JXa0lCCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K`
)
var (
	// testDataFS embeds the JSON fixtures used by testData.
	//go:embed testdata/*.json
	testDataFS embed.FS
	// testDataTemplate parses every fixture once at package init; a parse
	// failure aborts the test binary via template.Must.
	testDataTemplate = template.Must(template.ParseFS(testDataFS, "testdata/*.json"))
)
func testData(t *testing.T, name string, data interface{}) string {
t.Helper()
var buf bytes.Buffer
err := testDataTemplate.ExecuteTemplate(&buf, name, data)
require.NoError(t, err)
return buf.String()
}
// Test_buildMetricsHTTPConnectionManagerFilter builds the metrics listener
// with a TLS certificate configured and compares the result against a
// templated JSON fixture.
func Test_buildMetricsHTTPConnectionManagerFilter(t *testing.T) {
	// The file manager writes cert material under the user cache dir with
	// content-derived names; the fixture is templated with those exact paths.
	// NOTE(review): these hashes are tied to the aExampleComCert/Key
	// constants — regenerate them together if the certs change.
	cacheDir, _ := os.UserCacheDir()
	certFileName := filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-crt-354e49305a5a39414a545530374e58454e48334148524c4e324258463837364355564c4e4532464b54355139495547514a38.pem")
	keyFileName := filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-key-3350415a38414e4e4a4655424e55393430474147324651433949384e485341334b5157364f424b4c5856365a545937383735.pem")
	b := New("local-grpc", "local-http", "local-metrics", filemgr.NewManager(), nil)
	li, err := b.buildMetricsListener(&config.Config{
		Options: &config.Options{
			MetricsAddr:           "127.0.0.1:9902",
			MetricsCertificate:    aExampleComCert,
			MetricsCertificateKey: aExampleComKey,
		},
	})
	expect := testData(t, "metrics_http_connection_manager.json", struct{ CertFile, KeyFile string }{certFileName, keyFileName})
	require.NoError(t, err)
	testutil.AssertProtoJSONEqual(t, expect, li)
}
// Test_buildMainHTTPConnectionManagerFilter builds the main HTTP connection
// manager filter with non-default XFF settings and compares it against the
// stored JSON fixture.
func Test_buildMainHTTPConnectionManagerFilter(t *testing.T) {
	b := New("local-grpc", "local-http", "local-metrics", nil, nil)
	options := config.NewDefaultOptions()
	// Non-default XFF settings so their effect appears in the fixture.
	options.SkipXffAppend = true
	options.XffNumTrustedHops = 1
	options.AuthenticateURLString = "https://authenticate.example.com"
	filter, err := b.buildMainHTTPConnectionManagerFilter(context.Background(), &config.Config{Options: options}, false)
	require.NoError(t, err)
	testutil.AssertProtoJSONEqual(t, testData(t, "main_http_connection_manager_filter.json", nil), filter)
}
func Test_buildDownstreamTLSContext(t *testing.T) {
b := New("local-grpc", "local-http", "local-metrics", filemgr.NewManager(), nil)
cacheDir, _ := os.UserCacheDir()
clientCAFileName := filepath.Join(cacheDir, "pomerium", "envoy", "files", "client-ca-3533485838304b593757424e3354425157494c4747433534384f474f3631364d5332554c3332485a483834334d50454c344a.pem")
t.Run("no-validation", func(t *testing.T) {
downstreamTLSContext, err := b.buildDownstreamTLSContextMulti(context.Background(), &config.Config{Options: &config.Options{}}, nil)
require.NoError(t, err)
testutil.AssertProtoJSONEqual(t, `{
"commonTlsContext": {
"tlsParams": {
"cipherSuites": [
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305"
],
"tlsMinimumProtocolVersion": "TLSv1_2"
},
"alpnProtocols": ["h2", "http/1.1"]
}
}`, downstreamTLSContext)
})
t.Run("client-ca", func(t *testing.T) {
downstreamTLSContext, err := b.buildDownstreamTLSContextMulti(context.Background(), &config.Config{Options: &config.Options{
DownstreamMTLS: config.DownstreamMTLSSettings{
CA: "VEVTVAo=", // "TEST\n" (with a trailing newline)
},
}}, nil)
require.NoError(t, err)
testutil.AssertProtoJSONEqual(t, `{
"commonTlsContext": {
"tlsParams": {
"cipherSuites": [
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305"
],
"tlsMinimumProtocolVersion": "TLSv1_2"
},
"alpnProtocols": ["h2", "http/1.1"],
"validationContext": {
"maxVerifyDepth": 1,
"onlyVerifyLeafCertCrl": true,
"trustChainVerification": "ACCEPT_UNTRUSTED",
"trustedCa": {
"filename": "`+clientCAFileName+`"
}
}
}
}`, downstreamTLSContext)
})
t.Run("client-ca-strict", func(t *testing.T) {
downstreamTLSContext, err := b.buildDownstreamTLSContextMulti(context.Background(), &config.Config{Options: &config.Options{
DownstreamMTLS: config.DownstreamMTLSSettings{
CA: "VEVTVAo=", // "TEST\n" (with a trailing newline)
Enforcement: config.MTLSEnforcementRejectConnection,
},
}}, nil)
require.NoError(t, err)
testutil.AssertProtoJSONEqual(t, `{
"commonTlsContext": {
"tlsParams": {
"cipherSuites": [
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305"
],
"tlsMinimumProtocolVersion": "TLSv1_2"
},
"alpnProtocols": ["h2", "http/1.1"],
"validationContext": {
"maxVerifyDepth": 1,
"onlyVerifyLeafCertCrl": true,
"trustedCa": {
"filename": "`+clientCAFileName+`"
}
}
},
"requireClientCertificate": true
}`, downstreamTLSContext)
})
t.Run("policy-client-ca", func(t *testing.T) {
downstreamTLSContext, err := b.buildDownstreamTLSContextMulti(context.Background(), &config.Config{Options: &config.Options{
Policies: []config.Policy{
{
From: "https://a.example.com:1234",
TLSDownstreamClientCA: "VEVTVA==", // "TEST" (no trailing newline)
},
},
}}, nil)
require.NoError(t, err)
testutil.AssertProtoJSONEqual(t, `{
"commonTlsContext": {
"tlsParams": {
"cipherSuites": [
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305"
],
"tlsMinimumProtocolVersion": "TLSv1_2"
},
"alpnProtocols": ["h2", "http/1.1"],
"validationContext": {
"maxVerifyDepth": 1,
"onlyVerifyLeafCertCrl": true,
"trustChainVerification": "ACCEPT_UNTRUSTED",
"trustedCa": {
"filename": "`+clientCAFileName+`"
}
}
}
}`, downstreamTLSContext)
})
t.Run("client-ca-max-verify-depth", func(t *testing.T) {
var maxVerifyDepth uint32
config := &config.Config{Options: &config.Options{
DownstreamMTLS: config.DownstreamMTLSSettings{
MaxVerifyDepth: &maxVerifyDepth,
CA: "VEVTVAo=", // "TEST\n"
},
}}
maxVerifyDepth = 10
downstreamTLSContext, err :=
b.buildDownstreamTLSContextMulti(context.Background(), config, nil)
require.NoError(t, err)
testutil.AssertProtoJSONEqual(t, `{
"maxVerifyDepth": 10,
"onlyVerifyLeafCertCrl": true,
"trustChainVerification": "ACCEPT_UNTRUSTED",
"trustedCa": {
"filename": "`+clientCAFileName+`"
}
}`, downstreamTLSContext.GetCommonTlsContext().GetValidationContext())
maxVerifyDepth = 0
downstreamTLSContext, err =
b.buildDownstreamTLSContextMulti(context.Background(), config, nil)
require.NoError(t, err)
testutil.AssertProtoJSONEqual(t, `{
"onlyVerifyLeafCertCrl": true,
"trustChainVerification": "ACCEPT_UNTRUSTED",
"trustedCa": {
"filename": "`+clientCAFileName+`"
}
}`, downstreamTLSContext.GetCommonTlsContext().GetValidationContext())
})
t.Run("client-ca-san-matchers", func(t *testing.T) {
config := &config.Config{Options: &config.Options{
DownstreamMTLS: config.DownstreamMTLSSettings{
CA: "VEVTVAo=", // "TEST\n"
MatchSubjectAltNames: []config.SANMatcher{
{Type: config.SANTypeDNS, Pattern: `.*\.corp\.example\.com`},
{Type: config.SANTypeEmail, Pattern: `.*@example\.com`},
{Type: config.SANTypeIPAddress, Pattern: `10\.10\.42\..*`},
{Type: config.SANTypeURI, Pattern: `spiffe://example\.com/.*`},
},
},
}}
downstreamTLSContext, err :=
b.buildDownstreamTLSContextMulti(context.Background(), config, nil)
require.NoError(t, err)
testutil.AssertProtoJSONEqual(t, `{
"maxVerifyDepth": 1,
"matchTypedSubjectAltNames": [
{
"matcher": {
"safeRegex": {
"googleRe2": {},
"regex": ".*\\.corp\\.example\\.com"
}
},
"sanType": "DNS"
},
{
"matcher": {
"safeRegex": {
"googleRe2": {},
"regex": ".*@example\\.com"
}
},
"sanType": "EMAIL"
},
{
"matcher": {
"safeRegex": {
"googleRe2": {},
"regex": "10\\.10\\.42\\..*"
}
},
"sanType": "IP_ADDRESS"
},
{
"matcher": {
"safeRegex": {
"googleRe2": {},
"regex": "spiffe://example\\.com/.*"
}
},
"sanType": "URI"
}
],
"onlyVerifyLeafCertCrl": true,
"trustChainVerification": "ACCEPT_UNTRUSTED",
"trustedCa": {
"filename": "`+clientCAFileName+`"
}
}`, downstreamTLSContext.GetCommonTlsContext().GetValidationContext())
})
t.Run("http1", func(t *testing.T) {
downstreamTLSContext, err := b.buildDownstreamTLSContextMulti(context.Background(), &config.Config{Options: &config.Options{
Cert: aExampleComCert,
Key: aExampleComKey,
CodecType: config.CodecTypeHTTP1,
}}, nil)
require.NoError(t, err)
testutil.AssertProtoJSONEqual(t, `{
"commonTlsContext": {
"tlsParams": {
"cipherSuites": [
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305"
],
"tlsMinimumProtocolVersion": "TLSv1_2"
},
"alpnProtocols": ["http/1.1"]
}
}`, downstreamTLSContext)
})
t.Run("http2", func(t *testing.T) {
downstreamTLSContext, err := b.buildDownstreamTLSContextMulti(context.Background(), &config.Config{Options: &config.Options{
Cert: aExampleComCert,
Key: aExampleComKey,
CodecType: config.CodecTypeHTTP2,
}}, nil)
require.NoError(t, err)
testutil.AssertProtoJSONEqual(t, `{
"commonTlsContext": {
"tlsParams": {
"cipherSuites": [
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305"
],
"tlsMinimumProtocolVersion": "TLSv1_2"
},
"alpnProtocols": ["h2"]
}
}`, downstreamTLSContext)
})
}
// Test_clientCABundle checks that the global client CA and each per-policy
// client CA are concatenated newline-separated, global CA first, then
// policies in declaration order.
func Test_clientCABundle(t *testing.T) {
	// Make sure multiple bundled CAs are separated by newlines.
	clientCA1 := []byte("client CA 1")
	clientCA2 := []byte("client CA 2")
	clientCA3 := []byte("client CA 3")
	b64 := base64.StdEncoding.EncodeToString
	cfg := &config.Config{Options: &config.Options{
		DownstreamMTLS: config.DownstreamMTLSSettings{
			CA: b64(clientCA3),
		},
		Policies: []config.Policy{
			{
				From:                  "https://foo.example.com",
				TLSDownstreamClientCA: b64(clientCA2),
			},
			{
				From:                  "https://bar.example.com",
				TLSDownstreamClientCA: b64(clientCA1),
			},
		},
	}}
	expected := []byte("client CA 3\nclient CA 2\nclient CA 1\n")
	actual := clientCABundle(context.Background(), cfg)
	assert.Equal(t, expected, actual)
}
// Test_getAllDomains checks which hostnames getAllRouteableHosts reports as
// routable for the HTTP and gRPC listener addresses, including the implicit
// host:port variants added for hosts the configured certificate does not
// already cover.
func Test_getAllDomains(t *testing.T) {
	cert, err := cryptutil.GenerateCertificate(nil, "*.unknown.example.com")
	require.NoError(t, err)
	certPEM, keyPEM, err := cryptutil.EncodeCertificate(cert)
	require.NoError(t, err)

	opts := &config.Options{
		Addr:                          "127.0.0.1:9000",
		GRPCAddr:                      "127.0.0.1:9001",
		Services:                      "all",
		AuthenticateURLString:         "https://authenticate.example.com",
		AuthenticateInternalURLString: "https://authenticate.int.example.com",
		AuthorizeURLString:            "https://authorize.example.com:9001",
		DataBrokerURLString:           "https://cache.example.com:9001",
		Policies: []config.Policy{
			{From: "http://a.example.com"},
			{From: "https://b.example.com"},
			{From: "https://c.example.com"},
			{From: "https://d.unknown.example.com"},
		},
		Cert: base64.StdEncoding.EncodeToString(certPEM),
		Key:  base64.StdEncoding.EncodeToString(keyPEM),
	}

	t.Run("routable", func(t *testing.T) {
		t.Run("http", func(t *testing.T) {
			got, err := getAllRouteableHosts(opts, "127.0.0.1:9000")
			require.NoError(t, err)
			want := []string{
				"a.example.com",
				"a.example.com:80",
				"authenticate.example.com",
				"authenticate.example.com:443",
				"authenticate.int.example.com",
				"authenticate.int.example.com:443",
				"b.example.com",
				"b.example.com:443",
				"c.example.com",
				"c.example.com:443",
				"d.unknown.example.com",
				"d.unknown.example.com:443",
			}
			assert.Equal(t, want, got)
		})
		t.Run("grpc", func(t *testing.T) {
			got, err := getAllRouteableHosts(opts, "127.0.0.1:9001")
			require.NoError(t, err)
			want := []string{
				"authorize.example.com:9001",
				"cache.example.com:9001",
			}
			assert.Equal(t, want, got)
		})
		t.Run("both", func(t *testing.T) {
			// Shallow copy so the shared opts stay untouched; point gRPC at
			// the HTTP address so one listener serves both sets of hosts.
			merged := *opts
			merged.GRPCAddr = merged.Addr
			got, err := getAllRouteableHosts(&merged, "127.0.0.1:9000")
			require.NoError(t, err)
			want := []string{
				"a.example.com",
				"a.example.com:80",
				"authenticate.example.com",
				"authenticate.example.com:443",
				"authenticate.int.example.com",
				"authenticate.int.example.com:443",
				"authorize.example.com:9001",
				"b.example.com",
				"b.example.com:443",
				"c.example.com",
				"c.example.com:443",
				"cache.example.com:9001",
				"d.unknown.example.com",
				"d.unknown.example.com:443",
			}
			assert.Equal(t, want, got)
		})
	})
}
func Test_urlMatchesHost(t *testing.T) {
t.Parallel()
for _, tc := range []struct {
name string
sourceURL string
host string
matches bool
}{
{"no port", "http://example.com", "example.com", true},
{"host http port", "http://example.com", "example.com:80", true},
{"host https port", "https://example.com", "example.com:443", true},
{"with port", "https://example.com:443", "example.com:443", true},
{"url port", "https://example.com:443", "example.com", true},
{"non standard port", "http://example.com:81", "example.com", false},
{"non standard host port", "http://example.com:81", "example.com:80", false},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
assert.Equal(t, tc.matches, urlMatchesHost(mustParseURL(t, tc.sourceURL), tc.host),
"urlMatchesHost(%s,%s)", tc.sourceURL, tc.host)
})
}
}
// Test_buildRouteConfiguration ensures the built route configuration carries
// the requested name and virtual hosts, and that cluster validation stays
// disabled.
func Test_buildRouteConfiguration(t *testing.T) {
	b := New("local-grpc", "local-http", "local-metrics", nil, nil)
	vhosts := make([]*envoy_config_route_v3.VirtualHost, 10)
	routeCfg, err := b.buildRouteConfiguration("test-route-configuration", vhosts)
	require.NoError(t, err)
	assert.Equal(t, "test-route-configuration", routeCfg.GetName())
	assert.Equal(t, vhosts, routeCfg.GetVirtualHosts())
	assert.False(t, routeCfg.GetValidateClusters().GetValue())
}
// Test_requireProxyProtocol verifies that the proxy_protocol listener filter
// is installed on the main listener exactly when UseProxyProtocol is enabled.
func Test_requireProxyProtocol(t *testing.T) {
	b := New("local-grpc", "local-http", "local-metrics", nil, nil)

	t.Run("required", func(t *testing.T) {
		cfg := &config.Config{Options: &config.Options{
			UseProxyProtocol: true,
			InsecureServer:   true,
		}}
		li, err := b.buildMainListener(context.Background(), cfg, false)
		require.NoError(t, err)
		testutil.AssertProtoJSONEqual(t, `[
			{
				"name": "envoy.filters.listener.proxy_protocol",
				"typedConfig": {
					"@type": "type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol"
				}
			}
		]`, li.GetListenerFilters())
	})

	t.Run("not required", func(t *testing.T) {
		cfg := &config.Config{Options: &config.Options{
			UseProxyProtocol: false,
			InsecureServer:   true,
		}}
		li, err := b.buildMainListener(context.Background(), cfg, false)
		require.NoError(t, err)
		assert.Len(t, li.GetListenerFilters(), 0)
	})
}
|
package services
import "errors"
// Sentinel errors returned by the forum service layer; compare with
// errors.Is at call sites.
var (
	// ErrNameDuplicate indicates a forum with the same name already exists.
	ErrNameDuplicate = errors.New("forum name duplicate")
	// ErrForumIdDuplicate indicates a forum with the same forum_id already exists.
	ErrForumIdDuplicate = errors.New("forum_id duplicate")
	// ErrInternal is the generic internal failure.
	// NOTE(review): the message "net worker error" reads like a typo
	// (perhaps "network error") — confirm intent before changing, as
	// the string may be matched by clients or logs.
	ErrInternal = errors.New("net worker error")
)
|
//easyjson:json
package easyjson1
import "time"
type School struct {
Name string `json:"name"`
Addr string `json:"addr"`
}
//easyjson:json
// Student is a pupil record serialized by easyjson.
type Student struct {
	// Id is the student identifier.
	Id int `json:"id"`
	// NOTE(review): keys "s_name" and "s_chool" look like typos for
	// "name"/"school", but they are part of the wire format — confirm
	// against consumers before changing.
	Name     string    `json:"s_name"`
	School   School    `json:"s_chool"`
	Birthday time.Time `json:"birthday"`
}
|
package helper
// FullTitle builds the page title for the application: the base title alone
// when title is empty, otherwise "<title> | <base title>".
func FullTitle(title string) string {
	const base = "Ruby on Rails Tutorial Sample App"
	if title != "" {
		return title + " | " + base
	}
	return base
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.