text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"os"
"github.com/Konboi/ghooks"
"github.com/Sirupsen/logrus"
_ "github.com/joho/godotenv/autoload"
"gopkg.in/alecthomas/kingpin.v2"
)
// cmd pairs a shell command with the payload that should be fed to it.
// NOTE(review): this type is not referenced in the visible portion of the
// file; presumably used by the hook callback code elsewhere — confirm.
type cmd struct {
	command string // command line to execute
	payload string // webhook payload handed to the command
}

// Command-line flags and package-level defaults.
var (
	defaultPort = 18889
	defaultHost = "127.0.0.1"
	// Flag values act as defaults that the TOML config may override.
	file         = kingpin.Flag("config", "config file location").Short('c').Required().String()
	port         = kingpin.Flag("port", "listen port").Short('p').Default(fmt.Sprint(defaultPort)).Int()
	host         = kingpin.Flag("host", "listen host").Default(defaultHost).String()
	logfile      = kingpin.Flag("logfile", "log file location").Short('l').String()
	pidfile      = kingpin.Flag("pidfile", "pid file location").String()
	isNotEncoded = kingpin.Flag("raw-payload", "raw payload").Default("false").Bool()
	// log is the package-wide logrus logger; main may redirect it to a file.
	log = logrus.New()
)
// main parses flags, merges them with the TOML config file, configures
// logging and the PID file, registers the configured hooks, and runs the
// ghooks webhook server.
func main() {
	kingpin.CommandLine.Help = "Receives Github webhooks and runs commands"
	kingpin.Version("0.4.3")
	kingpin.Parse()

	// Flag values act as defaults; loadToml overrides them from the file.
	tmpConf := config{
		Port:    *port,
		Host:    *host,
		Logfile: *logfile,
		Pidfile: *pidfile,
	}
	conf, err := loadToml(*file, tmpConf)
	if err != nil {
		log.Fatal(err)
	}

	if conf.Logfile != "" {
		f, err := openFile(conf.Logfile)
		if err != nil {
			log.Fatal(err)
		}
		// BUG FIX: Close is now deferred only after a successful open;
		// previously it was deferred on a possibly nil handle.
		defer f.Close()
		log.Out = f
		log.Formatter = &logrus.TextFormatter{DisableColors: true}
	}

	hooks := ghooks.NewServer(conf.Port, conf.Host)
	// SECRET_TOKEN from the environment wins over the config file value.
	if envSecret := os.Getenv("SECRET_TOKEN"); envSecret != "" {
		hooks.Secret = envSecret
	} else if conf.Secret != "" {
		hooks.Secret = conf.Secret
	}

	if conf.Pidfile != "" {
		if err := createPIDFile(conf.Pidfile); err != nil {
			log.Fatal(err)
		}
	}

	isEncoded := !*isNotEncoded
	for _, h := range conf.Hook {
		if h.Event == "" {
			log.Fatal("event is required.")
		}
		h.isEncoded = isEncoded
		hooks.On(h.Event, h.callback)
	}
	hooks.Run()
}
|
package netutil
import (
"bytes"
"fmt"
"io"
"net"
"testing"
"time"
)
// TestHalfCloser verifies that a TCP connection can be half-closed from
// both ends: each side writes, closes its write half, and the peer reads
// until EOF.
func TestHalfCloser(t *testing.T) {
	t.Parallel()
	// BUG FIX: listen on an ephemeral port instead of a hard-coded one so
	// parallel runs cannot collide, and dial the actual listener address.
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Skip(err)
	}
	defer l.Close()
	errCh := make(chan error, 1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		c, err := l.Accept()
		if err != nil {
			errCh <- err
			return
		}
		defer c.Close()
		hc, ok := c.(HalfCloser)
		if !ok {
			// BUG FIX: return here; falling through would call
			// CloseWrite on a nil interface and panic.
			t.Error("c is not a HalfCloser")
			return
		}
		// BUG FIX: the Write error was previously ignored.
		if _, err := c.Write([]byte("test")); err != nil {
			errCh <- err
			return
		}
		if err := hc.CloseWrite(); err != nil {
			errCh <- err
			return
		}
		data, err := io.ReadAll(c)
		if err != nil {
			errCh <- err
		} else if !bytes.Equal(data, []byte("ack")) {
			errCh <- fmt.Errorf(`!bytes.Equal(data, []byte("ack")), data=%s`, data)
		}
	}()
	c, err := net.DialTimeout("tcp", l.Addr().String(), 1*time.Second)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	data, err := io.ReadAll(c)
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(data, []byte("test")) {
		t.Error(`!bytes.Equal(data, []byte("test"))`)
	}
	if _, err = c.Write([]byte("ack")); err != nil {
		t.Error(err)
	}
	if err = c.(HalfCloser).CloseWrite(); err != nil {
		t.Error(err)
	}
	<-done
	select {
	case err2 := <-errCh:
		t.Error(err2)
	default:
	}
}
|
package metrics
import (
"net/url"
"github.com/cerana/cerana/acomm"
"github.com/cerana/cerana/pkg/errors"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/load"
)
// CPUResult is the result of the CPU handler.
type CPUResult struct {
	Info  []cpu.InfoStat  `json:"info"`  // static hardware info, one entry per CPU
	Load  load.AvgStat    `json:"load"`  // 1/5/15-minute load averages
	Times []cpu.TimesStat `json:"times"` // per-CPU time breakdown
}
// CPU returns information about the CPU hardware, times, and load.
func (m *Metrics) CPU(req *acomm.Request) (interface{}, *url.URL, error) {
	result := &CPUResult{}

	info, err := cpu.Info()
	if err != nil {
		return nil, nil, errors.Wrap(err)
	}
	result.Info = info

	avg, err := load.Avg()
	if err != nil {
		return nil, nil, errors.Wrap(err)
	}
	result.Load = *avg

	perCPU, err := cpu.Times(true)
	if err != nil {
		return nil, nil, errors.Wrap(err)
	}
	result.Times = perCPU

	return result, nil, nil
}
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// +build !windows
package server
import (
"context"
"fmt"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// rlimit is a replacement struct for `unix.Rlimit` which abstracts
// from the possible differences in type definitions between platforms
// (e.g. GNU/Linux uses uint64, FreeBSD uses signed int64).
type rlimit struct {
	// Cur is the soft limit and Max the hard limit, mirroring
	// getrlimit(2)'s rlim_cur/rlim_max.
	Cur, Max uint64
}
// setOpenFileLimitInner inspects and, where possible, raises the process's
// soft open-file limit so that each physical store plus the network stack
// has enough file descriptors. It returns the per-store descriptor budget
// (or 0 when there are no physical stores), or an error when even the
// minimum requirement cannot be met.
func setOpenFileLimitInner(physicalStoreCount int) (uint64, error) {
	// Three thresholds: the bare minimum, the recommended per-store amount
	// with only minimal network descriptors, and the fully recommended
	// amount including recommended network descriptors.
	minimumOpenFileLimit := uint64(physicalStoreCount*storage.MinimumMaxOpenFiles + minimumNetworkFileDescriptors)
	networkConstrainedFileLimit := uint64(physicalStoreCount*storage.RecommendedMaxOpenFiles + minimumNetworkFileDescriptors)
	recommendedOpenFileLimit := uint64(physicalStoreCount*storage.RecommendedMaxOpenFiles + recommendedNetworkFileDescriptors)
	var rLimit rlimit
	if err := getRlimitNoFile(&rLimit); err != nil {
		// If we cannot even query the limit, fall back to the recommended
		// value rather than failing startup.
		if log.V(1) {
			log.Infof(context.TODO(), "could not get rlimit; setting maxOpenFiles to the recommended value %d - %s", storage.RecommendedMaxOpenFiles, err)
		}
		return storage.RecommendedMaxOpenFiles, nil
	}
	// The max open file descriptor limit is too low.
	if rLimit.Max < minimumOpenFileLimit {
		return 0, fmt.Errorf("hard open file descriptor limit of %d is under the minimum required %d\n%s",
			rLimit.Max,
			minimumOpenFileLimit,
			productionSettingsWebpage)
	}
	// If the current limit is less than the recommended limit, set the current
	// limit to the minimum of the max limit or the recommendedOpenFileLimit.
	var newCurrent uint64
	if rLimit.Max > recommendedOpenFileLimit {
		newCurrent = recommendedOpenFileLimit
	} else {
		newCurrent = rLimit.Max
	}
	if rLimit.Cur < newCurrent {
		if log.V(1) {
			log.Infof(context.TODO(), "setting the soft limit for open file descriptors from %d to %d",
				rLimit.Cur, newCurrent)
		}
		oldCurrent := rLimit.Cur
		rLimit.Cur = newCurrent
		if err := setRlimitNoFile(&rLimit); err != nil {
			// It is surprising if setrlimit fails, because we were careful to check
			// getrlimit first to construct a valid limit. However, the validation
			// rules for setrlimit have been known to change between Go versions (for
			// an example, see https://github.com/golang/go/issues/30401), so we don't
			// want to fail hard if setrlimit fails. Instead we log a warning and
			// carry on. If the rlimit is really too low, we'll bail out later in this
			// function.
			log.Warningf(context.TODO(), "adjusting the limit for open file descriptors to %d failed: %s",
				rLimit.Cur, err)
			// Setting the limit to our "recommended" level failed. This may
			// be because getRlimitNoFile gave us the wrong answer (on some
			// platforms there are limits that are not reflected by
			// getrlimit()). If the previous limit is below our minimum, try
			// one more time to increase it to the minimum.
			if oldCurrent < minimumOpenFileLimit {
				rLimit.Cur = minimumOpenFileLimit
				if err := setRlimitNoFile(&rLimit); err != nil {
					log.Warningf(context.TODO(), "adjusting the limit for open file descriptors to %d failed: %s",
						rLimit.Cur, err)
				}
			}
		}
		// Sadly, even when setrlimit returns successfully, the new limit is not
		// always set as expected (e.g. on macOS), so fetch the limit again to see
		// the actual current limit.
		if err := getRlimitNoFile(&rLimit); err != nil {
			return 0, errors.Wrap(err, "getting updated soft limit for open file descriptors")
		}
		if log.V(1) {
			log.Infof(context.TODO(), "soft open file descriptor limit is now %d", rLimit.Cur)
		}
	}
	// The current open file descriptor limit is still too low.
	if rLimit.Cur < minimumOpenFileLimit {
		return 0, fmt.Errorf("soft open file descriptor limit of %d is under the minimum required %d and cannot be increased\n%s",
			rLimit.Cur,
			minimumOpenFileLimit,
			productionSettingsWebpage)
	}
	if rLimit.Cur < recommendedOpenFileLimit {
		// We're still below the recommended amount, we should always show a
		// warning.
		log.Warningf(context.TODO(), "soft open file descriptor limit %d is under the recommended limit %d; this may decrease performance\n%s",
			rLimit.Cur,
			recommendedOpenFileLimit,
			productionSettingsWebpage)
	}
	// If we have no physical stores, return 0.
	if physicalStoreCount == 0 {
		return 0, nil
	}
	// If the current open file descriptor limit meets or exceeds the recommended
	// value, we can divide up the current limit, less what we need for
	// networking, between the stores.
	if rLimit.Cur >= recommendedOpenFileLimit {
		return (rLimit.Cur - recommendedNetworkFileDescriptors) / uint64(physicalStoreCount), nil
	}
	// If we have more than enough file descriptors to hit the recommended number
	// for each store, than only constrain the network ones by giving the stores
	// their full recommended number.
	if rLimit.Cur >= networkConstrainedFileLimit {
		return storage.RecommendedMaxOpenFiles, nil
	}
	// Always sacrifice all but the minimum needed network descriptors to be
	// used by the stores.
	return (rLimit.Cur - minimumNetworkFileDescriptors) / uint64(physicalStoreCount), nil
}
|
/***
Copyright 2017 Cisco Systems Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package systemtests
import (
"crypto/tls"
"net"
"net/http"
"sync"
log "github.com/Sirupsen/logrus"
)
// this self-signed cert and key are valid for 10 years (the certificate's
// notBefore/notAfter span 2017-04-20 through 2027-04-18), issued for
// CN=localhost and used only by the test mock server below.
const certpem = `
-----BEGIN CERTIFICATE-----
MIIDozCCAougAwIBAgIJAM+dSt5+iemKMA0GCSqGSIb3DQEBCwUAMGgxCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxDTALBgNVBAoM
BENQU0cxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxEjAQBgNVBAMMCWxvY2FsaG9z
dDAeFw0xNzA0MjAxOTI4MTJaFw0yNzA0MTgxOTI4MTJaMGgxCzAJBgNVBAYTAlVT
MQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxDTALBgNVBAoMBENQU0cx
FjAUBgNVBAsMDUlUIERlcGFydG1lbnQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIw
DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM1MKpN5mtgdSo7gk0M70mcNJC4G
XVuPcZdC43GSfUxL1buc+80NP5kCp8dzbDYrKfshTgalwmEV4J+5bvKe4osrEMmC
aTC6927nCDH2m+G30/qWxHXMDp4QiZm8GIp/EiDLPqtOOImsoP/QUQKtRGKSqltX
Ei0D5o3wq06Y7RhXRoSnGBUkTCkp1OMGyuJJKXbpoeN+CO3xVJ6OgxMAqoKpdF9k
j8uP4qu8A1jzuiN3/L/vh/JmBajiD54vL0Pb4DoVHJRCGP1RRkLbRUuEHJkW9Smt
67SxcYmZwFnJyXN7KZF+QlyeDDFTB8t0s1t66WwyMIiyN4fr1HYxPXL/Tn0CAwEA
AaNQME4wHQYDVR0OBBYEFCGX5Uzlt8818KcOVicpoFPPEE/NMB8GA1UdIwQYMBaA
FCGX5Uzlt8818KcOVicpoFPPEE/NMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEL
BQADggEBAJO8JCp4Aoi+QM0PsrDeoBL/nAiW+lwHoAwH3JGi9vnZ59RTUs0iCCBM
ecr2MUT7vpxHxWF3C2EH9dPBiXYwrr3q4b0A8Lhf+PrmGOB9nwpbxqAyEvNoj02B
Uc2dpblNsIQteceBdOBGkIKBWAkvXPXrA0ExlV31Qh0KHNsaYLb0d6uSBHZFX/d6
zBhHQqoYuhS3WCYVaPE2PUU9eV3Q6f0Xx+e6GovaO7DgmrSQ1mbAp33XnPiKUz2b
ioF6fl0GISEpfkbrPNBbhSCrXatLrtz+4DpneJQ5vVClG054qcms+hnziiomz7P+
TfQIVXFBQdXZedjqDxhga7ebCWb41yA=
-----END CERTIFICATE-----
`

// certkey is the RSA private key matching certpem (test use only).
const certkey = `
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAzUwqk3ma2B1KjuCTQzvSZw0kLgZdW49xl0LjcZJ9TEvVu5z7
zQ0/mQKnx3NsNisp+yFOBqXCYRXgn7lu8p7iiysQyYJpMLr3bucIMfab4bfT+pbE
dcwOnhCJmbwYin8SIMs+q044iayg/9BRAq1EYpKqW1cSLQPmjfCrTpjtGFdGhKcY
FSRMKSnU4wbK4kkpdumh434I7fFUno6DEwCqgql0X2SPy4/iq7wDWPO6I3f8v++H
8mYFqOIPni8vQ9vgOhUclEIY/VFGQttFS4QcmRb1Ka3rtLFxiZnAWcnJc3spkX5C
XJ4MMVMHy3SzW3rpbDIwiLI3h+vUdjE9cv9OfQIDAQABAoIBAQCa0Qtyd0vsGfq1
0Gl9VEmQ6PoVszsH5x6UIR7/8KaIuM+PUg0ZTxpcuwHniQVbvCVGepEqtinlqOfh
y6b9VBAnPuzD6ZKF6xjZC2TEuOJIz6YN3VB+PMnxLSt3Qb+IAdeb32l9Kdm9CO/I
ukG9MQjXBR9vDjRouf5Nn+avuOdjaGNaFWNCqZb3/0B4zdslsR8ynvKHgB9OH9a6
ggmKINzkvF1Fv6UyGjgLyfVjcdxgFDZ3TY5vsxoO7/jPWzxRY3LignaWV8hEo2L5
fFsyUFApHLmCXMW+fiEu/0QsN2zFcZp1oXCEc2+a9OF3p3e3FaXv5h9w3EdZJLql
b2zt2zzBAoGBAPC1zlZ8HkLcUxZgAA6O9d1lHCjDkSCG1HOLVQ2gKlDOhuQq8kxq
/0HsrtbC4KtjZeOTWHWnX035i7BU42G7y/cNwFtfplINr1XdH8DOgffS53UKnEbs
WyBSgBh6jsoDsPnuGrOnBVmaTB9aGLpznuHcZ/wMeZUEIrQI6wlL79nVAoGBANpW
g6d7BG62xs72e++T2tw/ZSpGNjLt+weKAWbhxHwhlvcrgEkQ5sS/FR/WIfoCplqh
MGhYV0a4zlmSOoOcg3ZQkqiwrNDT1CpIgC8ETzJzt5/eTwEE8UJtD9bIngA62Xec
iACYQgRox0v/UG9N9U1Tnr0oDLVXahZbN4BXiw4JAoGAGpWZskeG+A9pRcFYgEMd
uFPgZkgjERqTACfVPun/gmks0KpFlFcE1f0T2jgvo/4YVKgDTwsrJWt4GANoEXUy
M5jbM7w+nDVStgLz7NFh3UL3uR9w3wxfjBRQfWObvYfm1dOMM2cw2hKGcbf7nywB
0iQLf/TIwMJyKrwJaT9vv/kCgYEAvXoa4rtFS3de7LjHMVBUvJJfjuJDossX8KD5
Onlu9HKJ+pJL0BzUx6U0Bd7kuXyXNUtxIPyZMQysNttJ4HFxPLoLrE02jDtoghFM
/IB24ke98QUR9sZ9QLI47qJHS9fGZaD3/dwkXoM3gWJeQVmcKbEJrwoUjUMBE8mx
TrWqPVECgYBamOxzDC3dUQUFEUGNx0D8bGAHmMnU8tHglwjyQeC8wTYreAdFRYMp
KNPNa4q/NpcKXUcM9etcuzQC8Di/GZDAa+uEQIvwH/xHG6FwvOTfZp8bzN8rD+gQ
yGWqZkaNREZSyW+pNDCUXnBDkCBj7qwUgb6ysgodeF7RWFAHfoXJ1g==
-----END RSA PRIVATE KEY-----
`
// NewMockServer returns a configured, initialized, and running MockServer
// which can have routes added even though it's already running. Call
// Stop() to stop it.
func NewMockServer() *MockServer {
	server := new(MockServer)
	server.Init()
	go server.Serve()
	return server
}
// MockServer is a server which we can program to behave like netmaster for
// testing purposes.
type MockServer struct {
	listener net.Listener   // the actual HTTPS listener
	mux      *http.ServeMux // a custom ServeMux we can add routes onto later
	stopChan chan bool      // used to shut down the server
	wg       sync.WaitGroup // used to avoid a race condition when shutting down
}
// Init just sets up the stop channel and our custom ServeMux so routes can
// be registered before (or after) Serve is running.
func (ms *MockServer) Init() {
	ms.mux = http.NewServeMux()
	ms.stopChan = make(chan bool, 1)
}
// AddHardcodedResponse registers a HTTP handler func for `path' that always
// responds with `body' using a JSON content type.
func (ms *MockServer) AddHardcodedResponse(path string, body []byte) {
	handler := func(w http.ResponseWriter, req *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write(body)
	}
	ms.mux.HandleFunc(path, handler)
}
// AddHandler allows adding a custom route handler to our custom ServeMux.
func (ms *MockServer) AddHandler(path string, f func(http.ResponseWriter, *http.Request)) {
	// Delegate straight to the underlying mux.
	ms.mux.HandleFunc(path, f)
}
// Serve starts the mock server using the custom ServeMux we set up.
// It blocks until Stop() is called, then closes the listener.
func (ms *MockServer) Serve() {
	server := &http.Server{Handler: ms.mux}
	// because of the tight time constraints around starting/stopping the
	// mock server when running tests and the fact that lingering client
	// connections can cause the server not to shut down in a timely
	// manner, we will just disable keepalives entirely here.
	server.SetKeepAlivesEnabled(false)
	cert, err := tls.X509KeyPair([]byte(certpem), []byte(certkey))
	if err != nil {
		// log.Fatalln exits the process; the dead `return` that used to
		// follow it has been removed.
		log.Fatalln("Failed to load TLS key pair:", err)
	}
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{cert},
		MinVersion:   tls.VersionTLS11,
	}
	ms.listener, err = tls.Listen("tcp", "0.0.0.0:10000", tlsConfig)
	if err != nil {
		log.Fatalln("Failed to listen:", err)
	}
	ms.wg.Add(1)
	go func() {
		// Serve returns with an error once Stop() closes the listener;
		// that error is expected and intentionally ignored.
		server.Serve(ms.listener)
		ms.wg.Done()
	}()
	<-ms.stopChan
	ms.listener.Close()
}
// Stop stops the mock server.
func (ms *MockServer) Stop() {
	ms.stopChan <- true
	// Block until the listener has actually been closed and the serving
	// goroutine has finished.
	ms.wg.Wait()
}
|
// search
package results
// SearchReply models the XML search-reply envelope: a reply code/text pair
// plus the delimiter, column list, and raw data rows.
type SearchReply struct {
	Code      int       `xml:"ReplyCode,attr"`
	Text      string    `xml:"ReplyText,attr"`
	MaxRows   string    `xml:"MAXROWS"` // make this an int
	Delimiter Delimiter `xml:"DELIMITER"`
	Columns   string    `xml:"COLUMNS"`
	Data      []string  `xml:"DATA"`
}

// Delimiter carries the value attribute of the DELIMITER element.
type Delimiter struct {
	Value string `xml:"value,attr"`
}
|
package lib
import (
"github.com/dhaifley/dlib"
"github.com/dhaifley/dlib/dauth"
)
// PermAccess values are used to access perm records in the database.
type PermAccess struct {
	// DBS executes the SQL queries; any dlib.SQLExecutor works.
	DBS dlib.SQLExecutor
}

// PermAccessor is an interface describing values capable of providing
// access to perm records in the database.
// All methods stream their results on a buffered channel that is closed
// when the operation completes.
type PermAccessor interface {
	GetPerms(opt *dauth.PermFind) <-chan dlib.Result
	GetPermByID(id int64) <-chan dlib.Result
	DeletePerms(opt *dauth.PermFind) <-chan dlib.Result
	DeletePermByID(id int64) <-chan dlib.Result
	SavePerm(t *dauth.Perm) <-chan dlib.Result
	SavePerms(t []dauth.Perm) <-chan dlib.Result
}
// NewPermAccessor creates a new PermAccess instance and
// returns a pointer to it.
func NewPermAccessor(dbs dlib.SQLExecutor) PermAccessor {
	return &PermAccess{DBS: dbs}
}
// GetPerms finds perm values in the database.
// Each successfully scanned row is sent as Result{Val: Perm, Num: 1};
// failures are sent as Result{Err: ...}. The channel is closed once the
// query has been exhausted.
func (pa *PermAccess) GetPerms(opt *dauth.PermFind) <-chan dlib.Result {
	// Buffered so the producer goroutine can run ahead of a slow consumer.
	ch := make(chan dlib.Result, 256)
	go func() {
		defer close(ch)
		rows, err := pa.DBS.Query(`
SELECT
p.id,
p.service,
p.name
FROM get_perms($1, $2, $3) AS p`,
			opt.ID,
			opt.Service,
			opt.Name)
		if err != nil {
			ch <- dlib.Result{Err: err}
			return
		}
		defer rows.Close()
		for rows.Next() {
			r := dauth.PermRow{}
			if err := rows.Scan(
				&r.ID,
				&r.Service,
				&r.Name,
			); err != nil {
				// A scan failure skips this row but keeps streaming.
				ch <- dlib.Result{Err: err}
				continue
			}
			v := r.ToPerm()
			ch <- dlib.Result{Val: v, Num: 1}
		}
	}()
	return ch
}
// GetPermByID finds a perm value in the database by ID.
func (pa *PermAccess) GetPermByID(id int64) <-chan dlib.Result {
	return pa.GetPerms(&dauth.PermFind{ID: &id})
}
// DeletePerms deletes perm values from the database.
// Scan failures are streamed as individual error Results; a final Result
// carrying the total number of deleted rows is always sent before the
// channel is closed.
func (pa *PermAccess) DeletePerms(opt *dauth.PermFind) <-chan dlib.Result {
	ch := make(chan dlib.Result, 256)
	go func() {
		defer close(ch)
		rows, err := pa.DBS.Query(
			"SELECT delete_perms($1, $2, $3) AS num",
			opt.ID,
			opt.Service,
			opt.Name)
		if err != nil {
			ch <- dlib.Result{Err: err}
			return
		}
		defer rows.Close()
		// n accumulates the deletion counts across all returned rows.
		n := 0
		for rows.Next() {
			r := struct{ Num int }{Num: 0}
			if err := rows.Scan(&r.Num); err != nil {
				ch <- dlib.Result{Err: err}
				continue
			}
			n += r.Num
		}
		ch <- dlib.Result{Num: n, Err: nil}
	}()
	return ch
}
// DeletePermByID deletes a perm value from the database by ID.
func (pa *PermAccess) DeletePermByID(id int64) <-chan dlib.Result {
	return pa.DeletePerms(&dauth.PermFind{ID: &id})
}
// SavePerm saves a perm value to the database.
// The database-assigned ID is written back into *u, and a final Result
// carrying the saved perm value is sent before the channel closes.
func (pa *PermAccess) SavePerm(u *dauth.Perm) <-chan dlib.Result {
	ch := make(chan dlib.Result, 256)
	go func() {
		defer close(ch)
		rows, err := pa.DBS.Query(
			"SELECT save_perm($1, $2, $3) AS id",
			u.ID,
			u.Service,
			u.Name)
		if err != nil {
			ch <- dlib.Result{Err: err}
			return
		}
		defer rows.Close()
		for rows.Next() {
			r := struct{ ID int64 }{ID: 0}
			if err := rows.Scan(&r.ID); err != nil {
				ch <- dlib.Result{Err: err}
				continue
			}
			// Propagate the ID chosen by the database back to the caller.
			u.ID = r.ID
		}
		ch <- dlib.Result{Val: *u, Err: nil}
	}()
	return ch
}
// SavePerms saves a slice of perm values to the database, one at a time,
// forwarding every SavePerm result on the returned channel. The channel is
// closed once all elements have been processed.
func (pa *PermAccess) SavePerms(u []dauth.Perm) <-chan dlib.Result {
	ch := make(chan dlib.Result, 256)
	go func() {
		defer close(ch)
		// Ranging over a nil slice is a no-op, so the old explicit nil
		// check was redundant.
		for i := range u {
			// BUG FIX: pass a pointer to the slice element itself rather
			// than to the loop copy, so the ID assigned by SavePerm is
			// reflected in the caller's slice.
			for sr := range pa.SavePerm(&u[i]) {
				ch <- sr
			}
		}
	}()
	return ch
}
|
package requests
// AddGuestRequest is the JSON request body for adding a guest.
type AddGuestRequest struct {
	TableId            int64 `json:"table"`               // ID of the table the guest is assigned to
	AccompanyingGuests int64 `json:"accompanying_guests"` // presumably the party size excluding the named guest — confirm with API docs
}
|
package main
import (
"bytes"
"io"
"testing"
)
// Test_response drives response with canned newline-separated input and
// checks the resulting integer value.
func Test_response(t *testing.T) {
	cases := []struct {
		name  string
		input io.Reader
		want  int64
	}{
		{name: "test1", input: bytes.NewBufferString("12.00\n20\n8"), want: 15},
		{name: "test2", input: bytes.NewBufferString("15.50\n15\n10"), want: 19},
		{name: "test3", input: bytes.NewBufferString("10.25\n17\n5"), want: 13},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got := response(tc.input)
			if got != tc.want {
				t.Errorf("response() = %v, want %v", got, tc.want)
			}
		})
	}
}
// Test_round checks half-away-from-zero rounding for positive and
// negative inputs.
func Test_round(t *testing.T) {
	cases := []struct {
		in   float64
		want int64
	}{
		{1.2, 1},
		{1.5, 2},
		{-1.3, -1},
		{-1.6, -2},
	}
	for _, tc := range cases {
		tc := tc
		t.Run("", func(t *testing.T) {
			if got := round(tc.in); got != tc.want {
				t.Errorf("round() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package main
import "fmt"
// binarySearch returns the index of k in the sorted slice nums together
// with the number of loop iterations used, or (-1, iterations) when k is
// not present.
func binarySearch(nums []int, k int) (int, int) {
	lo, hi := 0, len(nums)-1
	count := 0
	for lo <= hi {
		count++
		mid := lo + (hi-lo)/2
		switch {
		case nums[mid] == k:
			return mid, count
		case nums[mid] < k:
			lo = mid + 1
		default:
			hi = mid - 1
		}
	}
	return -1, count
}

// main demonstrates the search on a fixed slice, printing the slice
// length, the found value and its index, and the iteration count.
func main() {
	nums := []int{1, 3, 4, 5, 6, 7, 8, 9, 12, 23, 34, 45, 56, 67, 78, 89, 90}
	fmt.Println(len(nums))
	k := 90
	// BUG FIX: the previous hand-rolled search could index out of range
	// or loop forever when k was absent; the standard lo/hi search
	// terminates and stays in bounds for any input.
	i, count := binarySearch(nums, k)
	if i < 0 {
		fmt.Println(k, "not found")
	} else {
		fmt.Println(nums[i], i)
	}
	fmt.Println(count)
}
|
package gw
import (
"github.com/gin-gonic/gin"
)
// Hook represents a global gin engine http Middleware.
type Hook struct {
	Name     string          // identifies the hook, e.g. for registration/lookup
	OnBefore gin.HandlerFunc // runs before the wrapped handler; may be nil
	OnAfter  gin.HandlerFunc // runs after the wrapped handler; may be nil
}
// NewBeforeHook creates a Hook that only runs before the handler.
func NewBeforeHook(name string, before gin.HandlerFunc) *Hook {
	return &Hook{Name: name, OnBefore: before}
}
// NewAfterHook creates a Hook that only runs after the handler.
func NewAfterHook(name string, after gin.HandlerFunc) *Hook {
	return &Hook{Name: name, OnAfter: after}
}
// NewHook creates a Hook with both a before and an after middleware;
// either may be nil.
func NewHook(name string, before gin.HandlerFunc, after gin.HandlerFunc) *Hook {
	h := &Hook{Name: name}
	h.OnBefore = before
	h.OnAfter = after
	return h
}
|
package main
import (
"bufio"
// "golang.org/x/image/bmp"
"image/jpeg"
"image/png"
"log"
"os"
)
// main decodes test.jpg and re-encodes it as test.png.
func main() {
	// fi, err := os.Open("test.bmp")
	fi, err := os.Open("test.jpg")
	if err != nil {
		panic(err)
	}
	// Close only after a successful open; previously Close was deferred
	// on a possibly nil handle.
	defer fi.Close()
	r := bufio.NewReader(fi)
	// img, err := bmp.Decode(r)
	img, err := jpeg.Decode(r)
	if err != nil {
		// BUG FIX: the decode error was previously overwritten by the
		// os.Create call below and never checked.
		panic(err)
	}
	fo, err := os.Create("test.png")
	if err != nil {
		panic(err)
	}
	defer fo.Close()
	w := bufio.NewWriter(fo)
	if err := png.Encode(w, img); err != nil {
		panic(err)
	}
	// BUG FIX: the buffered writer was never flushed, so the tail of the
	// PNG could be lost.
	if err := w.Flush(); err != nil {
		panic(err)
	}
	log.Println("success")
}
|
// Copyright (c) 2018-present, MultiVAC Foundation.
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package db
import (
"os"
"path/filepath"
"github.com/multivactech/MultiVAC/configs/params"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
)
const (
	// Default sizing hints. NOTE(review): OpenDB below sizes its cache
	// from configs/params instead; these constants appear unused in the
	// visible code — confirm before removing.
	cacheSize   = 64
	fileHandles = 64
)

// Batch interface is defined to deal for a series of data.
// A Batch accumulates Put/Delete operations; Reset discards whatever has
// been accumulated so far.
type Batch interface {
	Put(key []byte, value []byte) error
	Delete(key []byte) error
	Reset()
}

// DB interface is used for database.
// StartTransactionRecord/CommitTransaction bracket a batched write;
// GetIter exposes an iterator over the underlying store.
type DB interface {
	Get(key []byte) ([]byte, error)
	Put(key []byte, value []byte) error
	Delete(key []byte) error
	Has(key []byte) (bool, error)
	StartTransactionRecord()
	CommitTransaction()
	Close() error
	GetIter() iterator.Iterator
}

// Cache interface is used for cache data from database.
type Cache interface {
	Put(key []byte, value []byte)
	Get(key []byte) ([]byte, error)
	Delete(key []byte) error
}
// OpenDB returns a wrapped leveldb instance(currently), creating the
// backing directory under dataDir/leveldb/namespace if necessary.
func OpenDB(dataDir string, namespace string) (DB, error) {
	dir := filepath.Join(dataDir, "leveldb", namespace)
	// BUG FIX: the MkdirAll error was silently discarded; a failure here
	// previously surfaced later as a confusing leveldb open error.
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return nil, err
	}
	// Open db and recover any potential corruptions
	options := opt.Options{
		WriteBuffer:            params.LeveldbCacheMb * opt.MiB,
		BlockCacheCapacity:     params.LeveldbBlockCacheMb * opt.MiB,
		OpenFilesCacheCapacity: params.LeveldbFileNumber,
		Filter:                 filter.NewBloomFilter(19),
	}
	ldb, err := leveldb.OpenFile(dir, &options)
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		// NOTE(review): recovery passes nil options rather than &options —
		// presumably intentional, but worth confirming.
		ldb, err = leveldb.RecoverFile(dir, nil)
	}
	if err != nil {
		return nil, err
	}
	return &dbImpl{
		db:    ldb,
		fn:    dir,
		batch: NewBatch(),
	}, nil
}
// NewBatch creates a batch instance backed by an empty leveldb batch.
func NewBatch() Batch {
	var lb leveldb.Batch
	return &batch{b: &lb}
}
|
// +build !prod
package mysql
// GetTestConfig returns the MySQL test configuration used in CI.
func GetTestConfig() Config {
	cfg := Config{RunCreateStmts: true}
	cfg.DSNUser = `root`                     // github actions use root user with a root password
	cfg.DSNPassword = `githubactionpassword` // defined as envvar in the go_tests.yaml
	cfg.DSNHost = `localhost`
	cfg.DSNPort = 3306
	cfg.DatabaseName = `testing_cribbage`
	cfg.DSNParams = `parseTime=true`
	return cfg
}
// GetTestConfigForLocal returns the MySQL test configuration for a local
// developer machine.
func GetTestConfigForLocal() Config {
	cfg := Config{RunCreateStmts: true}
	cfg.DSNUser = `root` // locally, we just use "root" with no password
	cfg.DSNPassword = ``
	cfg.DSNHost = `127.0.0.1`
	cfg.DSNPort = 3306
	cfg.DatabaseName = `testing_cribbage`
	cfg.DSNParams = `parseTime=true`
	return cfg
}
|
// This program implements a web service which provides a browser user
// interface so that the user can design sudoku puzzles where the digits
// have been replaced by emoji symbols to produce a puzzle that can also
// artistically convey a symbolic message.
package main
import "encoding/json"
import "flag"
import "fmt"
import "github.com/gorilla/websocket"
import "io/ioutil"
import "log"
import "net/http"
import "os"
import "path/filepath"
import "sudoku/base"
import "sudoku/text"
// upgrader turns incoming HTTP requests into websocket connections.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  4096,
	WriteBufferSize: 4096,
	// ***** Should replace this with something safer some day.
	// Accepting every origin is unsafe for anything beyond local use.
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}

// Command-line flags locating the listen address and the static assets.
var webAddress = flag.String("server-address", ":8000",
	"The address for this web server.")
var topStaticFile = flag.String("top", "emodoku.html",
	"The path to the static top level page.")
var bordersCSS = flag.String("bordersCSS", "../../html/borders.css",
	"The path to the borders.css file.")
var jsFile = flag.String("jsFile", "emodoku.js",
	"The path to the client-side javascript file.")
var makeSudoku = flag.String("makeSudoku", "make_sudoku.html",
	"The path to the HTML page that implements the service.")
// main registers the static-file and websocket routes, then serves HTTP
// until the listener fails.
func main() {
	flag.Parse()
	serveStatic := func(pattern, path string) {
		http.HandleFunc(pattern, makeFileResponder(path))
	}
	serveStatic("/", *topStaticFile)
	serveStatic("/borders.css", *bordersCSS)
	serveStatic("/emodoku.js", *jsFile)
	serveStatic("/make-sudoku.html", *makeSudoku)
	http.HandleFunc("/solver", handleSolver)
	if err := http.ListenAndServe(*webAddress, nil); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
// makeFileResponder returns an http handler that serves the given static
// file. It panics if the file cannot be resolved or does not exist, so a
// misconfigured server never starts.
func makeFileResponder(file string) func(http.ResponseWriter, *http.Request) {
	// http.ServeFile doesn't accept .. for security reasons.
	file, err := filepath.Abs(file)
	if err != nil {
		// BUG FIX: this error was previously overwritten by os.Stat
		// before ever being checked.
		panic(fmt.Sprintf("File %s: %s", file, err))
	}
	// Don't start the server if the file doesn't exist.
	if _, err := os.Stat(file); err != nil {
		panic(fmt.Sprintf("File %s: %s", file, err))
	}
	return func(w http.ResponseWriter, r *http.Request) {
		log.Printf("ServeFile %s", file)
		http.ServeFile(w, r, file)
	}
}
// handleSolver upgrades the HTTP request to a websocket and then, for each
// text message received, parses it as a sudoku puzzle, runs the solver,
// and writes the JSON-encoded SolutionResponse back. It returns when the
// connection can no longer be read.
func handleSolver(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Printf("Error upgrading to websocket: %s", err)
		return
	}
	for {
		// Renamed from r/w to avoid shadowing the handler's own
		// ResponseWriter and Request.
		messageType, reader, err := conn.NextReader()
		if err != nil {
			// NextReader fails permanently once the connection is closed.
			log.Printf("conn.NextReader: %s", err)
			return
		}
		if messageType != websocket.TextMessage {
			log.Printf("Received unsupported message type %d", messageType)
			continue
		}
		msg, err := ioutil.ReadAll(reader)
		if err != nil {
			log.Printf("Error reading message: %s", err)
			continue
		}
		puzzle, err := text.TextToSudoku(string(msg))
		if err != nil {
			log.Printf("Error parsing sudoku from text: %s", err)
			continue
		}
		response := MakeSolutionResponse(puzzle)
		encoded, err := json.MarshalIndent(response, "", "")
		if err != nil {
			// BUG FIX: message previously read "Error ncoding JSON".
			log.Printf("Error encoding JSON: %s", err)
			continue
		}
		// log.Printf("Solver response:\n%s", encoded)
		writer, err := conn.NextWriter(websocket.TextMessage)
		if err != nil {
			log.Printf("NextWriter error: %s", err)
			continue
		}
		if _, err = writer.Write(encoded); err != nil {
			log.Printf("Error writing to socket: %s", err)
		}
		if err = writer.Close(); err != nil {
			log.Printf("Error closing connection: %s", err)
		}
	}
}
// SolutionResponse is the JSON payload sent back to the browser after a
// puzzle has been run through the solver.
type SolutionResponse struct {
	// Size is the number of rows and columns.
	Size uint
	// Possibilities has an array of ints for each cell of the puzzle grid.
	// The first index is the row number, the second the column number.
	// These indices are zero origin because that's how vectors work in the
	// languages we're using.
	Possibilities [][][]uint
	// Error will be the empty string if no error occurred while the puzzle
	// was being solved, otherwise it is a string describing the error.
	Error string
}
// MakeSolutionResponse runs the puzzle's constraint propagation and
// packages the resulting per-cell candidate values (plus any error
// message) into a SolutionResponse suitable for JSON encoding.
func MakeSolutionResponse(p *base.Puzzle) *SolutionResponse {
	errMsg := ""
	err := p.DoConstraints()
	if err != nil {
		// Report the error but still return whatever candidates remain.
		errMsg = err.Error()
	}
	grid := make([][][]uint, p.Size)
	for row := 0; row < p.Size; row++ {
		grid[row] = make([][]uint, p.Size)
		for col := 0; col < p.Size; col++ {
			// Puzzle cells are addressed 1-origin; the response grid is
			// 0-origin.
			cell := p.Cell(col+1, row+1)
			for val := 1; val <= p.Size; val++ {
				if cell.Possibilities.HasValue(val) {
					grid[row][col] = append(grid[row][col], uint(val))
				}
			}
		}
	}
	return &SolutionResponse{
		Size:          uint(p.Size),
		Possibilities: grid,
		Error:         errMsg,
	}
}
|
package logger
import (
"encoding/json"
"fmt"
"log"
"time"
)
// Info writes a plain-text informational line to stdout, prefixed with a
// UTC timestamp.
func Info(message string) {
	line := fmt.Sprintf("[Info] %s: %s\n", timeFormat(), message)
	fmt.Print(line)
}
// InfoJson writes an informational entry to stdout as indented JSON with
// Level, Time and Message fields.
func InfoJson(message string) {
	type LogEntry struct {
		Level   string
		Time    string
		Message string
	}
	entry := LogEntry{Level: "Info", Time: timeFormat(), Message: message}
	out, err := json.MarshalIndent(entry, "", " ")
	if err != nil {
		// Cannot realistically happen for this struct, but preserve the
		// original hard-exit behavior.
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
}
// timeFormat returns the current UTC time rendered as RFC 3339,
// e.g. "2006-01-02T15:04:05Z".
func timeFormat() string {
	now := time.Now().UTC()
	return now.Format(time.RFC3339)
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package appmanifest
// Sign Microsoft ClickOnce application manifests and deployment manifests.
// These take the form of an XML file using XML DSIG signatures and, unlike all
// other Microsoft signatures, does not use an Authenticode PKCS#7 structure.
import (
"fmt"
"io"
"io/ioutil"
"github.com/rs/zerolog"
"github.com/sassoftware/relic/v7/lib/appmanifest"
"github.com/sassoftware/relic/v7/lib/audit"
"github.com/sassoftware/relic/v7/lib/certloader"
"github.com/sassoftware/relic/v7/lib/magic"
"github.com/sassoftware/relic/v7/lib/pkcs9"
"github.com/sassoftware/relic/v7/signers"
)
// AppSigner is the signer module for Microsoft ClickOnce application and
// deployment manifests (XML DSIG, not Authenticode PKCS#7).
var AppSigner = &signers.Signer{
	Name:         "appmanifest",
	Magic:        magic.FileTypeAppManifest,
	CertTypes:    signers.CertTypeX509,
	FormatLog:    formatLog,
	Sign:         sign,
	VerifyStream: verify,
}
// init declares the module-specific flag and wires the appmanifest signer
// into the global signer registry.
func init() {
	AppSigner.Flags().Bool("rfc3161-timestamp", true, "(APPMANIFEST) Timestamp with RFC3161 server")
	signers.Register(AppSigner)
}
// formatLog emits the audit attributes under the "assembly." prefix.
func formatLog(info *audit.Info) *zerolog.Event {
	event := info.AttrsForLog("assembly.")
	return event
}
// sign reads the whole manifest from r, signs it with the given
// certificate and digest algorithm, optionally timestamps the signature,
// records audit attributes, and returns the signed manifest bytes.
func sign(r io.Reader, cert *certloader.Certificate, opts signers.SignOpts) ([]byte, error) {
	blob, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	signed, err := appmanifest.Sign(blob, cert, opts.Hash)
	if err != nil {
		return nil, err
	}
	// Counter-sign with a timestamp when a timestamper is configured.
	if cert.Timestamper != nil {
		tsreq := &pkcs9.Request{
			EncryptedDigest: signed.EncryptedDigest,
			// Legacy (non-RFC3161) timestamps are used only when the
			// rfc3161-timestamp flag is explicitly disabled.
			Legacy: !opts.Flags.GetBool("rfc3161-timestamp"),
			Hash:   opts.Hash,
		}
		token, err := cert.Timestamper.Timestamp(opts.Context(), tsreq)
		if err != nil {
			return nil, err
		}
		if err := signed.AddTimestamp(token); err != nil {
			return nil, err
		}
	}
	// Record what was signed for the audit log.
	opts.Audit.SetMimeType("application/xml")
	opts.Audit.Attributes["assembly.name"] = signed.AssemblyName
	opts.Audit.Attributes["assembly.version"] = signed.AssemblyVersion
	opts.Audit.Attributes["assembly.publicKeyToken"] = signed.PublicKeyToken
	opts.Audit.SetCounterSignature(signed.Signature.CounterSignature)
	return signed.Signed, nil
}
// verify reads an app manifest from r, validates its signature, and
// returns a single-element list describing the signature found.
func verify(r io.Reader, opts signers.VerifyOpts) ([]*signers.Signature, error) {
	blob, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	sig, err := appmanifest.Verify(blob)
	if err != nil {
		return nil, err
	}
	// gofmt -s: the element type is implied by the slice literal.
	return []*signers.Signature{{
		Package:       fmt.Sprintf("%s %s", sig.AssemblyName, sig.AssemblyVersion),
		Hash:          sig.Hash,
		X509Signature: sig.Signature,
	}}, nil
}
|
/*
Fetch the market summary (market detail).
API docs: https://huobiapi.github.io/docs/spot/v1/cn/#7c47ef3411
*/
package huobipro
import (
"encoding/json"
"fmt"
"strings"
)
/*
Market detail tick fields (from the Huobi API documentation):
	id      integer  unix time, also used as the message ID
	amount  float    24h traded volume
	count   integer  24h number of trades
	open    float    24h opening price
	close   float    latest price
	low     float    24h low
	high    float    24h high
	vol     float    24h turnover
*/
// HuobiproMarketDetail mirrors one market.<symbol>.detail message.
type HuobiproMarketDetail struct {
	Ch   string // channel name
	Ts   int    // server timestamp
	Tick struct {
		Amount float64
		Open   float64
		Close  float64
		High   float64
		Id     int
		Count  int
		Low    float64
		Vol    float64
	}
}
// StartMarketDetail connects the websocket, subscribes to the
// market-detail channel, starts the ping and read loops, and then blocks
// forever. Both the connect and subscribe steps are retried in a tight
// loop until they succeed.
func (h *HuobiproWsConn) StartMarketDetail() {
	// establish the websocket connection (retry until success)
	for {
		if h.createConnection() {
			break
		}
	}
	defer h.wsConn.Close()
	// subscribe to the channel (retry until success)
	for {
		if h.subscribeMarketDetail() {
			break
		}
	}
	// keepalive pings
	go h.ping()
	go h.readMarketDetail()
	// block forever; readMarketDetail delivers data on h.OutChan
	select {}
}
// subscribeMarketDetail sends the subscription request for the
// market.<symbol>.detail channel and reports whether the write succeeded.
func (h *HuobiproWsConn) subscribeMarketDetail() bool {
	symbol := h.symbol.ToLowerWithSep("")
	payload := map[string]string{
		"sub": fmt.Sprintf("market.%s.detail", symbol),
		"id":  h.id,
	}
	// Marshal of a map[string]string cannot realistically fail; the error
	// is ignored exactly as before.
	message, _ := json.Marshal(payload)
	return h.writeMessage(message)
}
// readMarketDetail consumes websocket messages, forwarding decoded market
// details to OutChan and failures to ErrChan. When a read error occurs the
// loop exits and the deferred restart reconnects via StartMarketDetail.
func (h *HuobiproWsConn) readMarketDetail() {
	defer func() {
		// Reconnect and resubscribe after the read loop exits.
		go h.StartMarketDetail()
	}()
	for {
		unzipData, err := h.readMessage()
		if err != nil {
			h.ErrChan <- err
			// The connection is broken; return so the deferred restart can
			// reconnect instead of spinning on a dead socket with nil data
			// (the original kept looping after a read error).
			return
		}
		dataStr := string(unzipData)
		switch {
		case strings.Contains(dataStr, "subbed"): // subscription ack
			continue
		case strings.Contains(dataStr, "ping"): // server ping: echo it back
			h.writeMessage(unzipData)
		case strings.Contains(dataStr, "pong"): // pong reply
			continue
		default:
			// Market-detail payload: decode and forward it.
			var rt HuobiproMarketDetail
			if err := json.Unmarshal(unzipData, &rt); err != nil {
				h.ErrChan <- err
			} else {
				h.OutChan <- rt
			}
		}
	}
}
|
// Copyright 2015 go-smpp authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package pdufield
import (
"bytes"
"strconv"
"testing"
)
// TestFixed verifies length, raw type, string/byte forms and serialization
// of the Fixed (single-octet) field type.
func TestFixed(t *testing.T) {
	f := &Fixed{Data: 0x34}
	if f.Len() != 1 {
		t.Fatalf("unexpected len: want 1, have %d", f.Len())
	}
	if v, ok := f.Raw().(uint8); !ok {
		t.Fatalf("unexpected type: want uint8, have %#v", v)
	}
	// Fixed.String renders the octet as its decimal value.
	ws := strconv.Itoa(0x34)
	if v := f.String(); v != ws { // ws is already a string; the redundant string() conversion is gone
		t.Fatalf("unexpected string: want %q, have %q", ws, v)
	}
	wb := []byte{0x34}
	if v := f.Bytes(); !bytes.Equal(wb, v) {
		t.Fatalf("unexpected bytes: want %q, have %q", wb, v)
	}
	var b bytes.Buffer
	if err := f.SerializeTo(&b); err != nil {
		t.Fatalf("serialization failed: %s", err)
	}
	if v := b.Bytes(); !bytes.Equal(wb, v) {
		t.Fatalf("unexpected serialized bytes: want %q, have %q", wb, v)
	}
}
// TestVariable exercises the Variable (C-octet string) field type: Len
// includes the trailing NUL, String omits it, Bytes includes it.
func TestVariable(t *testing.T) {
	data := []byte("foobar")
	f := &Variable{Data: data}
	if lw := len(data) + 1; f.Len() != lw {
		t.Fatalf("unexpected len: want %d, have %d", lw, f.Len())
	}
	if v, ok := f.Raw().([]byte); !ok {
		t.Fatalf("unexpected type: want []byte, have %#v", v)
	}
	if v := f.String(); v != "foobar" {
		t.Fatalf("unexpected string: want %q have %q", "foobar", v)
	}
	terminated := []byte("foobar\x00")
	if v := f.Bytes(); !bytes.Equal(terminated, v) {
		t.Fatalf("unexpected bytes: want %q, have %q", terminated, v)
	}
	var buf bytes.Buffer
	if err := f.SerializeTo(&buf); err != nil {
		t.Fatalf("serialization failed: %s", err)
	}
	if v := buf.Bytes(); !bytes.Equal(terminated, v) {
		t.Fatalf("unexpected serialized bytes: want %q, have %q", terminated, v)
	}
}
// TestSM exercises the SM (short message) field type, whose byte form
// carries no terminator.
func TestSM(t *testing.T) {
	payload := []byte("foobar")
	f := &SM{Data: payload}
	if f.Len() != len(payload) {
		t.Fatalf("unexpected len: want %d, have %d", len(payload), f.Len())
	}
	if v, ok := f.Raw().([]byte); !ok {
		t.Fatalf("unexpected type: want []byte, have %#v", v)
	}
	if v := f.String(); v != "foobar" {
		t.Fatalf("unexpected string: want %q have %q", "foobar", v)
	}
	if v := f.Bytes(); !bytes.Equal(payload, v) {
		t.Fatalf("unexpected bytes: want %q, have %q", payload, v)
	}
	var buf bytes.Buffer
	if err := f.SerializeTo(&buf); err != nil {
		t.Fatalf("serialization failed: %s", err)
	}
	if v := buf.Bytes(); !bytes.Equal(payload, v) {
		t.Fatalf("unexpected serialized bytes: want %q, have %q", payload, v)
	}
}
// TestDestSme checks the composite DestSme field (flag, ton, npi,
// destination address) against its expected wire and string forms.
func TestDestSme(t *testing.T) {
	var want []byte
	want = append(want, byte(0x01))        // flag
	want = append(want, byte(0x01))        // ton
	want = append(want, byte(0x01))        // npi
	want = append(want, []byte("1234")...) // Address
	want = append(want, byte(0x00))        // null terminator
	flag := Fixed{Data: byte(0x01)}
	ton := Fixed{Data: byte(0x01)}
	npi := Fixed{Data: byte(0x01)}
	destAddr := Variable{Data: []byte("1234")}
	fieldLen := flag.Len() + ton.Len() + npi.Len() + destAddr.Len()
	// Sub-fields are joined with commas in the string form.
	strRep := flag.String() + "," + ton.String() + "," + npi.String() + "," + destAddr.String()
	f := &DestSme{Flag: flag, Ton: ton, Npi: npi, DestAddr: destAddr}
	if f.Len() != fieldLen {
		t.Fatalf("unexpected len: want %d, have %d", fieldLen, f.Len())
	}
	if v, ok := f.Raw().([]byte); !ok {
		t.Fatalf("unexpected type: want []byte, have %#v", v)
	}
	if v := f.String(); v != strRep { // strRep is already a string; redundant conversion removed
		t.Fatalf("unexpected string: want %q have %q", strRep, v)
	}
	if v := f.Bytes(); !bytes.Equal(want, v) {
		t.Fatalf("unexpected bytes: want %q, have %q", want, v)
	}
	var b bytes.Buffer
	if err := f.SerializeTo(&b); err != nil {
		t.Fatalf("serialization failed: %s", err)
	}
	if v := b.Bytes(); !bytes.Equal(want, v) {
		t.Fatalf("unexpected serialized bytes: want %q, have %q", want, v)
	}
}
// TestDestSmeList verifies that a list of DestSme fields concatenates its
// members in the byte form and joins them with ';' in the string form.
func TestDestSmeList(t *testing.T) {
	flag := Fixed{Data: byte(0x01)}
	ton := Fixed{Data: byte(0x01)}
	npi := Fixed{Data: byte(0x01)}
	first := DestSme{Flag: flag, Ton: ton, Npi: npi, DestAddr: Variable{Data: []byte("1234")}}
	second := DestSme{Flag: flag, Ton: ton, Npi: npi, DestAddr: Variable{Data: []byte("5678")}}
	wantLen := first.Len() + second.Len()
	wantStr := first.String() + ";" + second.String() + ";"
	var wantBytes []byte
	wantBytes = append(wantBytes, first.Bytes()...)
	wantBytes = append(wantBytes, second.Bytes()...)
	f := &DestSmeList{Data: []DestSme{first, second}}
	if f.Len() != wantLen {
		t.Fatalf("unexpected len: want %d, have %d", wantLen, f.Len())
	}
	if v, ok := f.Raw().([]byte); !ok {
		t.Fatalf("unexpected type: want []byte, have %#v", v)
	}
	if v := f.String(); v != wantStr {
		t.Fatalf("unexpected string: want %q have %q", wantStr, v)
	}
	if v := f.Bytes(); !bytes.Equal(wantBytes, v) {
		t.Fatalf("unexpected bytes: want %q, have %q", wantBytes, v)
	}
	var buf bytes.Buffer
	if err := f.SerializeTo(&buf); err != nil {
		t.Fatalf("serialization failed: %s", err)
	}
	if v := buf.Bytes(); !bytes.Equal(wantBytes, v) {
		t.Fatalf("unexpected serialized bytes: want %q, have %q", wantBytes, v)
	}
}
// TestUnSme checks the composite UnSme field (ton, npi, destination
// address, error code) against its expected wire and string forms.
func TestUnSme(t *testing.T) {
	// Renamed from `err` so the identifier does not shadow the error idiom.
	errBytes := []byte{0x00, 0x00, 0x00, 0x11}
	var want []byte
	want = append(want, byte(0x01))       // TON
	want = append(want, byte(0x01))       // NPI
	want = append(want, []byte("123")...) // Address
	want = append(want, byte(0x00))       // null terminator
	want = append(want, errBytes...)      // Error
	want = append(want, byte(0x00))       // null terminator
	ton := Fixed{Data: byte(0x01)}
	npi := Fixed{Data: byte(0x01)}
	destAddr := Variable{Data: []byte("123")}
	errCode := Variable{Data: errBytes}
	fieldLen := ton.Len() + npi.Len() + destAddr.Len() + errCode.Len()
	// The error code renders as its unsigned decimal value (0x11 == 17).
	strRep := ton.String() + "," + npi.String() + "," + destAddr.String() + "," + strconv.Itoa(17)
	f := UnSme{Ton: ton, Npi: npi, DestAddr: destAddr, ErrCode: errCode}
	if f.Len() != fieldLen {
		t.Fatalf("unexpected len: want %d, have %d", fieldLen, f.Len())
	}
	if v, ok := f.Raw().([]byte); !ok {
		t.Fatalf("unexpected type: want []byte, have %#v", v)
	}
	if v := f.String(); v != strRep { // strRep is already a string; redundant conversion removed
		t.Fatalf("unexpected string: want %q have %q", strRep, v)
	}
	if v := f.Bytes(); !bytes.Equal(want, v) {
		t.Fatalf("unexpected bytes: want %q, have %q", want, v)
	}
	var b bytes.Buffer
	if err := f.SerializeTo(&b); err != nil {
		t.Fatalf("serialization failed: %s", err)
	}
	if v := b.Bytes(); !bytes.Equal(want, v) {
		t.Fatalf("unexpected serialized bytes: want %q, have %q", want, v)
	}
}
// TestUnSmeList verifies that a list of UnSme fields concatenates its
// members in the byte form and joins them with ';' in the string form.
func TestUnSmeList(t *testing.T) {
	errData := []byte{0x00, 0x00, 0x00, 0x11}
	ton := Fixed{Data: byte(0x01)}
	npi := Fixed{Data: byte(0x01)}
	code := Variable{Data: errData}
	first := UnSme{Ton: ton, Npi: npi, DestAddr: Variable{Data: []byte("123")}, ErrCode: code}
	second := UnSme{Ton: ton, Npi: npi, DestAddr: Variable{Data: []byte("456")}, ErrCode: code}
	wantLen := first.Len() + second.Len()
	wantStr := first.String() + ";" + second.String() + ";"
	var wantBytes []byte
	wantBytes = append(wantBytes, first.Bytes()...)
	wantBytes = append(wantBytes, second.Bytes()...)
	f := &UnSmeList{Data: []UnSme{first, second}}
	if f.Len() != wantLen {
		t.Fatalf("unexpected len: want %d, have %d", wantLen, f.Len())
	}
	if v, ok := f.Raw().([]byte); !ok {
		t.Fatalf("unexpected type: want []byte, have %#v", v)
	}
	if v := f.String(); v != wantStr {
		t.Fatalf("unexpected string: want %q have %q", wantStr, v)
	}
	if v := f.Bytes(); !bytes.Equal(wantBytes, v) {
		t.Fatalf("unexpected bytes: want %q, have %q", wantBytes, v)
	}
	var buf bytes.Buffer
	if err := f.SerializeTo(&buf); err != nil {
		t.Fatalf("serialization failed: %s", err)
	}
	if v := buf.Bytes(); !bytes.Equal(wantBytes, v) {
		t.Fatalf("unexpected serialized bytes: want %q, have %q", wantBytes, v)
	}
}
|
// Copyright (c) 2013 The go-github AUTHORS. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Modified 2018; Tomas Joshua Cain.
package ukpolice
import (
"bytes"
"fmt"
"io"
"reflect"
)
// Stringify attempts to create a reasonable string representation of types
// returned by the data.police.uk API. It does things like resolve pointers
// to their values and omits struct fields with nil values.
func Stringify(message interface{}) string {
	var out bytes.Buffer
	stringifyValue(&out, reflect.ValueOf(message))
	return out.String()
}
// stringifyValue was heavily inspired by the goprotobuf library.
func stringifyValue(w io.Writer, val reflect.Value) {
if val.Kind() == reflect.Ptr && val.IsNil() {
w.Write([]byte("<nil>"))
return
}
v := reflect.Indirect(val)
switch v.Kind() {
case reflect.String:
fmt.Fprintf(w, `"%s"`, v)
case reflect.Slice:
w.Write([]byte{'['})
for i := 0; i < v.Len(); i++ {
if i > 0 {
w.Write([]byte{' '})
}
stringifyValue(w, v.Index(i))
}
w.Write([]byte{']'})
return
case reflect.Struct:
if v.Type().Name() != "" {
w.Write([]byte(v.Type().String()))
}
w.Write([]byte{'{'})
var sep bool
for i := 0; i < v.NumField(); i++ {
fv := v.Field(i)
if fv.Kind() == reflect.Ptr && fv.IsNil() {
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
continue
}
if sep {
w.Write([]byte(", "))
} else {
sep = true
}
w.Write([]byte(v.Type().Field(i).Name))
w.Write([]byte{':'})
stringifyValue(w, fv)
}
w.Write([]byte{'}'})
default:
if v.CanInterface() {
fmt.Fprint(w, v.Interface())
}
}
}
|
package repositories
import (
"io/ioutil"
"net/http"
"strings"
)
// ResponseRepository fetches the raw body served at a URL.
type ResponseRepository interface {
	// Get returns the response body for the given URL.
	Get(string) ([]byte, error)
}
// httpResponseRepository implements ResponseRepository over net/http.
type httpResponseRepository struct {
}
// Get fetches url over HTTP and returns the raw response body.
// A bare host ("example.com") is given an http:// scheme first.
func (r httpResponseRepository) Get(url string) ([]byte, error) {
	if !strings.HasPrefix(url, "http") {
		url = "http://" + url
	}
	response, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	// Close the body so the underlying connection can be reused;
	// the original implementation leaked it.
	defer response.Body.Close()
	return ioutil.ReadAll(response.Body)
}
// NewHTTPResponseRepository returns a ResponseRepository backed by net/http.
func NewHTTPResponseRepository() ResponseRepository {
	return httpResponseRepository{}
}
|
// Copyright © 2019 morgulbrut
// This work is free. You can redistribute it and/or modify it under the
// terms of the Do What The Fuck You Want To Public License, Version 2,
// as published by Sam Hocevar. See the LICENSE file for more details.
package main
import "github.com/morgulbrut/findChips/cmd"
// main delegates to the command tree defined in the cmd package.
func main() {
	cmd.Execute()
}
|
package util
const (
	// Generic API result codes.
	SuccessCode = 0
	ErrorLackCode = 1
	ErrorSqlCode = 2
	ErrorRidesCode = 3
	// Key used to sign request parameters.
	// SECURITY(review): secret committed to source control — rotate it and
	// load it from configuration or the environment instead.
	DesKey = "r5k1*8a$@8dc!dytkcs2dqz!"
	// Redis key prefixes.
	RedisKeyRegisteredCode = "user:registered:code:" //registration verification code
	RedisKeyRegisteredCodeNumber = "user:registered:code:number:" //registration verification attempt count
	RedisKeyForgotCode = "user:forgot:code:" //forgot-password verification code
	RedisKeyForgotCodeNumber = "user:forgot:code:number:" //forgot-password verification attempt count
	RedisKeyBindingCode = "user:binding:code:" //phone-binding verification code
	RedisKeyBindingCodeNumber = "user:binding:code:number:" //phone-binding verification attempt count
	RedisKeyToken = "user:login:token:" //login token cache
	RedisKeyLoginServer = "user:login:server:list:" //login server list
	RedisKeyHallServerList = "user:hall:server:list:" //hall server list
	RedisKeyTeam = "user:team:" //team cache
	RedisKeyTeamDire = ":dire" //direct-subordinate lookup
	RedisKeyTeamAchievement = ":achievement" //performance lookup
	RedisKeyTeamAchievementForm = ":achievement:form" //performance source
	// time.Format reference layouts.
	FormatTime = "15:04:05" //time-only layout
	FormatDate = "2006-01-02" //date-only layout
	FormatDateTime = "2006-01-02 15:04:05" //full date-time layout
	FormatDateTime2 = "2006-01-02 15:04" //date-time layout without seconds
)
|
package v1beta1
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// Exercises ClusterWorkloadResourceMapping.validate: each invalid mapping
// must produce the exact aggregated field.ErrorList shown; a mapping built
// from DefaultTemplate must validate cleanly.
var _ = Describe("Validation Webhook", func() {
	// Each entry pairs an invalid mapping with the error validate() must return.
	invalidEntries := []TableEntry{
		Entry("Duplicate versions",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: "v1",
						},
						{
							Version: "v1",
						},
					},
				},
			},
			field.ErrorList{
				field.Duplicate(field.NewPath("spec", "versions[1]"), "v1"),
			}.ToAggregate(),
		),
		Entry("Invalid restricted jsonpath - annotations",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: "v1",
							Annotations: ".spec.template.spec.annotations[*]",
						},
					},
				},
			},
			field.ErrorList{
				field.Invalid(field.NewPath("spec", "versions[0]", "annotations"), ".spec.template.spec.annotations[*]", "Invalid fixed JSONPath"),
			}.ToAggregate(),
		),
		Entry("Invalid restricted jsonpath - volumes",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: "v1",
							Volumes: ".spec.template.spec.volumes[*]",
						},
					},
				},
			},
			field.ErrorList{
				field.Invalid(field.NewPath("spec", "versions[0]", "volumes"), ".spec.template.spec.volumes[*]", "Invalid fixed JSONPath"),
			}.ToAggregate(),
		),
		Entry("Invalid restricted jsonpath - container name",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: "v1",
							Containers: []ClusterWorkloadResourceMappingContainer{
								{
									Path: ".spec.template.spec.containers[*]",
									Name: ".name[*]",
								},
							},
						},
					},
				},
			},
			field.ErrorList{
				field.Invalid(field.NewPath("spec", "versions[0]", "containers[0]", "name"), ".name[*]", "Invalid fixed JSONPath"),
			}.ToAggregate(),
		),
		Entry("Invalid restricted jsonpath - container env",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: "v1",
							Containers: []ClusterWorkloadResourceMappingContainer{
								{
									Path: ".spec.template.spec.containers[*]",
									Env: ".env[*]",
								},
							},
						},
					},
				},
			},
			field.ErrorList{
				field.Invalid(field.NewPath("spec", "versions[0]", "containers[0]", "env"), ".env[*]", "Invalid fixed JSONPath"),
			}.ToAggregate(),
		),
		Entry("Invalid restricted jsonpath - container volumeMounts",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: "v1",
							Containers: []ClusterWorkloadResourceMappingContainer{
								{
									Path: ".spec.template.spec.containers[*]",
									VolumeMounts: ".volumeMounts[*]",
								},
							},
						},
					},
				},
			},
			field.ErrorList{
				field.Invalid(field.NewPath("spec", "versions[0]", "containers[0]", "volumeMounts"), ".volumeMounts[*]", "Invalid fixed JSONPath"),
			}.ToAggregate(),
		),
		Entry("Invalid restricted jsonpath - failed to parse",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: "v1",
							Containers: []ClusterWorkloadResourceMappingContainer{
								{
									Path: ".spec.template.spec.containers[*]",
									VolumeMounts: ".volumeMounts[*",
								},
							},
						},
					},
				},
			},
			field.ErrorList{
				field.Invalid(field.NewPath("spec", "versions[0]", "containers[0]", "volumeMounts"), ".volumeMounts[*", "Unable to parse fixed JSONPath"),
			}.ToAggregate(),
		),
		Entry("Invalid jsonpath - path",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: "v1",
							Containers: []ClusterWorkloadResourceMappingContainer{
								{
									Path: ".spec.template.spec.containers[*",
								},
							},
						},
					},
				},
			},
			field.ErrorList{
				field.Invalid(field.NewPath("spec", "versions[0]", "containers[0]", "path"), ".spec.template.spec.containers[*", "Invalid JSONPath"),
			}.ToAggregate(),
		),
		Entry("Required version field",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Annotations: "",
						},
					},
				},
			},
			field.ErrorList{
				field.Required(field.NewPath("spec", "versions[0]", "version"), "field \"version\" required"),
			}.ToAggregate(),
		),
		Entry("Required version field",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: " \t\n",
						},
					},
				},
			},
			field.ErrorList{
				field.Invalid(field.NewPath("spec", "versions[0]", "version"), " \t\n", "Whitespace-only version field forbidden"),
			}.ToAggregate(),
		),
		Entry("Required path field",
			ClusterWorkloadResourceMapping{
				Spec: ClusterWorkloadResourceMappingSpec{
					Versions: []ClusterWorkloadResourceMappingTemplate{
						{
							Version: "v1",
							Containers: []ClusterWorkloadResourceMappingContainer{
								{
									Name: "",
								},
							},
						},
					},
				},
			},
			field.ErrorList{
				field.Required(field.NewPath("spec", "versions[0]", "containers[0]", "path"), "field \"path\" required"),
			}.ToAggregate(),
		),
	}
	// Run every invalid case through validate() and compare the errors exactly.
	DescribeTable("Reporting errors on invalid mappings",
		func(mapping ClusterWorkloadResourceMapping, expected error) {
			Expect(mapping.validate()).To(Equal(expected))
		},
		invalidEntries...,
	)
	// The built-in default template must always validate.
	It("should accept valid resources", func() {
		mapping := ClusterWorkloadResourceMapping{
			Spec: ClusterWorkloadResourceMappingSpec{
				Versions: []ClusterWorkloadResourceMappingTemplate{
					DefaultTemplate,
				},
			},
		}
		Expect(mapping.validate()).To(BeNil())
	})
})
|
package main
import (
"net/http"
"github.com/gin-gonic/gin"
)
// hello handles GET /hello. gin.Context wraps both request and response.
func hello(c *gin.Context) {
	// Respond with a JSON body.
	c.JSON(200, gin.H{
		"message": "hello world!",
	})
	// Also append a plain-text body.
	// NOTE(review): writing twice sends both payloads in one response —
	// confirm this demo behavior is intended.
	c.String(http.StatusOK, "hello world")
}
// main wires up the demo route and starts the HTTP server.
func main() {
	// The default engine ships with the Logger and Recovery middleware.
	r := gin.Default()
	// Bare engine without any middleware:
	// r := gin.New()
	// Bind the route to its handler function.
	r.GET("/hello", hello)
	// Start serving on the given port.
	r.Run(":8888")
}
|
package taskfile
import (
"fmt"
"path/filepath"
"github.com/go-task/task/v3/internal/execext"
"github.com/go-task/task/v3/internal/filepathext"
"golang.org/x/exp/slices"
"gopkg.in/yaml.v3"
)
// IncludedTaskfile represents information about included taskfiles
type IncludedTaskfile struct {
	Taskfile string // path of the included taskfile (may be relative)
	Dir string      // working directory for the included tasks
	Optional bool   // don't fail if the taskfile is missing
	Internal bool   // hide the included tasks from listings
	Aliases []string
	AdvancedImport bool // true when the mapping (non-shorthand) YAML form was used
	Vars *Vars
	BaseDir string // The directory from which the including taskfile was loaded; used to resolve relative paths
}
// IncludedTaskfiles is an insertion-ordered map of include name to
// IncludedTaskfile.
type IncludedTaskfiles struct {
	Keys []string // insertion order of the mapping keys
	Mapping map[string]IncludedTaskfile
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (tfs *IncludedTaskfiles) UnmarshalYAML(node *yaml.Node) error {
	if node.Kind != yaml.MappingNode {
		return fmt.Errorf("yaml: line %d: cannot unmarshal %s into included taskfiles", node.Line, node.ShortTag())
	}
	// A mapping node stores keys at even indexes and their values at the
	// following odd index.
	for i := 0; i < len(node.Content); i += 2 {
		keyNode := node.Content[i]
		var tf IncludedTaskfile
		if err := node.Content[i+1].Decode(&tf); err != nil {
			return err
		}
		tfs.Set(keyNode.Value, tf)
	}
	return nil
}
// Len reports how many taskfiles are included; safe on a nil receiver.
func (tfs *IncludedTaskfiles) Len() int {
	if tfs != nil {
		return len(tfs.Keys)
	}
	return 0
}
// Set stores includedTaskfile under key, preserving first-seen key order.
func (tfs *IncludedTaskfiles) Set(key string, includedTaskfile IncludedTaskfile) {
	if tfs.Mapping == nil {
		tfs.Mapping = map[string]IncludedTaskfile{}
	}
	// Record the key once so Range visits entries in insertion order.
	if !slices.Contains(tfs.Keys, key) {
		tfs.Keys = append(tfs.Keys, key)
	}
	tfs.Mapping[key] = includedTaskfile
}
// Range invokes yield for each included taskfile in insertion order,
// stopping at and returning the first error. A nil receiver yields nothing.
func (tfs *IncludedTaskfiles) Range(yield func(key string, includedTaskfile IncludedTaskfile) error) error {
	if tfs == nil {
		return nil
	}
	for _, key := range tfs.Keys {
		if err := yield(key, tfs.Mapping[key]); err != nil {
			return err
		}
	}
	return nil
}
// UnmarshalYAML accepts either a plain scalar (just the taskfile path) or
// a mapping carrying the full set of include options; the latter marks the
// include as an advanced import.
func (it *IncludedTaskfile) UnmarshalYAML(node *yaml.Node) error {
	switch node.Kind {
	case yaml.ScalarNode:
		// Shorthand form: the node is only the taskfile path.
		var path string
		if err := node.Decode(&path); err != nil {
			return err
		}
		it.Taskfile = path
		return nil
	case yaml.MappingNode:
		// Advanced form: decode the supported keys into a scratch struct.
		var tmp struct {
			Taskfile string
			Dir      string
			Optional bool
			Internal bool
			Aliases  []string
			Vars     *Vars
		}
		if err := node.Decode(&tmp); err != nil {
			return err
		}
		it.Taskfile = tmp.Taskfile
		it.Dir = tmp.Dir
		it.Optional = tmp.Optional
		it.Internal = tmp.Internal
		it.Aliases = tmp.Aliases
		it.AdvancedImport = true
		it.Vars = tmp.Vars
		return nil
	}
	return fmt.Errorf("yaml: line %d: cannot unmarshal %s into included taskfile", node.Line, node.ShortTag())
}
// DeepCopy creates a new instance of IncludedTaskfile and copies
// data by value from the source struct. Slices and Vars are cloned so the
// copy shares no mutable state with the source.
func (it *IncludedTaskfile) DeepCopy() *IncludedTaskfile {
	if it == nil {
		return nil
	}
	// Clone the aliases slice; the original implementation dropped the
	// Aliases field entirely, so copies silently lost their aliases.
	var aliases []string
	if it.Aliases != nil {
		aliases = make([]string, len(it.Aliases))
		copy(aliases, it.Aliases)
	}
	return &IncludedTaskfile{
		Taskfile:       it.Taskfile,
		Dir:            it.Dir,
		Optional:       it.Optional,
		Internal:       it.Internal,
		Aliases:        aliases,
		AdvancedImport: it.AdvancedImport,
		Vars:           it.Vars.DeepCopy(),
		BaseDir:        it.BaseDir,
	}
}
// FullTaskfilePath returns the fully qualified path to the included
// taskfile, resolved relative to BaseDir.
func (it *IncludedTaskfile) FullTaskfilePath() (string, error) {
	return it.resolvePath(it.Taskfile)
}
// FullDirPath returns the fully qualified path to the included taskfile's
// working directory, resolved relative to BaseDir.
func (it *IncludedTaskfile) FullDirPath() (string, error) {
	return it.resolvePath(it.Dir)
}
// resolvePath expands shell-style variables in path and, when it is not
// already absolute, makes it absolute relative to BaseDir.
func (it *IncludedTaskfile) resolvePath(path string) (string, error) {
	// Expand environment/shell variables first.
	path, err := execext.Expand(path)
	if err != nil {
		return "", err
	}
	if filepathext.IsAbs(path) {
		return path, nil
	}
	result, err := filepath.Abs(filepathext.SmartJoin(it.BaseDir, path))
	if err != nil {
		return "", fmt.Errorf("task: error resolving path %s relative to %s: %w", path, it.BaseDir, err)
	}
	return result, nil
}
|
package model
import (
)
// CmsSubjectCategory maps the cms_subject_category table.
type CmsSubjectCategory struct {
	AppId string `json:"appId" gorm:"type:bigint unsigned;"` // presumably the owning application id — confirm
	Icon string `json:"icon" gorm:"type:varchar(500);"` // category icon
	Id int `json:"id" gorm:"type:bigint;primary_key"` // primary key
	Name string `json:"name" gorm:"type:varchar(100);"` // category name
	ShowStatus string `json:"showStatus" gorm:"type:int;"` // NOTE(review): Go type string vs column type int — confirm intended
	Sort string `json:"sort" gorm:"type:int;"` // NOTE(review): Go type string vs column type int — confirm intended
	SubjectCount string `json:"subjectCount" gorm:"type:int;"` // number of subjects in this category
	BaseModel
}
// TableName tells GORM which table backs CmsSubjectCategory.
func (CmsSubjectCategory) TableName() string {
	return "cms_subject_category"
}
|
package handler
import (
"encoding/json"
"net/http"
"strings"
"github.com/dtan44/SMUG/service"
)
// Global variables
// jsonMarshal is the JSON serializer used by every handler; it defaults to
// json.Marshal (wired in init) — presumably indirected so tests can stub
// serialization failures; verify against the test suite.
var jsonMarshal func(v interface{}) ([]byte, error)
// URL path prefixes stripped from the request path to obtain the service name.
const (
	registerPath = "/register/"
	deregisterPath = "/deregister/"
)
// init wires the package-level serializer to the stdlib encoder.
func init() {
	jsonMarshal = json.Marshal
}
// Result is the JSON response body reporting an operation's outcome.
type Result struct {
	Result string `json:"result,omitempty"` // "success" or "failure"
	Reason string `json:"reason,omitempty"` // failure detail; empty on success
}
// ServiceHandler bundles the registration and discovery services that the
// HTTP handlers delegate to.
type ServiceHandler struct {
	Registration service.RegistrationInterface
	Discovery service.DiscoveryInterface
}
// registerBody is the JSON request body accepted by HandleRegister.
type registerBody struct {
	URL string `json:"URL"` // target URL of the service being registered
}
// HandleRegister registers a service whose name is taken from the URL path
// and whose target URL comes from the JSON request body. The outcome is
// always reported as a JSON Result.
func (sh ServiceHandler) HandleRegister(w http.ResponseWriter, r *http.Request) {
	serviceName := strings.TrimPrefix(r.URL.Path, registerPath)
	secretKey := r.Header.Get("secret-key")

	var requestBody registerBody
	res := Result{"success", ""}
	if err := readJSONBody(r.Body, &requestBody); err != nil {
		res = Result{"failure", err.Error()}
	} else if !validateKey(secretKey) {
		res = Result{"failure", "Incorrect Key"}
	} else if err := sh.Registration.Register(serviceName, requestBody.URL); err != nil {
		res = Result{"failure", err.Error()}
	}

	payload, err := jsonMarshal(res)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(payload)
}
// HandleDeregister removes the service named by the URL path, reporting the
// outcome as a JSON Result.
func (sh ServiceHandler) HandleDeregister(w http.ResponseWriter, r *http.Request) {
	serviceName := strings.TrimPrefix(r.URL.Path, deregisterPath)

	res := Result{"success", ""}
	if !validateKey(r.Header.Get("secret-key")) {
		res = Result{"failure", "Incorrect Key"}
	} else if err := sh.Registration.Deregister(serviceName); err != nil {
		res = Result{"failure", err.Error()}
	}

	payload, err := jsonMarshal(res)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(payload)
}
// ServicesList is the JSON response body of HandleList.
type ServicesList struct {
	Services []string `json:"services"`
}
// HandleList writes the JSON list of currently registered services.
func (sh ServiceHandler) HandleList(w http.ResponseWriter, r *http.Request) {
	payload, err := jsonMarshal(ServicesList{Services: sh.Discovery.List()})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(payload)
}
// HandleRoute proxies the request to the target service via Discovery and
// relays the upstream response (headers, status, body) back to the caller.
// Routing failures are reported as a JSON Result with status 500.
func (sh ServiceHandler) HandleRoute(w http.ResponseWriter, r *http.Request) {
	res, body, err := sh.Discovery.Route(r)
	if err != nil {
		resp := Result{"failure", err.Error()}
		j, err := jsonMarshal(resp)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write(j)
		return
	}
	// Relay all upstream headers, joining multi-valued headers with commas.
	// strings.Join also handles an empty value slice, which previously
	// panicked on val[:len(val)-1].
	for key, vals := range res.Header {
		w.Header().Set(key, strings.Join(vals, ","))
	}
	w.WriteHeader(res.StatusCode)
	// The original set Content-Type here, after WriteHeader — a no-op; the
	// upstream Content-Type copied above is what clients actually see.
	w.Write(body)
}
|
package model
// Task - service layer task model
type Task struct {
	ID int64
	Status Status // current lifecycle state (project-defined type)
	Description string
	Assigned []int64 // assignee ids — presumably user ids; confirm
}
// String renders the task as its human-readable description.
func (t *Task) String() string {
	return t.Description
}
|
/*
* Quay Frontend
*
* This API allows you to perform many of the operations required to work with Quay repositories, users, and organizations. You can find out more at <a href=\"https://quay.io\">Quay</a>.
*
* API version: v1
* Contact: support@quay.io
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package quay
// ApiError is the standard error envelope returned by Quay API endpoints.
type ApiError struct {
	// Status code of the response.
	Status int32 `json:"status"`
	// Deprecated; alias for detail
	ErrorMessage string `json:"error_message,omitempty"`
	// Unique error code to identify the type of error.
	Title string `json:"title"`
	// Deprecated; alias for detail
	ErrorType string `json:"error_type,omitempty"`
	// Details about the specific instance of the error.
	Detail string `json:"detail,omitempty"`
	// Reference to the type of the error.
	Type_ string `json:"type"`
}
|
package main
import (
"code.google.com/p/go.net/websocket"
"encoding/json"
"flag"
"fmt"
"github.com/fmstephe/location_server/locserver"
"github.com/fmstephe/location_server/logutil"
"github.com/fmstephe/location_server/msgserver"
"github.com/fmstephe/location_server/msgutil/msgdef"
"github.com/fmstephe/simpleid"
"net/http"
"os"
)
// port is the TCP port the HTTP server listens on (default 80).
var port = flag.Int("port", 80, "Sets the port the server will attach to")

// idMaker issues the unique ids served by the /id endpoint.
var idMaker = simpleid.NewIdMaker()
// Provides a unique id in a nice json msg (Op: "sIdOp", Id: $$$)
func idProvider(w http.ResponseWriter, r *http.Request) {
id := idMaker.NewId()
idMsg := msgdef.SIdMsg{Op: msgdef.SIdOp, Id: id}
if buf, err := json.Marshal(idMsg); err != nil {
println(err.Error())
} else {
w.Write(buf)
}
}
// Simple file server for serving up static content from the /html/ directory
// Also provides a simple id service for AJAX convenience
func main() {
	flag.Parse()
	logutil.ServerStarted("Example")
	pwd, err := os.Getwd()
	if err != nil {
		println(err.Error())
		return
	}
	// Start the location tree manager (capacity 10000) backing /loc.
	locserver.StartTreeManager(10000, true)
	// Websocket endpoints for location updates and user messaging.
	http.Handle("/loc", websocket.Handler(locserver.HandleLocationService))
	http.Handle("/msg", websocket.Handler(msgserver.HandleMessageService))
	http.HandleFunc("/id", idProvider)
	// Static assets are served from ./html under the working directory.
	http.Handle("/", http.FileServer(http.Dir(pwd+"/html/")))
	portStr := fmt.Sprintf(":%d", *port)
	println(fmt.Sprintf("Listening on port %s", portStr))
	if err := http.ListenAndServe(portStr, nil); err != nil {
		println(err.Error())
	}
}
|
package layout
// TextWidget is a form widget holding a single text value.
type TextWidget struct {
	BaseWidget
	Value string `json:"value"` // current text value
	Placeholder string `json:"placeholder"`
	Prefix string `json:"prefix"`
	Suffix string `json:"suffix"`
	NoRepeat bool `json:"no_repeat"`
	Format string `json:"format"`
	Linkage Linkage `json:"linkage"`
	Formula Formula `json:"formula"`
}
// GetValue returns the widget's text value, or nil when it is empty.
func (this *TextWidget) GetValue() interface{} {
	if this.Value != "" {
		return this.Value
	}
	return nil
}
// SetValue stores val as the widget's text value. Non-string values are
// silently coerced to the empty string — the type-mismatch error below is
// deliberately disabled, so this method never fails.
func (this *TextWidget) SetValue(val interface{}) error {
	str, flag := val.(string)
	if !flag {
		// Disabled error ("field %s has a value that is not a valid string"):
		//return fmt.Errorf("字段: %s 的值不是有效 String 类型", this.Field.GetLabel())
	}
	this.Value = str
	return nil
}
// Diff compares this widget's value with widget's (which must be a
// *TextWidget) and returns a single change record when they differ; the
// boolean reports whether a difference was found.
func (this *TextWidget) Diff(widget Widget) (Diff, bool) {
	other := widget.(*TextWidget)
	// No difference: both empty, or the values match. The original returned
	// nil in one no-diff branch and Diff{} in the other; nil is used
	// consistently now.
	if (this.Value == "" && other.Value == "") || this.Value == other.Value {
		return nil, false
	}
	return Diff{
		{
			Name:      this.Field.GetLabel(),
			FieldName: this.Field.GetName(),
			Type:      "change",
			Original:  this.Value,
			Last:      other.Value,
		},
	}, true
}
// String returns the widget's raw text value.
func (this *TextWidget) String() string {
	return this.Value
}
|
package common
const (
	// SECURITY(review): Alibaba Cloud access credentials hard-coded in
	// source control — rotate these keys and load them from the
	// environment or configuration instead.
	ALI_ACCESS_KEY_ID = "LTAIvUAUos5XypQv"
	ALI_ACCESS_KEY_SECRET = "t4cjImTY1fngRKYyiV2WYVrfIhGPsb"
)
|
package plugins
import (
"fmt"
"github.com/petomalina/mirror/pkg/cp"
"golang.org/x/tools/go/packages"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path/filepath"
"plugin"
"reflect"
"regexp"
"unsafe"
. "github.com/petomalina/mirror/pkg/logger"
)
var (
	// pkgRegex matches a "package X" directive at the start of a line,
	// capturing the package name as "pkg".
	pkgRegex = regexp.MustCompile(`(?m:^package (?P<pkg>\w+$))`)
)
// Build builds the given package into plugin and saves it in
// current path under a random name .so, returning the name to the caller.
// NOTE(review): it first rewrites the package's source files to "package
// main" via ChangePackage, and the files are not restored afterwards.
func Build(pkg string, out string) (string, error) {
	L.Method("Internal/plugin", "Build").Trace("Invoked with pkg: ", pkg)
	// random file name so we'll get unique plugins each time
	// NOTE(review): math/rand is not explicitly seeded here — confirm name
	// collisions across runs are acceptable.
	uniq := rand.Int()
	objPath := filepath.Join(out, fmt.Sprintf("%d.so", uniq))
	L.Method("Bundle", "Run").Trace("Object path: ", objPath)
	// create the plugin from the passed package
	err := ChangePackage(pkg, "main")
	if err != nil {
		return objPath, err
	}
	// create the command to execute the build
	cmd := exec.Command("go", "build", "-buildmode=plugin", "-o="+objPath, pkg)
	cmd.Stderr = os.Stderr
	cmd.Stdout = os.Stdout
	return objPath, cmd.Run()
}
// LoadSymbols accepts a plugin path and returns all symbols
// that were found in the given plugin.
// If * is provided as only value in `symbols`, all symbols from the
// given plugin will be returned
// LoadSymbols opens the plugin at pluginPath and looks up the named
// symbols, returning them in request order.
//
// If symbols is exactly ["all"], every symbol found in the plugin is
// returned instead. (The original doc said "*", but the code checks the
// literal "all".) The full symbol list is obtained by reading the
// plugin.Plugin's unexported `syms` map via reflect + unsafe, which
// bypasses runtime field protection and is sensitive to changes in the
// standard library's plugin internals.
func LoadSymbols(pluginPath string, symbols []string) ([]interface{}, error) {
	p, err := plugin.Open(pluginPath)
	if err != nil {
		return nil, err
	}

	// Special case — collect every symbol name from the plugin.
	if len(symbols) == 1 && symbols[0] == "all" {
		L.Method("Internal/plugin", "LoadSymbols").Trace("Got 'all' option, finding symbols")
		// Clear the slice so it no longer contains the "all" marker.
		symbols = []string{}

		// Reflect on the unexported 'syms' field of plugin.Plugin.
		symsField := reflect.ValueOf(p).Elem().FieldByName("syms")
		// Re-create a value at the same address via unsafe so the
		// unexported field becomes readable (disables runtime protection).
		symsFieldPtr := reflect.NewAt(symsField.Type(), unsafe.Pointer(symsField.UnsafeAddr())).Elem()

		// Map iteration order is random, so the resulting symbol order is
		// unspecified.
		for sym := range symsFieldPtr.Interface().(map[string]interface{}) {
			symbols = append(symbols, sym)
		}
	}

	L.Method("Internal/plugin", "LoadSymbols").Trace("Looking up symbols: ", symbols)

	// Resolve each requested symbol; fail on the first missing one.
	models := []interface{}{}
	for _, symName := range symbols {
		sym, err := p.Lookup(symName)
		if err != nil {
			return nil, err
		}
		models = append(models, sym)
	}

	return models, nil
}
// ChangePackage changes the `package X` line of each file in the
// targeted package, changing its name to the desiredPkgName, running the
// `run` function and changing it back to the default
// ChangePackage rewrites the `package X` clause of every Go file in the
// target package to desiredPkgName, in place.
//
// NOTE(review): despite the original comment, nothing here runs a
// callback or restores the original package name afterwards — the
// rewrite is permanent.
func ChangePackage(pkgName, desiredPkgName string) error {
	L.Method("Internal/package", "ChangePackage").Trace("Invoked on pkgName: ", pkgName)

	pkg, err := FindPackage(pkgName)
	if err != nil {
		return err
	}

	// Replace the package directive in each file of the package.
	for _, f := range pkg.GoFiles {
		bb, err := ioutil.ReadFile(f)
		if err != nil {
			return err
		}

		// Preserve the file's existing permissions. The original passed a
		// file mode of 0, which would create an unreadable file if the
		// target ever did not already exist.
		info, err := os.Stat(f)
		if err != nil {
			return err
		}

		err = ioutil.WriteFile(
			f,
			pkgRegex.ReplaceAll(bb, []byte("package "+desiredPkgName)),
			info.Mode(),
		)
		if err != nil {
			return err
		}
	}

	return nil
}
// GenerateSymbolsForModels generates symbols for all models, mutates
// the input symbols to be compatible with newly created symbols and
// writes a new file with these generated symbols
func GenerateSymbolsForModels(symbolNames []string, out string) ([]string, error) {
symbolsFile := filepath.Join(out, fmt.Sprintf("/%d.go", rand.Int()))
tmpl := `// DO NOT EDIT: THIS BLOCK IS AUTOGENERATED BY MIRROR BUNDLE
package main
var (
`
for _, m := range symbolNames {
tmpl += "\tX" + m + " = " + m + "{}\n"
}
tmpl += ")\n"
// mutate to match the symbol prefix
for i := range symbolNames {
symbolNames[i] = "X" + symbolNames[i]
}
return symbolNames, ioutil.WriteFile(symbolsFile, []byte(tmpl), os.ModePerm)
}
const DefaultCache = ".mirror"
// CopyPackageToCache copies a given package into a given cache dir
// returning the cache dir subdirectory into which the package was copied
// CopyPackageToCache copies the Go files of pkg into a freshly created,
// randomly named subdirectory of cacheDir so the plugin can be built
// outside the original source tree. It returns the absolute path of the
// subdirectory the files were copied into.
func CopyPackageToCache(pkg *packages.Package, cacheDir string) (string, error) {
	// Random subdirectory so repeated copies don't collide.
	pkgCacheDir, err := filepath.Abs(filepath.Join(cacheDir, fmt.Sprintf("%d", rand.Int())))
	if err != nil {
		return "", err
	}

	// NOTE(review): trace tags said "Bundle"/"Run" — corrected to this
	// function.
	L.Method("Internal/plugin", "CopyPackageToCache").Trace("Making cache dir: ", pkgCacheDir)
	if err := os.MkdirAll(pkgCacheDir, os.ModePerm); err != nil {
		return "", err
	}

	L.Method("Internal/plugin", "CopyPackageToCache").Trace("Copying ", pkg, "->", pkgCacheDir)
	for _, f := range pkg.GoFiles {
		if err := cp.File(f, filepath.Join(pkgCacheDir, filepath.Base(f))); err != nil {
			return pkgCacheDir, err
		}
	}

	return pkgCacheDir, nil
}
// FindPackage returns names of go files in the targeted package
func FindPackage(pkg string) (*packages.Package, error) {
cfg := &packages.Config{
Mode: packages.LoadFiles,
Tests: false,
}
pkgs, err := packages.Load(cfg, pkg)
if err != nil {
return nil, err
}
return pkgs[0], nil
}
|
package main
import (
	"bufio"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"path/filepath"
	"strings"
	"time"
	"unicode/utf8"
)
// flags
var cpm bool // toggle CPM or WPM
var list string // input sentence list to be used
var rounds int // how many sentences to be tested on
// main seeds the RNG, parses the CLI flags, counts down, then runs the
// configured number of typing rounds and prints the aggregate result.
func main() {
	rand.Seed(time.Now().UTC().UnixNano()) // non-deterministic sentence picks
	parseFlags()
	countdown(3)
	clear()
	wpm, errors := playRound(rounds)
	fmt.Println("Result:", resultStats(wpm, errors))
}
// parseFlags registers and parses the command-line flags into the
// package-level cpm/list/rounds variables.
func parseFlags() {
	flag.BoolVar(&cpm, "cpm", false, "Use CPM instead of WPM")
	flag.StringVar(&list, "list", "shakespeare", "Input sentence list to be used")
	flag.IntVar(&rounds, "rounds", 3, "How many sentences to be tested on")
	flag.Parse()
}
// countdown clears the screen and displays length, length-1, ..., 1,
// pausing one second between ticks.
func countdown(length int) {
	for remaining := length; remaining > 0; remaining-- {
		clear()
		fmt.Println(remaining)
		time.Sleep(time.Second)
	}
}
// playRound runs the given number of typing rounds and returns the mean
// WPM across all rounds together with the total error count.
func playRound(rounds int) (float64, int) {
	var sumWPM float64
	avg := 0.0
	tErrors := 0

	for i := 0; i < rounds; i++ {
		clear()
		// Show the stats accumulated so far above the next sentence.
		fmt.Println(resultStats(avg, tErrors), "\n\n\n")
		wpm, errors := ttest(getSentence())
		sumWPM += wpm
		// True arithmetic mean. The original's pairwise
		// "tWPM = (tWPM + wpm) / 2" over-weighted later rounds (the last
		// round counted for half of the final result).
		avg = sumWPM / float64(i+1)
		tErrors += errors
	}

	return avg, tErrors
}
// plural returns "s" when n calls for a plural noun, "" otherwise.
func plural(n int) string {
	if n == 1 {
		return ""
	}
	return "s"
}
// ttest displays sentence s, times the user's typed response, and
// returns the words-per-minute achieved plus the number of mistyped
// words.
func ttest(s string) (float64, int) {
	fmt.Println(" " + s)

	start := time.Now()
	typed := input(":")
	elapsed := time.Since(start) // typing duration

	return calcWPM(s, elapsed), calcErrors(s, typed)
}
// input prints prompt s and reads one line from stdin.
//
// The returned string still contains the trailing '\n' delimiter —
// calcErrors appears to compensate for this with its final
// "errorCount - 1" (see that function). Exits the whole process via
// log.Fatal on a read error.
func input(s string) string {
	reader := bufio.NewReader(os.Stdin)
	fmt.Print(s)
	text, err := reader.ReadString('\n') // get user input
	if err != nil {
		log.Fatal(err)
	}
	return text
}
// resultStats formats a speed and error count for display. When the
// package flag cpm is set, the speed is reported in characters per
// minute (WPM * 5, the standard five-characters-per-word convention).
func resultStats(wpm float64, errorCount int) string {
	speed, unit := wpm, "WPM"
	if cpm {
		speed, unit = wpm*5, "CPM"
	}
	return fmt.Sprintf(" %.1f %s | %d error%s", speed, unit, errorCount, plural(errorCount))
}
// calcWPM converts a sentence and the time taken to type it into words
// per minute, using the standard convention of 5 characters per word.
func calcWPM(s string, elapsed time.Duration) float64 {
	// utf8.RuneCountInString counts the characters exactly. The original
	// strings.Count(s, "") yields rune count + 1 (an empty substring
	// matches at every rune boundary including the end), inflating the
	// speed by one character per sentence.
	chars := utf8.RuneCountInString(s)
	cps := float64(chars) / elapsed.Seconds() // characters per second
	cpm := cps * 60                           // characters per minute
	return cpm / 5                            // 5 characters per word
}
// calcErrors counts how many words of result differ from expected,
// comparing word by word at matching positions.
//
// If result has fewer words than expected, each missing word counts as
// an error. The final "errorCount - 1" appears to compensate for the
// trailing '\n' that input() leaves on the typed line, which makes the
// last word always mismatch. NOTE(review): the short-result early return
// does NOT subtract 1 — confirm that asymmetry is intended.
func calcErrors(expected string, result string) int {
	expectedWords := strings.Split(expected, " ")
	resultWords := strings.Split(result, " ")
	errorCount := 0
	for i := 0; i < len(expectedWords); i++ {
		if len(resultWords) <= i {
			// Ran out of typed words: the rest of expected counts as errors.
			return errorCount + (len(expectedWords) - len(resultWords))
		} else if expectedWords[i] != resultWords[i] {
			errorCount++
		}
	}
	return errorCount - 1
}
// getSentence reads the configured sentence file ("sentences/<list>"
// next to the executable) and returns a randomly chosen line.
//
// NOTE(review): rand.Intn(len(sentences) - 1) can never select the final
// element — presumably to skip the empty string after a trailing
// newline — and it panics if the file has at most one line. Confirm the
// input files always end with a newline and contain multiple sentences.
func getSentence() string {
	path := directory()
	raws, err := ioutil.ReadFile(path + "sentences/" + list)
	if err != nil {
		log.Fatal(err)
	}
	sentences := strings.Split(string(raws), "\n")
	sentence := rand.Intn(len(sentences) - 1) // picks a random sentence from the given file
	return sentences[sentence]
}
// directory returns the directory containing the running executable,
// with a trailing separator appended (callers concatenate relative paths
// directly onto it). Exits via log.Fatal if the executable path cannot
// be determined.
func directory() string {
	ex, err := os.Executable()
	if err != nil {
		log.Fatal(err)
	}
	exPath := filepath.Dir(ex) + "/" // find program's working directory
	return exPath
}
// clear clears the terminal using ANSI escape codes (cursor home + erase
// display). Uses fmt.Print so the escape sequence goes to stdout along
// with the rest of the UI; the builtin print used previously writes to
// stderr, splitting the program's terminal output across two streams.
func clear() {
	fmt.Print("\033[H\033[2J")
}
|
package main
import (
"log"
"net/http"
controller "./controller"
"./database"
"github.com/gorilla/mux"
)
// check panics when e is a non-nil error; a fail-fast helper.
func check(e error) {
	if e == nil {
		return
	}
	panic(e)
}
// main initializes the database, registers every HTTP API route, and
// serves on port 3001. Routes are grouped by resource below.
func main() {
	database.InitDB()
	Router := mux.NewRouter().StrictSlash(true)
	// Messages
	Router.HandleFunc("/api/messages", controller.HandleGetMessages)
	Router.HandleFunc("/api/messages/post", controller.HandlePostMessage)
	// Reservations
	Router.HandleFunc("/api/reservations/reserve", controller.HandlePostReservation)
	Router.HandleFunc("/api/reservations/getreservation/{startDate}/{endDate}", controller.HandleGetReservations)
	Router.HandleFunc("/api/reservations/delete/{id}", controller.HandleDeleteReservation)
	Router.HandleFunc("/api/reservations-user/{id}", controller.HandleGetUserReservations)
	// Companies
	Router.HandleFunc("/api/companies", controller.HandleGetCompanies)
	Router.HandleFunc("/api/company/{id}", controller.HandleGetCompanyById)
	Router.HandleFunc("/api/companies/delete/{id}", controller.HandleDeleteCompany)
	Router.HandleFunc("/api/companies/add", controller.HandleCompanySignup)
	Router.HandleFunc("/api/companies/edit", controller.HandleUpdateCompany)
	Router.HandleFunc("/api/companies/image/{id}", controller.UploadCompanyImageHandler)
	// Members
	Router.HandleFunc("/api/members", controller.HandleGetMembers)
	Router.HandleFunc("/api/member/{id}", controller.HandleGetMemberById)
	Router.HandleFunc("/api/members/add", controller.HandleMemberSignup)
	Router.HandleFunc("/api/members/edit", controller.HandleUpdateMember)
	Router.HandleFunc("/api/members/delete/{id}", controller.HandleDeleteMember)
	Router.HandleFunc("/api/member/image/{id}", controller.UploadMemberImageHandler)
	// Events
	Router.HandleFunc("/api/events", controller.HandleGetEvents)
	Router.HandleFunc("/api/events/add", controller.HandleCreateEvent)
	Router.HandleFunc("/api/events/edit/{id}", controller.HandleUpdateEvent)
	Router.HandleFunc("/api/events/delete/{id}", controller.HandleDeleteEvent)
	Router.HandleFunc("/api/events/image/{id}", controller.UploadEventImageHandler)
	// Authentication
	Router.HandleFunc("/api/login/admin", controller.AdminLogin)
	Router.HandleFunc("/api/login/user", controller.UserLogin)
	Router.HandleFunc("/api/login/company", controller.CompanyLogin)
	// Blocks until the server stops; log.Fatal reports the terminal error.
	log.Fatal(http.ListenAndServe(":3001", Router))
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rspb
import (
"context"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
// FromTimestamp constructs a read summary from the provided timestamp, treating
// the argument as the low water mark of each segment in the summary.
// FromTimestamp constructs a read summary whose local and global
// segments both use the provided timestamp as their low water mark.
func FromTimestamp(ts hlc.Timestamp) ReadSummary {
	return ReadSummary{
		Local:  Segment{LowWater: ts},
		Global: Segment{LowWater: ts},
	}
}
// Clone performs a deep-copy of the receiver.
//
// The value receiver already copies the struct, so returning its address
// suffices. NOTE: when ReadSummary is updated to include pointers to
// non-contiguous memory, this will need to be updated.
func (c ReadSummary) Clone() *ReadSummary {
	return &c
}

// Merge combines two read summaries, resulting in a single summary that
// reflects the combination of all reads in each original summary. The merge
// operation is commutative and idempotent.
func (c *ReadSummary) Merge(o ReadSummary) {
	c.Local.merge(o.Local)
	c.Global.merge(o.Global)
}

// merge folds o into the receiver by forwarding the segment's low water
// mark; commutative and idempotent like ReadSummary.Merge.
func (c *Segment) merge(o Segment) {
	c.LowWater.Forward(o.LowWater)
}
// AssertNoRegression asserts that all reads in the parameter's summary are
// reflected in the receiver's summary with at least as high of a timestamp.
func (c *ReadSummary) AssertNoRegression(ctx context.Context, o ReadSummary) {
	c.Local.assertNoRegression(ctx, o.Local, "local")
	c.Global.assertNoRegression(ctx, o.Global, "global")
}

// assertNoRegression log.Fatals when o's low water mark is above the
// receiver's; name ("local"/"global") identifies the segment in the
// fatal message.
func (c *Segment) assertNoRegression(ctx context.Context, o Segment, name string) {
	if c.LowWater.Less(o.LowWater) {
		log.Fatalf(ctx, "read summary regression in %s segment, was %s, now %s",
			name, o.LowWater, c.LowWater)
	}
}

// Keep AssertNoRegression referenced so it is not flagged as unused when
// no call sites are compiled in.
var _ = (*ReadSummary).AssertNoRegression
|
package mocking
import "time"
// implementation wires the business methods below to a backing Store.
type implementation struct {
	store Store
}

// NewImplementation returns an implementation backed by the given Store.
//
// NOTE(review): this returns an unexported type from an exported
// constructor, which restricts how callers can hold the result.
func NewImplementation(store Store) *implementation {
	return &implementation{store: store}
}
// MethodA opens the store at the given time, sells two units of item
// "a", then closes the store at the current UTC time, returning the
// first error encountered.
func (i *implementation) MethodA(at time.Time) error {
	if err := i.store.Open(at); err != nil {
		return err
	}
	if err := i.store.Sell("a", 2); err != nil {
		return err
	}
	return i.store.Close(time.Now().UTC())
}
|
package converters
import (
"encoding/json"
"fmt"
"math/big"
"reflect"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// jsonMarshal renders output as tab-indented JSON text.
func jsonMarshal(output interface{}) (string, error) {
	encoded, err := json.MarshalIndent(output, "", "\t")
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
// RenderOutput pretty-prints a value returned by a consensus call: known
// domain types get custom rendering, errors are %+v-formatted, and
// anything else falls back to indented JSON.
func RenderOutput(output interface{}) (string, error) {
	// Treat both a nil interface and a typed-nil pointer as "nil":
	// https://stackoverflow.com/a/50487104/474270
	if output == nil || (reflect.ValueOf(output).Kind() == reflect.Ptr && reflect.ValueOf(output).IsNil()) {
		return "nil", nil
	}
	switch outputObj := output.(type) {
	case *externalapi.DomainHash:
		return outputObj.String(), nil
	case externalapi.BlockHeader:
		return renderBlockHeader(outputObj)
	case *externalapi.DomainBlock:
		return renderBlock(outputObj)
	case *model.BlockGHOSTDAGData:
		return renderBlockGHOSTDAGData(outputObj)
	case error:
		return fmt.Sprintf("%+v", outputObj), nil
	default:
		return jsonMarshal(output)
	}
}
// renderBlockGHOSTDAGData renders GHOSTDAG data as indented JSON.
func renderBlockGHOSTDAGData(ghostdagData *model.BlockGHOSTDAGData) (string, error) {
	jsonable := jsonableBlockGHOSTDAGData(ghostdagData)
	return jsonMarshal(jsonable)
}

// jsonableBlockGHOSTDAGData projects the accessor-based BlockGHOSTDAGData
// into an anonymous struct with exported fields so encoding/json can see
// them; hashes are rendered in their string form.
func jsonableBlockGHOSTDAGData(ghostdagData *model.BlockGHOSTDAGData) interface{} {
	return &struct {
		BlueScore      uint64
		BlueWork       *big.Int
		SelectedParent string
		MergeSetBlues  []string
		MergeSetReds   []string
	}{
		BlueScore:      ghostdagData.BlueScore(),
		BlueWork:       ghostdagData.BlueWork(),
		SelectedParent: ghostdagData.SelectedParent().String(),
		MergeSetBlues:  hashes.ToStrings(ghostdagData.MergeSetBlues()),
		MergeSetReds:   hashes.ToStrings(ghostdagData.MergeSetReds()),
	}
}
// renderBlock renders a domain block (header plus transactions) as
// indented JSON.
func renderBlock(block *externalapi.DomainBlock) (string, error) {
	jsonable := jsonableBlock(block)
	return jsonMarshal(jsonable)
}

// jsonableBlock wraps the block with a JSON-friendly header; the
// transactions are marshaled as-is.
func jsonableBlock(block *externalapi.DomainBlock) interface{} {
	return struct {
		Header       interface{}
		Transactions []*externalapi.DomainTransaction
	}{
		Header:       jsonableBlockHeader(block.Header),
		Transactions: block.Transactions,
	}
}
// renderBlockHeader renders a block header as indented JSON.
func renderBlockHeader(blockHeader externalapi.BlockHeader) (string, error) {
	jsonable := jsonableBlockHeader(blockHeader)
	return jsonMarshal(jsonable)
}

// jsonableBlockHeader projects the accessor-based BlockHeader into an
// anonymous struct with exported fields so encoding/json can see them;
// hash-valued fields are rendered in their string form.
func jsonableBlockHeader(blockHeader externalapi.BlockHeader) interface{} {
	return struct {
		Version              uint16
		ParentHashes         []string
		HashMerkleRoot       string
		AcceptedIDMerkleRoot string
		UTXOCommitment       string
		TimeInMilliseconds   int64
		Bits                 uint32
		Nonce                uint64
	}{
		Version:              blockHeader.Version(),
		ParentHashes:         hashes.ToStrings(blockHeader.ParentHashes()),
		HashMerkleRoot:       blockHeader.HashMerkleRoot().String(),
		AcceptedIDMerkleRoot: blockHeader.AcceptedIDMerkleRoot().String(),
		UTXOCommitment:       blockHeader.UTXOCommitment().String(),
		TimeInMilliseconds:   blockHeader.TimeInMilliseconds(),
		Bits:                 blockHeader.Bits(),
		Nonce:                blockHeader.Nonce(),
	}
}
|
package ops
import (
"crypto/sha1"
"os"
)
// FileExists reports whether filename exists. Note: any Stat error other
// than "not exist" (e.g. permission denied) is treated as existing,
// matching the original behavior.
func FileExists(filename string) bool {
	_, err := os.Stat(filename)
	return !os.IsNotExist(err)
}
// GetSHA1 returns the 20-byte SHA-1 digest of data as a byte slice.
func GetSHA1(data []byte) []byte {
	digest := sha1.Sum(data)
	return digest[:]
}
// FilterUnique returns the input strings with duplicates removed,
// preserving first-occurrence order. An empty input yields nil.
func FilterUnique(strSlice []string) []string {
	seen := make(map[string]bool, len(strSlice))
	var unique []string
	for _, s := range strSlice {
		if seen[s] {
			continue
		}
		seen[s] = true
		unique = append(unique, s)
	}
	return unique
}
|
/*
* Npcf_SMPolicyControl API
*
* Session Management Policy Control Service © 2019, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
*
* API version: 1.0.4
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// SessionRuleFailureCode - Possible values are:
//   - NF_MAL: the PCC rule could not be successfully installed (for those
//     provisioned from the PCF) or activated (for those pre-defined in
//     SMF) or enforced (for those already successfully installed) due to
//     SMF/UPF malfunction.
//   - RES_LIM: the same failures, but due to a limitation of resources
//     at the SMF/UPF.
//   - UNSUCC_QOS_VAL: the QoS validation has failed.
//   - UE_STA_SUSP: the UE is in suspend state.
type SessionRuleFailureCode struct {
}
|
package controllers
import (
"net/http"
. "wukongServer/models"
)
// PageController serves the HTML pages (search, documents) on top of the
// shared BaseController helpers.
type PageController struct {
	BaseController
}
// Search runs a full-text query for the "kw" request parameter and
// renders the search results page.
func (c *PageController) Search() {
	keyword := c.GetString(`kw`)
	result := wk.SearchText(keyword)
	c.View(`page/search.html`, map[string]interface{}{
		`searchRequest`: map[string]interface{}{
			`kw`: keyword,
		},
		`searchResult`: result,
	})
}
// Documents renders the documents listing page with all stored documents.
func (c *PageController) Documents() {
	c.View(`page/documents.html`, map[string]interface{}{
		`documents`: Document{}.Get(),
	})
}
// Document handles the document page. On POST it validates and indexes a
// new document, then redirects back to the referring page; all other
// methods are rejected.
func (c *PageController) Document() {
	switch c.Ctx.Request.Method {
	case http.MethodPost:
		title := c.GetString(`title`)
		content := c.GetString(`content`)
		url := c.GetString(`url`)
		if len(content) < 5 {
			c.Error(`content length shouldn't less then 5`)
			return
		}
		if len(title) == 0 {
			// Derive a title from the content. The original always sliced
			// content[:8], which panics for contents of length 5-7; clamp
			// to the available length. (Byte slicing can still split a
			// multi-byte rune — TODO confirm the expected encoding.)
			n := 8
			if len(content) < n {
				n = len(content)
			}
			title = content[:n]
		}
		doc := Document{
			Title:   title,
			Content: content,
			Url:     url,
		}
		err := wk.AddIndexDocument(&doc)
		if err == nil {
			c.Redirect(c.Ctx.Request.Header.Get(`referer`), 301)
		} else {
			c.Error(err.Error())
		}
	default:
		c.Error(`unknown http method`)
	}
}
|
package lidar
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"reflect"
"strings"
)
// LasHeader mirrors the fixed-size public header block of a LAS (lidar
// point cloud) file as parsed by readHeader; field order follows the
// on-disk layout for LAS versions 1.0-1.3.
type LasHeader struct {
	FileSignature        string //[4]byte
	FileSourceID         uint16
	GlobalEncoding       uint16
	ProjectID1           uint32
	ProjectID2           uint16
	ProjectID3           uint16
	ProjectID4           uint64
	VersionMajor         byte
	VersionMinor         byte
	SystemID             string // 32 characters
	GeneratingSoftware   string // 32 characters
	FileCreationDay      uint16
	FileCreationYear     uint16
	HeaderSize           uint16
	OffsetToPoints       uint32
	NumberOfVLRs         uint32
	PointFormatID        byte
	PointRecordLength    uint16
	NumberPoints         uint32
	NumberPointsByReturn [7]uint32 // 5 entries used for LAS <1.3, 7 for 1.3
	XScaleFactor         float64
	YScaleFactor         float64
	ZScaleFactor         float64
	XOffset              float64
	YOffset              float64
	ZOffset              float64
	MaxX                 float64
	MinX                 float64
	MaxY                 float64
	MinY                 float64
	MaxZ                 float64
	MinZ                 float64
	WaveformDataStart    uint64 // populated for LAS 1.3 only
}
// readHeader parses the fixed-size LAS public header block from the
// start of the underlying reader into las.Header.
//
// Supports LAS 1.0-1.2 (5 points-by-return counters) and 1.3 (7 counters
// plus the waveform data start offset); any other version panics. All
// multi-byte fields are little-endian.
func (las *LasFile) readHeader() {
	// 243 bytes covers the largest supported header (LAS 1.3).
	b := make([]byte, 243)
	if _, err := las.r.ReadAt(b[0:243], 0); err != nil && err != io.EOF {
		panic(err)
	}
	las.Header.FileSignature = string(b[0:4]) // expected to be "LASF"; not validated here
	las.Header.FileSourceID = binary.LittleEndian.Uint16(b[4:6])
	las.Header.GlobalEncoding = binary.LittleEndian.Uint16(b[6:8])
	// Project ID (GUID) stored as four little-endian fields.
	las.Header.ProjectID1 = binary.LittleEndian.Uint32(b[8:12])
	las.Header.ProjectID2 = binary.LittleEndian.Uint16(b[12:14])
	las.Header.ProjectID3 = binary.LittleEndian.Uint16(b[14:16])
	las.Header.ProjectID4 = binary.LittleEndian.Uint64(b[16:24])
	las.Header.VersionMajor = b[24]
	las.Header.VersionMinor = b[25]
	// Fixed-width, space-padded text fields.
	las.Header.SystemID = string(b[26:58])
	las.Header.SystemID = strings.Trim(las.Header.SystemID, " ")
	las.Header.GeneratingSoftware = string(b[58:90])
	las.Header.GeneratingSoftware = strings.Trim(las.Header.GeneratingSoftware, " ")
	las.Header.FileCreationDay = binary.LittleEndian.Uint16(b[90:92])
	las.Header.FileCreationYear = binary.LittleEndian.Uint16(b[92:94])
	las.Header.HeaderSize = binary.LittleEndian.Uint16(b[94:96])
	las.Header.OffsetToPoints = binary.LittleEndian.Uint32(b[96:100])
	las.Header.NumberOfVLRs = binary.LittleEndian.Uint32(b[100:104])
	las.Header.PointFormatID = b[104]
	las.Header.PointRecordLength = binary.LittleEndian.Uint16(b[105:107])
	las.Header.NumberPoints = binary.LittleEndian.Uint32(b[107:111])
	offset := 111
	// The number of points-by-return counters depends on the version.
	var numReturns int
	if las.Header.VersionMajor == 1 && (las.Header.VersionMinor < 3) {
		numReturns = 5
	} else if las.Header.VersionMajor == 1 && las.Header.VersionMinor == 3 {
		numReturns = 7
	} else {
		panic(errors.New("Unsupported LAS file type"))
	}
	for i := 0; i < numReturns; i++ {
		las.Header.NumberPointsByReturn[i] = binary.LittleEndian.Uint32(b[offset : offset+4])
		offset += 4
	}
	// Coordinate scale/offset and bounding box: IEEE-754 doubles, read in
	// on-disk order.
	las.Header.XScaleFactor = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.YScaleFactor = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.ZScaleFactor = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.XOffset = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.YOffset = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.ZOffset = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.MaxX = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.MinX = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.MaxY = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.MinY = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.MaxZ = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	las.Header.MinZ = math.Float64frombits(binary.LittleEndian.Uint64(b[offset : offset+8]))
	offset += 8
	// LAS 1.3 appends the byte offset of the waveform data packets.
	if las.Header.VersionMajor == 1 && las.Header.VersionMinor == 3 {
		las.Header.WaveformDataStart = binary.LittleEndian.Uint64(b[offset : offset+8])
	}
}
// String renders every header field as a "Name type = value" line via
// reflection, preceded by a title line.
func (h LasHeader) String() string {
	var buf bytes.Buffer
	buf.WriteString("LAS File Header:\n")

	v := reflect.ValueOf(&h).Elem()
	typ := v.Type()
	for i := 0; i < v.NumField(); i++ {
		field := v.Field(i)
		fmt.Fprintf(&buf, "%s %s = %v\n", typ.Field(i).Name, field.Type(), field.Interface())
	}
	return buf.String()
}
|
package common
//space O(1)
// RotateSlcesInt rotates nums right by k positions in place using the
// three-reversal technique: O(n) time, O(1) extra space.
//
// The original reduced k modulo len(nums) but never performed any
// rotation, and panicked (k %= 0) on empty input; both are fixed here.
func RotateSlcesInt(nums []int, k int) {
	n := len(nums)
	if n == 0 {
		return // nothing to rotate; also avoids division by zero in k %= n
	}
	k %= n // rotating by a multiple of n is the identity
	if k == 0 {
		return
	}
	// Reverse the whole slice, then each of the two halves.
	reverse(nums, 0, n-1)
	reverse(nums, 0, k-1)
	reverse(nums, k, n-1)
}

// reverse reverses nums[start..end] (inclusive bounds) in place.
func reverse(nums []int, start, end int) {
	for start < end {
		nums[start], nums[end] = nums[end], nums[start]
		start++
		end--
	}
}
|
package ravendb
// TcpConnectionInfo describes a TCP connection endpoint (port, URL, and
// optional certificate) as reported by the server.
type TcpConnectionInfo struct {
	Port        int     `json:"Port"`
	URL         string  `json:"Url"`
	Certificate *string `json:"Certificate"`
}
|
package main
import (
"fmt"
"flag"
)
// main parses the -filename flag, demonstrates greet with empty and
// non-empty input, and passes the file name to sendEmail.
//
// NOTE(review): greet and sendEmail are defined elsewhere in this
// package, not in this file.
func main() {
	var filename string
	flag.StringVar(&filename, "filename", "default.txt", "default txt file")
	flag.Parse()
	fmt.Println("This is a Jenkins Demo")
	greetEmpty := greet("")
	fmt.Println(greetEmpty)
	greetNotEmpty := greet("World")
	fmt.Println(greetNotEmpty)
	sendEmail(filename)
}
package migrate
import (
"bytes"
"encoding/json"
"fmt"
"github.com/boltdb/bolt"
"github.com/mpdroog/invoiced/invoice"
"log"
"strconv"
)
// LATEST is the current schema version recorded under config/version.
const LATEST = 1

// conv0 migrates schema version 0 to 1: every invoice gains a CONCEPT-ID
// key and a FINAL status. The "invoices" bucket is rebuilt through a
// temporary "invoices-tmp" bucket so the record keys can change to the
// concept ids. Must run inside a writable bolt transaction.
func conv0(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("invoices"))
	tmp, e := tx.CreateBucketIfNotExists([]byte("invoices-tmp"))
	if e != nil {
		return e
	}
	// 1) Re-key every invoice into invoices-tmp, assigning CONCEPT-<n>
	// ids where missing and forcing the status to FINAL.
	idx := 0
	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
		u := new(invoice.Invoice)
		if e := json.NewDecoder(bytes.NewBuffer(v)).Decode(u); e != nil {
			return e
		}
		idx++
		if len(u.Meta.Conceptid) == 0 {
			u.Meta.Conceptid = fmt.Sprintf("CONCEPT-%d", idx)
		}
		u.Meta.Status = "FINAL"
		// Save any changes..
		buf := new(bytes.Buffer)
		if e := json.NewEncoder(buf).Encode(u); e != nil {
			return e
		}
		fmt.Printf("Write key=%s with val=%s\n", u.Meta.Conceptid, buf.Bytes())
		if e := tmp.Put([]byte(u.Meta.Conceptid), buf.Bytes()); e != nil {
			return e
		}
	}
	// 2) Re-create the invoices bucket empty, seeding its sequence so
	// future ids continue after the migrated records.
	if e := tx.DeleteBucket([]byte("invoices")); e != nil {
		return e
	}
	b, e = tx.CreateBucketIfNotExists([]byte("invoices"))
	if e != nil {
		return e
	}
	if e := b.SetSequence(uint64(idx)); e != nil {
		return e
	}
	// 3) Copy everything back from -tmp, then drop the temporary bucket.
	c = tmp.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		if e := b.Put(k, v); e != nil {
			return e
		}
	}
	return tx.DeleteBucket([]byte("invoices-tmp"))
}
// Convert BoltDB from old to new version
func Convert(db *bolt.DB) error {
return db.Update(func(tx *bolt.Tx) error {
b, e := tx.CreateBucketIfNotExists([]byte("config"))
if e != nil {
return e
}
v := b.Get([]byte("version"))
if v == nil {
// new db
log.Printf("Set version to %d\n", LATEST)
return b.Put([]byte("version"), []byte(strconv.Itoa(LATEST)))
}
version, e := strconv.Atoi(string(v))
if e != nil {
return e
}
if version == LATEST {
log.Printf("Running latest version %d\n", LATEST)
return nil
}
return fmt.Errorf("Unsupported version=%s", v)
})
}
|
package dns
import (
"designPattern/ABE_oberver_responsibilitychain/b_observer_dns/observer"
"math/rand"
"time"
"fmt"
"strings"
)
// IServer is the contract each concrete DNS server level implements on
// top of the observer-based DnsServer base: zone membership, upstream
// wiring, delegation, and response signing.
type IServer interface {
	observer.Observer
	IsLocal(recorder *Recorder) bool
	SetUpperServer(server IServer)
	ResponsFromUpperServer(recorder *Recorder)
	Sign(recorder *Recorder)
}

// DnsServer is the reusable base server: it observes queries and
// delegates the level-specific decisions to the embedded concrete
// server (specific).
type DnsServer struct {
	*observer.Observable
	specific IServer
}
// NewDnsServer wraps the given concrete server in the DnsServer base.
func NewDnsServer(specific IServer) *DnsServer {
	return &DnsServer{
		Observable: observer.NewObservable(),
		specific:   specific,
	}
}
// Update handles an incoming query: resolve locally when this level is
// authoritative, otherwise delegate upstream; in both cases the concrete
// server signs the record.
func (l *DnsServer) Update(o observer.IObservable, arg interface{}) {
	rec := arg.(*Recorder)
	if l.specific.IsLocal(rec) {
		rec.Ip = l.genIpAddress()
	} else {
		l.specific.ResponsFromUpperServer(rec)
	}
	l.specific.Sign(rec)
}
// IsLocal is abstract on the base server; concrete levels must override
// it. Panics when called directly. (The original had an unreachable
// "return true" after the panic — removed.)
func (l *DnsServer) IsLocal(recorder *Recorder) bool {
	panic("not implement")
}

// SetUpperServer replaces any previously registered upstream server.
func (l *DnsServer) SetUpperServer(server IServer) {
	l.DeleteObservers()
	l.AddObserver(server)
}

// ResponsFromUpperServer delegates the query to the upstream server by
// notifying observers. (The name's typo is preserved — it is part of the
// IServer interface.)
func (l *DnsServer) ResponsFromUpperServer(recorder *Recorder) {
	l.SetChanged()
	l.NotifyObservers(recorder)
}

// Sign is abstract on the base server; concrete levels must override it.
func (l *DnsServer) Sign(recorder *Recorder) {
	panic("not implement")
}

// genIpAddress fabricates a random IPv4 address for a locally resolved
// record. Intn(256) covers the full octet range 0-255; the original
// Intn(255) could never produce 255.
func (l *DnsServer) genIpAddress() string {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return fmt.Sprintf("%d.%d.%d.%d", r.Intn(256), r.Intn(256), r.Intn(256), r.Intn(256))
}
///////////////////////////////////////////////////
// SHDnsServer is the Shanghai-level DNS server, authoritative for
// "*.sh.cn" domains.
type SHDnsServer struct {
	*DnsServer
}

// NewSHDnsServer builds a Shanghai server wired into the DnsServer base.
func NewSHDnsServer() *SHDnsServer {
	srv := new(SHDnsServer)
	srv.DnsServer = NewDnsServer(srv)
	return srv
}

// Sign marks the record as answered by the Shanghai server.
func (l *SHDnsServer) Sign(recorder *Recorder) {
	recorder.Owner = "shang hai"
}

// IsLocal reports whether the domain falls inside the Shanghai zone.
func (l *SHDnsServer) IsLocal(recorder *Recorder) bool {
	return strings.HasSuffix(recorder.Domain, ".sh.cn")
}
///////////////////////////////////////////////////
// ChinaDnsServer is the country-level DNS server, authoritative for
// "*.cn" domains.
type ChinaDnsServer struct {
	*DnsServer
}

// NewChinaDnsServer builds a China server wired into the DnsServer base.
func NewChinaDnsServer() *ChinaDnsServer {
	srv := new(ChinaDnsServer)
	srv.DnsServer = NewDnsServer(srv)
	return srv
}

// Sign marks the record as answered by the China server.
func (l *ChinaDnsServer) Sign(recorder *Recorder) {
	recorder.Owner = "china"
}

// IsLocal reports whether the domain falls inside the ".cn" zone.
func (l *ChinaDnsServer) IsLocal(recorder *Recorder) bool {
	return strings.HasSuffix(recorder.Domain, ".cn")
}
///////////////////////////////////////////////////
// GlobalDnsServer is the root-level DNS server; it accepts every domain.
type GlobalDnsServer struct {
	*DnsServer
}

// NewGlobalDnsServer builds a global server wired into the DnsServer
// base.
func NewGlobalDnsServer() *GlobalDnsServer {
	srv := new(GlobalDnsServer)
	srv.DnsServer = NewDnsServer(srv)
	return srv
}

// Sign marks the record as answered by the global server.
func (l *GlobalDnsServer) Sign(recorder *Recorder) {
	recorder.Owner = "global"
}

// IsLocal always reports true: the global server is the final authority.
func (l *GlobalDnsServer) IsLocal(recorder *Recorder) bool {
	return true
}
|
package solutions
import (
"container/heap"
)
// Twitter is an in-memory feed service tracking users, their follow
// relationships, and each user's recent tweets.
type Twitter struct {
	userList  map[int]User
	timestamp int // global logical clock; orders tweets across users
}

// User bundles one account's state: follow/fan sets, a scratch max-heap
// used while building the news feed, and its recent posts (trimmed to
// the ten newest by PostTweet).
type User struct {
	userId int
	follow map[int]struct{}
	fans   map[int]struct{}
	news   *MaxHeap
	posts  *[]Tweet
}

// MaxHeap orders tweets newest-first by logical timestamp (see Less).
type MaxHeap []Tweet

// Tweet records which user posted which tweet at what logical time.
type Tweet struct {
	userId    int
	tweetId   int
	timestamp int
}

// Constructor returns an empty Twitter service (LeetCode-style factory).
func Constructor() Twitter {
	return Twitter{userList: make(map[int]User)}
}
// PostTweet records a new tweet for userId, stamping it with the next
// logical timestamp and keeping only the ten most recent posts.
func (t *Twitter) PostTweet(userId int, tweetId int) {
	t.checkUser(userId)
	t.timestamp++
	user := t.userList[userId]
	*user.posts = append(*user.posts, Tweet{
		userId:    userId,
		tweetId:   tweetId,
		timestamp: t.timestamp,
	})
	if len(*user.posts) > 10 {
		*user.posts = (*user.posts)[1:]
	}
}
// GetNewsFeed returns up to ten most recent tweet ids posted by the user
// or anyone they follow, newest first.
func (t *Twitter) GetNewsFeed(userId int) []int {
	t.checkUser(userId)
	user := t.userList[userId]

	// Merge own posts and every followee's posts into the scratch heap.
	for _, post := range *user.posts {
		heap.Push(user.news, post)
	}
	for followeeId := range user.follow {
		for _, post := range *(t.userList[followeeId].posts) {
			heap.Push(user.news, post)
		}
	}

	// Pop at most ten, newest first.
	var feed []int
	for count := 0; count < 10 && user.news.Len() > 0; count++ {
		feed = append(feed, heap.Pop(user.news).(Tweet).tweetId)
	}

	// Reset the scratch heap for the next call.
	*user.news = MaxHeap{}
	return feed
}
// Follow makes followerId follow followeeId; self-follow is a no-op.
func (t *Twitter) Follow(followerId int, followeeId int) {
	if followerId == followeeId {
		return
	}
	t.checkUser(followerId)
	t.checkUser(followeeId)
	t.userList[followerId].follow[followeeId] = struct{}{}
	t.userList[followeeId].fans[followerId] = struct{}{}
}
// Unfollow removes the follow relationship in both directions;
// self-unfollow is a no-op.
func (t *Twitter) Unfollow(followerId int, followeeId int) {
	if followerId == followeeId {
		return
	}
	t.checkUser(followerId)
	t.checkUser(followeeId)
	// delete on a missing key is already a no-op, so the original's
	// existence checks before each delete were redundant.
	delete(t.userList[followerId].follow, followeeId)
	delete(t.userList[followeeId].fans, followerId)
}
// checkUser lazily creates the user record on first reference.
func (t *Twitter) checkUser(userId int) {
	if _, exists := t.userList[userId]; exists {
		return
	}
	t.userList[userId] = User{
		userId: userId,
		follow: make(map[int]struct{}),
		fans:   make(map[int]struct{}),
		news:   &MaxHeap{},
		posts:  &[]Tweet{},
	}
}
// Len reports the number of queued tweets.
func (h MaxHeap) Len() int {
	return len(h)
}

// Less orders tweets by descending logical timestamp, making this a
// max-heap under container/heap. (Receiver renamed from "heap", which
// shadowed the container/heap import.)
func (h MaxHeap) Less(i int, j int) bool {
	return h[i].timestamp > h[j].timestamp
}

// Swap exchanges two elements.
func (h MaxHeap) Swap(i int, j int) {
	h[i], h[j] = h[j], h[i]
}

// Push appends a Tweet; called via heap.Push, never directly.
func (h *MaxHeap) Push(a interface{}) {
	*h = append(*h, a.(Tweet))
}

// Pop removes and returns the last element; called via heap.Pop.
func (h *MaxHeap) Pop() interface{} {
	old := *h
	last := old[len(old)-1]
	*h = old[:len(old)-1]
	return last
}
|
package model
import (
"errors"
"fmt"
"go_api_base/db"
. "go_api_base/log"
"time"
"strings"
"strconv"
"agent_keeper/package/pagination"
"net/http"
"encoding/json"
)
var (
	// mysqlconn is a shared package-level handle, reopened by each helper
	// via db.QuickConnect. NOTE(review): mutable package-level state —
	// not safe under concurrent callers; confirm usage is serialized.
	mysqlconn *db.DBMYSQL
	// agentInsertSQL is the Sprintf template used by CreateRecord; values
	// are interpolated into the statement rather than bound as
	// parameters.
	agentInsertSQL = `insert into agent_record(r_id,r_name,r_client,r_rule,r_where,r_callwhere,r_schedule,r_callback,r_status,r_description,r_created,r_retry) VALUES('%v','%v','%v','%v','%v','%v','%v','%v','%v','%v','%v',%v);`
)
// gorm 数据表agent_record 表操作部分
// Find loads the available agent record with id s.AgentId into s.
func (s *AgentInfo) Find() error {
	mysqlconn = db.QuickConnect()
	// Check the connection BEFORE deferring Close or issuing the query;
	// the original deferred Close first and only then tested for nil.
	if mysqlconn == nil {
		return errors.New("mysql connect failed")
	}
	defer mysqlconn.Close()
	// NOTE(review): &s passes a **AgentInfo to Scan — confirm the db
	// layer expects that (siblings do the same).
	mysqlconn.Raw("select * FROM agent_record WHERE r_id = ? AND r_is_available = 1", s.AgentId).Scan(&s)
	return nil
}
// FindByWhere loads the first available agent record matching the given
// column=value pairs into s.
//
// SECURITY(review): the WHERE clause is built by string concatenation
// without escaping — callers must never pass untrusted keys or values
// here; parameterized queries should replace this.
func (s *AgentInfo) FindByWhere(where map[string]string) error {
	mysqlconn = db.QuickConnect()
	// Nil check moved ahead of the deferred Close (the original deferred
	// first, then checked).
	if mysqlconn == nil {
		return errors.New("mysql connect failed")
	}
	defer mysqlconn.Close()

	sql := "select * FROM agent_record WHERE r_is_available = 1 "
	for k, v := range where {
		sql = sql + " and " + k + " = '" + v + "'"
	}
	mysqlconn.Raw(sql).Scan(&s)
	return nil
}
// Delete soft-deletes the record for s.AgentId (sets r_is_available=0).
func (s *AgentInfo) Delete() error {
	mysqlconn = db.QuickConnect()
	// Nil check added for consistency with Find/Update; the original
	// used the handle without checking it.
	if mysqlconn == nil {
		return errors.New("mysql connect failed")
	}
	defer mysqlconn.Close()

	ss := fmt.Sprintf("update agent_record SET r_is_available=0 Where r_id = %d", s.AgentId)
	result, err := mysqlconn.CommonDB().Exec(ss)
	if err != nil {
		return err
	}
	if af, _ := result.RowsAffected(); af <= 0 {
		return errors.New("agent`data updated fail")
	}
	return nil
}
// 主要用于更新状态
func (s *AgentInfo) Update() error {
mysqlconn = db.QuickConnect()
if mysqlconn == nil {
return errors.New("mysql connect failed")
}
defer mysqlconn.Close()
ss := fmt.Sprintf("update agent_record SET r_status = %d Where r_id = %d", s.Status, s.AgentId)
result, err := mysqlconn.CommonDB().Exec(ss)
if err != nil {
return err
}
if af, _ := result.RowsAffected(); af <= 0 {
return errors.New("agent`data updated fail")
}
//正在进行
return nil
}
// 主要用于更新数据
func (s *AgentInfo) Edit() error {
var sql string
mysqlconn = db.QuickConnect()
if mysqlconn == nil {
return errors.New("mysql connect failed")
}
defer mysqlconn.Close()
if s.Status != -1 {
sql = "update agent_record SET r_rule = '%v',r_name = '%v',r_retry = %v , r_where = '%v', r_schedule = '%v', r_callwhere = '%v', r_callback = '%v',r_description = '%v', "+ "r_status = "+ strconv.Itoa(s.Status)+" Where r_id = %d"
} else {
sql = "update agent_record SET r_rule = '%v',r_name = '%v',r_retry = %v , r_where = '%v', r_schedule = '%v', r_callwhere = '%v', r_callback = '%v',r_description = '%v' Where r_id = %d"
}
ss := fmt.Sprintf(sql, s.RuleType,s.Name,s.Retry, s.Where,s.Period,s.CallWhere,s.Callback,s.Description,s.AgentId)
_, err := mysqlconn.CommonDB().Exec(ss)
if err != nil {
return err
}
//if af, _ := result.RowsAffected(); af <= 0 {
// return errors.New("agent`data updated not affected")
//}
//正在进行
return nil
}
// CreateRecord inserts a new agent record and returns the auto-increment id
// assigned by MySQL.
// It returns a non-nil error when the connection, the insert, RowsAffected,
// or LastInsertId fails.
func CreateRecord(info *AgentInfo) (int64, error) {
	mysqlconn := db.QuickConnect()
	// Guard before use, consistent with the other record helpers.
	if mysqlconn == nil {
		return 0, errors.New("mysql connect failed")
	}
	defer mysqlconn.Close()
	// Neutralize single quotes in the free-form where clause before it is
	// interpolated into the insert statement.
	info.Where = strings.Replace(info.Where, "'", `\"`, -1)
	sql := fmt.Sprintf(agentInsertSQL, info.AgentId, info.Name, info.Client, info.RuleType, info.Where, info.CallWhere, info.Period, info.Callback, 0, info.Description, time.Now().Format("2006-01-02 15:04:05.999999999"), info.Retry)
	result, err := mysqlconn.CommonDB().Exec(sql)
	if err != nil {
		Log.Error("add agent record failed:%v,%v", err.Error(), sql)
		return 0, err
	}
	if _, err := result.RowsAffected(); err != nil {
		Log.Error("add agent record failed:%v", err.Error())
		return 0, err
	}
	Log.Info("add agent record:%v to db,%v", info.Name, sql)
	// Bug fix: the original fell through to `return 0, err` with the OUTER
	// (nil) err when LastInsertId failed, silently reporting success with
	// id 0. Propagate the LastInsertId error explicitly.
	id, err := result.LastInsertId()
	if err != nil {
		return 0, err
	}
	return id, nil
}
// SearchRecord returns one JSON page of agent records matching the filters
// carried by info (name, rule type, client, status) and the optional
// created-at range [start, end]. The request r is only used to build the
// pagination links. Results are ordered by r_id descending and paged by
// info.Page/info.Items.
//
// Fixes over the original:
//   - the nil-connection check now runs BEFORE defer Close (the old order
//     deferred Close on a possibly-nil connection);
//   - rows is closed via defer (the old code leaked the cursor, also on
//     scan errors);
//   - filter values coming from the HTTP request are bound as placeholders
//     instead of being concatenated into the SQL (injection fix).
func SearchRecord(info *AgentInfo, start, end string, r *http.Request) (string, error) {
	var (
		id          int
		name        string
		rule        string
		client      string
		where       string
		callback    string
		callwhere   string
		schedule    string
		retry       int
		status      int
		description []byte
		created     string
		result      []interface{}
	)
	type Num struct {
		Count int
	}
	// Shared WHERE fragment used by both the page query and the count query.
	findSql := " from agent_record where r_is_available = 1 "
	var args []interface{}
	if start != "" {
		findSql += " and r_created >= ?"
		args = append(args, start)
	}
	if end != "" {
		findSql += " and r_created <= ?"
		args = append(args, end)
	}
	if info.Client != "" {
		findSql += " and r_client like ?"
		args = append(args, info.Client+"%")
	}
	if info.Name != "" {
		findSql += " and r_name like ?"
		args = append(args, "%"+info.Name+"%")
	}
	if info.Status != -1 {
		findSql += " and r_status = ?"
		args = append(args, info.Status)
	}
	if info.RuleType != "" {
		findSql += " and r_rule like ?"
		args = append(args, "%"+info.RuleType+"%")
	}
	finalSql := "select r_id,r_name,r_rule,r_client,r_where,r_callback,r_callwhere,r_schedule,r_retry,r_status,r_description,r_created" + findSql + " order by r_id desc limit ?,?"
	countSql := "select count(r_id) as count " + findSql
	mysqlconn = db.QuickConnect()
	if mysqlconn == nil {
		return "", errors.New("mysql connect failed")
	}
	defer mysqlconn.Close()
	// Copy args so the LIMIT parameters do not leak into the count query.
	finalArgs := append(append([]interface{}{}, args...), (info.Page-1)*info.Items, info.Items)
	rows, err := mysqlconn.Raw(finalSql, finalArgs...).Rows()
	if err != nil {
		return "", err
	}
	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(&id, &name, &rule, &client, &where, &callback, &callwhere, &schedule, &retry, &status, &description, &created); err != nil {
			return "", err
		}
		result = append(result, map[string]interface{}{
			"id":          id,
			"name":        name,
			"rule":        rule,
			"client":      client,
			"where":       where,
			"callback":    callback,
			"callwhere":   callwhere,
			"schedule":    schedule,
			"retry":       retry,
			"status":      status,
			"description": string(description),
			"created":     created,
		})
	}
	n := &Num{}
	mysqlconn.Raw(countSql, args...).Scan(n)
	pag := pagination.NewPagination(r, n.Count, info.Items)
	re := map[string]interface{}{
		"pagination": pag.Pages(),
		"data":       result,
	}
	res, err := json.Marshal(re)
	if err != nil {
		return "", err
	}
	return string(res), nil
}
|
package binarytree
// Node is a node of an integer binary search tree: smaller values live in
// the left subtree, larger values in the right one.
type Node struct {
	Left  *Node
	Right *Node
	Value int
}

// Insert adds value to the subtree rooted at node, keeping the BST
// invariant. Duplicate values are silently ignored.
func (node *Node) Insert(value int) {
	cur := node
	for {
		switch {
		case value > cur.Value:
			if cur.Right == nil {
				cur.Right = &Node{Value: value}
				return
			}
			cur = cur.Right
		case value < cur.Value:
			if cur.Left == nil {
				cur.Left = &Node{Value: value}
				return
			}
			cur = cur.Left
		default:
			// Equal value already present: nothing to do.
			return
		}
	}
}
/*func (node *Node) Exists(value int) bool {
}*/ |
package service
import (
"github.com/smartystreets/goconvey/convey"
"testing"
)
// TestService_GetId checks that GetId succeeds for a known input and logs
// the result.
func TestService_GetId(t *testing.T) {
	convey.Convey("TestService_GetId", t, func(c convey.C) {
		res, err := s.GetId("test")
		// When the Convey closure accepts a convey.C, assertions must go
		// through that scoped context; the package-level convey.So is not
		// tracked in this mode and panics at runtime.
		c.So(err, convey.ShouldBeNil)
		t.Logf("res %v", res)
	})
}
// BenchmarkService_GetId measures the cost of a single GetId lookup.
func BenchmarkService_GetId(b *testing.B) {
	for n := 0; n < b.N; n++ {
		if _, err := s.GetId("test"); err != nil {
			b.Error(err)
		}
	}
}
|
package factory_test
import (
"github.com/RackHD/ipam/resources"
. "github.com/RackHD/ipam/resources/factory"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Specs for the resource factory, which resolves resource implementations
// from a (type, version) pair.
var _ = Describe("Factory", func() {
	Describe("Request", func() {
		It("should return the requested resource", func() {
			// A registered type/version resolves to the concrete V1 pool type.
			resource, err := Request(resources.PoolResourceType, resources.PoolResourceVersionV1)
			Expect(err).ToNot(HaveOccurred())
			Expect(resource).To(BeAssignableToTypeOf(&resources.PoolV1{}))
		})
		It("should return an error if the requested resource is not registered", func() {
			// Unknown types fail with an error originating in Request.
			_, err := Request("invalid", "1.0.0")
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(HavePrefix("Request"))
		})
	})
	Describe("Require", func() {
		It("should return the requested resource", func() {
			resource, err := Require(resources.PoolResourceType, resources.PoolResourceVersionV1)
			Expect(err).ToNot(HaveOccurred())
			Expect(resource).To(BeAssignableToTypeOf(&resources.PoolV1{}))
		})
		It("should return an error if the requested resource is not registered", func() {
			// An unknown type surfaces the underlying Request error even when
			// called through Require.
			_, err := Require("invalid", "1.0.0")
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(HavePrefix("Request"))
		})
		It("should return an error if the requested version is not registered", func() {
			// A known type with an unknown version fails in Require itself.
			_, err := Require(resources.PoolResourceType, "invalid")
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(HavePrefix("Require"))
		})
	})
})
|
package userbook
// If the main resource has a subresource, the naming convention is
// <resource><subresource>
|
package basefile
import (
"reflect"
"strings"
)
// IsExistIn reports whether element e is contained in arr, where arr may be
// a string (substring test), an array/slice (element equality), or a map
// (value equality, keys are not searched). Any other kind of arr, or a
// non-string e against a string arr, yields false.
//
// Fix over the original: named string types (e.g. `type T string`) were
// detected via Kind but then crashed on the direct `.(string)` type
// assertions; reflect's String() accessor is used instead.
func IsExistIn(arr, e interface{}) bool {
	val := reflect.ValueOf(arr)
	switch val.Kind() {
	case reflect.String:
		ev := reflect.ValueOf(e)
		if ev.Kind() == reflect.String {
			return strings.Contains(val.String(), ev.String())
		}
	case reflect.Array, reflect.Slice:
		length := val.Len()
		for i := 0; i < length; i++ {
			if reflect.DeepEqual(val.Index(i).Interface(), e) {
				return true
			}
		}
	case reflect.Map:
		// Note: this matches map VALUES, not keys.
		for _, key := range val.MapKeys() {
			if reflect.DeepEqual(val.MapIndex(key).Interface(), e) {
				return true
			}
		}
	}
	return false
}
|
package repository
import (
"github.com/kosegor/go-covid19-api/app/domain/model"
"github.com/kosegor/go-covid19-api/app/interface/apierr"
)
// ElasticRepository abstracts the Elasticsearch persistence layer for
// COVID-19 incidents.
type ElasticRepository interface {
	// Insert stores a new incident and returns the persisted version, or an
	// API error describing the failure.
	Insert(*model.Incident) (*model.Incident, *apierr.ApiError)
	//FindByCountry(string) ([]*model.Incident, *apierr.ApiError)
}
|
// Copyright © 2018 Wei Shen <shenwei356@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"github.com/shenwei356/unikmer"
"github.com/spf13/cobra"
)
// mapInitSize is the initial capacity hint used when allocating k-mer maps.
var mapInitSize = 100000

// Control-flow flags returned by iteration callbacks: keep going, break the
// current loop, or return from the enclosing function.
const (
	flagContinue = iota
	flagBreak
	flagReturn
)
// Options contains the global flags
type Options struct {
	NumCPUs int  // worker/thread count (from --threads)
	Verbose bool // emit progress information (from --verbose)
}
// getOptions collects the global command-line flags of cmd into an Options
// value.
func getOptions(cmd *cobra.Command) *Options {
	opts := &Options{
		NumCPUs: getFlagPositiveInt(cmd, "threads"),
		Verbose: getFlagBool(cmd, "verbose"),
	}
	return opts
}
// degenerateBaseMapNucl maps every IUPAC nucleotide code (upper- and
// lower-case) to the set of concrete bases it stands for; unambiguous bases
// map to themselves.
var degenerateBaseMapNucl = map[byte]string{
	'A': "A",
	'T': "T",
	'U': "U",
	'C': "C",
	'G': "G",
	'R': "AG",
	'Y': "CT",
	'M': "AC",
	'K': "GT",
	'S': "CG",
	'W': "AT",
	'H': "ACT",
	'B': "CGT",
	'V': "ACG",
	'D': "AGT",
	'N': "ACGT",
	'a': "a",
	't': "t",
	'u': "u",
	'c': "c",
	'g': "g",
	'r': "ag",
	'y': "ct",
	'm': "ac",
	'k': "gt",
	's': "cg",
	'w': "at",
	'h': "act",
	'b': "cgt",
	'v': "acg",
	'd': "agt",
	'n': "acgt",
}
// extendDegenerateSeq expands a sequence containing IUPAC degenerate bases
// into every concrete sequence it can represent (cartesian product of the
// per-position base sets). It returns unikmer.ErrIllegalBase, together with
// the sequences expanded so far, when s contains a byte that is not an
// IUPAC code.
func extendDegenerateSeq(s []byte) (dseqs [][]byte, err error) {
	dseqs = [][]byte{{}}
	for _, base := range s {
		dbases, ok := degenerateBaseMapNucl[base]
		if !ok {
			return dseqs, unikmer.ErrIllegalBase
		}
		if len(dbases) == 1 {
			// Unambiguous base: extend every sequence in place.
			for i := range dseqs {
				dseqs[i] = append(dseqs[i], dbases[0])
			}
			continue
		}
		// Ambiguous base: for each alternative beyond the first, clone every
		// existing sequence and append that alternative...
		more := make([][]byte, 0, len(dseqs)*(len(dbases)-1))
		for i := 1; i < len(dbases); i++ {
			for _, seq := range dseqs {
				ext := make([]byte, len(seq), len(seq)+1)
				copy(ext, seq)
				more = append(more, append(ext, dbases[i]))
			}
		}
		// ...then extend the originals with the first alternative and merge.
		for i := range dseqs {
			dseqs[i] = append(dseqs[i], dbases[0])
		}
		dseqs = append(dseqs, more...)
	}
	return dseqs, nil
}
|
package main
import "fmt"
// main prints 4! computed iteratively.
func main() {
	result := factorial(4)
	fmt.Println(result)
}
// factorial returns num! computed with a loop (no recursion).
// For num <= 1 — including 0, whose factorial is 1 — it returns 1; the
// original returned 0 for input 0, which was wrong.
func factorial(num int) int {
	result := 1
	for i := 2; i <= num; i++ {
		result *= i
	}
	return result
}

//challenge from Todd to do a factorial without using recursion and using loops.
|
package nopaste
import "testing"
// TestGetRegionFromARN verifies region extraction from a full SNS ARN and
// the error path for a truncated ARN.
func TestGetRegionFromARN(t *testing.T) {
	cases := []struct {
		arn     string
		region  string
		wantErr bool
	}{
		{arn: "arn:aws:sns:us-east-1:999999999:example", region: "us-east-1"},
		{arn: "arn:aws:sns", wantErr: true},
	}
	for _, tc := range cases {
		region, err := getRegionFromARN(tc.arn)
		if tc.wantErr {
			if region != "" || err == nil {
				t.Errorf("must be failed %s from %s", region, tc.arn)
			}
			continue
		}
		if region != tc.region {
			t.Errorf("invalid region %s from %s", region, tc.arn)
		}
	}
}
|
package decode_string
// pair is one stack frame while decoding: the text accumulated before an
// opening bracket and the repeat count that precedes it.
type pair struct {
	text string
	num  int
}

// decodeString expands a run-length-encoded string such as "3[a2[c]]" into
// its literal form ("accaccacc") using an explicit stack: on '[' the current
// count and prefix are pushed; on ']' the bracketed text is repeated and
// appended to the popped prefix.
func decodeString(s string) string {
	stack := make([]pair, 0, len(s)/2)
	var (
		text string
		num  int
	)
	for _, r := range s {
		switch {
		case r >= '0' && r <= '9':
			num = num*10 + int(r-'0')
		case r == '[':
			stack = append(stack, pair{text: text, num: num})
			text, num = "", 0
		case r == ']':
			top := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			expanded := top.text
			for n := 0; n < top.num; n++ {
				expanded += text
			}
			text = expanded
		default:
			text += string(r)
		}
	}
	return text
}
|
package impl_test
import (
"context"
"testing"
"time"
"github.com/mylxsw/adanos-alert/internal/repository"
"github.com/mylxsw/adanos-alert/internal/repository/impl"
"github.com/stretchr/testify/suite"
"go.mongodb.org/mongo-driver/bson"
)
// QueueTestSuit exercises the MongoDB-backed queue repository end to end.
type QueueTestSuit struct {
	suite.Suite
	repo repository.QueueRepo // repository under test, rebuilt per test
}

// TearDownTest empties the queue collection after each test so tests stay
// independent of each other.
func (q *QueueTestSuit) TearDownTest() {
	q.NoError(q.repo.Delete(bson.M{}))
}

// SetupTest connects to the test database and builds a fresh repository
// before each test.
func (q *QueueTestSuit) SetupTest() {
	db, err := Database()
	q.NoError(err)
	q.repo = impl.NewQueueRepo(db)
}

// TestEnqueueDequeue covers the job lifecycle: dequeuing from an empty
// queue fails with ErrNotFound, enqueue succeeds, dequeue hands the job
// back in the running state, and the job no longer counts as waiting.
func (q *QueueTestSuit) TestEnqueueDequeue() {
	// test empty queue
	_, err := q.repo.Dequeue(context.TODO())
	q.Error(err)
	q.Equal(repository.ErrNotFound, err)
	// test enqueue
	item := repository.QueueJob{
		Name: "action",
		Payload: "{}",
	}
	// add a item to queue
	insertID, err := q.repo.Enqueue(context.TODO(), item)
	q.NoError(err)
	q.NotEmpty(insertID)
	{
		// NOTE(review): the sleep presumably lets the enqueued job become
		// eligible for dequeue — confirm against the repository's timing
		// semantics.
		time.Sleep(10 * time.Millisecond)
		// test dequeue one item
		item2, err := q.repo.Dequeue(context.TODO())
		q.NoError(err)
		q.EqualValues(item.Name, item2.Name)
		q.EqualValues(repository.QueueItemStatusRunning, item2.Status)
		// test empty queue
		{
			_, err := q.repo.Dequeue(context.TODO())
			q.Error(err)
			q.Equal(repository.ErrNotFound, err)
		}
		// test item's status changed to running after dequeue
		item21, err := q.repo.Get(item2.ID)
		q.NoError(err)
		q.EqualValues(repository.QueueItemStatusRunning, item21.Status)
		// test queue item count
		c21, err := q.repo.Count(bson.M{"status": repository.QueueItemStatusWait})
		q.NoError(err)
		q.EqualValues(0, c21)
	}
}

// TestQueueRepo runs the suite with go test.
func TestQueueRepo(t *testing.T) {
	suite.Run(t, new(QueueTestSuit))
}
|
/*
Copyright 2021 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validate
import (
"context"
"fmt"
"os/exec"
sErrors "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/errors"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/kubernetes/manifest"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/render/kptfile"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/util"
"github.com/GoogleContainerTools/skaffold/v2/proto/v1"
)
var (
	// allowListedValidators are the validator names advertised to users in
	// the error message for an unknown validator.
	allowListedValidators = []string{"kubeval"}
	// validatorAllowlist maps a skaffold validator name to the kpt function
	// that implements it.
	// NOTE(review): "gatekeeper" is resolvable here but missing from
	// allowListedValidators above — confirm whether it should be advertised.
	validatorAllowlist = map[string]kptfile.Function{
		"kubeval": {Image: "gcr.io/kpt-fn/kubeval:v0.1"},
		// TODO: Add conftest validator in kpt catalog.
		"gatekeeper": {
			Image:     "gcr.io/kpt-fn/gatekeeper:v0.2.1",
			ConfigMap: map[string]string{}},
	}
)
// NewValidator instantiates a Validator object from the skaffold.yaml
// validator config. Each configured name must appear in validatorAllowlist;
// an unknown name produces an actionable CONFIG_UNKNOWN_VALIDATOR error
// listing the supported validators.
func NewValidator(config []latest.Validator) (Validator, error) {
	var fns []kptfile.Function
	for _, c := range config {
		fn, ok := validatorAllowlist[c.Name]
		if !ok {
			// TODO: Add links to explain "skaffold-managed mode" and "kpt-managed mode".
			return Validator{}, sErrors.NewErrorWithStatusCode(
				&proto.ActionableErr{
					Message: fmt.Sprintf("unsupported validator %q", c.Name),
					ErrCode: proto.StatusCode_CONFIG_UNKNOWN_VALIDATOR,
					Suggestions: []*proto.Suggestion{
						{
							SuggestionCode: proto.SuggestionCode_CONFIG_ALLOWLIST_VALIDATORS,
							Action: fmt.Sprintf(
								"please only use the following validators in skaffold-managed mode: %v. "+
									"to use custom validators, please use kpt-managed mode.", allowListedValidators),
						},
					},
				})
		}
		fns = append(fns, fn)
	}
	return Validator{kptFn: fns}, nil
}
// Validator runs a fixed list of kpt validator functions against rendered
// manifests.
type Validator struct {
	kptFn []kptfile.Function // resolved from the skaffold.yaml validator config
}

// GetDeclarativeValidators transforms and returns the skaffold validators defined in skaffold.yaml
func (v Validator) GetDeclarativeValidators() []kptfile.Function {
	// TODO: guarantee the v.kptFn is updated once users changed skaffold.yaml file.
	return v.kptFn
}
// Validate streams the manifest list to each configured validator via
// `kpt fn eval` on stdin and returns the first failure, or nil when every
// validator passes (or none are configured).
func (v Validator) Validate(ctx context.Context, ml manifest.ManifestList) error {
	// len() of a nil slice is 0, so the extra nil check was redundant.
	if len(v.kptFn) == 0 {
		return nil
	}
	for _, validator := range v.kptFn {
		// Pass the validator's ConfigMap entries as key=value args after "--".
		kvs := util.EnvMapToSlice(validator.ConfigMap, "=")
		args := []string{"fn", "eval", "-i", validator.Image, "-o", "unwrap", "-", "--"}
		args = append(args, kvs...)
		cmd := exec.CommandContext(ctx, "kpt", args...)
		cmd.Stdin = ml.Reader()
		if err := cmd.Run(); err != nil {
			return err
		}
	}
	return nil
}
|
package main
import "sort"
//524. 通过删除字母匹配到字典里最长单词
//给你一个字符串 s 和一个字符串数组 dictionary 作为字典,找出并返回字典中最长的字符串,该字符串可以通过删除 s 中的某些字符得到。
//
//如果答案不止一个,返回长度最长且字典序最小的字符串。如果答案不存在,则返回空字符串。
//
//
//
//示例 1:
//
//输入:s = "abpcplea", dictionary = ["ale","apple","monkey","plea"]
//输出:"apple"
//示例 2:
//
//输入:s = "abpcplea", dictionary = ["a","b","c"]
//输出:"a"
//
//
//提示:
//
//1 <= s.length <= 1000
//1 <= dictionary.length <= 1000
//1 <= dictionary[i].length <= 1000
//s 和 dictionary[i] 仅由小写英文字母组成
//思路 双指针
// findLongestWord returns the longest word in dictionary that is a
// subsequence of s (ties broken by lexicographic order); "" when none
// matches. Note: dictionary is sorted in place, as in the original.
//
// The matcher bound-checks i against len(t) before indexing, which also
// makes an empty dictionary word safe (the original panicked on "").
func findLongestWord(s string, dictionary []string) string {
	// Longest first; equal lengths ordered lexicographically, so the first
	// full match is the answer.
	sort.Slice(dictionary, func(i, j int) bool {
		a, b := dictionary[i], dictionary[j]
		return len(a) > len(b) || len(a) == len(b) && a < b
	})
	for _, t := range dictionary {
		// Two pointers: advance through s, consuming a char of t on match.
		i := 0
		for j := 0; j < len(s) && i < len(t); j++ {
			if s[j] == t[i] {
				i++
			}
		}
		if i == len(t) {
			return t
		}
	}
	return ""
}
// main demonstrates findLongestWord on the first example from the problem.
func main() {
	answer := findLongestWord("abpcplea", []string{"ale", "apple", "monkey", "plea"})
	println(answer)
}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package instanceiterator_test
import (
"errors"
"fmt"
"github.com/pivotal-cf/on-demand-service-broker/instanceiterator"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pivotal-cf/on-demand-service-broker/broker"
"github.com/pivotal-cf/on-demand-service-broker/config"
"github.com/pivotal-cf/on-demand-service-broker/instanceiterator/fakes"
"github.com/pivotal-cf/on-demand-service-broker/service"
)
// TestIterator wires the Ginkgo suite into go test.
func TestIterator(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Iterator Suite")
}
// testState scripts the behavior of one service instance in a test: the
// canned outputs the fake triggerer returns, call counters, and a
// processController used to pause/resume the instance's processing.
type testState struct {
	instance             service.Instance
	triggerOutput        []instanceiterator.OperationState // per-call results of TriggerOperation
	triggerCallCount     int
	checkStatusOutput    []instanceiterator.OperationState // per-call results of Check
	checkStatusCallCount int
	taskID               int
	controller           *processController
}
// setupTest configures the fake broker and triggerer from the scripted
// states: the broker reports all instances, and the triggerer's
// TriggerOperation/Check stubs look up the matching state by GUID, bump its
// call counter, and return the next scripted operation state.
func setupTest(states []*testState, brokerServices *fakes.FakeBrokerServices, fakeTriggerer *fakes.FakeTriggerer) {
	var instances []service.Instance
	for i, s := range states {
		instances = append(instances, s.instance)
		s.controller = newProcessController(fmt.Sprintf("si_%d", i))
	}
	brokerServices.InstancesReturns(instances, nil)
	brokerServices.LatestInstanceInfoStub = func(i service.Instance) (service.Instance, error) {
		return i, nil
	}
	fakeTriggerer.TriggerOperationStub = func(instance service.Instance) (instanceiterator.TriggeredOperation, error) {
		for _, s := range states {
			if instance.GUID == s.instance.GUID {
				// Signal that this instance's processing has begun, then
				// return the next scripted trigger result.
				s.controller.NotifyStart()
				s.triggerCallCount++
				return instanceiterator.TriggeredOperation{
					State: s.triggerOutput[s.triggerCallCount-1],
					Data:  broker.OperationData{BoshTaskID: s.taskID, OperationType: broker.OperationTypeUpgrade},
				}, nil
			}
		}
		return instanceiterator.TriggeredOperation{}, errors.New("unexpected instance GUID")
	}
	fakeTriggerer.CheckStub = func(guid string, operationData broker.OperationData) (instanceiterator.TriggeredOperation, error) {
		for _, s := range states {
			if guid == s.instance.GUID {
				// Block until the test allows this instance to proceed, then
				// return the next scripted status.
				s.controller.WaitForSignalToProceed()
				s.checkStatusCallCount++
				return instanceiterator.TriggeredOperation{
					State: s.checkStatusOutput[s.checkStatusCallCount-1],
					Data:  broker.OperationData{BoshTaskID: s.taskID, OperationType: broker.OperationTypeUpgrade},
				}, nil
			}
		}
		return instanceiterator.TriggeredOperation{}, errors.New("unexpected instance GUID")
	}
}
// hasReportedFinished asserts the listener's single Finished call reported
// the expected orphan/processed/deleted counts and busy/failed instances.
func hasReportedFinished(fakeListener *fakes.FakeListener, expectedOrphans, expectedProcessed, expectedDeleted int, expectedBusyInstances []string, expectedFailedInstances []string) {
	Expect(fakeListener.FinishedCallCount()).To(Equal(1), "Finished call count")
	orphanCount, processedCount, _, deletedCount, busyInstances, failedInstances := fakeListener.FinishedArgsForCall(0)
	Expect(orphanCount).To(Equal(expectedOrphans), "orphans")
	Expect(processedCount).To(Equal(expectedProcessed), "processed")
	Expect(deletedCount).To(Equal(expectedDeleted), "deleted")
	Expect(busyInstances).To(ConsistOf(expectedBusyInstances), "busyInstances")
	Expect(failedInstances).To(ConsistOf(expectedFailedInstances), "failedInstances")
}

// hasSlept asserts the sleeper's callIndex-th call used the given interval.
func hasSlept(fakeSleeper *fakes.FakeSleeper, callIndex int, expectedInterval time.Duration) {
	Expect(fakeSleeper.SleepCallCount()).To(BeNumerically(">", callIndex))
	Expect(fakeSleeper.SleepArgsForCall(callIndex)).To(Equal(expectedInterval))
}

// hasReportedAttempts asserts the index-th RetryAttempt call reported the
// given attempt number and limit.
func hasReportedAttempts(fakeListener *fakes.FakeListener, index, attempt, limit int) {
	Expect(fakeListener.RetryAttemptCallCount()).To(BeNumerically(">", index), "Retries call count")
	c, l := fakeListener.RetryAttemptArgsForCall(index)
	Expect(c).To(Equal(attempt))
	Expect(l).To(Equal(limit))
}

// hasReportedCanaryAttempts asserts there were exactly count canary retry
// reports, numbered 1..count, each with the same limit and remaining value.
func hasReportedCanaryAttempts(fakeListener *fakes.FakeListener, count, limit, remaining int) {
	Expect(fakeListener.RetryCanariesAttemptCallCount()).To(Equal(count), "Canary retries call count")
	for i := 0; i < count; i++ {
		c, l, r := fakeListener.RetryCanariesAttemptArgsForCall(i)
		Expect(c).To(Equal(i + 1))
		Expect(l).To(Equal(limit))
		Expect(r).To(Equal(remaining))
	}
}

// hasReportedRetries asserts the pending-instance count of each successive
// Progress call.
func hasReportedRetries(fakeListener *fakes.FakeListener, expectedPendingInstancesCount ...int) {
	for i, expectedRetryCount := range expectedPendingInstancesCount {
		_, _, _, _, toRetryCount, _ := fakeListener.ProgressArgsForCall(i)
		Expect(toRetryCount).To(Equal(expectedRetryCount), fmt.Sprintf("Retry count: %v", i))
	}
}

// hasReportedStarting asserts Starting was called once with the expected
// max-in-flight thread count.
func hasReportedStarting(fakeListener *fakes.FakeListener, maxInFlight int) {
	Expect(fakeListener.StartingCallCount()).To(Equal(1))
	threads := fakeListener.StartingArgsForCall(0)
	Expect(threads).To(Equal(maxInFlight))
}

// hasReportedProgress asserts the callIndex-th Progress call carried the
// expected interval and counters.
func hasReportedProgress(fakeListener *fakes.FakeListener, callIndex int, expectedInterval time.Duration, expectedOrphans, expectedProcessed, expectedToRetry, expectedDeleted int) {
	Expect(fakeListener.ProgressCallCount()).To(BeNumerically(">", callIndex), "callCount")
	attemptInterval, orphanCount, processedCount, _, toRetryCount, deletedCount := fakeListener.ProgressArgsForCall(callIndex)
	Expect(attemptInterval).To(Equal(expectedInterval), "attempt interval")
	Expect(orphanCount).To(Equal(expectedOrphans), "orphans")
	Expect(processedCount).To(Equal(expectedProcessed), "processed")
	Expect(toRetryCount).To(Equal(expectedToRetry), "to retry")
	Expect(deletedCount).To(Equal(expectedDeleted), "deleted")
}

// hasReportedCanariesStarting asserts CanariesStarting was called once with
// the expected canary count and selection filter.
func hasReportedCanariesStarting(fakeListener *fakes.FakeListener, count int, filter config.CanarySelectionParams) {
	Expect(fakeListener.CanariesStartingCallCount()).To(Equal(1), "CanariesStarting() call count")
	canaryCount, actualFilter := fakeListener.CanariesStartingArgsForCall(0)
	Expect(canaryCount).To(Equal(count), "canaryCount")
	Expect(actualFilter).To(Equal(filter), "filter")
}

// hasReportedCanariesFinished asserts the number of CanariesFinished calls.
func hasReportedCanariesFinished(fakeListener *fakes.FakeListener, count int) {
	Expect(fakeListener.CanariesFinishedCallCount()).To(Equal(count), "CanariesFinished() call count")
}

// hasReportedInstanceOperationStartResult asserts the idx-th start-result
// report carried the expected GUID and operation state.
func hasReportedInstanceOperationStartResult(fakeListener *fakes.FakeListener, idx int,
	expectedGuid string, expectedStatus instanceiterator.OperationState) {
	Expect(fakeListener.InstanceOperationStartResultCallCount()).To(BeNumerically(">", idx))
	guid, operationType := fakeListener.InstanceOperationStartResultArgsForCall(idx)
	Expect(guid).To(Equal(expectedGuid))
	Expect(operationType).To(Equal(expectedStatus))
}

// hasReportedInstanceOperationStarted asserts the idx-th operation-starting
// report: which instance, its position, the total, and the canary flag.
func hasReportedInstanceOperationStarted(fakeListener *fakes.FakeListener, idx int,
	expectedInstance string, expectedIndex, expectedTotalInstances int, expectedIsDoingCanaries bool) {
	Expect(fakeListener.InstanceOperationStartingCallCount()).To(BeNumerically(">", idx))
	instance, index, total, canaryFlag := fakeListener.InstanceOperationStartingArgsForCall(idx)
	Expect(instance).To(Equal(expectedInstance))
	Expect(index).To(Equal(expectedIndex), "expected index for instance operation started")
	Expect(total).To(Equal(expectedTotalInstances), "expected total num of instances for instance operation started")
	Expect(canaryFlag).To(Equal(expectedIsDoingCanaries), "expected is doing canaries")
}

// hasReportedWaitingFor asserts the idx-th WaitingFor report named the
// expected GUID and BOSH task id.
func hasReportedWaitingFor(fakeListener *fakes.FakeListener, idx int, expectedGuid string, expectedTaskID int) {
	Expect(fakeListener.WaitingForCallCount()).To(BeNumerically(">", idx))
	guid, taskID := fakeListener.WaitingForArgsForCall(idx)
	Expect(guid).To(Equal(expectedGuid))
	Expect(taskID).To(Equal(expectedTaskID))
}

// hasReportedOperationState asserts the idx-th operation-finished report
// carried the expected GUID and status string.
func hasReportedOperationState(fakeListener *fakes.FakeListener, idx int, expectedGuid, expectedStatus string) {
	Expect(fakeListener.InstanceOperationFinishedCallCount()).To(BeNumerically(">", idx))
	guid, status := fakeListener.InstanceOperationFinishedArgsForCall(idx)
	Expect(guid).To(Equal(expectedGuid))
	Expect(status).To(Equal(expectedStatus))
}

// hasReportedInstancesToProcess asserts the listener was told exactly once
// which instances will be processed.
func hasReportedInstancesToProcess(fakeListener *fakes.FakeListener, instances ...service.Instance) {
	Expect(fakeListener.InstancesToProcessCallCount()).To(Equal(1))
	Expect(fakeListener.InstancesToProcessArgsForCall(0)).To(Equal(instances))
}
// expectToHaveStarted asserts every given controller observed a start.
func expectToHaveStarted(controllers ...*processController) {
	for _, c := range controllers {
		c.HasStarted()
	}
}

// expectToHaveNotStarted asserts none of the given controllers started.
func expectToHaveNotStarted(controllers ...*processController) {
	for _, c := range controllers {
		c.DoesNotStart()
	}
}

// allowToProceed unblocks each controller's WaitForSignalToProceed.
func allowToProceed(controllers ...*processController) {
	for _, c := range controllers {
		c.AllowToProceed()
	}
}
// processController coordinates a test with one simulated instance's
// processing goroutine via two buffered signal channels: `started` reports
// that processing began, `canProceed` gates its continuation.
type processController struct {
	name         string
	startedState bool // NOTE(review): never read in the visible code — possibly vestigial
	started      chan bool
	canProceed   chan bool
}

// newProcessController builds a controller with buffered (non-blocking)
// signal channels.
func newProcessController(name string) *processController {
	return &processController{
		started:    make(chan bool, 1),
		canProceed: make(chan bool, 1),
		name:       name,
	}
}

// NotifyStart records that processing has begun (non-blocking, buffer 1).
func (p *processController) NotifyStart() {
	p.started <- true
}

// WaitForSignalToProceed blocks until AllowToProceed is called.
func (p *processController) WaitForSignalToProceed() {
	<-p.canProceed
}

// HasStarted asserts (eventually) that NotifyStart was called.
func (p *processController) HasStarted() {
	Eventually(p.started).Should(Receive(), fmt.Sprintf("Process %s expected to be in a started state", p.name))
}

// DoesNotStart asserts (consistently) that NotifyStart was never called.
func (p *processController) DoesNotStart() {
	Consistently(p.started).ShouldNot(Receive(), fmt.Sprintf("Process %s expected to be in a non-started state", p.name))
}

// AllowToProceed releases one WaitForSignalToProceed.
func (p *processController) AllowToProceed() {
	p.canProceed <- true
}
|
package usecase
import (
"marketplace/ads/domain"
"github.com/go-pg/pg/v10"
)
// ListUserAdsCmd fetches all ads belonging to a user.
type ListUserAdsCmd func(db *pg.DB, userId int64) ([]domain.Ads, error)

// ListUserAds returns a command that queries the ads table for the rows
// whose User_Id matches; on failure it returns an empty slice and the error.
func ListUserAds() ListUserAdsCmd {
	return func(db *pg.DB, userId int64) ([]domain.Ads, error) {
		var ads []domain.Ads
		if err := db.Model(&ads).
			Where("ads.User_Id = ?", userId).
			Select(); err != nil {
			return []domain.Ads{}, err
		}
		return ads, nil
	}
}
|
package main
import (
"flag"
"log"
"time"
"github.com/jroimartin/gocui"
"github.com/serialx/goclair"
)
// main builds the instance-list TUI: it loads instances in the background
// and runs the gocui main loop until quit.
func main() {
	// NOTE(review): bucket, key and timeout are parsed but never read after
	// flag.Parse() — they look like leftovers from an upload-tool template;
	// confirm before removing, since dropping them changes the CLI surface.
	var bucket, key string
	var timeout time.Duration
	flag.StringVar(&bucket, "b", "", "Bucket name.")
	flag.StringVar(&key, "k", "", "Object key name.")
	flag.DurationVar(&timeout, "d", 0, "Upload timeout.")
	flag.Parse()
	ctrl := goclair.NewInstanceController()
	// Fetch instances asynchronously so the UI appears immediately.
	go func() {
		ctrl.SetInstances(goclair.GetInstances())
	}()
	g, err := gocui.NewGui(gocui.OutputNormal)
	if err != nil {
		log.Panicln(err)
	}
	defer g.Close()
	if err := ctrl.InitializeGui(g); err != nil {
		log.Panicln(err)
	}
	// ErrQuit is the normal exit signal, not a failure.
	if err := g.MainLoop(); err != nil && err != gocui.ErrQuit {
		log.Panicln(err)
	}
}
|
package cmd
import (
"fmt"
"github.com/object88/isomorphicTest/client"
"github.com/spf13/cobra"
)
// createGenerateCommand builds the "generate" subcommand, which asks the
// service for a new UUID.
func createGenerateCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "generate",
		Short: "generate will create a new UUID",
		RunE:  run,
	}
}
// run requests a UUID from the client; if the first attempt is rejected it
// asks for a new service instance and retries once.
// The client is now released via defer, so it is also destroyed on the
// error paths (the original leaked it when RequestNewService failed and on
// the retry-success path).
func run(_ *cobra.Command, _ []string) error {
	c, err := client.NewClient()
	if err != nil {
		return err
	}
	defer c.DestroyClient()
	if uuid, ok := c.GenerateUUID(); ok {
		fmt.Printf("Received UUID: %s\n", uuid)
		return nil
	}
	if err = c.RequestNewService(); err != nil {
		return err
	}
	if uuid, ok := c.GenerateUUID(); ok {
		fmt.Printf("Received UUID: %s\n", uuid)
	}
	return nil
}
|
// +build windows
package launcher
// runReaper is a no-op on Windows (see the build tag above); the reaping
// logic presumably only applies to the Unix build — see the non-Windows
// counterpart of this file.
func runReaper() {}
|
package store
import (
"strconv"
model "github.com/wlanboy/kanbantabs/v2/model"
)
// AddBoard appends a board to the workplace's lanes and persists the change.
func (storage *Storage) AddBoard(board model.Board) {
	storage.Workplace.Lanes = append(storage.Workplace.Lanes, board)
	storage.Save()
}
// DeleteBoard removes a board identified by its decimal string number and
// persists the change. Unparseable or out-of-range input is silently
// ignored.
func (storage *Storage) DeleteBoard(boardnumber string) {
	number, err := strconv.ParseInt(boardnumber, 10, 32)
	if err == nil {
		// NOTE(review): the +1 offset means boardnumber "0" removes index 1,
		// and index 0 can never be removed (number > 0). Verify this matches
		// the caller's numbering scheme — it reads like an off-by-one.
		number := int(number) + 1
		if number > 0 && number < len(storage.Workplace.Lanes) {
			// Swap-with-last removal: O(1), but the lane order changes.
			storage.Workplace.Lanes[number] = storage.Workplace.Lanes[len(storage.Workplace.Lanes)-1]
			storage.Workplace.Lanes = storage.Workplace.Lanes[:len(storage.Workplace.Lanes)-1]
			storage.Save()
		}
	}
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package utils
import (
"reflect"
"testing"
"github.com/DataDog/datadog-operator/pkg/controller/utils/datadog"
)
// Test_EventInfo_GetReason verifies the event reason string is built from
// the event type verb plus the object kind.
func Test_EventInfo_GetReason(t *testing.T) {
	type fields struct {
		objName      string
		objNamespace string
		objKind      string
		eventType    datadog.EventType
	}
	tests := []struct {
		name   string
		fields fields
		want   string
	}{
		{
			name: "DaemonSet creation",
			fields: fields{
				objName:      "foo",
				objNamespace: "bar",
				objKind:      "DaemonSet",
				eventType:    datadog.CreationEvent,
			},
			want: "Create DaemonSet",
		},
		{
			name: "Service deletion",
			fields: fields{
				objName:      "foo",
				objNamespace: "bar",
				objKind:      "Service",
				eventType:    datadog.DeletionEvent,
			},
			want: "Delete Service",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ei := &EventInfo{
				objName:      tt.fields.objName,
				objNamespace: tt.fields.objNamespace,
				objKind:      tt.fields.objKind,
				eventType:    tt.fields.eventType,
			}
			if got := ei.GetReason(); got != tt.want {
				t.Errorf("EventInfo.GetReason() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_EventInfo_GetMessage verifies the message is "namespace/name",
// including the degenerate "/name" form for cluster-scoped objects.
func Test_EventInfo_GetMessage(t *testing.T) {
	type fields struct {
		objName      string
		objNamespace string
		objKind      string
		eventType    datadog.EventType
	}
	tests := []struct {
		name   string
		fields fields
		want   string
	}{
		{
			name: "nominal case",
			fields: fields{
				objName:      "foo",
				objNamespace: "bar",
				objKind:      "DaemonSet",
				eventType:    datadog.CreationEvent,
			},
			want: "bar/foo",
		},
		{
			name: "empty namespace",
			fields: fields{
				objName:      "foo",
				objNamespace: "",
				objKind:      "ClusterRole",
				eventType:    datadog.CreationEvent,
			},
			want: "/foo",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ei := &EventInfo{
				objName:      tt.fields.objName,
				objNamespace: tt.fields.objNamespace,
				objKind:      tt.fields.objKind,
				eventType:    tt.fields.eventType,
			}
			if got := ei.GetMessage(); got != tt.want {
				t.Errorf("EventInfo.GetMessage() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_EventInfo_GetDDEvent verifies the Datadog event combines the reason
// and message into the title and carries the event type through.
func Test_EventInfo_GetDDEvent(t *testing.T) {
	type fields struct {
		objName      string
		objNamespace string
		objKind      string
		eventType    datadog.EventType
	}
	tests := []struct {
		name   string
		fields fields
		want   datadog.Event
	}{
		{
			name: "DaemonSet creation",
			fields: fields{
				objName:      "foo",
				objNamespace: "bar",
				objKind:      "DaemonSet",
				eventType:    datadog.CreationEvent,
			},
			want: datadog.Event{
				Title: "Create DaemonSet bar/foo",
				Type:  datadog.CreationEvent,
			},
		},
		{
			name: "Service deletion",
			fields: fields{
				objName:      "foo",
				objNamespace: "bar",
				objKind:      "Service",
				eventType:    datadog.DeletionEvent,
			},
			want: datadog.Event{
				Title: "Delete Service bar/foo",
				Type:  datadog.DeletionEvent,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ei := &EventInfo{
				objName:      tt.fields.objName,
				objNamespace: tt.fields.objNamespace,
				objKind:      tt.fields.objKind,
				eventType:    tt.fields.eventType,
			}
			if got := ei.GetDDEvent(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("EventInfo.GetDDEvent() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
// The Manager reacts to messages send to it by Notifiers. It calls all ServiceGenerators to generate new Services
// and passes these to ConfigGenerators which generate configuration files.
package manager
import (
"github.com/bmizerany/pat"
"github.com/kelseyhightower/envconfig"
"github.com/prometheus/client_golang/prometheus"
"github.com/wndhydrnt/proxym/log"
"github.com/wndhydrnt/proxym/types"
"net/http"
"sync"
)
type Config struct {
ListenAddress string `envconfig:"listen_address",default:":5678"`
}
// Manager orchestrates Notifiers, ServiceGenerators and ConfigGenerators.
type Manager struct {
	annotators        []types.Annotator        // enrich generated services before config generation
	Config            *Config                  // HTTP listen configuration
	configGenerators  []types.ConfigGenerator  // write configuration from the generated services
	httpRouter        *pat.PatternServeMux     // router backing the embedded HTTP server
	notifiers         []types.Notifier         // sources of refresh triggers
	quit              chan int                 // closed by Quit to stop all notifiers
	refresh           chan string              // notifiers send here to trigger a refresh
	refreshCounter    *prometheus.CounterVec   // refresh outcomes, labeled success/error
	serviceGenerators []types.ServiceGenerator // produce the service list on each refresh
	waitGroup         *sync.WaitGroup          // tracks running notifier goroutines
}
// AddAnnotator registers an Annotator and returns the Manager for chaining.
func (m *Manager) AddAnnotator(a types.Annotator) *Manager {
	m.annotators = append(m.annotators, a)
	return m
}

// AddConfigGenerator registers a ConfigGenerator and returns the Manager
// for chaining.
func (m *Manager) AddConfigGenerator(cg types.ConfigGenerator) *Manager {
	m.configGenerators = append(m.configGenerators, cg)
	return m
}

// AddNotifier registers a Notifier and returns the Manager for chaining.
func (m *Manager) AddNotifier(notifier types.Notifier) *Manager {
	m.notifiers = append(m.notifiers, notifier)
	return m
}

// AddServiceGenerator registers a ServiceGenerator and returns the Manager
// for chaining.
func (m *Manager) AddServiceGenerator(sg types.ServiceGenerator) *Manager {
	m.serviceGenerators = append(m.serviceGenerators, sg)
	return m
}
// RegisterHttpHandler registers an endpoint with the HTTP server, wrapping the
// handler in prometheus instrumentation keyed by path.
func (m *Manager) RegisterHttpHandler(method string, path string, handle http.Handler) *Manager {
	log.AppLog.Debug("Registering HTTP endpoint on '%s' with method '%s'", path, method)
	m.httpRouter.Add(method, path, prometheus.InstrumentHandler(path, handle))
	return m
}

// RegisterHttpHandleFunc adapts a plain handler function and registers it via
// RegisterHttpHandler.
func (m *Manager) RegisterHttpHandleFunc(method, path string, handle func(w http.ResponseWriter, r *http.Request)) {
	m.RegisterHttpHandler(method, path, http.HandlerFunc(handle))
}
// Run starts every notifier, launches the HTTP endpoint and then blocks,
// listening for messages that trigger a refresh until the refresh channel is
// closed. When a refresh is triggered it calls all ServiceGenerators and then
// all ConfigGenerators (via process).
func (m *Manager) Run() {
	m.waitGroup = &sync.WaitGroup{}
	m.waitGroup.Add(len(m.notifiers))

	for _, notifier := range m.notifiers {
		go notifier.Start(m.refresh, m.quit, m.waitGroup)
	}

	// Log a failure to bind/serve instead of silently dropping the error.
	go func() {
		if err := http.ListenAndServe(m.Config.ListenAddress, m.httpRouter); err != nil {
			log.ErrorLog.Error("%s", err)
		}
	}()

	// Refresh right on startup
	m.process()

	for range m.refresh {
		log.AppLog.Debug("Refresh received")
		err := m.process()
		if err != nil {
			log.ErrorLog.Error("%s", err)
			m.refreshCounter.WithLabelValues("error").Inc()
		} else {
			m.refreshCounter.WithLabelValues("success").Inc()
		}
	}
}
// Quit signals every notifier to stop (by closing the quit channel) and waits
// until all of them have shut down.
func (m *Manager) Quit() {
	close(m.quit)
	m.waitGroup.Wait()
}
// process runs one refresh cycle: collect services from every
// ServiceGenerator, let every Annotator enrich the combined list, then hand it
// to every ConfigGenerator. The first error aborts the cycle and is returned.
func (m *Manager) process() error {
	var services []*types.Service

	for _, sg := range m.serviceGenerators {
		svrs, err := sg.Generate()
		if err != nil {
			return err
		}
		services = append(services, svrs...)
	}

	for _, a := range m.annotators {
		err := a.Annotate(services)
		if err != nil {
			return err
		}
	}

	for _, cg := range m.configGenerators {
		err := cg.Generate(services)
		if err != nil {
			return err
		}
	}

	return nil
}
// New creates and returns a new Manager.
//
// It registers the proxym_refresh_count counter with the default prometheus
// registry (MustRegister panics if New is called twice in one process), reads
// configuration from PROXYM_* environment variables and exposes /metrics on
// the HTTP router.
func New() *Manager {
	refreshChannel := make(chan string, 10)
	quitChannel := make(chan int)

	refreshCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "proxym",
		Subsystem: "refresh",
		Name:      "count",
		Help:      "Number of refreshes triggered",
	}, []string{"result"})
	prometheus.MustRegister(refreshCounter)

	var c Config
	// NOTE(review): the error from envconfig.Process is ignored — presumably
	// intentional so defaults apply; confirm.
	envconfig.Process("proxym", &c)

	m := &Manager{
		Config:         &c,
		httpRouter:     pat.New(),
		refresh:        refreshChannel,
		refreshCounter: refreshCounter,
		quit:           quitChannel,
	}

	m.httpRouter.Get("/metrics", prometheus.Handler())

	return m
}
// DefaultManager is the package-level Manager used by the helper functions
// below. It is created eagerly at package init time.
var DefaultManager *Manager = New()

// AddAnnotator adds an Annotator to the DefaultManager.
func AddAnnotator(a types.Annotator) {
	DefaultManager.AddAnnotator(a)
}

// AddConfigGenerator adds a ConfigGenerator to the DefaultManager.
func AddConfigGenerator(cg types.ConfigGenerator) {
	DefaultManager.AddConfigGenerator(cg)
}

// AddNotifier adds a Notifier to the DefaultManager.
func AddNotifier(n types.Notifier) {
	DefaultManager.AddNotifier(n)
}

// AddServiceGenerator adds a ServiceGenerator to the DefaultManager.
func AddServiceGenerator(sg types.ServiceGenerator) {
	DefaultManager.AddServiceGenerator(sg)
}

// RegisterHttpHandler registers an HTTP handler with the DefaultManager.
func RegisterHttpHandler(method string, path string, handle http.Handler) {
	DefaultManager.RegisterHttpHandler(method, path, handle)
}

// RegisterHttpHandleFunc registers an HTTP handler function with the DefaultManager.
func RegisterHttpHandleFunc(method, path string, handle func(w http.ResponseWriter, r *http.Request)) {
	DefaultManager.RegisterHttpHandleFunc(method, path, handle)
}

// Run starts the default manager and blocks (see Manager.Run).
func Run() {
	DefaultManager.Run()
}

// Quit stops the default manager's notifiers and waits for them.
func Quit() {
	DefaultManager.Quit()
}
|
package api
/*
the place to set constants
*/
// DATASTORE_USERS is the datastore kind/collection holding user entities.
const DATASTORE_USERS = "Users"

// DATASTORE_TEAMS is the datastore kind/collection holding team entities.
const DATASTORE_TEAMS = "Teams"
|
/*
* Swagger Kubechat
*
* Wrapper API of kubectl CLI command
*
* API version: 0.1.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package main
import (
"log"
// WARNING!
// Change this to a fully-qualified import path
// once you place this file into your project.
// For example,
//
// sw "github.com/myname/myrepo/go"
//
_ "github.com/supernova106/kubechat/docs" // docs is generated by Swag CLI, you have to import it.
sw "github.com/supernova106/kubechat/go"
ginSwagger "github.com/swaggo/gin-swagger"
"github.com/swaggo/gin-swagger/swaggerFiles"
)
// @title Swagger Kubechat API
// @version 0.1.0
// @description This is a API wraper of kubectl for chatbot
// @termsOfService http://swagger.io/terms/
// @contact.name Binh Nguyen
// @contact.url http://www.swagger.io/support
// @contact.email ntbinh106@gmail.com
// @license.name Apache 2.0
// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
// @host localhost:8080
// @BasePath /v1
// @schemes http https
// main wires up the generated kubechat router, mounts the swagger UI under
// /swagger/, and serves on :8080 (exits fatally if the server stops).
func main() {
	log.Printf("Server started")

	router := sw.NewRouter()
	router.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))

	log.Fatal(router.Run(":8080"))
}
|
package tbot
import (
"context"
"time"
"go.mongodb.org/mongo-driver/mongo"
)
// article is the MongoDB collection name for parsed articles.
const article = "article"

// Article is parsed article
type Article struct {
	ID          string    `json:"id" bson:"_id"`
	Title       string    `json:"title" bson:"title"`
	Link        string    `json:"link" bson:"link"`
	Description string    `json:"description" bson:"description"`
	Author      string    `json:"author" bson:"author"`
	CreatedAt   time.Time `json:"createdAt" bson:"createdAt"`
}

// ArticleDAO collection
type ArticleDAO struct {
	*mongo.Collection
}

// NewArticleDAO fabric new user collection
func NewArticleDAO() *ArticleDAO {
	return &ArticleDAO{NewCollection(article)}
}
// InsertArticle inserts a new article. A duplicate-key error (article already
// stored) is treated as success; any other insert error is returned.
func (ad *ArticleDAO) InsertArticle(a Article) (*mongo.InsertOneResult, error) {
	r, err := ad.InsertOne(context.Background(), &a)
	// `!IsDup(err)` replaces the non-idiomatic `IsDup(err) == false`.
	if err != nil && !IsDup(err) {
		return r, err
	}
	return r, nil
}
|
package modules
import (
"encoding/json"
"gopkg.in/telegram-bot-api.v4"
"io/ioutil"
"log"
"net/http"
)
type Response struct {
Kind string `json: "kind"`
Data map[string]interface{} `json: "data"`
}
// Reddit_updates answers the pending callback query and sends the three most
// recent posts from r/SENT to the chat that triggered the callback.
func Reddit_updates(bot *tgbotapi.BotAPI, update *tgbotapi.Update) {
	queryId := update.CallbackQuery.ID
	apiURL := "https://www.reddit.com/r/SENT/new.json?limit=3"
	chatID := update.CallbackQuery.Message.Chat.ID

	client := &http.Client{}
	req, err := http.NewRequest("GET", apiURL, nil)
	if err != nil {
		log.Fatalln(err)
	}
	// Reddit rejects requests without a browser-like User-Agent.
	req.Header.Set("User-Agent", "Chrome")

	resp, err := client.Do(req)
	if err != nil {
		log.Fatalln(err)
	}
	// The response body was previously never closed, leaking the connection.
	defer resp.Body.Close()

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal("Error reading response")
	}

	var result Response
	// The Unmarshal error was previously ignored; a bad payload would have
	// panicked on the type assertions below instead.
	if err := json.Unmarshal(data, &result); err != nil {
		log.Fatal("Error while parsing response..")
	}

	config := tgbotapi.CallbackConfig{queryId, "", false, "", 0}
	bot.AnswerCallbackQuery(config)

	// Send the three newest posts (listing indexes 0..2).
	for i := 0; i < 3; i++ {
		url := result.Data["children"].([]interface{})[i].(map[string]interface{})["data"].(map[string]interface{})["url"].(string)
		msg := tgbotapi.NewMessage(chatID, url)
		msg.ParseMode = tgbotapi.ModeHTML
		bot.Send(msg)
	}

	HandleGreet(bot, update)
}
|
package repositories
import (
"context"
"headless-todo-tasks-service/internal/entities"
)
// TasksRepository abstracts persistence of tasks.
type TasksRepository interface {
	// Create stores a new task built from the three string arguments.
	// NOTE(review): the meaning of the three strings is not visible here —
	// confirm against the concrete implementation.
	Create(context.Context, string, string, string) (*entities.Task, error)
}
|
package main
import "fmt"
// twoInts is a pair of 64-bit integer coordinates.
type twoInts struct {
	X, Y int64
}

// method returns the component-wise sum of the receiver and b.
func (a twoInts) method(b twoInts) twoInts {
	// a is a receiver
	var sum twoInts
	sum.X = a.X + b.X
	sum.Y = a.Y + b.Y
	return sum
}
// main prints a sample twoInts value.
func main() {
	pair := twoInts{X: 10, Y: 0}
	fmt.Println(pair)
}
|
package user
// User is a user account as serialized to/from JSON.
type User struct {
	Username string  `json:"username"`
	Password string  `json:"password"`
	Email    string  `json:"email"`
	Company  Company `json:"company"`
}

// Company is the company a User belongs to.
type Company struct {
	Name    string `json:"name"`
	Phone   string `json:"phone"`
	Address string `json:"address"`
}
|
package cache
import (
"sync"
"github.com/apache/servicecomb-kie/pkg/model"
"github.com/go-chassis/cari/pkg/errsvc"
)
// pollingCache is the process-wide cache instance returned by CachedKV.
var pollingCache = &LongPollingCache{}

// LongPollingCache exchange space for time
type LongPollingCache struct {
	m sync.Map // topic -> *DBResult
}

// DBResult is one cached query result: the KV response (or the error the
// query produced) plus the revision it was read at.
type DBResult struct {
	KVs *model.KVResponse
	Err *errsvc.Error
	Rev int64
}

// CachedKV returns the shared cache instance.
func CachedKV() *LongPollingCache {
	return pollingCache
}
// Read reads the cached query result
// only need to filter by labels if match pattern is exact
func (c *LongPollingCache) Read(topic string) (int64, *model.KVResponse, *errsvc.Error) {
	v, ok := c.m.Load(topic)
	if !ok {
		return 0, nil, nil
	}
	res := v.(*DBResult)
	if res.Err != nil {
		return 0, nil, res.Err
	}
	return res.Rev, res.KVs, nil
}
// Write stores (or replaces) the cached result for topic.
func (c *LongPollingCache) Write(topic string, r *DBResult) {
	c.m.Store(topic, r)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package dutcontrol is generated from dutcontrol.proto in the ti50 repository.
package dutcontrol
|
package health
import (
"bytes"
"net/http"
"net/url"
"github.com/cerana/cerana/acomm"
"github.com/cerana/cerana/pkg/errors"
"github.com/cerana/cerana/pkg/logrusx"
)
// HTTPStatusArgs are arguments for HTTPStatus health checks.
type HTTPStatusArgs struct {
	// URL to request. Required.
	URL string `json:"url"`
	// Method is the HTTP method; http.NewRequest treats "" as GET.
	Method string `json:"method"`
	// Body is sent as the request body.
	Body []byte `json:"body"`
	// StatusCode is the expected response code; 0 defaults to 200 OK.
	StatusCode int `json:"statusCode"`
}
// HTTPStatus makes an HTTP request to the specified URL and compares the
// response status code to an expected status code (default 200).
func (h *Health) HTTPStatus(req *acomm.Request) (interface{}, *url.URL, error) {
	var args HTTPStatusArgs
	if err := req.UnmarshalArgs(&args); err != nil {
		return nil, nil, err
	}
	if args.URL == "" {
		return nil, nil, errors.Newv("missing arg: url", map[string]interface{}{"args": args, "missing": "url"})
	}
	if args.StatusCode == 0 {
		args.StatusCode = http.StatusOK
	}

	httpReq, err := http.NewRequest(args.Method, args.URL, bytes.NewReader(args.Body))
	// The NewRequest error was previously ignored; a malformed URL or method
	// would have passed a nil request to Do.
	if err != nil {
		return nil, nil, errors.Wrapv(err, map[string]interface{}{"args": args})
	}
	httpResp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		return nil, nil, errors.Wrapv(err, map[string]interface{}{"args": args})
	}
	defer logrusx.LogReturnedErr(httpResp.Body.Close, nil, "failed to close resp body")

	if httpResp.StatusCode != args.StatusCode {
		err = errors.Newv("unexpected response status code", map[string]interface{}{"expectedStatusCode": args.StatusCode, "statusCode": httpResp.StatusCode})
	}
	return nil, nil, err
}
|
package api_test
// STARTMOCK, OMIT
import (
"fmt"
"testing"
"github.com/imrenagi/gotalks/content/2021/testing/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock" // HL
)
// PaymentServiceMock is a testify mock of the payment service consumed by
// api.OrderService.
type PaymentServiceMock struct {
	mock.Mock // HL
}

// GenerateInvoice records the call and returns the stubbed (url, error) pair.
func (m *PaymentServiceMock) GenerateInvoice(ID string) (string, error) {
	args := m.Called(ID) // verification // HL
	return args.String(0), args.Error(1)
}
// STOPMOCK, OMIT
// STARTFINALIZE, OMIT
func TestFinalize(t *testing.T) {
	m := new(PaymentServiceMock)
	// GenerateInvoice returns (string, error); the stub must return a string
	// URL, not an *Invoice, or args.String(0) panics at call time.
	m.On("GenerateInvoice", mock.Anything).    // HL
						Return("example.com/invoices/1", nil) // HL

	orderService := api.OrderService{PaymentService: m}
	url, err := orderService.Finalize("1234")
	// assert.NoError/Equal require the *testing.T as first argument.
	assert.NoError(t, err)
	assert.Equal(t, "example.com/invoices/1", url)
}
// STOPFINALIZE, OMIT
func TestFinalize_Error(t *testing.T) {
	m := new(PaymentServiceMock)
	// STARTFINALIZEERROR, OMIT
	m.On("GenerateInvoice", mock.Anything). // HL
					Return("", fmt.Errorf("random error")) // HL
	// STOPFINALIZEERROR, OMIT

	orderService := api.OrderService{PaymentService: m}
	url, err := orderService.Finalize("1234")
	// The mocked payment service fails, so Finalize must surface the error
	// and return no URL (the test previously asserted success).
	assert.Error(t, err)
	assert.Equal(t, "", url)
}
// STARTVERIFICATION, OMIT
func TestFinalize_WithVerification(t *testing.T) {
	m := new(PaymentServiceMock) // OMIT
	// Stub with the exact argument and a string return matching the
	// (string, error) signature of GenerateInvoice.
	m.On("GenerateInvoice", "1234"). // HL
					Return("example.com/invoices/1", nil)

	orderService := api.OrderService{PaymentService: m}
	url, err := orderService.Finalize("1234") // HL
	assert.NoError(t, err)
	assert.Equal(t, "example.com/invoices/1", url)

	m.AssertExpectations(t)                        // HL
	m.AssertNumberOfCalls(t, "GenerateInvoice", 1) // HL
}
// STOPVERIFICATION, OMIT
// LocationServiceeMock is a testify mock of the location service.
type LocationServiceeMock struct {
	mock.Mock
}

// CreatePinPoint records the call and returns the stubbed (*PinPoint, error).
// It belongs on LocationServiceeMock; it was previously declared on
// PaymentServiceMock by mistake, leaving this mock without the method.
func (m *LocationServiceeMock) CreatePinPoint(l Location) (*PinPoint, error) {
	args := m.Called(l) // verification // HL
	return args.Get(0).(*PinPoint), args.Error(1)
}
// STARTCOMPLEXVER, OMIT
func TestVerifyCreatePoint(t *testing.T) {
// some processing
m := new(LocationServiceeMock)
m.On("CreatePinPoint",
mock.MatchedBy(func (l Location) bool) { // HL
assert.NotEmpty(t, l.Name)
assert.Equal(t, "1600 Villa st", l.Address)
return true
})
}
// STOPCOMPLEXVER, OMIT
|
package server
import (
"github.com/zerolinke/pudge/src/pudge/log"
"encoding/gob"
"sync"
"github.com/zerolinke/pudge/src/pudge/cache"
"time"
)
type cacheUrl string
type TunnelRegistry struct {
tunnels map[string]*Tunnel
affinity *cache.LRUCache
log.Logger
sync.RWMutex
}
// NewTunnelRegistry creates a registry of tunnels with an LRU affinity cache
// of cacheSize entries. If cacheFile is non-empty the cache is loaded from it
// and then periodically saved back (see SaveCacheThread).
func NewTunnelRegistry(cacheSize uint64, cacheFile string) *TunnelRegistry {
	registry := &TunnelRegistry{
		tunnels:  make(map[string]*Tunnel),
		affinity: cache.NewLRUCache(cacheSize),
		Logger:   log.NewPrefixLogger("registry", "tun"),
	}

	// LRUCache uses Gob encoding. Unfortunately, Gob is fickle and will fail
	// to encode or decode any non-primitive types that haven't been "registered"
	// with it. Since we store cacheUrl objects, we need to register them here first
	// for the encoding/decoding to work
	var urlobj cacheUrl
	gob.Register(urlobj)

	// try to load and then periodically save the affinity cache to file, if specified
	if cacheFile != "" {
		err := registry.affinity.LoadItemsFromFile(cacheFile)
		if err != nil {
			// A load failure is non-fatal: start with an empty cache.
			registry.Error("Failed to load affinity cache %s: %v", cacheFile, err)
		}
		// NOTE(review): cacheSaveInterval is defined elsewhere in this package.
		registry.SaveCacheThread(cacheFile, cacheSaveInterval)
	} else {
		registry.Info("No affinity cache specified")
	}
	return registry
}
func (r *TunnelRegistry ) SaveCacheThread(path string, interval time.Duration) {
go func() {
r.Info("Saving affinty cache to %s every %s", path, interval.String())
for {
time.Sleep(interval)
r.Debug("Saving affinity cache")
err:=r.affinity.SaveItemsToFile(path)
if err != nil {
r.Error("Failed to save affinity cache: %v",)
}
}
}()
} |
package main
import "fmt"
// Hands-on exercise #3
// Create TYPED and UNTYPED constants. Print the values of the constants.
const (
	// x is a TYPED constant: its type is explicitly int.
	x int = 33
	// y is an UNTYPED constant: it has the default type string only when used.
	y = "I am a string"
)

func main() {
	fmt.Println(x, y)
}
|
package grpool
import (
"sync"
)
// Pool dispatches jobs placed on JobQueue to a fixed set of workers.
type Pool struct {
	JobQueue   chan Job
	dispatcher *dispatcher
	wg         sync.WaitGroup // tracks in-flight jobs for WaitAll/JobDone
}

// NewPool creates a pool with numWorkers workers and a job queue of capacity
// jobQueueLen.
// NOTE(review): wg is never Add()ed in this excerpt; callers appear to be
// responsible for incrementing it before relying on JobDone/WaitAll — confirm.
func NewPool(numWorkers int, jobQueueLen int) *Pool {
	jobQueue := make(chan Job, jobQueueLen)
	workerPool := make(chan *worker, numWorkers)

	pool := &Pool{
		JobQueue:   jobQueue,
		dispatcher: newDispatcher(workerPool, jobQueue),
	}

	return pool
}

// JobDone marks one tracked job as finished.
func (p *Pool) JobDone() {
	p.wg.Done()
}

// WaitAll blocks until every tracked job has called JobDone.
func (p *Pool) WaitAll() {
	p.wg.Wait()
}

// Release stops the dispatcher and waits for it to acknowledge shutdown
// (the dispatcher echoes on its stop channel).
func (p *Pool) Release() {
	p.dispatcher.stop <- struct{}{}
	<-p.dispatcher.stop
}
|
package metrics
// AgentMetrics is a snapshot of machine-level resource usage plus per-process
// usage as reported by an agent.
// NOTE(review): units (bytes vs KiB, 0-1 vs 0-100 for percentages) are not
// visible here — confirm against the producer.
type AgentMetrics struct {
	MachineMemoryUsage      int64
	MachineMemoryPercentage float64
	MachineCPULoad          float64
	ProcessResourceUsages   []*ProcessResourceUsage
}

// ProcessResourceUsage is the CPU/memory footprint of a single named process.
type ProcessResourceUsage struct {
	Name          string
	CPUPercentage float64
	MemoryRSS     int64
}
|
package udwSqlite3
import (
"bytes"
"github.com/tachyon-protocol/udw/udwMap"
"github.com/tachyon-protocol/udw/udwStrconv"
"github.com/tachyon-protocol/udw/udwStrings"
"strconv"
)
// GetRangeReq describes a key-range query against the table selected by K1.
// The Min/Max/Prefix fields are collapsed into a single [min, max) key range
// by addGetRangeSql.
type GetRangeReq struct {
	K1                 string
	IsDescOrder        bool // descending key order when true
	MinValue           string
	MaxValue           string
	MinValueNotInclude string
	MaxValueNotInclude string
	Prefix             string
	Limit              int // maximum rows; <=0 means no LIMIT clause
}
// addGetRangeSql appends the WHERE / ORDER BY / LIMIT clauses implied by req
// to sqlBuf, and returns the positional values for the `?` placeholders it
// added (in the order they appear in the SQL).
func addGetRangeSql(req GetRangeReq, sqlBuf *bytes.Buffer) (valueList [][]byte) {
	valueList = [][]byte{}
	// Collapse the inclusive/exclusive bounds and the prefix into a single
	// [minInclude, maxNotInclude) key range.
	kr := udwStrings.RangeMinMax{}
	kr.AddMinKeyInclude(req.MinValue)
	kr.AddMinKeyNotInclude(req.MinValueNotInclude)
	kr.AddMaxKeyInclude(req.MaxValue)
	kr.AddMaxKeyNotInclude(req.MaxValueNotInclude)
	kr.AddPrefix(req.Prefix)
	minInclude := kr.GetMinKeyInclude()
	maxNotInclude := kr.GetMaxKeyNotInclude()
	hasWhere := minInclude != "" || maxNotInclude != ""
	if hasWhere {
		sqlBuf.WriteString(" WHERE ")
		if minInclude != "" {
			sqlBuf.WriteString("k>=?")
			valueList = append(valueList, []byte(minInclude))
		}
		if maxNotInclude != "" {
			if minInclude != "" {
				sqlBuf.WriteString(" AND k<?")
			} else {
				sqlBuf.WriteString("k<?")
			}
			valueList = append(valueList, []byte(maxNotInclude))
		}
	}
	if req.IsDescOrder {
		sqlBuf.WriteString(" ORDER BY k DESC")
	} else {
		sqlBuf.WriteString(" ORDER BY k ASC")
	}
	if req.Limit > 0 {
		sqlBuf.WriteString(" LIMIT ")
		sqlBuf.WriteString(strconv.Itoa(req.Limit))
	}
	return valueList
}
// MustCountGetRange returns the number of rows matching the range request.
// Unlike the MustGetRange* functions it panics even when the table is missing.
func (db *Db) MustCountGetRange(req GetRangeReq) int {
	sqlBuf := bytes.NewBufferString(`SELECT count(1) FROM ` + db.getTableNameFromK1(req.K1))
	valueList := addGetRangeSql(req, sqlBuf)
	s, errMsg := db.queryToOneString(sqlBuf.String(), valueList...)
	if errMsg != "" {
		panic(errMsg)
	}
	return udwStrconv.MustParseInt(s)
}
// MustGetRange returns the key/value pairs in the requested range.
// A missing table yields nil; any other query error panics.
func (db *Db) MustGetRange(req GetRangeReq) []udwMap.KeyValuePair {
	sqlBuf := bytes.NewBufferString(`SELECT k,v FROM ` + db.getTableNameFromK1(req.K1))
	valueList := addGetRangeSql(req, sqlBuf)
	output := []udwMap.KeyValuePair{}
	errMsg := db.Query(QueryReq{
		Query: sqlBuf.String(),
		Args:  valueList,
		RespDataCb: func(row [][]byte) {
			output = append(output, udwMap.KeyValuePair{
				Key:   string(row[0]),
				Value: string(row[1]),
			})
		},
		ColumnCount: 2,
	})
	if errMsg != "" {
		if errorIsTableNotExist(errMsg) {
			return nil
		}
		panic(errMsg)
	}
	return output
}
// MustGetRangeKeyList returns only the keys in the requested range.
// A missing table yields nil; any other query error panics.
func (db *Db) MustGetRangeKeyList(req GetRangeReq) []string {
	sqlBuf := bytes.NewBufferString(`SELECT k FROM ` + db.getTableNameFromK1(req.K1))
	valueList := addGetRangeSql(req, sqlBuf)
	output := []string{}
	errMsg := db.Query(QueryReq{
		Query: sqlBuf.String(),
		Args:  valueList,
		RespDataCb: func(row [][]byte) {
			output = append(output, string(row[0]))
		},
		ColumnCount: 1,
	})
	if errMsg != "" {
		if errorIsTableNotExist(errMsg) {
			return nil
		}
		panic(errMsg)
	}
	return output
}
// MustGetRangeValueList returns only the values in the requested range.
// A missing table yields nil; any other query error panics.
func (db *Db) MustGetRangeValueList(req GetRangeReq) []string {
	sqlBuf := bytes.NewBufferString(`SELECT v FROM ` + db.getTableNameFromK1(req.K1))
	valueList := addGetRangeSql(req, sqlBuf)
	output := []string{}
	errMsg := db.Query(QueryReq{
		Query: sqlBuf.String(),
		Args:  valueList,
		RespDataCb: func(row [][]byte) {
			output = append(output, string(row[0]))
		},
		ColumnCount: 1,
	})
	if errMsg != "" {
		if errorIsTableNotExist(errMsg) {
			return nil
		}
		panic(errMsg)
	}
	return output
}
// MustGetRangeKeyMap returns the keys in the requested range as a set.
func (db *Db) MustGetRangeKeyMap(req GetRangeReq) map[string]struct{} {
	out := map[string]struct{}{}
	db.MustGetRangeKeyListCallback(req, func(key string) {
		// The redundant bare `return` at the end of this callback was removed.
		out[key] = struct{}{}
	})
	return out
}
// MustGetRangeToMap returns the requested range as a key-to-value map.
func (db *Db) MustGetRangeToMap(req GetRangeReq) map[string]string {
	pairs := db.MustGetRange(req)
	out := make(map[string]string, len(pairs))
	for _, kv := range pairs {
		out[kv.Key] = kv.Value
	}
	return out
}
|
/* SPDX-License-Identifier: MIT
*
* Copyright (C) 2019-2020 WireGuard LLC. All Rights Reserved.
*/
package version
import (
"os"
"unsafe"
"golang.org/x/sys/windows"
"golang.zx2c4.com/wireguard/windows/version/wintrust"
)
const (
	// officialCommonName is the certificate subject CN of official builds.
	officialCommonName = "WireGuard LLC"
	// evPolicyOid identifies the EV certificate policy checked by IsRunningEVSigned.
	evPolicyOid = "2.23.140.1.3"
	// policyExtensionOid is the X.509 certificatePolicies extension OID.
	policyExtensionOid = "2.5.29.32"
)
// VerifyAuthenticode reports whether the file at path carries a valid
// Authenticode signature, with whole-chain revocation checking enabled.
func VerifyAuthenticode(path string) bool {
	path16, err := windows.UTF16PtrFromString(path)
	if err != nil {
		return false
	}
	file := &wintrust.WinTrustFileInfo{
		CbStruct: uint32(unsafe.Sizeof(wintrust.WinTrustFileInfo{})),
		FilePath: path16,
	}
	data := &wintrust.WinTrustData{
		CbStruct:                        uint32(unsafe.Sizeof(wintrust.WinTrustData{})),
		UIChoice:                        wintrust.WTD_UI_NONE,
		RevocationChecks:                wintrust.WTD_REVOKE_WHOLECHAIN, // Full revocation checking, as this is called with network connectivity.
		UnionChoice:                     wintrust.WTD_CHOICE_FILE,
		StateAction:                     wintrust.WTD_STATEACTION_VERIFY,
		FileOrCatalogOrBlobOrSgnrOrCert: uintptr(unsafe.Pointer(file)),
	}
	// nil from WinVerifyTrust means the trust verification succeeded.
	return wintrust.WinVerifyTrust(windows.InvalidHandle, &wintrust.WINTRUST_ACTION_GENERIC_VERIFY_V2, data) == nil
}
// These are easily by-passable checks, which do not serve security purposes. Do not place security-sensitive
// functions below this line.
// IsRunningOfficialVersion reports whether the current executable carries a
// certificate whose common name matches the official one.
func IsRunningOfficialVersion() bool {
	exe, err := os.Executable()
	if err != nil {
		return false
	}
	certNames, err := wintrust.ExtractCertificateNames(exe)
	if err != nil {
		return false
	}
	for _, cn := range certNames {
		if cn == officialCommonName {
			return true
		}
	}
	return false
}
// IsRunningEVSigned reports whether the current executable's certificate
// carries the EV certificate policy OID.
func IsRunningEVSigned() bool {
	exe, err := os.Executable()
	if err != nil {
		return false
	}
	certPolicies, err := wintrust.ExtractCertificatePolicies(exe, policyExtensionOid)
	if err != nil {
		return false
	}
	for _, oid := range certPolicies {
		if oid == evPolicyOid {
			return true
		}
	}
	return false
}
|
package main
import (
"errors"
"flag"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
pit "github.com/typester/go-pit"
)
var (
Endpoint string
)
func main() {
// parse arguments
var channel string
var summary string
var notice bool
var useAuth bool
var username string
var password string
flag.StringVar(&channel, "channel", "", "irc channel")
flag.StringVar(&channel, "c", "", "irc channel")
flag.StringVar(&summary, "summary", "", "text summary")
flag.StringVar(&summary, "s", "", "text summary")
flag.BoolVar(¬ice, "notice", false, "send as notice")
flag.BoolVar(¬ice, "n", false, "send as notice")
flag.BoolVar(&useAuth, "use_auth", false, "use auth from pit")
flag.BoolVar(&useAuth, "u", false, "use auth from pit")
flag.Parse()
if os.Getenv("NOPASTE") != "" {
Endpoint = os.Getenv("NOPASTE")
}
// get password if use_auth specified
if useAuth {
u, err := url.Parse(Endpoint)
if err != nil {
panic(err)
}
profile, err := pit.Get(u.Host, pit.Requires{"username": "username on nopaste", "password": "password on nopaste"})
if err != nil {
log.Fatal(err)
}
tmp_user, ok := (*profile)["username"]
if !ok {
log.Fatal("password is not found")
}
username = tmp_user
tmp_pass, ok := (*profile)["password"]
if !ok {
log.Fatal("password is not found")
}
password = tmp_pass
}
bytes, err := ioutil.ReadAll(os.Stdin)
values := make(url.Values)
values.Set("text", string(bytes))
if len(summary) > 0 {
values.Set("summary", summary)
}
if len(channel) > 0 {
values.Set("channel", "#"+channel)
}
if notice {
values.Set("notice", "1")
} else {
values.Set("notice", "0")
}
request, err := http.NewRequest("POST", Endpoint, strings.NewReader(values.Encode()))
request.ParseForm()
request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if len(username) > 0 {
request.SetBasicAuth(username, password)
}
client := new(http.Client)
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
// goのhttpはデフォルトでリダイレクトを読みに行って
// しかもAuth Headerを付けずに401で死ぬやつなのでここで
// リダイレクトポリシーを設定して差し止める
os.Stdout.WriteString("Nopaste URL: " + req.URL.String() + "\n")
return errors.New("")
}
response, err := client.Do(request)
if err != nil {
// log.Fatal(err)
return
}
defer response.Body.Close()
}
|
package lc
// Time: O(n)
// Benchmark: 124ms 7.9mb | 88% 61%
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// walk performs an in-order traversal, adding each node's value into the
// per-depth accumulator slice (index = depth), growing it as needed.
func walk(node *TreeNode, depth int, sums *[]int) {
	if node == nil {
		return
	}
	for depth >= len(*sums) {
		*sums = append(*sums, 0)
	}
	walk(node.Left, depth+1, sums)
	(*sums)[depth] += node.Val
	walk(node.Right, depth+1, sums)
}

// maxLevelSum returns the 1-based level whose node values have the largest sum.
func maxLevelSum(root *TreeNode) int {
	var sums []int
	walk(root, 0, &sums)
	best, bestLevel := 0, 0
	for i, total := range sums {
		if total > best {
			best, bestLevel = total, i
		}
	}
	return bestLevel + 1
}
|
package main
import (
"errors"
"fmt"
)
// Day holds one date's sunrise and sunset times.
type Day struct {
	Date    string
	Sunrise string
	Sunset  string
}

// findDay looks up the schedule entry whose Date equals today; it returns an
// error when no entry matches.
func findDay(days []Day, today string) (Day, error) {
	for i := range days {
		if days[i].Date == today {
			return days[i], nil
		}
	}
	return Day{}, errors.New(fmt.Sprintf("Could not find entry for '%s' in config.json", today))
}
|
package entity
// Nodes is a list of graph nodes.
type Nodes []Node

// Exist reports whether a node with the given id is present in the list.
func (n Nodes) Exist(id string) bool {
	for i := range n {
		if n[i].Data.Id == id {
			return true
		}
	}
	return false
}

// Node is a single graph node in the cytoscape-style {"data": {...}} shape.
type Node struct {
	Data struct {
		Id     string `json:"id"`
		Parent string `json:"parent,omitempty"`
	} `json:"data"`
}

// NewNode builds a Node with the given id and parent id.
func NewNode(id, parent string) Node {
	var node Node
	node.Data.Id = id
	node.Data.Parent = parent
	return node
}
|
package client_relay
//
// Copyright (c) 2019 ARM Limited.
//
// SPDX-License-Identifier: MIT
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
import (
"bufio"
"encoding/json"
"errors"
"io"
"strings"
"devicedb/transport"
)
type UpdateIterator interface {
// Move to the next result. Returns
// false if there is an error or if
// there are no more results to iterate
// through. If there is an error, the
// Error() function will return the
// error that occurred
Next() bool
// Return the next update
Update() Update
// Return the error that occurred
// while iterating
Error() error
}
type StreamedUpdateIterator struct {
reader io.ReadCloser
scanner *bufio.Scanner
closed bool
err error
update Update
}
// Next advances the iterator by reading the next "data: <payload>" line from
// the server-sent-event style stream (each data line is followed by a blank
// separator line). It returns false at end of stream, after Close, or on a
// protocol/decode error, in which case Error() reports the cause.
func (iter *StreamedUpdateIterator) Next() bool {
	if iter.closed {
		return false
	}

	// Lazily create the line scanner on first use.
	if iter.scanner == nil {
		iter.scanner = bufio.NewScanner(iter.reader)
	}

	// data: %s line
	if !iter.scanner.Scan() {
		if iter.scanner.Err() != nil {
			iter.err = iter.scanner.Err()
		}
		iter.close()
		return false
	}

	if !strings.HasPrefix(iter.scanner.Text(), "data: ") {
		// protocol error.
		iter.err = errors.New("Protocol error")
		iter.close()
		return false
	}

	encodedUpdate := iter.scanner.Text()[len("data: "):]

	if encodedUpdate == "" {
		// this is a marker indicating the last of the initial
		// pushes of missed messages
		iter.update = Update{}
	} else {
		var update transport.TransportRow

		if err := json.Unmarshal([]byte(encodedUpdate), &update); err != nil {
			iter.err = err
			iter.close()
			return false
		}

		iter.update = Update{
			Key:      update.Key,
			Serial:   update.LocalVersion,
			Context:  update.Context,
			Siblings: update.Siblings,
		}
	}

	// consume newline between "data: %s" lines
	if !iter.scanner.Scan() {
		if iter.scanner.Err() != nil {
			iter.err = iter.scanner.Err()
		}
		iter.close()
		return false
	}

	return true
}
// close releases the underlying reader and clears the current update; the
// iterator yields no further results afterwards.
func (iter *StreamedUpdateIterator) close() {
	iter.update = Update{}
	iter.closed = true
	iter.reader.Close()
}

// Update returns the update produced by the most recent successful Next call.
func (iter *StreamedUpdateIterator) Update() Update {
	return iter.update
}

// Error returns the error that terminated iteration, if any.
func (iter *StreamedUpdateIterator) Error() error {
	return iter.err
}
/**
*
* @author nghiatc
* @since Dec 6, 2019
*/
package main
import (
"fmt"
"github.com/congnghia0609/ntc-gconf/nconf"
"github.com/congnghia0609/ntc-gnats/npub"
"github.com/congnghia0609/ntc-gnats/nreq"
"log"
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
)
// InitNConf initializes ntc-gconf using the directory containing this source
// file (derived via runtime.Caller) as the configuration root.
func InitNConf() {
	_, b, _, _ := runtime.Caller(0)
	wdir := filepath.Dir(b)
	fmt.Println("wdir:", wdir)
	nconf.Init(wdir)
}
/** https://github.com/nats-io/nats.go */
/**
* cd ~/go-projects/src/ntc-gnats
* go run main.go
*/
func main() {
// Init NConf
InitNConf()
//// Start Simple Subscriber
for i := 0; i < 2; i++ {
StartSimpleSubscriber()
}
// Start Simple Worker
for i := 0; i < 2; i++ {
StartSimpleWorker()
}
// Start Simple Response
for i := 0; i < 2; i++ {
StartSimpleResponse()
}
////// Publish
//// Case 1: PubSub.
////// Cach 1.1.
//name := "notify"
//subj := "msg.test"
//for i:=0; i<10; i++ {
// msg := "hello " + strconv.Itoa(i)
// npub.Publish(name, subj, msg)
// log.Printf("Published PubSub[%s] : '%s'\n", subj, msg)
//}
//// Cach 1.2.
name := "notify"
subj := "msg.test"
np := npub.GetInstance(name)
for i := 0; i < 10; i++ {
msg := "hello " + strconv.Itoa(i)
np.Publish(subj, msg)
log.Printf("Published PubSub[%s] : '%s'\n", subj, msg)
}
//// Case 2: Queue Group.
namew := "notify"
subjw := "worker.email"
npw := npub.GetInstance(namew)
for i := 0; i < 10; i++ {
msg := "hello " + strconv.Itoa(i)
npw.Publish(subjw, msg)
log.Printf("Published QueueWorker[%s] : '%s'\n", subjw, msg)
}
////// Request
////// Cach 1.
//name := "dbreq"
//subj := "reqres"
//for i:=0; i<10; i++ {
// payload := "this is request " + strconv.Itoa(i)
// msg, err := nreq.Request(name, subj, payload)
// if err != nil {
// log.Fatalf("%v for request", err)
// }
// log.Printf("NReq Published [%s] : '%s'", subj, payload)
// log.Printf("NReq Received [%v] : '%s'", msg.Subject, string(msg.Data))
//}
//// Cach 2.
namer := "dbreq"
subjr := "reqres"
nr := nreq.GetInstance(namer)
for i := 0; i < 10; i++ {
payload := "this is request " + strconv.Itoa(i)
msg, err := nr.Request(subjr, payload)
if err != nil {
log.Fatalf("%v for request", err)
}
log.Printf("NReq[%s] Published [%s] : '%s'", nr.Name, subjr, payload)
log.Printf("NReq[%s] Received [%v] : '%s'", nr.Name, msg.Subject, string(msg.Data))
}
// Hang thread Main.
s := make(chan os.Signal, 1)
// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
signal.Notify(s, os.Interrupt)
// Block until we receive our signal.
<-s
log.Println("################# End Main #################")
}
|
package utils
import (
"math/rand"
"time"
)
// RandomString generate random string with lower case alphabets and digits
func RandomString(length int) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz1234567890"
	// A freshly time-seeded source per call, matching the original behavior.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = alphabet[rng.Intn(len(alphabet))]
	}
	return string(out)
}
|
package github
import (
"context"
"errors"
"fmt"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/m-zajac/goprojectdemo/internal/app"
)
// CachedClient wraps github client with caching layer.
type CachedClient struct {
	client        app.GithubClient
	projectsCache *lru.Cache // language -> projectsCacheEntry
	statsCache    *lru.Cache // "name/owner" -> statsCacheEntry
	ttl           time.Duration // maximum age of a cache entry before refetching
}
// NewCachedClient creates new CachedClient instance.
// size is the capacity of each LRU cache and must be positive; ttl bounds how
// long cached entries are served before the wrapped client is queried again.
func NewCachedClient(client app.GithubClient, size int, ttl time.Duration) (*CachedClient, error) {
	if size <= 0 {
		return nil, errors.New("cache size must be greater than 0")
	}
	projectsCache, err := lru.New(size)
	if err != nil {
		return nil, fmt.Errorf("creating lru cache for projects: %w", err)
	}
	statsCache, err := lru.New(size)
	if err != nil {
		return nil, fmt.Errorf("creating lru cache for stats: %w", err)
	}

	return &CachedClient{
		client:        client,
		projectsCache: projectsCache,
		statsCache:    statsCache,
		ttl:           ttl,
	}, nil
}
// ProjectsByLanguage returns projects by given programming language name.
// A cached result is served when a previous query for the same language asked
// for at least `count` projects and is younger than ttl; otherwise the wrapped
// client is queried and the cache entry replaced.
func (c *CachedClient) ProjectsByLanguage(ctx context.Context, language string, count int) ([]app.Project, error) {
	key := c.projectsCacheKey(language)
	val, ok := c.projectsCache.Get(key)
	if ok {
		entry := val.(projectsCacheEntry)
		if entry.count >= count && entry.created.Add(c.ttl).After(time.Now()) {
			projects := entry.data
			// The cached entry may hold more projects than requested; trim.
			if len(projects) > count {
				projects = projects[:count]
			}
			return projects, nil
		}
	}

	projects, err := c.client.ProjectsByLanguage(ctx, language, count)
	if err != nil {
		return projects, err
	}

	entry := projectsCacheEntry{
		created: time.Now(),
		count:   count,
		data:    projects,
	}
	c.projectsCache.Add(key, entry)

	return projects, nil
}
// StatsByProject returns stats by given github project params.
// A cached result younger than ttl is served; otherwise the wrapped client is
// queried and the cache entry replaced.
func (c *CachedClient) StatsByProject(ctx context.Context, name string, owner string) ([]app.ContributorStats, error) {
	key := c.statsCacheKey(name, owner)
	val, ok := c.statsCache.Get(key)
	if ok {
		entry := val.(statsCacheEntry)
		if entry.created.Add(c.ttl).After(time.Now()) {
			return entry.data, nil
		}
	}

	stats, err := c.client.StatsByProject(ctx, name, owner)
	if err != nil {
		return stats, err
	}

	entry := statsCacheEntry{
		created: time.Now(),
		data:    stats,
	}
	c.statsCache.Add(key, entry)

	return stats, nil
}
// projectsCacheKey builds the projects-cache key (the language itself).
func (c *CachedClient) projectsCacheKey(language string) string {
	return language
}

// statsCacheKey builds the stats-cache key in the form "name/owner".
func (c *CachedClient) statsCacheKey(name string, owner string) string {
	return name + "/" + owner
}
// projectsCacheEntry is the value stored in the projects cache.
type projectsCacheEntry struct {
	created time.Time     // fetch time; anchor for the TTL check
	count   int           // count requested when the entry was fetched
	data    []app.Project // fetched projects (may exceed a later, smaller request)
}
// statsCacheEntry is the value stored in the stats cache.
type statsCacheEntry struct {
	created time.Time // fetch time; anchor for the TTL check
	data    []app.ContributorStats
}
|
package leetcode_go
var pre99, q, p *TreeNode
// recoverTree restores a binary search tree in which exactly two nodes'
// values were swapped by mistake (LeetCode 99), by locating the out-of-order
// pair during an in-order traversal and swapping their values back.
func recoverTree(root *TreeNode) {
	pre99, q, p = nil, nil, nil
	traverse(root)
	// Guard against inputs with no out-of-order pair (nil tree or an
	// already-valid BST): the original swapped unconditionally and would
	// panic on a nil dereference.
	if q != nil && p != nil {
		q.Val, p.Val = p.Val, q.Val
	}
}
// traverse performs an in-order walk of the tree, recording in q the first
// node that precedes a smaller value and in p the most recent node that
// breaks ascending order; pre99 tracks the previously visited node.
func traverse(root *TreeNode) {
	if root == nil {
		return
	}
	traverse(root.Left)
	// BUG FIX: the original compared against an undefined variable `pre`;
	// the previous in-order node is stored in pre99.
	if pre99 != nil && root.Val < pre99.Val {
		if q == nil {
			q = pre99
		}
		p = root
	}
	pre99 = root
	traverse(root.Right)
}
|
package swaggerT
import (
	"reflect"
	"sort"
	"strings"

	"github.com/go-openapi/jsonreference"
	"github.com/go-openapi/spec"
	"github.com/ltto/gobox/ref"
)
// schema is a registry of generated swagger schema definitions.
type schema struct {
	Map    map[string]spec.Schema  // inline definitions, keyed by Key.FullName()
	RefMap map[string]*spec.Schema // $ref schemas pointing into #/definitions, same keys
}
// Key identifies a schema: either an explicit name K, or a reflect.Type t
// together with a map m of substitutions used to resolve interface-typed
// fields to concrete types.
type Key struct {
	K string
	t reflect.Type
	m InterfaceMap
}
// NewKey builds a Key from a type and its interface substitution map.
func NewKey(t reflect.Type, m InterfaceMap) Key {
	return Key{t: t, m: m}
}
// FullName returns a slash-free identifier for the key, suitable for use as
// a swagger definition name. An explicit K overrides the derived name;
// otherwise the name is the type's full name with generic parameters inside
// <...>.
// BUG FIX: the generic parameters were built by ranging over the map, whose
// iteration order is random in Go, so the same Key could yield different
// names across calls (duplicate definitions / broken $refs). The parts are
// now sorted to make the name deterministic.
func (k Key) FullName() string {
	if k.K != "" {
		return strings.ReplaceAll(k.K, "/", "_")
	}
	parts := make([]string, 0, len(k.m))
	for e := range k.m {
		parts = append(parts, strings.ReplaceAll(ref.FullName(k.m[e]), "/", "_"))
	}
	sort.Strings(parts)
	return strings.ReplaceAll(ref.FullName(k.t), "/", "_") + "<" + strings.Join(parts, ",") + ">"
}
// getRef returns the $ref schema registered under k, lazily generating and
// registering the schema (via byT + Set) on first access when the key
// carries a concrete type. Returns nil when nothing could be generated.
// NOTE(review): SchemaMap is package-global mutable state accessed without
// locking — confirm callers run single-threaded.
func (s *schema) getRef(k Key) *spec.Schema {
	key := k.FullName()
	_, ok := s.RefMap[key]
	if !ok && k.t != nil {
		SchemaMap.Set(k, byT(k, ""))
	}
	return s.RefMap[key]
}
// Set registers schema v under the key's full name. Time-based types are
// stored in RefMap as-is; every other type is stored twice: the inline
// definition in Map and a $ref pointer into #/definitions in RefMap.
// Panics if the definition path cannot be turned into a JSON reference.
func (s *schema) Set(k Key, v *spec.Schema) {
	if v == nil {
		return
	}
	key := k.FullName()
	if ref.IsBaseTime(k.t) {
		s.RefMap[key] = v
		return
	}
	s.Map[key] = spec.Schema{SchemaProps: spec.SchemaProps{
		Type:       v.Type,
		Properties: v.Properties,
	}}
	refObj, e := jsonreference.New("#/definitions/" + key)
	if e != nil {
		panic(e)
	}
	s.RefMap[key] = &spec.Schema{SchemaProps: spec.SchemaProps{
		Type: v.Type,
		Ref:  spec.Ref{Ref: refObj},
	}}
}
// SchemaMap is the global registry of generated swagger schemas.
var SchemaMap = schema{
	Map:    map[string]spec.Schema{},
	RefMap: map[string]*spec.Schema{},
}
// SchemaObj is the fallback schema for values with no more specific mapping,
// rendered as a generic object.
var SchemaObj = spec.Schema{SchemaProps: spec.SchemaProps{
	Type:   []string{string(ParaObject)},
	Format: "interface{}",
}}

// SchemaTime is the schema used for time.Time and null.Time values.
var SchemaTime = spec.Schema{SchemaProps: spec.SchemaProps{
	Type:   []string{string(ParaObject)},
	Format: "Time",
}}
// byT maps the type carried by k to a swagger schema. field is the JSON
// property name currently being resolved; it is only consulted for
// interface-typed fields, where k.m supplies the concrete type to
// substitute. Pointer types are dereferenced first; null.* and time.Time
// are matched by type name before the generic kind-based dispatch.
// NOTE(review): "null.Bool" etc. presumably refer to a nullable-wrapper
// package — confirm against the project's imports.
func byT(k Key, field string) *spec.Schema {
	t := k.t
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	switch t.String() {
	case "null.Bool":
		return getSchemaBse(reflect.TypeOf(true))
	case "null.Int":
		return getSchemaBse(reflect.TypeOf(int64(0)))
	case "null.Float":
		return getSchemaBse(reflect.TypeOf(float64(0.1)))
	case "null.Time", "time.Time":
		return &SchemaTime
	case "null.String":
		return getSchemaBse(reflect.TypeOf("string"))
	default:
		switch t.Kind() {
		case reflect.Bool:
			return getSchemaBse(t)
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			return getSchemaBse(t)
		case reflect.Float32, reflect.Float64:
			return getSchemaBse(t)
		case reflect.String:
			return getSchemaBse(t)
		case reflect.Slice:
			return getSchemaArr(NewKey(t, k.m))
		case reflect.Struct:
			return getSchemaStruct(NewKey(t, k.m))
		case reflect.Interface:
			// Interface fields carry no concrete type; look one up in the
			// substitution map under the field's name.
			if k.m != nil {
				if i, ok := k.m[field]; ok {
					if i.Kind() == reflect.Ptr {
						i = i.Elem()
					}
					switch i.Kind() {
					case reflect.Interface, reflect.Struct:
						return getSchemaStruct(NewKey(i, k.m))
					case reflect.Slice:
						return getSchemaArr(NewKey(i, k.m))
					}
				}
			}
		}
		// Anything unmatched (maps, chans, unresolved interfaces, ...)
		// falls back to the generic object schema.
		return &SchemaObj
	}
}
// getSchemaStruct builds an object schema for the struct type in k,
// recursing into every field via byT. Panics when k does not hold a struct
// (after dereferencing a pointer).
func getSchemaStruct(k Key) *spec.Schema {
	schema := spec.Schema{}
	t := k.t
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	if t.Kind() != reflect.Struct {
		panic("滚蛋")
	}
	schema.Properties = make(map[string]spec.Schema)
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		ft := field.Type
		if ft.Kind() == reflect.Ptr {
			ft = ft.Elem()
		}
		// BUG FIX: the original used the raw json tag as the property
		// name, so `json:"name,omitempty"` produced "name,omitempty".
		// Use only the part before the first comma, and honor `json:"-"`
		// (field excluded from JSON output).
		name := field.Tag.Get("json")
		if idx := strings.Index(name, ","); idx >= 0 {
			name = name[:idx]
		}
		if name == "-" {
			continue
		}
		if name == "" {
			name = field.Name
		}
		b := byT(NewKey(ft, k.m), name)
		if b != nil {
			schema.Properties[name] = *b
		}
	}
	return &schema
}
// getSchemaBse builds a primitive schema (swagger type + format) for the
// base type t.
func getSchemaBse(t reflect.Type) *spec.Schema {
	pt, format := getParamType(t)
	return &spec.Schema{SchemaProps: spec.SchemaProps{
		Type:   []string{string(pt)},
		Format: format,
	}}
}
// getSchemaArr builds an array schema whose items schema is derived from
// the element type of the slice carried by k.
func getSchemaArr(k Key) *spec.Schema {
	t := k.t
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	items := spec.SchemaOrArray{Schema: byT(NewKey(t.Elem(), k.m), "")}
	return &spec.Schema{SchemaProps: spec.SchemaProps{
		Type:  []string{string(ParaArray)},
		Items: &items,
	}}
}
// getParamType maps a Go type to its swagger parameter type plus a format
// string (the Go type name). Pointers are dereferenced first; the null.*
// wrappers and time.Time are special-cased by full type name before the
// kind-based dispatch.
func getParamType(t reflect.Type) (paramType ParamType, Format string) {
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	name := t.Name()
	switch t.String() {
	case "null.Bool":
		return ParaBoolean, name
	case "null.Int":
		return ParaInteger, name
	case "null.Float":
		return ParaNumber, name
	case "null.Time", "time.Time":
		return ParaObject, name
	case "null.String":
		return ParaString, name
	}
	switch t.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return ParaInteger, name
	case reflect.String:
		return ParaString, name
	case reflect.Float32, reflect.Float64:
		return ParaNumber, name
	case reflect.Bool:
		return ParaBoolean, name
	case reflect.Slice:
		return ParaArray, name
	default:
		return ParaObject, name
	}
}
|
package acceptance_test
import (
"github.com/d11wtq/bijou/runtime"
"testing"
)
func TestEq(t *testing.T) {
AssertRunEqual(t, "(=)", runtime.True)
AssertRunEqual(t, "(= 42)", runtime.True)
AssertRunEqual(t, "(= 42 7)", runtime.False)
AssertRunEqual(t, "(= 42 42 42)", runtime.True)
AssertRunEqual(t, "(= 42 7 42 42)", runtime.False)
}
func TestGt(t *testing.T) {
AssertRunEqual(t, "(>)", runtime.True)
AssertRunEqual(t, "(> 42)", runtime.True)
AssertRunEqual(t, "(> 42 7)", runtime.True)
AssertRunEqual(t, "(> 42 41 40)", runtime.True)
AssertRunEqual(t, "(> 42 7 41 40)", runtime.False)
}
func TestLt(t *testing.T) {
AssertRunEqual(t, "(<)", runtime.True)
AssertRunEqual(t, "(< 42)", runtime.True)
AssertRunEqual(t, "(< 7 42)", runtime.True)
AssertRunEqual(t, "(< 40 41 42)", runtime.True)
AssertRunEqual(t, "(< 40 41 7 42)", runtime.False)
}
|
package server
import "log"
// Session pairs a session id with one participating peer; it is the message
// type sent over the SessionManager's register/unregister channels.
type Session struct {
	Id   string
	Peer *Peer
}
// SessionManager serializes all access to the session/peer registry through
// two channels consumed by a single run goroutine, so no locking is needed.
type SessionManager struct {
	register   chan Session
	unregister chan Session
}
// NewSessionManager creates a SessionManager and starts its event loop in a
// background goroutine. The goroutine runs for the life of the process —
// there is no shutdown mechanism.
func NewSessionManager() *SessionManager {
	m := &SessionManager{
		register:   make(chan Session),
		unregister: make(chan Session),
	}
	go m.run()
	return m
}
// Register adds the session's peer to its session, blocking until the run
// loop accepts the request.
func (s *SessionManager) Register(session Session) {
	s.register <- session
}
// Unregister removes the session's peer from its session, blocking until
// the run loop accepts the request.
func (s *SessionManager) Unregister(session Session) {
	s.unregister <- session
}
// run is the manager's event loop: it owns the sessions map (session id ->
// peers) and serializes every register/unregister mutation. It never
// returns.
func (s *SessionManager) run() {
	sessions := make(map[string][]*Peer)
	for {
		select {
		case session := <-s.register:
			peers, exist := sessions[session.Id]
			if !exist {
				// First peer in a new session.
				sessions[session.Id] = []*Peer{session.Peer}
				log.Printf("%s joined, total peers %+v \n", session.Peer.Id, sessions)
				continue
			}
			if peerExist(peers, session.Peer.Id) {
				// Duplicate registration; ignore silently.
				continue
			}
			// Introduce the new peer before appending so it is not
			// connected to itself.
			notifyPeers(peers, session.Peer)
			peers = append(peers, session.Peer)
			sessions[session.Id] = peers
			log.Printf("%s joined, total peers %+v \n", session.Peer.Id, sessions)
		case session := <-s.unregister:
			peers, exist := sessions[session.Id]
			if exist {
				for i, p := range peers {
					if p.Id == session.Peer.Id {
						peers = append(peers[:i], peers[i+1:]...)
						p.Conn.Close()
						// BUG FIX: stop after the first match. The
						// original kept ranging over the slice it had
						// just mutated in place, which can skip or
						// re-visit shifted elements. Ids are unique
						// (register refuses duplicates), so one match
						// is all there is.
						break
					}
				}
				sessions[session.Id] = peers
				if len(peers) == 0 {
					delete(sessions, session.Id)
				}
			}
			log.Printf("%s left, remaining peers %+v \n", session.Peer.Id, sessions)
		}
	}
}
// notifyPeers introduces a newly joined peer to every existing peer in the
// session, wiring connections in both directions.
func notifyPeers(peers []*Peer, peer *Peer) {
	for i := range peers {
		existing := peers[i]
		// let others know about the new peer
		existing.Connect(peer)
		// let the new peer know about others
		peer.Connect(existing)
	}
}
// peerExist reports whether a peer with the given id is present in peers.
func peerExist(peers []*Peer, id string) bool {
	for i := range peers {
		if peers[i].Id == id {
			return true
		}
	}
	return false
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.