text stringlengths 11 4.05M |
|---|
package mailmanv2
import (
"log"
"time"
"io/ioutil"
apns "github.com/joekarl/go-libapns"
)
// MAX_BUFFERED_MESSAGES caps how many recently-sent payloads a worker
// retains per APNS key so an error callback can replay the offending one.
const (
MAX_BUFFERED_MESSAGES = 100
)
var (
// error_handlers maps an APNS error name (e.g. "INVALID_TOKEN") to the
// callback registered via AddErrorHandler.
error_handlers = make(map[string]errorhandler)
// apns_keys lists every configured APNS key name; NewApns appends here.
apns_keys = make([]string, 0)
)
//
// Worker structure
//
// Worker pulls WorkRequests from a shared pool queue and pushes APNS
// payloads over per-key connections, buffering recent payloads for
// error replay.
type Worker struct {
Id int `json:"id"`
Work chan *WorkRequest `json:"-"` // incoming work for this worker
WorkerQueue chan chan *WorkRequest `json:"-"` // pool queue; the worker re-registers Work here each loop
Quit chan bool `json:"-"` // signal channel requesting shutdown
Done chan bool `json:"-"` // closed once shutdown has completed
Status string `json:"status"`
Handled int64 `json:"num_requests"`
Online int64 `json:"last_restart"` // unix seconds of the last Start()
// Parallel maps, not good (TODO)
Apns_cons map[string]*apns.APNSConnection `json:"-"` // live APNS connections, keyed by config key
payload_buffer map[string]*PayloadBuffer `json:"-"` // recent payloads kept for error handling
}
//
// Add an error handling function
//
// AddErrorHandler registers f under the given APNS error name; ErrorListen
// dispatches the failed payload to it when that error occurs.
// NOTE(review): not safe for concurrent registration — no lock on the map.
func AddErrorHandler(name string, f errorhandler) {
error_handlers[name] = f
}
//
// Create a new worker
//
// NewWorker builds a worker wired to the given pool queue and seeds one
// nil connection plus an errored payload buffer for every known APNS key,
// so the first Send on each key lazily opens the connection.
func NewWorker(id int, workerQueue chan chan *WorkRequest) *Worker {
	worker := &Worker{
		Id:             id,
		Work:           make(chan *WorkRequest),
		WorkerQueue:    workerQueue,
		Quit:           make(chan bool),
		Done:           make(chan bool),
		Apns_cons:      make(map[string]*apns.APNSConnection),
		payload_buffer: make(map[string]*PayloadBuffer),
	}
	for _, apnsKey := range apns_keys {
		// No connection yet; error=true forces Send to call OpenAPNS first.
		worker.Apns_cons[apnsKey] = nil
		worker.payload_buffer[apnsKey] = &PayloadBuffer{
			buffer:        make([]*apns.Payload, 0),
			buffer_offset: 1,
			error:         true,
		}
	}
	return worker
}
//
// Map a pair of config values to apns key/cert pair
// TODO explicar
//
// NewApns registers a new APNS config key globally and installs a blank
// (nil) connection plus an errored payload buffer on every running worker,
// so each worker opens the connection on first Send.
// NOTE(review): this mutates workers' maps while their goroutines may be
// reading them — looks racy; confirm callers only invoke this at startup.
func NewApns(key string) {
// APNS key to the config document
// For all workers currently running
apns_keys = append(apns_keys, key)
for _, w := range workers {
// Set a blank connection and set its error to true
w.Apns_cons[key] = nil
w.payload_buffer[key] = &PayloadBuffer{
buffer: make([]*apns.Payload, 0),
buffer_offset: 1,
error: true,
}
}
}
//
// Load apns settings
// TODO MAKE USEFUL DOCS HERE MAN
//
// OpenAPNS opens an APNS connection for the given config key. It expects
// Config[key+"_key"] and Config[key+"_cert"] to name PEM files, with
// Config[key+"_gate"] optionally overriding the default Apple production
// gateway. On success the connection is stored in w.Apns_cons[key], a
// fresh payload buffer replaces the old one, and an error-listener
// goroutine is started.
//
// FIX: removed the unreachable `return` statements that followed each
// log.Fatal (log.Fatal exits the process, so they were dead code), and
// inverted the config check into a guard clause.
// NOTE(review): log.Fatal kills the whole service on a single bad
// key/cert — confirm that is the intended failure mode.
func (w *Worker) OpenAPNS(key string) {
	_, ok1 := Config[key+"_key"]
	_, ok2 := Config[key+"_cert"]
	if !ok1 || !ok2 {
		log.Printf("No such key %s found in config\n", key)
		return
	}
	log.Printf("Opening APNS connection %s for worker %d\n", key, w.Id)
	// Load cert/key PEM material from the paths named in Config.
	certPem, err := ioutil.ReadFile(Config[key+"_cert"])
	if err != nil {
		log.Fatal(err)
	}
	keyPem, err := ioutil.ReadFile(Config[key+"_key"])
	if err != nil {
		log.Fatal(err)
	}
	// Default to the production gateway unless the config overrides it.
	gateway := "gateway.push.apple.com"
	if ngate, ok := Config[key+"_gate"]; ok {
		gateway = ngate
	}
	conn, err := apns.NewAPNSConnection(&apns.APNSConfig{
		CertificateBytes: certPem,
		KeyBytes:         keyPem,
		GatewayHost:      gateway,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Publish the connection and a clean buffer, then watch for errors.
	w.Apns_cons[key] = conn
	w.payload_buffer[key] = &PayloadBuffer{
		buffer:        make([]*apns.Payload, 0),
		buffer_offset: 1,
		error:         false,
	}
	go w.ErrorListen(key)
}
//
// Listen for apns errors on a specific connection
//
// ErrorListen blocks on the connection's CloseChannel for the given key
// and, when the connection closes with an APNS error, dispatches the
// buffered payload that triggered it to the registered handler for that
// error name. On any exit the payload buffer is flagged errored so the
// next Send reopens the connection.
//
// FIX: the INVALID_TOPIC_SIZE case (code 6) previously dispatched
// handle("INVALID_TOKEN_SIZE") — a copy-paste bug; it now dispatches
// "INVALID_TOPIC_SIZE".
func (w *Worker) ErrorListen(key string) {
	if _, ok := w.payload_buffer[key]; !ok {
		log.Printf("No such key %s\n", key)
		return
	}
	// If we make it to the end of the function there was an error.
	defer func() {
		w.payload_buffer[key].error = true
	}()
	// Block until the connection closes; a nil Error means a clean close.
	cc, ok := <-w.Apns_cons[key].CloseChannel
	if !ok || cc.Error == nil {
		return
	}
	// handle looks up the payload the error refers to (MessageID is offset
	// by buffer_offset into the sliding buffer) and invokes the handler.
	// NOTE(review): idx is unsigned, so `idx >= 0` is always true; an
	// underflow would wrap — the upper-bound check is what protects us.
	handle := func(code string) {
		eh, ok := error_handlers[code]
		if !ok {
			log.Println("No handler for", code)
			return
		}
		log.Println("Handling", code)
		pb := w.payload_buffer[key]
		if idx := cc.Error.MessageID - pb.buffer_offset; idx >= 0 && idx < uint32(len(pb.buffer)) {
			eh(pb.buffer[idx])
		} else {
			log.Println("MessageID out of bounds", idx, len(pb.buffer))
		}
	}
	// Map the numeric APNS error code to its handler name.
	switch cc.Error.ErrorCode {
	case 251:
		log.Println("EOF")
		handle("EOF")
		// The peer hung up; drop the connection explicitly.
		w.Apns_cons[key].Disconnect()
	case 1:
		log.Println("PROCESSING_ERROR")
		handle("PROCESSING_ERROR")
	case 2:
		log.Println("MISSING_DEVICE_TOKEN")
		handle("MISSING_DEVICE_TOKEN")
	case 3:
		log.Println("MISSING_TOPIC")
		handle("MISSING_TOPIC")
	case 4:
		log.Println("MISSING_PAYLOAD")
		handle("MISSING_PAYLOAD")
	case 5:
		log.Println("INVALID_TOKEN_SIZE")
		handle("INVALID_TOKEN_SIZE")
	case 6:
		log.Println("INVALID_TOPIC_SIZE")
		handle("INVALID_TOPIC_SIZE")
	case 7:
		log.Println("INVALID_PAYLOAD_SIZE")
		handle("INVALID_PAYLOAD_SIZE")
	case 8:
		log.Println("INVALID_TOKEN")
		handle("INVALID_TOKEN")
	}
}
//
// Place last sent payload into a buffer to reference in case of error
//
// bufferPayload records the most recently sent payload for the given key
// so ErrorListen can replay it if APNS reports an error. The buffer is a
// sliding window of at most MAX_BUFFERED_MESSAGES entries; buffer_offset
// tracks how many entries have been evicted from the front.
func (w *Worker) bufferPayload(key string, payload *apns.Payload) {
	pb, ok := w.payload_buffer[key]
	if !ok {
		return
	}
	pb.buffer = append(pb.buffer, payload)
	if len(pb.buffer) <= MAX_BUFFERED_MESSAGES {
		return
	}
	// Evict the oldest entry and advance the offset to keep MessageID
	// arithmetic in ErrorListen consistent.
	pb.buffer = pb.buffer[1:]
	pb.buffer_offset++
}
//
// Send over the channel opening if required
//
// Send pushes a payload over the APNS connection for the given key,
// lazily (re)opening the connection if the buffer is flagged errored,
// and records the payload for error replay.
// NOTE(review): if OpenAPNS fails to install a connection,
// w.Apns_cons[key] stays nil and the channel send below panics — the
// worker loop's recover() presumably absorbs this; confirm.
func (w *Worker) Send(key string, payload *apns.Payload) {
log.Println("Sending", key)
if pb, ok := w.payload_buffer[key]; ok {
// Check for error and reopen if required
if pb.error {
// Clear the flag before reopening so only one reopen is attempted.
pb.error = false
w.OpenAPNS(key)
}
// Send message and buffer it
w.Apns_cons[key].SendChannel <- payload
w.bufferPayload(key, payload)
} else {
log.Printf("Cannot send to channel located at key %s\n", key)
log.Println(w.payload_buffer)
}
}
//
// Start worker routine
//
// Start registers the worker on the shared WaitGroup and launches its
// main loop: re-queue on the pool, wait for either a WorkRequest or a
// quit signal, dispatch work to the matching endpoint handler, and on
// quit tear down channels and APNS connections.
//
// FIX: previously `defer wg.Done()` was registered in Start itself (and
// wg.Add(1) ran after the defer), so Done fired the moment Start
// returned and the WaitGroup never tracked the worker goroutine. Add now
// happens before the goroutine launches and Done runs when it exits.
func (w *Worker) Start() {
	wg.Add(1)
	// Setup
	w.Online = time.Now().Unix()
	go func() {
		defer wg.Done()
		for {
			log.Println("Queueing worker", w.Id)
			w.Status = "Queued"
			w.WorkerQueue <- w.Work
			select {
			case wr := <-w.Work:
				// Run each request in a closure so a deferred recover can
				// absorb panics and the loop re-queues the worker.
				func() {
					w.Status = "Working"
					w.Handled++
					defer func() {
						if r := recover(); r != nil {
							log.Println("Worker", w.Id, "recovered", r)
						}
					}()
					// Check for endpoint and call function
					if fn, ok := endpoints[wr.Endpoint]; ok {
						log.Println("Worker", w.Id, "Starting work")
						fn(wr, w)
					} else {
						log.Println("No such endpoint", wr.Endpoint)
					}
				}()
			case <-w.Quit:
				log.Printf("Worker %d shutting down\n", w.Id)
				close(w.Work) // Close our worker workrequest channel
				for _, con := range w.Apns_cons {
					if con != nil {
						con.Disconnect()
					}
				}
				close(w.Done)
				return
			}
		}
	}()
}
|
package appdata
import (
"github.com/project-flogo/core/activity"
"github.com/project-flogo/core/app"
"github.com/project-flogo/core/support/test"
"testing"
"github.com/stretchr/testify/assert"
)
// TestRegister verifies the activity registered itself with the flogo
// activity registry and can be resolved by its ref.
func TestRegister(t *testing.T) {
ref := activity.GetRef(&Activity{})
act := activity.Get(ref)
assert.NotNil(t, act)
}
// TestSet verifies that the "set" op stores the activity's "value" input
// under the configured app-data name.
//
// FIX: removed the `t.Failed()` call in the recover handler — it only
// *queries* failure state (its result was discarded); t.Errorf below
// already marks the test failed.
func TestSet(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			t.Errorf("panic during execution: %v", r)
		}
	}()
	settings := &Settings{Name: "test", Op: "set"}
	iCtx := test.NewActivityInitContext(settings, nil)
	act, err := New(iCtx)
	assert.Nil(t, err)
	tc := test.NewActivityContext(act.Metadata())
	tc.SetInput("value", "foo")
	_, err = act.Eval(tc)
	assert.Nil(t, err)
	appValue, _ := app.GetValue("test")
	assert.Equal(t, "foo", appValue)
}
// TestGet verifies that the "get" op reads an app-data value previously
// stored under the configured name.
//
// FIX: removed the no-op `t.Failed()` call (result discarded; t.Errorf
// already fails the test).
func TestGet(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			t.Errorf("panic during execution: %v", r)
		}
	}()
	err := app.SetValue("test", "bar")
	assert.Nil(t, err)
	settings := &Settings{Name: "test", Op: "get"}
	iCtx := test.NewActivityInitContext(settings, nil)
	act, err := New(iCtx)
	assert.Nil(t, err)
	tc := test.NewActivityContext(act.Metadata())
	// NOTE(review): setting an input on a "get" op looks copied from
	// TestSet; the assertion below reads app data, not this input.
	tc.SetInput("value", "bar")
	_, err = act.Eval(tc)
	assert.Nil(t, err)
	appValue, _ := app.GetValue("test")
	assert.Equal(t, "bar", appValue)
}
|
package models
import (
"database/sql"
"github.com/KashEight/not/utils"
"github.com/google/uuid"
"time"
)
// NotePostData is implemented by request payloads that can be converted
// into a NoteContent ready for persistence.
type NotePostData interface {
ConvertToNoteContent() (*NoteContent, error)
}
// PostDataCreateNote is the JSON body for creating a note; ExpiredTime is
// optional (nil means no expiry).
type PostDataCreateNote struct {
Content string `json:"content"`
ExpiredTime *time.Time `json:"expired_time" time_format:""` // Empty string because time_format should be RFC3339 format
}
// ConvertToNoteContent validates the create payload and builds a
// NoteContent with a freshly generated note ID. It returns
// utils.ErrInvalidPostData when both content and expiry are absent.
//
// FIX: the error from uuid.NewRandom was previously discarded; a failure
// would have silently produced a zero UUID.
func (pd *PostDataCreateNote) ConvertToNoteContent() (*NoteContent, error) {
	if pd.Content == "" && pd.ExpiredTime == nil {
		return nil, utils.ErrInvalidPostData
	}
	// A nil ExpiredTime maps to an invalid (NULL) sql.NullTime.
	var expiry sql.NullTime
	if pd.ExpiredTime != nil {
		expiry = sql.NullTime{Time: *pd.ExpiredTime, Valid: true}
	}
	newUUID, err := uuid.NewRandom()
	if err != nil {
		return nil, err
	}
	return &NoteContent{
		NoteID:      newUUID,
		Content:     pd.Content,
		ExpiredTime: expiry,
	}, nil
}
// PostDataUpdateNote is the JSON body for updating a note; ExpiredTime is
// optional (nil means no expiry change is requested — TODO confirm
// against the update handler).
type PostDataUpdateNote struct {
Content string `json:"content"`
ExpiredTime *time.Time `json:"expired_time" time_format:""` // Empty string because time_format should be RFC3339 format
}
// ConvertToNoteContent validates the update payload and builds a
// NoteContent carrying the new content and expiry (no NoteID is set for
// updates). Returns utils.ErrInvalidPostData when both fields are absent.
func (pd *PostDataUpdateNote) ConvertToNoteContent() (*NoteContent, error) {
	content := pd.Content
	expiredAt := pd.ExpiredTime
	if content == "" && expiredAt == nil {
		return nil, utils.ErrInvalidPostData
	}
	// Zero-value NullTime is already invalid; only fill it when an expiry
	// was supplied.
	var expiry sql.NullTime
	if expiredAt != nil {
		expiry.Time = *expiredAt
		expiry.Valid = true
	}
	return &NoteContent{
		Content:     content,
		ExpiredTime: expiry,
	}, nil
}
|
package customer
import (
"net/http"
"github.com/Top-Pattarapol/finalexam/database"
"github.com/gin-gonic/gin"
)
// Handler owns the database handle used by the customer endpoints.
type Handler struct {
database *database.Handler
}
// Init opens the database connection and ensures the customer table
// exists. Must be called before serving requests; pair with Close.
//
// FIX: the local variable was named `database`, shadowing the imported
// `database` package for the rest of the function — assigned directly
// instead.
func (h *Handler) Init() {
	h.database = &database.Handler{}
	h.database.Open()
	h.CreateCustomerTable()
}
// Close releases the underlying database connection opened by Init.
func (h *Handler) Close() {
h.database.Close()
}
// AuthMiddlewere is a gin middleware that rejects any request whose
// Authorization header is not the fixed token, answering 401 with a JSON
// error body; valid requests continue down the chain.
// NOTE(review): the token is hard-coded and the name has a typo
// ("Middlewere"), but both are part of the public surface — left as-is.
func (h *Handler) AuthMiddlewere(c *gin.Context) {
	token := c.GetHeader("Authorization")
	if token == "token2019" {
		c.Next()
		return
	}
	c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": http.StatusText(http.StatusUnauthorized)})
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registrybackend
import (
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"github.com/uber/kraken/core"
"github.com/uber/kraken/lib/backend"
"github.com/uber/kraken/lib/backend/backenderrors"
"github.com/uber/kraken/lib/backend/registrybackend/security"
"github.com/uber/kraken/utils/dockerutil"
"github.com/uber/kraken/utils/httputil"
yaml "gopkg.in/yaml.v2"
)
const _registrytag = "registry_tag"
// init registers this backend under the "registry_tag" name so it can be
// selected from configuration.
func init() {
backend.Register(_registrytag, &tagClientFactory{})
}
// tagClientFactory builds TagClients from raw YAML configuration.
type tagClientFactory struct{}

// Create unmarshals the raw backend config into a Config and returns a
// TagClient for it. authConfRaw is unused by this backend.
//
// FIX: the error messages said "hdfs config" (copied from the hdfs
// backend) and dropped the underlying error; they now name the registry
// tag backend and wrap the cause with %w.
func (f *tagClientFactory) Create(
	confRaw interface{}, authConfRaw interface{}) (backend.Client, error) {

	confBytes, err := yaml.Marshal(confRaw)
	if err != nil {
		return nil, fmt.Errorf("marshal registry tag config: %w", err)
	}
	var config Config
	if err := yaml.Unmarshal(confBytes, &config); err != nil {
		return nil, fmt.Errorf("unmarshal registry tag config: %w", err)
	}
	return NewTagClient(config)
}
// _tagquery is the manifest endpoint template: host, repo, tag.
const _tagquery = "http://%s/v2/%s/manifests/%s"
// TagClient stats and downloads tag from registry.
type TagClient struct {
config Config
authenticator security.Authenticator // resolves per-repo auth options
}
// NewTagClient creates a new TagClient.
// NewTagClient creates a TagClient, applying config defaults and building
// the authenticator used for per-repo registry auth.
func NewTagClient(config Config) (*TagClient, error) {
	cfg := config.applyDefaults()
	auth, err := security.NewAuthenticator(cfg.Address, cfg.Security)
	if err != nil {
		return nil, fmt.Errorf("cannot create tag client authenticator: %s", err)
	}
	client := &TagClient{
		config:        cfg,
		authenticator: auth,
	}
	return client, nil
}
// Stat sends a HEAD request to registry for a tag and returns the manifest size.
// Stat sends a HEAD request to the registry for the manifest behind
// "repo:tag" and returns its size from the Content-Length header.
// Returns backenderrors.ErrBlobNotFound on a 404.
//
// FIX: corrected the "invald" typo in the error message.
func (c *TagClient) Stat(namespace, name string) (*core.BlobInfo, error) {
	tokens := strings.Split(name, ":")
	if len(tokens) != 2 {
		return nil, fmt.Errorf("invalid name %s: must be repo:tag", name)
	}
	repo, tag := tokens[0], tokens[1]
	opts, err := c.authenticator.Authenticate(repo)
	if err != nil {
		return nil, fmt.Errorf("get security opt: %s", err)
	}
	URL := fmt.Sprintf(_tagquery, c.config.Address, repo, tag)
	// 404 is an accepted code so we can map it to ErrBlobNotFound instead
	// of a transport error.
	resp, err := httputil.Head(
		URL,
		append(
			opts,
			httputil.SendHeaders(map[string]string{"Accept": dockerutil.GetSupportedManifestTypes()}),
			httputil.SendAcceptedCodes(http.StatusOK, http.StatusNotFound),
		)...,
	)
	if err != nil {
		return nil, fmt.Errorf("check blob exists: %s", err)
	}
	if resp.StatusCode == http.StatusNotFound {
		return nil, backenderrors.ErrBlobNotFound
	}
	size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
	if err != nil {
		return nil, fmt.Errorf("parse blob size: %s", err)
	}
	return core.NewBlobInfo(size), nil
}
// Download gets the digest for a tag from registry.
// Download fetches the manifest for "repo:tag" from the registry, parses
// it, and writes the manifest digest string to dst. Returns
// backenderrors.ErrBlobNotFound on a 404.
//
// FIX: corrected the "invald" typo in the error message.
func (c *TagClient) Download(namespace, name string, dst io.Writer) error {
	tokens := strings.Split(name, ":")
	if len(tokens) != 2 {
		return fmt.Errorf("invalid name %s: must be repo:tag", name)
	}
	repo, tag := tokens[0], tokens[1]
	opts, err := c.authenticator.Authenticate(repo)
	if err != nil {
		return fmt.Errorf("get security opt: %s", err)
	}
	URL := fmt.Sprintf(_tagquery, c.config.Address, repo, tag)
	// 404 is accepted so it can be mapped to ErrBlobNotFound below.
	resp, err := httputil.Get(
		URL,
		append(
			opts,
			httputil.SendHeaders(map[string]string{"Accept": dockerutil.GetSupportedManifestTypes()}),
			httputil.SendAcceptedCodes(http.StatusOK, http.StatusNotFound),
		)...,
	)
	if err != nil {
		return fmt.Errorf("check blob exists: %s", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return backenderrors.ErrBlobNotFound
	}
	// Only the digest is written out, not the manifest body itself.
	_, digest, err := dockerutil.ParseManifest(resp.Body)
	if err != nil {
		return fmt.Errorf("parse manifest v2: %s", err)
	}
	if _, err := io.Copy(dst, strings.NewReader(digest.String())); err != nil {
		return fmt.Errorf("copy: %s", err)
	}
	return nil
}
// Upload is not supported as users can push directly to registry.
// It always returns a "not supported" error.
func (c *TagClient) Upload(namespace, name string, src io.Reader) error {
return errors.New("not supported")
}
// List is not supported as users can list directly from registry.
// It always returns a "not supported" error.
func (c *TagClient) List(prefix string, opts ...backend.ListOption) (*backend.ListResult, error) {
return nil, errors.New("not supported")
}
|
package meta
import (
"database/sql"
_ "github.com/lib/pq"
)
// PgDomainMetadata contains metadata for a PostgreSQL domain: its schema,
// name, base type information, nullability, and comment.
type PgDomainMetadata struct {
SchemaName string `db:"schema_name"`
ObjName string `db:"obj_name"`
DataType string `db:"data_type"`
TypeName string `db:"type_name"`
TypeCategory string `db:"type_category"`
IsRequired bool `db:"is_required"`
Description string `db:"description"`
}
// GetDomainMetas returns the metadata for the available domains, filtered
// by schema and a comma-separated list of object names (empty strings
// match everything). Each row also registers the domain via
// addUserDomain.
//
// NOTE(review): the user and pgVersion parameters are currently unused —
// kept for interface compatibility; confirm whether callers still need
// them.
//
// FIX: rows.Err() is now checked after iteration — a mid-stream query
// failure was previously silently ignored.
func GetDomainMetas(db *sql.DB, schema, objName, user string, pgVersion int) (d []PgDomainMetadata, err error) {
	var u PgDomainMetadata
	q := `
WITH args AS (
    SELECT $1 AS schema_name,
            regexp_split_to_table ( $2, ', *' ) AS obj_name
)
SELECT n.nspname::text AS schema_name,
        t.typname::text AS obj_name,
        pg_catalog.format_type ( t.typbasetype, t.typtypmod ) AS data_type,
        tc.typname AS type_name,
        tc.typcategory AS type_category,
        t.typnotnull AS is_required,
        coalesce ( d.description, '' ) AS description
    FROM pg_catalog.pg_type t
    JOIN pg_catalog.pg_namespace n
        ON ( n.oid = t.typnamespace )
    JOIN pg_catalog.pg_type tc
        ON ( tc.oid = t.typbasetype )
    LEFT JOIN pg_catalog.pg_description d
        ON ( d.classoid = t.tableoid
            AND d.objoid = t.oid
            AND d.objsubid = 0 )
    CROSS JOIN args
    WHERE t.typtype = 'd'
        AND n.nspname <> 'pg_catalog'
        AND n.nspname <> 'information_schema'
        AND n.nspname !~ '^pg_toast'
        AND ( n.nspname::text = args.schema_name
            OR args.schema_name = '' )
        AND ( t.typname::text = args.obj_name
            OR coalesce ( args.obj_name, '' ) = '' )
`
	rows, err := db.Query(q, schema, objName)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		err = rows.Scan(&u.SchemaName,
			&u.ObjName,
			&u.DataType,
			&u.TypeName,
			&u.TypeCategory,
			&u.IsRequired,
			&u.Description,
		)
		if err != nil {
			return
		}
		d = append(d, u)
		addUserDomain(u.ObjName, u.TypeName)
	}
	// Surface any error that terminated the iteration early.
	err = rows.Err()
	return
}
|
package statsdclient
import (
"strings"
)
// Generates a prefix in the form "environment.app.hostname.", where dots in
// the hostname are replaced with underscores so they don't conflict with stats
// dot namespacing
// MakePrefix builds a statsd prefix of the form "environment.app.host.",
// replacing dots in the hostname with underscores so they do not collide
// with statsd's dot-based namespacing. The result ends with a dot.
func MakePrefix(environment, app, hostname string) string {
	safeHost := strings.ReplaceAll(hostname, ".", "_")
	// Trailing empty element yields the final "." separator.
	return strings.Join([]string{environment, app, safeHost, ""}, ".")
}
|
package rpc
import (
"context"
"encoding/json"
"time"
"github.com/mylxsw/adanos-alert/internal/repository"
"github.com/mylxsw/adanos-alert/rpc/protocol"
"github.com/mylxsw/asteria/log"
"github.com/mylxsw/glacier/infra"
)
// HeartbeatService is a service server for heartbeat
type HeartbeatService struct {
cc infra.Resolver // DI container used to resolve repositories and version info
}
// NewHeartbeatService creates a HeartbeatService backed by the given resolver.
func NewHeartbeatService(cc infra.Resolver) *HeartbeatService {
return &HeartbeatService{cc: cc}
}
// Ping handles an agent heartbeat: it persists the agent's latest status
// snapshot and liveness timestamp, then replies with the server time and
// version.
//
// FIX: the json.Marshal error was silently discarded; it is now logged
// (the heartbeat still proceeds with an empty snapshot rather than being
// dropped).
func (h *HeartbeatService) Ping(ctx context.Context, request *protocol.PingRequest) (*protocol.PongResponse, error) {
	if log.DebugEnabled() {
		log.Debugf("agent heartbeat received, id=%s, ip=%s, version=%s, ts=%v", request.AgentID, request.AgentIP, request.ClientVersion, request.AgentTs)
	}
	h.cc.MustResolve(func(agent repository.AgentRepo) {
		lastStat, err := json.Marshal(request.GetAgent())
		if err != nil {
			log.Errorf("marshal agent stat failed: %v", err)
		}
		if _, err := agent.Update(repository.Agent{
			IP:          request.GetAgentIP(),
			AgentID:     request.GetAgentID(),
			Version:     request.GetClientVersion(),
			LastAliveAt: time.Now(),
			LastStat:    string(lastStat),
		}); err != nil {
			log.WithFields(log.Fields{
				"req": request,
			}).Errorf("agent status update failed: %v", err)
		}
	})
	return &protocol.PongResponse{
		ServerTs:      time.Now().Unix(),
		ServerVersion: h.cc.MustGet(infra.VersionKey).(string),
	}, nil
}
|
package main
import (
"testing"
"gitlab.com/joukehofman/OTSthingy/types"
)
// TestInitVars checks that initVars populates the package-level logger
// and config; both must be non-nil afterwards.
func TestInitVars(t *testing.T) {
initVars()
if logs == nil || cfg == nil {
t.Fail()
}
}
// TestStartGRPC is a smoke test: it only verifies startGRPC does not
// panic when given an empty Requester. No assertions are made.
func TestStartGRPC(t *testing.T) {
startGRPC(&types.Requester{})
}
// TestStartPoller starts the poller, triggers a notify, then flips Abort
// and waits for the abort signal to come back on abortChan.
// NOTE(review): `poller` appears to be a package-level variable set by
// startPoller — confirm; also nothing drains notifyChan, relying on its
// buffer of 5.
func TestStartPoller(t *testing.T) {
abortChan := make(chan bool, 5)
notifyChan := make(chan *types.Request, 5)
startPoller(&types.Requester{}, &types.Notifier{}, abortChan, notifyChan)
go poller.Notify()
poller.Abort = true
// Block until the poller acknowledges the abort.
_ = <-abortChan
}
|
package matrix_test
import (
"fmt"
"testing"
"github.com/carolove/Golang/algorithms/datageneration"
"github.com/carolove/Golang/algorithms/matrix"
)
// TestMatrixMul multiplies two generated 2x2 matrices and prints inputs
// and result for manual inspection.
// NOTE(review): this test asserts nothing — it can never fail except by
// panicking; consider asserting against a known product.
func TestMatrixMul(t *testing.T) {
a := datageneration.GenerationMarix(2)
fmt.Println(a)
b := datageneration.GenerationMarix(2)
fmt.Println(b)
fmt.Println(matrix.MatrixMul(a, b))
}
|
package user
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/Al-un/alun-api/alun/testutils"
"github.com/Al-un/alun-api/alun/utils"
"github.com/Al-un/alun-api/pkg/logger"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// Fixture credentials shared by the user API tests below.
const (
userRegisterEmail = "register@test.com"
userAdminEmail = "admin@test.com"
userAdminUsername = "Admin user"
userAdminPassword = "adminPassword"
userBasicEmail = "basic@test.com"
userBasicUsername = "Basic user"
userBasicPassword = "basicPassword"
)
var (
// apiTester drives HTTP requests against the user API router.
apiTester *testutils.APITester
// userAdmin is an admin fixture with a 24h password-reset token.
userAdmin = User{
BaseUser: BaseUser{Email: userAdminEmail},
IsAdmin: true,
Username: userAdminUsername,
PwdResetToken: pwdResetToken{
Token: "adminToken",
CreatedAt: time.Now(),
ExpiresAt: time.Now().Add(24 * time.Hour),
},
}
// userBasic is a non-admin fixture with a 24h password-reset token.
userBasic = User{
BaseUser: BaseUser{Email: userBasicEmail},
IsAdmin: false,
Username: userBasicUsername,
PwdResetToken: pwdResetToken{
Token: "basicToken",
CreatedAt: time.Now(),
ExpiresAt: time.Now().Add(24 * time.Hour),
},
}
)
// TestMain wraps the package test run with global setup/teardown and
// propagates the test exit code.
func TestMain(m *testing.M) {
setupGlobal()
code := m.Run()
tearDownGlobal()
os.Exit(code)
}
// setupGlobal installs silent/dummy implementations for logging and email
// and builds the API tester around the user router.
func setupGlobal() {
// Dummy implementation
userLogger = logger.NewSilenceLogger()
alunEmail = utils.GetDummyEmail()
// Setup router
apiTester = testutils.NewAPITester(UserAPI)
}
// tearDownGlobal is a placeholder; no global state currently needs cleanup.
func tearDownGlobal() {
}
// Returns userId / User JWT token
// setupUsers creates the given user with the given password and returns
// the created user plus a JWT for it, failing the test on error.
func setupUsers(t *testing.T, user User, password string) (User, string) {
createdUser, jwt, err := SetupUser(user, t.Name(), password)
testutils.Assert(t, testutils.CallFromHelperMethod, err == nil, "Error when setting up users: %v", err)
return createdUser, jwt
}
// Returns adminID, adminJwt, basicID, basicJwt
// setupUserBasicAndAdmin provisions both fixture users and returns
// (admin, adminJwt, basic, basicJwt).
func setupUserBasicAndAdmin(t *testing.T) (User, string, User, string) {
admin, adminJwt := setupUsers(t, userAdmin, userAdminPassword)
basic, basicJwt := setupUsers(t, userBasic, userBasicPassword)
return admin, adminJwt, basic, basicJwt
}
// tearDownBasicAndAdmin deletes both fixture users; deletion errors are
// logged but do not fail the test (best-effort cleanup).
func tearDownBasicAndAdmin(t *testing.T) {
d, err := TearDownUsers([]User{userAdmin, userBasic}, t.Name())
if err != nil {
userLogger.Info("[User] error in user deletion: ", err)
}
fmt.Printf("%s > deleted %d users\n", t.Name(), d)
}
// tearDownLogins removes all login records created for the given user so
// test runs stay isolated; a failed cleanup fails the test.
func tearDownLogins(t *testing.T, userID primitive.ObjectID) {
filter := bson.M{"userId": userID}
_, err := dbUserLoginCollection.DeleteMany(context.TODO(), filter)
if err != nil {
t.Errorf("Error when cleaning login for userID %s\n", userID)
}
}
|
package Longest_Substring_Without_Repeating_Characters
import "testing"
// Test verifies lengthOfLongestSubstring on the classic "abcabcbb" case,
// whose longest substring without repeating characters is "abc" (3).
//
// FIX: the test previously only logged the result and could never fail;
// it now asserts the expected value.
func Test(t *testing.T) {
	got := lengthOfLongestSubstring("abcabcbb")
	if got != 3 {
		t.Errorf("lengthOfLongestSubstring(%q) = %d, want 3", "abcabcbb", got)
	}
}
|
package plugins
import (
"testing"
)
// TestNewConsolePlugin checks the constructor returns a *ConsolePlugin
// behind the plugin interface.
func TestNewConsolePlugin(t *testing.T) {
if _, ok := NewConsolePlugin().(*ConsolePlugin); !ok {
t.Fail()
}
}
// TestConsolePluginConfigure exercises Configure with nil, empty, and
// valid configurations; nil/empty must leave the plugin at its defaults,
// while a name must be recorded.
func TestConsolePluginConfigure(t *testing.T) {
t.Run("configuration is nil", func(t *testing.T) {
c := NewConsolePlugin().(*ConsolePlugin)
expected := NewConsolePlugin().(*ConsolePlugin)
err := c.Configure("", nil)
if err != nil || !c.Equal(expected) { t.Fail() }
})
t.Run("configuration is empty", func(t *testing.T) {
c := NewConsolePlugin().(*ConsolePlugin)
expected := NewConsolePlugin().(*ConsolePlugin)
err := c.Configure("", map[string]interface{}{})
if err != nil || !c.Equal(expected) { t.Fail() }
})
t.Run("configuration is valid", func(t *testing.T) {
c := NewConsolePlugin().(*ConsolePlugin)
expected := &ConsolePlugin{name: "test"}
err := c.Configure("test", map[string]interface{}{})
if err != nil || !c.Equal(expected) { t.Fail() }
})
}
|
package entity
import "time"
// User couples a LINE account with its Google Calendar OAuth credentials
// as stored in the database.
type User struct {
LineID string `db:"line_id"`
CalendarAccessToken string `db:"calendar_access_token"`
CalendarTokenType string `db:"calendar_token_type"`
CalendarRefreshToken string `db:"calendar_refresh_token"`
CalendarExpiry time.Time `db:"calendar_expiry"` // access-token expiry
}
|
package handlers
import (
"fmt"
"strings"
)
// PrintBold writes str to stdout wrapped in the ANSI bold escape
// sequence, without a trailing newline.
func PrintBold(str string) {
	fmt.Print("\033[1m", str, "\033[0m")
}
// MakeBold returns str wrapped in the ANSI bold escape sequence.
func MakeBold(str string) string {
	const boldOn, boldOff = "\033[1m", "\033[0m"
	return boldOn + str + boldOff
}
// PrintSuccess writes str to stdout in bold green (ANSI 32;1), without a
// trailing newline.
func PrintSuccess(str string) {
	fmt.Print("\033[32;1m", str, "\033[0m")
}
// PrintTable renders keys as a bold header row and values as body rows in
// a box-drawing table on stdout. NOTE: values is mutated in place (each
// cell gets a leading space).
//
// FIX: replaced fmt.Print(fmt.Sprintf(...)) with fmt.Printf (gocritic
// S1038); output is unchanged.
func PrintTable(keys []string, values [][]string) {
	sizes := make([]int, len(keys))
	// Define column widths, giving one space before each value.
	for line := 0; line < len(values); line++ {
		row := values[line]
		for column := 0; column < len(row); column++ {
			values[line][column] = fmt.Sprintf(" %s", values[line][column])
			if sizes[column] < len(row[column]) {
				sizes[column] = len(row[column]) + 2
			}
		}
	}
	var totalSize int
	for i := 0; i < len(sizes); i++ {
		// Widen odd columns by one so the title can be centered exactly.
		diff := sizes[i] - len(keys[i])
		if diff%2 != 0 {
			sizes[i]++
		}
		totalSize += sizes[i]
	}
	totalSize += len(keys) + 1
	// Draw first strong line.
	fmt.Println(drawLine("╒", "═", "╕", '╤', totalSize, sizes))
	for i := 0; i < len(keys); i++ {
		if i == 0 {
			fmt.Print("│")
		}
		emptySpace := strings.Repeat(" ", (sizes[i]-len(keys[i]))/2)
		fmt.Printf("\033[1m%s%s%s\033[0m│", emptySpace, keys[i], emptySpace)
		if i == len(keys)-1 {
			fmt.Print("\n")
		}
	}
	// Draw second strong line.
	fmt.Println(drawLine("╞", "═", "╡", '╪', totalSize, sizes))
	for i := 0; i < len(values); i++ {
		for j := 0; j < len(values[i]); j++ {
			if j == 0 {
				fmt.Print("│")
			}
			fmt.Printf("%-*s│", sizes[j], values[i][j])
			if j == len(values[i])-1 {
				fmt.Print("\n")
			}
		}
	}
	// Draw last line.
	fmt.Println(drawLine("└", "─", "┘", '┴', totalSize, sizes))
}
func drawLine(leftEdge string, common string, rightEdge string, division rune, totalSize int, sizes []int) string {
line := fmt.Sprintf("%s%s%s", leftEdge, fmt.Sprintf("%s", strings.Repeat(common, totalSize-2)), rightEdge)
startingAt := 1
for i := 0; i < len(sizes); i++ {
if i != len(sizes)-1 {
line = replaceAtIndex(line, division, startingAt+sizes[i])
startingAt += sizes[i] + 1
}
}
return line
}
// replaceAtIndex returns in with the rune at rune-index i replaced by r.
// Indexing is by rune, not byte, so multi-byte characters count as one
// position; an out-of-range i panics, as in the original.
func replaceAtIndex(in string, r rune, i int) string {
	chars := []rune(in)
	chars[i] = r
	return string(chars)
}
|
//go:build e2e
package cloudnative
import (
"context"
"encoding/json"
"path"
"strconv"
"strings"
"testing"
"github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
dtcsi "github.com/Dynatrace/dynatrace-operator/src/controllers/csi"
"github.com/Dynatrace/dynatrace-operator/src/kubeobjects/address"
"github.com/Dynatrace/dynatrace-operator/src/webhook"
"github.com/Dynatrace/dynatrace-operator/src/webhook/mutation/pod_mutator/oneagent_mutation"
"github.com/Dynatrace/dynatrace-operator/test/csi"
"github.com/Dynatrace/dynatrace-operator/test/dynakube"
"github.com/Dynatrace/dynatrace-operator/test/istiosetup"
"github.com/Dynatrace/dynatrace-operator/test/kubeobjects/deployment"
"github.com/Dynatrace/dynatrace-operator/test/kubeobjects/manifests"
"github.com/Dynatrace/dynatrace-operator/test/kubeobjects/pod"
"github.com/Dynatrace/dynatrace-operator/test/project"
"github.com/Dynatrace/dynatrace-operator/test/sampleapps"
"github.com/Dynatrace/dynatrace-operator/test/secrets"
"github.com/Dynatrace/dynatrace-operator/test/setup"
"github.com/Dynatrace/dynatrace-operator/test/shell"
"github.com/pkg/errors"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/e2e-framework/klient/k8s"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// Pinned codemodules image used by the e2e scenario; the digest must
// match the tagged image so getManifestPath resolves correctly.
const (
codeModulesVersion = "1.246.0.20220627-183412"
codeModulesImage = "quay.io/dynatrace/codemodules:" + codeModulesVersion
codeModulesImageDigest = "7ece13a07a20c77a31cc36906a10ebc90bd47970905ee61e8ed491b7f4c5d62f"
dataPath = "/data/"
)
var (
// codeModulesDeploymentConfig points at the sample-app manifest used by
// the codemodules injection feature.
codeModulesDeploymentConfig = path.Join(project.TestDataDir(), "cloudnative/codemodules-deployment.yaml")
)
// manifest mirrors the relevant part of the codemodules manifest.json
// written by the CSI provisioner (only the version is checked).
type manifest struct {
Version string `json:"version,omitempty"`
}
// CodeModules assembles the "codemodules injection" e2e feature: install
// the operator and a Dynakube using the pinned codemodules image, deploy
// sample apps, then assert injection, image download, storage reuse, and
// volume mounts. istioEnabled switches namespace config and Dynakube
// settings for Istio clusters.
func CodeModules(t *testing.T, istioEnabled bool) features.Feature {
secretConfigs, err := secrets.DefaultMultiTenant(afero.NewOsFs())
require.NoError(t, err)
codeModulesInjection := features.New("codemodules injection")
if istioEnabled {
codeModulesInjection.Setup(manifests.InstallFromFile(istioTestNamespaceConfig))
} else {
codeModulesInjection.Setup(manifests.InstallFromFile(testNamespaceConfig))
}
setup.InstallDynatraceFromSource(codeModulesInjection, &secretConfigs[0])
setup.AssessOperatorDeployment(codeModulesInjection)
setup.DeploySampleApps(codeModulesInjection, codeModulesDeploymentConfig)
dynakubeBuilder := dynakube.NewBuilder().
WithDefaultObjectMeta().
ApiUrl(secretConfigs[0].ApiUrl).
CloudNative(codeModulesSpec())
if istioEnabled {
dynakubeBuilder = dynakubeBuilder.WithIstio()
}
codeModulesInjection.Assess("install dynakube", dynakube.Apply(dynakubeBuilder.Build()))
setup.AssessDynakubeStartup(codeModulesInjection)
assessSampleAppsRestart(codeModulesInjection)
assessOneAgentsAreRunning(codeModulesInjection)
if istioEnabled {
istiosetup.AssessIstio(codeModulesInjection)
}
codeModulesInjection.Assess("csi driver did not crash", csiDriverIsAvailable)
codeModulesInjection.Assess("codemodules have been downloaded", imageHasBeenDownloaded)
codeModulesInjection.Assess("storage size has not increased", diskUsageDoesNotIncrease(secretConfigs[0]))
codeModulesInjection.Assess("volumes are mounted correctly", volumesAreMountedCorrectly())
return codeModulesInjection.Feature()
}
// codeModulesSpec returns a CloudNativeFullStack spec that pins the
// codemodules image and limits host injection to nodes labelled
// inject=dynakube.
func codeModulesSpec() *v1beta1.CloudNativeFullStackSpec {
return &v1beta1.CloudNativeFullStackSpec{
HostInjectSpec: v1beta1.HostInjectSpec{
NodeSelector: map[string]string{
"inject": "dynakube",
},
},
AppInjectionSpec: v1beta1.AppInjectionSpec{
CodeModulesImage: codeModulesImage,
},
}
}
// csiDriverIsAvailable asserts the CSI driver daemonset has as many ready
// pods as scheduled ones, i.e. no pod crashed during the scenario.
func csiDriverIsAvailable(ctx context.Context, t *testing.T, envConfig *envconf.Config) context.Context {
resource := envConfig.Client().Resources()
daemonset, err := csi.Get(ctx, resource)
require.NoError(t, err)
assert.Equal(t, daemonset.Status.DesiredNumberScheduled, daemonset.Status.NumberReady)
return ctx
}
// imageHasBeenDownloaded checks, on every CSI-driver pod, that the
// provisioner container has a "codemodules" entry under dataPath and that
// the downloaded manifest reports the expected codeModulesVersion.
func imageHasBeenDownloaded(ctx context.Context, t *testing.T, environmentConfig *envconf.Config) context.Context {
resource := environmentConfig.Client().Resources()
restConfig := environmentConfig.Client().RESTConfig()
err := csi.ForEachPod(ctx, resource, func(podItem corev1.Pod) {
var result *pod.ExecutionResult
result, err := pod.
NewExecutionQuery(podItem, "provisioner", shell.ListDirectory(dataPath)...).
Execute(restConfig)
require.NoError(t, err)
assert.Contains(t, result.StdOut.String(), "codemodules")
result, err = pod.
NewExecutionQuery(podItem, "provisioner", shell.Shell(shell.ReadFile(getManifestPath()))...).
Execute(restConfig)
require.NoError(t, err)
var codeModulesManifest manifest
err = json.Unmarshal(result.StdOut.Bytes(), &codeModulesManifest)
if err != nil {
// Attach the raw output so a malformed manifest is easy to debug.
err = errors.WithMessagef(err, "json:\n%s", result.StdOut)
}
require.NoError(t, err)
assert.Equal(t, codeModulesVersion, codeModulesManifest.Version)
})
require.NoError(t, err)
return ctx
}
// diskUsageDoesNotIncrease measures each CSI provisioner's data-directory
// usage, deploys a second tenant Dynakube that uses the same codemodules
// image, waits for it to run, and asserts per-pod usage stayed roughly
// constant (compared in KB-thousands) — i.e. the image is shared, not
// downloaded again per tenant.
//
// FIX: the error returned by the second csi.ForEachPod was silently
// ignored, and the first one was only checked after the second tenant's
// resources were created; both are now checked immediately.
func diskUsageDoesNotIncrease(secretConfig secrets.Secret) features.Func {
	return func(ctx context.Context, t *testing.T, environmentConfig *envconf.Config) context.Context {
		resource := environmentConfig.Client().Resources()
		restConfig := environmentConfig.Client().RESTConfig()
		storageMap := make(map[string]int)
		err := csi.ForEachPod(ctx, resource, func(podItem corev1.Pod) {
			result, err := pod.
				NewExecutionQuery(podItem, "provisioner", shell.Shell(shell.Pipe(
					shell.DiskUsageWithTotal(dataPath),
					shell.FilterLastLineOnly()))...).
				Execute(restConfig)
			require.NoError(t, err)
			diskUsage, err := strconv.Atoi(strings.Split(result.StdOut.String(), "\t")[0])
			require.NoError(t, err)
			storageMap[podItem.Name] = diskUsage
		})
		require.NoError(t, err)
		secondTenantSecret := getSecondTenantSecret(secretConfig.ApiToken)
		secondTenant := getSecondTenantDynakube(secretConfig.ApiUrl)
		require.NoError(t, resource.Create(ctx, &secondTenantSecret))
		require.NoError(t, resource.Create(ctx, &secondTenant))
		require.NoError(t, wait.For(conditions.New(resource).ResourceMatch(&secondTenant, func(object k8s.Object) bool {
			dynakubeInstance, isDynakube := object.(*v1beta1.DynaKube)
			return isDynakube && dynakubeInstance.Status.Phase == v1beta1.Running
		})))
		err = csi.ForEachPod(ctx, resource, func(podItem corev1.Pod) {
			result, err := pod.
				NewExecutionQuery(podItem, "provisioner", shell.Shell(shell.Pipe(
					shell.DiskUsageWithTotal(dataPath),
					shell.FilterLastLineOnly()))...).
				Execute(restConfig)
			require.NoError(t, err)
			diskUsage, err := strconv.Atoi(strings.Split(result.StdOut.String(), "\t")[0])
			require.NoError(t, err)
			// Dividing it by 1000 so the sizes do not need to be exactly the same down to the byte
			assert.Equal(t, storageMap[podItem.Name]/1000, diskUsage/1000)
		})
		require.NoError(t, err)
		return ctx
	}
}
// volumesAreMountedCorrectly asserts, for every sample-app pod, that the
// OneAgent bin CSI volume is attached and mounted, and that the install
// path is non-empty with a positive disk usage.
func volumesAreMountedCorrectly() features.Func {
return func(ctx context.Context, t *testing.T, environmentConfig *envconf.Config) context.Context {
resource := environmentConfig.Client().Resources()
err := deployment.NewQuery(ctx, resource, client.ObjectKey{
Name: sampleapps.Name,
Namespace: sampleapps.Namespace,
}).ForEachPod(func(podItem corev1.Pod) {
volumes := podItem.Spec.Volumes
volumeMounts := podItem.Spec.Containers[0].VolumeMounts
assert.True(t, isVolumeAttached(t, volumes, oneagent_mutation.OneAgentBinVolumeName))
assert.True(t, isVolumeMounted(t, volumeMounts, oneagent_mutation.OneAgentBinVolumeName))
executionResult, err := pod.
NewExecutionQuery(podItem, sampleapps.Name, shell.ListDirectory(webhook.DefaultInstallPath)...).
Execute(environmentConfig.Client().RESTConfig())
require.NoError(t, err)
assert.NotEmpty(t, executionResult.StdOut.String())
executionResult, err = pod.
NewExecutionQuery(podItem, sampleapps.Name, shell.Shell(shell.Pipe(
shell.DiskUsageWithTotal(webhook.DefaultInstallPath),
shell.FilterLastLineOnly()))...).
Execute(environmentConfig.Client().RESTConfig())
require.NoError(t, err)
require.Contains(t, executionResult.StdOut.String(), "total")
diskUsage, err := strconv.Atoi(strings.Split(executionResult.StdOut.String(), "\t")[0])
require.NoError(t, err)
assert.Greater(t, diskUsage, 0)
})
require.NoError(t, err)
return ctx
}
}
// isVolumeMounted reports whether a mount named volumeMountName appears in
// volumeMounts; for each match it additionally asserts the expected mount
// path and that the mount is writable.
func isVolumeMounted(t *testing.T, volumeMounts []corev1.VolumeMount, volumeMountName string) bool {
	found := false
	for _, mount := range volumeMounts {
		if mount.Name != volumeMountName {
			continue
		}
		found = true
		assert.Equal(t, webhook.DefaultInstallPath, mount.MountPath)
		assert.False(t, mount.ReadOnly)
	}
	return found
}
// isVolumeAttached reports whether a volume named volumeName is declared in
// volumes; for each match it additionally asserts the volume is a writable
// CSI volume backed by the Dynatrace driver.
func isVolumeAttached(t *testing.T, volumes []corev1.Volume, volumeName string) bool {
	found := false
	for _, volume := range volumes {
		if volume.Name != volumeName {
			continue
		}
		found = true
		assert.NotNil(t, volume.CSI)
		assert.Equal(t, dtcsi.DriverName, volume.CSI.Driver)
		if readOnly := volume.CSI.ReadOnly; readOnly != nil {
			assert.False(t, *readOnly)
		}
	}
	return found
}
// getSecondTenantSecret builds the API-token secret for the second DynaKube
// ("dynakube-2") used by the multi-tenant scenario.
func getSecondTenantSecret(apiToken string) corev1.Secret {
	secret := corev1.Secret{}
	secret.ObjectMeta = metav1.ObjectMeta{
		Name:      "dynakube-2",
		Namespace: dynakube.Namespace,
	}
	secret.Data = map[string][]byte{
		"apiToken": []byte(apiToken),
	}
	return secret
}
// getSecondTenantDynakube builds the second DynaKube ("dynakube-2") with
// application monitoring through the CSI driver, scoped by namespace
// selector to namespaces labeled test-key=test-value.
func getSecondTenantDynakube(apiUrl string) v1beta1.DynaKube {
	return v1beta1.DynaKube{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "dynakube-2",
			Namespace: dynakube.Namespace,
		},
		Spec: v1beta1.DynaKubeSpec{
			APIURL: apiUrl,
			NamespaceSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"test-key": "test-value",
				},
			},
			OneAgent: v1beta1.OneAgentSpec{
				ApplicationMonitoring: &v1beta1.ApplicationMonitoringSpec{
					UseCSIDriver: address.Of(true),
					AppInjectionSpec: v1beta1.AppInjectionSpec{
						CodeModulesImage: codeModulesImage,
					},
				},
			},
		},
	}
}
// getManifestPath returns the expected location of the container manifest
// for the configured code-modules image inside the CSI data directory.
func getManifestPath() string {
	const codeModulesDir = "/data/codemodules/"
	return codeModulesDir + codeModulesImageDigest + "/manifest.json"
}
|
/*
Copyright 2021 Dynatrace LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csidriver
import (
"context"
"fmt"
"net"
"os"
"runtime"
"strings"
"time"
dtcsi "github.com/Dynatrace/dynatrace-operator/src/controllers/csi"
csivolumes "github.com/Dynatrace/dynatrace-operator/src/controllers/csi/driver/volumes"
appvolumes "github.com/Dynatrace/dynatrace-operator/src/controllers/csi/driver/volumes/app"
hostvolumes "github.com/Dynatrace/dynatrace-operator/src/controllers/csi/driver/volumes/host"
"github.com/Dynatrace/dynatrace-operator/src/controllers/csi/metadata"
"github.com/Dynatrace/dynatrace-operator/src/version"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/spf13/afero"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/utils/mount"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Server implements the CSI identity and node gRPC services for the
// Dynatrace CSI driver.
type Server struct {
	client  client.Client
	opts    dtcsi.CSIOptions
	fs      afero.Afero
	mounter mount.Interface
	// db provides access to the CSI metadata store.
	db   metadata.Access
	path metadata.PathResolver
	// publishers maps a csi mode name to the publisher that handles it;
	// populated in Start.
	publishers map[string]csivolumes.Publisher
}
var _ csi.IdentityServer = &Server{}
var _ csi.NodeServer = &Server{}
// NewServer wires up a CSI driver server using an OS-backed filesystem and
// the default mounter; publishers are initialized later, in Start.
func NewServer(client client.Client, opts dtcsi.CSIOptions, db metadata.Access) *Server {
	server := &Server{}
	server.client = client
	server.opts = opts
	server.fs = afero.Afero{Fs: afero.NewOsFs()}
	server.mounter = mount.New("")
	server.db = db
	server.path = metadata.PathResolver{RootDir: opts.RootDir}
	return server
}
// SetupWithManager registers the server as a runnable with the controller
// manager, which then drives Start and the server's lifecycle.
func (svr *Server) SetupWithManager(mgr ctrl.Manager) error {
	return mgr.Add(svr)
}
// Start runs the CSI gRPC server until ctx is cancelled.
//
// It parses the configured endpoint, removes a stale unix socket if one is
// left over, initializes the volume publishers, serves the identity and node
// services, and periodically records process memory usage. On context
// cancellation the server is drained via GracefulStop.
func (svr *Server) Start(ctx context.Context) error {
	defer metadata.LogAccessOverview(svr.db)
	proto, addr, err := parseEndpoint(svr.opts.Endpoint)
	if err != nil {
		return fmt.Errorf("failed to parse endpoint '%s': %w", svr.opts.Endpoint, err)
	}
	if proto == "unix" {
		// A leftover socket file from a previous run would make Listen fail.
		if err := svr.fs.Remove(addr); err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("failed to remove old endpoint on '%s': %w", addr, err)
		}
	}
	svr.publishers = map[string]csivolumes.Publisher{
		appvolumes.Mode:  appvolumes.NewAppVolumePublisher(svr.client, svr.fs, svr.mounter, svr.db, svr.path),
		hostvolumes.Mode: hostvolumes.NewHostVolumePublisher(svr.client, svr.fs, svr.mounter, svr.db, svr.path),
	}
	log.Info("starting listener", "protocol", proto, "address", addr)
	listener, err := net.Listen(proto, addr)
	if err != nil {
		return fmt.Errorf("failed to start server: %w", err)
	}
	server := grpc.NewServer(grpc.UnaryInterceptor(logGRPC()))
	go func() {
		ticker := time.NewTicker(memoryMetricTick)
		// Fix: the ticker was never stopped, so it kept firing after the
		// goroutine exited; release it when shutting down.
		defer ticker.Stop()
		done := false
		for !done {
			select {
			case <-ctx.Done():
				log.Info("stopping server")
				server.GracefulStop()
				log.Info("stopped server")
				done = true
			case <-ticker.C:
				var m runtime.MemStats
				runtime.ReadMemStats(&m)
				memoryUsageMetric.Set(float64(m.Alloc))
			}
		}
	}()
	csi.RegisterIdentityServer(server, svr)
	csi.RegisterNodeServer(server, svr)
	log.Info("listening for connections on address", "address", listener.Addr())
	err = server.Serve(listener)
	server.GracefulStop()
	return err
}
// GetPluginInfo implements csi.IdentityServer; it reports the driver name
// and the operator version.
func (svr *Server) GetPluginInfo(context.Context, *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
	return &csi.GetPluginInfoResponse{Name: dtcsi.DriverName, VendorVersion: version.Version}, nil
}

// Probe implements csi.IdentityServer; the empty response signals the
// driver is alive.
func (svr *Server) Probe(context.Context, *csi.ProbeRequest) (*csi.ProbeResponse, error) {
	return &csi.ProbeResponse{}, nil
}

// GetPluginCapabilities implements csi.IdentityServer; no optional plugin
// capabilities are advertised.
func (svr *Server) GetPluginCapabilities(context.Context, *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
	return &csi.GetPluginCapabilitiesResponse{}, nil
}
// NodePublishVolume mounts the requested volume at the request's target
// path. The call is idempotent: if the target is already a mount point it
// succeeds without republishing. The actual work is delegated to the
// publisher registered for the requested csi mode.
func (svr *Server) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	volumeCfg, err := csivolumes.ParseNodePublishVolumeRequest(req)
	if err != nil {
		return nil, err
	}
	// Already mounted -> report success so kubelet retries stay cheap.
	if isMounted, err := isMounted(svr.mounter, volumeCfg.TargetPath); err != nil {
		return nil, err
	} else if isMounted {
		return &csi.NodePublishVolumeResponse{}, nil
	}
	publisher, ok := svr.publishers[volumeCfg.Mode]
	if !ok {
		return nil, status.Error(codes.Internal, fmt.Sprintf("unknown csi mode provided, mode=%s", volumeCfg.Mode))
	}
	log.Info("publishing volume",
		"csiMode", volumeCfg.Mode,
		"target", volumeCfg.TargetPath,
		"fstype", req.GetVolumeCapability().GetMount().GetFsType(),
		"readonly", req.GetReadonly(),
		"volumeID", volumeCfg.VolumeID,
		"attributes", req.GetVolumeContext(),
		"mountflags", req.GetVolumeCapability().GetMount().GetMountFlags(),
	)
	return publisher.PublishVolume(ctx, volumeCfg)
}
// NodeUnpublishVolume unmounts a previously published volume. Each publisher
// is asked whether it owns the volume; the first owner performs the
// unpublish. If no publisher claims the volume, the target path is
// best-effort unmounted and success is reported so kubelet can finish pod
// cleanup.
func (svr *Server) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	volumeInfo, err := csivolumes.ParseNodeUnpublishVolumeRequest(req)
	if err != nil {
		return nil, err
	}
	for _, publisher := range svr.publishers {
		canUnpublish, err := publisher.CanUnpublishVolume(ctx, volumeInfo)
		if err != nil {
			// Non-fatal: keep probing the remaining publishers.
			log.Error(err, "couldn't determine if volume can be unpublished", "publisher", publisher)
		}
		if canUnpublish {
			response, err := publisher.UnpublishVolume(ctx, volumeInfo)
			if err != nil {
				return nil, err
			}
			return response, nil
		}
	}
	svr.unmountUnknownVolume(*volumeInfo)
	return &csi.NodeUnpublishVolumeResponse{}, nil
}
// unmountUnknownVolume is the fallback for volumes no publisher recognizes:
// it logs the unknown volume and best-effort unmounts its target path.
func (svr *Server) unmountUnknownVolume(volumeInfo csivolumes.VolumeInfo) {
	log.Info("VolumeID not present in the database", "volumeID", volumeInfo.VolumeID, "targetPath", volumeInfo.TargetPath)
	err := svr.mounter.Unmount(volumeInfo.TargetPath)
	if err != nil {
		log.Error(err, "Tried to unmount unknown volume", "volumeID", volumeInfo.VolumeID)
	}
}
// NodeStageVolume is not supported by this driver.
func (svr *Server) NodeStageVolume(context.Context, *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// NodeUnstageVolume is not supported by this driver.
func (svr *Server) NodeUnstageVolume(context.Context, *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// NodeGetInfo reports the node id the driver was configured with.
func (svr *Server) NodeGetInfo(context.Context, *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
	return &csi.NodeGetInfoResponse{NodeId: svr.opts.NodeId}, nil
}

// NodeGetCapabilities advertises no optional node service capabilities.
func (svr *Server) NodeGetCapabilities(context.Context, *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{Capabilities: []*csi.NodeServiceCapability{}}, nil
}

// NodeGetVolumeStats is not supported by this driver.
func (svr *Server) NodeGetVolumeStats(context.Context, *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// NodeExpandVolume is not supported by this driver.
func (svr *Server) NodeExpandVolume(context.Context, *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}
// isMounted reports whether targetPath is currently a mount point. A
// non-existent target path counts as "not mounted"; any other error is
// mapped to a gRPC Internal error.
func isMounted(mounter mount.Interface, targetPath string) (bool, error) {
	notMounted, err := mount.IsNotMountPoint(mounter, targetPath)
	switch {
	case os.IsNotExist(err):
		// Nothing at the path yet means nothing is mounted there.
		notMounted = true
	case err != nil:
		return false, status.Error(codes.Internal, err.Error())
	}
	return !notMounted, nil
}
// logGRPC returns a unary interceptor that logs volume-related calls and any
// handler errors. The high-frequency health calls (Probe and
// NodeGetCapabilities) are passed through without logging to keep the log
// readable.
func logGRPC() grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		if info.FullMethod == "/csi.v1.Identity/Probe" || info.FullMethod == "/csi.v1.Node/NodeGetCapabilities" {
			return handler(ctx, req)
		}
		methodName := ""
		if info.FullMethod == "/csi.v1.Node/NodePublishVolume" {
			req := req.(*csi.NodePublishVolumeRequest)
			methodName = "NodePublishVolume"
			log.Info("GRPC call", "method", methodName, "volume-id", req.VolumeId)
		} else if info.FullMethod == "/csi.v1.Node/NodeUnpublishVolume" {
			req := req.(*csi.NodeUnpublishVolumeRequest)
			methodName = "NodeUnpublishVolume"
			log.Info("GRPC call", "method", methodName, "volume-id", req.VolumeId)
		}
		resp, err := handler(ctx, req)
		if err != nil {
			log.Error(err, "GRPC call failed", "method", methodName)
		}
		return resp, err
	}
}
// parseEndpoint splits a CSI endpoint of the form "<proto>://<address>" into
// its protocol and address parts. Only "unix" and "tcp" protocols are
// accepted (matched case-insensitively; the original casing of the protocol
// is returned), and the address part must be non-empty.
func parseEndpoint(ep string) (string, string, error) {
	lowered := strings.ToLower(ep)
	for _, scheme := range []string{"unix://", "tcp://"} {
		if !strings.HasPrefix(lowered, scheme) {
			continue
		}
		parts := strings.SplitN(ep, "://", 2)
		if parts[1] != "" {
			return parts[0], parts[1], nil
		}
	}
	return "", "", fmt.Errorf("invalid endpoint: %v", ep)
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
//go:build linux || freebsd
// +build linux freebsd
package processgroup
import (
"os"
"os/exec"
"syscall"
)
// Setup sets up exec.Cmd such that it can be properly terminated.
// Setup sets up exec.Cmd such that it can be properly terminated.
//
// The child is placed in its own process group (so Kill can signal the whole
// group) and is sent SIGKILL if the parent dies first (parent-death signal).
func Setup(c *exec.Cmd) {
	c.SysProcAttr = &syscall.SysProcAttr{
		Setpgid:   true,
		Pdeathsig: syscall.SIGKILL,
	}
}
// Kill tries to forcefully kill the process.
func Kill(cmd *exec.Cmd) {
proc := cmd.Process
if proc == nil {
return
}
pgid, err := syscall.Getpgid(proc.Pid)
if err != nil {
_ = syscall.Kill(-pgid, syscall.SIGTERM)
}
// just in case
_ = proc.Signal(os.Interrupt)
_ = proc.Signal(os.Kill)
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
"text/tabwriter"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/cloud"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/config"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/install"
"github.com/cockroachdb/errors"
)
// initDirs creates the local host-file and debug directories (expanding any
// environment variables in the configured paths) if they don't exist yet.
func initDirs() error {
	hd := os.ExpandEnv(config.DefaultHostDir)
	if err := os.MkdirAll(hd, 0755); err != nil {
		return err
	}
	return os.MkdirAll(os.ExpandEnv(config.DefaultDebugDir), 0755)
}
// syncHosts writes one host file per cluster (a tab-aligned listing of
// user@host, locality and vpcId for each VM) and then garbage-collects host
// files for clusters that no longer exist.
func syncHosts(cloud *cloud.Cloud) error {
	hd := os.ExpandEnv(config.DefaultHostDir)
	// Write all host files. We're the only process doing this due to the file
	// lock acquired by syncAll, but other processes may be reading the host
	// files concurrently so we need to write the files atomically by writing to
	// a temporary file and renaming.
	for _, c := range cloud.Clusters {
		filename := path.Join(hd, c.Name)
		tmpFile := filename + ".tmp"
		// The closure scopes the defer so the file handle is closed at the
		// end of each iteration, before the rename.
		err := func() error {
			file, err := os.Create(tmpFile)
			if err != nil {
				return errors.Wrapf(err, "problem creating file %s", filename)
			}
			defer file.Close()
			// Align columns left and separate with at least two spaces.
			tw := tabwriter.NewWriter(file, 0, 8, 2, ' ', 0)
			if _, err := tw.Write([]byte("# user@host\tlocality\tvpcId\n")); err != nil {
				return err
			}
			for _, vm := range c.VMs {
				if _, err := tw.Write([]byte(fmt.Sprintf(
					"%s@%s\t%s\t%s\n", vm.RemoteUser, vm.PublicIP, vm.Locality(), vm.VPC))); err != nil {
					return err
				}
			}
			if err := tw.Flush(); err != nil {
				return errors.Wrapf(err, "problem writing file %s", filename)
			}
			return nil
		}()
		if err != nil {
			return err
		}
		if err := os.Rename(tmpFile, filename); err != nil {
			return err
		}
	}
	return gcHostsFiles(cloud)
}
// gcHostsFiles removes host files for clusters that are no longer present in
// the cloud inventory. Removal failures are logged and skipped so that one
// stale file doesn't block cleanup of the rest.
func gcHostsFiles(cloud *cloud.Cloud) error {
	hd := os.ExpandEnv(config.DefaultHostDir)
	files, err := ioutil.ReadDir(hd)
	if err != nil {
		return err
	}
	for _, file := range files {
		if !file.Mode().IsRegular() {
			continue
		}
		// Keep files that still correspond to a live cluster.
		if _, ok := cloud.Clusters[file.Name()]; ok {
			continue
		}
		filename := filepath.Join(hd, file.Name())
		if err := os.Remove(filename); err != nil {
			// Fix: the original log line dropped the error value, hiding the
			// reason for the failure.
			log.Printf("failed to remove file %s: %v", filename, err)
		}
	}
	return nil
}
// newInvalidHostsLineErr builds the error reported when a hosts-file line
// does not match the expected "<username>@<host> [locality] [vpcId]" layout.
func newInvalidHostsLineErr(line string) error {
	const layout = "<username>@<host> [locality] [vpcId]"
	return fmt.Errorf("invalid hosts line, expected %s, got %q", layout, line)
}
// loadClusters parses every host file in the host directory into
// install.SyncedCluster entries, populating the global install.Clusters map.
// Temp files from in-flight atomic writes (".tmp" suffix) are skipped.
func loadClusters() error {
	hd := os.ExpandEnv(config.DefaultHostDir)
	files, err := ioutil.ReadDir(hd)
	if err != nil {
		return err
	}
	debugDir := os.ExpandEnv(config.DefaultDebugDir)
	for _, file := range files {
		if !file.Mode().IsRegular() {
			continue
		}
		if strings.HasSuffix(file.Name(), ".tmp") {
			continue
		}
		filename := filepath.Join(hd, file.Name())
		contents, err := ioutil.ReadFile(filename)
		if err != nil {
			return errors.Wrapf(err, "could not read %s", filename)
		}
		lines := strings.Split(string(contents), "\n")
		c := &install.SyncedCluster{
			Name:     file.Name(),
			DebugDir: debugDir,
		}
		for _, l := range lines {
			// We'll consume the fields as we go along
			fields := strings.Fields(l)
			if len(fields) == 0 {
				continue
			} else if len(fields[0]) > 0 && fields[0][0] == '#' {
				// Comment line.
				continue
			} else if len(fields) > 3 {
				return newInvalidHostsLineErr(l)
			}
			// First field is "[user@]host"; a missing user falls back to the
			// current OS user.
			parts := strings.Split(fields[0], "@")
			fields = fields[1:]
			var n, u string
			if len(parts) == 1 {
				u = config.OSUser.Username
				n = parts[0]
			} else if len(parts) == 2 {
				u = parts[0]
				n = parts[1]
			} else {
				return newInvalidHostsLineErr(l)
			}
			// NB: it turns out we do see empty hosts here if we are concurrently
			// creating clusters and this sync is picking up a cluster that's not
			// ready yet. See:
			// https://github.com/cockroachdb/cockroach/issues/49542#issuecomment-634563130
			// if n == "" {
			// 	return newInvalidHostsLineErr(l)
			// }
			// Optional second and third fields: locality, then vpc.
			var locality string
			if len(fields) > 0 {
				locality = fields[0]
				fields = fields[1:]
			}
			var vpc string
			if len(fields) > 0 {
				vpc = fields[0]
				fields = fields[1:]
			}
			if len(fields) > 0 {
				return newInvalidHostsLineErr(l)
			}
			// Parallel slices, one entry per VM.
			c.VMs = append(c.VMs, n)
			c.Users = append(c.Users, u)
			c.Localities = append(c.Localities, locality)
			c.VPCs = append(c.VPCs, vpc)
		}
		if len(c.VMs) == 0 {
			return errors.Errorf("found no VMs in %s", contents)
		}
		install.Clusters[file.Name()] = c
	}
	return nil
}
|
package simpletime
import (
"testing"
"time"
)
// TestParseFormat checks that ParseFormat translates "simple" date tokens
// (YYYY/MM/DD, hh/mm/ss and their short forms) into a Go reference-time
// layout by formatting one fixed instant and comparing the rendered string.
func TestParseFormat(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name   string
		format string
		want   string
	}{
		{
			name:   "year-month-day long",
			format: "YYYY-MM-DD",
			want:   "2022-08-09",
		},
		{
			name:   "month/day/year short",
			format: "M/D/YY",
			want:   "8/9/22",
		},
		{
			name:   "hour:minute:second long",
			format: "hh:mm:ss",
			want:   "10:07:06",
		},
		{
			name:   "second-minute short",
			format: "s-m",
			want:   "6-7",
		},
	}
	for _, tt := range tests {
		tt := tt // capture range variable for the parallel subtest
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			goFormat := ParseFormat(tt.format)
			// Fixed instant: 2022-08-09 10:07:06.000000020 UTC.
			fixedDate := time.Date(
				2022, // year
				8,    // month
				9,    // day
				10,   // hour
				7,    // minute
				6,    // second
				20,   // nanosecond
				time.UTC,
			)
			if got := fixedDate.Format(goFormat); got != tt.want {
				t.Errorf("ParseFormat() got %q, want %q", got, tt.want)
			}
		})
	}
}
|
package auth
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/wyllisMonteiro/go-api-template/pkg/models"
)
// Register New account
// @Summary Create new account
// @Description Using JWT auth
// @Tags auth
// @Accept json
// @Produce json
// @Param body body models.RequestRegister true "Add account"
// @Success 200 {string} string "message"
// @Failure 500 {object} httputil.HTTPError
// @Router /register [post]
// Register binds the registration payload and creates the account,
// responding with 500 on bind or persistence failure.
func Register(c *gin.Context) {
	var req models.RequestRegister
	if err := c.BindJSON(&req); err != nil {
		c.JSON(http.StatusInternalServerError, err)
		return
	}
	if err := models.CreateUser(req); err != nil {
		c.JSON(http.StatusInternalServerError, err)
		return
	}
	// Bug fix: success was previously reported with 503 Service Unavailable;
	// the swagger annotation above documents 200 for a created account.
	c.JSON(http.StatusOK, gin.H{
		"message": "created",
	})
}
// Login An account
// @Summary Connect user to app
// @Description Using JWT auth (look headers for token)
// @Tags auth
// @Accept json
// @Produce json
// @Param body body models.RequestLogin true "Log account"
// @Success 200 {string} string ""
// @Failure 500 {object} httputil.HTTPError
// @Router /login [post]
// Login is intentionally empty: authentication itself is performed by the
// JWT middleware (jwt.Auth.Authenticate); this handler exists only so the
// /login route appears in the generated swagger documentation.
func Login(c *gin.Context) {
	// call jwt.Auth.Authenticate()
	// useless func, but help to make swagger doc
}
// Copyright 2017 Walter Schulze
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"fmt"
"strconv"
"testing"
)
// TestCompose checks the derived zero-argument composition: a reader that
// produces "1" piped into ParseFloat yields 1.0.
func TestCompose(t *testing.T) {
	read := func() (string, error) {
		return "1", nil
	}
	parseFloat := func(i string) (float64, error) {
		return strconv.ParseFloat(i, 64)
	}
	got, err := deriveCompose(read, parseFloat)()
	if err != nil {
		t.Fatal(err)
	}
	want := float64(1)
	if got != want {
		t.Fatalf("got %v, want %v", got, want)
	}
}

// TestComposeA checks composing two single-argument fallible functions.
func TestComposeA(t *testing.T) {
	read := func(s string) (string, error) {
		return s, nil
	}
	parseFloat := func(i string) (float64, error) {
		return strconv.ParseFloat(i, 64)
	}
	parse := deriveComposeA(read, parseFloat)
	got, err := parse("1")
	if err != nil {
		t.Fatal(err)
	}
	want := float64(1)
	if got != want {
		t.Fatalf("got %v, want %v", got, want)
	}
}

// TestCompose2 checks a composition with two inputs and two intermediate
// outputs; the second stage cross-checks that it received both values.
func TestCompose2(t *testing.T) {
	read := func(s string, z string) ([]string, string, error) {
		return []string{s, z}, s + z, nil
	}
	parseFloat := func(ss []string, s string) (float64, error) {
		if ss[0]+ss[1] != s {
			return 0, fmt.Errorf("wtf")
		}
		return strconv.ParseFloat(s, 64)
	}
	parse := deriveCompose2(read, parseFloat)
	got, err := parse("1", "2")
	if err != nil {
		t.Fatal(err)
	}
	want := float64(12)
	if got != want {
		t.Fatalf("got %v, want %v", got, want)
	}
}
// TestComposeRetBoolSuccess checks a composition whose final stage returns a
// bool (happy path).
func TestComposeRetBoolSuccess(t *testing.T) {
	read := func(s string) (string, error) {
		return s, nil
	}
	lenLessThan2 := func(i string) (bool, error) {
		result := len(i) < 2
		return result, nil
	}
	check := deriveComposeRetBool(read, lenLessThan2)
	got, err := check("1")
	if err != nil {
		t.Fatal(err)
	}
	want := true
	if got != want {
		t.Fatalf("got %v, want %v", got, want)
	}
}

// TestComposeRetBoolFailed checks that a failure in the first stage
// propagates: the composed function returns the error and the zero bool.
func TestComposeRetBoolFailed(t *testing.T) {
	read := func(s string) (string, error) {
		if s == "" {
			return s, fmt.Errorf("empty string")
		}
		return s, nil
	}
	lenLessThan2 := func(i string) (bool, error) {
		result := len(i) < 2
		return result, nil
	}
	check := deriveComposeRetBool(read, lenLessThan2)
	got, err := check("") // passing empty string will fail
	if err == nil {
		t.Fatalf("Expected error from empty string")
	}
	want := false
	if got != want {
		t.Fatalf("got %v, want %v", got, want)
	}
}

// TestComposeVariadic checks a three-stage composition
// (read -> parseFloat -> toInt).
func TestComposeVariadic(t *testing.T) {
	read := func(s string) (string, error) {
		return s, nil
	}
	parseFloat := func(i string) (float64, error) {
		return strconv.ParseFloat(i, 64)
	}
	toInt := func(f float64) (int, error) {
		i := int(f)
		if float64(i) != f {
			return 0, fmt.Errorf("%f is not a whole number", f)
		}
		return i, nil
	}
	parse := deriveComposeVariadic(read, parseFloat, toInt)
	got, err := parse("1")
	if err != nil {
		t.Fatal(err)
	}
	want := 1
	if got != want {
		t.Fatalf("got %v, want %v", got, want)
	}
}
|
/*
Copyright 2022 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
"context"
"fmt"
"sort"
"sync"
"github.com/compose-spec/compose-go/types"
"github.com/distribution/distribution/v3/reference"
moby "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"golang.org/x/sync/errgroup"
"github.com/docker/compose/v2/pkg/api"
)
// ImagePruneMode controls how aggressively images associated with the project
// are removed from the engine.
type ImagePruneMode string

const (
	// ImagePruneNone indicates that no project images should be removed.
	ImagePruneNone ImagePruneMode = ""
	// ImagePruneLocal indicates that only images built locally by Compose
	// should be removed.
	ImagePruneLocal ImagePruneMode = "local"
	// ImagePruneAll indicates that all project-associated images, including
	// remote images should be removed.
	ImagePruneAll ImagePruneMode = "all"
)

// ImagePruneOptions controls the behavior of image pruning.
type ImagePruneOptions struct {
	// Mode selects which images are candidates for removal; see the
	// ImagePruneMode constants above.
	Mode ImagePruneMode
	// RemoveOrphans will result in the removal of images that were built for
	// the project regardless of whether they are for a known service if true.
	RemoveOrphans bool
}

// ImagePruner handles image removal during Compose `down` operations.
type ImagePruner struct {
	client  client.ImageAPIClient
	project *types.Project
}

// NewImagePruner creates an ImagePruner object for a project.
func NewImagePruner(imageClient client.ImageAPIClient, project *types.Project) *ImagePruner {
	return &ImagePruner{
		client:  imageClient,
		project: project,
	}
}
// ImagesToPrune returns the set of images that should be removed.
// ImagesToPrune returns the deduplicated, normalized set of image references
// that should be removed for the given prune options. An unknown mode is an
// error; ImagePruneNone returns nothing.
func (p *ImagePruner) ImagesToPrune(ctx context.Context, opts ImagePruneOptions) ([]string, error) {
	if opts.Mode == ImagePruneNone {
		return nil, nil
	} else if opts.Mode != ImagePruneLocal && opts.Mode != ImagePruneAll {
		return nil, fmt.Errorf("unsupported image prune mode: %s", opts.Mode)
	}
	var images []string
	if opts.Mode == ImagePruneAll {
		// "all" additionally removes images explicitly named in the config,
		// including ones that were pulled rather than built.
		namedImages, err := p.namedImages(ctx)
		if err != nil {
			return nil, err
		}
		images = append(images, namedImages...)
	}
	projectImages, err := p.labeledLocalImages(ctx)
	if err != nil {
		return nil, err
	}
	for _, img := range projectImages {
		if len(img.RepoTags) == 0 {
			// currently, we're only pruning the tagged references, but
			// if we start removing the dangling images and grouping by
			// service, we can remove this (and should rely on `Image::ID`)
			continue
		}
		var shouldPrune bool
		if opts.RemoveOrphans {
			// indiscriminately prune all project images even if they're not
			// referenced by the current Compose state (e.g. the service was
			// removed from YAML)
			shouldPrune = true
		} else {
			// only prune the image if it belongs to a known service for the project.
			if _, err := p.project.GetService(img.Labels[api.ServiceLabel]); err == nil {
				shouldPrune = true
			}
		}
		if shouldPrune {
			images = append(images, img.RepoTags[0])
		}
	}
	// Pick up images from older Compose versions that lack project labels.
	fallbackImages, err := p.unlabeledLocalImages(ctx)
	if err != nil {
		return nil, err
	}
	images = append(images, fallbackImages...)
	images = normalizeAndDedupeImages(images)
	return images, nil
}
// namedImages are those that are explicitly named in the service config.
//
// These could be registry-only images (no local build), hybrid (support build
// as a fallback if cannot pull), or local-only (image does not exist in a
// registry).
// namedImages returns the images explicitly named in the service config that
// actually exist in the engine store.
//
// These could be registry-only images (no local build), hybrid (build as a
// fallback when the pull fails), or local-only images.
func (p *ImagePruner) namedImages(ctx context.Context) ([]string, error) {
	images := make([]string, 0, len(p.project.Services))
	for _, service := range p.project.Services {
		if img := service.Image; img != "" {
			images = append(images, img)
		}
	}
	return p.filterImagesByExistence(ctx, images)
}
// labeledLocalImages are images that were locally-built by a current version of
// Compose (it did not always label built images).
//
// The image name could either have been defined by the user or implicitly
// created from the project + service name.
// labeledLocalImages lists the non-dangling engine images carrying this
// project's label, i.e. images built locally by a current version of Compose
// (it did not always label built images).
//
// The image name could either have been defined by the user or implicitly
// created from the project + service name.
func (p *ImagePruner) labeledLocalImages(ctx context.Context) ([]moby.ImageSummary, error) {
	imageListOpts := moby.ImageListOptions{
		Filters: filters.NewArgs(
			projectFilter(p.project.Name),
			// TODO(milas): we should really clean up the dangling images as
			// well (historically we have NOT); need to refactor this to handle
			// it gracefully without producing confusing CLI output, i.e. we
			// do not want to print out a bunch of untagged/dangling image IDs,
			// they should be grouped into a logical operation for the relevant
			// service
			filters.Arg("dangling", "false"),
		),
	}
	projectImages, err := p.client.ImageList(ctx, imageListOpts)
	if err != nil {
		return nil, err
	}
	return projectImages, nil
}
// unlabeledLocalImages are images that match the implicit naming convention
// for locally-built images but did not get labeled, presumably because they
// were produced by an older version of Compose.
//
// This is transitional to ensure `down` continues to work as expected on
// projects built/launched by previous versions of Compose. It can safely
// be removed after some time.
// unlabeledLocalImages returns images matching the implicit naming
// convention for locally-built images that exist in the engine store but did
// not get labeled, presumably because an older version of Compose produced
// them. This is transitional and can be removed after some time.
func (p *ImagePruner) unlabeledLocalImages(ctx context.Context) ([]string, error) {
	var images []string
	for _, service := range p.project.Services {
		if service.Image != "" {
			// Explicitly named images are handled by namedImages.
			continue
		}
		images = append(images, api.GetImageNameOrDefault(service, p.project.Name))
	}
	return p.filterImagesByExistence(ctx, images)
}
// filterImagesByExistence returns the subset of images that exist in the
// engine store.
//
// NOTE: Any transient errors communicating with the API will result in an
// image being returned as "existing", as this method is exclusively used to
// find images to remove, so the worst case of being conservative here is an
// attempt to remove an image that doesn't exist, which will cause a warning
// but is otherwise harmless.
// filterImagesByExistence returns the subset of imageNames that exist in the
// engine store, inspecting them concurrently. Only a definitive "not found"
// excludes an image; transient API errors keep it in the result, which is
// safe because callers only use this to pick removal candidates.
func (p *ImagePruner) filterImagesByExistence(ctx context.Context, imageNames []string) ([]string, error) {
	var mu sync.Mutex
	var ret []string
	eg, ctx := errgroup.WithContext(ctx)
	for _, img := range imageNames {
		img := img // capture the loop variable for the goroutine
		eg.Go(func() error {
			_, _, err := p.client.ImageInspectWithRaw(ctx, img)
			if errdefs.IsNotFound(err) {
				// err on the side of caution: only skip if we successfully
				// queried the API and got back a definitive "not exists"
				return nil
			}
			// mu guards ret; inspections run concurrently.
			mu.Lock()
			defer mu.Unlock()
			ret = append(ret, img)
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, err
	}
	return ret, nil
}
// normalizeAndDedupeImages returns the unique set of images after normalization.
// normalizeAndDedupeImages returns the unique set of images after
// normalization, sorted for deterministic output.
func normalizeAndDedupeImages(images []string) []string {
	seen := make(map[string]struct{}, len(images))
	for _, img := range images {
		// References come both from user input (service.image) and from the
		// engine API; standardize on the "familiar" form also shown in the
		// CLI. Unparseable references are kept as-is.
		if ref, err := reference.ParseNormalizedNamed(img); err == nil {
			img = reference.FamiliarString(reference.TagNameOnly(ref))
		}
		seen[img] = struct{}{}
	}
	ret := make([]string, 0, len(seen))
	for img := range seen {
		ret = append(ret, img)
	}
	// ensure a deterministic return result - the actual ordering is not useful
	sort.Strings(ret)
	return ret
}
|
/*
* Wire API
*
* Moov Wire implements an HTTP API for creating, parsing, and validating Fedwire messages.
*
* API version: v1
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// OutputMessageAccountabilityData struct for OutputMessageAccountabilityData
// OutputMessageAccountabilityData struct for OutputMessageAccountabilityData.
type OutputMessageAccountabilityData struct {
	// OutputCycleDate (Format CCYYMMDD - C=Century, Y=Year, M=Month, D=Day)
	OutputCycleDate string `json:"outputCycleDate,omitempty"`
	// OutputDestinationID
	OutputDestinationID string `json:"outputDestinationID,omitempty"`
	// OutputSequenceNumber
	OutputSequenceNumber string `json:"outputSequenceNumber,omitempty"`
	// Output Date is based on the calendar date. (Format MMDD - M=Month, D=Day)
	OutputDate string `json:"outputDate,omitempty"`
	// Output Time is based on a 24-hour clock, Eastern Time. (Format HHmm - H=Hour, m=Minute)
	OutputTime string `json:"outputTime,omitempty"`
	// OutputFRBApplicationIdentification
	OutputFRBApplicationIdentification string `json:"outputFRBApplicationIdentification,omitempty"`
}
|
package handler
import (
"golang.org/x/net/context"
pb "github.com/im-auld/gopherup/moderator-service"
"google.golang.org/grpc/grpclog"
"strings"
)
// NewService returns a naïve, stateless implementation of Service.
func NewService() pb.ModeratorServer {
	return gopherupService{}
}

// gopherupService is a stateless implementation of the Moderator service.
type gopherupService struct{}

// profanity lists the words that cleanString masks out (matched
// case-insensitively, whole space-separated tokens only).
var profanity = []string{"shit", "crap", "fuck", "poop"}
// checkSlice reports whether word matches any entry of list, comparing
// case-insensitively.
func checkSlice(word string, list []string) bool {
	for _, val := range list {
		// EqualFold avoids allocating two lowered copies per comparison
		// (the original lowered both sides on every iteration) and performs
		// proper Unicode case folding.
		if strings.EqualFold(word, val) {
			return true
		}
	}
	return false
}
// cleanString replaces every profane token in str with asterisks of the same
// length and reports whether anything was replaced. Tokens are split on
// single spaces, so punctuation attached to a word prevents a match.
func cleanString(str string) (string, bool) {
	moderated := false
	strSlice := strings.Split(str, " ")
	for idx, val := range strSlice {
		if checkSlice(val, profanity) {
			grpclog.Printf("Removed %v from string", val)
			// Mask with the same number of characters to preserve layout.
			strSlice[idx] = strings.Repeat("*", len(val))
			moderated = true
		}
	}
	return strings.Join(strSlice, " "), moderated
}
// cleanItem moderates an item's description and title in place and reports
// whether either field was changed.
func cleanItem(item *pb.Item) (*pb.Item, bool) {
	newDescription, descriptionChanged := cleanString(item.Description)
	if descriptionChanged {
		item.Description = newDescription
	}
	newTitle, titleChanged := cleanString(item.Title)
	if titleChanged {
		item.Title = newTitle
	}
	return item, titleChanged || descriptionChanged
}
// ModerateItem implements Service.
// ModerateItem implements Service. It masks profanity in the request item's
// title and description and reports whether any moderation took place.
func (s gopherupService) ModerateItem(ctx context.Context, in *pb.ModerationRequest) (*pb.ModerationResponse, error) {
	item, moderated := cleanItem(in.Item)
	resp := pb.ModerationResponse{
		Moderated: moderated,
		Item:      item,
	}
	return &resp, nil
}
|
package main
import (
"time"
"fmt"
)
// main prints the current time twice: once as a time.Time value and once as
// a Unix timestamp in seconds.
func main() {
	currentTime := time.Now()
	fmt.Println(currentTime)
	unixSeconds := time.Now().Unix()
	fmt.Println(unixSeconds)
}
|
package main
import (
"fmt"
mypackage "./mypackage"
)
// main demonstrates using an exported struct and function from the local
// "mypackage" package.
// NOTE(review): the relative import path "./mypackage" predates Go modules
// and will not build under module mode; consider a module-based path.
func main() {
	var myCar mypackage.CarPublic
	myCar.Brand = "Ferrari"
	myCar.Year = 2021
	fmt.Println(myCar)
	mypackage.Printmessage("Hello everyone")
}
|
package token
/*
Web-page authorization access_token.

WeChat web-page authorization is implemented via the OAuth2.0 mechanism.
After the user grants authorization to the official account, the account
obtains an interface-call credential specific to web authorization (the
web authorization access_token). With it, post-authorization APIs can be
called, e.g. fetching the user's basic profile information.
*/
// ApiToken is a WeChat web-authorization (OAuth2.0) access token.
type ApiToken string

// GetToken returns the current token value.
// Fix: the original body was empty, which does not compile ("missing
// return") for a function declared to return string.
func (token *ApiToken) GetToken() string {
	return string(*token)
}

// RefreshToken returns the token value after a refresh.
// TODO(review): no refresh flow is implemented yet — this currently just
// returns the stored value; wire up the WeChat refresh endpoint.
func (token *ApiToken) RefreshToken() string {
	return string(*token)
}
|
package text
import (
"fmt"
"strings"
"github.com/aevea/quoad"
)
// ReleaseNotes holds the required settings for generating ReleaseNotes
type ReleaseNotes struct {
	// Complex selects the more detailed output variant.
	// NOTE(review): Complex is not read anywhere in this chunk — confirm
	// against callers before relying on its effect.
	Complex bool
}
// Generate generates the output mentioned in the expected-output.md
// Generate renders the release notes for the given commit sections, in the
// fixed order features, bugs, chores, others, skipping empty sections. When
// dryRun is set the result is also printed to stdout. The returned string
// always begins with two newlines so downstream formatting lines up.
func (r *ReleaseNotes) Generate(sections map[string][]quoad.Commit, dryRun bool) string {
	var builder strings.Builder
	// Extra lines at the start to make sure formatting starts correctly
	builder.WriteString("\n\n")
	for _, name := range []string{"features", "bugs", "chores", "others"} {
		if commits := sections[name]; len(commits) > 0 {
			builder.WriteString(r.buildSection(name, commits))
		}
	}
	if dryRun {
		fmt.Print(builder.String())
	}
	return builder.String()
}
|
package main
import (
"html/template"
"log"
"net/http"
)
// tpl caches all templates found under templates/, parsed once at startup.
var tpl *template.Template

// init parses every .gohtml template; template.Must panics on a parse
// error, aborting startup early rather than failing per-request.
func init() {
	tpl = template.Must(template.ParseGlob("templates/*.gohtml"))
}
// index renders the index page with the page title as template data.
func index(w http.ResponseWriter, r *http.Request) {
	page := "Index"
	// BUG FIX: the template-execution error was silently discarded.
	if err := tpl.ExecuteTemplate(w, "index.gohtml", page); err != nil {
		log.Printf("index: executing template: %v", err)
	}
}
// dog renders the dog page.
// NOTE(review): this executes "index.gohtml", not "dog.gohtml" — it looks
// like deliberate template reuse with different data, but confirm.
func dog(w http.ResponseWriter, r *http.Request) {
	page := "Dog"
	// BUG FIX: the template-execution error was silently discarded.
	if err := tpl.ExecuteTemplate(w, "index.gohtml", page); err != nil {
		log.Printf("dog: executing template: %v", err)
	}
}
// me renders the personal page, passing the name as template data.
func me(w http.ResponseWriter, r *http.Request) {
	name := "Sergey"
	// BUG FIX: the template-execution error was silently discarded.
	if err := tpl.ExecuteTemplate(w, "me.gohtml", name); err != nil {
		log.Printf("me: executing template: %v", err)
	}
}
func main() {
http.HandleFunc("/", index)
http.HandleFunc("/dog/", dog)
http.HandleFunc("/me/", me)
if err := http.ListenAndServe(":8080", nil); err != nil {
log.Fatalf("ListenAndServe failed %v", err)
}
} |
package camo
import (
"runtime"
"testing"
)
// newRouteTestIface creates a TUN interface configured with fixed IPv4 and
// IPv6 test addresses. On any failure after creation, the interface is
// closed before the error is returned.
func newRouteTestIface() (*Iface, error) {
	iface, err := NewTunIface(DefaultMTU)
	if err != nil {
		return nil, err
	}
	setters := []func() error{
		func() error { return iface.SetIPv4("10.20.30.42/24") },
		func() error { return iface.SetIPv6("fd00:cafe:1234::2/64") },
	}
	for _, set := range setters {
		if err := set(); err != nil {
			iface.Close()
			return nil, err
		}
	}
	return iface, nil
}
// TestRoute exercises AddRoute/GetRoute/DelRoute against a throwaway TUN
// interface: add a route, verify GetRoute resolves it, delete it, then
// verify the destination no longer resolves to the same gateway/device.
func TestRoute(t *testing.T) {
	iface, err := newRouteTestIface()
	if err != nil {
		t.Fatal(err)
	}
	defer iface.Close()
	type args struct {
		dst           string
		routeDst      string
		gateway       string
		darwinGateway string
		dev           string
	}
	tests := []struct {
		name string
		args args
	}{
		{
			"IPv4", args{
				"10.20.31.0/24",
				"10.20.31.1",
				"10.20.30.41",
				"10.20.30.42",
				iface.Name(),
			},
		},
		{
			"IPv6", args{
				"fd00:cafe:1235::/64",
				"fd00:cafe:1235::1",
				"fd00:cafe:1234::1",
				"fd00:cafe:1234::1",
				iface.Name(),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// On darwin the gateway is the tunnel's own local address
			// (matches the address set in newRouteTestIface).
			gateway := tt.args.gateway
			if runtime.GOOS == "darwin" {
				gateway = tt.args.darwinGateway
			}
			// needClean guards the deferred cleanup: once the explicit
			// DelRoute below runs, the defer must not delete twice.
			needClean := true
			err := AddRoute(tt.args.dst, gateway, tt.args.dev)
			if err != nil {
				t.Fatal(err)
			}
			defer func() {
				if needClean {
					_ = DelRoute(tt.args.dst, gateway, tt.args.dev)
				}
			}()
			// An address inside dst should now resolve via the new route.
			gw, dev, err := GetRoute(tt.args.routeDst)
			if err != nil {
				t.Fatal(err)
			}
			if gw != gateway {
				t.Errorf("gw = %s, want %s", gw, gateway)
			}
			if dev != tt.args.dev {
				t.Errorf("dev = %s, want %s", dev, tt.args.dev)
			}
			needClean = false
			err = DelRoute(tt.args.dst, gateway, tt.args.dev)
			if err != nil {
				t.Fatal(err)
			}
			// After deletion the lookup may fail outright, or fall back to
			// another route; either way it must not match the removed one.
			gw, dev, err = GetRoute(tt.args.routeDst)
			if err == nil {
				if gw == gateway {
					t.Errorf("gw = %s, want != %s", gw, gateway)
				}
				if dev == tt.args.dev {
					t.Errorf("dev = %s, want != %s", dev, tt.args.dev)
				}
			}
		})
	}
}
// TestSetupNAT verifies that NAT setup and teardown succeed for IPv4 and
// IPv6 source ranges. Linux only.
func TestSetupNAT(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("linux only")
	}
	cases := []struct {
		name string
		src  string
	}{
		{"IPv4", "10.20.0.0/24"},
		{"IPv6", "fd01:cafe::/64"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cancel, err := SetupNAT(tc.src)
			if err != nil {
				t.Fatal(err)
			}
			if err := cancel(); err != nil {
				t.Error(err)
			}
		})
	}
}
// TestRedirectGateway verifies that RedirectGateway points the default
// route at the TUN device (checked by resolving a public address) and that
// the returned reset function restores routing without error.
func TestRedirectGateway(t *testing.T) {
	iface, err := newRouteTestIface()
	if err != nil {
		t.Fatal(err)
	}
	defer iface.Close()
	type args struct {
		dev           string
		gateway       string
		darwinGateway string
		dst           string
	}
	tests := []struct {
		name string
		args args
	}{
		{
			"IPv4", args{
				iface.Name(),
				"10.20.30.41",
				"10.20.30.42",
				"8.8.8.8",
			},
		},
		{
			"IPv6", args{
				iface.Name(),
				"fd00:cafe:1234::1",
				"fd00:cafe:1234::1",
				"2001:4860:4860::8888",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// On darwin the gateway is the tunnel's own local address
			// (matches the address set in newRouteTestIface).
			gateway := tt.args.gateway
			if runtime.GOOS == "darwin" {
				gateway = tt.args.darwinGateway
			}
			reset, err := RedirectGateway(tt.args.dev, gateway)
			if err != nil {
				t.Fatal(err)
			}
			defer func() {
				// Undo the redirection even if the assertions below fail.
				err := reset()
				if err != nil {
					t.Error(err)
				}
			}()
			// A public destination should now route through the TUN device.
			gw, dev, err := GetRoute(tt.args.dst)
			if err != nil {
				t.Fatal(err)
			}
			if gw != gateway {
				t.Errorf("gw = %s, want %s", gw, gateway)
			}
			if dev != tt.args.dev {
				t.Errorf("dev = %s, want %s", dev, tt.args.dev)
			}
		})
	}
}
|
package container_test
import (
"errors"
"github.com/genevieve/leftovers/gcp/container"
"github.com/genevieve/leftovers/gcp/container/fakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Specs for container.Cluster: Delete delegates to the injected
// ClustersClient (errors come back wrapped with a "Delete:" prefix), while
// Name and Type report static metadata.
var _ = Describe("Cluster", func() {
	var (
		client  *fakes.ClustersClient
		name    string
		cluster container.Cluster
	)

	// A fresh fake client and cluster for every spec.
	BeforeEach(func() {
		client = &fakes.ClustersClient{}
		name = "banana"
		cluster = container.NewCluster(client, "zone", name)
	})

	Describe("Delete", func() {
		It("deletes the resource", func() {
			err := cluster.Delete()
			Expect(err).NotTo(HaveOccurred())
			// The zone and cluster name must be forwarded to the client.
			Expect(client.DeleteClusterCall.Receives.Zone).To(Equal("zone"))
			Expect(client.DeleteClusterCall.Receives.Cluster).To(Equal(name))
		})

		Context("when the client returns an error", func() {
			BeforeEach(func() {
				client.DeleteClusterCall.Returns.Error = errors.New("kiwi")
			})

			It("returns a helpful error message", func() {
				err := cluster.Delete()
				Expect(err).To(MatchError("Delete: kiwi"))
			})
		})
	})

	Describe("Name", func() {
		It("returns the name", func() {
			Expect(cluster.Name()).To(Equal(name))
		})
	})

	Describe("Type", func() {
		It("returns the type", func() {
			Expect(cluster.Type()).To(Equal("Container Cluster"))
		})
	})
})
|
package main
import (
"context"
"fmt"
"os"
"github.com/docker/docker/client"
)
// Nagios return codes
const (
	NagiosOk       = 0
	NagiosWarning  = 1
	NagiosCritical = 2
	NagiosUnknown  = 3
)

// version is presumably injected at build time via -ldflags; it is not
// referenced in this chunk — TODO confirm it is used elsewhere.
var version string
// main checks the local Docker daemon Nagios-style: swarm errors are
// CRITICAL, an inactive swarm node or zero running containers is WARNING,
// and failure to reach the daemon at all is UNKNOWN.
func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		// BUG FIX: message said "Critical" while exiting with the
		// UNKNOWN code; keep them consistent.
		fmt.Println("Unknown - create docker client connection error:", err)
		os.Exit(NagiosUnknown)
	}
	info, err := cli.Info(context.Background())
	if err != nil {
		fmt.Println("Unknown - get docker info error:", err)
		os.Exit(NagiosUnknown)
	}
	msg := fmt.Sprintf("Containers( %d running, %d paused, %d stopped );Images( %d images );Swarm ( Status %s, Error %s )",
		info.ContainersRunning,
		info.ContainersPaused,
		info.ContainersStopped,
		info.Images,
		info.Swarm.LocalNodeState,
		info.Swarm.Error)
	if info.Swarm.Error != "" {
		fmt.Println("Critical - Swarm error message detected:", msg)
		os.Exit(NagiosCritical)
	}
	// NodeID is only non-empty when this daemon participates in a swarm.
	if info.Swarm.NodeID != "" && info.Swarm.LocalNodeState != "active" {
		fmt.Println("Warning - Swarm State not active:", msg)
		os.Exit(NagiosWarning)
	}
	if info.ContainersRunning == 0 {
		// BUG FIX: "runinng" typo in the user-facing message.
		fmt.Println("Warning - No containers running:", msg)
		os.Exit(NagiosWarning)
	}
	// BUG FIX: "OK - " plus Println's separator produced a double space.
	fmt.Println("OK -", msg)
	os.Exit(NagiosOk)
}
|
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"time"
"cloud.google.com/go/datastore"
"cloud.google.com/go/storage"
"github.com/gorilla/mux"
"google.golang.org/api/iterator"
)
// backtestHandler decodes a ComputeRequest, runs either a scan or a
// backtest over the requested ticker/period/time range, prunes the user's
// stored results to at most 10 unshared files, and responds with the
// result ID used by the websocket result stream.
func backtestHandler(w http.ResponseWriter, r *http.Request) {
	//create result ID for websocket packets + res storage
	rid := fmt.Sprintf("%v", time.Now().UnixNano())
	setupCORS(&w, r)
	if (*r).Method == "OPTIONS" {
		return
	}
	var req ComputeRequest
	err := json.NewDecoder(r.Body).Decode(&req)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	//get backtest res
	userID := req.User
	ticker := req.Ticker
	period := req.Period
	risk := req.Risk
	leverage := req.Leverage
	size := req.Size
	reqType := req.Operation
	reqProcess := req.Process
	retrieveCandles := req.RetrieveCandles
	candlePacketSize, err := strconv.Atoi(req.CandlePacketSize)
	if err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// BUG FIX: the original branches below also logged the earlier Atoi
	// error, which is nil at this point — err.Error() panicked — and then
	// fell through using a zero time. Log the actual error and reject.
	start, err2 := time.Parse(httpTimeFormat, req.TimeStart)
	if err2 != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err2.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	end, err3 := time.Parse(httpTimeFormat, req.TimeEnd)
	if err3 != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err3.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	//strat params
	rF, _ := strconv.ParseFloat(risk, 32)
	lF, _ := strconv.ParseFloat(leverage, 32)
	szF, _ := strconv.ParseFloat(size, 32)
	var profitCurve []ProfitCurveData
	var simTrades []SimulatedTradeData
	if reqType == "SCAN" {
		_, _ = runScan(userID, rid, ticker, period, start, end, candlePacketSize, scanPivotTrends, streamScanResData, reqProcess, retrieveCandles)
		//TODO: save scan results like backtest results?
	} else {
		_, profitCurve, simTrades = runBacktest(rF, lF, szF, userID, rid, ticker, period, start, end, candlePacketSize, strat1, streamBacktestResData, reqProcess, retrieveCandles)
		// Get all of user's shared history json data
		shareResult := getAllShareResult(userID)
		// delete an element in history if more than 10 items; files that
		// have been shared are exempt from pruning.
		bucketName := "res-" + userID
		bucketData := listFiles(bucketName)
		if len(bucketData) >= 10+len(shareResult) {
			var firstLoop bool = true
			var EarliestFile storage.ObjectAttrs
			for _, file := range bucketData {
				if firstLoop && !contains(shareResult, strings.Split(file.Name, ".")[0]) {
					EarliestFile = *file
					firstLoop = false
				} else if !firstLoop && file.Created.Before(EarliestFile.Created) && !contains(shareResult, strings.Split(file.Name, ".")[0]) {
					EarliestFile = *file
				}
			}
			deleteFile(bucketName, EarliestFile.Name)
		}
		//save result to bucket
		go saveSharableResult(totalCandles, profitCurve, simTrades, bucketName, ticker, period, req.TimeStart, req.TimeEnd, rF, lF, szF, rid)
	}
	// Respond with the result ID; results arrive via websocket.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(rid)
}
// getAllShareResult returns the result-file names of every ShareResult row
// owned by userID.
func getAllShareResult(userID string) []string {
	var shareResult []string
	query := datastore.NewQuery("ShareResult").Filter("UserID =", userID)
	t := dsClient.Run(ctx, query)
	for {
		var x ShareResult
		_, err := t.Next(&x)
		if err == iterator.Done {
			break
		}
		// BUG FIX: only iterator.Done stopped the loop; any other error
		// made it spin forever appending zero values. Log and stop.
		if err != nil {
			_, file, line, _ := runtime.Caller(0)
			go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
			break
		}
		shareResult = append(shareResult, x.ResultFileName)
	}
	return shareResult
}
// getAllShareResultHandler responds with the shared result file names for
// the user named in the "user" query parameter, as a JSON array.
func getAllShareResultHandler(w http.ResponseWriter, r *http.Request) {
	setupCORS(&w, r)
	if (*r).Method == "OPTIONS" {
		return
	}
	if flag.Lookup("test.v") != nil {
		initDatastore()
	}
	// BUG FIX: indexing Query()["user"][0] panicked when the parameter was
	// missing; reject such requests instead.
	userID := r.URL.Query().Get("user")
	if userID == "" {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	shareResult := getAllShareResult(userID)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(shareResult)
}
// shareResultHandler registers a shared backtest result: it assigns a
// unique share ID, stores the row in datastore, and echoes the stored
// entity back as JSON.
func shareResultHandler(w http.ResponseWriter, r *http.Request) {
	setupCORS(&w, r)
	if (*r).Method == "OPTIONS" {
		return
	}
	if flag.Lookup("test.v") != nil {
		initDatastore()
	}
	uniqueURL := fmt.Sprintf("%v", time.Now().UnixNano()) + generateRandomID(20)
	var share ShareResult
	err := json.NewDecoder(r.Body).Decode(&share)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// add new row to DB
	share.ShareID = uniqueURL
	kind := "ShareResult"
	newKey := datastore.IncompleteKey(kind, nil)
	if _, err := dsClient.Put(ctx, newKey, &share); err != nil {
		// BUG FIX: log.Fatalf here killed the entire server on a failed
		// insert (and its message wrongly said "delete Bot"). Log and
		// return a 500 instead.
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		http.Error(w, "failed to save share result", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(share)
}
// getShareResultHandler loads the shared backtest result identified by the
// "share" query parameter, streams it to the owner's websocket channel,
// and responds with [risk, leverage, accountSize, resultID] as strings.
func getShareResultHandler(w http.ResponseWriter, r *http.Request) {
	setupCORS(&w, r)
	if (*r).Method == "OPTIONS" {
		return
	}
	// BUG FIX: Query()["share"][0] panicked when the parameter was absent.
	shareID := r.URL.Query().Get("share")
	if shareID == "" {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	var shareResult ShareResult
	query := datastore.NewQuery("ShareResult").Filter("ShareID =", shareID)
	t := dsClient.Run(ctx, query)
	// (renamed from "error", which shadowed the builtin type)
	if _, err := t.Next(&shareResult); err != nil {
		// Previously only logged; proceeding with a zero ShareResult
		// produced a meaningless bucket lookup. Treat as not found.
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusNotFound)
		return
	}
	//create result ID for websocket packets + res storage
	rid := fmt.Sprintf("%v", time.Now().UnixNano())
	//get backtest hist file
	storageClient, err := storage.NewClient(ctx)
	if err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer storageClient.Close()
	ctx, cancel := context.WithTimeout(ctx, time.Second*1000)
	defer cancel()
	userID := shareResult.UserID
	bucketName := "res-" + userID
	backtestResID := shareResult.ResultFileName
	objName := backtestResID + ".json"
	// BUG FIX: the reader error was ignored and rc.Close() was deferred on
	// a nil reader, panicking whenever the object did not exist.
	rc, err := storageClient.Bucket(bucketName).Object(objName).NewReader(ctx)
	if err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer rc.Close()
	// Progress bar tuned to show 10 percent done
	layout := "2006-01-02T15:04:05.000Z"
	startTime, _ := time.Parse(layout, "2021-01-01T00:00:00.000Z")
	endTime, _ := time.Parse(layout, "2021-01-01T00:59:00.000Z")
	progressBar(userID, rid, 6, startTime, endTime, false)
	backtestResByteArr, err := ioutil.ReadAll(rc)
	if err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	var rawHistory historyResFile
	var candleData []CandlestickChartData
	var profitData []ProfitCurveData
	var simData []SimulatedTradeData
	json.Unmarshal(backtestResByteArr, &rawHistory)
	fmt.Printf("\nLen of share: %v\n", len(rawHistory.Candlestick))
	risk := rawHistory.Risk
	lev := rawHistory.Leverage
	accSize := rawHistory.AccountSize
	// The stored fields are themselves JSON-encoded strings.
	json.Unmarshal([]byte(rawHistory.Candlestick), &candleData)
	json.Unmarshal([]byte(rawHistory.ProfitCurve), &profitData)
	json.Unmarshal([]byte(rawHistory.SimulatedTrades), &simData)
	// Progress bar tuned to show half done
	start, _ := time.Parse(layout, "2021-01-01T00:00:00.000Z")
	end, _ := time.Parse(layout, "2021-01-01T00:59:00.000Z")
	progressBar(userID, rid, 30, start, end, false)
	// Send history data to frontend
	streamBacktestResData(userID, rid, candleData, profitData, simData)
	ret := []string{fmt.Sprint(risk), fmt.Sprint(lev), fmt.Sprint(accSize), backtestResID}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(ret)
}
// getTickersHandler serves the static Binance perpetual-futures symbol
// list from disk as JSON.
func getTickersHandler(w http.ResponseWriter, r *http.Request) {
	setupCORS(&w, r)
	if (*r).Method == "OPTIONS" {
		return
	}
	data, err := ioutil.ReadFile("./json-data/symbols-binance-fut-perp.json")
	if err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		// BUG FIX: the original fell through and served an empty list
		// built from nil data; report the failure instead.
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	var t []CoinAPITicker
	json.Unmarshal(data, &t)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(t)
}
// getBacktestHistoryHandler lists the stored result file names in the
// user's result bucket as a JSON array.
func getBacktestHistoryHandler(w http.ResponseWriter, r *http.Request) {
	setupCORS(&w, r)
	if (*r).Method == "OPTIONS" {
		return
	}
	// BUG FIX: indexing Query()["user"][0] panicked on a missing param.
	userID := r.URL.Query().Get("user")
	if userID == "" {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	bucketData := listFiles("res-" + userID)
	var listName []string
	for _, l := range bucketData {
		listName = append(listName, l.Name)
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(listName)
}
// getBacktestResHandler re-streams the stored backtest result selected by
// the {id} path variable to the user's websocket channel and responds with
// [risk, leverage, accountSize].
func getBacktestResHandler(w http.ResponseWriter, r *http.Request) {
	setupCORS(&w, r)
	if (*r).Method == "OPTIONS" {
		return
	}
	// BUG FIX: Query()[...][0] panicked on missing parameters; Get returns
	// "" instead, which Atoi rejects, and the empty user is checked below.
	if _, err := strconv.Atoi(r.URL.Query().Get("candlePacketSize")); err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	userID := r.URL.Query().Get("user")
	if userID == "" {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	//create result ID for websocket packets + res storage
	rid := fmt.Sprintf("%v", time.Now().UnixNano())
	//get backtest hist file
	storageClient, err := storage.NewClient(ctx)
	if err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer storageClient.Close()
	ctx, cancel := context.WithTimeout(ctx, time.Second*1000)
	defer cancel()
	bucketName := "res-" + userID
	backtestResID, _ := url.QueryUnescape(mux.Vars(r)["id"])
	objName := backtestResID + ".json"
	// BUG FIX: the reader error was ignored and rc.Close() was deferred on
	// a nil reader, panicking whenever the object did not exist.
	rc, err := storageClient.Bucket(bucketName).Object(objName).NewReader(ctx)
	if err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer rc.Close()
	// Progress bar tuned to show 10 percent done
	layout := "2006-01-02T15:04:05.000Z"
	startTime, _ := time.Parse(layout, "2021-01-01T00:00:00.000Z")
	endTime, _ := time.Parse(layout, "2021-01-01T00:59:00.000Z")
	progressBar(userID, rid, 6, startTime, endTime, false)
	backtestResByteArr, err := ioutil.ReadAll(rc)
	if err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(err.Error(), fmt.Sprintf("<%v> %v", line, file))
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	var rawHistory historyResFile
	var candleData []CandlestickChartData
	var profitData []ProfitCurveData
	var simData []SimulatedTradeData
	json.Unmarshal(backtestResByteArr, &rawHistory)
	fmt.Printf("\nLen of load: %v\n", len(rawHistory.Candlestick))
	risk := rawHistory.Risk
	lev := rawHistory.Leverage
	accSize := rawHistory.AccountSize
	// The stored fields are themselves JSON-encoded strings.
	json.Unmarshal([]byte(rawHistory.Candlestick), &candleData)
	json.Unmarshal([]byte(rawHistory.ProfitCurve), &profitData)
	json.Unmarshal([]byte(rawHistory.SimulatedTrades), &simData)
	// Progress bar tuned to show half done
	start, _ := time.Parse(layout, "2021-01-01T00:00:00.000Z")
	end, _ := time.Parse(layout, "2021-01-01T00:59:00.000Z")
	progressBar(userID, rid, 30, start, end, false)
	// Send history data to frontend
	streamBacktestResData(userID, rid, candleData, profitData, simData)
	ret := []float64{risk, lev, accSize}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(ret)
}
// Staging area for saveCandlesToJson: saveCandlesPrepared stores the most
// recent candle request here so a later "save" request can persist it.
// NOTE(review): shared mutable state across requests — concurrent requests
// race on these; confirm the single-user assumption.
var startTimeSave time.Time
var endTimeSave time.Time
var periodSave string
var tickerSave string
var allCandlesSave []Candlestick
var userIDSave string
// saveCandlesToJson persists the most recently prepared candle set (see
// saveCandlesPrepared) into the user's saved-candles bucket.
func saveCandlesToJson(w http.ResponseWriter, r *http.Request) {
	setupCORS(&w, r)
	if r.Method == "OPTIONS" {
		return
	}
	bucket := "saved_candles-" + userIDSave
	from := startTimeSave.Format("2006-01-02_15:04:05")
	to := endTimeSave.Format("2006-01-02_15:04:05")
	saveCandlesBucket(allCandlesSave, bucket, tickerSave, periodSave, from, to)
	_, file, line, _ := runtime.Caller(0)
	go Log("Candles Saved As JSON In Storage", fmt.Sprintf("<%v> %v", line, file))
}
// getSavedCandlesHandler lists the file names in the user's saved-candles
// bucket as a JSON array.
func getSavedCandlesHandler(w http.ResponseWriter, r *http.Request) {
	setupCORS(&w, r)
	if (*r).Method == "OPTIONS" {
		return
	}
	// BUG FIX: indexing Query()["user"][0] panicked on a missing param.
	userID := r.URL.Query().Get("user")
	if userID == "" {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	bucketData := listFiles("saved_candles-" + userID)
	var listName []string
	for _, l := range bucketData {
		listName = append(listName, l.Name)
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(listName)
}
// saveCandlesPrepared stages the parameters of the most recent candle
// fetch in package-level variables so a later call to saveCandlesToJson
// can persist them.
// NOTE(review): every call overwrites the globals; concurrent requests
// race here.
func saveCandlesPrepared(startTime, endTime time.Time, period, ticker string, allCandles []Candlestick, userID string) {
	startTimeSave = startTime
	endTimeSave = endTime
	periodSave = period
	tickerSave = ticker
	allCandlesSave = allCandles
	userIDSave = userID
}
// availableCandlesInRedis responds with one CalendarData entry per day in
// the ticker/period's known date ranges: Count "1" when redis records some
// candles missing for that day, "2" when the day is complete.
func availableCandlesInRedis(w http.ResponseWriter, r *http.Request) {
	setupCORS(&w, r)
	if (*r).Method == "OPTIONS" {
		return
	}
	// NOTE(review): these [0] lookups panic if the query params are absent.
	ticker := r.URL.Query()["ticker"][0]
	period := r.URL.Query()["period"][0]
	// Get calendar range from datastore
	var calendar Calendar
	query := datastore.NewQuery("Calendar").Filter("Ticker =", ticker).Filter("Period =", period)
	t := dsClient.Run(ctx, query)
	// NOTE(review): "error" shadows the builtin type; a lookup failure is
	// only logged and the zero-value calendar is used below.
	_, error := t.Next(&calendar)
	if error != nil {
		_, file, line, _ := runtime.Caller(0)
		Log(error.Error(), fmt.Sprintf("<%v> %v", line, file))
	}
	// Get all missing candles from redis
	// (this local ctx shadows the package-level one used above)
	ctx := context.Background()
	key := ticker + ":" + period
	missingCandlesRedis, err := rdbChartmaster.SMembers(ctx, key).Result()
	if err != nil {
		_, file, line, _ := runtime.Caller(0)
		go Log(fmt.Sprintf("redis cache candlestick data err: %v\n", err),
			fmt.Sprintf("<%v> %v", line, file))
		// NOTE(review): returns without writing any status or body.
		return
	}
	// Remove specific time and only store year, month, and day
	var missingCandlesOnlyDate []string
	for _, m := range missingCandlesRedis {
		onlyDate := strings.Split(m, "T")[0]
		if !contains(missingCandlesOnlyDate, onlyDate) {
			missingCandlesOnlyDate = append(missingCandlesOnlyDate, strings.Split(m, "T")[0])
		}
	}
	// Create data that has all days and counts
	var calendarData []CalendarData
	for _, c := range calendar.DateRange {
		// Each range entry is "start~end" in the layout below.
		dateRange := strings.Split(c, "~")
		layout := "2006-01-02T15:04:05.0000000Z"
		startRange, _ := time.Parse(layout, dateRange[0])
		endRange, _ := time.Parse(layout, dateRange[1])
		for i := startRange; i.Before(endRange); i = i.AddDate(0, 0, 1) {
			var x CalendarData
			// "1" = day has missing candles, "2" = day fully available.
			if contains(missingCandlesOnlyDate, i.Format("2006-01-02")) {
				x.Count = "1"
			} else {
				x.Count = "2"
			}
			x.Day = i.Format("2006-01-02")
			calendarData = append(calendarData, x)
		}
	}
	// createCSVForAvailableCandles(ticker, period, calendar.DateRange)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(calendarData)
}
// createCSVForAvailableCandles builds the availability file for a
// ticker/period (see candlesAvailabilityResFile), uploads it to the
// "candles_availability" GCS bucket as "<ticker>:<period>.csv", and removes
// the local copy. Only referenced from a commented-out call in this chunk.
func createCSVForAvailableCandles(ticker, period string, dateRange []string) {
	resFileName := candlesAvailabilityResFile(ticker, period, dateRange)
	reqBucketname := "candles_availability"
	// NOTE(review): client/open/copy errors are printed but not propagated.
	storageClient, _ := storage.NewClient(ctx)
	defer storageClient.Close()
	ctx, cancel := context.WithTimeout(ctx, time.Second*1000)
	defer cancel()
	//if bucket doesn't exist, create new
	buckets, _ := listBuckets()
	var bucketName string
	for _, bn := range buckets {
		if bn == reqBucketname {
			bucketName = bn
		}
	}
	if bucketName == "" {
		bucket := storageClient.Bucket(reqBucketname)
		if err := bucket.Create(ctx, googleProjectID, nil); err != nil {
			fmt.Printf("Failed to create bucket: %v", err)
		}
		bucketName = reqBucketname
	}
	//create obj
	object := ticker + ":" + period + ".csv"
	// Open local file
	f, err := os.Open(resFileName)
	if err != nil {
		fmt.Printf("os.Open: %v", err)
	}
	defer f.Close()
	// Shorter timeout scoped to the upload itself.
	ctx2, cancel := context.WithTimeout(ctx, time.Second*50)
	defer cancel()
	// upload object with storage.Writer
	wc := storageClient.Bucket(bucketName).Object(object).NewWriter(ctx2)
	if _, err = io.Copy(wc, f); err != nil {
		fmt.Printf("io.Copy: %v", err)
	}
	if err := wc.Close(); err != nil {
		fmt.Printf("Writer.Close: %v", err)
	}
	//remove local file
	_ = os.Remove(resFileName)
}
func candlesAvailabilityResFile(ticker, period string, dateRange []string) string {
var fileString string
// Get all missing candles from redis
// ctx := context.Background()
// key := ticker + ":" + period
// missingCandlesRedis, err := rdbChartmaster.SMembers(ctx, key).Result()
// if err != nil {
// _, file, line, _ := runtime.Caller(0)
// go Log(fmt.Sprintf("redis cache candlestick data err: %v\n", err),
// fmt.Sprintf("<%v> %v", line, file))
// }
// firstRow := []string{"day", "count"}
// fileString = "day + count"
// csvWrite(firstRow, ticker+":"+period+".csv")
first := true
for _, c := range dateRange {
dateRange := strings.Split(c, "~")
layout := "2006-01-02T15:04:05.0000000Z"
startRange, _ := time.Parse(layout, dateRange[0])
endRange, _ := time.Parse(layout, dateRange[1])
for i := startRange; i.Before(endRange); i = i.AddDate(0, 0, 1) {
// row := []string{i.Format("2006-01-02"), "2"}
// fileString = append(fileString, row...)
if first {
fileString = i.Format("2006-01-02") + "+" + "2"
first = false
} else {
fileString = fileString + "=" + i.Format("2006-01-02") + "+" + "2"
}
// csvAppend(row, ticker+":"+period+".csv")
}
}
//save candlesticks
file, _ := json.Marshal(fileString)
fileName := fmt.Sprintf("%v.csv", ticker+":"+period)
_ = ioutil.WriteFile(fileName, file, 0644)
return fileName
}
|
package commit
import (
"sort"
"time"
)
type Commit struct {
Revision string `json:"revision"`
Author string `json:"author,omitempty"`
Email string `json:"email,omitempty"`
Date string `json:"date,omitempty"`
Message string `json:"message,omitempty"`
}
func (c Commit) GetDate() *time.Time {
if c.Date == "" {
return nil
}
d, err := time.Parse("Mon 2006-01-02 15:04:05 -0700", c.Date)
if err == nil {
return &d
}
d, err = time.Parse("Mon 2006-1-2 15:04:05 -0700", c.Date)
if err == nil {
return &d
}
d, err = time.Parse("Mon Jan 02 15:04:05 2006 -0700", c.Date)
if err == nil {
return &d
}
d, err = time.Parse("Mon Jan 2 15:04:05 2006 -0700", c.Date)
if err == nil {
return &d
}
d, err = time.Parse("2006-01-02 15:04:05 -0700", c.Date)
if err == nil {
return &d
}
d, err = time.Parse("2006-1-2 15:04:05 -0700", c.Date)
if err == nil {
return &d
}
return nil
}
// Commits is a list of Commit values.
type Commits []Commit

// CommitsAsc orders oldest-first; CommitsDesc orders newest-first.
type CommitsAsc Commits
type CommitsDesc Commits

// OrderByDate sorts c in place by commit date. dir is "ASC" (the default
// when empty) or "DESC"; any other value leaves the order unchanged.
func (c Commits) OrderByDate(dir string) {
	switch dir {
	case "", "ASC":
		sort.Sort(CommitsAsc(c))
	case "DESC":
		sort.Sort(CommitsDesc(c))
	}
}
// Reverse reverses the order of c in place.
func (c Commits) Reverse() {
	for left, right := 0, len(c)-1; left < right; left, right = left+1, right-1 {
		c[left], c[right] = c[right], c[left]
	}
}
// sort utils.

// Len reports the number of commits.
func (s CommitsAsc) Len() int { return len(s) }

// Swap exchanges the commits at positions i and j.
func (s CommitsAsc) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less orders commits ascending by date; commits without a parseable date
// are treated as "not less" and keep their relative position.
func (s CommitsAsc) Less(i, j int) bool {
	left, right := s[i].GetDate(), s[j].GetDate()
	if left == nil || right == nil {
		return false
	}
	return left.Before(*right)
}
// Len reports the number of commits.
func (s CommitsDesc) Len() int { return len(s) }

// Swap exchanges the commits at positions i and j.
func (s CommitsDesc) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less orders commits descending by date; commits without a parseable date
// are treated as "not less" and keep their relative position.
func (s CommitsDesc) Less(i, j int) bool {
	left, right := s[i].GetDate(), s[j].GetDate()
	if left == nil || right == nil {
		return false
	}
	return left.After(*right)
}
|
package main
import (
"testing"
)
// TestCode drives catsAndMouse through a small table of scenarios.
func TestCode(t *testing.T) {
	cases := []struct {
		catA, catB, mouse int
		want              string
	}{
		{catA: 1, catB: 2, mouse: 3, want: "Cat B"},
		{catA: 1, catB: 3, mouse: 2, want: "Mouse C"},
	}
	for _, tc := range cases {
		got := catsAndMouse(tc.catA, tc.catB, tc.mouse)
		if got != tc.want {
			t.Errorf(
				"For Cat A=%v, Cat B=%v, Mouse=%v; Expected %v; Got %v",
				tc.catA, tc.catB, tc.mouse, tc.want, got,
			)
		}
	}
}
|
/*
* ALISI client
*
* This is the client API of ALISI. Each device will expose this API in order to be identified by ALISI compliant control units.
*
* API version: 1.0.0
* Contact: matteo.sovilla@studenti.unipd.it
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package datamodel
// EncodedClaim is a signed, JWT-encoded claim exchanged over the ALISI
// client API.
type EncodedClaim struct {
	// Id identifies the claim; optional.
	Id string `json:"id,omitempty"`
	// JWT-encoded claim
	EncodedData string `json:"encodedData,omitempty"`
	// der encoding of a typical ecdsa signature
	Signature string `json:"signature,omitempty"`
}
|
/*
Copyright 2021-2023 ICS-FORTH.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// Call is the Schema for the Call API.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Call struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec describes the desired behaviour of the Call job.
	Spec CallSpec `json:"spec,omitempty"`

	// Status holds the most recently observed state.
	Status CallStatus `json:"status,omitempty"`
}
// MatchOutputs defined a set of remote command outputs that must be matched. The limit for both Stdout and Stderr
// is 1024 characters.
// A nil field presumably means "do not match this stream" — TODO confirm
// with the matcher implementation.
type MatchOutputs struct {
	// Stdout is a regex that describes the expected output from stdout. It cannot be longer than 1024 characters.
	// +optional
	// +kubebuilder:validation:MaxLength=1024
	Stdout *string `json:"stdout,omitempty"`

	// Stderr is a regex that describes the expected output from stderr. It cannot be longer than 1024 characters.
	// +optional
	// +kubebuilder:validation:MaxLength=1024
	Stderr *string `json:"stderr,omitempty"`
}
// CallSpec defines the desired state of Call.
type CallSpec struct {
	// Callable is the name of the endpoint that will be called
	// BUG FIX: kubebuilder validation markers are case-sensitive CamelCase;
	// the lowercase "minlength" marker was silently ignored.
	// +kubebuilder:validation:MinLength=1
	Callable string `json:"callable"`

	// Services is a list of services that will be stopped.
	// BUG FIX: "minimum" applies to numeric fields and was ignored here;
	// the intended non-empty-list constraint is MinItems.
	// +kubebuilder:validation:MinItems=1
	Services []string `json:"services"`

	/*
		Job Scheduling
	*/

	// Schedule defines the interval between the invocations of the callable.
	// +optional
	Schedule *TaskSchedulerSpec `json:"schedule,omitempty"`

	// Expect declares a list of expected outputs. The number of expected outputs must be the same
	// as the number of defined services.
	// +optional
	Expect []MatchOutputs `json:"expect,omitempty"`

	/*
		Execution Flow
	*/

	// Suspend forces the Controller to stop scheduling any new jobs until it is resumed. Defaults to false.
	// +optional
	Suspend *bool `json:"suspend,omitempty"`

	// SuspendWhen automatically sets Suspend to True, when certain conditions are met.
	// +optional
	SuspendWhen *ConditionalExpr `json:"suspendWhen,omitempty"`

	// Tolerate specifies the conditions under which the call will fail. If undefined, the call fails
	// immediately when a call to service has failed.
	// +optional
	Tolerate *TolerateSpec `json:"tolerate,omitempty"`
}
// CallStatus defines the observed state of Call.
type CallStatus struct {
	// Lifecycle embeds the shared reconciliation phase/condition fields.
	Lifecycle `json:",inline"`

	// QueuedJobs is a list of services scheduled for stopping.
	// +optional
	QueuedJobs []Callable `json:"queuedJobs,omitempty"`

	// ExpectedTimeline is the result of evaluating a timeline distribution into specific points in time.
	// +optional
	ExpectedTimeline Timeline `json:"expectedTimeline,omitempty"`

	// ScheduledJobs points to the next QueuedJobs.
	ScheduledJobs int `json:"scheduledJobs,omitempty"`

	// LastScheduleTime provide information about the last time a Service was successfully scheduled.
	LastScheduleTime metav1.Time `json:"lastScheduleTime,omitempty"`
}
// GetReconcileStatus returns the job's current lifecycle for the
// reconciliation machinery.
func (in *Call) GetReconcileStatus() Lifecycle {
	return in.Status.Lifecycle
}
// SetReconcileStatus stores the given lifecycle on the job's status.
func (in *Call) SetReconcileStatus(lifecycle Lifecycle) {
	in.Status.Lifecycle = lifecycle
}
// +kubebuilder:object:root=true

// CallList contains a list of Call jobs.
type CallList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the Call jobs in this list page.
	Items []Call `json:"items"`
}
// init registers the Call kinds with the scheme builder so clients can
// encode and decode them.
func init() {
	SchemeBuilder.Register(&Call{}, &CallList{})
}
|
package middleware
import (
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
"hospital-go/util"
"net/http"
)
// Header carries the bound request headers. The explicit "header" tag is
// what gin's ShouldBindHeader reads; the original relied on the implicit
// field-name fallback with only a json tag.
type Header struct {
	Authorization string `header:"Authorization" json:"authorization"`
}
// JWT is a gin middleware that rejects requests whose Authorization header
// is missing, expired, or fails util.ParseToken, aborting the chain with
// 401 (or 400 for malformed headers).
func JWT() gin.HandlerFunc {
	return func(c *gin.Context) {
		header := Header{}
		if err := c.ShouldBindHeader(&header); err != nil {
			// BUG FIX: the original wrote a 400 response here but kept
			// processing the request; abort instead.
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			c.Abort()
			return
		}
		token := header.Authorization
		msg := "OK"
		if token == "" {
			msg = "Token Invalid"
		} else if _, err := util.ParseToken(token); err != nil {
			// BUG FIX: the original asserted err.(*jwt.ValidationError)
			// unconditionally, panicking when ParseToken returned any
			// other error type.
			if vErr, ok := err.(*jwt.ValidationError); ok && vErr.Errors == jwt.ValidationErrorExpired {
				msg = "Token Expired"
			} else {
				msg = "Token Error"
			}
		}
		if msg != "OK" {
			c.JSON(http.StatusUnauthorized, gin.H{"message": msg})
			c.Abort()
			return
		}
		c.Next()
	}
}
|
package npilib
import (
"encoding/xml"
c "github.com/arkaev/npilib/commands"
)
// Parser converts a raw XML node into its typed NCC command value.
type Parser interface {
	//Unmarshal node to command pojo
	Unmarshal(data []byte) c.NCCCommand
}
// AuthenificateRqParser parses "Authenificate" requests.
// NOTE(review): embedding the Parser interface leaves a nil interface
// inside the struct; it is shadowed by the concrete Unmarshal below.
type AuthenificateRqParser struct {
	Parser
}

//Unmarshal "Authenificate" command
// The xml.Unmarshal error is discarded: malformed input yields a
// zero-valued command.
func (h *AuthenificateRqParser) Unmarshal(data []byte) c.NCCCommand {
	var auth c.AuthentificateRq
	xml.Unmarshal(data, &auth)
	return &auth
}
//FullBuddyListHandler for "FullBuddyList" command
// NOTE(review): embedding the Parser interface leaves a nil interface
// inside the struct; it is shadowed by the concrete Unmarshal below.
type FullBuddyListParser struct {
	Parser
}

//Unmarshal "FullBuddyList" command
// The xml.Unmarshal error is discarded: malformed input yields a
// zero-valued command.
func (h *FullBuddyListParser) Unmarshal(data []byte) c.NCCCommand {
	var rs c.FullBuddyListRs
	xml.Unmarshal(data, &rs)
	return &rs
}
// RegisterPeerRsParser parses "RegisterPeer" responses.
type RegisterPeerRsParser struct {
	Parser
}

// Unmarshal decodes a "RegisterPeerRs" command; the xml.Unmarshal error is
// discarded, so malformed input yields a zero-valued command.
func (p *RegisterPeerRsParser) Unmarshal(data []byte) c.NCCCommand {
	var rq c.RegisterPeerRs
	xml.Unmarshal(data, &rq)
	return &rq
}
|
/*
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ops_manager
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
"io"
"io/ioutil"
"omg-cli/config"
"omg-cli/version"
"github.com/gosuri/uilive"
"github.com/pivotal-cf/om/api"
"github.com/pivotal-cf/om/commands"
"github.com/pivotal-cf/om/extractor"
"github.com/pivotal-cf/om/formcontent"
"github.com/pivotal-cf/om/network"
"github.com/pivotal-cf/om/progress"
)
const (
	// requestTimeout bounds each HTTP call to Ops Manager, in seconds.
	requestTimeout = 1800
	// poolingIntervalSec is presumably the delay between status polls for
	// long-running tasks; it is not referenced in this chunk — confirm.
	poolingIntervalSec = 10
)
// Sdk is an authenticated session with a single Ops Manager instance.
type Sdk struct {
	target string
	creds  config.OpsManagerCredentials
	logger *log.Logger
	// unauthenticatedClient serves endpoints available before auth is
	// configured (e.g. initial setup); client is the OAuth-backed one.
	unauthenticatedClient network.UnauthenticatedClient
	client                network.OAuthClient
	// httpClient is a raw client (TLS verification per creds) used for
	// endpoints not covered by the om libraries.
	httpClient *http.Client
}
// NewSdk creates an authenticated session and object to interact with Ops Manager
func NewSdk(target string, creds config.OpsManagerCredentials, logger log.Logger) (*Sdk, error) {
	client, err := network.NewOAuthClient(target, creds.Username, creds.Password, "", "", creds.SkipSSLVerification, true, time.Duration(requestTimeout)*time.Second)
	if err != nil {
		return nil, err
	}
	// Tag all output from this SDK instance in the shared log stream.
	logger.SetPrefix(fmt.Sprintf("%s[OM SDK] ", logger.Prefix()))
	// Dedicated transport so SkipSSLVerification also applies to raw calls
	// made through httpClient.
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: creds.SkipSSLVerification},
	}
	// NOTE(review): logger is a value parameter, so &logger points at this
	// function's copy, not the caller's logger.
	return &Sdk{target: target,
		creds:                 creds,
		logger:                &logger,
		unauthenticatedClient: network.NewUnauthenticatedClient(target, creds.SkipSSLVerification, time.Duration(requestTimeout)*time.Second),
		client:                client,
		httpClient:            &http.Client{Transport: tr},
	}, nil
}
// SetupAuth configures the initial username, password, and decryptionPhrase
func (om *Sdk) SetupAuth() error {
	cmd := commands.NewConfigureAuthentication(
		api.NewSetupService(om.unauthenticatedClient),
		om.logger)
	args := []string{
		"--username", om.creds.Username,
		"--password", om.creds.Password,
		"--decryption-passphrase", om.creds.DecryptionPhrase,
	}
	return cmd.Execute(args)
}
// Unlock decrypts Ops Manager. This is needed after a reboot before attempting to authenticate.
// This task runs asynchronously. Query the status by invoking ReadyForAuth.
func (om *Sdk) Unlock() error {
	om.logger.Println("decrypting Ops Manager")
	unlockReq := UnlockRequest{om.creds.DecryptionPhrase}
	body, err := json.Marshal(&unlockReq)
	if err != nil {
		// Previously this error was silently shadowed by the next call.
		return err
	}
	req, err := om.newRequest("PUT", fmt.Sprintf("%s/api/v0/unlock", om.target), bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := om.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Drain the body so the transport can reuse the connection.
	io.Copy(ioutil.Discard, resp.Body)
	// NOTE(review): the response status is intentionally not checked here;
	// unlock failures surface later via ReadyForAuth.
	return nil
}
// ReadyForAuth checks if the Ops Manager authentication system is ready
func (om *Sdk) ReadyForAuth() bool {
	req, err := om.newRequest("GET", fmt.Sprintf("%s/login/ensure_availability", om.target), nil)
	if err != nil {
		return false
	}
	resp, err := om.httpClient.Do(req)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return false
	}
	// When OpsMan is online/decrypted it redirects its auth system. UAA is expected for OMG.
	return strings.Contains(resp.Request.URL.Path, "/uaa/login")
}
// SetupBosh applies the provided configuration to the BOSH director tile
func (om *Sdk) SetupBosh(iaas commands.GCPIaaSConfiguration, director commands.DirectorConfiguration, azs commands.AvailabilityZonesConfiguration, networks commands.NetworksConfiguration, networkAssignment commands.NetworkAssignment, resources commands.ResourceConfiguration) error {
	boshService := api.NewBoshFormService(om.client)
	diagnosticService := api.NewDiagnosticService(om.client)
	cmd := commands.NewConfigureBosh(boshService, diagnosticService, om.logger)
	// Each configuration section is JSON-marshaled and passed as the value
	// of its CLI flag; the repeated marshal/check pairs are folded into one
	// loop to keep flag and section from drifting apart.
	sections := []struct {
		flag string
		cfg  interface{}
	}{
		{"--iaas-configuration", iaas},
		{"--director-configuration", director},
		{"--az-configuration", azs},
		{"--networks-configuration", networks},
		{"--network-assignment", networkAssignment},
		{"--resource-configuration", resources},
	}
	args := make([]string, 0, 2*len(sections))
	for _, s := range sections {
		b, err := json.Marshal(s.cfg)
		if err != nil {
			return err
		}
		args = append(args, s.flag, string(b))
	}
	return cmd.Execute(args)
}
// ApplyChanges deploys pending changes to Ops Manager
func (om *Sdk) ApplyChanges() error {
	cmd := commands.NewApplyChanges(
		api.NewInstallationsService(om.client),
		commands.NewLogWriter(os.Stdout),
		om.logger,
		poolingIntervalSec)
	return cmd.Execute(nil)
}
// ApplyDirector deploys pending changes to the BOSH director only,
// skipping deployment of all product tiles.
func (om *Sdk) ApplyDirector() error {
	installationsService := api.NewInstallationsService(om.client)
	logWriter := commands.NewLogWriter(os.Stdout)
	cmd := commands.NewApplyChanges(installationsService, logWriter, om.logger, poolingIntervalSec)
	return cmd.Execute([]string{"--skip-deploy-products"})
}
// UploadProduct pushes a given file located locally at path to the target
func (om *Sdk) UploadProduct(path string) error {
	form, err := formcontent.NewForm()
	if err != nil {
		return err
	}
	service := api.NewAvailableProductsService(om.client, progress.NewBar(), uilive.New())
	cmd := commands.NewUploadProduct(form, extractor.ProductUnzipper{}, service, om.logger)
	return cmd.Execute([]string{"--product", path})
}
// UploadStemcell pushes a given stemcell located locally at path to the target
func (om *Sdk) UploadStemcell(path string) error {
	form, err := formcontent.NewForm()
	if err != nil {
		return err
	}
	cmd := commands.NewUploadStemcell(
		form,
		api.NewUploadStemcellService(om.client, progress.NewBar()),
		api.NewDiagnosticService(om.client),
		om.logger)
	return cmd.Execute([]string{"--stemcell", path})
}
// StageProduct moves a given name, version to the list of tiles that will be deployed
func (om *Sdk) StageProduct(tile config.OpsManagerMetadata) error {
	cmd := commands.NewStageProduct(
		api.NewStagedProductsService(om.client),
		api.NewDeployedProductsService(om.client),
		api.NewAvailableProductsService(om.client, progress.NewBar(), uilive.New()),
		api.NewDiagnosticService(om.client),
		om.logger)
	return cmd.Execute([]string{
		"--product-name", tile.Name,
		"--product-version", tile.Version,
	})
}
// Online checks if Ops Manager is running on the target.
func (om *Sdk) Online() bool {
	req, err := om.newRequest("GET", om.target, nil)
	if err != nil {
		return false
	}
	// Any response below 5xx counts as "up": even 4xx proves the server
	// is answering.
	if resp, err := om.httpClient.Do(req); err == nil {
		defer resp.Body.Close()
		return resp.StatusCode < 500
	}
	return false
}
// AvaliableProducts lists products that are uploaded to Ops Manager.
// (Note: the misspelling of "Available" is part of the exported API and is
// kept for backward compatibility.)
func (om *Sdk) AvaliableProducts() ([]api.ProductInfo, error) {
	service := api.NewAvailableProductsService(om.client, progress.NewBar(), uilive.New())
	out, err := service.List()
	if err != nil {
		return nil, err
	}
	return out.ProductsList, nil
}
// ConfigureProduct sets up the settings for a given tile by name
func (om *Sdk) ConfigureProduct(name, networks, properties string, resources string) error {
	cmd := commands.NewConfigureProduct(
		api.NewStagedProductsService(om.client),
		api.NewJobsService(om.client),
		om.logger)
	args := []string{
		"--product-name", name,
		"--product-network", networks,
		"--product-properties", properties,
		"--product-resources", resources,
	}
	return cmd.Execute(args)
}
// GetProduct fetches settings for a given tile by name
func (om *Sdk) GetProduct(name string) (*ProductProperties, error) {
	guid, err := om.productGuidByType(name)
	if err != nil {
		return nil, err
	}
	raw, err := om.curl(fmt.Sprintf("api/v0/staged/products/%s/properties", guid), "GET", nil)
	if err != nil {
		return nil, err
	}
	props := new(ProductProperties)
	if err := json.Unmarshal(raw, props); err != nil {
		return nil, err
	}
	return props, nil
}
// GetDirector fetches settings for the BOSH director
func (om *Sdk) GetDirector() (*DirectorProperties, error) {
	raw, err := om.curl("/api/v0/staged/director/properties", "GET", nil)
	if err != nil {
		return nil, err
	}
	props := new(DirectorProperties)
	if err := json.Unmarshal(raw, props); err != nil {
		return nil, err
	}
	return props, nil
}
// GetResource fetches resource settings for a specific job of a tile
func (om *Sdk) GetResource(tileName, jobName string) (*Resource, error) {
	productGuid, err := om.productGuidByType(tileName)
	if err != nil {
		return nil, err
	}
	jobGuid, err := om.jobGuidByName(productGuid, jobName)
	if err != nil {
		return nil, err
	}
	path := fmt.Sprintf("/api/v0/staged/products/%s/jobs/%s/resource_config", productGuid, jobGuid)
	raw, err := om.curl(path, "GET", nil)
	if err != nil {
		return nil, err
	}
	res := new(Resource)
	if err := json.Unmarshal(raw, res); err != nil {
		return nil, err
	}
	return res, nil
}
// curl performs an authenticated request against the Ops Manager API and
// returns the raw response body, failing if the API reported errors.
func (om *Sdk) curl(path, method string, data io.Reader) ([]byte, error) {
	req, err := om.newRequest(method, fmt.Sprintf("%s/%s", om.target, path), data)
	if err != nil {
		return nil, err
	}
	resp, err := om.client.Do(req)
	if err != nil {
		return nil, err
	}
	// Fix: the body was previously never closed, leaking a connection per
	// API call.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// Check if the OpsMan API returned an error
	errResp := ErrorResponse{make(map[string][]string)}
	if err := json.Unmarshal(body, &errResp); err == nil {
		if len(errResp.Errors) != 0 {
			return nil, fmt.Errorf("error from Ops Manager API requesting %s: %v", path, errResp.Errors)
		}
	}
	return body, nil
}
// getProducts returns the tiles currently deployed on the target.
func (om *Sdk) getProducts() ([]Product, error) {
	raw, err := om.curl("api/v0/deployed/products", http.MethodGet, nil)
	if err != nil {
		return nil, err
	}
	var products []Product
	if err := json.Unmarshal(raw, &products); err != nil {
		return nil, fmt.Errorf("malformed products response: %s", string(raw))
	}
	return products, nil
}
// productGuidByType resolves a deployed product's GUID from its type name.
func (om *Sdk) productGuidByType(product string) (string, error) {
	products, err := om.getProducts()
	if err != nil {
		return "", err
	}
	for _, p := range products {
		if p.Type == product {
			return p.Guid, nil
		}
	}
	return "", fmt.Errorf("could not find installed application by name: %s", product)
}
// jobGuidByName resolves a job's GUID within a staged product.
func (om *Sdk) jobGuidByName(productGuid, jobName string) (string, error) {
	raw, err := om.curl(fmt.Sprintf("/api/v0/staged/products/%s/jobs", productGuid), "GET", nil)
	if err != nil {
		return "", err
	}
	var jobs JobsResponse
	if err := json.Unmarshal(raw, &jobs); err != nil {
		return "", err
	}
	for _, j := range jobs.Jobs {
		if j.Name == jobName {
			return j.Guid, nil
		}
	}
	return "", fmt.Errorf("job %s not found for product %s", jobName, productGuid)
}
// GetCredentials fetches a named credential of a deployed product.
func (om *Sdk) GetCredentials(name, credential string) (*SimpleCredential, error) {
	guid, err := om.productGuidByType(name)
	if err != nil {
		return nil, err
	}
	path := fmt.Sprintf("api/v0/deployed/products/%s/credentials/%s", guid, credential)
	return om.getCredential(path)
}
// GetDirectorCredentials fetches a named credential of the deployed BOSH
// director.
func (om *Sdk) GetDirectorCredentials(credential string) (*SimpleCredential, error) {
	return om.getCredential(fmt.Sprintf("api/v0/deployed/director/credentials/%s", credential))
}
// getCredential fetches and validates a credential from the given API path.
func (om *Sdk) getCredential(path string) (*SimpleCredential, error) {
	body, err := om.curl(path, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}
	var resp CredentialResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		return nil, fmt.Errorf("malformed credentials response: %s", string(body))
	}
	// Both halves of the credential must be present to be usable.
	if resp.Credential.Value.Password == "" || resp.Credential.Value.Identity == "" {
		// Fixed typo in message: "recieved" -> "received".
		return nil, fmt.Errorf("received an empty credential: %s", string(body))
	}
	return &resp.Credential.Value, nil
}
// GetDirectorIP returns the first static IP of the deployed BOSH
// director job.
func (om *Sdk) GetDirectorIP() (string, error) {
	boshGuid, err := om.productGuidByType("p-bosh")
	if err != nil {
		return "", err
	}
	body, err := om.curl(fmt.Sprintf("api/v0/deployed/products/%s/static_ips", boshGuid), http.MethodGet, nil)
	if err != nil {
		return "", err
	}
	var boshIPs []StaticIP
	if err := json.Unmarshal(body, &boshIPs); err != nil {
		return "", fmt.Errorf("malformed static_ips response: %s", string(body))
	}
	for _, ip := range boshIPs {
		// Guard len(ip.IPs): previously a director entry with an empty IP
		// list caused an index-out-of-range panic.
		if strings.HasPrefix(ip.Name, "director") && len(ip.IPs) > 0 {
			return ip.IPs[0], nil
		}
	}
	return "", errors.New("static_ips response had no director job")
}
// DeleteInstallation wipes the current Ops Manager installation.
func (om *Sdk) DeleteInstallation() error {
	cmd := commands.NewDeleteInstallation(
		api.NewInstallationAssetService(om.client, nil, nil),
		api.NewInstallationsService(om.client),
		commands.NewLogWriter(os.Stdout),
		om.logger,
		poolingIntervalSec)
	return cmd.Execute(nil)
}
// newRequest builds an HTTP request tagged with this tool's User-Agent.
func (om *Sdk) newRequest(method, url string, body io.Reader) (*http.Request, error) {
	req, err := http.NewRequest(method, url, body)
	if req != nil {
		req.Header.Set("User-Agent", version.UserAgent())
	}
	return req, err
}
|
package human
// HumanType selects which concrete implementation the factory functions
// CreateHuman and CreateSexHuman return.
type HumanType int

const (
	BLACKHUMAN HumanType = iota
	YELLOWHUMAN
	WHITEHUMAN
	FEMALEBLACKHUMAN
	MALEBLACKHUMAN
)
// CreateHuman is a factory returning the Human implementation for the
// given type, or nil when t has no plain-Human mapping.
func CreateHuman(t HumanType) Human {
	if t == BLACKHUMAN {
		return &BlackHuman{}
	}
	if t == YELLOWHUMAN {
		return &YellowHuman{}
	}
	if t == WHITEHUMAN {
		return &WhiteHuman{}
	}
	return nil
}
// CreateSexHuman is a factory returning the SexHuman implementation for
// the given type, or nil when t has no SexHuman mapping.
func CreateSexHuman(t HumanType) SexHuman {
	if t == FEMALEBLACKHUMAN {
		return &FemaleBlackHuman{}
	}
	if t == MALEBLACKHUMAN {
		return &MaleBlackHuman{}
	}
	return nil
}
|
package store
import (
"regexp"
"strings"
"time"
"github.com/kantopark/cronexpr"
"github.com/pkg/errors"
"nidavellir/libs"
)
// Valid schedule states for a Source.
const (
	ScheduleQueued  = "QUEUED"
	ScheduleRunning = "RUNNING"
	ScheduleNoop    = "NOOP" // idle; eligible to be scheduled
)
// Source is a scheduled job source backed by a git repository.
type Source struct {
	Id         int       `json:"id"`
	Name       string    `json:"name"`
	UniqueName string    `json:"-"` // lower-cased, space-free form of Name; used for lookups
	RepoUrl    string    `json:"repoUrl"`
	State      string    `json:"state"` // one of the Schedule* constants
	NextTime   time.Time `json:"nextTime"` // next scheduled run; derived from CronExpr when zero
	Secrets    []Secret  `json:"secrets"`
	CronExpr   string    `json:"cronExpr"`
}
// NewSource builds and validates a new Source. The name is trimmed and a
// lower-cased, space-free unique name is derived from it.
func NewSource(name, repoUrl string, startTime time.Time, secrets []Secret, cronExpr string) (*Source, error) {
	trimmed := strings.TrimSpace(name)
	src := Source{
		Name:       trimmed,
		UniqueName: libs.LowerTrimReplaceSpace(trimmed),
		RepoUrl:    repoUrl,
		State:      ScheduleNoop,
		NextTime:   startTime,
		Secrets:    secrets,
		CronExpr:   cronExpr,
	}
	if err := src.Validate(); err != nil {
		return nil, err
	}
	return &src, nil
}
// Validate normalizes the name fields and checks every invariant on the
// Source: name length, repo URL scheme, schedule state, cron well-formedness
// and a minimum gap between consecutive runs. When NextTime is unset it is
// filled in from the cron expression.
func (s *Source) Validate() error {
	s.Name = strings.TrimSpace(s.Name)
	s.UniqueName = libs.LowerTrimReplaceSpace(s.Name)
	if len(s.Name) < 4 {
		return errors.New("name length must be >= 4 characters")
	}
	httpRe := regexp.MustCompile(`^https?://\S+$`)
	if !httpRe.MatchString(s.RepoUrl) {
		return errors.Errorf("expected '%s' git remote to be accessible through http", s.RepoUrl)
	}
	validStates := []string{ScheduleNoop, ScheduleRunning, ScheduleQueued}
	if !libs.IsIn(s.State, validStates) {
		return errors.Errorf("'%s' is an invalid schedule state", s.State)
	}
	cron, err := cronexpr.Parse(s.CronExpr)
	if err != nil {
		return errors.Wrapf(err, "malformed cron expression: %s", s.CronExpr)
	}
	// Look ahead at the next 100 runs and reject schedules where any two
	// consecutive runs are less than 5 minutes apart.
	next := cron.NextN(time.Now(), 100)
	for i := 1; i < len(next); i++ {
		if next[i].Sub(next[i-1]).Minutes() < 5 {
			return errors.New("cron interval has instance where 1 job and another differs by less than 5 minutes")
		}
	}
	if s.NextTime.IsZero() {
		s.NextTime = cron.Next(time.Now())
	}
	return nil
}
// ToRunning sets the source state to Running. The receiver is returned to
// allow call chaining.
func (s *Source) ToRunning() *Source {
	s.State = ScheduleRunning
	return s
}
// ToCompleted marks the job idle again and advances NextTime to the run
// following the one that just finished. The receiver is returned for
// chaining.
func (s *Source) ToCompleted() *Source {
	s.NextTime = cronexpr.MustParse(s.CronExpr).Next(s.NextTime)
	s.State = ScheduleNoop
	return s
}
// AddSource validates and inserts a new job source, forcing a fresh
// primary key and an idle state before the insert.
func (p *Postgres) AddSource(source *Source) (*Source, error) {
	// Let the database assign the primary key.
	source.Id = 0
	source.State = ScheduleNoop
	if err := source.Validate(); err != nil {
		return nil, err
	}
	if err := p.db.Create(source).Error; err != nil {
		return nil, errors.Wrap(err, "could not create new source")
	}
	return source, nil
}
// GetSource looks a source up by primary key.
func (p *Postgres) GetSource(id int) (*Source, error) {
	source := new(Source)
	if err := p.db.First(source, "id = ?", id).Error; err != nil {
		return nil, errors.Wrapf(err, "could not find source with id '%d'", id)
	}
	return source, nil
}
// GetSourceByName looks a source up by its normalized unique name.
func (p *Postgres) GetSourceByName(name string) (*Source, error) {
	source := new(Source)
	unique := libs.LowerTrimReplaceSpace(name)
	if err := p.db.First(source, "unique_name = ?", unique).Error; err != nil {
		return nil, errors.Wrapf(err, "could not find source with name '%s'", name)
	}
	return source, nil
}
// GetSourceOption filters the results of Postgres.GetSources.
type GetSourceOption struct {
	// ScheduledToRun restricts results to idle sources whose next run
	// time has already passed.
	ScheduledToRun bool
}
// GetSources lists job sources. A nil (or zero-valued) option lists all
// sources; with ScheduledToRun set, only idle sources that are due.
func (p *Postgres) GetSources(options *GetSourceOption) ([]*Source, error) {
	if options == nil {
		options = &GetSourceOption{}
	}
	query := p.db
	if options.ScheduledToRun {
		query = query.Where("state = ? AND next_time <= ?", ScheduleNoop, time.Now())
	}
	var sources []*Source
	if err := query.Find(&sources).Error; err != nil {
		return nil, errors.Wrap(err, "error getting sources")
	}
	return sources, nil
}
// UpdateSource validates and persists changes to an existing job source.
func (p *Postgres) UpdateSource(source *Source) (*Source, error) {
	if err := source.Validate(); err != nil {
		return nil, err
	}
	if source.Id <= 0 {
		return nil, errors.New("source id must be specified")
	}
	err := p.db.Model(source).Where("id = ?", source.Id).Update(*source).Error
	if err != nil {
		return nil, errors.Wrap(err, "could not update source")
	}
	return source, nil
}
// RemoveSource deletes the source with the given id, failing when the id
// is invalid or no such row exists.
func (p *Postgres) RemoveSource(id int) error {
	if id <= 0 {
		return errors.New("source id must be specified")
	}
	// Confirm existence first so the caller gets a precise error.
	if err := p.db.First(&Source{}, id).Error; err != nil {
		return errors.Errorf("could not find any sources with id '%d'", id)
	}
	if err := p.db.Delete(&Source{Id: id}).Error; err != nil {
		return errors.Wrapf(err, "error removing source with id '%d'", id)
	}
	return nil
}
// SecretMap flattens the source's secrets into a key -> value map.
func (s *Source) SecretMap() map[string]string {
	out := make(map[string]string, len(s.Secrets))
	for _, sec := range s.Secrets {
		out[sec.Key] = sec.Value
	}
	return out
}
|
package client
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
client_models "github.com/garcialuis/ActivityCollector/client/models"
)
// GetActivity fetches and returns an activity with a specified originID from the database
func (activity *ActivityCollector) GetActivity(originID uint64, activityID uint64) (Activity client_models.Activity) {
	reqURL := fmt.Sprint(activity.config.hostURL, "activity/")
	base, err := url.Parse(reqURL)
	if err != nil {
		log.Println("ActivityClient: Unable to parse reqURL: ", err.Error())
		return Activity
	}
	base.Path += fmt.Sprintf("%d/%d", originID, activityID)
	resp, err := http.Get(base.String())
	if err != nil {
		log.Println("ActivityClient: Unable to complete request due to: ", err.Error())
		return Activity
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Fix: was log.Fatalln, which terminates the host process; a client
		// library must log and return the zero value instead.
		log.Println("ActivityClient: Unable to read response body: ", err.Error())
		return Activity
	}
	err = json.Unmarshal(body, &Activity)
	if err != nil {
		log.Println("ActivityClient: Unable to unmarshall data retrieved: ", err.Error())
		return Activity
	}
	return Activity
}
// GetActivities fetches all activities recorded for the given originID.
func (activity *ActivityCollector) GetActivities(originID uint64) (Activities client_models.Activities) {
	reqURL := fmt.Sprint(activity.config.hostURL, "activities/")
	base, err := url.Parse(reqURL)
	if err != nil {
		log.Println("ActivityClient: Unable to parse reqURL: ", err.Error())
		return Activities
	}
	base.Path += fmt.Sprintf("%d", originID)
	resp, err := http.Get(base.String())
	if err != nil {
		log.Println("ActivityClient: Unable to complete request due to: ", err.Error())
		return Activities
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Fix: was log.Fatalln, which terminates the host process; log and
		// return the zero value instead.
		log.Println("ActivityClient: Unable to read response body: ", err.Error())
		return Activities
	}
	err = json.Unmarshal(body, &Activities)
	if err != nil {
		log.Println("ActivityClient: Unable to unmarshall Activities data retrieved: ", err.Error())
		return Activities
	}
	return Activities
}
// GetActivitiesInRange fetches activities for originID recorded between
// starttime and endtime (unix timestamps, inclusive per the server API).
func (activity *ActivityCollector) GetActivitiesInRange(originID uint64, starttime uint64, endtime uint64) (Activities client_models.Activities) {
	reqURL := fmt.Sprint(activity.config.hostURL, "activities/")
	base, err := url.Parse(reqURL)
	if err != nil {
		log.Println("ActivityClient: Unable to parse reqURL: ", err.Error())
		return Activities
	}
	base.Path += fmt.Sprintf("%d/%d/%d", originID, starttime, endtime)
	resp, err := http.Get(base.String())
	if err != nil {
		log.Println("ActivityClient: Unable to complete request due to: ", err.Error())
		return Activities
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Fix: was log.Fatalln, which terminates the host process; log and
		// return the zero value instead.
		log.Println("ActivityClient: Unable to read response body: ", err.Error())
		return Activities
	}
	err = json.Unmarshal(body, &Activities)
	if err != nil {
		log.Println("ActivityClient: Unable to unmarshall Activities data retrieved: ", err.Error())
		return Activities
	}
	return Activities
}
|
package clippings
import (
"bufio"
"log"
"os"
"strconv"
"strings"
"time"
)
// Clipping is a single entry parsed from a Kindle "My Clippings.txt" file.
type Clipping struct {
	BookTitle string    // The book this clipping was taken from
	Author    string    // The author of the book
	Type      int       // The type of clipping (see the Type* constants below)
	Timestamp time.Time // Timestamp of the date the clipping was created
	Location  []int     // Location of the clipping. [0] = start point, [1] = end point. Bookmark is only [0]
	Body      string    // Actual text of the clipping
}
// clippingDelimiter separates entries in the clippings file.
const clippingDelimiter = "=========="

// Clipping types assigned by setType.
const (
	TypeBookmark = iota
	TypeHighlight
	TypeUnknown
)
// ParseClippingsFile reads a Kindle clippings file at filePath and returns
// one Clipping per five-line entry (title, metadata, blank, body,
// delimiter). It returns nil when the file cannot be opened or contains no
// entries.
func ParseClippingsFile(filePath string) []*Clipping {
	var allClippings []*Clipping
	f, err := os.Open(filePath)
	if err != nil {
		log.Printf("Error reading valid clippings file: %v \n", err)
		return nil
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// A delimiter where a title is expected means there is no (further)
		// clipping to read. Fix: previously this returned nil, discarding
		// everything already parsed when a stray delimiter appeared mid-file.
		if scanner.Text() == clippingDelimiter {
			return allClippings
		}
		newClipping := &Clipping{}
		// First line: title and author.
		newClipping.parseTitleAndAuthor(scanner.Text())
		// Fix: each Scan is now checked so a truncated file cannot silently
		// re-parse the previous (stale) line.
		// Second line: type, location, and date added.
		if !scanner.Scan() {
			break
		}
		newClipping.parseTypeAndLocationAndDate(scanner.Text())
		// Third line is blank; skip it.
		if !scanner.Scan() {
			break
		}
		// Fourth line: the text body.
		if !scanner.Scan() {
			break
		}
		newClipping.parseClippingBody(scanner.Text())
		// Fifth line is the trailing delimiter; skip it.
		scanner.Scan()
		allClippings = append(allClippings, newClipping)
	}
	if err := scanner.Err(); err != nil {
		// Fix: was log.Fatal, which kills the host process; report the
		// error and return what was parsed so far.
		log.Printf("Error scanning clippings file: %v\n", err)
	}
	return allClippings
}
// parseTitleAndAuthor extracts the book title and author from the first
// line of an entry, which has the form "Title (Author)". Note the author
// keeps any text after the first "(" verbatim, including the closing ")".
func (c *Clipping) parseTitleAndAuthor(text string) {
	parts := strings.Split(text, "(")
	c.BookTitle = strings.TrimSpace(parts[0])
	// Fix: a line with no "(" previously caused an index-out-of-range
	// panic; leave Author empty in that case.
	if len(parts) > 1 {
		c.Author = parts[1]
	}
}
// parseTypeAndLocationAndDate extracts the clipping type, location and
// creation date from the second line of an entry, e.g.
// "- Your Highlight on Location 100-150 | Added on Monday, ...".
// NOTE(review): the fixed word indices below assume exactly that shape;
// a line with fewer than 6 words or no "|" will panic — confirm against
// real Kindle output and guard if other formats occur.
func (c *Clipping) parseTypeAndLocationAndDate(text string) {
	// Extract type from 3rd word of the line
	c.setType(strings.Split(text, " ")[2])
	// Extract location from 6th word line
	c.setLocation(strings.Split(text, " ")[5])
	// Extract timestamp by trimming prefix "... | Added on "
	tmp := strings.Split(text, "|")[1]
	tmp = strings.TrimPrefix(tmp, " Added on ")
	c.setTimestamp(tmp)
}
// setType maps the clipping-type word of the metadata line to one of the
// Type* constants, defaulting to TypeUnknown.
func (c *Clipping) setType(typeCompare string) {
	switch typeCompare {
	case "Bookmark":
		c.Type = TypeBookmark
	case "Highlight":
		// Fix: previously compared against the misspelling "Hightlight",
		// so every real highlight fell through to TypeUnknown.
		c.Type = TypeHighlight
	default:
		c.Type = TypeUnknown
	}
}
// setLocation parses a location string of the form "100-150" (a range) or
// "100" (a single point, e.g. a bookmark) into c.Location. Unparseable
// numbers are logged and replaced with 0.
func (c *Clipping) setLocation(location string) {
	parts := strings.Split(location, "-")
	start, err := strconv.Atoi(parts[0])
	if err != nil {
		log.Printf("Error getting start location for clipping: %v\n", err)
		start = 0
	}
	if len(parts) == 1 {
		// A bookmark: a single point, not a range.
		c.Location = []int{start}
		return
	}
	end, err := strconv.Atoi(parts[1])
	if err != nil {
		log.Printf("Error getting end location for a clipping: %v\n", err)
		end = 0
	}
	c.Location = []int{start, end}
}
// setTimestamp parses the "Added on ..." date into c.Timestamp, leaving it
// untouched (zero) when the string does not match the Kindle format.
func (c *Clipping) setTimestamp(timestamp string) {
	const layout = "Monday, January 2, 2006 3:04:05 PM"
	parsed, err := time.Parse(layout, timestamp)
	if err != nil {
		log.Printf("Error parsing timestamp: %v", err)
		return
	}
	c.Timestamp = parsed
}
// timestampToString renders the clipping's timestamp as a
// filename-friendly "YYYY-MM-DD_HH:MM:SS" string.
func (c *Clipping) timestampToString() string {
	return c.Timestamp.Format("2006-01-02_15:04:05")
}
// parseClippingBody stores the raw body line of the clipping.
func (c *Clipping) parseClippingBody(text string) {
	c.Body = text
}
// ToString renders the clipping as "<title> --- <timestamp>\n<body>\n".
func (c *Clipping) ToString() string {
	return c.BookTitle + " --- " + c.Timestamp.String() + "\n" + c.Body + "\n"
}
|
// +build !windows,!darwin
// 29 july 2014
package ui
import (
"fmt"
"reflect"
"unsafe"
"image"
)
// #include "gtk_unix.h"
// extern void goTableModel_toggled(GtkCellRendererToggle *, gchar *, gpointer);
// extern void tableSelectionChanged(GtkTreeSelection *, gpointer);
import "C"
// table implements Table on GTK+ by pairing a GtkTreeView with a custom
// GtkTreeModel (goTableModel) whose callbacks are the exported Go
// functions below.
type table struct {
	*tablebase
	*scroller
	treeview  *C.GtkTreeView
	model     *C.goTableModel
	modelgtk  *C.GtkTreeModel
	selection *C.GtkTreeSelection
	selected  *event // fired whenever the GTK selection changes
	// stuff required by GtkTreeModel
	nColumns C.gint                           // column count, fixed at construction
	old      C.gint                           // row count captured by Lock(); diffed in Unlock()
	types    []C.GType                        // per-column GType, parallel to the row struct's fields
	crtocol  map[*C.GtkCellRendererToggle]int // toggle renderer -> column index
}
// Column attribute names used when appending GtkTreeView columns;
// allocated once as C strings and kept for the life of the process.
var (
	attribText   = togstr("text")
	attribPixbuf = togstr("pixbuf")
	attribActive = togstr("active")
)
// finishNewTable builds the GTK widget hierarchy for a Table whose row
// type is ty: one tree-view column per struct field, with the renderer
// chosen from the field's Go type (pixbuf for *image.RGBA, toggle for
// bool, text otherwise).
func finishNewTable(b *tablebase, ty reflect.Type) Table {
	widget := C.gtk_tree_view_new()
	t := &table{
		scroller:  newScroller(widget, true, true, false), // natively scrollable; has a border; no overlay
		tablebase: b,
		treeview:  (*C.GtkTreeView)(unsafe.Pointer(widget)),
		crtocol:   make(map[*C.GtkCellRendererToggle]int),
		selected:  newEvent(),
	}
	model := C.newTableModel(unsafe.Pointer(t))
	t.model = model
	t.modelgtk = (*C.GtkTreeModel)(unsafe.Pointer(model))
	t.selection = C.gtk_tree_view_get_selection(t.treeview)
	// Forward GTK selection changes into t.selected (see tableSelectionChanged).
	g_signal_connect(
		C.gpointer(unsafe.Pointer(t.selection)),
		"changed",
		C.GCallback(C.tableSelectionChanged),
		C.gpointer(unsafe.Pointer(t)))
	C.gtk_tree_view_set_model(t.treeview, t.modelgtk)
	for i := 0; i < ty.NumField(); i++ {
		// Column header: the `uicolumn` tag, falling back to the field name.
		colname := ty.Field(i).Tag.Get("uicolumn")
		if colname == "" {
			colname = ty.Field(i).Name
		}
		cname := togstr(colname)
		switch {
		case ty.Field(i).Type == reflect.TypeOf((*image.RGBA)(nil)):
			// can't use GDK_TYPE_PIXBUF here because it's a macro that expands to a function and cgo hates that
			t.types = append(t.types, C.gdk_pixbuf_get_type())
			C.tableAppendColumn(t.treeview, C.gint(i), cname,
				C.gtk_cell_renderer_pixbuf_new(), attribPixbuf)
		case ty.Field(i).Type.Kind() == reflect.Bool:
			t.types = append(t.types, C.G_TYPE_BOOLEAN)
			cr := C.gtk_cell_renderer_toggle_new()
			crt := (*C.GtkCellRendererToggle)(unsafe.Pointer(cr))
			t.crtocol[crt] = i
			// Route checkbox toggles back into the Go data slice
			// (see goTableModel_toggled).
			g_signal_connect(C.gpointer(unsafe.Pointer(cr)),
				"toggled",
				C.GCallback(C.goTableModel_toggled),
				C.gpointer(unsafe.Pointer(t)))
			C.tableAppendColumn(t.treeview, C.gint(i), cname,
				cr, attribActive)
		default:
			t.types = append(t.types, C.G_TYPE_STRING)
			C.tableAppendColumn(t.treeview, C.gint(i), cname,
				C.gtk_cell_renderer_text_new(), attribText)
		}
		freegstr(cname) // free now (not deferred) to conserve memory
	}
	// and for some GtkTreeModel boilerplate
	t.nColumns = C.gint(ty.NumField())
	return t
}
// Lock takes the write lock and snapshots the current row count so Unlock
// can tell GTK how the row count changed.
func (t *table) Lock() {
	t.tablebase.Lock()
	d := reflect.Indirect(reflect.ValueOf(t.data))
	t.old = C.gint(d.Len())
}
// Unlock releases the write lock and schedules a GTK-side model update
// reflecting any rows added or removed since Lock.
func (t *table) Unlock() {
	t.unlock()
	// there's a possibility that user actions can happen at this point, before the view is updated
	// alas, this is something we have to deal with, because Unlock() can be called from any thread
	go func() {
		Do(func() {
			t.RLock()
			defer t.RUnlock()
			d := reflect.Indirect(reflect.ValueOf(t.data))
			new := C.gint(d.Len())
			C.tableUpdate(t.model, t.old, new)
		})
	}()
}
// Selected returns the index of the currently selected row, or -1 when
// nothing is selected.
func (t *table) Selected() int {
	var iter C.GtkTreeIter
	t.RLock()
	defer t.RUnlock()
	if C.gtk_tree_selection_get_selected(t.selection, nil, &iter) == C.FALSE {
		return -1
	}
	path := C.gtk_tree_model_get_path(t.modelgtk, &iter)
	if path == nil {
		panic(fmt.Errorf("invalid iter in Table.Selected()"))
	}
	defer C.gtk_tree_path_free(path)
	// The model is flat, so the first index of the path is the row.
	return int(*C.gtk_tree_path_get_indices(path))
}
// Select selects the row at index, clearing any previous selection.
// An index of -1 leaves the table with no selection.
func (t *table) Select(index int) {
	t.RLock()
	defer t.RUnlock()
	C.gtk_tree_selection_unselect_all(t.selection)
	if index == -1 {
		return
	}
	path := C.gtk_tree_path_new()
	defer C.gtk_tree_path_free(path)
	C.gtk_tree_path_append_index(path, C.gint(index))
	C.gtk_tree_selection_select_path(t.selection, path)
}
// OnSelected registers f to run whenever the table selection changes.
func (t *table) OnSelected(f func()) {
	t.selected.set(f)
}
// goTableModel_get_n_columns is the GtkTreeModel get_n_columns callback.
//
//export goTableModel_get_n_columns
func goTableModel_get_n_columns(model *C.GtkTreeModel) C.gint {
	tm := (*C.goTableModel)(unsafe.Pointer(model))
	t := (*table)(tm.gotable)
	return t.nColumns
}
// goTableModel_get_column_type is the GtkTreeModel get_column_type
// callback; it reports the GType recorded for the column at construction.
//
//export goTableModel_get_column_type
func goTableModel_get_column_type(model *C.GtkTreeModel, column C.gint) C.GType {
	tm := (*C.goTableModel)(unsafe.Pointer(model))
	t := (*table)(tm.gotable)
	return t.types[column]
}
// goTableModel_do_get_value fills value with the cell at (row, col),
// converting the Go field to the matching GValue type: pixbuf for
// *image.RGBA, boolean for bool, and a formatted string otherwise.
//
//export goTableModel_do_get_value
func goTableModel_do_get_value(data unsafe.Pointer, row C.gint, col C.gint, value *C.GValue) {
	t := (*table)(data)
	t.RLock()
	defer t.RUnlock()
	d := reflect.Indirect(reflect.ValueOf(t.data))
	datum := d.Index(int(row)).Field(int(col))
	switch {
	case datum.Type() == reflect.TypeOf((*image.RGBA)(nil)):
		d := datum.Interface().(*image.RGBA)
		pixbuf := toIconSizedGdkPixbuf(d)
		C.g_value_init(value, C.gdk_pixbuf_get_type())
		object := C.gpointer(unsafe.Pointer(pixbuf))
		// use g_value_take_object() so the GtkTreeView becomes the pixbuf's owner
		C.g_value_take_object(value, object)
	case datum.Kind() == reflect.Bool:
		d := datum.Interface().(bool)
		C.g_value_init(value, C.G_TYPE_BOOLEAN)
		C.g_value_set_boolean(value, togbool(d))
	default:
		s := fmt.Sprintf("%v", datum)
		str := togstr(s)
		defer freegstr(str)
		C.g_value_init(value, C.G_TYPE_STRING)
		C.g_value_set_string(value, str)
	}
}
// goTableModel_getRowCount reports the current number of rows in the
// backing Go slice.
//
//export goTableModel_getRowCount
func goTableModel_getRowCount(data unsafe.Pointer) C.gint {
	t := (*table)(data)
	t.RLock()
	defer t.RUnlock()
	d := reflect.Indirect(reflect.ValueOf(t.data))
	return C.gint(d.Len())
}
// goTableModel_toggled handles a checkbox toggle in the view by flipping
// the corresponding bool field in the backing Go slice. The column is
// looked up from the renderer via t.crtocol.
//
//export goTableModel_toggled
func goTableModel_toggled(cr *C.GtkCellRendererToggle, pathstr *C.gchar, data C.gpointer) {
	t := (*table)(unsafe.Pointer(data))
	t.Lock()
	defer t.Unlock()
	path := C.gtk_tree_path_new_from_string(pathstr)
	// The model is flat; anything but a depth-1 path is a programmer bug.
	if len := C.gtk_tree_path_get_depth(path); len != 1 {
		panic(fmt.Errorf("invalid path of depth %d given to goTableModel_toggled()", len))
	}
	// dereference return value to get our sole member
	row := *C.gtk_tree_path_get_indices(path)
	col := t.crtocol[cr]
	d := reflect.Indirect(reflect.ValueOf(t.data))
	datum := d.Index(int(row)).Field(int(col))
	datum.SetBool(!datum.Bool())
}
// tableSelectionChanged forwards the GTK "changed" selection signal to the
// table's selected event.
//
//export tableSelectionChanged
func tableSelectionChanged(sel *C.GtkTreeSelection, data C.gpointer) {
	t := (*table)(unsafe.Pointer(data))
	t.selected.fire()
}
|
package http
import (
"context"
"fmt"
"io"
"io/ioutil"
"net"
stdhttp "net/http"
"testing"
"time"
)
func getFreePort(network string) (string, error) {
ln, err := net.Listen(network, "127.0.0.1:")
if err != nil {
return "", err
}
defer ln.Close()
return ln.Addr().String(), nil
}
// TestGraceful verifies a clean shutdown of an idle server when the
// context is cancelled immediately.
func TestGraceful(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	addr, err := getFreePort("tcp")
	if err != nil {
		t.Fatalf("get free port failed: %v", err)
	}
	srv := &stdhttp.Server{Addr: addr}
	go cancel()
	if got := Graceful(ctx, srv, time.Second); got != nil {
		t.Fatalf("want error: %v, but got: %v", (error)(nil), got)
	}
}
// TestCancel verifies that a slow in-flight request does not block shutdown
// beyond the grace period: Graceful must give up with DeadlineExceeded.
func TestCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	addr, err := getFreePort("tcp")
	if err != nil {
		t.Fatalf("get free port failed: %v", err)
	}
	// The handler sleeps far longer than the 100ms grace period, forcing a
	// hard shutdown while the request is in flight.
	var handler stdhttp.HandlerFunc = func(w stdhttp.ResponseWriter, r *stdhttp.Request) {
		time.Sleep(3 * time.Second)
		io.Copy(ioutil.Discard, r.Body)
		r.Body.Close()
		fmt.Fprintln(w, "hello")
	}
	srv := &stdhttp.Server{Addr: addr, Handler: handler}
	go func() {
		go func() {
			time.Sleep(time.Millisecond * 200)
			cancel()
		}()
		// Fix: t.Fatalf must only be called from the test goroutine
		// (see testing.T docs). The hard shutdown is also expected to abort
		// this request, so a Get error here is normal — log, don't fail.
		if _, err := stdhttp.Get("http://" + addr); err != nil {
			t.Logf("request aborted by shutdown (expected): %v", err)
		}
	}()
	err = Graceful(ctx, srv, time.Millisecond*100)
	if want, got := context.DeadlineExceeded, err; got != want {
		t.Fatalf("want error: %v, but got: %v", want, got)
	}
}
// TestError verifies that Graceful surfaces a listen failure: the port is
// occupied by our own listener before the server starts.
func TestError(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	addr, err := getFreePort("tcp")
	if err != nil {
		t.Fatalf("get free port failed: %v", err)
	}
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		t.Fatalf("listen addr failed: %v", err)
	}
	defer ln.Close()
	srv := &stdhttp.Server{Addr: addr}
	go cancel()
	if err := Graceful(ctx, srv, time.Second); err == nil {
		t.Fatalf("want error, but got nil")
	}
}
|
package inner
import (
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/qyqx233/go-tunel/lib"
"github.com/qyqx233/go-tunel/lib/proto"
"github.com/rs/zerolog/log"
)
var maxUint63 uint64 = 2<<62 - 1
// transport owns one client-side tunnel to the server: a single command
// connection (cmdConn) plus on-demand data connections toward the target
// host/port.
type transport struct {
	minConns   int // lower bound of idle data connections to keep open
	maxConns   int
	targetPort int
	// targetHost [32]byte
	targetHost string
	tcpOrUdp   string
	serverIp   string
	serverPort int
	name       [16]byte
	symkey     [16]byte // symmetric key sent during the handshake
	idleConns  int32    // current idle data connections; accessed atomically
	atomic     int32    // 0/1 CAS flag guarding command-connection setup
	shakeRetry int
	cmdConn    net.Conn
}
// shake performs the tunnel handshake on conn: it sends a ShakeProto frame
// identifying this tunnel (name, symmetric key, target host/port, request
// ids) and, for command connections only, waits for the server's reply and
// checks its result code. Data connections are fire-and-forget at this layer.
func (t *transport) shake(conn net.Conn, transportType proto.TransportTypeEnum, usage proto.ShakeProtoUsageEnum, reqID int64, corrReqID int64) error {
	shake := proto.ShakeProto{
		Magic:     proto.Magic,
		Type:      transportType,
		Usage:     usage,
		Name:      t.name,
		SymKey:    t.symkey,
		Host:      lib.String2Byte32(t.targetHost),
		Port:      uint16(t.targetPort),
		ReqID:     reqID,
		CorrReqId: corrReqID,
	}
	err := shake.Send(conn)
	if err != nil {
		log.Error().Err(err).Msg("error")
		return err
	}
	// Only the command channel is acknowledged by the server.
	if transportType == proto.CmdType {
		err = shake.Recv(conn)
		if err != nil {
			log.Error().Err(err).Msg("error")
			return err
		}
		if shake.Code != proto.OkCode {
			// Server rejected the handshake; FatalError tells callers
			// this is not worth retrying.
			log.Info().Msgf("握手返回错误码%d", shake.Code)
			return FatalError{code: int8(shake.Code), msg: "握手错误"}
		}
	}
	return nil
}
// createCmdAndConn dials the tunnel server and performs the command
// handshake. On success it stores the connection and starts the
// command-handling goroutine (which owns releasing the t.atomic flag).
func (t *transport) createCmdAndConn() error {
	// Only one goroutine may own the cmd connection at a time.
	if !atomic.CompareAndSwapInt32(&t.atomic, 0, 1) {
		log.Error().Msg("获取锁失败")
		return nil
	}
	addr, _ := net.ResolveTCPAddr("tcp", t.serverIp+":"+strconv.Itoa(t.serverPort))
	conn, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		log.Error().Err(err).Msg("error")
		// BUG FIX: release the flag with an atomic store; the previous plain
		// write (t.atomic = 0) raced with the CompareAndSwap above.
		atomic.StoreInt32(&t.atomic, 0)
		return err
	}
	err = t.shake(conn, proto.CmdType, proto.InitiativeTransportUsage, lib.NextPosUid(), 0)
	if err != nil {
		conn.Close()
		atomic.StoreInt32(&t.atomic, 0)
		return err
	}
	t.cmdConn = conn
	go t.handleCmd(conn)
	// TODO connection pooling still pending; see the commented-out warm-up:
	// for i := 0; i < t.minConns; i++ {
	// 	go t.createConn()
	// }
	return nil
}
// checkReConn tops up the idle data-connection pool: when the number of idle
// connections has dropped below minConns, spawn one replacement connection.
func (t *transport) checkReConn() {
	if int(atomic.LoadInt32(&t.idleConns)) >= t.minConns {
		return
	}
	go t.createConn()
}
// createConn dials one data connection to the tunnel server, performs the
// transport handshake and, on success, starts piping traffic to the local
// target (run blocks until the connection is done). Failures are logged and
// the attempt is abandoned; the monitor/checkReConn loop will retry later.
func (t *transport) createConn() {
	// BUG FIX: the resolve error was discarded with `_`, which could pass a
	// nil addr to DialTCP; and the dial error was silently swallowed.
	addr, err := net.ResolveTCPAddr("tcp", t.serverIp+":"+strconv.Itoa(t.serverPort))
	if err != nil {
		log.Error().Err(err).Msg("resolve server address failed")
		return
	}
	conn, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		log.Error().Err(err).Msg("dial server failed")
		return
	}
	id := lib.NextPosUid()
	err = t.shake(conn, proto.TransportType, proto.InitiativeTransportUsage, id, 0)
	if err != nil {
		conn.Close()
		return
	}
	c := newWrappedConn(conn, id, true)
	c.run(t, true)
}
// monitor runs forever on a one-minute ticker: when the command connection
// is absent it tries to (re-)establish it; otherwise it sends a heartbeat,
// and on heartbeat failure drops the connection so the next tick reconnects.
// NOTE(review): the wg parameter is currently unused and the ticker is never
// stopped (the loop has no exit) — confirm whether wg.Done/tk.Stop were
// intended, cf. the commented-out FatalError break below.
func (t *transport) monitor(wg *sync.WaitGroup) {
	tk := time.NewTicker(time.Duration(time.Minute))
	for {
		<-tk.C
		log.Debug().Msg("定时探测...")
		if t.cmdConn == nil {
			err := t.createCmdAndConn()
			if err != nil {
				continue
			}
		} else {
			// Command channel exists: probe it with a heartbeat frame.
			cmd := proto.CmdProto{}
			cmd.Usage = proto.BeatUsage
			err := cmd.Send(t.cmdConn)
			if err != nil {
				log.Err(err).Msg("定时探测包发送失败,关闭cmdConn")
				t.cmdConn.Close()
				t.cmdConn = nil
			}
		}
		// err := t.createCmdAndConn()
		// if _, ok := err.(FatalError); ok {
		// 	tk.Stop()
		// 	break
		// }
	}
}
// handleCmd reads command-protocol frames from the command connection until a
// receive fails, dispatching each frame on its own goroutine. On exit it
// closes the connection and releases the ownership flag so the monitor can
// reconnect.
func (t *transport) handleCmd(conn net.Conn) {
	defer func() {
		conn.Close()
		// Release the flag atomically to pair with the CAS that acquired it.
		atomic.StoreInt32(&t.atomic, 0)
	}()
	for {
		cmd := proto.CmdProto{}
		err := cmd.Recv(conn)
		if err != nil {
			log.Error().Err(err).Msg("error")
			break
		}
		go func(c proto.CmdProto) {
			// BUG FIX: the body previously used the captured loop variable
			// `cmd` instead of the parameter `c`, racing with the next Recv
			// iteration overwriting it. Use the goroutine-local copy only.
			log.Info().Msgf("获取到一个请求,用途=%d, reqID=%d", c.Usage, c.ReqID)
			switch c.Usage {
			case proto.BeatUsage:
				log.Debug().Msg("收到♥跳响应包")
			case proto.TransportReqUsage:
				// Server asks us to open a reply data connection; retry the
				// dial+handshake up to two times.
				var tconn net.Conn
				var err error
				var id int64
				addr, _ := net.ResolveTCPAddr("tcp", t.serverIp+":"+strconv.Itoa(t.serverPort))
				for i := 0; i < 3; i++ {
					if i == 2 {
						log.Info().Msgf("创建回应链接连续两次失败,不再尝试")
						return
					}
					tconn, err = net.DialTCP("tcp", nil, addr)
					if err != nil {
						log.Info().Msgf("创建回应临时链接失败%v", err)
						continue
					}
					id = lib.NextPosUid()
					err = t.shake(tconn, proto.TransportType, proto.TransportRspUsage,
						id, c.ReqID)
					if err != nil {
						// BUG FIX: close the half-initialized connection
						// before retrying instead of leaking it.
						tconn.Close()
						continue
					}
					break
				}
				wc := newWrappedConn(tconn, id, true)
				wc.run(t, false)
			}
		}(cmd)
	}
}
// wrappedConn1 is an older id-carrying net.Conn wrapper (kept for reference;
// newWrappedConn below no longer constructs it).
type wrappedConn1 struct {
	net.Conn
	duration bool
	id       int64
	atomic   int32
}

// wrappedConn is an identified tunnel connection backed by lib.WrapConnStru.
type wrappedConn struct {
	lib.WrapConnStru
}

// ID returns the connection's request id.
func (w wrappedConn1) ID() int64 {
	return w.id
}

// Shutdown closes the underlying wrapped connection.
// BUG FIX: the previous implementation returned w.Shutdown(), calling itself
// and recursing until stack overflow; delegate to the embedded struct.
func (w *wrappedConn) Shutdown() error {
	return w.WrapConnStru.Shutdown()
}

// newWrappedConn wraps conn with the given id.
// The duration flag is currently unused; kept for call-site compatibility.
func newWrappedConn(conn net.Conn, id int64, duration bool) *wrappedConn {
	return &wrappedConn{WrapConnStru: lib.NewWrapConn(conn, id)}
	// return wrappedConn{conn, duration, Ui, 0}
}
// run connects to the tunnel's local target and pipes bytes in both
// directions between that target connection and c, blocking until both pipe
// goroutines finish. When reConn is true, a replacement idle connection is
// spawned if the pool has dropped below minConns.
func (c *wrappedConn) run(t *transport, reConn bool) {
	// c := wrappedConn{}
	var target = t.targetHost + ":" + strconv.Itoa(t.targetPort)
	addr, _ := net.ResolveTCPAddr("tcp", target)
	conn, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		log.Info().Msgf("建立与%s:%d的链接失败", t.targetHost, t.targetPort)
		c.Close()
		return
	}
	log.Debug().Str("target", target).Msg("connection made")
	wg := &sync.WaitGroup{}
	wc := lib.NewWrapConn(conn, lib.NextPosUid())
	wg.Add(2)
	// Either direction failing tears down both ends via the cleanup callback.
	go lib.Pipe2(wg, wc, c.WrapConnStru, func() {
		wc.Close()
		c.Close()
	})
	go lib.Pipe2(wg, c.WrapConnStru, wc, func() {
		wc.Close()
		c.Close()
	})
	if reConn {
		t.checkReConn()
	}
	wg.Wait()
}
|
//go:build darwin
// +build darwin

// Darwin (BSD) uses TIOCSETA/TIOCGETA as the termios ioctl request codes;
// alias them to the generic names the rest of the package uses.
package easyterm

import (
	"golang.org/x/sys/unix"
)

// TCSETATTR / TCGETATTR select the ioctls used to write and read terminal
// attributes on this platform.
const TCSETATTR = unix.TIOCSETA
const TCGETATTR = unix.TIOCGETA
|
package main
import (
"restic/repository"
"github.com/spf13/cobra"
)
// cmdRebuildIndex implements "restic rebuild-index": it combines all existing
// index files into a single new index.
var cmdRebuildIndex = &cobra.Command{
	Use:   "rebuild-index [flags]",
	Short: "build a new index file",
	Long: `
The "rebuild-index" command creates a new index by combining the index files
into a new one.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runRebuildIndex(globalOptions)
	},
}

func init() {
	// Register under the root command so it participates in help/dispatch.
	cmdRoot.AddCommand(cmdRebuildIndex)
}
// runRebuildIndex opens the repository, takes an exclusive lock (released on
// return) and rebuilds the index from the existing index files.
func runRebuildIndex(gopts GlobalOptions) error {
	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}
	lock, err := lockRepoExclusive(repo)
	// BUG FIX: check the lock error before deferring the unlock; previously
	// unlockRepo was deferred even when locking failed, releasing a lock
	// value that was never acquired.
	if err != nil {
		return err
	}
	defer unlockRepo(lock)
	return repository.RebuildIndex(repo)
}
|
package dns
import (
"context"
"net"
"testing"
)
// TestPacketSession exercises the datagram-oriented session: an oversized
// query (120 questions) is written, then the response is read into a
// deliberately short 100-byte buffer to verify it comes back truncated
// (TC bit set) and Unpack reports errResourceLen.
func TestPacketSession(t *testing.T) {
	t.Parallel()
	srv := mustServer(localhostZone)
	addr, err := net.ResolveTCPAddr("tcp", srv.Addr)
	if err != nil {
		t.Fatal(err)
	}
	conn, err := new(Transport).DialAddr(context.Background(), addr)
	if err != nil {
		t.Fatal(err)
	}
	ps := &packetSession{
		session: session{
			Conn:    conn,
			addr:    addr,
			client:  new(Client),
			msgerrc: make(chan msgerr),
		},
	}
	msg := new(Message)
	for i := 0; i < 120; i++ {
		q := Question{
			Name:  "app.localhost.",
			Type:  TypeA,
			Class: ClassIN,
		}
		msg.Questions = append(msg.Questions, q)
	}
	buf, err := msg.Pack(nil, true)
	// BUG FIX: the Pack error was previously never checked, so a packing
	// failure would have written a nil/garbage buffer.
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ps.Write(buf); err != nil {
		t.Fatal(err)
	}
	// test truncate due to short buffer size
	if _, err := ps.Write(buf); err != nil {
		t.Fatal(err)
	}
	buf = make([]byte, 100)
	if _, err := ps.Read(buf); err != nil {
		t.Fatal(err)
	}
	_, err = msg.Unpack(buf)
	if want, got := errResourceLen, err; want != got {
		t.Fatalf("want %v error, got %v", want, got)
	}
	if want, got := true, msg.Truncated; want != got {
		t.Errorf("response message was not truncated")
	}
}
// TestStreamSession exercises the stream-oriented (TCP-style) session: it
// frames a query with the 2-byte big-endian length prefix, writes it, then
// reads the prefix and body back separately and checks the response unpacks
// with no trailing bytes.
func TestStreamSession(t *testing.T) {
	t.Parallel()
	srv := mustServer(localhostZone)
	addr, err := net.ResolveTCPAddr("tcp", srv.Addr)
	if err != nil {
		t.Fatal(err)
	}
	conn, err := new(Transport).DialAddr(context.Background(), addr)
	if err != nil {
		t.Fatal(err)
	}
	ss := &streamSession{
		session: session{
			Conn:    conn,
			addr:    addr,
			client:  new(Client),
			msgerrc: make(chan msgerr),
		},
	}
	msg := &Message{
		Questions: []Question{
			{
				Name:  "app.localhost.",
				Type:  TypeA,
				Class: ClassIN,
			},
		},
	}
	buf, err := msg.Pack(nil, true)
	if err != nil {
		t.Fatal(err)
	}
	// Prepend the stream framing: 2-byte message length (nbo is the
	// package's big-endian byte order helper).
	buf = append(make([]byte, 2), buf...)
	nbo.PutUint16(buf[:2], uint16(len(buf)-2))
	if _, err := ss.Write(buf); err != nil {
		t.Fatal(err)
	}
	// test 2 byte length prefix read followed by msg read
	if _, err := ss.Read(buf[:2]); err != nil {
		t.Fatal(err)
	}
	mlen := nbo.Uint16(buf[:2])
	buf = make([]byte, mlen)
	if _, err := ss.Read(buf); err != nil {
		t.Fatal(err)
	}
	if buf, err = msg.Unpack(buf); err != nil {
		t.Fatal(err)
	}
	// Unpack returns the unconsumed remainder; a clean parse leaves none.
	if want, got := 0, len(buf); want != got {
		t.Errorf("want %d extra buffer bytes, got %d", want, got)
	}
}
|
package usecase
import (
"net"
"strings"
)
// ReplaceLocalhostWithOutboundIP substitutes the first occurrence of the
// literal substring "host" in the given string with this machine's outbound
// IP address. Strings without "host" are returned unchanged (and the
// outbound IP is not looked up in that case).
func ReplaceLocalhostWithOutboundIP(in string) string {
	if !strings.Contains(in, "host") {
		return in
	}
	return strings.Replace(in, "host", GetOutboundIP().String(), 1)
}

// GetOutboundIP reports the local address the OS would pick to reach the
// public internet, determined by "dialing" a UDP socket (no packet is sent
// for a UDP dial). It panics if the socket cannot be created.
func GetOutboundIP() net.IP {
	conn, err := net.Dial("udp", "8.8.8.8:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	return conn.LocalAddr().(*net.UDPAddr).IP
}
|
package handlers
import (
"github.com/hashicorp/go-hclog"
"github.com/milutindzunic/pac-backend/data"
"net/http"
)
// TalksHandler serves the HTTP CRUD endpoints for Talk entities.
type TalksHandler struct {
	log   hclog.Logger
	store data.TalkStore
}

// NewTalksHandler wires a TalksHandler to the given store and logger.
func NewTalksHandler(store data.TalkStore, log hclog.Logger) *TalksHandler {
	return &TalksHandler{log, store}
}
// GetTalks writes every talk in the store as a JSON array, or a 500 error
// payload when the store lookup fails. Serialization failures are logged.
func (lh *TalksHandler) GetTalks(rw http.ResponseWriter, r *http.Request) {
	talks, err := lh.store.GetTalks()
	if err != nil {
		writeJSONErrorWithStatus("Error getting all entities", err.Error(), rw, http.StatusInternalServerError)
		return
	}
	if err := writeJSONWithStatus(talks, rw, http.StatusOK); err != nil {
		lh.log.Error("Error serializing entity", err)
	}
}
// GetTalk writes the talk identified by the request's id segment as JSON:
// 200 on success, 404 when the talk does not exist, 500 otherwise.
func (lh *TalksHandler) GetTalk(rw http.ResponseWriter, r *http.Request) {
	id := readId(r)
	talk, err := lh.store.GetTalkByID(id)
	if err != nil {
		// Distinguish "not found" from unexpected store failures.
		switch err.(type) {
		case *data.TalkNotFoundError:
			writeJSONErrorWithStatus("Entity not found", err.Error(), rw, http.StatusNotFound)
			return
		default:
			writeJSONErrorWithStatus("Unexpected error occurred", err.Error(), rw, http.StatusInternalServerError)
			return
		}
	}
	err = writeJSONWithStatus(talk, rw, http.StatusOK)
	if err != nil {
		lh.log.Error("Error serializing entity", err)
		return
	}
}
// CreateTalk decodes a Talk from the request body, stores it, and echoes the
// stored entity back with 201 Created. Malformed bodies and store rejections
// both yield 400.
func (lh *TalksHandler) CreateTalk(rw http.ResponseWriter, r *http.Request) {
	talk := &data.Talk{}
	err := readJSON(r.Body, talk)
	if err != nil {
		lh.log.Error("Error deserializing entity", err)
		writeJSONErrorWithStatus("Error deserializing entity", err.Error(), rw, http.StatusBadRequest)
		return
	}
	// The store may assign the id and normalize fields; use its result.
	talk, err = lh.store.AddTalk(talk)
	if err != nil {
		writeJSONErrorWithStatus("Error creating entity", err.Error(), rw, http.StatusBadRequest)
		return
	}
	err = writeJSONWithStatus(talk, rw, http.StatusCreated)
	if err != nil {
		lh.log.Error("Error serializing entity", err)
		return
	}
}
// UpdateTalk decodes a Talk from the request body and updates the entity
// identified by the request's id segment: 200 with the updated entity on
// success, 400 for a malformed body, 404 when missing, 500 otherwise.
func (lh *TalksHandler) UpdateTalk(rw http.ResponseWriter, r *http.Request) {
	id := readId(r)
	talk := &data.Talk{}
	err := readJSON(r.Body, talk)
	if err != nil {
		lh.log.Error("Error deserializing entity", err)
		writeJSONErrorWithStatus("Error deserializing entity", err.Error(), rw, http.StatusBadRequest)
		return
	}
	talk, err = lh.store.UpdateTalk(id, talk)
	if err != nil {
		switch err.(type) {
		case *data.TalkNotFoundError:
			writeJSONErrorWithStatus("Entity not found", err.Error(), rw, http.StatusNotFound)
			return
		default:
			writeJSONErrorWithStatus("Unexpected error occurred", err.Error(), rw, http.StatusInternalServerError)
			return
		}
	}
	err = writeJSONWithStatus(talk, rw, http.StatusOK)
	if err != nil {
		lh.log.Error("Error serializing entity", err)
		return
	}
}
// DeleteTalk removes the talk identified by the request's id segment and
// responds 204 No Content on success, 404 when missing, 500 otherwise.
func (lh *TalksHandler) DeleteTalk(rw http.ResponseWriter, r *http.Request) {
	if err := lh.store.DeleteTalkByID(readId(r)); err != nil {
		switch err.(type) {
		case *data.TalkNotFoundError:
			writeJSONErrorWithStatus("Entity not found", err.Error(), rw, http.StatusNotFound)
		default:
			writeJSONErrorWithStatus("Unexpected error occurred", err.Error(), rw, http.StatusInternalServerError)
		}
		return
	}
	rw.WriteHeader(http.StatusNoContent)
}
// GetTalksByEventID writes all talks belonging to the event whose id is in
// the request path: 200 with a JSON array, or 500 when the lookup fails.
func (lh *TalksHandler) GetTalksByEventID(rw http.ResponseWriter, r *http.Request) {
	eventID := readId(r)
	talks, err := lh.store.GetTalksByEventID(eventID)
	if err != nil {
		writeJSONErrorWithStatus("Error getting entities", err.Error(), rw, http.StatusInternalServerError)
		return
	}
	err = writeJSONWithStatus(talks, rw, http.StatusOK)
	if err != nil {
		lh.log.Error("Error serializing entity", err)
		return
	}
}
// GetTalksByPersonID writes all talks given by the person whose id is in the
// request path: 200 with a JSON array, or 500 when the lookup fails.
func (lh *TalksHandler) GetTalksByPersonID(rw http.ResponseWriter, r *http.Request) {
	talks, err := lh.store.GetTalksByPersonID(readId(r))
	if err != nil {
		writeJSONErrorWithStatus("Error getting entities", err.Error(), rw, http.StatusInternalServerError)
		return
	}
	if err := writeJSONWithStatus(talks, rw, http.StatusOK); err != nil {
		lh.log.Error("Error serializing entity", err)
	}
}
|
package osc
import (
"encoding/binary"
"errors"
"time"
)
const (
	// secondsFrom1900To1970 is the offset between the NTP epoch (1900-01-01),
	// which OSC time tags use, and the Unix epoch (1970-01-01).
	secondsFrom1900To1970 = 2208988800
	bundleIdentifier      = "#bundle"
)

var (
	errInvalidData = errors.New("invalid data")
)

// getPaddingLength returns how many bytes must be appended to len to reach
// the next multiple of multipleOf (0 when already aligned).
func getPaddingLength(len int, multipleOf int) int {
	return (multipleOf - (len % multipleOf)) % multipleOf
}

// createOSCString encodes data as an OSC-string: the bytes followed by at
// least one NUL terminator, zero-padded to a multiple of 4 bytes.
func createOSCString(data string) []byte {
	paddingLength := getPaddingLength(len(data)+1, 4) + 1
	oscString := make([]byte, len(data)+paddingLength)
	copy(oscString, []byte(data))
	return oscString
}

// createOSCBlob encodes data as an OSC-blob: a big-endian int32 size prefix
// followed by the bytes, zero-padded to a multiple of 4.
func createOSCBlob(data []byte) []byte {
	paddingLength := getPaddingLength(len(data), 4)
	oscBlob := make([]byte, 4+len(data)+paddingLength)
	binary.BigEndian.PutUint32(oscBlob, uint32(len(data)))
	copy(oscBlob[4:], data)
	return oscBlob
}

// timeToTimeTag converts v into an 8-byte OSC (NTP) time tag: the high 32
// bits are seconds since 1900, the low 32 bits a binary fraction of a second.
//
// BUG FIX: the previous version stored the raw nanosecond count in the low
// word instead of converting it to a 2^32 fixed-point fraction, producing
// time tags other OSC implementations would misread.
func timeToTimeTag(v time.Time) []byte {
	seconds := uint64(secondsFrom1900To1970 + v.Unix())
	fraction := uint64(v.Nanosecond()) << 32 / 1000000000
	data := make([]byte, 8)
	binary.BigEndian.PutUint64(data, seconds<<32|fraction)
	return data
}

// timeTagToTime converts an 8-byte OSC time tag back into a time.Time,
// inverting the fixed-point fraction used by timeToTimeTag.
func timeTagToTime(v []byte) time.Time {
	seconds := binary.BigEndian.Uint32(v[0:4]) - secondsFrom1900To1970
	fraction := uint64(binary.BigEndian.Uint32(v[4:8]))
	nanoseconds := fraction * 1000000000 >> 32
	return time.Unix(int64(seconds), int64(nanoseconds))
}
|
package stored_responses
import (
"context"
"encoding/json"
"errors"
"testing"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/stretchr/testify/assert"
)
// TestRemoveImpsWithStoredResponses checks that removeImpsWithStoredResponses
// strips from the request exactly those imps that have a stored bid response,
// leaving the rest (nil when none remain).
func TestRemoveImpsWithStoredResponses(t *testing.T) {
	bidRespId1 := json.RawMessage(`{"id": "resp_id1"}`)
	testCases := []struct {
		description        string
		reqIn              *openrtb2.BidRequest
		storedBidResponses ImpBidderStoredResp
		expectedImps       []openrtb2.Imp
	}{
		{
			description: "request with imps and stored bid response for this imp",
			reqIn: &openrtb2.BidRequest{Imp: []openrtb2.Imp{
				{ID: "imp-id1"},
			}},
			storedBidResponses: ImpBidderStoredResp{
				"imp-id1": {"appnexus": bidRespId1},
			},
			expectedImps: nil,
		},
		{
			description: "request with imps and stored bid response for one of these imp",
			reqIn: &openrtb2.BidRequest{Imp: []openrtb2.Imp{
				{ID: "imp-id1"},
				{ID: "imp-id2"},
			}},
			storedBidResponses: ImpBidderStoredResp{
				"imp-id1": {"appnexus": bidRespId1},
			},
			expectedImps: []openrtb2.Imp{
				{
					ID: "imp-id2",
				},
			},
		},
		{
			description: "request with imps and stored bid response for both of these imp",
			reqIn: &openrtb2.BidRequest{Imp: []openrtb2.Imp{
				{ID: "imp-id1"},
				{ID: "imp-id2"},
			}},
			storedBidResponses: ImpBidderStoredResp{
				"imp-id1": {"appnexus": bidRespId1},
				"imp-id2": {"appnexus": bidRespId1},
			},
			expectedImps: nil,
		},
		{
			description: "request with imps and no stored bid responses",
			reqIn: &openrtb2.BidRequest{Imp: []openrtb2.Imp{
				{ID: "imp-id1"},
				{ID: "imp-id2"},
			}},
			storedBidResponses: nil,
			expectedImps: []openrtb2.Imp{
				{ID: "imp-id1"},
				{ID: "imp-id2"},
			},
		},
	}
	for _, testCase := range testCases {
		// removeImpsWithStoredResponses mutates the request in place.
		request := testCase.reqIn
		removeImpsWithStoredResponses(request, testCase.storedBidResponses)
		assert.Equal(t, testCase.expectedImps, request.Imp, "incorrect Impressions for testCase %s", testCase.description)
	}
}
// TestBuildStoredBidResponses checks that buildStoredResp inverts the
// imp->bidder->response mapping into bidder->imp->response.
func TestBuildStoredBidResponses(t *testing.T) {
	bidRespId1 := json.RawMessage(`{"id": "resp_id1"}`)
	bidRespId2 := json.RawMessage(`{"id": "resp_id2"}`)
	bidRespId3 := json.RawMessage(`{"id": "resp_id3"}`)
	testCases := []struct {
		description        string
		storedBidResponses ImpBidderStoredResp
		expectedResult     BidderImpsWithBidResponses
	}{
		{
			description: "one imp and stored response for this imp with one bidder",
			storedBidResponses: ImpBidderStoredResp{
				"imp-id1": {"bidderA": bidRespId1},
			},
			expectedResult: BidderImpsWithBidResponses{
				"bidderA": {
					"imp-id1": bidRespId1,
				},
			},
		},
		{
			description: "one imp and stored response for this imp with two bidders",
			storedBidResponses: ImpBidderStoredResp{
				"imp-id1": {"bidderA": bidRespId1, "bidderB": bidRespId2},
			},
			expectedResult: BidderImpsWithBidResponses{
				"bidderA": {
					"imp-id1": bidRespId1,
				},
				"bidderB": {
					"imp-id1": bidRespId2,
				},
			},
		},
		{
			description: "two imps and stored response for this imp with two bidders",
			storedBidResponses: ImpBidderStoredResp{
				"imp-id1": {"bidderA": bidRespId1},
				"imp-id2": {"bidderB": bidRespId2},
			},
			expectedResult: BidderImpsWithBidResponses{
				"bidderA": {
					"imp-id1": bidRespId1,
				},
				"bidderB": {
					"imp-id2": bidRespId2,
				},
			},
		},
		{
			description: "three imps and stored response for these imps with two bidders",
			storedBidResponses: ImpBidderStoredResp{
				"imp-id1": {"bidderA": bidRespId1},
				"imp-id2": {"bidderB": bidRespId2},
				"imp-id3": {"bidderA": bidRespId3},
			},
			expectedResult: BidderImpsWithBidResponses{
				"bidderA": {
					"imp-id1": bidRespId1,
					"imp-id3": bidRespId3,
				},
				"bidderB": {
					"imp-id2": bidRespId2,
				},
			},
		},
		{
			description:        "empty stored responses",
			storedBidResponses: ImpBidderStoredResp{},
			expectedResult:     BidderImpsWithBidResponses{},
		},
	}
	for _, testCase := range testCases {
		bidderToImpToResponses := buildStoredResp(testCase.storedBidResponses)
		// Compare per-bidder so a mismatch pinpoints the offending bidder.
		for expectedBidderName := range testCase.expectedResult {
			assert.Equal(t, testCase.expectedResult[expectedBidderName], bidderToImpToResponses[expectedBidderName], "incorrect stored responses for testCase %s", testCase.description)
		}
	}
}
// TestProcessStoredAuctionAndBidResponsesErrors feeds malformed
// storedauctionresponse/storedbidresponse extensions through
// ProcessStoredResponses and checks the exact validation errors returned.
func TestProcessStoredAuctionAndBidResponsesErrors(t *testing.T) {
	bidderMap := map[string]openrtb_ext.BidderName{"testBidder": "testBidder"}
	testCases := []struct {
		description       string
		requestJson       []byte
		expectedErrorList []error
	}{
		{
			description: "Invalid stored auction response format: empty stored Auction Response Id",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"prebid": {
							"storedauctionresponse": {
							}
						}
					}
				}
			]}`),
			expectedErrorList: []error{errors.New("request.imp[0] has ext.prebid.storedauctionresponse specified, but \"id\" field is missing ")},
		},
		{
			description: "Invalid stored bid response format: empty storedbidresponse.bidder",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"prebid": {
							"storedbidresponse": [
								{ "id": "123abc"}]
						}
					}
				}
			]}`),
			expectedErrorList: []error{errors.New("request.imp[0] has ext.prebid.storedbidresponse specified, but \"id\" or/and \"bidder\" fields are missing ")},
		},
		{
			description: "Invalid stored bid response format: empty storedbidresponse.id",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"prebid": {
							"storedbidresponse": [
								{ "bidder": "testbidder"}]
						}
					}
				}
			]}`),
			expectedErrorList: []error{errors.New("request.imp[0] has ext.prebid.storedbidresponse specified, but \"id\" or/and \"bidder\" fields are missing ")},
		},
		{
			description: "Invalid stored bid response: storedbidresponse.bidder not found",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"prebid": {
							"storedbidresponse": [
								{ "bidder": "testBidder123", "id": "123abc"}]
						}
					}
				}
			]}`),
			expectedErrorList: []error{errors.New("request.imp[impId: imp-id1].ext.prebid.bidder contains unknown bidder: testBidder123. Did you forget an alias in request.ext.prebid.aliases?")},
		},
		{
			description: "Invalid stored auction response format: empty stored Auction Response Id in second imp",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"prebid": {
							"storedauctionresponse": {
								"id":"123"
							}
						}
					}
				},
				{
					"id": "imp-id2",
					"ext": {
						"prebid": {
							"storedauctionresponse": {
								"id":""
							}
						}
					}
				}
			]}`),
			expectedErrorList: []error{errors.New("request.imp[1] has ext.prebid.storedauctionresponse specified, but \"id\" field is missing ")},
		},
		{
			description: "Invalid stored bid response format: empty stored bid Response Id in second imp",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"prebid": {
							"storedbidresponse": [
								{"bidder":"testBidder", "id": "123abc"}
							]
						}
					}
				},
				{
					"id": "imp-id2",
					"ext": {
						"prebid": {
							"storedbidresponse": [
								{"bidder":"testBidder", "id": ""}
							]
						}
					}
				}
			]}`),
			expectedErrorList: []error{errors.New("request.imp[1] has ext.prebid.storedbidresponse specified, but \"id\" or/and \"bidder\" fields are missing ")},
		},
	}
	for _, test := range testCases {
		// Validation failures must be detectable without a fetcher (nil here).
		_, _, _, errorList := ProcessStoredResponses(nil, test.requestJson, nil, bidderMap)
		assert.Equalf(t, test.expectedErrorList, errorList, "Error doesn't match: %s\n", test.description)
	}
}
// TestProcessStoredAuctionAndBidResponses covers the happy paths of
// ProcessStoredResponses: stored auction responses, stored bid responses
// (with and without replaceimpid), and their combination across multiple
// imps. The fetcher is mocked to return fixed raw payloads.
// NOTE(review): the bidStoredResp fixtures look like malformed JSON
// (missing "}" before the first "]") — they are only carried around as
// opaque json.RawMessage here, so parsing is presumably never attempted;
// confirm this is intentional.
func TestProcessStoredAuctionAndBidResponses(t *testing.T) {
	bidderMap := map[string]openrtb_ext.BidderName{"bidderA": "bidderA", "bidderB": "bidderB"}
	bidStoredResp1 := json.RawMessage(`[{"bid": [{"id": "bid_id1"],"seat": "bidderA"}]`)
	bidStoredResp2 := json.RawMessage(`[{"bid": [{"id": "bid_id2"],"seat": "bidderB"}]`)
	bidStoredResp3 := json.RawMessage(`[{"bid": [{"id": "bid_id3"],"seat": "bidderA"}]`)
	mockStoredResponses := map[string]json.RawMessage{
		"1": bidStoredResp1,
		"2": bidStoredResp2,
		"3": bidStoredResp3,
	}
	fetcher := &mockStoredBidResponseFetcher{mockStoredResponses}
	testCases := []struct {
		description                    string
		requestJson                    []byte
		expectedStoredAuctionResponses ImpsWithBidResponses
		expectedStoredBidResponses     ImpBidderStoredResp
		expectedBidderImpReplaceImpID  BidderImpReplaceImpID
	}{
		{
			description: "No stored responses",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"prebid": {
						}
					}
				}
			]}`),
			expectedStoredAuctionResponses: nil,
			expectedStoredBidResponses:     nil,
			expectedBidderImpReplaceImpID:  nil,
		},
		{
			description: "Stored auction response one imp",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "1"
							}
						}
					}
				}
			]}`),
			expectedStoredAuctionResponses: ImpsWithBidResponses{
				"imp-id1": bidStoredResp1,
			},
			expectedStoredBidResponses:    ImpBidderStoredResp{},
			expectedBidderImpReplaceImpID: BidderImpReplaceImpID{},
		},
		{
			description: "Stored bid response one imp",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedbidresponse": [
								{"bidder":"bidderA", "id": "1"}
							]
						}
					}
				}
			]}`),
			expectedStoredAuctionResponses: ImpsWithBidResponses{},
			expectedStoredBidResponses: ImpBidderStoredResp{
				"imp-id1": {"bidderA": bidStoredResp1},
			},
			expectedBidderImpReplaceImpID: BidderImpReplaceImpID{
				"bidderA": map[string]bool{"imp-id1": true},
			},
		},
		{
			description: "Stored bid responses two bidders one imp",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedbidresponse": [
								{"bidder":"bidderA", "id": "1", "replaceimpid": true},
								{"bidder":"bidderB", "id": "2", "replaceimpid": false}
							]
						}
					}
				}
			]}`),
			expectedStoredAuctionResponses: ImpsWithBidResponses{},
			expectedStoredBidResponses: ImpBidderStoredResp{
				"imp-id1": {"bidderA": bidStoredResp1, "bidderB": bidStoredResp2},
			},
			expectedBidderImpReplaceImpID: BidderImpReplaceImpID{
				"bidderA": map[string]bool{"imp-id1": true},
				"bidderB": map[string]bool{"imp-id1": false},
			},
		},
		{
			//This is not a valid scenario for real auction request, added for testing purposes
			description: "Stored auction and bid responses one imp",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "1"
							},
							"storedbidresponse": [
								{"bidder":"bidderA", "id": "1"},
								{"bidder":"bidderB", "id": "2"}
							]
						}
					}
				}
			]}`),
			expectedStoredAuctionResponses: ImpsWithBidResponses{
				"imp-id1": bidStoredResp1,
			},
			expectedStoredBidResponses: ImpBidderStoredResp{
				"imp-id1": {"bidderA": bidStoredResp1, "bidderB": bidStoredResp2},
			},
			expectedBidderImpReplaceImpID: BidderImpReplaceImpID{
				"bidderA": map[string]bool{"imp-id1": true},
				"bidderB": map[string]bool{"imp-id1": true},
			},
		},
		{
			description: "Stored auction response three imps",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "1"
							}
						}
					}
				},
				{
					"id": "imp-id2",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "2"
							}
						}
					}
				},
				{
					"id": "imp-id3",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "3"
							}
						}
					}
				}
			]}`),
			expectedStoredAuctionResponses: ImpsWithBidResponses{
				"imp-id1": bidStoredResp1,
				"imp-id2": bidStoredResp2,
				"imp-id3": bidStoredResp3,
			},
			expectedStoredBidResponses:    ImpBidderStoredResp{},
			expectedBidderImpReplaceImpID: BidderImpReplaceImpID{},
		},
		{
			description: "Stored auction response three imps duplicated stored auction response",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "1"
							}
						}
					}
				},
				{
					"id": "imp-id2",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "2"
							}
						}
					}
				},
				{
					"id": "imp-id3",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "2"
							}
						}
					}
				}
			]}`),
			expectedStoredAuctionResponses: ImpsWithBidResponses{
				"imp-id1": bidStoredResp1,
				"imp-id2": bidStoredResp2,
				"imp-id3": bidStoredResp2,
			},
			expectedStoredBidResponses:    ImpBidderStoredResp{},
			expectedBidderImpReplaceImpID: BidderImpReplaceImpID{},
		},
		{
			description: "Stored bid responses two bidders two imp",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedbidresponse": [
								{"bidder":"bidderA", "id": "1", "replaceimpid": false},
								{"bidder":"bidderB", "id": "2"}
							]
						}
					}
				},
				{
					"id": "imp-id2",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedbidresponse": [
								{"bidder":"bidderA", "id": "3"},
								{"bidder":"bidderB", "id": "2", "replaceimpid": false}
							]
						}
					}
				}
			]}`),
			expectedStoredAuctionResponses: ImpsWithBidResponses{},
			expectedStoredBidResponses: ImpBidderStoredResp{
				"imp-id1": {"bidderA": bidStoredResp1, "bidderB": bidStoredResp2},
				"imp-id2": {"bidderA": bidStoredResp3, "bidderB": bidStoredResp2},
			},
			expectedBidderImpReplaceImpID: BidderImpReplaceImpID{
				"bidderA": map[string]bool{"imp-id1": false, "imp-id2": true},
				"bidderB": map[string]bool{"imp-id1": true, "imp-id2": false},
			},
		},
	}
	for _, test := range testCases {
		storedAuctionResponses, storedBidResponses, bidderImpReplaceImpId, errorList := ProcessStoredResponses(nil, test.requestJson, fetcher, bidderMap)
		assert.Equal(t, test.expectedStoredAuctionResponses, storedAuctionResponses, "storedAuctionResponses doesn't match: %s\n", test.description)
		assert.Equalf(t, test.expectedStoredBidResponses, storedBidResponses, "storedBidResponses doesn't match: %s\n", test.description)
		assert.Equal(t, test.expectedBidderImpReplaceImpID, bidderImpReplaceImpId, "bidderImpReplaceImpId doesn't match: %s\n", test.description)
		assert.Nil(t, errorList, "Error should be nil")
	}
}
// TestProcessStoredResponsesNotFoundResponse verifies that when the fetcher
// returns nil data for a referenced stored response id, ProcessStoredResponses
// reports a descriptive "failed to fetch" error for each affected imp.
func TestProcessStoredResponsesNotFoundResponse(t *testing.T) {
	bidderMap := map[string]openrtb_ext.BidderName{"bidderA": "bidderA", "bidderB": "bidderB"}
	bidStoredResp1 := json.RawMessage(`[{"bid": [{"id": "bid_id1"],"seat": "bidderA"}]`)
	bidStoredResp2 := json.RawMessage(`[{"bid": [{"id": "bid_id2"],"seat": "bidderB"}]`)
	// Ids "3" and "4" deliberately map to nil to simulate missing data.
	mockStoredResponses := map[string]json.RawMessage{
		"1": bidStoredResp1,
		"2": bidStoredResp2,
		"3": nil,
		"4": nil,
	}
	fetcher := &mockStoredBidResponseFetcher{mockStoredResponses}
	testCases := []struct {
		description    string
		requestJson    []byte
		expectedErrors []error
	}{
		{
			description: "Stored bid response with nil data, one bidder one imp",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedbidresponse": [
								{"bidder":"bidderB", "id": "3"}
							]
						}
					}
				}
			]}`),
			expectedErrors: []error{
				errors.New("failed to fetch stored bid response for impId = imp-id1, bidder = bidderB and storedBidResponse id = 3"),
			},
		},
		{
			description: "Stored bid response with nil data, one bidder, two imps, one with correct stored response",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedbidresponse": [
								{"bidder":"bidderB", "id": "1"}
							]
						}
					}
				},
				{
					"id": "imp-id2",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedbidresponse": [
								{"bidder":"bidderB", "id": "3"}
							]
						}
					}
				}
			]}`),
			expectedErrors: []error{
				errors.New("failed to fetch stored bid response for impId = imp-id2, bidder = bidderB and storedBidResponse id = 3"),
			},
		},
		{
			description: "Stored bid response with nil data, one bidder, two imps, both with correct stored response",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedbidresponse": [
								{"bidder":"bidderB", "id": "4"}
							]
						}
					}
				},
				{
					"id": "imp-id2",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedbidresponse": [
								{"bidder":"bidderB", "id": "3"}
							]
						}
					}
				}
			]}`),
			expectedErrors: []error{
				errors.New("failed to fetch stored bid response for impId = imp-id1, bidder = bidderB and storedBidResponse id = 4"),
				errors.New("failed to fetch stored bid response for impId = imp-id2, bidder = bidderB and storedBidResponse id = 3"),
			},
		},
		{
			description: "Stored auction response with nil data and one imp",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "4"
							}
						}
					}
				}
			]}`),
			expectedErrors: []error{
				errors.New("failed to fetch stored auction response for impId = imp-id1 and storedAuctionResponse id = 4"),
			},
		},
		{
			description: "Stored auction response with nil data, and two imps with nil responses",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "4"
							}
						}
					}
				},
				{
					"id": "imp-id2",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "3"
							}
						}
					}
				}
			]}`),
			expectedErrors: []error{
				errors.New("failed to fetch stored auction response for impId = imp-id1 and storedAuctionResponse id = 4"),
				errors.New("failed to fetch stored auction response for impId = imp-id2 and storedAuctionResponse id = 3"),
			},
		},
		{
			description: "Stored auction response with nil data, two imps, one with nil responses",
			requestJson: []byte(`{"imp": [
				{
					"id": "imp-id1",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "2"
							}
						}
					}
				},
				{
					"id": "imp-id2",
					"ext": {
						"appnexus": {
							"placementId": 123
						},
						"prebid": {
							"storedauctionresponse": {
								"id": "3"
							}
						}
					}
				}
			]}`),
			expectedErrors: []error{
				errors.New("failed to fetch stored auction response for impId = imp-id2 and storedAuctionResponse id = 3"),
			},
		},
	}
	for _, test := range testCases {
		_, _, _, errorList := ProcessStoredResponses(nil, test.requestJson, fetcher, bidderMap)
		// Order of errors is not asserted; each expected error must be present.
		for _, err := range test.expectedErrors {
			assert.Contains(t, errorList, err, "incorrect errors returned: %s", test.description)
		}
	}
}
// TestFlipMap checks that flipMap inverts imp->bidder->replace flags into
// bidder->imp->replace flags, including the empty and nil inputs.
func TestFlipMap(t *testing.T) {
	testCases := []struct {
		description              string
		inImpBidderReplaceImpID  ImpBidderReplaceImpID
		outBidderImpReplaceImpID BidderImpReplaceImpID
	}{
		{
			description:              "Empty ImpBidderReplaceImpID",
			inImpBidderReplaceImpID:  ImpBidderReplaceImpID{},
			outBidderImpReplaceImpID: BidderImpReplaceImpID{},
		},
		{
			description:              "Nil ImpBidderReplaceImpID",
			inImpBidderReplaceImpID:  nil,
			outBidderImpReplaceImpID: BidderImpReplaceImpID{},
		},
		{
			description:              "ImpBidderReplaceImpID has a one element map with single element",
			inImpBidderReplaceImpID:  ImpBidderReplaceImpID{"imp-id": {"bidderA": true}},
			outBidderImpReplaceImpID: BidderImpReplaceImpID{"bidderA": {"imp-id": true}},
		},
		{
			description:              "ImpBidderReplaceImpID has a one element map with multiple elements",
			inImpBidderReplaceImpID:  ImpBidderReplaceImpID{"imp-id": {"bidderA": true, "bidderB": false}},
			outBidderImpReplaceImpID: BidderImpReplaceImpID{"bidderA": {"imp-id": true}, "bidderB": {"imp-id": false}},
		},
		{
			description: "ImpBidderReplaceImpID has multiple elements map with single element",
			inImpBidderReplaceImpID: ImpBidderReplaceImpID{
				"imp-id1": {"bidderA": true},
				"imp-id2": {"bidderB": false}},
			outBidderImpReplaceImpID: BidderImpReplaceImpID{
				"bidderA": {"imp-id1": true},
				"bidderB": {"imp-id2": false}},
		},
		{
			description: "ImpBidderReplaceImpID has multiple elements map with multiple elements",
			inImpBidderReplaceImpID: ImpBidderReplaceImpID{
				"imp-id1": {"bidderA": true, "bidderB": false, "bidderC": false, "bidderD": true},
				"imp-id2": {"bidderA": false, "bidderB": false, "bidderC": true, "bidderD": true},
				"imp-id3": {"bidderA": false, "bidderB": true, "bidderC": true, "bidderD": false}},
			outBidderImpReplaceImpID: BidderImpReplaceImpID{
				"bidderA": {"imp-id1": true, "imp-id2": false, "imp-id3": false},
				"bidderB": {"imp-id1": false, "imp-id2": false, "imp-id3": true},
				"bidderC": {"imp-id1": false, "imp-id2": true, "imp-id3": true},
				"bidderD": {"imp-id1": true, "imp-id2": true, "imp-id3": false}},
		},
	}
	for _, test := range testCases {
		actualResult := flipMap(test.inImpBidderReplaceImpID)
		assert.Equal(t, test.outBidderImpReplaceImpID, actualResult, "Incorrect flipped map for test case %s\n", test.description)
	}
}
type mockStoredBidResponseFetcher struct {
data map[string]json.RawMessage
}
// FetchRequests satisfies the fetcher interface; this mock never
// returns stored requests, imp data, or errors.
func (cf *mockStoredBidResponseFetcher) FetchRequests(ctx context.Context, requestIDs []string, impIDs []string) (requestData map[string]json.RawMessage, impData map[string]json.RawMessage, errs []error) {
	return nil, nil, nil
}
// FetchResponses returns the mock's canned data regardless of the
// requested ids, and never errors.
func (cf *mockStoredBidResponseFetcher) FetchResponses(ctx context.Context, ids []string) (data map[string]json.RawMessage, errs []error) {
	return cf.data, nil
}
|
package jwt
import (
"errors"
"time"
"github.com/dgrijalva/jwt-go"
)
// Singleton holds the process-wide token helper created by New.
var Singleton *tk

// Claims is the JWT claim set: an opaque payload string plus the
// standard registered claims (expiry, etc.).
type Claims struct {
	PlayLoad string `json:"playLoad"`
	jwt.StandardClaims
}

// tk bundles the HMAC signing secret and the token lifetime.
type tk struct {
	secret    []byte
	expiresAt time.Duration
}
// TokenCreate signs a new HS256 token carrying playLoad, expiring
// after the configured lifetime.
func (t tk) TokenCreate(playLoad string) (string, error) {
	claims := Claims{
		PlayLoad: playLoad,
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: time.Now().Add(t.expiresAt).Unix(),
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	return token.SignedString(t.secret)
}
// TokenParse validates tokenString and returns its claims. It errors
// on empty input, invalid signatures, expired tokens, and — fixed here
// — tokens signed with an unexpected algorithm: the keyfunc previously
// returned the HMAC secret unconditionally, which allows algorithm
// confusion attacks with jwt-go.
func (this tk) TokenParse(tokenString string) (*Claims, error) {
	if tokenString == "" {
		return nil, errors.New("tokenString 无效")
	}
	token, err := jwt.ParseWithClaims(tokenString, new(Claims), func(token *jwt.Token) (interface{}, error) {
		// Only accept HMAC-signed tokens, matching TokenCreate.
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, errors.New("unexpected signing method")
		}
		return this.secret, nil
	})
	if token != nil {
		if claims, ok := token.Claims.(*Claims); ok && token.Valid {
			return claims, nil
		}
	}
	return nil, err
}
// TokenRefresh re-issues a token with a fresh expiry, preserving the
// payload of the supplied (still valid) token.
func (t *tk) TokenRefresh(tokenString string) (string, error) {
	claims, err := t.TokenParse(tokenString)
	if err != nil {
		return "", err
	}
	return t.TokenCreate(claims.PlayLoad)
}
// New returns the process-wide token helper, creating it on first
// call. Subsequent calls ignore their arguments and return the
// existing instance. No locking is performed around the check-then-set.
func New(secret string, expiresAt time.Duration) *tk {
	if Singleton == nil {
		Singleton = &tk{[]byte(secret), expiresAt}
	}
	return Singleton
}
|
// Copyright (c) Mainflux
// SPDX-License-Identifier: Apache-2.0
package main
import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"strconv"
"syscall"
"time"
kitprometheus "github.com/go-kit/kit/metrics/prometheus"
"github.com/jmoiron/sqlx"
"github.com/mainflux/mainflux"
authapi "github.com/mainflux/mainflux/auth/api/grpc"
"github.com/mainflux/mainflux/consumers"
"github.com/mainflux/mainflux/consumers/notifiers"
"github.com/mainflux/mainflux/consumers/notifiers/api"
"github.com/mainflux/mainflux/consumers/notifiers/postgres"
"github.com/mainflux/mainflux/consumers/notifiers/smtp"
"github.com/mainflux/mainflux/consumers/notifiers/tracing"
"github.com/mainflux/mainflux/internal/email"
"github.com/mainflux/mainflux/logger"
"github.com/mainflux/mainflux/pkg/messaging/nats"
"github.com/mainflux/mainflux/pkg/ulid"
opentracing "github.com/opentracing/opentracing-go"
stdprometheus "github.com/prometheus/client_golang/prometheus"
jconfig "github.com/uber/jaeger-client-go/config"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
const (
	// Default values used when the corresponding environment variable
	// below is unset.
	defLogLevel = "error"
	defDBHost = "localhost"
	defDBPort = "5432"
	defDBUser = "mainflux"
	defDBPass = "mainflux"
	defDB = "subscriptions"
	defConfigPath = "/config.toml"
	defDBSSLMode = "disable"
	defDBSSLCert = ""
	defDBSSLKey = ""
	defDBSSLRootCert = ""
	defHTTPPort = "8180"
	defServerCert = ""
	defServerKey = ""
	defJaegerURL = ""
	defNatsURL = "nats://localhost:4222"
	defEmailHost = "localhost"
	defEmailPort = "25"
	defEmailUsername = "root"
	defEmailPassword = ""
	defEmailSecret = ""
	defEmailFromAddress = ""
	defEmailFromName = ""
	defEmailTemplate = "email.tmpl"
	defAuthTLS = "false"
	defAuthCACerts = ""
	defAuthURL = "localhost:8181"
	defAuthTimeout = "1s"
	// Environment variable names read by loadConfig.
	envLogLevel = "MF_SMTP_NOTIFIER_LOG_LEVEL"
	envDBHost = "MF_SMTP_NOTIFIER_DB_HOST"
	envDBPort = "MF_SMTP_NOTIFIER_DB_PORT"
	envDBUser = "MF_SMTP_NOTIFIER_DB_USER"
	envDBPass = "MF_SMTP_NOTIFIER_DB_PASS"
	envDB = "MF_SMTP_NOTIFIER_DB"
	envConfigPath = "MF_SMTP_NOTIFIER_CONFIG_PATH"
	envDBSSLMode = "MF_SMTP_NOTIFIER_DB_SSL_MODE"
	envDBSSLCert = "MF_SMTP_NOTIFIER_DB_SSL_CERT"
	envDBSSLKey = "MF_SMTP_NOTIFIER_DB_SSL_KEY"
	envDBSSLRootCert = "MF_SMTP_NOTIFIER_DB_SSL_ROOT_CERT"
	envHTTPPort = "MF_SMTP_NOTIFIER_PORT"
	envServerCert = "MF_SMTP_NOTIFIER_SERVER_CERT"
	envServerKey = "MF_SMTP_NOTIFIER_SERVER_KEY"
	envJaegerURL = "MF_JAEGER_URL"
	envNatsURL = "MF_NATS_URL"
	envEmailHost = "MF_EMAIL_HOST"
	envEmailPort = "MF_EMAIL_PORT"
	envEmailUsername = "MF_EMAIL_USERNAME"
	envEmailPassword = "MF_EMAIL_PASSWORD"
	envEmailSecret = "MF_EMAIL_SECRET"
	envEmailFromAddress = "MF_EMAIL_FROM_ADDRESS"
	envEmailFromName = "MF_EMAIL_FROM_NAME"
	envEmailTemplate = "MF_EMAIL_TEMPLATE"
	envAuthTLS = "MF_AUTH_CLIENT_TLS"
	envAuthCACerts = "MF_AUTH_CA_CERTS"
	envAuthURL = "MF_AUTH_GRPC_URL"
	envAuthTimeout = "MF_AUTH_GRPC_TIMEOUT"
)
// config aggregates all runtime settings for the SMTP notifier
// service; it is populated from environment variables by loadConfig.
type config struct {
	natsURL string
	configPath string
	logLevel string
	dbConfig postgres.Config
	emailConf email.Config
	httpPort string
	serverCert string
	serverKey string
	jaegerURL string
	authTLS bool
	authCACerts string
	authURL string
	authTimeout time.Duration
}
// main wires together the SMTP notifier service: Postgres storage,
// NATS subscription, auth gRPC client, Jaeger tracing, and the HTTP
// API. It blocks until the HTTP server fails or SIGINT is received.
func main() {
	cfg := loadConfig()
	logger, err := logger.New(os.Stdout, cfg.logLevel)
	if err != nil {
		log.Fatalf(err.Error())
	}
	db := connectToDB(cfg.dbConfig, logger)
	defer db.Close()
	pubSub, err := nats.NewPubSub(cfg.natsURL, "", logger)
	if err != nil {
		logger.Error(fmt.Sprintf("Failed to connect to NATS: %s", err))
		os.Exit(1)
	}
	defer pubSub.Close()
	authTracer, closer := initJaeger("auth", cfg.jaegerURL, logger)
	defer closer.Close()
	// Renamed from "close", which shadowed the builtin.
	auth, closeAuth := connectToAuth(cfg, authTracer, logger)
	if closeAuth != nil {
		defer closeAuth()
	}
	tracer, closer := initJaeger("smtp-notifier", cfg.jaegerURL, logger)
	defer closer.Close()
	dbTracer, dbCloser := initJaeger("smtp-notifier_db", cfg.jaegerURL, logger)
	defer dbCloser.Close()
	svc := newService(db, dbTracer, auth, cfg, logger)
	errs := make(chan error, 2)
	if err = consumers.Start(pubSub, svc, nil, cfg.configPath, logger); err != nil {
		logger.Error(fmt.Sprintf("Failed to create Postgres writer: %s", err))
	}
	go startHTTPServer(tracer, svc, cfg.httpPort, cfg.serverCert, cfg.serverKey, logger, errs)
	go func() {
		// signal.Notify requires a buffered channel — with an
		// unbuffered one the notification can be dropped (go vet
		// flags the previous form).
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT)
		errs <- fmt.Errorf("%s", <-c)
	}()
	err = <-errs
	logger.Error(fmt.Sprintf("Users service terminated: %s", err))
}
// loadConfig assembles the service configuration from environment
// variables, falling back to the def* constants. It terminates the
// process on unparseable duration/boolean values.
func loadConfig() config {
	authTimeout, err := time.ParseDuration(mainflux.Env(envAuthTimeout, defAuthTimeout))
	if err != nil {
		log.Fatalf("Invalid %s value: %s", envAuthTimeout, err.Error())
	}
	tls, err := strconv.ParseBool(mainflux.Env(envAuthTLS, defAuthTLS))
	if err != nil {
		log.Fatalf("Invalid value passed for %s\n", envAuthTLS)
	}
	// Postgres connection settings for the subscriptions DB.
	dbConfig := postgres.Config{
		Host: mainflux.Env(envDBHost, defDBHost),
		Port: mainflux.Env(envDBPort, defDBPort),
		User: mainflux.Env(envDBUser, defDBUser),
		Pass: mainflux.Env(envDBPass, defDBPass),
		Name: mainflux.Env(envDB, defDB),
		SSLMode: mainflux.Env(envDBSSLMode, defDBSSLMode),
		SSLCert: mainflux.Env(envDBSSLCert, defDBSSLCert),
		SSLKey: mainflux.Env(envDBSSLKey, defDBSSLKey),
		SSLRootCert: mainflux.Env(envDBSSLRootCert, defDBSSLRootCert),
	}
	// SMTP settings used by the email agent.
	emailConf := email.Config{
		FromAddress: mainflux.Env(envEmailFromAddress, defEmailFromAddress),
		FromName: mainflux.Env(envEmailFromName, defEmailFromName),
		Host: mainflux.Env(envEmailHost, defEmailHost),
		Port: mainflux.Env(envEmailPort, defEmailPort),
		Username: mainflux.Env(envEmailUsername, defEmailUsername),
		Password: mainflux.Env(envEmailPassword, defEmailPassword),
		Secret: mainflux.Env(envEmailSecret, defEmailSecret),
		Template: mainflux.Env(envEmailTemplate, defEmailTemplate),
	}
	return config{
		logLevel: mainflux.Env(envLogLevel, defLogLevel),
		natsURL: mainflux.Env(envNatsURL, defNatsURL),
		configPath: mainflux.Env(envConfigPath, defConfigPath),
		dbConfig: dbConfig,
		emailConf: emailConf,
		httpPort: mainflux.Env(envHTTPPort, defHTTPPort),
		serverCert: mainflux.Env(envServerCert, defServerCert),
		serverKey: mainflux.Env(envServerKey, defServerKey),
		jaegerURL: mainflux.Env(envJaegerURL, defJaegerURL),
		authTLS: tls,
		authCACerts: mainflux.Env(envAuthCACerts, defAuthCACerts),
		authURL: mainflux.Env(envAuthURL, defAuthURL),
		authTimeout: authTimeout,
	}
}
// initJaeger builds a Jaeger tracer reporting to url. With an empty
// url it returns a no-op tracer and closer; on tracer-creation failure
// it logs and exits the process.
func initJaeger(svcName, url string, logger logger.Logger) (opentracing.Tracer, io.Closer) {
	if url == "" {
		return opentracing.NoopTracer{}, ioutil.NopCloser(nil)
	}
	cfg := jconfig.Configuration{
		ServiceName: svcName,
		Sampler: &jconfig.SamplerConfig{
			Type:  "const",
			Param: 1,
		},
		Reporter: &jconfig.ReporterConfig{
			LocalAgentHostPort: url,
			LogSpans:           true,
		},
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err))
		os.Exit(1)
	}
	return tracer, closer
}
// connectToDB opens the Postgres connection; on failure it logs the
// error and exits the process.
func connectToDB(dbConfig postgres.Config, logger logger.Logger) *sqlx.DB {
	db, err := postgres.Connect(dbConfig)
	if err == nil {
		return db
	}
	logger.Error(fmt.Sprintf("Failed to connect to postgres: %s", err))
	os.Exit(1)
	return nil
}
// connectToAuth dials the auth gRPC service and returns a client plus
// the connection's Close for cleanup; on dial/credential failure it
// logs and exits.
//
// NOTE(review): when authTLS is true but authCACerts is empty, opts
// stays empty and grpc.Dial fails with "no transport security set" —
// confirm whether that is the intended behavior.
func connectToAuth(cfg config, tracer opentracing.Tracer, logger logger.Logger) (mainflux.AuthServiceClient, func() error) {
	var opts []grpc.DialOption
	if cfg.authTLS {
		if cfg.authCACerts != "" {
			tpc, err := credentials.NewClientTLSFromFile(cfg.authCACerts, "")
			if err != nil {
				logger.Error(fmt.Sprintf("Failed to create tls credentials: %s", err))
				os.Exit(1)
			}
			opts = append(opts, grpc.WithTransportCredentials(tpc))
		}
	} else {
		opts = append(opts, grpc.WithInsecure())
		logger.Info("gRPC communication is not encrypted")
	}
	conn, err := grpc.Dial(cfg.authURL, opts...)
	if err != nil {
		logger.Error(fmt.Sprintf("Failed to connect to auth service: %s", err))
		os.Exit(1)
	}
	return authapi.NewClient(tracer, conn, cfg.authTimeout), conn.Close
}
// newService builds the notifiers service: traced Postgres repository,
// ULID id provider, SMTP notifier, plus logging and Prometheus metrics
// middleware. Exits the process if the email agent cannot be created.
func newService(db *sqlx.DB, tracer opentracing.Tracer, auth mainflux.AuthServiceClient, c config, logger logger.Logger) notifiers.Service {
	database := postgres.NewDatabase(db)
	repo := tracing.New(postgres.New(database), tracer)
	idp := ulid.New()
	agent, err := email.New(&c.emailConf)
	if err != nil {
		logger.Error(fmt.Sprintf("Failed to create email agent: %s", err))
		os.Exit(1)
	}
	notifier := smtp.New(agent)
	svc := notifiers.New(auth, repo, idp, notifier)
	svc = api.LoggingMiddleware(svc, logger)
	// Request count and latency, labelled by method.
	svc = api.MetricsMiddleware(
		svc,
		kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: "notifier",
			Subsystem: "smtp",
			Name: "request_count",
			Help: "Number of requests received.",
		}, []string{"method"}),
		kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{
			Namespace: "notifier",
			Subsystem: "smtp",
			Name: "request_latency_microseconds",
			Help: "Total duration of requests in microseconds.",
		}, []string{"method"}),
	)
	return svc
}
// startHTTPServer serves the notifier API on the given port, using TLS
// when a certificate or key file is configured. The (terminal) server
// error is delivered on errs.
func startHTTPServer(tracer opentracing.Tracer, svc notifiers.Service, port string, certFile string, keyFile string, logger logger.Logger, errs chan error) {
	addr := fmt.Sprintf(":%s", port)
	handler := api.MakeHandler(svc, tracer)
	if certFile == "" && keyFile == "" {
		logger.Info(fmt.Sprintf("SMTP notifier service started using http, exposed port %s", port))
		errs <- http.ListenAndServe(addr, handler)
		return
	}
	logger.Info(fmt.Sprintf("SMTP notifier service started using https, cert %s key %s, exposed port %s", certFile, keyFile, port))
	errs <- http.ListenAndServeTLS(addr, certFile, keyFile, handler)
}
|
package main
import "net/http"
// logout parses and validates the request's JWT, then asks the token
// storage service to delete it. Any failure is reported as a JSON
// error; on success nothing is written and the implicit 200 stands.
func logout(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", `application/json`)
	token, err := parseToken(r)
	if err == nil {
		err = checkJwtToken(token)
	}
	if err != nil {
		writeError(w, errorToJson(err.Error()), http.StatusBadRequest)
		return
	}
	// Ask the storage service to discard the token.
	response, err := requestToStorage(http.MethodDelete, settings.TokenStorage+"/token", token)
	if err != nil {
		writeError(w, errorToJson(err.Error()), http.StatusBadRequest)
		return
	}
	if response.Status != http.StatusOK {
		writeError(w, errorToJson(response.Body), response.Status)
		return
	}
	// Deletion succeeded: empty body, status 200.
}
|
package supervisor
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/cloudflare/cloudflared/retry"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
var (
	// errJWTUnset is returned by ReconnectToken before any token has
	// been stored.
	errJWTUnset = errors.New("JWT unset")
)

// reconnectTunnelCredentialManager is invoked by functions in tunnel.go to
// get/set parameters for ReconnectTunnel RPC calls.
type reconnectCredentialManager struct {
	mu sync.RWMutex // guards jwt, eventDigest and connDigest
	jwt []byte
	eventDigest map[uint8][]byte // per-connection event digests, keyed by connection ID
	connDigest map[uint8][]byte // per-connection digests, keyed by connection ID
	authSuccess prometheus.Counter
	authFail *prometheus.CounterVec
}
// newReconnectCredentialManager constructs a credential manager with
// digest maps pre-sized for haConnections, and registers its
// authentication success/failure metrics with Prometheus.
func newReconnectCredentialManager(namespace, subsystem string, haConnections int) *reconnectCredentialManager {
	successOpts := prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "tunnel_authenticate_success",
		Help:      "Count of successful tunnel authenticate",
	}
	failOpts := prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "tunnel_authenticate_fail",
		Help:      "Count of tunnel authenticate errors by type",
	}
	authSuccess := prometheus.NewCounter(successOpts)
	authFail := prometheus.NewCounterVec(failOpts, []string{"error"})
	prometheus.MustRegister(authSuccess, authFail)
	return &reconnectCredentialManager{
		eventDigest: make(map[uint8][]byte, haConnections),
		connDigest:  make(map[uint8][]byte, haConnections),
		authSuccess: authSuccess,
		authFail:    authFail,
	}
}
// ReconnectToken returns the stored JWT, or errJWTUnset if none has
// been recorded yet.
func (cm *reconnectCredentialManager) ReconnectToken() ([]byte, error) {
	cm.mu.RLock()
	token := cm.jwt
	cm.mu.RUnlock()
	if token == nil {
		return nil, errJWTUnset
	}
	return token, nil
}
// SetReconnectToken stores jwt for later ReconnectTunnel calls.
func (cm *reconnectCredentialManager) SetReconnectToken(jwt []byte) {
	cm.mu.Lock()
	cm.jwt = jwt
	cm.mu.Unlock()
}
// EventDigest returns the event digest recorded for connID, or an
// error when none is known.
func (cm *reconnectCredentialManager) EventDigest(connID uint8) ([]byte, error) {
	cm.mu.RLock()
	defer cm.mu.RUnlock()
	if digest, ok := cm.eventDigest[connID]; ok {
		return digest, nil
	}
	return nil, fmt.Errorf("no event digest for connection %v", connID)
}
// SetEventDigest records the event digest for connID.
func (cm *reconnectCredentialManager) SetEventDigest(connID uint8, digest []byte) {
	cm.mu.Lock()
	cm.eventDigest[connID] = digest
	cm.mu.Unlock()
}
// ConnDigest returns the connection digest recorded for connID, or an
// error when none is known.
func (cm *reconnectCredentialManager) ConnDigest(connID uint8) ([]byte, error) {
	cm.mu.RLock()
	defer cm.mu.RUnlock()
	if digest, ok := cm.connDigest[connID]; ok {
		return digest, nil
	}
	return nil, fmt.Errorf("no connection digest for connection %v", connID)
}
// SetConnDigest records the connection digest for connID.
func (cm *reconnectCredentialManager) SetConnDigest(connID uint8, digest []byte) {
	cm.mu.Lock()
	cm.connDigest[connID] = digest
	cm.mu.Unlock()
}
// RefreshAuth runs one authentication attempt and interprets its
// outcome: on success it stores the new JWT and schedules the next
// refresh; on AuthUnknown it retries after the server-suggested delay;
// on AuthFail (or an unexpected outcome) it gives up. A non-nil
// retryTimer means "try again when it fires".
func (cm *reconnectCredentialManager) RefreshAuth(
	ctx context.Context,
	backoff *retry.BackoffHandler,
	authenticate func(ctx context.Context, numPreviousAttempts int) (tunnelpogs.AuthOutcome, error),
) (retryTimer <-chan time.Time, err error) {
	authOutcome, err := authenticate(ctx, backoff.Retries())
	if err != nil {
		cm.authFail.WithLabelValues(err.Error()).Inc()
		// Transport-level failure: retry with backoff while the
		// backoff budget allows it.
		if _, ok := backoff.GetMaxBackoffDuration(ctx); ok {
			return backoff.BackoffTimer(), nil
		}
		return nil, err
	}
	// clear backoff timer
	backoff.SetGracePeriod()
	switch outcome := authOutcome.(type) {
	case tunnelpogs.AuthSuccess:
		cm.SetReconnectToken(outcome.JWT())
		cm.authSuccess.Inc()
		return retry.Clock.After(outcome.RefreshAfter()), nil
	case tunnelpogs.AuthUnknown:
		// Indeterminate result: count it as a failure but retry after
		// the outcome's suggested delay.
		duration := outcome.RefreshAfter()
		cm.authFail.WithLabelValues(outcome.Error()).Inc()
		return retry.Clock.After(duration), nil
	case tunnelpogs.AuthFail:
		cm.authFail.WithLabelValues(outcome.Error()).Inc()
		return nil, outcome
	default:
		err := fmt.Errorf("refresh_auth: Unexpected outcome type %T", authOutcome)
		cm.authFail.WithLabelValues(err.Error()).Inc()
		return nil, err
	}
}
|
package Place
import (
"fmt"
)
// Place is a named geographic point. The coordinate fields are
// unexported; use the accessor methods. ("longtitude" is a historical
// misspelling kept because the exported methods carry the same name.)
type Place struct {
	latitude, longtitude float64
	Name string
}
// New builds a Place at the given coordinates, passing each angle
// through saneAngle (with 0 as the previous value) first.
func New(latitude, longtitude float64, name string) *Place {
	p := Place{
		latitude:   saneAngle(0, latitude),
		longtitude: saneAngle(0, longtitude),
		Name:       name,
	}
	return &p
}
// Latitude returns the stored latitude.
func (p *Place) Latitude() float64 {
	return p.latitude
}
// SetLatitude updates the latitude after running it through saneAngle
// together with the current value.
func (p *Place) SetLatitude(latitude float64) {
	p.latitude = saneAngle(p.latitude, latitude)
}
// Longtitude returns the stored longitude (method name keeps the
// historical spelling for compatibility).
func (p *Place) Longtitude() float64 {
	return p.longtitude
}
// SetLongtitude updates the longitude after running it through
// saneAngle together with the current value.
func (p *Place) SetLongtitude(longtitude float64) {
	p.longtitude = saneAngle(p.longtitude, longtitude)
}
// String renders the place as "(lat度, lon度 ) \"Name\"".
func (p *Place) String() string {
	return fmt.Sprintf("(%.3f度, %.3f度 ) %q", p.latitude, p.longtitude, p.Name)
}
// Copy returns a new Place with identical fields.
func (p *Place) Copy() *Place {
	clone := *p
	return &clone
}
// saneAngle returns the candidate angle value.
// NOTE(review): validation against the previous value is not yet
// implemented — the candidate is currently accepted unchanged.
func saneAngle(previous, candidate float64) float64 {
	return candidate
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"fmt"
"strings"
)
// IsColor returns true if a is a color attachment.
func (a FramebufferAttachment) IsColor() bool {
	return a == FramebufferAttachment_Color0 ||
		a == FramebufferAttachment_Color1 ||
		a == FramebufferAttachment_Color2 ||
		a == FramebufferAttachment_Color3
}
// IsDepth returns true if a is a depth attachment.
func (a FramebufferAttachment) IsDepth() bool {
	return FramebufferAttachment_Depth == a
}
// IsStencil returns true if a is a stencil attachment.
func (a FramebufferAttachment) IsStencil() bool {
	return FramebufferAttachment_Stencil == a
}
// Format implements fmt.Formatter, printing a human-readable aspect
// name (or a fallback for unknown values).
func (a AspectType) Format(f fmt.State, c rune) {
	var name string
	switch a {
	case AspectType_COLOR:
		name = "Color"
	case AspectType_DEPTH:
		name = "Depth"
	case AspectType_STENCIL:
		name = "Stencil"
	default:
		fmt.Fprintf(f, "Unknown AspectType %d", int(a))
		return
	}
	fmt.Fprint(f, name)
}
// Format implements fmt.Formatter, printing the pipeline type name in
// title case (e.g. "Graphics").
// NOTE(review): strings.Title is deprecated since Go 1.18; its
// replacement lives in golang.org/x/text/cases — migrate when that
// dependency is acceptable.
func (t Pipeline_Type) Format(f fmt.State, c rune) {
	fmt.Fprint(f, strings.Title(strings.ToLower(t.String())))
}
// Extension returns the conventional file extension for shaders of
// type x, or "unknown" for unrecognized values.
func (x ShaderType) Extension() string {
	ext := "unknown"
	switch x {
	case ShaderType_Vertex:
		ext = "vert"
	case ShaderType_Geometry:
		ext = "geom"
	case ShaderType_TessControl:
		ext = "tessc"
	case ShaderType_TessEvaluation:
		ext = "tesse"
	case ShaderType_Fragment:
		ext = "frag"
	case ShaderType_Compute:
		ext = "comp"
	case ShaderType_Spirv:
		ext = "spvasm"
	case ShaderType_SpirvBinary:
		ext = "spv"
	}
	return ext
}
|
package utils
import "holdempoker/models"
// WinnerValidator computes hand results and decides the winner of a
// game round.
type WinnerValidator struct {
	handUtil PokerHandUtil // hand evaluator, reset on each GetResult call
}
// GetResult evaluates cards with a fresh PokerHandUtil and returns the
// computed hand result.
func (w *WinnerValidator) GetResult(cards []int) models.HandResult {
	util := PokerHandUtil{}
	w.handUtil = util
	return w.handUtil.CheckHands(cards)
}
// GetWinner returns the user index of the winning player: the players
// with the best hand type are collected, and ties are broken first by
// GetTiedWinner and then by GetKickWinner. Returns 0 when playerList
// is empty.
func (w *WinnerValidator) GetWinner(playerList []models.GamePlayer) int64 {
	var winners []models.GamePlayer
	best := -1
	for i := range playerList {
		switch hand := playerList[i].Result.HandType; {
		case hand > best:
			best = hand
			winners = []models.GamePlayer{playerList[i]}
		case hand == best:
			winners = append(winners, playerList[i])
		}
	}
	var userIndex int64
	switch {
	case len(winners) == 1:
		userIndex = winners[0].UserIndex
	case len(winners) > 1:
		userIndex = GetTiedWinner(winners)
		if userIndex == 0 {
			// Hands were equal too — fall back to kicker comparison.
			userIndex = GetKickWinner(winners)
		}
	}
	return userIndex
}
// GetTiedWinner picks a winner among players that share the same hand
// type: the player owning the single highest hand value wins. Returns
// 0 if no hand value exceeds zero.
func GetTiedWinner(resultList []models.GamePlayer) int64 {
	var winner int64
	best := 0
	for _, player := range resultList {
		for _, value := range player.Result.Hands {
			if value > best {
				best = value
				winner = player.UserIndex
			}
		}
	}
	return winner
}
// GetKickWinner picks a winner among tied players by kicker cards: the
// player owning the single highest kicker value wins. Returns 0 if no
// kicker value exceeds zero.
func GetKickWinner(resultList []models.GamePlayer) int64 {
	var winner int64
	best := 0
	for _, player := range resultList {
		for _, value := range player.Result.Kicks {
			if value > best {
				best = value
				winner = player.UserIndex
			}
		}
	}
	return winner
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"context"
"github.com/google/gapid/core/log"
"github.com/google/gapid/test/robot/build"
"github.com/google/gapid/test/robot/job"
"github.com/google/gapid/test/robot/monitor"
"github.com/google/gapid/test/robot/replay"
)
// doReplay schedules a replay of trace t on this schedule's worker, if
// the worker supports replay. The action is deduplicated through
// FindOrCreate, and execution is fired off asynchronously.
func (s schedule) doReplay(ctx context.Context, t *monitor.Trace,
	tools *build.ToolSet, androidTools *build.AndroidToolSet) error {
	if !s.worker.Supports(job.Replay) {
		return nil
	}
	ctx = log.Enter(ctx, "Replay")
	ctx = log.V{"Package": s.pkg.Id}.Bind(ctx)
	input := &replay.Input{
		Trace: t.Action.Output.Trace,
		Gapit: tools.Host.Gapit,
		Gapis: tools.Host.Gapis,
		Gapir: tools.Host.Gapir,
		VirtualSwapChainLib: tools.Host.VirtualSwapChainLib,
		VirtualSwapChainJson: tools.Host.VirtualSwapChainJson,
		Package: s.pkg.Id,
		Api: t.Input.Hints.API,
		GapirDevice: s.gapirDevice(),
	}
	// Android targets additionally need the APK and ABI layout.
	if androidTools != nil {
		input.GapidApk = androidTools.GapidApk
		input.ToolingLayout = &replay.ToolingLayout{
			GapidAbi: androidTools.Abi,
		}
	}
	action := &replay.Action{
		Input: input,
		Host: s.worker.Host,
		Target: s.worker.Target,
	}
	// An equivalent action already exists — nothing new to schedule.
	if _, found := s.data.Replays.FindOrCreate(ctx, action); found {
		return nil
	}
	// TODO: we just ignore the error right now, what should we do?
	go s.managers.Replay.Do(ctx, action.Target, input)
	return nil
}
|
package handler
import (
"log"
"net"
"puck-server/match-server/convert"
"puck-server/match-server/service"
"puck-server/shared-server"
)
// HandleGetLeaderboard parses a GETLEADERBOARD packet, queries the rank
// RPC service, and writes the reply back on conn.
//
// Fix: the previous version logged a parse failure but then went on to
// dereference the (likely nil) parsed packet; it now returns early.
// The conn.Write error is also no longer silently discarded.
func HandleGetLeaderboard(buf []byte, conn net.Conn, serviceList *service.List) {
	log.Printf("GETLEADERBOARD received")
	// Parse
	recvPacket, err := convert.ParseGetLeaderboard(buf)
	if err != nil {
		log.Printf("HandleGetLeaderboard fail: %v", err.Error())
		return // recvPacket is unusable after a parse failure
	}
	leaderboardRequest := shared_server.LeaderboardRequest{
		Id:         convert.IdCuintToByteArray(recvPacket.Id),
		StartIndex: int(recvPacket.Start_index),
		Count:      int(recvPacket.Count),
	}
	var leaderboardReply shared_server.LeaderboardReply
	err = serviceList.Rank.GetLeaderboard(&leaderboardRequest, &leaderboardReply)
	if err != nil {
		log.Printf("rank rpc error: %v", err.Error())
		return
	}
	reply := convert.NewLwpLeaderboard(&leaderboardReply)
	replyBuf := convert.Packet2Buf(reply)
	if _, err := conn.Write(replyBuf); err != nil {
		log.Printf("reply write error: %v", err.Error())
	}
}
|
package core
// followers lists popular Instagram account handles used as seed data.
// NOTE(review): the ordering appears to follow follower count — confirm.
var followers = []string{"instagram",
	"selenagomez",
	"taylorswift",
	"arianagrande",
	"beyonce",
	"kimkardashian",
	"cristiano",
	"kyliejenner",
	"justinbieber",
	"therock",
	"kendalljenner",
	"nickiminaj",
	"nike",
	"natgeo",
	"neymarjr",
	"leomessi",
	"khloekardashian",
	"katyperry",
	"mileycyrus",
	"jlo",
	"ddlovato",
	"kourtneykardash",
	"victoriassecret",
	"badgalriri",
	"kevinhart4real",
	"fcbarcelona",
	"realmadrid",
	"justintimberlake",
	"theellenshow",
	"caradelevingne",
	"zendaya",
	"9gag",
	"chrisbrownofficial",
	"davidbeckham",
	"champagnepapi",
	"vindiesel",
	"shakira",
	"jamesrodriguez10",
	"gigihadid",
	"kingjames",
	"garethbale11",
	"nikefootball",
	"zacefron",
	"adele",
	"vanessahudgens",
	"iamzlatanibrahimovic",
	"emmawatson",
	"ladygaga",
	"danbilzerian",
	"nba",
	"harrystyles",
	"ronaldinhooficial",
	"letthelordbewithyou",
	"luissuarez9",
	"maluma",
	"adidasfootball",
	"niallhoran",
	"nasa",
	"hm",
	"brumarquezine",
	"zayn",
	"ayutingting92",
	"onedirection",
	"chanelofficial",
	"camerondallas",
	"shawnmendes",
	"zachking",
	"lucyhale",
	"anitta",
	"karimbenzema",
	"marinaruybarbosa",
	"adidasoriginals",
	"hudabeauty",
	"princessyahrini",
	"krisjenner",
	"davidluiz_4",
	"andresiniesta8",
	"itsashbenzo",
	"zara",
	"manchesterunited",
	"nickyjampr",
	"instagrambrasil",
	"shaym",
	"raffinagita1717",
	"marcelotwelve",
	"bellathorne",
	"ciara",
	"britneyspears",
	"repostapp",
	"natgeotravel",
	"louisvuitton",
	"stephencurry30",
	"laudyacynthiabella",
	"snoopdogg",
	"floydmayweather",
	"wizkhalifa",
	"prillylatuconsina96",
	"voguemagazine",
	"jbalvin",
	"deepikapadukone"}
|
package balancer
import (
"testing"
)
// TestBalancer checks that New constructs each balancer strategy with
// the expected name, and that weighted/random selection honors the
// supplied choices.
func TestBalancer(t *testing.T) {
	lb := New(WeightedRoundRobin, nil)
	if lb.Name() != "WeightedRoundRobin" {
		t.Fatal("balancer.New wrong")
	}
	lb = New(SmoothWeightedRoundRobin, nil)
	if lb.Name() != "SmoothWeightedRoundRobin" {
		t.Fatal("balancer.New wrong")
	}
	lb = New(WeightedRand, nil)
	if lb.Name() != "WeightedRand" {
		t.Fatal("balancer.New wrong")
	}
	// X has weight 0, so the weighted balancer must always pick Y.
	wNodes := map[interface{}]int{
		"X": 0,
		"Y": 1,
	}
	choices := NewChoicesMap(wNodes)
	lb.Update(choices)
	best := lb.Select()
	if best != "Y" {
		t.Fatal("balancer select wrong")
	}
	lb = New(ConsistentHash, nil)
	if lb.Name() != "ConsistentHash" {
		t.Fatal("balancer.New wrong")
	}
	lb = New(RoundRobin, nil)
	if lb.Name() != "RoundRobin" {
		t.Fatal("balancer.New wrong")
	}
	lb = New(Random, nil)
	if lb.Name() != "Random" {
		t.Fatal("balancer.New wrong")
	}
	// With a single choice, Random must return it.
	lb.Update([]*Choice{
		NewChoice("A"),
	})
	best = lb.Select()
	if best != "A" {
		t.Fatal("balancer select wrong")
	}
	// With two choices, Random must return one of them.
	nodes := []string{"B", "C"}
	choices = NewChoicesSlice(nodes)
	lb.Update(choices)
	best = lb.Select()
	if best != "B" && best != "C" {
		t.Fatal("balancer select wrong")
	}
}
|
package red_black
// InsertBalance restores red-black properties after inserting node t.
//
// NOTE(review): this differs from the textbook insert fix-up in two
// ways — confirm whether intentional:
//  1. When the uncle is nil (absent, i.e. black under the usual
//     nil-as-black convention) the function returns without any
//     rotation or recoloring.
//  2. After the rotations, the recoloring flips grand and t, whereas
//     the classic fix-up recolors the new subtree root and grand.
func InsertBalance(t *RBTree) {
	parnt := t.Parent
	grand := t.Grandparent()
	uncle := t.Uncle()
	if uncle == nil {
		return
	}
	if uncle.Red {
		// Red uncle: push blackness down from the grandparent and
		// continue the fix-up from there.
		parnt.Red = false
		uncle.Red = false
		grand.Red = true
		InsertBalance(grand)
		return
	} else {
		// Black uncle: rotate according to the parent/child side
		// combination, then flip colors.
		switch {
		case parnt == grand.Left && t == parnt.Left:
			RotateRight(grand)
		case parnt == grand.Left && t == parnt.Right:
			RotateLeft(parnt)
			RotateRight(grand)
		case parnt == grand.Right && t == parnt.Right:
			RotateLeft(grand)
		case parnt == grand.Right && t == parnt.Left:
			RotateRight(parnt)
			RotateLeft(grand)
		}
		grand.Red = !grand.Red
		t.Red = !t.Red
	}
}
// RotateRight rotates the subtree rooted at t to the right: t's left
// child becomes the subtree root and t becomes its right child.
//
// Fixes over the previous version: the new subtree root's Parent is
// now always updated (it was only cleared in the root case, leaving a
// stale pointer otherwise), and the transplanted child's Parent is
// re-pointed at t.
func RotateRight(t *RBTree) {
	new_parent := t.Left
	parnt := t.Parent
	new_parent.Parent = parnt
	if parnt != nil {
		if t == parnt.Left {
			parnt.Left = new_parent
		} else {
			parnt.Right = new_parent
		}
	}
	// Move new_parent's right subtree under t before re-linking.
	t.Left = new_parent.Right
	if t.Left != nil {
		t.Left.Parent = t
	}
	new_parent.Right = t
	t.Parent = new_parent
}
// RotateLeft rotates the subtree rooted at t to the left: t's right
// child becomes the subtree root and t becomes its left child.
//
// Fixes over the previous version: the new subtree root's Parent is
// now always updated (it was only cleared in the root case, leaving a
// stale pointer otherwise), and the transplanted child's Parent is
// re-pointed at t.
func RotateLeft(t *RBTree) {
	new_parent := t.Right
	parnt := t.Parent
	new_parent.Parent = parnt
	if parnt != nil {
		if t == parnt.Left {
			parnt.Left = new_parent
		} else {
			parnt.Right = new_parent
		}
	}
	// Move new_parent's left subtree under t before re-linking.
	t.Right = new_parent.Left
	if t.Right != nil {
		t.Right.Parent = t
	}
	new_parent.Left = t
	t.Parent = new_parent
}
// Grandparent returns the parent of t's parent, or nil when t has no
// parent.
func (t *RBTree) Grandparent() *RBTree {
	if p := t.Parent; p != nil {
		return p.Parent
	}
	return nil
}
// Uncle returns the sibling of t's parent, or nil when t has no
// grandparent.
func (t *RBTree) Uncle() *RBTree {
	grand := t.Grandparent()
	if grand == nil {
		return nil
	}
	if t.Parent == grand.Left {
		return grand.Right
	}
	return grand.Left
}
// Sibling returns the other child of t's parent, or nil for the root.
func (t *RBTree) Sibling() *RBTree {
	p := t.Parent
	switch {
	case p == nil:
		return nil
	case t == p.Left:
		return p.Right
	default:
		return p.Left
	}
}
// FindRoot walks Parent pointers up to the tree's root.
func (t *RBTree) FindRoot() *RBTree {
	node := t
	for node.Parent != nil {
		node = node.Parent
	}
	return node
}
|
package main
import (
"fmt"
)
// trocar returns its two arguments in swapped order.
func trocar(p1, p2 int) (segundo int, primeiro int) {
	return p2, p1
}
func main(){
r1,r2 := trocar(2,1)
fmt.Println(r1, r2)
} |
package main
import "fmt"
// main prints a short introduction message.
func main() {
	const message = "My Name is, Jagmohan"
	fmt.Println(message)
}
package httprequest
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"syncAgent-go/syncAgent/params"
)
const csrfRecordPath = "./record.txt"
// csrfGet fetches a fresh CSRF token from the login endpoint, storing
// the X-Syncthing-Id response header into params.MetaDeviceID as a
// side effect.
//
// Fix: the previous version indexed res.Cookies()[0] unconditionally,
// panicking when the server set no cookies; that case now returns an
// error.
func csrfGet() (string, error) {
	req, err := http.NewRequest("GET", params.TryLoginURL, nil)
	if err != nil {
		return "", err
	}
	req.Header.Add("Content-Type", "application/json")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if id := res.Header.Get("X-Syncthing-Id"); id != "" {
		params.MetaDeviceID = id
	}
	cookies := res.Cookies()
	if len(cookies) == 0 {
		return "", fmt.Errorf("no CSRF cookie in response from %s", params.TryLoginURL)
	}
	return cookies[0].Value, nil
}
// csrfVarify reports whether csrf looks valid (correct length) and is
// still accepted by the server, probed with a GET against url.
//
// Fix: the url parameter was previously ignored in favor of the
// hard-coded params.TryERR endpoint; it is now used. (The only caller
// passes params.TryERR, so behavior is unchanged in practice.)
func csrfVarify(url string, csrf string) (bool, error) {
	if csrf == "" || len(csrf) != params.CSRFlength {
		return false, nil
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return false, err
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("X-CSRF-Token-UE2TW", csrf)
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return false, err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return false, nil
	}
	return true, nil
}
// readCsrfRecord reads the cached CSRF token from csrfRecordPath,
// creating the file if it does not exist.
//
// Fix: the ReadAll error was previously discarded; it is now returned.
func readCsrfRecord() (string, error) {
	fd, err := os.OpenFile(csrfRecordPath, os.O_CREATE|os.O_RDONLY, 0755)
	if err != nil {
		return "", err
	}
	defer fd.Close()
	b, err := ioutil.ReadAll(fd)
	if err != nil {
		return "", err
	}
	return string(b), nil
}
// writeCsrfRecord overwrites csrfRecordPath with content.
//
// Fixes: O_CREATE is added so the first write succeeds even when the
// record file does not exist yet, and the write error is no longer
// discarded.
func writeCsrfRecord(content string) error {
	fd, err := os.OpenFile(csrfRecordPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0755)
	if err != nil {
		return err
	}
	defer fd.Close()
	if _, err := fmt.Fprintf(fd, "%s", content); err != nil {
		return err
	}
	return nil
}
// randomFolderID fetches a random folder ID from syncthing (test-only
// helper) and formats it as lowercase "xxxxx-xxxxx".
func randomFolderID() (string, error) {
	d, err := Get(params.TryRandomFolderID)
	if err != nil {
		return "", err
	}
	var f params.FolderIDRandom
	if err := json.Unmarshal(d, &f); err != nil {
		// should not happen; fall back to an empty ID (TODO: log)
		return "", nil
	}
	id := strings.ToLower(string(f.Random))
	return id[:5] + "-" + id[5:], nil
}
// CSRF obtains a valid CSRF token — from the local record file when
// the cached one still verifies, otherwise freshly from the server —
// stores it in params.CSRF and persists it back to disk.
func CSRF() error {
	var csrf string
	sa, err := readCsrfRecord()
	if err != nil {
		// NOTE(review): the original comment says this failure need not
		// be returned ("best effort"), yet the code does return it —
		// confirm the intended behavior.
		return err
	}
	csrf = sa
	ok, err := csrfVarify(params.TryERR, csrf)
	if err != nil {
		return err
	}
	if !ok {
		// Cached token failed verification; fetch a fresh one.
		fmt.Println("文件中找的csrf验证未通过") // TODO: use a proper logger
		sb, err := csrfGet()
		if err != nil {
			return err // TODO: log
		}
		csrf = sb
	} else {
		// NOTE(review): the result of this extra csrfGet call is
		// discarded; it appears to be made for its side effect of
		// populating params.MetaDeviceID — confirm.
		_, err := csrfGet()
		if err != nil {
			return err // TODO: log
		}
	}
	params.CSRF = csrf
	err = writeCsrfRecord(params.CSRF)
	if err != nil {
		// NOTE(review): original comment marks this as tolerable, but
		// the error is returned — confirm.
		return err
	}
	return nil
}
//------------------------------------------------------------------------------------------------------
//GlobleConfigLoad for init
func GlobleConfigLoad() error {
params.GCHK = params.Configuration{}
d, err := Get(params.TryConfigURL)
if err != nil {
return err
}
err = json.Unmarshal(d, ¶ms.GCHK)
if err != nil {
return err
}
return nil
}
// GlobleConfigUpdate serializes params.GCHK and PUTs it back to the
// syncthing config endpoint.
func GlobleConfigUpdate() error {
	d, err := json.Marshal(params.GCHK)
	if err != nil {
		return err
	}
	return PUT(params.TryConfigURL, d)
}
//------------------------------------------------------------------------------------------------------
//FolderCreate create a folder
func FolderCreate(folderID, folderLable,
folderCreatePath string) *params.FolderConfiguration {
dev := params.FolderDeviceConfiguration{
DeviceID: params.MetaDeviceID, //固定参数
}
devs := make([]params.FolderDeviceConfiguration, 0)
devs = append(devs, dev)
mdisk := params.Size{
Value: 1,
Unit: "%",
}
p := make(map[string]string, 0)
ver := params.VersioningConfiguration{
Type: "",
Params: p,
CleanupIntervalS: 0,
}
return ¶ms.FolderConfiguration{
ID: folderID,
Label: folderID,
FilesystemType: "basic",
Path: folderCreatePath, //测试用,先不修改
Type: "sendreceive",
Devices: devs,
RescanIntervalS: 3600,
FSWatcherEnabled: true,
FSWatcherDelayS: 10,
IgnorePerms: false,
AutoNormalize: true,
MinDiskFree: mdisk,
Versioning: ver,
Copiers: 0,
PullerMaxPendingKiB: 0,
Hashers: 0,
Order: "random",
IgnoreDelete: false,
ScanProgressIntervalS: 0,
PullerPauseS: 0,
MaxConflicts: 10,
DisableSparseFiles: false,
DisableTempIndexes: false,
Paused: false,
WeakHashThresholdPct: 25,
MarkerName: ".stfolder",
CopyOwnershipFromParent: false,
RawModTimeWindowS: 0,
MaxConcurrentWrites: 2,
DisableFsync: false,
BlockPullOrder: "standard",
CopyRangeMethod: "standard",
CaseSensitiveFS: false,
JunctionsAsDirs: false,
}
}
|
package main
import "fmt"
// addNums sums num1 and num2 and delivers the result on ch.
func addNums(num1 int64, num2 int64, ch chan int64) {
	ch <- num1 + num2
}
// main runs two addNums goroutines over a shared channel and prints
// the aggregated sum of their results.
func main() {
	results := make(chan int64)
	go addNums(2, 4, results)
	go addNums(10, 20, results)
	total := <-results + <-results
	fmt.Printf("The aggregated sum is: %v", total)
}
|
package interaction
import (
"bufio"
"errors"
"fmt"
"os"
"strings"
)
var reader = bufio.NewReader(os.Stdin)
// GetPlayerChoice loops until the player enters a valid menu option
// and returns the chosen action: "1" -> ATTACK, "2" -> HEAL, "3" ->
// SPECIAL_ATTACK (only when isSpecialAttack is true). Read errors are
// treated like invalid input.
func GetPlayerChoice(isSpecialAttack bool) string {
	for {
		choice, _ := getPlayerInput()
		if choice == "1" {
			return "ATTACK"
		}
		if choice == "2" {
			return "HEAL"
		}
		if choice == "3" && isSpecialAttack {
			return "SPECIAL_ATTACK"
		}
		fmt.Println("Invalid input. Please try again.")
	}
}
// getPlayerInput prompts on stdout and reads one line from stdin,
// returning it with newline characters stripped.
func getPlayerInput() (string, error) {
	fmt.Print("\nEnter Your Choice : ")
	line, err := reader.ReadString('\n')
	if err != nil {
		return "", errors.New("input is invalid")
	}
	return strings.ReplaceAll(line, "\n", ""), nil
}
|
// Copyright 2020 Comcast Cable Communications Management, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package checkpoint
import (
"errors"
"time"
)
// newInMemoryCheckpointManager returns a CheckpointManager backed by a
// plain in-process map (no persistence).
func newInMemoryCheckpointManager(config StorageConfig) CheckpointManager {
	return &InMemoryCheckpointManager{
		StorageConfig: config,
		checkpoints:   map[string]string{},
	}
}
// GetCheckpoint returns the stored sequence number for Id (empty string if
// none) together with the current time. It never fails.
func (cm *InMemoryCheckpointManager) GetCheckpoint(Id string) (string, time.Time, error) {
	cm.Lock()
	seq := cm.checkpoints[Id]
	cm.Unlock()
	return seq, time.Now(), nil
}
// SetCheckpoint records sequenceNumber as the checkpoint for Id. A blank
// sequence number is rejected.
func (cm *InMemoryCheckpointManager) SetCheckpoint(Id string, sequenceNumber string) error {
	// Validate before touching shared state; no lock needed for the check.
	if sequenceNumber == "" {
		return errors.New("cannot pass blank sequence number as checkpoint")
	}
	cm.Lock()
	cm.checkpoints[Id] = sequenceNumber
	cm.Unlock()
	return nil
}
|
package broadcast
import (
"github.com/iotaledger/goshimmer/plugins/broadcast/server"
"github.com/iotaledger/goshimmer/plugins/config"
flag "github.com/spf13/pflag"
"sync"
"github.com/iotaledger/hive.go/daemon"
"github.com/iotaledger/hive.go/events"
"github.com/iotaledger/hive.go/logger"
"github.com/iotaledger/hive.go/node"
"github.com/iotaledger/goshimmer/packages/shutdown"
"github.com/iotaledger/goshimmer/packages/tangle"
"github.com/iotaledger/goshimmer/plugins/messagelayer"
)
const (
	// pluginName identifies this plugin to the node framework and logger.
	pluginName = "Broadcast"
	// bindAddress is the config key for the broadcast server's listen address.
	bindAddress = "broadcast.bindAddress"
)
var (
	// plugin is the plugin instance of the activity plugin.
	plugin *node.Plugin
	// once guards the lazy creation of plugin.
	once sync.Once
	// log is the plugin-scoped logger, created in configure.
	log *logger.Logger
)
// Plugin gets the plugin instance, creating it on first use.
func Plugin() *node.Plugin {
	create := func() {
		plugin = node.NewPlugin(pluginName, node.Enabled, configure, run)
	}
	once.Do(create)
	return plugin
}
// init registers the command-line/config flag that holds the broadcast
// server's bind address (default ":5050").
func init() {
	flag.String(bindAddress, ":5050", "the bind address for the broadcast plugin")
}
// configure creates the plugin's logger and announces startup.
func configure(_ *node.Plugin) {
	log = logger.NewLogger(pluginName)
	plugin.LogInfof("starting node with broadcast plugin")
}
// run starts two background workers: a broadcast server listening on the
// configured bind address, and a tangle listener that forwards every
// stored message to connected broadcast clients.
func run(_ *node.Plugin) {
	//Server to connect to
	bindAddress := config.Node().String(bindAddress)
	log.Debugf("Starting Broadcast plugin on %s", bindAddress)
	err := daemon.BackgroundWorker("Broadcast worker", func(shutdownSignal <-chan struct{}) {
		err := server.Listen(bindAddress, log, shutdownSignal)
		if err != nil {
			log.Errorf("Failed to start Broadcast server: %v", err)
		}
		// Keep the worker alive until shutdown even if Listen returned early.
		<-shutdownSignal
	})
	if err != nil {
		log.Errorf("Failed to start Broadcast daemon: %v", err)
	}
	//Get Messages from node
	// For every stored message id: load the message and push its raw bytes
	// to all broadcast subscribers.
	notifyNewMsg := events.NewClosure(func(messageID tangle.MessageID) {
		messagelayer.Tangle().Storage.Message(messageID).Consume(func(message *tangle.Message) {
			server.Broadcast(message.Bytes())
		})
	})
	if err := daemon.BackgroundWorker("Broadcast[MsgUpdater]", func(shutdownSignal <-chan struct{}) {
		// Attach for the daemon's lifetime; detach cleanly on shutdown.
		messagelayer.Tangle().Storage.Events.MessageStored.Attach(notifyNewMsg)
		<-shutdownSignal
		log.Info("Stopping Broadcast...")
		messagelayer.Tangle().Storage.Events.MessageStored.Detach(notifyNewMsg)
		log.Info("Stopping Broadcast... \tDone")
	}, shutdown.PriorityDashboard); err != nil {
		log.Panicf("Failed to start as daemon: %s", err)
	}
}
|
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// User is a custom resource whose Spec describes a user account.
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type User struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec holds the user's attributes; there is no status subresource.
	Spec UserSpec `json:"spec"`
}
// UserSpec holds the attributes stored for a User resource.
// NOTE(review): First_name/Last_name break Go naming conventions, but
// renaming exported fields would break callers; the JSON tags are the
// wire contract.
type UserSpec struct {
	MobilePhone string `json:"mobilePhone"`
	Country string `json:"country"`
	Creator string `json:"creator"`
	Username string `json:"username"`
	First_name string `json:"first_name"`
	Last_name string `json:"last_name"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// UserList is a list of User resources.
type UserList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items holds the User resources in this list page.
	Items []User `json:"items"`
}
|
package gonsen
import (
"github.com/mitchellh/packer/common/json"
"io/ioutil"
"net/http"
)
// response mirrors the JSON payload of the onsen.ag shownMovie endpoint;
// Result holds the program names.
type response struct {
	Result []string `json:"result"`
}
// GetProgramNames downloads the shownMovie feed and returns the program
// names it lists.
//
// Fix: the json.Unmarshal error was silently discarded; a malformed
// response now surfaces as an error instead of an empty result.
// NOTE(review): http.Get uses the default client with no timeout — confirm
// whether a bounded client is wanted here.
func GetProgramNames() ([]string, error) {
	res, err := http.Get("http://www.onsen.ag/api/shownMovie/shownMovie.json")
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	ps := response{}
	if err := json.Unmarshal(b, &ps); err != nil {
		return nil, err
	}
	return ps.Result, nil
}
|
package overmount
import (
"github.com/pkg/errors"
. "gopkg.in/check.v1"
)
// TestTags exercises the repository tag lifecycle: missing-tag errors,
// adding a tag, resolving it back to the same layer (including parent
// restoration), and removal.
func (m *mountSuite) TestTags(c *C) {
	// Lookup and removal of an unknown tag both yield ErrTagDoesNotExist.
	_, err := m.Repository.GetTag("test")
	c.Assert(errors.Cause(err), Equals, ErrTagDoesNotExist)
	err = m.Repository.RemoveTag("test")
	c.Assert(errors.Cause(err), Equals, ErrTagDoesNotExist)
	// Tag a freshly built layer and read it back.
	_, layer := m.makeImage(c, 2)
	c.Assert(m.Repository.AddTag("test", layer), IsNil)
	layer2, err := m.Repository.GetTag("test")
	c.Assert(err, IsNil)
	c.Assert(layer2.ID(), Equals, layer.ID())
	// The resolved layer restores a parent matching the original's.
	c.Assert(layer2.RestoreParent(), IsNil)
	c.Assert(layer2.Parent.ID(), Equals, layer.Parent.ID())
	c.Assert(m.Repository.RemoveTag("test"), IsNil)
}
|
package odoo
import (
"fmt"
)
// StockLocationRoute represents stock.location.route model.
// Fields are pointers so unset values can be distinguished from zero values.
//
// Fix: every tag spelled the option "omptempty"; the misspelled option is
// not recognized by the xmlrpc marshaller, so it is corrected to
// "omitempty" throughout.
type StockLocationRoute struct {
	LastUpdate *Time `xmlrpc:"__last_update,omitempty"`
	Active *Bool `xmlrpc:"active,omitempty"`
	CategIds *Relation `xmlrpc:"categ_ids,omitempty"`
	CompanyId *Many2One `xmlrpc:"company_id,omitempty"`
	CreateDate *Time `xmlrpc:"create_date,omitempty"`
	CreateUid *Many2One `xmlrpc:"create_uid,omitempty"`
	DisplayName *String `xmlrpc:"display_name,omitempty"`
	Id *Int `xmlrpc:"id,omitempty"`
	Name *String `xmlrpc:"name,omitempty"`
	ProductCategSelectable *Bool `xmlrpc:"product_categ_selectable,omitempty"`
	ProductIds *Relation `xmlrpc:"product_ids,omitempty"`
	ProductSelectable *Bool `xmlrpc:"product_selectable,omitempty"`
	PullIds *Relation `xmlrpc:"pull_ids,omitempty"`
	PushIds *Relation `xmlrpc:"push_ids,omitempty"`
	SaleSelectable *Bool `xmlrpc:"sale_selectable,omitempty"`
	Sequence *Int `xmlrpc:"sequence,omitempty"`
	SuppliedWhId *Many2One `xmlrpc:"supplied_wh_id,omitempty"`
	SupplierWhId *Many2One `xmlrpc:"supplier_wh_id,omitempty"`
	WarehouseIds *Relation `xmlrpc:"warehouse_ids,omitempty"`
	WarehouseSelectable *Bool `xmlrpc:"warehouse_selectable,omitempty"`
	WriteDate *Time `xmlrpc:"write_date,omitempty"`
	WriteUid *Many2One `xmlrpc:"write_uid,omitempty"`
}
// StockLocationRoutes represents an array of stock.location.route models.
type StockLocationRoutes []StockLocationRoute
// StockLocationRouteModel is the odoo model name used in RPC calls.
const StockLocationRouteModel = "stock.location.route"
// Many2One converts this StockLocationRoute to a *Many2One referencing its id.
func (slr *StockLocationRoute) Many2One() *Many2One {
	id := slr.Id.Get()
	return NewMany2One(id, "")
}
// CreateStockLocationRoute creates a new stock.location.route model and returns its id.
func (c *Client) CreateStockLocationRoute(slr *StockLocationRoute) (int64, error) {
	ids, err := c.CreateStockLocationRoutes([]*StockLocationRoute{slr})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		// No id returned; mirror the error sentinel without an error value.
		return -1, nil
	}
	return ids[0], nil
}
// CreateStockLocationRoutes creates new stock.location.route models and
// returns their ids.
func (c *Client) CreateStockLocationRoutes(slrs []*StockLocationRoute) ([]int64, error) {
	var vv []interface{}
	for _, v := range slrs {
		vv = append(vv, v)
	}
	return c.Create(StockLocationRouteModel, vv)
}
// UpdateStockLocationRoute updates an existing stock.location.route record,
// delegating to UpdateStockLocationRoutes with a single-element id list.
func (c *Client) UpdateStockLocationRoute(slr *StockLocationRoute) error {
	return c.UpdateStockLocationRoutes([]int64{slr.Id.Get()}, slr)
}
// UpdateStockLocationRoutes updates existing stock.location.route records.
// All records (represented by ids) will be updated by slr values.
func (c *Client) UpdateStockLocationRoutes(ids []int64, slr *StockLocationRoute) error {
	return c.Update(StockLocationRouteModel, ids, slr)
}
// DeleteStockLocationRoute deletes an existing stock.location.route record,
// delegating to DeleteStockLocationRoutes with a single-element id list.
func (c *Client) DeleteStockLocationRoute(id int64) error {
	return c.DeleteStockLocationRoutes([]int64{id})
}
// DeleteStockLocationRoutes deletes existing stock.location.route records
// identified by ids.
func (c *Client) DeleteStockLocationRoutes(ids []int64) error {
	return c.Delete(StockLocationRouteModel, ids)
}
// GetStockLocationRoute gets an existing stock.location.route record by id,
// or an error when no record matches.
func (c *Client) GetStockLocationRoute(id int64) (*StockLocationRoute, error) {
	routes, err := c.GetStockLocationRoutes([]int64{id})
	if err != nil {
		return nil, err
	}
	if routes != nil && len(*routes) > 0 {
		first := &((*routes)[0])
		return first, nil
	}
	return nil, fmt.Errorf("id %v of stock.location.route not found", id)
}
// GetStockLocationRoutes gets existing stock.location.route records by ids.
func (c *Client) GetStockLocationRoutes(ids []int64) (*StockLocationRoutes, error) {
	routes := &StockLocationRoutes{}
	err := c.Read(StockLocationRouteModel, ids, nil, routes)
	if err != nil {
		return nil, err
	}
	return routes, nil
}
// FindStockLocationRoute finds the first stock.location.route record
// matching criteria, or an error when none matches.
func (c *Client) FindStockLocationRoute(criteria *Criteria) (*StockLocationRoute, error) {
	routes := &StockLocationRoutes{}
	err := c.SearchRead(StockLocationRouteModel, criteria, NewOptions().Limit(1), routes)
	if err != nil {
		return nil, err
	}
	if routes != nil && len(*routes) > 0 {
		return &((*routes)[0]), nil
	}
	return nil, fmt.Errorf("stock.location.route was not found with criteria %v", criteria)
}
// FindStockLocationRoutes finds stock.location.route records matching
// criteria, filtered and paged by options.
func (c *Client) FindStockLocationRoutes(criteria *Criteria, options *Options) (*StockLocationRoutes, error) {
	result := &StockLocationRoutes{}
	err := c.SearchRead(StockLocationRouteModel, criteria, options, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindStockLocationRouteIds finds the ids of records matching criteria and
// options; on failure it returns an empty (non-nil) id slice.
func (c *Client) FindStockLocationRouteIds(criteria *Criteria, options *Options) ([]int64, error) {
	found, err := c.Search(StockLocationRouteModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return found, nil
}
// FindStockLocationRouteId finds the id of the first record matching
// criteria and options, or an error when none matches.
func (c *Client) FindStockLocationRouteId(criteria *Criteria, options *Options) (int64, error) {
	found, err := c.Search(StockLocationRouteModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(found) > 0:
		return found[0], nil
	}
	return -1, fmt.Errorf("stock.location.route was not found with criteria %v and options %v", criteria, options)
}
|
package main
import (
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
mrand "math/rand"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
core "github.com/kan-fun/kan-core"
"github.com/kan-fun/kan-server-core/model"
)
// autoMigrate creates/updates the database tables for all persisted models.
// NOTE(review): db is a package-level handle initialized elsewhere;
// migration results are not checked here.
func autoMigrate() {
	db.AutoMigrate(&model.User{})
	db.AutoMigrate(&model.ChannelEmail{})
	db.AutoMigrate(&model.ChannelWeChat{})
	db.AutoMigrate(&model.Task{})
}
// codeClaims is the JWT payload for one-time verification codes: the keyed
// hash of the code plus the channel it was issued for.
type codeClaims struct {
	CodeHash string `json:"code_hash"`
	ChannelID string `json:"channel_id"`
	jwt.StandardClaims
}
// idClaims is the JWT payload for session tokens carrying only a user id.
type idClaims struct {
	ID string `json:"id"`
	jwt.StandardClaims
}
// generateKey produces a 32-byte cryptographically random key encoded with
// URL-safe base64.
func generateKey() (string, error) {
	raw := make([]byte, 32)
	_, err := io.ReadFull(rand.Reader, raw)
	if err != nil {
		return "", errors.New("Can't generate key")
	}
	return base64.URLEncoding.EncodeToString(raw), nil
}
// getPrivateKey returns the RSA key used for signing tokens.
//
// In test mode it generates a throwaway local key (512 bits: fast but
// insecure, acceptable only for tests). Otherwise it downloads a PEM key
// from KAN_PRIVATE_KEY_URL.
//
// Fix: the fetch previously used http.Get with no timeout; a bounded
// client prevents a hung key server from blocking startup indefinitely.
func getPrivateKey(test bool) (*rsa.PrivateKey, error) {
	if test {
		return rsa.GenerateKey(rand.Reader, 512)
	}
	url, ok := os.LookupEnv("KAN_PRIVATE_KEY_URL")
	if !ok {
		return nil, errors.New("KAN_PRIVATE_KEY_URL not set")
	}
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(bytes)
	if err != nil {
		return nil, err
	}
	return privateKey, nil
}
func generateCode(channelID string) (raw string, token string, err error) {
ints := make([]string, 6)
for i := 0; i <= 5; i++ {
v := mrand.Intn(10)
ints[i] = strconv.Itoa(v)
}
raw = strings.Join(ints, "")
hash := core.HashString(raw, secretKeyGlobal)
token, err = generateCodeToken(hash, channelID)
if err != nil {
return "", "", err
}
return
}
// generateIDToken issues an RS256-signed JWT carrying the user id, valid
// for one month.
func generateIDToken(id string) (tokenString string, err error) {
	claims := idClaims{
		ID: id,
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: time.Now().AddDate(0, 1, 0).Unix(),
			Issuer:    "kan-fun.com",
		},
	}
	tokenString, err = jwt.NewWithClaims(jwt.SigningMethodRS256, claims).SignedString(privateKeyGlobal)
	if err != nil {
		return "", err
	}
	return
}
// generateCodeToken issues an RS256-signed JWT carrying a code hash and
// channel id, valid for one hour.
func generateCodeToken(codeHash string, channelID string) (tokenString string, err error) {
	claims := codeClaims{
		CodeHash:  codeHash,
		ChannelID: channelID,
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: time.Now().Add(time.Hour).Unix(),
			Issuer:    "kan-fun.com",
		},
	}
	tokenString, err = jwt.NewWithClaims(jwt.SigningMethodRS256, claims).SignedString(privateKeyGlobal)
	if err != nil {
		return "", err
	}
	return
}
// hashPassword returns the hex-encoded SHA-256 digest of password.
// NOTE(review): a single unsalted fast hash for password storage — a KDF
// such as bcrypt/scrypt would be safer; confirm against the threat model.
func hashPassword(password string) string {
	return fmt.Sprintf("%x", sha256.Sum256([]byte(password)))
}
// checkSignature authenticates a request by recomputing its signature from
// the Kan-* headers plus specificParameter (via core's credential Sign)
// and comparing it to the Kan-Signature header. It returns the matching
// user on success.
func checkSignature(c *gin.Context, specificParameter map[string]string) (*model.User, error) {
	// All four auth headers are mandatory.
	signatureNonce := c.GetHeader("Kan-Nonce")
	if signatureNonce == "" {
		return nil, errors.New("No SignatureNonce")
	}
	timestamp := c.GetHeader("Kan-Timestamp")
	if timestamp == "" {
		return nil, errors.New("No Timestamp")
	}
	accessKey := c.GetHeader("Kan-Key")
	if accessKey == "" {
		return nil, errors.New("No AccessKey")
	}
	signature := c.GetHeader("Kan-Signature")
	if signature == "" {
		return nil, errors.New("No Signature")
	}
	commonParameter := core.CommonParameter{
		AccessKey: accessKey,
		SignatureNonce: signatureNonce,
		Timestamp: timestamp,
	}
	// Look up the key's owner; only id and secret_key are needed to sign.
	var user model.User
	db.Select("id, secret_key").Where("access_key = ?", accessKey).First(&user)
	if user.ID == 0 {
		return nil, errors.New("User not Exist")
	}
	credential, err := core.NewCredential(accessKey, user.SecretKey)
	if err != nil {
		return nil, err
	}
	// Recompute the signature over common + request-specific parameters.
	s := credential.Sign(commonParameter, specificParameter)
	if s != signature {
		return nil, errors.New("Signature not Valid")
	}
	return &user, nil
}
|
package test
import (
"fmt"
"github.com/apitable/apitable-sdks/apitable.go/lib/common"
aterror "github.com/apitable/apitable-sdks/apitable.go/lib/common/error"
"github.com/apitable/apitable-sdks/apitable.go/lib/common/profile"
"github.com/apitable/apitable-sdks/apitable.go/lib/common/util"
apitable "github.com/apitable/apitable-sdks/apitable.go/lib/datasheet"
"github.com/apitable/apitable-sdks/apitable.go/lib/space"
"os"
"testing"
)
// TestCreateRecords creates one record with a numeric field value of 900.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestCreateRecords(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	datasheet, _ := apitable.NewDatasheet(credential, os.Getenv("DATASHEET_ID"), cpf)
	t.Log("DATASHEET_ID", os.Getenv("DATASHEET_ID"))
	request := apitable.NewCreateRecordsRequest()
	request.Records = []*apitable.Fields{
		{
			Fields: &apitable.Field{
				os.Getenv("NUMBER_FIELD_NAME"): apitable.NumberFieldValue(900),
			},
		},
	}
	records, err := datasheet.CreateRecords(request)
	if _, ok := err.(*aterror.SDKError); ok {
		t.Errorf("An API error has returned: %s", err)
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	t.Log(len(records))
}
// TestDescribeAllRecords fetches every record, sorted descending on the
// numeric field, projecting only that field.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestDescribeAllRecords(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	datasheet, _ := apitable.NewDatasheet(credential, os.Getenv("DATASHEET_ID"), cpf)
	request := apitable.NewDescribeRecordRequest()
	request.Sort = []*apitable.Sort{
		{
			Field: common.StringPtr(os.Getenv("NUMBER_FIELD_NAME")),
			Order: common.StringPtr("desc"),
		},
	}
	request.Fields = common.StringPtrs([]string{os.Getenv("NUMBER_FIELD_NAME")})
	records, err := datasheet.DescribeAllRecords(request)
	if _, ok := err.(*aterror.SDKError); ok {
		t.Errorf("An API error has returned: %s", err)
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	t.Log(len(records))
}
// TestDescribeRecords fetches one page of records, sorted descending on
// the numeric field, projecting only that field.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestDescribeRecords(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	datasheet, _ := apitable.NewDatasheet(credential, os.Getenv("DATASHEET_ID"), cpf)
	request := apitable.NewDescribeRecordRequest()
	request.Sort = []*apitable.Sort{
		{
			Field: common.StringPtr(os.Getenv("NUMBER_FIELD_NAME")),
			Order: common.StringPtr("desc"),
		},
	}
	request.Fields = common.StringPtrs([]string{os.Getenv("NUMBER_FIELD_NAME")})
	records, err := datasheet.DescribeRecords(request)
	if _, ok := err.(*aterror.SDKError); ok {
		t.Errorf("An API error has returned: %s", err)
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	t.Log(len(records.Records))
}
// TestModifyRecords finds the record whose numeric field equals 900 and
// updates it to 1000.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
// NOTE(review): the DescribeRecord error is discarded; a lookup failure
// would surface as a nil-record panic below.
func TestModifyRecords(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	datasheet, _ := apitable.NewDatasheet(credential, os.Getenv("DATASHEET_ID"), cpf)
	describeRequest := apitable.NewDescribeRecordRequest()
	describeRequest.FilterByFormula = common.StringPtr("{" + os.Getenv("NUMBER_FIELD_NAME") + "}=900")
	record, _ := datasheet.DescribeRecord(describeRequest)
	request := apitable.NewModifyRecordsRequest()
	request.Records = []*apitable.BaseRecord{
		{
			Fields: &apitable.Field{
				os.Getenv("NUMBER_FIELD_NAME"): apitable.NumberFieldValue(1000),
			},
			RecordId: record.RecordId,
		},
	}
	records, err := datasheet.ModifyRecords(request)
	if _, ok := err.(*aterror.SDKError); ok {
		t.Errorf("An API error has returned: %s", err)
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	t.Log(len(records))
}
// TestDeleteRecords finds the record whose numeric field equals 1000 and
// deletes it.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestDeleteRecords(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	datasheet, _ := apitable.NewDatasheet(credential, os.Getenv("DATASHEET_ID"), cpf)
	describeRequest := apitable.NewDescribeRecordRequest()
	describeRequest.FilterByFormula = common.StringPtr("{" + os.Getenv("NUMBER_FIELD_NAME") + "}=1000")
	record, _ := datasheet.DescribeRecord(describeRequest)
	request := apitable.NewDeleteRecordsRequest()
	request.RecordIds = []*string{record.RecordId}
	err := datasheet.DeleteRecords(request)
	if _, ok := err.(*aterror.SDKError); ok {
		fmt.Printf("An API error has returned: %s", err)
		return
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
}
// TestUpload uploads a local image file as a datasheet attachment.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestUpload(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	cpf.Upload = true
	datasheet, _ := apitable.NewDatasheet(credential, os.Getenv("DATASHEET_ID"), cpf)
	request := apitable.NewUploadRequest()
	request.FilePath = "image.png"
	attachment, err := datasheet.UploadFile(request)
	if _, ok := err.(*aterror.SDKError); ok {
		fmt.Printf("An API error has returned: %s", err)
		return
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	t.Log(attachment)
}
// TestDescribeFields lists the fields of a view and dumps each field's
// select-field property.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestDescribeFields(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	datasheet, _ := apitable.NewDatasheet(credential, os.Getenv("DATASHEET_ID"), cpf)
	describeRequest := apitable.NewDescribeFieldsRequest()
	describeRequest.ViewId = common.StringPtr(os.Getenv("VIEW_ID"))
	fields, err := datasheet.DescribeFields(describeRequest)
	if _, ok := err.(*aterror.SDKError); ok {
		t.Errorf("An API error has returned: %s", err)
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	for _, value := range fields {
		property := value.SelectFieldProperty()
		util.Dd(property)
	}
	t.Log(len(fields))
}
// TestDescribeViews lists the views of a datasheet.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestDescribeViews(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	datasheet, _ := apitable.NewDatasheet(credential, os.Getenv("DATASHEET_ID"), cpf)
	describeRequest := apitable.NewDescribeViewsRequest()
	views, err := datasheet.DescribeViews(describeRequest)
	if _, ok := err.(*aterror.SDKError); ok {
		t.Errorf("An API error has returned: %s", err)
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	util.Dd(views)
	t.Log(len(views))
}
// TestDescribeSpaces lists the spaces visible to the credential.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestDescribeSpaces(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	spaceClient, _ := space.NewSpace(credential, "", cpf)
	describeRequest := space.NewDescribeSpacesRequest()
	spaces, err := spaceClient.DescribeSpaces(describeRequest)
	if _, ok := err.(*aterror.SDKError); ok {
		t.Errorf("An API error has returned: %s", err)
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	util.Dd(spaces)
	t.Log(len(spaces))
}
// TestDescribeNodes lists the nodes of a space.
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestDescribeNodes(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	spaceClient, _ := space.NewSpace(credential, os.Getenv("SPACE_ID"), cpf)
	describeRequest := space.NewDescribeNodesRequest()
	nodes, err := spaceClient.DescribeNodes(describeRequest)
	if _, ok := err.(*aterror.SDKError); ok {
		t.Errorf("An API error has returned: %s", err)
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	util.Dd(nodes)
	t.Log(len(nodes))
}
// TestDescribeNode fetches the detail of a single node (the datasheet).
// Fix: "unexcepted" typo in the error message corrected to "unexpected".
func TestDescribeNode(t *testing.T) {
	// HOST can use the produced host by default without setting.
	credential := common.NewCredential(os.Getenv("TOKEN"))
	cpf := profile.NewClientProfile()
	cpf.HttpProfile.Domain = os.Getenv("DOMAIN")
	spaceClient, _ := space.NewSpace(credential, os.Getenv("SPACE_ID"), cpf)
	describeRequest := space.NewDescribeNodeRequest()
	describeRequest.NodeId = common.StringPtr(os.Getenv("DATASHEET_ID"))
	node, err := spaceClient.DescribeNode(describeRequest)
	if _, ok := err.(*aterror.SDKError); ok {
		t.Errorf("An API error has returned: %s", err)
	}
	// Non-SDK exception, direct failure. Other processing can be added to the actual code.
	if err != nil {
		t.Errorf("An unexpected error has returned: %s", err)
		panic(err)
	}
	util.Dd(node)
	t.Log(node)
}
|
package main
import "fmt"
// main walks through Go slice basics: creation with make, element access,
// append, copy, the slicing operator, literals, and jagged 2-D slices.
func main() {
	// make allocates a zero-valued slice of the requested length.
	sl := make([]string, 3)
	fmt.Println("emp:", sl)
	// Element assignment and access work like arrays.
	sl[0] = "a"
	sl[1] = "b"
	sl[2] = "c"
	fmt.Println("set:", sl)
	fmt.Println("get:", sl[2])
	// len reports the current length.
	fmt.Println("len:", len(sl))
	// append may return a new backing array, so reassign the result.
	sl = append(sl, "d")
	sl = append(sl, "e", "f")
	fmt.Println("apd:", sl)
	// copy duplicates elements into a second slice of equal length.
	dup := make([]string, len(sl))
	copy(dup, sl)
	fmt.Println("cpy:", dup)
	// The slice operator sl[low:high] selects a half-open interval.
	part := sl[2:5]
	fmt.Println("sl1:", part)
	// Omitting low slices from the start.
	part = sl[:5]
	fmt.Println("sl2:", part)
	// Omitting high slices to the end.
	part = sl[2:]
	fmt.Println("sl3:", part)
	// Slice literal: declare and initialize in one line.
	letters := []string{"g", "h", "i"}
	fmt.Println("dcl:", letters)
	// Jagged 2-D slice: inner lengths may differ.
	grid := make([][]int, 3)
	for row := 0; row < 3; row++ {
		width := row + 1
		grid[row] = make([]int, width)
		for col := 0; col < width; col++ {
			grid[row][col] = row + col
		}
	}
	fmt.Println("2d: ", grid)
}
|
package main
// Transaction type to manage transaction information.
// LastTransaction nests prior transactions, so one value can carry its own
// history. NOTE(review): the JSON shape is inferred from the tags only.
type Transaction struct {
	Merchant string `json:"merchant"`
	Amount float64 `json:"amount"`
	Time string `json:"time"`
	LastTransaction []Transaction `json:"lastTransaction"`
}
|
package files
import (
"os"
"io/ioutil"
"path/filepath"
)
// SplitDirFile splits path into its directory and file-name components.
func SplitDirFile(path string) (string, string) {
	dir := filepath.Dir(path)
	base := filepath.Base(path)
	return dir, base
}
// Exist reports whether path exists.
//
// Bug fix: the original returned err != nil, which inverted the result —
// it reported true for missing paths and false for existing ones.
func Exist(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
// IsFile reports whether path exists and is not a directory.
func IsFile(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}
	return !info.IsDir()
}
// IsDir reports whether path exists and is a directory.
func IsDir(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}
	return info.IsDir()
}
// ReadFileByte reads the whole file at path and returns its bytes.
//
// Improvement: replaces the else-after-return shape with an early return;
// the file is closed via defer on the success path.
func ReadFileByte(path string) ([]byte, error) {
	fi, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer fi.Close()
	return ioutil.ReadAll(fi)
}
package main
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// maxSubArray returns the largest sum over all non-empty contiguous
// subarrays of nums (Kadane's algorithm). nums must be non-empty.
func maxSubArray(nums []int) int {
	best := nums[0]
	running := 0
	for _, v := range nums {
		// A negative running sum can never help; restart the window.
		if running < 0 {
			running = 0
		}
		running += v
		if running > best {
			best = running
		}
	}
	return best
}
// main is an intentionally empty entry point for this snippet.
func main() {
}
|
package _go
import (
"encoding/json"
"fmt"
"math/rand"
"time"
)
// =============================================================================================================================
// PrintJSON pretty-prints data as indented JSON on stdout; it panics when
// the value cannot be marshaled.
func PrintJSON(data interface{}) {
	out, err := json.MarshalIndent(data, "", " ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
// =============================================================================================================================
// MapKVReversal returns a new map with m's keys and values swapped. If
// several keys share a value, only one of them survives (map iteration
// order decides which).
func MapKVReversal(m map[string]string) map[string]string {
	out := make(map[string]string)
	for key, val := range m {
		out[val] = key
	}
	return out
}
// =============================================================================================================================
// SliceA2Z returns a new slice holding slice's elements in reverse order.
func SliceA2Z(slice []string) (temp []string) {
	for idx := range slice {
		temp = append(temp, slice[len(slice)-1-idx])
	}
	return
}
// SliceUnique removes duplicates from intSlice, keeping the first
// occurrence of each element in order.
func SliceUnique(intSlice []string) []string {
	seen := make(map[string]bool)
	out := make([]string, 0)
	for _, item := range intSlice {
		if seen[item] {
			continue
		}
		seen[item] = true
		out = append(out, item)
	}
	return out
}
// =============================================================================================================================
// RunTimer blocks forever: it polls once per second until the wall clock
// (formatted "15:04", e.g. "00:00" for midnight) equals startTime, then
// runs task repeatedly, sleeping cycle between invocations.
// NOTE(review): once triggered, the inner loop never exits, so startTime
// only gates the first activation.
func RunTimer(startTime string, cycle time.Duration, task func()) {
	for {
		time.Sleep(time.Second) // poll once per second
		if time.Now().Format("15:04") == startTime {
			for {
				task()
				time.Sleep(cycle)
			}
		}
	}
}
// RandomChar returns a string of l pseudo-random decimal digits, seeded
// from the current time.
func RandomChar(l int) string {
	const digits = "0123456789"
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := make([]byte, 0, l)
	for i := 0; i < l; i++ {
		out = append(out, digits[rng.Intn(len(digits))])
	}
	return string(out)
}
// BubbleAsort sorts values in place into descending order and returns it
// (exchange sort; despite the name, not a classic adjacent-swap bubble sort).
func BubbleAsort(values []int) []int {
	for i := range values {
		for j := i + 1; j < len(values); j++ {
			// Pull the larger element forward.
			if values[j] > values[i] {
				values[i], values[j] = values[j], values[i]
			}
		}
	}
	return values
}
|
/*
Copyright © 2021 Doppler <support@doppler.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"sort"
"github.com/DopplerHQ/cli/pkg/http"
"github.com/DopplerHQ/cli/pkg/models"
"github.com/DopplerHQ/cli/pkg/utils"
)
// GetSecretNames fetches the secrets for the scoped config and returns
// their names sorted alphabetically.
func GetSecretNames(config models.ScopedOptions) ([]string, Error) {
	utils.RequireValue("token", config.Token.Value)
	response, httpErr := http.GetSecrets(config.APIHost.Value, utils.GetBool(config.VerifyTLS.Value, true), config.Token.Value, config.EnclaveProject.Value, config.EnclaveConfig.Value)
	if !httpErr.IsNil() {
		return nil, Error{Err: httpErr.Unwrap(), Message: httpErr.Message}
	}
	secrets, parseErr := models.ParseSecrets(response)
	if parseErr != nil {
		return nil, Error{Err: parseErr, Message: "Unable to parse API response"}
	}
	// Collect and sort the names (nil slice when there are no secrets).
	var names []string
	for name := range secrets {
		names = append(names, name)
	}
	sort.Strings(names)
	return names, Error{}
}
|
package parser
import (
"net/http"
"strings"
"golang.org/x/net/html"
)
// GetHyperlinks tokenizes the HTML in response.Body and collects the
// hyperlinks found in <a> start tags. Links starting with "http" are kept
// as-is; relative links are resolved against BaseURL when they validate.
// Empty, "/", "#", and self-referential links are discarded.
//
// Improvements: the token is only materialized for start tags (it was
// previously computed for every token, including error/end tokens), and
// strings.Index(url, "http") == 0 is replaced with the HasPrefix idiom.
func GetHyperlinks(BaseURL string, response *http.Response) []string {
	var links []string
	tokenizer := html.NewTokenizer(response.Body)
	for {
		switch tokenizer.Next() {
		case html.ErrorToken:
			// End of document (or malformed input): return what we have.
			return links
		case html.StartTagToken:
			tag := tokenizer.Token()
			if tag.Data != "a" {
				continue
			}
			url := GetLinkHelper(tag.Attr)
			// If parsed url is redundant or the same link, discard it.
			if url == "" || url == "/" || url == "#" || url == BaseURL {
				continue
			}
			// Filter only for http links.
			if strings.HasPrefix(url, "http") {
				links = append(links, url)
				continue
			}
			// Relative links: prepend the original URL and keep it when valid.
			if ValidateURL(BaseURL+url) == nil {
				links = append(links, BaseURL+url)
			}
		}
	}
}
// GetLinkHelper iterates over the token attributes to search for the
// "href" attribute and returns its value, or "" when none is present.
// (Replaces the previous head/tail recursion with an equivalent loop.)
func GetLinkHelper(tokenAttribute []html.Attribute) string {
	for _, attr := range tokenAttribute {
		if attr.Key == "href" {
			return attr.Val
		}
	}
	return ""
}
// FilterHyperlinks filters out links that lead away from the original
// domain URL, keeping only those that contain URL as a substring. The
// filtering is done in place and the retained prefix of links is returned.
func FilterHyperlinks(URL string, links []string) []string {
	kept := 0
	for _, candidate := range links {
		if !strings.Contains(candidate, URL) {
			continue
		}
		links[kept] = candidate
		kept++
	}
	return links[:kept]
}
|
package main
import (
"fmt"
)
// main prints a short welcome message to standard output.
func main() {
	const greeting = "Hello Akilan, Welcome to Go!!!"
	fmt.Println(greeting)
}
|
package modbusone
import (
"encoding/binary"
"fmt"
)
// DataToBools translates the data part of PDU to []bool dependent on FunctionCode.
//
// For FcWriteSingleCoil the two data bytes must be 0xFF00 (on) or 0x0000
// (off); anything else yields EcIllegalDataValue. For all other function
// codes the data is interpreted as an LSB-first bit field of `count`
// coils and must be exactly (count+7)/8 bytes long.
func DataToBools(data []byte, count uint16, fc FunctionCode) ([]bool, error) {
	if fc == FcWriteSingleCoil {
		if len(data) != 2 {
			debugf("WriteSingleCoil need 2 bytes data\n")
			return nil, EcIllegalDataValue
		}
		// The second byte must always be zero; only the first byte
		// carries the on/off state.
		if data[1] != 0 {
			debugf("WriteSingleCoil unexpected %v %v\n", data[0], data[1])
			return nil, EcIllegalDataValue
		}
		if data[0] == 0 {
			return []bool{false}, nil
		}
		if data[0] == 0xff {
			return []bool{true}, nil
		}
		debugf("WriteSingleCoil unexpected %v %v", data[0], data[1])
		return nil, EcIllegalDataValue
	}
	byteCount := len(data)
	if (count+7)/8 != uint16(byteCount) {
		debugf("unexpected size: bools %v, bytes %v", count, byteCount)
		return nil, EcIllegalDataValue
	}
	// Unpack LSB-first: bit j of byte i is coil i*8+j.
	r := make([]bool, byteCount*8)
	for i := 0; i < byteCount; i++ {
		for j := 0; j < 8; j++ {
			r[i*8+j] = bool((int(data[i]) & (1 << uint(j))) > 0)
		}
	}
	// Drop the padding bits beyond count.
	return r[:count], nil
}
// BoolsToData translates []bool to the data part of PDU dependent on FunctionCode.
//
// For FcWriteSingleCoil exactly one value is required and is encoded as
// 0xFF00 (on) or 0x0000 (off), mirroring DataToBools. For all other
// function codes the values are packed LSB-first into (len(values)+7)/8
// bytes, with unused trailing bits left zero.
func BoolsToData(values []bool, fc FunctionCode) ([]byte, error) {
	if fc == FcWriteSingleCoil {
		if len(values) != 1 {
			return nil, fmt.Errorf("FcWriteSingleCoil can not write %v coils", len(values))
		}
		if values[0] {
			return []byte{0xff, 0x00}, nil
		}
		return []byte{0x00, 0x00}, nil
	}
	// Pack bits LSB-first: value v lands in byte v/8, bit v%8. This
	// replaces the previous byteNr/bitNr/byteVal bookkeeping with direct
	// index arithmetic; the resulting bytes are identical.
	data := make([]byte, (len(values)+7)/8)
	for v, set := range values {
		if set {
			data[v/8] |= 1 << uint(v%8)
		}
	}
	return data, nil
}
// DataToRegisters translates the data part of PDU to []uint16 using
// big-endian byte order. The byte count must be even and non-zero.
func DataToRegisters(data []byte) ([]uint16, error) {
	n := len(data)
	if n < 2 || n%2 != 0 {
		debugf("unexpected odd number of bytes %v", n)
		return nil, EcIllegalDataValue
	}
	values := make([]uint16, n/2)
	for i := 0; i < n; i += 2 {
		values[i/2] = binary.BigEndian.Uint16(data[i : i+2])
	}
	return values, nil
}
// RegistersToData translates []uint16 to the data part of PDU, encoding
// each register as two big-endian bytes.
func RegistersToData(values []uint16) ([]byte, error) {
	data := make([]byte, 2*len(values))
	for i := range values {
		binary.BigEndian.PutUint16(data[2*i:2*i+2], values[i])
	}
	return data, nil
}
|
package gdash
import (
"reflect"
"testing"
)
// TestPull checks Pull against three cases: an empty slice (result stays
// empty/nil), removal of plain comparable values, and removal of a
// non-comparable value ([]int), which relies on deep equality.
func TestPull(t *testing.T) {
	var emptySlice []interface{}
	var expectedResult []interface{}
	result := Pull(emptySlice, "")
	if !reflect.DeepEqual(result, expectedResult) {
		t.Fatal("Pull empty slice produce empty slice", result)
	}
	expectedResult = []interface{}{1, 4}
	result = Pull([]interface{}{1, 2, 3, 4, 2, 3}, 2, 3)
	if !reflect.DeepEqual(result, expectedResult) {
		t.Fatal("Pull return a slice with elements 2 and 3 filtered out", result)
	}
	expectedResult = []interface{}{1, 3, 4}
	result = Pull([]interface{}{1, 2, 3, 4, 2, []int{1, 2}}, 2, []int{1, 2})
	if !reflect.DeepEqual(result, expectedResult) {
		t.Fatal("Pull return a slice with elements 2 and int slice filtered out", result)
	}
}
|
// Copyright (C) 2015-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under
// the terms of the under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credhub_tests
import (
"fmt"
"os"
"testing"
"code.cloudfoundry.org/credhub-cli/credhub"
"code.cloudfoundry.org/credhub-cli/credhub/auth"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
odbcredhub "github.com/pivotal-cf/on-demand-service-broker/credhub"
)
// Suite-wide state populated in BeforeSuite from the environment.
var (
	devEnv     string   // DEV_ENV; namespaces test credentials per environment
	caCerts    []string // UAA and credhub CA certificates
	credhubURL string   // https URL of the credhub server under test
)
// TestContractTests hands control to Ginkgo, which runs the contract
// specs registered in this package against a real credhub instance.
func TestContractTests(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Credhub Contract Tests Suite")
}
// BeforeSuite reads the environment (DEV_ENV, CREDHUB_SERVER,
// BOSH_CA_CERT, CREDHUB_CA_CERT), fails fast when either CA cert is
// missing, and clears any leftover test credentials before the suite runs.
var _ = BeforeSuite(func() {
	devEnv = os.Getenv("DEV_ENV")
	credhubURL = "https://" + os.Getenv("CREDHUB_SERVER")
	uaaCACert := os.Getenv("BOSH_CA_CERT")
	Expect(uaaCACert).ToNot(BeEmpty())
	credhubCACert := os.Getenv("CREDHUB_CA_CERT")
	Expect(credhubCACert).ToNot(BeEmpty())
	caCerts = []string{uaaCACert, credhubCACert}
	ensureCredhubIsClean()
})
// AfterSuite removes any credentials the tests created so the credhub
// instance is left clean for the next run.
var _ = AfterSuite(func() {
	ensureCredhubIsClean()
})
// testKeyPrefix returns the credhub path prefix used to namespace all
// credentials created by this suite, scoped by the DEV_ENV variable.
func testKeyPrefix() string {
	return fmt.Sprint("/test-", devEnv)
}
func makeKeyPath(name string) string {
return fmt.Sprintf("%s/%s", testKeyPrefix(), name)
}
// ensureCredhubIsClean deletes every credential under the suite's test
// prefix so runs do not leak state into each other.
func ensureCredhubIsClean() {
	credhubClient := underlyingCredhubClient()
	testKeys, err := credhubClient.FindByPath(testKeyPrefix())
	Expect(err).NotTo(HaveOccurred())
	for _, key := range testKeys.Credentials {
		// Best-effort: the Delete error is deliberately ignored so a
		// single failed delete does not abort cleanup.
		credhubClient.Delete(key.Name)
	}
}
// getCredhubStore builds the broker's credhub Store under test, using UAA
// client credentials (CREDHUB_CLIENT / CREDHUB_SECRET) and the CA certs
// collected in BeforeSuite.
func getCredhubStore() *odbcredhub.Store {
	clientSecret := os.Getenv("CREDHUB_SECRET")
	Expect(clientSecret).NotTo(BeEmpty(), "Expected CREDHUB_SECRET to be set")
	credentialStore, err := odbcredhub.Build(
		credhubURL,
		credhub.Auth(auth.UaaClientCredentials(os.Getenv("CREDHUB_CLIENT"), clientSecret)),
		credhub.CaCerts(caCerts...),
	)
	Expect(err).NotTo(HaveOccurred())
	return credentialStore
}
// underlyingCredhubClient builds a raw credhub-cli client with the same
// credentials as the store under test; it is used for test setup and
// cleanup (e.g. ensureCredhubIsClean).
func underlyingCredhubClient() *credhub.CredHub {
	clientSecret := os.Getenv("CREDHUB_SECRET")
	Expect(clientSecret).NotTo(BeEmpty(), "Expected CREDHUB_SECRET to be set")
	Expect(caCerts).ToNot(BeEmpty())
	credhubClient, err := credhub.New(
		credhubURL,
		credhub.Auth(auth.UaaClientCredentials(os.Getenv("CREDHUB_CLIENT"), clientSecret)),
		credhub.CaCerts(caCerts...),
	)
	Expect(err).NotTo(HaveOccurred())
	return credhubClient
}
|
package acrostic
import (
"errors"
log "github.com/sirupsen/logrus"
)
// WordNetSynset provides synset (concept) lookups against the WordNet
// database carried by Instance, configured by Options.
type WordNetSynset struct {
	Options  *Options
	Instance *Instance
}
// NewWordNetSynset builds a WordNetSynset bound to the given options and
// runtime instance (which carries the WordNet database handle).
func NewWordNetSynset(o *Options, i *Instance) *WordNetSynset {
	return &WordNetSynset{
		Options:  o,
		Instance: i,
	}
}
// WordID looks up the WordNet word ID for lemma a restricted to the given
// part of speech. It returns "" with a nil error when no row matches.
func (w *WordNetSynset) WordID(a []rune, part WordNetPart) (string, error) {
	rows, err := w.Instance.WordNet.DB.Query("select wordid,pos from word where lemma=?", string(a))
	if err != nil {
		return "", err
	}
	defer rows.Close()
	for rows.Next() {
		var (
			wordid string
			pos    string
		)
		if err = rows.Scan(&wordid, &pos); err != nil {
			return "", err
		}
		// Only accept the row whose part of speech matches.
		if part.String() == pos {
			return wordid, nil
		}
	}
	// NOTE(review): rows.Err() is not checked; an iteration error is
	// silently reported as "not found" — confirm this is intended.
	return "", nil
}
// Synset returns all synset IDs that the given word ID participates in
// (rows of the "sense" table). An unknown ID yields a nil slice.
func (w *WordNetSynset) Synset(id string) ([]string, error) {
	var ret []string
	rows, err := w.Instance.WordNet.DB.Query("select synset from sense where wordid=?", id)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var synset string
		if err = rows.Scan(&synset); err != nil {
			return nil, err
		}
		ret = append(ret, synset)
	}
	return ret, nil
}
// Name returns the name of the given synset ID, or "" when the synset is
// unknown. Only the first matching row is used.
func (w *WordNetSynset) Name(syns string) (string, error) {
	rows, err := w.Instance.WordNet.DB.Query("select name from synset where synset=?", syns)
	if err != nil {
		return "", err
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err = rows.Scan(&name); err != nil {
			return "", err
		}
		return name, nil
	}
	return "", nil
}
// Hype returns the hypernym (parent synset) of syns via the "hype" link,
// or "" when syns has no hypernym — i.e. it is a root. Only the first
// matching row is used.
func (w *WordNetSynset) Hype(syns string) (string, error) {
	rows, err := w.Instance.WordNet.DB.Query("select synset2 from synlink where synset1=? and link=\"hype\"",
		syns)
	if err != nil {
		return "", err
	}
	defer rows.Close()
	for rows.Next() {
		var synset string
		if err = rows.Scan(&synset); err != nil {
			return "", err
		}
		return synset, nil
	}
	return "", nil
}
// NearestSynset finds, for strings a and b (with their respective parts
// of speech), the synset(s) nearest to both — i.e. their closest common
// hypernyms in WordNet.
//
// For each result it also computes Depth (distance from the shared synset
// up to the WordNet root) and Approximation = 2*Depth/(AStep+BStep); a
// higher score means the two words are semantically closer.
func (w *WordNetSynset) NearestSynset(
	a []rune,
	apart WordNetPart,
	b []rune,
	bpart WordNetPart) ([]SynsetResult, error) {
	// Resolve both words to WordNet word IDs.
	var (
		aid string
		bid string
		err error
	)
	aid, err = w.WordID(a, apart)
	if err != nil {
		return nil, err
	}
	bid, err = w.WordID(b, bpart)
	if err != nil {
		return nil, err
	}
	if len(aid) == 0 {
		return nil, errors.New("string a has not id")
	}
	if len(bid) == 0 {
		return nil, errors.New("string b has not id")
	}
	log.Debugf("aid: %v, bid: %v", aid, bid)
	// Collect the synsets each word belongs to.
	asyn, err := w.Synset(aid)
	if err != nil {
		return nil, err
	}
	bsyn, err := w.Synset(bid)
	if err != nil {
		return nil, err
	}
	log.Debugf("asyn: %v, bsyn: %v", asyn, bsyn)
	// Walk both synset sets upward until they meet. make() already
	// zero-initializes the step slices, so no explicit reset loop is
	// needed (the previous revision zeroed them redundantly).
	sr := make([]SynsetResult, 0)
	astep := make([]int, len(asyn))
	bstep := make([]int, len(bsyn))
	sr, err = w.Search(asyn, astep, 0, bsyn, bstep, 0, sr)
	if err != nil {
		return nil, err
	}
	log.Debugf("finished Search(), len = %v", len(sr))
	// Measure each common synset's distance to the root and derive the
	// approximation score.
	for r := range sr {
		v := sr[r].Synset
		for v != "" {
			v, err = w.Hype(v)
			if err != nil {
				return nil, err
			}
			sr[r].Depth++
		}
		if sr[r].AStep == 0 && sr[r].BStep == 0 {
			sr[r].Approximation = 0
		} else {
			sr[r].Approximation = (2.0 * float32(sr[r].Depth) /
				(float32(sr[r].AStep) + float32(sr[r].BStep)))
		}
		log.Debugf("sr[%v]: %v, %v[%v %v] = %v",
			r, sr[r].Synset, sr[r].Depth, sr[r].AStep, sr[r].BStep, sr[r].Approximation)
	}
	return sr, nil
}
// SynsetResult describes one synset found to be common to both words.
type SynsetResult struct {
	Synset        string  // the shared synset ID
	AStep         int     // levels climbed from word a's synsets to Synset
	BStep         int     // levels climbed from word b's synsets to Synset
	Depth         int     // distance from Synset up to the WordNet root
	Approximation float32 // 2*Depth/(AStep+BStep); higher means closer
}
// Search walks the two synset frontiers (asyn for word a, bsyn for word
// b) upward through their hypernyms one level per recursion, collecting
// every synset that appears in both frontiers into sr.
//
// asyn/bsyn hold the frontier synset IDs ("" marks an entry already
// consumed by a match), astep/bstep record how many levels up each entry
// was found, and an/bn are the current expansion depths. Recursion stops
// once neither frontier gains a new hypernym.
func (w *WordNetSynset) Search(
	asyn []string,
	astep []int,
	an int,
	bsyn []string,
	bstep []int,
	bn int,
	sr []SynsetResult) ([]SynsetResult, error) {
	log.Debugf("Search: %v-%v(%v), %v-%v-(%v)", astep, len(asyn), asyn, bstep, len(bsyn), bsyn)
	// Record synsets present in both frontiers.
	for ai, as := range asyn {
		if as == "" {
			continue
		}
		for bi, bs := range bsyn {
			if bs == "" {
				continue
			}
			if as != bs {
				continue
			}
			// Same synset reached from both sides: record it once and
			// blank out both entries so it is not expanded further.
			found := false
			for i := range sr {
				if sr[i].Synset == as {
					found = true
					break
				}
			}
			if !found {
				log.Debugf("Search: same synset %v [%v %v]", as, astep, bstep)
				sr = append(sr, SynsetResult{Synset: as, AStep: astep[ai], BStep: bstep[bi]})
				asyn[ai] = ""
				bsyn[bi] = ""
			}
		}
	}
	// Expand each frontier one hypernym level upward. (The two formerly
	// duplicated loops are factored into expandFrontier; the flag was
	// also renamed from "new", which shadowed the builtin.)
	asyn, astep, aGrew, err := w.expandFrontier(asyn, astep, an, sr)
	if err != nil {
		return nil, err
	}
	bsyn, bstep, bGrew, err := w.expandFrontier(bsyn, bstep, bn, sr)
	if err != nil {
		return nil, err
	}
	if aGrew || bGrew {
		return w.Search(asyn, astep, an+1, bsyn, bstep, bn+1, sr)
	}
	return sr, nil
}

// expandFrontier appends, for every live synset in syn, its hypernym —
// unless that hypernym is already recorded in sr or already present in
// syn. It returns the (possibly grown) slices and whether anything new
// was added. Entries appended during the pass are not themselves expanded
// until the next pass, matching the original range-over-snapshot behavior.
func (w *WordNetSynset) expandFrontier(syn []string, step []int, depth int, sr []SynsetResult) ([]string, []int, bool, error) {
	grew := false
	for _, s := range syn {
		if s == "" {
			// Entry consumed by a match; Hype("") would only return "",
			// so skip the pointless query.
			continue
		}
		h, err := w.Hype(s)
		if err != nil {
			return syn, step, grew, err
		}
		if h == "" {
			// s is a root synset.
			continue
		}
		found := false
		for i := range sr {
			if sr[i].Synset == h {
				found = true
				break
			}
		}
		if !found {
			for i := range syn {
				if syn[i] == h {
					found = true
					break
				}
			}
		}
		if !found {
			grew = true
			syn = append(syn, h)
			step = append(step, depth)
		}
	}
	return syn, step, grew, nil
}
|
package ts3
// Client is a generic key/value property bag for a single client entry —
// presumably the raw fields of a TeamSpeak 3 server-query response
// (e.g. "client_nickname"); confirm against callers.
type Client map[string]string
|
package model
import (
"github.com/caos/zitadel/internal/crypto"
caos_errs "github.com/caos/zitadel/internal/errors"
es_models "github.com/caos/zitadel/internal/eventstore/models"
policy_model "github.com/caos/zitadel/internal/policy/model"
"time"
)
// Password is the model of a user's password: the plaintext secret (used
// only as hashing input), its hashed value, and the change-required flag.
type Password struct {
	es_models.ObjectRoot
	SecretString   string              // plaintext input; hashed by HashPasswordIfExisting
	SecretCrypto   *crypto.CryptoValue // hashed secret
	ChangeRequired bool                // set from the onetime flag when hashing
}
// PasswordCode is an encrypted one-time code with its expiry window and
// the channel it should be delivered over — semantics inferred from the
// field names; confirm against callers.
type PasswordCode struct {
	es_models.ObjectRoot
	Code             *crypto.CryptoValue
	Expiry           time.Duration
	NotificationType NotificationType
}
// NotificationType selects the channel used to deliver a password code.
type NotificationType int32

const (
	NotificationTypeEmail NotificationType = iota
	NotificationTypeSms
)
// IsValid reports whether the password carries both an aggregate ID and a
// non-empty plaintext secret.
func (p *Password) IsValid() bool {
	if p.AggregateID == "" {
		return false
	}
	return p.SecretString != ""
}
// HashPasswordIfExisting validates the plaintext secret against the given
// complexity policy and stores its hash in SecretCrypto.
//
// A missing plaintext (SecretString == "") is a no-op, not an error. A
// nil policy is a precondition failure. On success ChangeRequired is set
// to onetime.
func (p *Password) HashPasswordIfExisting(policy *policy_model.PasswordComplexityPolicy, passwordAlg crypto.HashAlgorithm, onetime bool) error {
	if p.SecretString == "" {
		return nil
	}
	if policy == nil {
		return caos_errs.ThrowPreconditionFailed(nil, "MODEL-s8ifS", "Errors.User.PasswordComplexityPolicy.NotFound")
	}
	if err := policy.Check(p.SecretString); err != nil {
		return err
	}
	secret, err := crypto.Hash([]byte(p.SecretString), passwordAlg)
	if err != nil {
		return err
	}
	p.SecretCrypto = secret
	p.ChangeRequired = onetime
	return nil
}
|
package common
import (
"context"
"fmt"
"io"
)
// This file contains types that need to be referenced by both the ./encoding and ./encoding/vX packages.
// It primarily exists here to break dependency loops.
var (
	// ErrUnsupported is returned by optional interface methods that a
	// given format version does not implement (see DataReader.NextPage).
	ErrUnsupported = fmt.Errorf("unsupported")
)

// ID in TempoDB
type ID []byte

// Record represents the location of an ID in an object file
type Record struct {
	ID     ID
	Start  uint64
	Length uint32
}

// ObjectCombiner is used to combine two objects in the backend
type ObjectCombiner interface {
	// Combine objA and objB encoded using dataEncoding. The returned object must
	// use the same dataEncoding. Returns the combined slice and a bool
	// indicating whether the objects actually required combining.
	Combine(objA []byte, objB []byte, dataEncoding string) ([]byte, bool)
}
// DataReader returns a slice of pages in the encoding/v0 format referenced by
// the slice of *Records passed in. The length of the returned slice is guaranteed
// to be equal to the length of the provided records unless error is non nil.
// DataReader is the primary abstraction point for supporting multiple data
// formats.
type DataReader interface {
	// Read resolves the given records into pages, using the provided
	// buffer as scratch space; it returns the pages plus the (possibly
	// resized) buffer.
	Read(context.Context, []Record, []byte) ([][]byte, []byte, error)
	Close()
	// NextPage can be used to iterate one page at a time. May return ErrUnsupported for older formats.
	// NextPage takes a reusable buffer to read the page into and returns it in case it needs to resize.
	// NextPage returns the uncompressed page buffer ready for object iteration and the length of the
	// original page from the page header. len(page) might not equal page len!
	NextPage([]byte) ([]byte, uint32, error)
}

// IndexReader is used to abstract away the details of an index. Currently
// only used in the paged finder, it could eventually provide a way to
// support multiple index formats.
// IndexReader is the primary abstraction point for supporting multiple index
// formats.
type IndexReader interface {
	// At returns the Record at index i.
	At(ctx context.Context, i int) (*Record, error)
	// Find returns the Record for the given id along with its position.
	Find(ctx context.Context, id ID) (*Record, int, error)
}
// DataWriter is used to write paged data to the backend
type DataWriter interface {
	// Write writes the passed ID/byte slice to the current page
	Write(ID, []byte) (int, error)
	// CutPage completes the current page and starts a new one. It
	// returns the length in bytes of the cut page.
	CutPage() (int, error)
	// Complete must be called when the DataWriter operation is done.
	Complete() error
}

// IndexWriter is used to write paged indexes
type IndexWriter interface {
	// Write returns a byte representation of the provided Records
	Write([]Record) ([]byte, error)
}

// ObjectReaderWriter represents a library of methods to read and write
// at the object level
type ObjectReaderWriter interface {
	MarshalObjectToWriter(id ID, b []byte, w io.Writer) (int, error)
	UnmarshalObjectFromReader(r io.Reader) (ID, []byte, error)
	UnmarshalAndAdvanceBuffer(buffer []byte) ([]byte, ID, []byte, error)
}

// RecordReaderWriter represents a library of methods to read and write
// records
type RecordReaderWriter interface {
	MarshalRecords(records []Record) ([]byte, error)
	MarshalRecordsToBuffer(records []Record, buffer []byte) error
	RecordCount(b []byte) int
	UnmarshalRecord(buff []byte) Record
	// RecordLength returns the encoded size in bytes of a single Record.
	RecordLength() int
}
|
//
// Observer_test.go
// PureMVC Go Multicore
//
// Copyright(c) 2019 Saad Shams <saad.shams@puremvc.org>
// Your reuse is governed by the Creative Commons Attribution 3.0 License
//
package observer
import (
"github.com/puremvc/puremvc-go-multicore-framework/src/interfaces"
"github.com/puremvc/puremvc-go-multicore-framework/src/patterns/observer"
"testing"
)
/*
Tests PureMVC Observer class.
Since the Observer encapsulates the interested object's
callback information, there are no getters, only setters.
It is, in effect write-only memory.
Therefore, the only way to test it is to set the
notification method and context and call the notifyObserver
method.
*/
/*
Tests observer class when initialized by accessor methods.
*/
func TestObserverAccessor(t *testing.T) {
	// Create observer
	test := &Test{}
	var obs interfaces.IObserver = &observer.Observer{Notify: nil, Context: nil}
	obs.SetNotifyContext(test)
	obs.SetNotifyMethod(test.NotifyMethod)
	// create a test notification, setting a payload value and notify
	// the observer with it. since the notification method is
	// test.NotifyMethod, successful notification will result in
	// test.Var being set to the value we pass in on the note body.
	var note = observer.NewNotification("ObserverTestNote", 10, "")
	obs.NotifyObserver(note)
	// test assertions
	if test.Var != 10 {
		t.Error("Expecting test.Var == 10")
	}
}
/*
Tests observer class when initialized by constructor.
*/
func TestObserverConstructor(t *testing.T) {
	// Create observer passing in notification method and context
	var test = &Test{}
	var obs = &observer.Observer{Notify: test.NotifyMethod, Context: test}
	// create a test note, setting a body value and notify
	// the observer with it. since the notification method is
	// test.NotifyMethod, successful notification will result in
	// test.Var being set to the value we pass in on the note body.
	var note = observer.NewNotification("ObserverTestNote", 5, "")
	obs.NotifyObserver(note)
	// test assertions
	if test.Var != 5 {
		t.Error("Expecting test.Var == 5")
	}
}
/*
Tests the compareNotifyContext method of the Observer class.
*/
func TestCompareNotifyContext(t *testing.T) {
	// Create observer passing in notification method and context
	var test = &Test{}
	var negTestObj = &NegTestObj{}
	var obs = &observer.Observer{Notify: test.NotifyMethod, Context: test}
	// test assertions: an unrelated object must not match the observer's
	// context, while the actual context object must.
	if obs.CompareNotifyContext(negTestObj) {
		t.Error("Expecting observer.compareNotifyContext(negTestObj) == false")
	}
	if !obs.CompareNotifyContext(test) {
		t.Error("Expecting observer.compareNotifyContext(this) == true")
	}
}
// Test is the observer context used by the tests above; Var captures the
// payload delivered to NotifyMethod.
type Test struct {
	Var int
}

/*
A function that is used as the observer notification
method. It stores the notification body into Var so the
tests can assert on the delivered value.
*/
func (o *Test) NotifyMethod(note interfaces.INotification) {
	o.Var = note.Body().(int)
}

// NegTestObj is a context stand-in that must NOT match the observer's
// context in TestCompareNotifyContext.
type NegTestObj struct{}
|
package policy
import (
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNopPolicy checks the empty combination returned by CombinePolicies():
// its channel never yields before Close, Ops succeed until Close, and
// after Close Ops fail with ErrClosed and the channel reads as closed.
func TestNopPolicy(t *testing.T) {
	p := CombinePolicies()
	select {
	case <-p.C():
		t.Error("should not be able to pull from channel yet")
	default:
	}
	assert.Nil(t, p.Op("foo", Set))
	assert.Nil(t, p.Close())
	assert.Equal(t, ErrClosed, p.Op("foo", Set))
	_, ok := <-p.C()
	assert.False(t, ok)
}
// op records a single Op invocation: the key and the operation type.
type op struct {
	k  string
	op OpType
}

// mockPolicy is a scripted policy implementation for tests: it records
// every Op call under the mutex, exposes a controllable key channel, and
// returns the configured oerr/cerr from Op and Close respectively.
type mockPolicy struct {
	sync.Mutex
	ops        []op
	ch         chan string
	oerr, cerr error
}
// C exposes the mock's key channel.
func (mp *mockPolicy) C() <-chan string {
	return mp.ch
}
// Op records the (key, operation-type) pair under the mutex and returns
// the configured operation error.
func (mp *mockPolicy) Op(k string, ot OpType) error {
	mp.Lock()
	defer mp.Unlock()
	mp.ops = append(mp.ops, op{k, ot})
	return mp.oerr
}
// Close closes the mock's channel (signalling consumers) and returns the
// configured close error.
func (mp *mockPolicy) Close() error {
	close(mp.ch)
	return mp.cerr
}
// TestMultiPolicy checks CombinePolicies fan-out: a key received from one
// child policy is forwarded to the combined channel and replayed as an
// Evict op on every *other* child, and explicit Ops reach all children
// even after Close.
func TestMultiPolicy(t *testing.T) {
	var (
		m1 = &mockPolicy{ch: make(chan string)}
		m2 = &mockPolicy{ch: make(chan string)}
		m3 = &mockPolicy{ch: make(chan string)}
		wg sync.WaitGroup
	)
	p := CombinePolicies(m1, m2, m3)
	wg.Add(1)
	go func() {
		m2.ch <- "foo"
		wg.Done()
	}()
	k := <-p.C()
	wg.Wait()
	// "foo" came from m2, so only m1 and m3 see the Evict replay.
	assert.Equal(t, "foo", k)
	assert.Equal(t, []op{op{"foo", Evict}}, m1.ops)
	assert.Equal(t, []op{op{"foo", Evict}}, m3.ops)
	assert.Nil(t, p.Close())
	p.Op("bar", Set)
	assert.Equal(t, []op{op{"foo", Evict}, op{"bar", Set}}, m1.ops)
	assert.Equal(t, []op{op{"bar", Set}}, m2.ops)
	assert.Equal(t, []op{op{"foo", Evict}, op{"bar", Set}}, m3.ops)
}
|
// Copyright 2021 Adobe. All rights reserved.
// This file is licensed to you under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. You may obtain a copy
// of the License at http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under
// the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
// OF ANY KIND, either express or implied. See the License for the specific language
// governing permissions and limitations under the License.
package ims
import (
"context"
"encoding/json"
"fmt"
"net/http"
)
// ValidateTokenRequest is the request to ValidateToken.
type ValidateTokenRequest struct {
	// Token is the token to validate.
	Token string
	// Type is the token's type; see ValidateTokenWithContext for the
	// accepted values.
	Type TokenType
	// ClientID identifies the calling client; it is sent both as a query
	// parameter and as the X-IMS-ClientId header.
	ClientID string
}
// ValidateTokenResponse is the response to the ValidateToken request.
type ValidateTokenResponse struct {
	Response
	// Valid reports whether IMS judged the token valid.
	Valid bool
}
// ValidateTokenWithContext validates a token using the IMS API
// (GET /ims/validate_token/v1). It returns a non-nil response on success
// or an error on failure. Only AccessToken, RefreshToken, DeviceToken and
// AuthorizationCode token types are accepted.
func (c *Client) ValidateTokenWithContext(ctx context.Context, r *ValidateTokenRequest) (*ValidateTokenResponse, error) {
	switch r.Type {
	case AccessToken, RefreshToken, DeviceToken, AuthorizationCode:
		// Valid token type.
	default:
		return nil, fmt.Errorf("invalid token type: %v", r.Type)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/ims/validate_token/v1", c.url), nil)
	if err != nil {
		return nil, fmt.Errorf("create request: %v", err)
	}
	// The token, its type and the client ID travel as query parameters.
	query := req.URL.Query()
	query.Set("type", string(r.Type))
	query.Set("client_id", r.ClientID)
	query.Set("token", r.Token)
	req.URL.RawQuery = query.Encode()
	// Header X-IMS-ClientID will be mandatory in the future
	req.Header.Set("X-IMS-ClientId", r.ClientID)
	res, err := c.do(req)
	if err != nil {
		return nil, fmt.Errorf("perform request: %v", err)
	}
	if res.StatusCode != http.StatusOK {
		return nil, errorResponse(res)
	}
	// Only the "valid" field of the body is of interest here; the full
	// response is still passed through to the caller.
	var payload struct {
		Valid bool `json:"valid"`
	}
	if err := json.Unmarshal(res.Body, &payload); err != nil {
		return nil, fmt.Errorf("error parsing response: %v", err)
	}
	return &ValidateTokenResponse{
		Response: *res,
		Valid:    payload.Valid,
	}, nil
}
// ValidateToken is equivalent to ValidateTokenWithContext with a background
// context.
func (c *Client) ValidateToken(r *ValidateTokenRequest) (*ValidateTokenResponse, error) {
	return c.ValidateTokenWithContext(context.Background(), r)
}
|
package galery
import (
"encoding/json"
"errors"
"net/http"
"os"
"strings"
"github.com/asaskevich/govalidator"
"github.com/juliotorresmoreno/unravel-server/config"
"github.com/juliotorresmoreno/unravel-server/helper"
"github.com/juliotorresmoreno/unravel-server/models"
"github.com/juliotorresmoreno/unravel-server/ws"
)
// Save creates or renames a gallery directory for the session user and
// stores its permission and description metadata files, responding with
// JSON on success.
//
// Fixes over the previous revision:
//   - the ID form value (used as a path component when renaming) is now
//     validated like the name, preventing path traversal via e.g. "../";
//   - errors from os.Rename and os.Create are no longer ignored (a failed
//     Create previously caused a nil-pointer dereference on Close/Write).
func Save(w http.ResponseWriter, r *http.Request, session *models.User, hub *ws.Hub) {
	var ID = strings.Trim(r.PostFormValue("ID"), " ")
	var nombre = strings.Trim(r.PostFormValue("nombre"), " ")
	var permiso = r.PostFormValue("permiso")
	var descripcion = r.PostFormValue("descripcion")
	w.Header().Set("Content-Type", "application/json")
	if !helper.IsValidPermision(permiso) || !govalidator.IsAlphanumeric(nombre) {
		helper.DespacharError(w, errors.New("El nombre es invalido"), http.StatusNotAcceptable)
		return
	}
	var galeria = config.PATH + "/" + session.Usuario + "/" + strings.Trim(nombre, "\n")
	if ID != "" {
		// ID is also used as a path component: reject anything that is
		// not purely alphanumeric to avoid path traversal.
		if !govalidator.IsAlphanumeric(ID) {
			helper.DespacharError(w, errors.New("El nombre es invalido"), http.StatusNotAcceptable)
			return
		}
		var galeriaOld = config.PATH + "/" + session.Usuario + "/" + strings.Trim(ID, "\n")
		if _, err := os.Stat(galeriaOld); err != nil {
			helper.DespacharError(w, err, http.StatusInternalServerError)
			return
		}
		if err := os.Rename(galeriaOld, galeria); err != nil {
			helper.DespacharError(w, err, http.StatusInternalServerError)
			return
		}
	} else {
		if _, err := os.Stat(galeria); err != nil {
			if err = os.MkdirAll(galeria, 0755); err != nil {
				helper.DespacharError(w, err, http.StatusInternalServerError)
				return
			}
		}
	}
	if err := writeGaleriaMeta(galeria+"/permiso", permiso); err != nil {
		helper.DespacharError(w, err, http.StatusInternalServerError)
		return
	}
	if err := writeGaleriaMeta(galeria+"/descripcion", descripcion); err != nil {
		helper.DespacharError(w, err, http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusCreated)
	var respuesta, _ = json.Marshal(map[string]interface{}{
		"success": true,
		"galeria": nombre,
	})
	w.Write(respuesta)
}

// writeGaleriaMeta writes content to path, creating or truncating the file.
func writeGaleriaMeta(path, content string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.Write([]byte(content))
	return err
}
|
package main
// Leetcode 2267 (hard).
//
// hasValidPath reports whether some monotone (right/down) path from the
// top-left to the bottom-right cell of grid spells a balanced parentheses
// string.
func hasValidPath(grid [][]byte) bool {
	rows, cols := len(grid), len(grid[0])
	pathLen := rows + cols - 1
	// Quick rejections: a valid string cannot start with ')', end with
	// '(', or have odd length.
	if grid[0][0] == ')' || grid[rows-1][cols-1] == '(' || pathLen%2 != 0 {
		return false
	}
	maxBal := pathLen / 2
	// reach[i][j][b] — some path reaching (i,j) has running balance b.
	reach := make([][][]bool, rows)
	for i := range reach {
		reach[i] = make([][]bool, cols)
		for j := range reach[i] {
			reach[i][j] = make([]bool, maxBal+1)
		}
	}
	// The start cell is '(' (checked above), so its balance is 1.
	reach[0][0][1] = true
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			delta := 1
			if grid[i][j] == ')' {
				delta = -1
			}
			for prev := 0; prev <= maxBal; prev++ {
				bal := prev + delta
				if bal < 0 || bal > maxBal {
					continue
				}
				fromTop := i > 0 && reach[i-1][j][prev]
				fromLeft := j > 0 && reach[i][j-1][prev]
				if fromTop || fromLeft {
					reach[i][j][bal] = true
				}
			}
		}
	}
	return reach[rows-1][cols-1][0]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.