text stringlengths 11 4.05M |
|---|
package base
// Pagination carries paging metadata for list responses: the requested
// page number and page size, and the total record count.
// NOTE(review): whether PageNo is 0- or 1-based is not visible here —
// confirm against callers.
type Pagination struct {
	PageNo   int `json:"pageNo"`   // requested page index
	PageSize int `json:"pageSize"` // records per page
	Total    int `json:"total"`    // total number of records across all pages
}
|
package utils
import (
"crypto/md5"
"encoding/hex"
"os"
"strings"
)
// substr returns the rune-based substring of s starting at pos with the
// given length. Out-of-range positions and lengths are clamped instead of
// panicking: a pos past the end or a non-positive effective span yields "".
//
// FIX: the original sliced runes[pos:pos+length] without validating pos or
// a negative length, so substr(in, 0, -1) — which GetParentFullPath produces
// when the input contains no "/" — panicked with a slice-bounds error.
func substr(s string, pos, length int) string {
	runes := []rune(s)
	if pos < 0 {
		pos = 0
	}
	if pos > len(runes) {
		pos = len(runes)
	}
	end := pos + length
	if end > len(runes) {
		end = len(runes)
	}
	if end < pos {
		// Negative length: return the empty string rather than panic.
		end = pos
	}
	return string(runes[pos:end])
}
// GetParentFullPath returns the parent path of a slash-separated path,
// or "/" when the input has no parent (no slash, or the parent is the
// root itself).
//
// FIX: the original passed the *byte* index from strings.LastIndex into the
// rune-based substr helper, which returns a wrong (too long) parent for any
// path containing multi-byte characters, and panicked (via substr) when the
// input contained no "/" at all (LastIndex returns -1).
func GetParentFullPath(in string) (parentFullPath string) {
	idx := strings.LastIndex(in, "/")
	if idx <= 0 {
		// idx == -1: no slash at all; idx == 0: parent is the root.
		return "/"
	}
	// Slicing at a byte index returned by LastIndex is always valid UTF-8.
	return in[:idx]
}
// GetSelfName returns the last slash-separated component of a path.
// An input with no slash is returned unchanged; an empty final component
// (trailing slash or empty input) yields "/".
func GetSelfName(in string) (selfName string) {
	// Everything after the final "/" is the name; with no "/" the whole
	// input is the name. This matches splitting on "/" and taking the
	// last element.
	if idx := strings.LastIndex(in, "/"); idx >= 0 {
		selfName = in[idx+1:]
	} else {
		selfName = in
	}
	if selfName == "" {
		selfName = "/"
	}
	return selfName
}
// GetParentName returns the name of the parent component of a
// slash-separated path (the second-to-last element), or "/" when the
// parent is the root or the path has no parent at all.
//
// FIX: the original indexed tmp[len(tmp)-2] unconditionally, which panics
// with an index-out-of-range error for any input without a "/" (Split
// yields a single element).
func GetParentName(in string) (parentName string) {
	parts := strings.Split(in, "/")
	if len(parts) < 2 {
		// No separator at all: treat the parent as the root.
		return "/"
	}
	parentName = parts[len(parts)-2]
	if parentName == "" {
		parentName = "/"
	}
	return
}
// MD5 returns the lowercase hex encoding of the MD5 digest of in.
func MD5(in string) string {
	// md5.Sum computes the digest in one call; no incremental hashing
	// is needed for a single string.
	sum := md5.Sum([]byte(in))
	return hex.EncodeToString(sum[:])
}
// LocalPathExists ...
func LocalPathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
// ConvertValueToArgs returns a fresh slice containing the variadic
// arguments, isolating the caller from the internal backing array.
// With no arguments it returns nil, matching the original's behavior.
func ConvertValueToArgs(args ...interface{}) []interface{} {
	// append to a nil slice copies every element in one step; an empty
	// input leaves the result nil, exactly like the element-by-element loop.
	return append([]interface{}(nil), args...)
}
package kvs
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"sync"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/version"
)
// Package-level storage keys and protocol version.
var (
	// stateKey is the database key under which the serialized State is stored.
	stateKey = []byte("stateKey")
	// kvPairPrefixKey is prepended to every user-supplied key before writing.
	kvPairPrefixKey = []byte("kvPairKey:")
	// ProtocolVersion is the version of the protocol
	ProtocolVersion version.Protocol = 0x1
)
// State is the application state. It is JSON-serialized into the backing
// store under stateKey on every Commit; db itself is not serialized and is
// reattached by loadState.
type State struct {
	db      dbm.DB // backing key-value store (unexported: not part of the JSON state)
	Size    int64  `json:"size"`     // number of transactions delivered (incremented in DeliverTx)
	Height  int64  `json:"height"`   // incremented once per Commit
	AppHash []byte `json:"app_hash"` // hash returned by the last Commit
}
// prefixKey returns kvPairPrefixKey + key as a newly allocated slice.
//
// FIX: the original used append(kvPairPrefixKey, key...) directly; if the
// shared package-level prefix slice ever had spare capacity, append would
// write the key into its backing array, so two concurrent or successive
// calls could alias/overwrite each other's results. Building into a fresh
// slice makes the result independent of the shared prefix.
func prefixKey(key []byte) []byte {
	out := make([]byte, 0, len(kvPairPrefixKey)+len(key))
	out = append(out, kvPairPrefixKey...)
	return append(out, key...)
}
// saveState JSON-serializes state and writes it under stateKey in the
// state's own backing store. Serialization failure is a programming error
// and panics.
func saveState(state State) {
	raw, marshalErr := json.Marshal(state)
	if marshalErr != nil {
		panic(marshalErr)
	}
	state.db.Set(stateKey, raw)
}
// loadState reads the serialized State from db (key stateKey) and attaches
// db to the returned State. An absent/empty record yields the zero State,
// which is valid for a brand-new store.
func loadState(db dbm.DB) (State, error) {
	var state State
	if raw := db.Get(stateKey); len(raw) > 0 {
		if err := json.Unmarshal(raw, &state); err != nil {
			return state, err
		}
	}
	// Reattach the live handle; db is not part of the serialized form.
	state.db = db
	return state, nil
}
// clientCreator builds local ABCI clients that all share one application
// instance and one mutex (serializing access to the app).
type clientCreator struct {
	mtx *sync.Mutex       // guards app across every client created from this creator
	app types.Application // the shared KVStore application
}
// NewClientCreator create a new client creator for KVStore.
// It builds a fresh in-memory KVStore application and wraps it in a
// creator whose clients share a single mutex.
func NewClientCreator() (proxy.ClientCreator, error) {
	app, err := NewKVStoreApplication()
	if err != nil {
		return nil, err
	}
	creator := &clientCreator{
		mtx: new(sync.Mutex),
		app: app,
	}
	return creator, nil
}
// NewABCIClient returns an in-process ABCI client backed by the creator's
// shared application and mutex.
func (c *clientCreator) NewABCIClient() (abcicli.Client, error) {
	return abcicli.NewLocalClient(c.mtx, c.app), nil
}
// KVStoreApplication is a tendermint key value store app.
// It embeds BaseApplication for default no-op ABCI handlers and overrides
// Info, Query, CheckTx, DeliverTx, and Commit.
type KVStoreApplication struct {
	types.BaseApplication
	state State // current application state, persisted on Commit
}
// Info returns the application information: the pair count as a small JSON
// document in Data, plus the ABCI and application protocol versions.
func (k *KVStoreApplication) Info(types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{
		Data:       fmt.Sprintf("{\"size\":%v}", k.state.Size),
		Version:    version.ABCIVersion,
		AppVersion: ProtocolVersion.Uint64(),
	}
}
// Query returns the value stored under the queried key.
// Log is "exists" when the key is present and "missing" otherwise.
// Proof generation is not implemented: when a proof is requested the
// response only carries Index = -1.
func (k *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
	// if the proof of existence if requested then return proof
	// or else return the actual value
	if reqQuery.Prove {
		resQuery.Index = -1
	}
	resQuery.Key = reqQuery.Data
	resQuery.Value = k.state.db.Get(prefixKey(reqQuery.Data))
	// Default to "missing"; flip to "exists" when a value came back.
	resQuery.Log = "missing"
	if resQuery.Value != nil {
		resQuery.Log = "exists"
	}
	return resQuery
}
// CheckTx validates a transaction for the mempool.
// This application performs no validation: every transaction is accepted
// with a fixed GasWanted of 1.
func (k *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
	return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}
// DeliverTx delivers a transaction for full processing.
// A tx of the form "key=value" (exactly one '=') stores value under key;
// any other tx is stored as both its own key and value. Each delivery
// increments the pair count and tags the result with the app creator and
// the key written.
func (k *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
	// Default: the whole tx is both key and value.
	key, value := tx, tx
	if parts := bytes.Split(tx, []byte("=")); len(parts) == 2 {
		// Exactly one '=': split into key and value.
		key, value = parts[0], parts[1]
	}
	k.state.db.Set(prefixKey(key), value)
	k.state.Size++
	return types.ResponseDeliverTx{
		Code: code.CodeTypeOK,
		Tags: []cmn.KVPair{
			{Key: []byte("app.creator"), Value: []byte("Greg Osuri")},
			{Key: []byte("app.key"), Value: key},
		},
	}
}
// Commit persists the state and returns the application hash.
// NOTE(review): despite the original "big endian" comment, the hash is the
// varint encoding of Size (binary.PutVarint), not a fixed-width big-endian
// integer; kept as-is because changing it would change the app hash.
func (k *KVStoreApplication) Commit() types.ResponseCommit {
	// Using a memdb - the "app hash" is derived only from the pair count.
	appHash := make([]byte, 8)
	binary.PutVarint(appHash, k.state.Size)
	k.state.AppHash = appHash
	k.state.Height++
	saveState(k.state)
	return types.ResponseCommit{Data: appHash}
}
// NewKVStoreApplication returns a new instance of KVStoreApplication backed
// by a fresh in-memory database.
func NewKVStoreApplication() (*KVStoreApplication, error) {
	// load state from memory
	state, err := loadState(dbm.NewMemDB())
	if err != nil {
		return nil, err
	}
	app := &KVStoreApplication{state: state}
	return app, nil
}
|
package event
// CustomerRegistered is a domain event recording that a customer signed up.
// NOTE(review): ConfirmationHash is presumably used to verify the email
// address — confirm against the consumer of this event.
type CustomerRegistered struct {
	CustomerID       string
	FullName         string
	EmailAddress     string
	ConfirmationHash string
}
|
package main
import (
r "MetricsNew/redis"
"fmt"
)
// main exercises the MetricsNew/redis helpers: it checks whether a value
// set already exists for key 123 (printing "- old" or "- new"), stores the
// set, then renames the key.
// NOTE(review): ExistValue/AddValue/RenameKey semantics are inferred from
// their names only — the package is not visible here; confirm.
func main() {
	if r.ExistValue(123, []interface{}{23123, 121212}) {
		fmt.Println("123123", " - old")
	} else {
		fmt.Println("123123", " - new")
	}
	if err := r.AddValue(123, []interface{}{23123, 121212}); err != nil {
		fmt.Println(err)
	}
	if err := r.RenameKey(123); err != nil {
		fmt.Println(err)
	}
}
|
package enforcer
import (
"context"
"fmt"
"github.com/liatrio/rode/pkg/occurrence"
"github.com/liatrio/rode/pkg/attester"
"go.uber.org/zap"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// Enforcer enforces attestations on a resource.
type Enforcer interface {
	// Enforce verifies that the resource identified by resourceURI carries a
	// valid attestation for each enforced attester; it returns nil when the
	// namespace is excluded or every required attester is satisfied.
	Enforce(ctx context.Context, namespace string, resourceURI string) error
}
// enforcer is the default Enforcer implementation.
type enforcer struct {
	logger           *zap.SugaredLogger
	excludeNS        []string            // namespaces that are never enforced
	attesters        []attester.Attester // all attesters that may be enforced
	occurrenceLister occurrence.Lister   // source of occurrences per resource URI
	clientset        *kubernetes.Clientset
}
// NewEnforcer creates an enforcer.
func NewEnforcer(logger *zap.SugaredLogger, excludeNS []string, attesters []attester.Attester, occurrenceLister occurrence.Lister, clientset *kubernetes.Clientset) Enforcer {
	// Named fields keep the literal correct even if struct fields are
	// reordered later.
	return &enforcer{
		logger:           logger,
		excludeNS:        excludeNS,
		attesters:        attesters,
		occurrenceLister: occurrenceLister,
		clientset:        clientset,
	}
}
// Enforce checks the occurrences attached to resourceURI against every
// enforced attester and returns an error when any enforced attester has no
// verifiable attestation. Enforcement is controlled by the namespace label
// "rode.liatr.io/enforce-attesters": "*" enforces all attesters, a specific
// name enforces only that attester, and a missing/empty label (or no labels
// at all) enforces nothing.
func (e *enforcer) Enforce(ctx context.Context, namespace string, resourceURI string) error {
	for _, ns := range e.excludeNS {
		if namespace == ns {
			// skip - this namespace is excluded
			return nil
		}
	}
	e.logger.Debugf("About to enforce resource '%s' in namespace '%s'", resourceURI, namespace)
	// Begin: Determine enforced attesters
	result, err := e.clientset.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("Unable to get namespace: %v", err)
	}
	resultLabels := result.ObjectMeta.Labels
	if resultLabels == nil {
		// Namespace has no labels: nothing is enforced.
		return nil
	}
	enforcedAttesters := resultLabels["rode.liatr.io/enforce-attesters"]
	// End: Determine enforced attesters
	occurrenceList, err := e.occurrenceLister.ListOccurrences(ctx, resourceURI)
	if err != nil {
		return err
	}
	for _, att := range e.attesters {
		// Skip attesters that are not enforced for this namespace.
		if enforcedAttesters != "*" && enforcedAttesters != att.String() {
			continue
		}
		// The attester passes if any occurrence verifies successfully.
		attested := false
		for _, occ := range occurrenceList.GetOccurrences() {
			req := &attester.VerifyRequest{
				Occurrence: occ,
			}
			if err = att.Verify(ctx, req); err == nil {
				attested = true
				break
			}
		}
		if !attested {
			return fmt.Errorf("Unable to find an attestation for %s", att)
		}
	}
	return nil
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package store
import (
"testing"
"github.com/mattermost/mattermost-cloud/internal/testlib"
"github.com/mattermost/mattermost-cloud/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGetLogicalDatabase verifies that a created logical database can be
// fetched by ID, and that fetching an unknown ID returns (nil, nil) rather
// than an error.
func TestGetLogicalDatabase(t *testing.T) {
	logger := testlib.MakeLogger(t)
	store := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, store)
	// A logical database must belong to a multitenant database.
	multitenantDatabase := &model.MultitenantDatabase{
		DatabaseType: model.DatabaseEngineTypePostgresProxy,
	}
	err := store.CreateMultitenantDatabase(multitenantDatabase)
	require.NoError(t, err)
	t.Run("success", func(t *testing.T) {
		logicalDatabase := &model.LogicalDatabase{
			MultitenantDatabaseID: multitenantDatabase.ID,
			Name:                  "ldb1",
		}
		createAndCheckLogicalDatabase(t, store, logicalDatabase)
		_, err := store.GetLogicalDatabase(logicalDatabase.ID)
		require.NoError(t, err)
	})
	t.Run("invalid id", func(t *testing.T) {
		// Unknown IDs are not an error; the store returns nil.
		logicalDatabase, err := store.GetLogicalDatabase(model.NewID())
		require.NoError(t, err)
		assert.Nil(t, logicalDatabase)
	})
}
// TestCreateLogicalDatabase verifies that creating a logical database under
// an existing multitenant database succeeds and assigns an ID.
func TestCreateLogicalDatabase(t *testing.T) {
	logger := testlib.MakeLogger(t)
	store := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, store)
	multitenantDatabase := &model.MultitenantDatabase{
		DatabaseType: model.DatabaseEngineTypePostgresProxy,
	}
	err := store.CreateMultitenantDatabase(multitenantDatabase)
	require.NoError(t, err)
	t.Run("success", func(t *testing.T) {
		logicalDatabase := &model.LogicalDatabase{
			MultitenantDatabaseID: multitenantDatabase.ID,
			Name:                  "ldb1",
		}
		createAndCheckLogicalDatabase(t, store, logicalDatabase)
	})
}
// TestDeleteLogicalDatabase verifies that deletion is a soft delete: after
// DeleteLogicalDatabase the record is still fetchable and carries a
// non-zero DeleteAt timestamp.
func TestDeleteLogicalDatabase(t *testing.T) {
	logger := testlib.MakeLogger(t)
	store := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, store)
	multitenantDatabase := &model.MultitenantDatabase{
		DatabaseType: model.DatabaseEngineTypePostgresProxy,
	}
	err := store.CreateMultitenantDatabase(multitenantDatabase)
	require.NoError(t, err)
	logicalDatabase := &model.LogicalDatabase{
		MultitenantDatabaseID: multitenantDatabase.ID,
		Name:                  "ldb1",
	}
	createAndCheckLogicalDatabase(t, store, logicalDatabase)
	t.Run("success", func(t *testing.T) {
		err = store.DeleteLogicalDatabase(logicalDatabase.ID)
		require.NoError(t, err)
		// Re-fetch to observe the soft-delete timestamp.
		logicalDatabase, err = store.GetLogicalDatabase(logicalDatabase.ID)
		require.NoError(t, err)
		assert.True(t, logicalDatabase.DeleteAt > 0)
	})
}
// Helpers

// createAndCheckLogicalDatabase creates the given logical database and
// asserts the store assigned it a non-empty ID.
func createAndCheckLogicalDatabase(t *testing.T, store *SQLStore, logicalDatabase *model.LogicalDatabase) {
	err := store.CreateLogicalDatabase(logicalDatabase)
	require.NoError(t, err)
	assert.NotEmpty(t, logicalDatabase.ID)
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
// panacea reports whether the binary-encoded tokens in q sum to at least
// the hex-encoded tokens in p. Tokens are whitespace-separated; p's tokens
// are parsed as hexadecimal, q's as binary.
// NOTE: Sscanf errors are ignored, matching the original — an unparsable
// token leaves the previous parsed value in place.
func panacea(p, q string) bool {
	var parsed, balance int
	for _, tok := range strings.Fields(p) {
		fmt.Sscanf(tok, "%x", &parsed)
		balance += parsed
	}
	for _, tok := range strings.Fields(q) {
		fmt.Sscanf(tok, "%b", &parsed)
		balance -= parsed
	}
	return balance <= 0
}
// main reads the file named by the first CLI argument line by line; each
// line is split on " | " into two token lists and "True"/"False" is printed
// per line according to panacea.
// NOTE(review): os.Args[1] panics if no argument is given, and s[1] panics
// if a line lacks the " | " separator; scanner.Err() is never checked.
func main() {
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		s := strings.Split(scanner.Text(), " | ")
		if panacea(s[0], s[1]) {
			fmt.Println("True")
		} else {
			fmt.Println("False")
		}
	}
}
|
package main
import (
"context"
"errors"
"net"
"net/http"
"os"
"os/signal"
"syscall"
)
// main serves ./static/attack-server over HTTP on port 9090 and shuts the
// server down gracefully on SIGINT/SIGTERM.
// NOTE(review): Shutdown is called with a background context and no
// timeout, so it waits indefinitely for in-flight requests — consider a
// context.WithTimeout if bounded shutdown is wanted.
func main() {
	// Buffered so the signal is not lost if it arrives before the receive.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	ctx := context.Background()
	s := &http.Server{
		Addr:    net.JoinHostPort("", "9090"),
		Handler: http.FileServer(http.Dir("./static/attack-server")),
	}
	go func() {
		// ErrServerClosed is the expected result of Shutdown, not a failure.
		if err := s.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			panic(err)
		}
	}()
	<-quit
	if err := s.Shutdown(ctx); err != nil {
		panic(err)
	}
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var client *mongo.Client
// Person is the document stored in the "people" collection.
// All fields are optional on the wire (omitempty in both JSON and BSON).
type Person struct {
	ID        primitive.ObjectID `json:"_id,omitempty" bson:"_id,omitempty"`
	Firstname string             `json:"firstname,omitempty" bson:"firstname,omitempty"`
	Lastname  string             `json:"lastname,omitempty" bson:"lastname,omitempty"`
}
// func to create
func Create(response http.ResponseWriter, request *http.Request) {
response.Header().Add("content-type", "application/json") // serve in
var person Person
json.NewDecoder(request.Body).Decode(&person)
collection := client.Database("test").Collection("people")
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
result, _ := collection.InsertOne(ctx, person)
json.NewEncoder(response).Encode(result)
}
// main connects to a local MongoDB and serves POST /person on :8086.
// NOTE(review): the cancel func from WithTimeout is discarded (go vet:
// lostcancel) and the Connect / ListenAndServe errors are ignored — a
// failed connection is only discovered on the first request.
func main() {
	fmt.Println("Starting")
	ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //create context with timeout
	client, _ = mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017")) //connect to the db TODO get params from conf
	router := mux.NewRouter() // define the router
	router.HandleFunc("/person", Create).Methods("POST") // create the route
	http.ListenAndServe(":8086", router) // define the port TODO get from conf
}
|
package extra
import (
"time"
)
// ClientLog is a persisted log entry reported by a client application.
// NOTE(review): the meanings of Platform and Status codes are not visible
// here — confirm against the writer of this table.
type ClientLog struct {
	Id        int
	UserId    int
	Platform  int    // platform code (e.g. OS/app variant) — TODO confirm values
	Version   string // client version string
	Content   string // log payload
	Extra     string // additional free-form data
	Status    int
	CreatedAt time.Time
	UpdatedAt time.Time
}
|
package sics
import (
"errors"
"fmt"
"github.com/moovweb/gokogiri"
"github.com/moovweb/gokogiri/xml"
"strings"
)
// Parse parses a SICS match HTML page into a Match: it locates the two
// innings score tables (class "OversTable"), parses each into m.Innings,
// and records each innings' batting team name in m.Teams.
//
// FIX: the inner innings error is now wrapped with %w so callers can
// unwrap the cause, and the redundant "for i, _ := range" form is reduced
// to "for i := range".
func Parse(input []byte) (m Match, err error) {
	h, err := gokogiri.ParseHtml(input)
	if err != nil {
		return
	}
	// Find score tables and extract innings out of each
	oversTables, err := h.Search("//table[@class='OversTable']")
	if err != nil {
		return
	}
	if len(oversTables) < 2 {
		// errors.New retained (rather than fmt.Errorf) so the file-level
		// "errors" import stays in use.
		err = errors.New(fmt.Sprintf(
			"Could not parse out two innings tables, got %d", len(oversTables)))
		return
	}
	m.Innings = [2]Innings{}
	m.Teams = [2]Team{}
	for i := range m.Innings {
		m.Innings[i], err = ParseInnings(oversTables[i])
		if err != nil {
			err = fmt.Errorf("Could not parse innings: %w", err)
			return
		}
		m.Teams[i] = Team{
			Name: m.Innings[i].Team,
		}
	}
	return
}
// ParseInnings parses one innings score table into an Innings value.
// Rows are classified by cell CSS classes: a "TeamHeader" row names the
// team; a row whose third cell is "Bwl" starts a new skin and lists its
// bowlers; the next two "BatsmanCell" rows fill in each batsman's balls.
// A completed skin (after the second batsman row) is appended to i.Skins.
// NOTE(review): the fmt.Printf calls look like leftover debug output.
// NOTE(review): ballIndex is only reset at "OverTotalCell" boundaries and
// never incremented per ball — for the second batsman this always reads
// Balls[0] of each over; possibly intentional, but looks like a bug. TODO
// confirm against real table data before changing.
func ParseInnings(oversTable xml.Node) (i Innings, err error) {
	var (
		skin             Skin // skin currently being assembled
		skinBatsmanIndex int  // 0 or 1: which batsman row of the skin is next
	)
	rows, err := oversTable.Search(".//tr")
	if err != nil {
		return
	}
	for _, row := range rows {
		// Figure out type of row and handle
		cells, err := row.Search(".//td")
		if err != nil {
			return i, err
		}
		if len(cells) == 0 {
			continue
		}
		if len(cells) > 1 && cells[0].Attr("class") == "TeamHeader" {
			// Team name row.
			i.Team = strings.TrimSpace(cells[1].Content())
		} else if len(cells) > 2 &&
			cells[2].Attr("class") == "Bwl" {
			// Bowler row: start a fresh skin, one Over per "Bwl" cell.
			skin = Skin{}
			skinBatsmanIndex = 0
			for _, cell := range cells {
				if cell.Attr("class") == "Bwl" {
					skin.Overs = append(skin.Overs, Over{
						Bowler: strings.TrimSpace(cell.Content()),
					})
				}
			}
		} else if skinBatsmanIndex <= 1 && len(skin.Overs) > 0 &&
			cells[0].Attr("class") == "BatsmanCell" {
			// Batsman row: first cell is the batsman's name, remaining
			// ball cells are attributed to the current over.
			skin.Batsmen[skinBatsmanIndex] = strings.TrimSpace(
				cells[0].Content())
			overIndex := 0
			ballIndex := 0
			for _, cell := range cells {
				if (cell.Attr("class") == "BallCell" ||
					cell.Attr("class") == "extraBall") &&
					overIndex < len(skin.Overs) {
					var ball *Ball
					ballRaw := strings.ToLower(strings.TrimSpace(
						cell.Content()))
					fmt.Printf("%#v\n", ballRaw) // NOTE(review): debug print
					if skinBatsmanIndex == 0 {
						// First batsman row creates the ball records.
						ball = &Ball{}
						skin.Overs[overIndex].Balls = append(
							skin.Overs[overIndex].Balls, *ball)
					} else {
						// Second batsman row annotates existing records.
						ball = &skin.Overs[overIndex].Balls[ballIndex]
					}
					ball.Bowler = skin.Overs[overIndex].Bowler
					if ballRaw != "" {
						ball.Batsman = skin.Batsmen[skinBatsmanIndex]
						ball.Kind = ballRaw
					}
					// skin.Overs[overIndex].Balls = append(
					//	skin.Overs[overIndex].Balls, Ball{
					//		Bowler: skin.Overs[overIndex],
					//		Batsman: skin.Batsmen[skinBatsmanIndex],
					//	})
				} else if cell.Attr("class") == "OverTotalCell rightAligned" {
					// End of an over: advance to the next over.
					overIndex++
					ballIndex = 0
				}
			}
			if skinBatsmanIndex == 1 {
				// Both batsman rows processed: skin is complete.
				i.Skins = append(i.Skins, skin)
			}
			skinBatsmanIndex++
		}
	}
	fmt.Printf("%#v\n", i) // NOTE(review): debug print
	return
}
|
package magic
import (
"fmt"
"path/filepath"
"testing"
)
// TestMagic runs GetFileMagic over a hard-coded local directory and prints
// the result per file.
// NOTE(review): the absolute /home/strings/... path makes this test
// machine-specific; it fails everywhere else. The glob error is ignored.
func TestMagic(t *testing.T) {
	files, _ := filepath.Glob("/home/strings/via/cache/src/*")
	if len(files) == 0 {
		t.Errorf("expected files list greater the 0 to test")
		t.FailNow()
	}
	for _, file := range files {
		m, err := GetFileMagic(file)
		if err != nil {
			t.Error(err)
		}
		fmt.Printf("%-40.40s %v\n", filepath.Base(file), m)
	}
}
// TestContentType runs GetFileMagic over a hard-coded music directory and
// prints the detected type per file.
// NOTE(review): machine-specific path, same caveat as TestMagic; despite
// the name it calls GetFileMagic, not a content-type function.
func TestContentType(t *testing.T) {
	files, _ := filepath.Glob("/home/strings/Music/Johnny Cash/The Very Best Of/*")
	if len(files) == 0 {
		t.Errorf("expected files list greater the 0 to test")
		t.FailNow()
	}
	for _, file := range files {
		b, err := GetFileMagic(file)
		if err != nil {
			t.Error(err)
		}
		fmt.Printf("%-40.40s %v\n", filepath.Base(file), b)
	}
}
|
package controller
import (
"github.com/gorilla/websocket"
"github.com/labstack/echo/v4"
"log"
"net/http"
"time"
"websocket-example/controller/delivery"
)
// upgrader upgrades HTTP requests to websocket connections.
// NOTE(review): CheckOrigin returning true accepts ANY origin, which
// permits cross-site websocket hijacking — acceptable for a demo, not for
// production.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}
// ServeWs upgrades the request to a websocket, registers the connection
// with hub H under the room taken from the :roomId path parameter, and
// starts the read and write pumps.
func ServeWs(c echo.Context) error {
	roomId := c.Param("roomId")
	ws, err := upgrader.Upgrade(c.Response(), c.Request(), nil)
	if err != nil {
		c.Logger().Error(err)
		return err
	}
	// Buffered send channel so slow readers don't immediately block the hub.
	conn := &connection{send: make(chan []byte, 256), ws: ws}
	s := subscription{conn, roomId}
	H.register <- s
	go s.writePump()
	go s.readPump()
	return nil
}
// connection pairs a websocket with its outbound message queue.
type connection struct {
	ws   *websocket.Conn
	send chan []byte // messages queued for writePump to deliver
}
// subscription binds a connection to a chat room.
type subscription struct {
	conn *connection
	room string // room identifier used for hub routing
}
// readPump reads messages from the websocket and broadcasts them to the
// room via hub H, also persisting each message asynchronously through a
// delivery.TimelineHandler. It runs until the read fails, then unregisters
// and closes the socket. (maxMessageSize, pongWait and H are defined
// elsewhere in this package.)
func (s subscription) readPump() {
	c := s.conn
	defer func() {
		// Always detach from the hub and close the socket on exit.
		H.unregister <- s
		c.ws.Close()
	}()
	c.ws.SetReadLimit(maxMessageSize)
	c.ws.SetReadDeadline(time.Now().Add(pongWait)) // NOTE(review): error ignored
	// Each pong extends the read deadline, keeping idle connections alive.
	c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })
	for {
		_, msg, err := c.ws.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {
				log.Printf("error: %v", err)
			}
			break
		}
		// Persist asynchronously; msg is declared per iteration, so the
		// closure capture is safe.
		go func() {
			t := new(delivery.TimelineHandler)
			t.Store(string(msg))
		}()
		m := message{msg, s.room}
		H.broadcast <- m
	}
}
// writePump drains the connection's send channel onto the websocket and
// emits periodic pings (every pingPeriod). It exits — closing the socket —
// when the send channel is closed or any write fails. (pingPeriod is
// defined elsewhere in this package; the local "message" variable shadows
// the package's message type inside the first case, which is harmless.)
func (s *subscription) writePump() {
	c := s.conn
	ticker := time.NewTicker(pingPeriod)
	defer func() {
		ticker.Stop()
		c.ws.Close()
	}()
	for {
		select {
		case message, ok := <-c.send:
			if !ok {
				// Hub closed the channel: tell the peer we're done.
				c.write(websocket.CloseMessage, []byte{})
				return
			}
			if err := c.write(websocket.TextMessage, message); err != nil {
				return
			}
		case <-ticker.C:
			// Keep-alive ping; the peer's pong refreshes readPump's deadline.
			if err := c.write(websocket.PingMessage, []byte{}); err != nil {
				return
			}
		}
	}
}
// write sends one message of the given type with a fresh write deadline
// (writeWait, defined elsewhere in this package).
// NOTE(review): the SetWriteDeadline error is ignored.
func (c *connection) write(mt int, payload []byte) error {
	c.ws.SetWriteDeadline(time.Now().Add(writeWait))
	return c.ws.WriteMessage(mt, payload)
}
|
package virtualmachineimage
import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// Constants for building CDI importer pods: volume names/paths, importer
// environment variable names, source types, and the importer image.
const (
	// DataVolName provides a const to use for creating volumes in pod specs
	DataVolName = "data-vol"
	// WriteBlockPath provides a constant for the path where the PV is mounted.
	WriteBlockPath = "/dev/cdi-block-volume"
	// ImporterSource provides a constant to capture our env variable "IMPORTER_SOURCE"
	ImporterSource = "IMPORTER_SOURCE"
	// ImporterEndpoint provides a constant to capture our env variable "IMPORTER_ENDPOINT"
	ImporterEndpoint = "IMPORTER_ENDPOINT"
	// ImporterContentType provides a constant to capture our env variable "IMPORTER_CONTENTTYPE"
	ImporterContentType = "IMPORTER_CONTENTTYPE"
	// ImporterImageSize provides a constant to capture our env variable "IMPORTER_IMAGE_SIZE"
	ImporterImageSize = "IMPORTER_IMAGE_SIZE"
	// InsecureTLSVar provides a constant to capture our env variable "INSECURE_TLS"
	InsecureTLSVar = "INSECURE_TLS"
	// SourceHTTP is the source type HTTP
	SourceHTTP = "http"
	// SourceHostPath is the source type host path
	SourceHostPath = "hostPath"
	// ImageContentType is the content-type of the imported file
	ImageContentType = "kubevirt"
	// ImportPodImage and ImportPodVerbose should be modified to get value from vmi env
	// ImportPodImage indicates image name of the import pod
	ImportPodImage = "kubevirt/cdi-importer:v1.13.0"
	// ImportPodVerbose indicates log level of the import pod
	ImportPodVerbose = "1"
	// SourceVolumeName is used for creating source volume in pod specs
	SourceVolumeName = "source-vol"
	// SourceVolumeMountPath is a path where the source volume is mounted
	SourceVolumeMountPath = "/data/source"
)
// syncImporterPod drives the import workflow for the VMI's PVC:
//   - no PVC yet: nothing to do;
//   - import incomplete and the importer pod has finished: mark the PVC
//     imported and delete the pod;
//   - import incomplete and no importer pod exists: create one.
// Both delete and create tolerate races (NotFound / AlreadyExists).
func (r *ReconcileVirtualMachineImage) syncImporterPod() error {
	imported, found, err := r.isPvcImported()
	if err != nil {
		return err
	} else if !found {
		klog.Warningf("syncImporterPod without pvc in vmi %s", r.vmi.Name)
		return nil
	}
	importerPod := &corev1.Pod{}
	err = r.client.Get(context.Background(), types.NamespacedName{Namespace: r.vmi.Namespace, Name: GetImporterPodNameFromVmiName(r.vmi.Name)}, importerPod)
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	existsImporterPod := err == nil
	if !imported && existsImporterPod && isPodCompleted(importerPod) {
		// Importing finished: update the "imported" annotation and delete the pod.
		klog.Infof("syncImporterPod finish for vmi %s, delete importerPod", r.vmi.Name)
		if err := r.updatePvcImported(true); err != nil {
			return err
		}
		if err := r.client.Delete(context.TODO(), importerPod); err != nil && !errors.IsNotFound(err) {
			return err
		}
	} else if !imported && !existsImporterPod {
		// Importing still needed, so create the importer pod.
		klog.Infof("syncImporterPod create new importerPod for vmi %s", r.vmi.Name)
		newPod, err := r.newImporterPod()
		if err != nil {
			return err
		}
		if err := r.client.Create(context.TODO(), newPod); err != nil && !errors.IsAlreadyExists(err) {
			return err
		}
	}
	return nil
}
// isPodCompleted reports whether the pod's first container has terminated
// with the reason "Completed".
func isPodCompleted(pod *corev1.Pod) bool {
	statuses := pod.Status.ContainerStatuses
	if len(statuses) == 0 {
		return false
	}
	term := statuses[0].State.Terminated
	return term != nil && term.Reason == "Completed"
}
// GetImporterPodNameFromVmiName returns ImporterPod name from VmiName:
// the VMI name with a fixed "-image-importer" suffix.
func GetImporterPodNameFromVmiName(vmiName string) string {
	const suffix = "-image-importer"
	return vmiName + suffix
}
// newImporterPod builds the importer pod for this VMI's PVC: a pod running
// the CDI importer image as root, with the PVC attached as a raw block
// device at WriteBlockPath. The pod is specialized per source type:
// HTTP sources get the importer env vars; hostPath sources are pinned to
// the source node and run qemu-img to convert the local qcow2 into the
// block device. The VMI is set as controller owner so the pod is GC'd with
// it.
func (r *ReconcileVirtualMachineImage) newImporterPod() (*corev1.Pod, error) {
	ip := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      GetImporterPodNameFromVmiName(r.vmi.Name),
			Namespace: r.vmi.Namespace,
		},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyOnFailure,
			Containers: []corev1.Container{
				{
					Name:  "importer",
					Image: ImportPodImage,
					// Zero requests/limits: schedule without reserving resources.
					Resources: corev1.ResourceRequirements{
						Limits: map[corev1.ResourceName]resource.Quantity{
							corev1.ResourceCPU:    resource.MustParse("0"),
							corev1.ResourceMemory: resource.MustParse("0")},
						Requests: map[corev1.ResourceName]resource.Quantity{
							corev1.ResourceCPU:    resource.MustParse("0"),
							corev1.ResourceMemory: resource.MustParse("0")},
					},
					// Attach the PVC as a raw block device, not a filesystem mount.
					VolumeDevices: []corev1.VolumeDevice{
						{Name: DataVolName, DevicePath: WriteBlockPath},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					Name: DataVolName,
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: GetPvcNameFromVmiName(r.vmi.Name),
						},
					},
				},
			},
			// Run as root (uid 0); the importer writes directly to the block device.
			SecurityContext: &corev1.PodSecurityContext{
				RunAsUser: &[]int64{0}[0],
			},
		},
	}
	src, err := r.getSource()
	if err != nil {
		return nil, err
	}
	if src == SourceHTTP {
		// HTTP import: configure the CDI importer via environment variables.
		pvcSize := r.vmi.Spec.PVC.Resources.Requests[corev1.ResourceStorage]
		ip.Spec.Containers[0].Args = []string{"-v=" + ImportPodVerbose}
		ip.Spec.Containers[0].Env = []corev1.EnvVar{
			{Name: ImporterSource, Value: SourceHTTP},
			{Name: ImporterEndpoint, Value: r.vmi.Spec.Source.HTTP},
			{Name: ImporterContentType, Value: ImageContentType},
			{Name: ImporterImageSize, Value: pvcSize.String()},
			{Name: InsecureTLSVar, Value: "true"},
		}
	} else if src == SourceHostPath {
		// hostPath import: must run on the node that holds the source file;
		// convert the qcow2 image straight onto the block device.
		ip.Spec.NodeName = r.vmi.Spec.Source.HostPath.NodeName
		ip.Spec.Containers[0].Command = []string{"qemu-img", "convert", "-f", "qcow2", "-O", "raw", SourceVolumeMountPath + "/disk.img", WriteBlockPath}
		ip.Spec.Volumes = append(ip.Spec.Volumes, corev1.Volume{
			Name: SourceVolumeName,
			VolumeSource: corev1.VolumeSource{
				HostPath: &corev1.HostPathVolumeSource{
					Path: r.vmi.Spec.Source.HostPath.Path,
				}},
		})
		ip.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
			{Name: SourceVolumeName, MountPath: SourceVolumeMountPath}}
	}
	if err := controllerutil.SetControllerReference(r.vmi, ip, r.scheme); err != nil {
		return nil, err
	}
	return ip, nil
}
|
package main
import "fmt"
// Pointer demo: the program declares an int variable, prints its address,
// assigns that address to a pointer, and modifies the variable through the
// pointer.
func main() {
	var a int
	fmt.Println(&a) // print a's address
	var p *int
	p = &a  // p now points at a
	*p = 20 // write through the pointer
	fmt.Println(a) // prints 20
}
|
package postgres
import (
"github.com/google/uuid"
"github.com/orbis-challenge/src/models"
)
// SaveSectorWeight inserts the sector weight and returns it with any
// database-populated columns filled in (Returning "*").
func (q DBQuery) SaveSectorWeight(sectorWeight *models.SectorWeight) (*models.SectorWeight, error) {
	_, err := q.Model(sectorWeight).
		Returning("*").
		Insert()
	return sectorWeight, err
}
// DeleteSectorWeightByID deletes the sector weight with the given primary
// key ID.
func (q DBQuery) DeleteSectorWeightByID(id uuid.UUID) (err error) {
	// Build the model inline; only the ID is needed to target the row.
	_, err = q.Model(&models.SectorWeight{ID: id}).Delete()
	return
}
|
package structs
import "fmt"
// Student is a demo struct with unexported fields used by Learn.
type Student struct {
	id   int
	name string
	age  int
}
// Learn demonstrates struct literals, slices of structs, value methods,
// and pointer-receiver mutation via Vertex.Scale.
func Learn() {
	// Named-field struct literal.
	james := Student{id: 2, name: "James", age: 15}
	// Slice of struct literals.
	students := []Student{
		{id: 2, name: "John", age: 20},
		{id: 3, name: "Top", age: 21},
	}
	fmt.Println(james.name)
	fmt.Println(students[1].name)
	// Positional literal, value-method call, then pointer-method mutation.
	v := Vertex{3, 4}
	fmt.Printf("%v", v.Area())
	fmt.Println(v)
	v.Scale(10)
	fmt.Println(v)
}
// Vertex is a demo 2D point with float32 coordinates.
type Vertex struct {
	X, Y float32
}
// Area returns X*Y. NOTE(review): despite the name this is the product of
// the two coordinates (a rectangle's area), not a property of one vertex.
func (v Vertex) Area() float32 {
	product := v.X * v.Y
	return product
}
// Scale multiplies both coordinates by f in place (pointer receiver).
func (v *Vertex) Scale(f float32) {
	v.X *= f
	v.Y *= f
}
|
package cmd
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
snmpsimclient "github.com/inexio/snmpsim-restapi-go-client"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// eraseEnvCmd represents the eraseEnv command: it deletes every object
// tagged with the given tag id, then deletes the tag itself. Unless the
// --force flag is set the user is asked for confirmation first.
// NOTE(review): the Short text "Erases an lab environment" has a grammar
// slip ("a lab"), but it is a runtime string and is left untouched here.
var eraseEnvCmd = &cobra.Command{
	Use:   "erase-env <tag-id>",
	Args:  cobra.ExactArgs(1),
	Short: "Erases an lab environment",
	Long:  `Completely deletes all components created during setup-env operation including the tag created with it.`,
	Run: func(cmd *cobra.Command, args []string) {
		//Load the client data from the config
		baseUrl := viper.GetString("mgmt.http.baseUrl")
		username := viper.GetString("mgmt.http.authUsername")
		password := viper.GetString("mgmt.http.authPassword")
		//Create a new client
		client, err := snmpsimclient.NewManagementClient(baseUrl)
		if err != nil {
			log.Error().
				Msg("Error while creating management client")
			os.Exit(1)
		}
		err = client.SetUsernameAndPassword(username, password)
		if err != nil {
			log.Error().
				Msg("Error while setting username and password")
			os.Exit(1)
		}
		//Create reader
		reader := bufio.NewReader(os.Stdin)
		//Read in the tag-id
		tagId, err := strconv.Atoi(args[0])
		if err != nil {
			log.Error().
				Msg("Error while converting " + args[0] + "from string to int")
			os.Exit(1)
		}
		//Get information about the tag
		tag, err := client.GetTag(tagId)
		if err != nil {
			log.Error().
				Msg("Error while getting tag")
			os.Exit(1)
		}
		deleteEnv := true
		//Check if the force flag is set; without --force, ask for confirmation
		if !cmd.Flag("force").Changed {
			fmt.Print("Are you sure you to delete the environment tagged with ", tag.Name, " id: ", tagId, "?(yes/no) ")
			//checking the user input
			input, err := reader.ReadString('\n')
			if err != nil {
				log.Error().
					Msg("Error while retrieving input")
				os.Exit(1)
			}
			//Remove carriage return and line feed characters
			input = strings.Replace(strings.Replace(input, "\n", "", -1), "\r", "", -1)
			switch input {
			case "yes", "Yes", "y", "Y":
				deleteEnv = true
			case "no", "No", "n", "N":
				deleteEnv = false
			default:
				// Unrecognized answer: abort rather than guess.
				log.Debug().
					Msg("Invalid input: " + input)
				os.Exit(1)
			}
		}
		if deleteEnv {
			//Delete all tagged objects
			_, err = client.DeleteAllObjectsWithTag(tagId)
			if err != nil {
				log.Error().
					Msg("Error while deleting all objects tagged with " + tag.Name)
				os.Exit(1)
			}
			//Delete the tag itself
			err = client.DeleteTag(tagId)
			if err != nil {
				log.Error().
					Msg("Error while deleting tag " + tag.Name)
				os.Exit(1)
			}
			fmt.Println("Environment", tag.Name, "id", tagId, "has been deleted successfully.")
		}
	},
}
// init registers erase-env under the root command and declares its
// --force/-f flag, which skips the interactive confirmation.
func init() {
	rootCmd.AddCommand(eraseEnvCmd)
	eraseEnvCmd.Flags().BoolP("force", "f", false, "Disables the 'Are you sure you want to delete this' question")
}
|
package ldap
import (
"bytes"
"crypto/tls"
"crypto/x509"
"fmt"
"reflect"
"strconv"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"github.com/rancher/go-rancher/v2"
"github.com/rancher/rancher-auth-service/model"
"gopkg.in/ldap.v2"
)
// LClient is the ldap client.
type LClient struct {
	Config            *model.LdapConfig // server address, credentials, schema fields
	ConstantsConfig   *ConstantsConfig
	SearchConfig      *SearchConfig
	AccessMode        string // e.g. "required" restricts login to allowed identities
	AllowedIdentities string
	Enabled           bool // when false, a service-account bind is performed before user bind
}
// SearchConfig holds connection and attribute settings for LDAP searches.
// NOTE(review): "GroupSeachAttributes" is a typo for "SearchAttributes" but
// renaming the exported field would break callers; left as-is.
type SearchConfig struct {
	Server               string
	Port                 int64
	BindDN               string // service account DN used for searches
	BindPassword         string
	UserSearchAttributes []string // attributes requested in user searches
	GroupSeachAttributes []string // attributes requested in group searches
}
// ConstantsConfig holds provider-level constants: identity scopes,
// well-known attribute names, and the CA pool used for TLS connections.
type ConstantsConfig struct {
	UserScope            string
	GroupScope           string
	Scopes               []string
	MemberOfAttribute    string // attribute listing a user's group DNs
	ObjectClassAttribute string
	LdapJwt              string
	CAPool               *x509.CertPool
}
// nilIdentity and nilToken are the zero-value results returned from error
// paths so callers always receive a well-typed value.
var nilIdentity = client.Identity{Resource: client.Resource{
	Type: "identity",
}}
var nilToken = model.Token{Resource: client.Resource{
	Type: "token",
}}
// InitializeSearchConfig builds a SearchConfig from the client's LDAP
// config: connection details come from Config, and the user/group search
// attribute lists combine the constant attribute names with the configured
// schema field names.
func (l *LClient) InitializeSearchConfig() *SearchConfig {
	c := l.ConstantsConfig
	return &SearchConfig{
		Server:       l.Config.Server,
		Port:         l.Config.Port,
		BindDN:       l.Config.ServiceAccountUsername,
		BindPassword: l.Config.ServiceAccountPassword,
		UserSearchAttributes: []string{c.MemberOfAttribute,
			c.ObjectClassAttribute,
			l.Config.UserObjectClass,
			l.Config.UserLoginField,
			l.Config.UserNameField,
			l.Config.UserSearchField,
			l.Config.UserEnabledAttribute},
		GroupSeachAttributes: []string{c.MemberOfAttribute,
			c.ObjectClassAttribute,
			l.Config.GroupObjectClass,
			l.Config.UserLoginField,
			l.Config.GroupNameField,
			l.Config.GroupSearchField},
	}
}
// newConn dials a new LDAP connection (TLS when configured, verifying the
// server against the constant CA pool) and applies the configured
// connection timeout to both dialing and subsequent operations.
// NOTE(review): mutating the package-global ldap.DefaultTimeout here is
// racy if connections are created concurrently with different configs.
func (l *LClient) newConn() (*ldap.Conn, error) {
	log.Debug("Now creating Ldap connection")
	var lConn *ldap.Conn
	var err error
	var tlsConfig *tls.Config
	searchConfig := l.SearchConfig
	ldap.DefaultTimeout = time.Duration(l.Config.ConnectionTimeout) * time.Millisecond
	if l.Config.TLS {
		tlsConfig = &tls.Config{RootCAs: l.ConstantsConfig.CAPool, InsecureSkipVerify: false, ServerName: l.Config.Server}
		lConn, err = ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", searchConfig.Server, searchConfig.Port), tlsConfig)
		if err != nil {
			return nil, fmt.Errorf("Error creating ssl connection: %v", err)
		}
	} else {
		lConn, err = ldap.Dial("tcp", fmt.Sprintf("%s:%d", searchConfig.Server, searchConfig.Port))
		if err != nil {
			return nil, fmt.Errorf("Error creating connection: %v", err)
		}
	}
	// Per-operation timeout for the established connection.
	lConn.SetTimeout(time.Duration(l.Config.ConnectionTimeout) * time.Millisecond)
	return lConn, nil
}
// GenerateToken generates token.
// It authenticates the "username:password" credential carried in
// jsonInput["code"] against LDAP (binding the service account first when
// auth is not yet enabled), optionally verifies group membership in
// "required" access mode, and builds the token from the user's LDAP
// record. Returns the token, an HTTP-style status code, and an error.
//
// FIXES: (1) the original indexed split[1] unconditionally, panicking when
// the code contained no ":"; (2) the connection leaked when the user bind
// failed, and two separate defers could register on other paths — a single
// defer right after dialing now closes it exactly once on every path.
func (l *LClient) GenerateToken(jsonInput map[string]string) (model.Token, int, error) {
	log.Info("Now generating Ldap token")
	searchConfig := l.SearchConfig
	//getLdapToken:ADTokenCreator
	//getIdentities: ADIdentityProvider
	var status int
	split := strings.SplitN(jsonInput["code"], ":", 2)
	if len(split) != 2 {
		// Malformed credential: reject instead of panicking on split[1].
		return nilToken, 401, fmt.Errorf("Failed to login, code must be of the form username:password")
	}
	username, password := split[0], split[1]
	externalID := getUserExternalID(username, l.Config.LoginDomain)
	if password == "" {
		status = 401
		return nilToken, status, fmt.Errorf("Failed to login, password not provided")
	}
	lConn, err := l.newConn()
	if err != nil {
		return nilToken, status, err
	}
	// Close on every return path (userRecord finishes with the connection
	// before this function returns).
	defer lConn.Close()
	if !l.Enabled {
		log.Debug("Bind service account username password")
		if l.SearchConfig.BindPassword == "" {
			status = 401
			return nilToken, status, fmt.Errorf("Failed to login, service account password not provided")
		}
		sausername := getUserExternalID(l.SearchConfig.BindDN, l.Config.LoginDomain)
		err = lConn.Bind(sausername, l.SearchConfig.BindPassword)
		if err != nil {
			if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
				status = 401
			}
			return nilToken, status, fmt.Errorf("Error in ldap bind of service account: %v", err)
		}
	}
	log.Debug("Binding username password")
	err = lConn.Bind(externalID, password)
	if err != nil {
		if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
			status = 401
		}
		return nilToken, status, fmt.Errorf("Error in ldap bind: %v", err)
	}
	originalLogin := username
	// Strip a DOMAIN\ prefix before building the search filter.
	samName := username
	if strings.Contains(username, `\`) {
		samName = strings.SplitN(username, `\`, 2)[1]
	}
	query := "(" + l.Config.UserLoginField + "=" + ldap.EscapeFilter(samName) + ")"
	if l.AccessMode == "required" {
		// Restrict the lookup to allowed identities/groups.
		groupFilter, err := l.getAllowedIdentitiesFilter()
		if err != nil {
			return nilToken, status, err
		}
		if len(groupFilter) > 1 {
			groupQuery := "(&" + query + groupFilter + ")"
			query = groupQuery
		}
		log.Debugf("Query for required mode: %s", query)
		search := ldap.NewSearchRequest(l.Config.Domain,
			ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
			query,
			searchConfig.UserSearchAttributes, nil)
		result, err := lConn.Search(search)
		if err != nil {
			return nilToken, status, err
		}
		l.logResult(result, "GenerateToken")
		if len(result.Entries) < 1 {
			return nilToken, 403, errors.Errorf("Cannot locate user information for %s", search.Filter)
		} else if len(result.Entries) > 1 {
			return nilToken, 403, errors.New("More than one result")
		}
	}
	log.Debugf("LDAP Search query: {%s}", query)
	search := ldap.NewSearchRequest(l.Config.Domain,
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		query,
		searchConfig.UserSearchAttributes, nil)
	return l.userRecord(search, lConn, "GenerateToken", originalLogin)
}
// getIdentitiesFromSearchResult turns the first entry of an LDAP search
// result into the user's own identity plus the identities of all groups
// listed in its memberOf attribute (resolved in batches of 50 DNs).
func (l *LClient) getIdentitiesFromSearchResult(result *ldap.SearchResult) ([]client.Identity, error) {
	// getIdentities(SearchResult result): ADIdentityProvider
	c := l.ConstantsConfig
	// Only the first entry is considered; callers validate single-entry results.
	entry := result.Entries[0]
	if !l.hasPermission(entry.Attributes, l.Config) {
		return []client.Identity{}, fmt.Errorf("Permission denied")
	}
	identityList := []client.Identity{}
	memberOf := entry.GetAttributeValues(c.MemberOfAttribute)
	// NOTE(review): this initial value is dead — user is reassigned below
	// before its first use.
	user := &client.Identity{}
	log.Debugf("ADConstants userMemberAttribute() {%s}", c.MemberOfAttribute)
	log.Debugf("SearchResult memberOf attribute {%s}", memberOf)
	// isType
	// The entry must carry the configured user objectClass; otherwise no
	// identities are produced at all.
	isType := false
	objectClass := entry.GetAttributeValues(c.ObjectClassAttribute)
	for _, obj := range objectClass {
		if strings.EqualFold(string(obj), l.Config.UserObjectClass) {
			isType = true
		}
	}
	if !isType {
		return []client.Identity{}, nil
	}
	user, err := l.attributesToIdentity(entry.Attributes, result.Entries[0].DN, c.UserScope)
	if err != nil {
		return []client.Identity{}, err
	}
	if user != nil {
		identityList = append(identityList, *user)
	}
	if len(memberOf) != 0 {
		lConn, err := l.newConn()
		if err != nil {
			return []client.Identity{}, fmt.Errorf("Error in getIdentitiesFromSearchResult: %v", err)
		}
		// Resolve group identities 50 DNs per query to bound filter size.
		for i := 0; i < len(memberOf); i += 50 {
			batch := memberOf[i:min(i+50, len(memberOf))]
			identityListBatch, err := l.GetGroupIdentity(batch, lConn)
			if err != nil {
				// NOTE(review): this early return leaves lConn open — the
				// deferred Close below is only registered after the loop.
				return []client.Identity{}, err
			}
			identityList = append(identityList, identityListBatch...)
		}
		defer lConn.Close()
	}
	return identityList, nil
}
// min returns the smaller of its two arguments.
func min(a int, b int) int {
	if b < a {
		return b
	}
	return a
}
// GetGroupIdentity resolves a batch of group DNs into identities using the
// supplied connection. When the service-account bind is rejected with
// invalid credentials while auth is enabled, it degrades gracefully by
// synthesizing minimal identities from the DNs themselves.
func (l *LClient) GetGroupIdentity(groupDN []string, lConn *ldap.Conn) ([]client.Identity, error) {
	c := l.ConstantsConfig
	// Bind before query
	// If service acc bind fails, and auth is on, return identity formed using DN
	serviceAccountUsername := getUserExternalID(l.Config.ServiceAccountUsername, l.Config.LoginDomain)
	err := lConn.Bind(serviceAccountUsername, l.Config.ServiceAccountPassword)
	if err != nil {
		if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) && l.Enabled {
			// Fallback: every requested DN becomes a bare group identity.
			identityList := []client.Identity{}
			for _, distinguishedName := range groupDN {
				identity := &client.Identity{
					Resource: client.Resource{
						Type: "identity",
					},
					ExternalIdType: c.GroupScope,
					ExternalId:     distinguishedName,
					Name:           distinguishedName,
					Login:          distinguishedName,
					User:           false,
				}
				identity.Resource.Id = c.GroupScope + ":" + distinguishedName
				identityList = append(identityList, *identity)
			}
			return identityList, nil
		}
		return []client.Identity{}, fmt.Errorf("Error in ldap bind: %v", err)
	}
	// Build one OR filter matching any of the requested DNs, ANDed with the
	// group objectClass restriction.
	filter := "(" + c.ObjectClassAttribute + "=" + l.Config.GroupObjectClass + ")"
	query := "(|"
	for _, attrib := range groupDN {
		query += "(distinguishedName=" + ldap.EscapeFilter(attrib) + ")"
	}
	query += ")"
	query = "(&" + filter + query + ")"
	log.Debugf("Query for pulling user's groups: %s", query)
	// Groups may live in a dedicated search domain.
	searchDomain := l.Config.Domain
	if l.Config.GroupSearchDomain != "" {
		searchDomain = l.Config.GroupSearchDomain
	}
	search := ldap.NewSearchRequest(searchDomain,
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		query,
		l.SearchConfig.GroupSeachAttributes, nil)
	result, err := lConn.Search(search)
	if err != nil {
		return []client.Identity{}, fmt.Errorf("Error %v in search query : %v", err, query)
	}
	l.logResult(result, "GetGroupIdentity")
	identityList := []client.Identity{}
	for _, e := range result.Entries {
		identity, err := l.attributesToIdentity(e.Attributes, e.DN, c.GroupScope)
		if err != nil {
			// Log and keep going; one bad entry should not drop the batch.
			log.Errorf("Error %v creating identity for group: %s", err, e.DN)
			continue
		}
		if identity == nil {
			log.Errorf("Group identity not returned for group: %s", e.DN)
			continue
		}
		if !reflect.DeepEqual(identity, nilIdentity) {
			identityList = append(identityList, *identity)
		}
	}
	return identityList, nil
}
// getList splits identitiesStr on separator and trims surrounding
// whitespace from every resulting element.
func getList(identitiesStr string, separator string) []string {
	parts := strings.Split(identitiesStr, separator)
	trimmed := make([]string, len(parts))
	for i, p := range parts {
		trimmed[i] = strings.TrimSpace(p)
	}
	return trimmed
}
// savedIdentities resolves each configured "scope:externalID" entry into a
// full identity via GetIdentity. Entries that fail to resolve are logged
// and skipped rather than aborting the whole list.
func (l *LClient) savedIdentities(allowedIdentities []string) ([]client.Identity, error) {
	identityList := []client.Identity{}
	if len(allowedIdentities) == 0 {
		return identityList, nil
	}
	for _, id := range allowedIdentities {
		split := strings.SplitN(id, ":", 2)
		if len(split) != 2 {
			// Previously split[1] was indexed unconditionally and panicked
			// on an entry without a ':'; skip malformed entries instead.
			log.Errorf("Skipping malformed identity %q: expected scope:externalID", id)
			continue
		}
		identity, err := l.GetIdentity(split[1], split[0])
		if err != nil {
			log.Errorf("Error in getting identity %v: %v", id, err)
			continue
		}
		if !reflect.DeepEqual(identity, nilIdentity) {
			identityList = append(identityList, identity)
		}
	}
	return identityList, nil
}
// getAllowedIdentitiesFilter builds an LDAP OR-filter matching any of the
// configured allowed identities: groups match via memberof, everything else
// via distinguishedName. Returns "" when no identities are configured.
func (l *LClient) getAllowedIdentitiesFilter() (string, error) {
	c := l.ConstantsConfig
	// fromHashSeparatedString()
	allowed := getList(l.AllowedIdentities, GetIdentitySeparator())
	identities, err := l.savedIdentities(allowed)
	if err != nil {
		return "", err
	}
	var b strings.Builder
	for _, identity := range identities {
		if strings.EqualFold(c.GroupScope, identity.ExternalIdType) {
			b.WriteString("(memberof=")
		} else {
			b.WriteString("(distinguishedName=")
		}
		b.WriteString(ldap.EscapeFilter(identity.ExternalId))
		b.WriteString(")")
	}
	if b.Len() == 0 {
		return "", nil
	}
	return "(|" + b.String() + ")", nil
}
// GetIdentity resolves one identity (user or group) by its distinguished
// name and scope. When the service-account bind is rejected with invalid
// credentials while auth is enabled, an identity is synthesized from the
// DN instead of returning an error.
func (l *LClient) GetIdentity(distinguishedName string, scope string) (client.Identity, error) {
	//getIdentity(String distinguishedName, String scope): LDAPIdentityProvider
	c := l.ConstantsConfig
	var filter string
	searchConfig := l.SearchConfig
	var search *ldap.SearchRequest
	// scope must be one of the two configured scopes (user or group).
	if c.Scopes[0] != scope && c.Scopes[1] != scope {
		return nilIdentity, fmt.Errorf("Invalid scope")
	}
	// getObject()
	// Parse the DN locally and convert its RDNs into entry attributes so
	// type/permission can be pre-checked before any network round-trip.
	var attributes []*ldap.AttributeTypeAndValue
	var attribs []*ldap.EntryAttribute
	object, err := ldap.ParseDN(distinguishedName)
	if err != nil {
		return nilIdentity, err
	}
	for _, rdns := range object.RDNs {
		for _, attr := range rdns.Attributes {
			// NOTE(review): attributes is populated but never read after
			// this loop; only attribs is consulted below.
			attributes = append(attributes, attr)
			entryAttr := ldap.NewEntryAttribute(attr.Type, []string{attr.Value})
			attribs = append(attribs, entryAttr)
		}
	}
	if !isType(attribs, scope) && !l.hasPermission(attribs, l.Config) {
		log.Errorf("Failed to get object %s", distinguishedName)
		return nilIdentity, nil
	}
	// Restrict the base-object search to the objectClass of the scope.
	if strings.EqualFold(c.UserScope, scope) {
		filter = "(" + c.ObjectClassAttribute + "=" + l.Config.UserObjectClass + ")"
	} else {
		filter = "(" + c.ObjectClassAttribute + "=" + l.Config.GroupObjectClass + ")"
	}
	log.Debugf("Query for GetIdentity(%s): %s", distinguishedName, filter)
	lConn, err := l.newConn()
	if err != nil {
		return nilIdentity, fmt.Errorf("Error %v creating connection", err)
	}
	// Bind before query
	// If service acc bind fails, and auth is on, return identity formed using DN
	serviceAccountUsername := getUserExternalID(l.Config.ServiceAccountUsername, l.Config.LoginDomain)
	err = lConn.Bind(serviceAccountUsername, l.Config.ServiceAccountPassword)
	defer lConn.Close()
	if err != nil {
		if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) && l.Enabled {
			// Fallback identity built solely from the DN.
			user := strings.EqualFold(c.UserScope, scope)
			identity := &client.Identity{
				Resource: client.Resource{
					Type: "identity",
				},
				ExternalIdType: scope,
				ExternalId:     distinguishedName,
				Name:           distinguishedName,
				Login:          distinguishedName,
				User:           user,
			}
			identity.Resource.Id = scope + ":" + distinguishedName
			return *identity, nil
		}
		return nilIdentity, fmt.Errorf("Error in ldap bind: %v", err)
	}
	// Base-object search on the DN itself; exactly one entry is expected.
	if strings.EqualFold(c.UserScope, scope) {
		search = ldap.NewSearchRequest(distinguishedName,
			ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,
			filter,
			searchConfig.UserSearchAttributes, nil)
	} else {
		search = ldap.NewSearchRequest(distinguishedName,
			ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,
			filter,
			searchConfig.GroupSeachAttributes, nil)
	}
	result, err := lConn.Search(search)
	if err != nil {
		return nilIdentity, fmt.Errorf("Error %v in search query : %v", err, filter)
	}
	l.logResult(result, "GetIdentity")
	if len(result.Entries) < 1 {
		return nilIdentity, fmt.Errorf("No identities can be retrieved")
	} else if len(result.Entries) > 1 {
		return nilIdentity, fmt.Errorf("More than one result found")
	}
	entry := result.Entries[0]
	entryAttributes := entry.Attributes
	if !l.hasPermission(entry.Attributes, l.Config) {
		return nilIdentity, fmt.Errorf("Permission denied")
	}
	identity, err := l.attributesToIdentity(entryAttributes, distinguishedName, scope)
	if err != nil {
		return nilIdentity, err
	}
	if identity == nil {
		return nilIdentity, fmt.Errorf("User Identity not returned for LDAP")
	}
	return *identity, nil
}
// attributesToIdentity converts raw LDAP entry attributes into an Identity.
// dnStr becomes the external id and scope its type; whether the entry is a
// user or a group is decided by its objectClass. Returns (nil, nil) when
// the entry is neither a user nor a group.
func (l *LClient) attributesToIdentity(attribs []*ldap.EntryAttribute, dnStr string, scope string) (*client.Identity, error) {
	var externalIDType, accountName, externalID, login string
	user := false
	externalID = dnStr
	externalIDType = scope
	if isType(attribs, l.Config.UserObjectClass) {
		for _, attr := range attribs {
			if strings.EqualFold(attr.Name, l.Config.UserNameField) {
				if len(attr.Values) != 0 {
					accountName = attr.Values[0]
				} else {
					accountName = externalID
				}
			}
			if strings.EqualFold(attr.Name, l.Config.UserLoginField) {
				// Guard the index: an attribute may be present with no values
				// (previously this indexed Values[0] unconditionally).
				if len(attr.Values) != 0 {
					login = attr.Values[0]
				}
			}
		}
		user = true
	} else if isType(attribs, l.Config.GroupObjectClass) {
		for _, attr := range attribs {
			if strings.EqualFold(attr.Name, l.Config.GroupNameField) {
				if len(attr.Values) != 0 {
					accountName = attr.Values[0]
				} else {
					accountName = externalID
				}
			}
			if strings.EqualFold(attr.Name, l.Config.UserLoginField) {
				if len(attr.Values) > 0 && attr.Values[0] != "" {
					login = attr.Values[0]
				}
			}
		}
		// Fall back to the group name when no login attribute was found.
		// Previously this fallback sat in an else branch inside the loop and
		// re-executed for every non-matching attribute, clobbering a login
		// value found earlier in the attribute list.
		if login == "" {
			login = accountName
		}
	} else {
		log.Errorf("Failed to get attributes for %s", dnStr)
		return nil, nil
	}
	identity := &client.Identity{
		Resource: client.Resource{
			Type: "identity",
		},
		ExternalIdType: externalIDType,
		ExternalId:     externalID,
		Name:           accountName,
		Login:          login,
		User:           user,
	}
	identity.Resource.Id = externalIDType + ":" + externalID
	return identity, nil
}
// isType reports whether the entry attributes contain an objectClass value
// equal (case-insensitively) to varType.
func isType(search []*ldap.EntryAttribute, varType string) bool {
	for _, attrib := range search {
		if attrib.Name != "objectClass" {
			continue
		}
		for _, val := range attrib.Values {
			if strings.EqualFold(val, varType) {
				return true
			}
		}
	}
	log.Debugf("Failed to determine if object is type: %s", varType)
	return false
}
// GetIdentitySeparator returns the token used to separate identities in
// the allowed-identities configuration string.
func GetIdentitySeparator() string {
	const separator = "#"
	return separator
}
//GetUserIdentity returns the first identity whose external id type equals
//userType, plus a flag reporting whether one was found.
func GetUserIdentity(identities []client.Identity, userType string) (client.Identity, bool) {
	for i := range identities {
		if identities[i].ExternalIdType == userType {
			return identities[i], true
		}
	}
	var none client.Identity
	return none, false
}
//SearchIdentities searches every configured scope for name and returns the
//combined results.
func (l *LClient) SearchIdentities(name string, exactMatch bool) ([]client.Identity, error) {
	combined := []client.Identity{}
	for _, scope := range l.ConstantsConfig.Scopes {
		found, err := l.searchIdentities(name, scope, exactMatch)
		if err != nil {
			return []client.Identity{}, err
		}
		combined = append(combined, found...)
	}
	return combined, nil
}
// searchIdentities escapes name for use in an LDAP filter and dispatches to
// the user or group search depending on scope.
func (l *LClient) searchIdentities(name string, scope string, exactMatch bool) ([]client.Identity, error) {
	c := l.ConstantsConfig
	escaped := ldap.EscapeFilter(name)
	switch {
	case strings.EqualFold(c.UserScope, scope):
		return l.searchUser(escaped, exactMatch)
	case strings.EqualFold(c.GroupScope, scope):
		return l.searchGroup(escaped, exactMatch)
	default:
		return nil, fmt.Errorf("Invalid scope")
	}
}
// searchUser builds a user search query (exact or substring match on the
// configured user search field) and runs it in the user scope.
func (l *LClient) searchUser(name string, exactMatch bool) ([]client.Identity, error) {
	c := l.ConstantsConfig
	pattern := "*" + name + "*"
	if exactMatch {
		pattern = name
	}
	query := "(&(" + l.Config.UserSearchField + "=" + pattern + ")(" +
		c.ObjectClassAttribute + "=" + l.Config.UserObjectClass + "))"
	log.Debugf("LDAPIdentityProvider searchUser query: %s", query)
	return l.searchLdap(query, c.UserScope)
}
// searchGroup builds a group search query (exact or substring match on the
// configured group search field) and runs it in the group scope.
func (l *LClient) searchGroup(name string, exactMatch bool) ([]client.Identity, error) {
	c := l.ConstantsConfig
	pattern := "*" + name + "*"
	if exactMatch {
		pattern = name
	}
	query := "(&(" + l.Config.GroupSearchField + "=" + pattern + ")(" +
		c.ObjectClassAttribute + "=" + l.Config.GroupObjectClass + "))"
	log.Debugf("LDAPIdentityProvider searchGroup query: %s", query)
	return l.searchLdap(query, c.GroupScope)
}
// searchLdap executes query against the configured domain (or group search
// domain for group scope), binding with the service account first, and
// converts every returned entry into an identity.
func (l *LClient) searchLdap(query string, scope string) ([]client.Identity, error) {
	c := l.ConstantsConfig
	searchConfig := l.SearchConfig
	identities := []client.Identity{}
	var search *ldap.SearchRequest
	searchDomain := l.Config.Domain
	if strings.EqualFold(c.UserScope, scope) {
		search = ldap.NewSearchRequest(searchDomain,
			ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
			query,
			searchConfig.UserSearchAttributes, nil)
	} else {
		if l.Config.GroupSearchDomain != "" {
			searchDomain = l.Config.GroupSearchDomain
		}
		search = ldap.NewSearchRequest(searchDomain,
			ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
			query,
			searchConfig.GroupSeachAttributes, nil)
	}
	lConn, err := l.newConn()
	if err != nil {
		return []client.Identity{}, fmt.Errorf("Error %v creating connection", err)
	}
	// Register Close before the bind so a bind failure no longer leaks the
	// connection (previously the defer sat after the bind's error return).
	defer lConn.Close()
	// Bind before query
	serviceAccountUsername := getUserExternalID(l.Config.ServiceAccountUsername, l.Config.LoginDomain)
	err = lConn.Bind(serviceAccountUsername, l.Config.ServiceAccountPassword)
	if err != nil {
		return nil, fmt.Errorf("Error %v in ldap bind", err)
	}
	results, err := lConn.Search(search)
	if err != nil {
		// A plain type assertion replaces the previous reflect round-trip.
		ldapErr, ok := err.(*ldap.Error)
		if !ok || ldapErr.ResultCode != ldap.LDAPResultNoSuchObject {
			return []client.Identity{}, fmt.Errorf("When searching ldap from /v1/identity Failed to search: %s, error: %#v", query, err)
		}
		// "No such object" means nothing matched the search base. Previously
		// execution fell through and dereferenced a possibly-nil results,
		// which could panic; return the empty list instead.
		return identities, nil
	}
	for _, entry := range results.Entries {
		identity, err := l.attributesToIdentity(entry.Attributes, entry.DN, scope)
		if err != nil {
			return []client.Identity{}, err
		}
		identities = append(identities, *identity)
	}
	return identities, nil
}
// TestLogin validates a candidate LDAP configuration end-to-end: it dials
// the server, binds the service account, binds the user, looks the user up
// with the new schema settings, and checks that the resulting DN still
// matches the currently logged-in user (accessToken). Returns an
// HTTP-style status code alongside any error.
func (l *LClient) TestLogin(testAuthConfig *model.TestAuthConfig, accessToken string, originalLogin string) (int, error) {
	var lConn *ldap.Conn
	var err error
	var status int
	status = 500
	// Code carries "username:password". Guard the split — the previous
	// unchecked split[1] panicked when no ':' was present.
	split := strings.SplitN(testAuthConfig.Code, ":", 2)
	if len(split) != 2 {
		return 401, fmt.Errorf("Failed to login, code must be of the form username:password")
	}
	username, password := split[0], split[1]
	if username == "" {
		username = originalLogin
	}
	externalID := getUserExternalID(username, testAuthConfig.AuthConfig.LdapConfig.LoginDomain)
	if password == "" {
		return 401, fmt.Errorf("Failed to login, password not provided")
	}
	ldapServer := testAuthConfig.AuthConfig.LdapConfig.Server
	ldapPort := testAuthConfig.AuthConfig.LdapConfig.Port
	ldap.DefaultTimeout = time.Duration(testAuthConfig.AuthConfig.LdapConfig.ConnectionTimeout) * time.Millisecond
	log.Debug("TestLogin: Now creating Ldap connection")
	if testAuthConfig.AuthConfig.LdapConfig.TLS {
		tlsConfig := &tls.Config{RootCAs: l.ConstantsConfig.CAPool, InsecureSkipVerify: false, ServerName: ldapServer}
		lConn, err = ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapPort), tlsConfig)
		if err != nil {
			return status, fmt.Errorf("Error creating ssl connection: %v", err)
		}
	} else {
		lConn, err = ldap.Dial("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapPort))
		if err != nil {
			return status, fmt.Errorf("Error creating connection: %v", err)
		}
	}
	lConn.SetTimeout(time.Duration(testAuthConfig.AuthConfig.LdapConfig.ConnectionTimeout) * time.Millisecond)
	defer lConn.Close()
	if testAuthConfig.AuthConfig.LdapConfig.ServiceAccountPassword == "" {
		status = 401
		return status, fmt.Errorf("Failed to login, service account password not provided")
	}
	log.Debug("TestLogin: Binding service account username password")
	sausername := getUserExternalID(testAuthConfig.AuthConfig.LdapConfig.ServiceAccountUsername, testAuthConfig.AuthConfig.LdapConfig.LoginDomain)
	err = lConn.Bind(sausername, testAuthConfig.AuthConfig.LdapConfig.ServiceAccountPassword)
	if err != nil {
		if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
			status = 401
		}
		return status, fmt.Errorf("Error in ldap bind for service account: %v", err)
	}
	log.Debug("TestLogin: Binding username password")
	err = lConn.Bind(externalID, password)
	if err != nil {
		if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
			status = 401
		}
		return status, fmt.Errorf("Error in ldap bind: %v", err)
	}
	// Strip a DOMAIN\ prefix before building the login filter.
	samName := username
	if strings.Contains(username, `\`) {
		samName = strings.SplitN(username, `\`, 2)[1]
	}
	query := "(" + testAuthConfig.AuthConfig.LdapConfig.UserLoginField + "=" + ldap.EscapeFilter(samName) + ")"
	log.Debugf("LDAP Search query: {%s}", query)
	testUserSearchAttributes := []string{l.ConstantsConfig.MemberOfAttribute, l.ConstantsConfig.ObjectClassAttribute,
		testAuthConfig.AuthConfig.LdapConfig.UserObjectClass, testAuthConfig.AuthConfig.LdapConfig.UserLoginField,
		testAuthConfig.AuthConfig.LdapConfig.UserNameField, testAuthConfig.AuthConfig.LdapConfig.UserSearchField,
		testAuthConfig.AuthConfig.LdapConfig.UserEnabledAttribute}
	search := ldap.NewSearchRequest(testAuthConfig.AuthConfig.LdapConfig.Domain,
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		query, testUserSearchAttributes, nil)
	result, err := lConn.Search(search)
	if err != nil {
		return status, fmt.Errorf("Error searching the user information with new server settings: %v", err)
	}
	l.logResult(result, "TestLogin")
	if len(result.Entries) < 1 {
		return status, fmt.Errorf("Authentication succeeded, but cannot locate the user information with new server schema settings")
	} else if len(result.Entries) > 1 {
		return status, fmt.Errorf("Multiple users found for the username with new server settings")
	}
	entry := result.Entries[0]
	if !l.hasPermission(entry.Attributes, &testAuthConfig.AuthConfig.LdapConfig) {
		return status, fmt.Errorf("Authentication succeeded, but user is probably disabled in the new server settings")
	}
	userIdentity, err := l.attributesToIdentity(entry.Attributes, entry.DN, l.ConstantsConfig.UserScope)
	if err != nil {
		return status, fmt.Errorf("Authentication succeeded, but error reading the user information with new server schema settings: %v", err)
	}
	if userIdentity == nil {
		return status, fmt.Errorf("Authentication succeeded, but cannot search user information with new server settings")
	}
	// The DN of the freshly found user must match the current access token.
	if userIdentity.ExternalId != accessToken {
		return status, fmt.Errorf("Authentication succeeded, but the user returned has a different Distinguished Name than you are currently logged in to. Changing the underlying directory tree is not supported")
	}
	// NOTE(review): success returns status 500 (the initial value) with a
	// nil error; callers appear to rely on the error alone.
	return status, nil
}
// getUserExternalID prefixes username with loginDomain ("DOMAIN\user")
// unless the username already carries a backslash-separated domain or no
// login domain is configured.
func getUserExternalID(username string, loginDomain string) string {
	if strings.Contains(username, `\`) {
		return username
	}
	if loginDomain == "" {
		return username
	}
	return loginDomain + `\` + username
}
// hasPermission reports whether the entry may log in. Non-user entries
// (no matching user objectClass) are always permitted. For users, the
// configured "enabled" attribute is parsed as an integer bit field and
// checked against the configured disabled-bit mask.
func (l *LClient) hasPermission(attributes []*ldap.EntryAttribute, config *model.LdapConfig) bool {
	var permission int64
	if !isType(attributes, config.UserObjectClass) {
		return true
	}
	// NOTE(review): if several attributes match UserEnabledAttribute, the
	// last parsed value wins.
	for _, attr := range attributes {
		if attr.Name == config.UserEnabledAttribute {
			if len(attr.Values) > 0 && attr.Values[0] != "" {
				intAttr, err := strconv.ParseInt(attr.Values[0], 10, 64)
				if err != nil {
					// Unparseable enabled attribute: fail closed.
					log.Errorf("Failed to get USER_ENABLED_ATTRIBUTE, error: %v", err)
					return false
				}
				permission = intAttr
			} else {
				// Attribute present but empty: treat the user as enabled.
				return true
			}
		}
	}
	// The user is considered disabled only when every bit of the configured
	// mask is set in the attribute value.
	permission = permission & config.UserDisabledBitMask
	return permission != config.UserDisabledBitMask
}
// RefreshToken re-reads the user entry whose DN is carried in
// json["accessToken"] and rebuilds a token from it via userRecord.
func (l *LClient) RefreshToken(json map[string]string) (model.Token, int, error) {
	c := l.ConstantsConfig
	searchConfig := l.SearchConfig
	// Base-object search on the DN itself, restricted to user objects.
	query := "(" + c.ObjectClassAttribute + "=" + l.Config.UserObjectClass + ")"
	search := ldap.NewSearchRequest(json["accessToken"],
		ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,
		query,
		searchConfig.UserSearchAttributes, nil)
	// NOTE(review): status stays 0 unless the bind fails with invalid
	// credentials, so connection errors surface with status 0, not 500.
	var status int
	lConn, err := l.newConn()
	if err != nil {
		return nilToken, status, fmt.Errorf("Error %v creating connection", err)
	}
	// Bind before query
	serviceAccountUsername := getUserExternalID(l.Config.ServiceAccountUsername, l.Config.LoginDomain)
	err = lConn.Bind(serviceAccountUsername, l.Config.ServiceAccountPassword)
	if err != nil {
		if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
			status = 401
		}
		return nilToken, status, fmt.Errorf("Error %v in ldap bind", err)
	}
	defer lConn.Close()
	return l.userRecord(search, lConn, "RefreshToken", "")
}
// userRecord runs search on lConn, validates that exactly one entry came
// back, and assembles a Token carrying the user's identity list. name only
// tags debug logging; originalLogin is copied into the token unchanged.
func (l *LClient) userRecord(search *ldap.SearchRequest, lConn *ldap.Conn, name string, originalLogin string) (model.Token, int, error) {
	var status int
	c := l.ConstantsConfig
	result, err := lConn.Search(search)
	if err != nil {
		return nilToken, status, err
	}
	method := "userRecord+" + name
	l.logResult(result, method)
	// NOTE(review): zero or multiple matches return a nil error with the
	// zero token and status 0 — callers cannot distinguish this from
	// success by the error alone.
	if len(result.Entries) < 1 {
		log.Errorf("Cannot locate user information for %s", search.Filter)
		return nilToken, status, nil
	} else if len(result.Entries) > 1 {
		log.Error("More than one result")
		return nilToken, status, nil
	}
	identityList, err := l.getIdentitiesFromSearchResult(result)
	if err != nil {
		return nilToken, status, err
	}
	var token = model.Token{Resource: client.Resource{
		Type: "token",
	}}
	token.IdentityList = identityList
	token.Type = c.LdapJwt
	userIdentity, ok := GetUserIdentity(identityList, c.UserScope)
	if !ok {
		return nilToken, status, fmt.Errorf("User identity not found for Ldap")
	}
	// The user's external id (its DN) doubles as the access token.
	token.ExternalAccountID = userIdentity.ExternalId
	token.AccessToken = userIdentity.ExternalId
	token.OriginalLogin = originalLogin
	return token, status, nil
}
// logResult dumps a search result's entries at debug level; it is a no-op
// unless debug logging is enabled.
func (l *LClient) logResult(result *ldap.SearchResult, name string) {
	if log.GetLevel() != log.DebugLevel {
		return
	}
	for idx, e := range result.Entries {
		buffer := bytes.Buffer{}
		for _, v := range e.Attributes {
			buffer.WriteString(v.Name)
			buffer.WriteString(":[")
			// strings.Join produces the same space-separated output as the
			// old manual loop but tolerates attributes with zero values;
			// the previous code indexed Values[len(Values)-1] and panicked
			// on an empty value list.
			buffer.WriteString(strings.Join(v.Values, " "))
			buffer.WriteString("] ")
		}
		log.Debugf("(%s) Query Result %v: DN: %v, Attributes: %v", name, idx, e.DN, buffer.String())
	}
}
|
/*
Package base provides base data structures and functions for gorse.
The base data structures and functions include:
* Parallel Scheduler
* Hyper-parameters Management
* Random Generator
* Similarity Metrics
* Sparse Data Structures
* Numeric Computing
* Options Management
*/
package base
|
package eth
import (
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/vitelabs/go-vite-gw/setting"
"os"
)
// Erc2ViteABI holds the contract ABI parsed once at startup from the path
// configured in setting.EthSetting.Erc2ViteABIPath.
var Erc2ViteABI *abi.ABI

// init loads and parses the ABI file; the package cannot function without
// it, so any failure aborts process startup.
func init() {
	contractAbi, e := GetContractAbi(setting.EthSetting.Erc2ViteABIPath)
	if e != nil {
		panic(e)
	}
	Erc2ViteABI = contractAbi
}
// GetContractAbi reads and parses an ABI JSON file from filePath.
func GetContractAbi(filePath string) (*abi.ABI, error) {
	file, e := os.Open(filePath)
	if e != nil {
		return nil, e
	}
	defer file.Close()
	// Named "parsed" so the local value no longer shadows the abi package
	// (the original declared a local variable also called abi).
	parsed, e := abi.JSON(file)
	if e != nil {
		return nil, e
	}
	return &parsed, nil
}
|
package binary
import (
"encoding/binary"
"errors"
)
//ByteToIntByLittleEndian is a byte[] to int converter.
//c# BitConverter.GetBytes is LittleEndian.
//LittleEndian stores the least significant byte first; BigEndian stores the
//most significant byte first. Ex: 0x12345678 little-endian = 78 56 34 12.
func ByteToIntByLittleEndian(data []byte) (int, error) {
	//Need at least 4 bytes to decode a uint32.
	if len(data) < 4 {
		// Fixed typo in the error message ("enougth" -> "enough").
		return 0, errors.New("byteToInt32 data length not enough")
	}
	return int(binary.LittleEndian.Uint32(data)), nil
}
//Uint32ToByteByBigEndian is a uint32 to byte[] converter.
//The most significant byte of num is placed first.
func Uint32ToByteByBigEndian(num uint32) []byte {
	out := []byte{
		byte(num >> 24),
		byte(num >> 16),
		byte(num >> 8),
		byte(num),
	}
	return out
}
//SingleByteToIntByBigEndian converts a single byte to its int value.
//A big-endian uint16 built from {0, data} is simply the byte's own value,
//so no intermediate buffer is needed. The error return is kept for
//backward compatibility and is always nil.
func SingleByteToIntByBigEndian(data byte) (int, error) {
	return int(data), nil
}
|
package dependencies
import (
"bufio"
"encoding/json"
"fmt"
"regexp"
"strings"
)
func ParsePythonRequirements(reader *bufio.Reader) []string {
packageNamesSet := map[string]bool{}
for {
lineBytes, _, err := reader.ReadLine()
if err != nil {
break
}
line := string(lineBytes)
line = strings.TrimSpace(line)
re := regexp.MustCompile(`[#&]+egg=([a-zA-Z0-9_\-.]+)`)
match := re.FindStringSubmatch(line)
if len(match) > 0 {
packageName := strings.ToLower(match[1])
packageNamesSet[packageName] = true
continue
}
line = strings.Split(line, "#")[0]
if line == "" {
continue
}
if strings.HasPrefix(line, "--requirement") {
continue
}
if strings.HasPrefix(line, "-r") {
continue
}
if strings.Contains(line, "://") {
continue
}
re = regexp.MustCompile(`^([a-zA-Z0-9_\-.]+)`)
match = re.FindStringSubmatch(line)
if len(match) > 0 {
packageName := strings.ToLower(match[1])
packageNamesSet[packageName] = true
continue
}
}
packageNames := []string{}
for k := range packageNamesSet {
packageNames = append(packageNames, k)
}
return packageNames
}
func ParsePackagesJsonFile(reader *bufio.Reader) ([]string, error) {
packageNamesSet := map[string]bool{}
d := json.NewDecoder(reader)
t := struct {
Dependencies *map[string]interface{} `json:"dependencies"`
DevDependencies *map[string]interface{} `json:"devDependencies"`
}{}
err := d.Decode(&t)
if err != nil {
return nil, err
}
processPackageName := func(dict *map[string]interface{}, npmPackageName string) {
if strings.HasPrefix(npmPackageName, "@") {
return
}
value, _ := (*dict)[npmPackageName]
version := fmt.Sprintf("%v", value)
version = strings.ToLower(version)
if strings.HasPrefix(version, "npm:") {
return
}
if strings.Contains(version, "://") {
return
}
packageNamesSet[npmPackageName] = true
}
if t.Dependencies != nil {
for npmPackageName := range *t.Dependencies {
processPackageName(t.Dependencies, npmPackageName)
}
}
if t.DevDependencies != nil {
for npmPackageName := range *t.DevDependencies {
processPackageName(t.DevDependencies, npmPackageName)
}
}
packageNames := []string{}
for k := range packageNamesSet {
packageNames = append(packageNames, k)
}
return packageNames, err
}
|
// package config 支持字符串、整型、以及数组 布尔型
package snailframe
import (
"github.com/BurntSushi/toml"
"os"
"path/filepath"
)
/*
type configNormalType map[string]interface{}
type config struct {
data interface{}
}*/
// NewConf loads the TOML config file named configName (located next to the
// executable) into configStrcut and returns the TOML decode metadata.
// Any failure to resolve the path or decode the file aborts the process.
func NewConf(configStrcut interface{}, configName string) (redata toml.MetaData) {
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		panic(err)
	}
	// filepath.Join builds an OS-correct path instead of "/"-concatenation.
	path := filepath.Join(dir, configName)
	data, err := toml.DecodeFile(path, configStrcut)
	if err != nil {
		panic(err)
	}
	return data
}
/*
//加载字符串数组
func (this config)GetSliceString(key string) (conList []string) {
if value, ok := this.data[key]; ok {
if val, rightType := value.([]interface{});rightType {
for _,v := range val {
if vstr, isString := v.(string);isString {
conList = append(conList, vstr)
}
}
}else {
panic("the config [" + key + "] type is not SliceString")
}
} else {
panic("the config [" + key + "] not exist")
}
return
}
//加载整数数组
func (this config)GetSliceInt(key string) (conList []int) {
if value, ok := this.data[key]; ok {
if val, rightType := value.([]interface{});rightType {
for _,v := range val {
if vstr, isString := v.(int64);isString {
conList = append(conList, int(vstr))
}
}
}else {
panic("the config [" + key + "] type is not SliceInt")
}
} else {
panic("the config [" + key + "] not exist")
}
return
}
//以字符串形式加载配置项
func (this config)GetString(key string) string {
if value, ok := this.data[key]; ok {
if val, rightType := value.(string); rightType {
return val
} else {
panic("the config [" + key + "] type is not string")
}
} else {
panic("the config [" + key + "] not exist")
}
}
//以整数形式加载配置项
func (this config)GetInt(key string) int {
if value, ok := this.data[key]; ok {
if val, rightType := value.(int64); rightType {
return int(val)
} else {
panic("the config [" + key + "] type is not int")
}
} else {
panic("the config [" + key + "] not exist")
}
}
//以布尔形式加载配置项
func (this config)GetBool(key string) bool {
if value, ok := this.data[key]; ok {
if val, rightType := value.(bool); rightType {
return val
} else {
panic("the config [" + key + "] type is not bool")
}
} else {
panic("the config [" + key + "] not exist")
}
}*/
|
package urlshortener
import (
"log"
"net/http"
"gopkg.in/yaml.v2"
)
// pathUrl is one entry of the YAML shortener configuration: a request path
// mapped to its redirect target.
type pathUrl struct {
	// NOTE(review): the yaml tag is "title" while the field is Path — this
	// looks like it should be `yaml:"path"`; confirm against the YAML
	// documents actually fed to YamlHandler before changing it.
	Path string `yaml:"title"`
	URL string `yaml:"url"`
}
func MapHandler(pathToUrls map[string]string, fallback http.Handler) http.HandlerFunc {
return func(rw http.ResponseWriter, r *http.Request) {
path := r.URL.Path
if dest, ok := pathToUrls[path]; ok {
http.Redirect(rw, r, dest, http.StatusFound)
return
}
fallback.ServeHTTP(rw, r)
}
}
// YamlHandler parses yamlByte into path->URL pairs and returns a MapHandler
// over them, delegating unknown paths to fallback.
// NOTE(review): a YAML parse error calls log.Fatalln and terminates the
// whole process; returning the error would require a signature change.
func YamlHandler(yamlByte []byte, fallback http.Handler) http.HandlerFunc {
	pathUrl, err := parseYaml(yamlByte)
	if err != nil {
		log.Fatalln(err)
	}
	pathUrlsMap := buildMap(pathUrl)
	return MapHandler(pathUrlsMap, fallback)
}
// parseYaml unmarshals a YAML document into a slice of pathUrl entries.
func parseYaml(yamlByte []byte) ([]pathUrl, error) {
	var entries []pathUrl
	if err := yaml.Unmarshal(yamlByte, &entries); err != nil {
		return nil, err
	}
	return entries, nil
}
// buildMap indexes the parsed entries by path for constant-time lookup.
func buildMap(pathUrls []pathUrl) map[string]string {
	index := make(map[string]string, len(pathUrls))
	for _, entry := range pathUrls {
		index[entry.Path] = entry.URL
	}
	return index
}
|
package data
// ItemCategories mirrors a JSON document that groups item base types by
// category and sub-category, plus a flat list of item names.
type ItemCategories struct {
	// ItemCategories is a list of {key, value} pairs: the outer key is the
	// category name; each inner pair maps a sub-category key to its list of
	// base item names.
	ItemCategories []struct {
		Category string `json:"key"`
		Value []struct {
			SubCategory string `json:"key"`
			Base []string `json:"value"`
		} `json:"value"`
	} `json:"itemCategories"`
	// Items is a flat list of item names.
	Items []string `json:"items"`
}
|
// Copyright 2018 Sergey Novichkov. All rights reserved.
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
package migrate
import (
"github.com/gozix/di"
"github.com/gozix/glue/v3"
gzSQL "github.com/gozix/sql/v3"
gzZap "github.com/gozix/zap/v3"
"github.com/gozix/sql-migrate/v3/internal/command"
)
type (
	// Bundle implements the glue.Bundle interface.
	Bundle struct {
		path       string // directory holding migration files
		table      string // migrations bookkeeping table name
		schema     string
		dialect    string
		connection string // name of the gozix/sql connection to use
	}
	// Option interface mutates a Bundle during construction.
	Option interface {
		apply(b *Bundle)
	}
	// optionFunc wraps a func so it satisfies the Option interface.
	optionFunc func(b *Bundle)
)
// BundleName is default definition name.
const BundleName = "sql-migrate"

// Bundle implements glue.Bundle interface.
// The blank assignment is a compile-time assertion of that fact.
var _ glue.Bundle = (*Bundle)(nil)
// Connection returns an Option that sets the bundle's connection name.
func Connection(value string) Option {
	return optionFunc(func(b *Bundle) { b.connection = value })
}
// Dialect returns an Option that sets the bundle's SQL dialect.
func Dialect(value string) Option {
	return optionFunc(func(b *Bundle) { b.dialect = value })
}
// Path returns an Option that sets the migrations directory.
func Path(value string) Option {
	return optionFunc(func(b *Bundle) { b.path = value })
}
// Table returns an Option that sets the migrations bookkeeping table.
func Table(value string) Option {
	return optionFunc(func(b *Bundle) { b.table = value })
}
// Schema returns an Option that sets the database schema.
func Schema(value string) Option {
	return optionFunc(func(b *Bundle) { b.schema = value })
}
// NewBundle creates a bundle instance with sensible defaults, then applies
// the given options in order.
func NewBundle(options ...Option) (b *Bundle) {
	b = &Bundle{
		path:       "migrations",
		table:      "migration",
		connection: gzSQL.DEFAULT,
	}
	for _, o := range options {
		o.apply(b)
	}
	return b
}
// Name implements the glue.Bundle interface.
func (b *Bundle) Name() string { return BundleName }
// Build implements the glue.Bundle interface. It registers the migrate CLI
// command (constructed with this bundle's settings) and its up/down
// subcommands, which are wired to the command via the shared tag.
func (b *Bundle) Build(builder di.Builder) error {
	var tag = "cli.cmd.migrate.subcommand"
	return builder.Apply(
		di.Provide(
			command.NewMigrateConstructor(b.path, b.table, b.schema, b.dialect, b.connection),
			// The constructor's first argument collects everything tagged
			// as a migrate subcommand.
			di.Constraint(0, di.WithTags(tag)),
			glue.AsCliCommand(),
		),
		di.Provide(command.NewMigrateDown, di.Tags{{
			Name: tag,
		}}),
		di.Provide(command.NewMigrateUp, di.Tags{{
			Name: tag,
		}}),
	)
}
// DependsOn implements the glue.DependsOn interface; this bundle requires
// the sql and zap bundles to be registered first.
func (b *Bundle) DependsOn() []string {
	return []string{gzSQL.BundleName, gzZap.BundleName}
}
// apply implements Option by invoking the wrapped function.
func (f optionFunc) apply(bundle *Bundle) { f(bundle) }
|
package galery
import (
"encoding/json"
"io/ioutil"
"net/http"
"regexp"
"strings"
"time"
"github.com/gorilla/mux"
"github.com/juliotorresmoreno/unravel-server/config"
"github.com/juliotorresmoreno/unravel-server/helper"
"github.com/juliotorresmoreno/unravel-server/middlewares"
"github.com/juliotorresmoreno/unravel-server/models"
"github.com/juliotorresmoreno/unravel-server/ws"
)
// NewRouter builds the gallery HTTP router. Every route is wrapped in
// middlewares.Protect, so a valid session is required. Fixed paths
// (/delete, /upload, /fotoPerfil) are registered before the parameterized
// /{galery} routes so the catch-all patterns do not shadow them.
func NewRouter(hub *ws.Hub) http.Handler {
	var mux = mux.NewRouter().StrictSlash(true)
	mux.HandleFunc("/", middlewares.Protect(ListarGalerias, hub, true)).Methods("GET")
	mux.HandleFunc("/", middlewares.Protect(Save, hub, true)).Methods("POST")
	mux.HandleFunc("/delete", middlewares.Protect(EliminarImagen, hub, true)).Methods("POST", "DELETE")
	mux.HandleFunc("/fotoPerfil", middlewares.Protect(GetFotoPerfil, hub, true)).Methods("GET")
	mux.HandleFunc("/upload", middlewares.Protect(Upload, hub, true)).Methods("POST")
	mux.HandleFunc("/fotoPerfil", middlewares.Protect(SetFotoPerfil, hub, true)).Methods("POST")
	mux.HandleFunc("/fotoPerfil/{usuario}", middlewares.Protect(GetFotoPerfil, hub, true)).Methods("GET")
	mux.HandleFunc("/{galery}/describe", middlewares.Protect(DescribeGaleria, hub, true)).Methods("GET")
	mux.HandleFunc("/{galery}/preview", middlewares.Protect(ViewPreview, hub, true)).Methods("GET")
	mux.HandleFunc("/{galery}/{imagen}", middlewares.Protect(ViewImagen, hub, true)).Methods("GET")
	mux.HandleFunc("/{galery}", middlewares.Protect(ListarImagenes, hub, true)).Methods("GET")
	return mux
}
// NewUserRouter builds the per-user gallery router, exposing the same
// read-only handlers as NewRouter but keyed by the {usuario} path
// variable. All routes require a valid session via middlewares.Protect.
func NewUserRouter(hub *ws.Hub) http.Handler {
	var mux = mux.NewRouter().StrictSlash(true)
	mux.HandleFunc("/{usuario}/galery", middlewares.Protect(ListarGalerias, hub, true)).Methods("GET")
	mux.HandleFunc("/{usuario}/galery/fotoPerfil", middlewares.Protect(GetFotoPerfil, hub, true)).Methods("GET")
	mux.HandleFunc("/{usuario}/galery/{galery}", middlewares.Protect(ListarImagenes, hub, true)).Methods("GET")
	mux.HandleFunc("/{usuario}/galery/{galery}/preview", middlewares.Protect(ViewPreview, hub, true)).Methods("GET")
	mux.HandleFunc("/{usuario}/galery/{galery}/{imagen}", middlewares.Protect(ViewImagen, hub, true)).Methods("GET")
	return mux
}
// describeGaleria reads a gallery's metadata from disk: the "permiso"
// (permission) and "descripcion" (description) files stored under
// <config.PATH>/<usuario>/<galeria>/.
//
// Bug fix: the original returned the partially-read file contents
// together with a non-nil error; on failure we now return zero values
// only, so callers cannot accidentally use half-read metadata.
func describeGaleria(usuario, galeria string) (string, string, error) {
	base := config.PATH + "/" + usuario + "/" + galeria
	permiso, err := ioutil.ReadFile(base + "/permiso")
	if err != nil {
		return "", "", err
	}
	descripcion, err := ioutil.ReadFile(base + "/descripcion")
	if err != nil {
		return "", "", err
	}
	return string(permiso), string(descripcion), nil
}
// ListarGalerias lists the galleries owned by the user named in the URL
// ({usuario}) or, when absent, by the session user. Directories whose
// metadata cannot be read are silently skipped. Always responds 200 with
// a JSON envelope {"success": true, "data": [...]}.
//
// Fixes: removed the redundant string(...) conversions (describeGaleria
// already returns strings) and the needless []byte(...) copy on write.
func ListarGalerias(w http.ResponseWriter, r *http.Request, session *models.User, hub *ws.Hub) {
	vars := mux.Vars(r)
	usuario := vars["usuario"]
	if usuario == "" {
		usuario = session.Usuario
	}
	// Best effort: on a read error files is nil and an empty list is returned.
	files, _ := ioutil.ReadDir(config.PATH + "/" + usuario)
	galerias := make([]interface{}, 0, len(files))
	for _, file := range files {
		permiso, descripcion, err := describeGaleria(usuario, file.Name())
		if err != nil {
			// Not a readable gallery directory; skip it.
			continue
		}
		galerias = append(galerias, map[string]interface{}{
			"name":        file.Name(),
			"permiso":     permiso,
			"descripcion": descripcion,
		})
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	respuesta, _ := json.Marshal(map[string]interface{}{
		"success": true,
		"data":    galerias,
	})
	w.Write(respuesta)
}
// image is the JSON shape of one gallery photo: its file name and the
// file's last modification time.
type image struct {
	Src string `json:"src"`
	Modified time.Time `json:"modified"`
}
// listarImagenes collects the files found under
// <config.PATH>/<usuario>/<galeria>/images as image entries; a read
// failure simply yields an empty slice.
func listarImagenes(usuario, galeria string) []image {
	dir := config.PATH + "/" + usuario + "/" + galeria + "/images"
	files, _ := ioutil.ReadDir(dir)
	imagenes := make([]image, len(files))
	for i, file := range files {
		imagenes[i] = image{
			Src:      strings.Trim(file.Name(), "\n"),
			Modified: file.ModTime(),
		}
	}
	return imagenes
}
// nombreValido matches valid image file names (letters, digits and dots).
// galeriaValida matches valid gallery names (letters and digits only).
//
// Bug fix: the original discarded regexp.Compile's error, which would
// leave a nil *Regexp and panic at first use if a pattern were ever
// invalid. MustCompile fails loudly at init instead.
var nombreValido = regexp.MustCompile(`^[A-Za-z0-9\.]+$`)
var galeriaValida = regexp.MustCompile(`^[A-Za-z0-9]+$`)
// ListarImagenes responds with the images of one gallery as JSON. The
// owner comes from the {usuario} path variable or, when it is absent,
// from the authenticated session. Always responds 200 with an envelope
// {"success": true, "data": [...]}.
func ListarImagenes(w http.ResponseWriter, r *http.Request, session *models.User, hub *ws.Hub) {
	vars := mux.Vars(r)
	usuario := vars["usuario"]
	if usuario == "" {
		usuario = session.Usuario
	}
	imagenes := listarImagenes(usuario, vars["galery"])
	respuesta, _ := json.Marshal(map[string]interface{}{
		"success": true,
		"data":    imagenes,
	})
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(respuesta)
}
// DescribeGaleria responds with one gallery's metadata (permission and
// description) as JSON. The owner is taken from the {usuario} path
// variable or, when absent, from the authenticated session.
func DescribeGaleria(w http.ResponseWriter, r *http.Request, session *models.User, hub *ws.Hub) {
	vars := mux.Vars(r)
	galeria := vars["galery"]
	usuario := vars["usuario"]
	if usuario == "" {
		usuario = session.Usuario
	}
	permiso, descripcion, err := describeGaleria(usuario, galeria)
	if err != nil {
		helper.DespacharError(w, err, http.StatusInternalServerError)
		// Bug fix: without this return the handler fell through and wrote a
		// second (success) response on top of the error response.
		return
	}
	respuesta, _ := json.Marshal(map[string]interface{}{
		"success": true,
		"data": map[string]interface{}{
			"ID":          galeria,
			"nombre":      galeria,
			"permiso":     permiso,
			"descripcion": descripcion,
		},
	})
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(respuesta)
}
|
package main
import (
"fmt"
"message/src/cn/cncommdata/study/controller"
"message/src/cn/cncommdata/study/stack"
)
// main is a scratch entry point for the study exercises. The commented-out
// calls are earlier experiments kept for reference; currently only the
// sync.Map practice routine runs.
func main() {
	//array()
	//mySlice()
	//helloWorld()
	//originSlice()
	//resetSlice()
	//directStatementSlice()
	//useMakeFunConstructSlice()
	//utils.Send()
	// instantiate a file
	//file := model.FileConstruct(true)
	//
	//fmt.Println(file)
	//
	//// declare a DataWriter interface variable
	//var writer service.DataWriter
	//
	//// assign file to the interface, i.e. the *file type
	//writer = file
	//
	//b := file.CanOrNo
	//if b {
	//	write := writer.CanWrite(b)
	//	if write {
	//		data := writer.WriteData("欢迎写入数据")
	//		fmt.Println(data)
	//	} else {
	//		fmt.Printf("不好意思,你无权写入")
	//	}
	//}
	//controller.OriginSlice()
	// main program runs
	//fmt.Println("主程序执行")
	//stac()
	//controller.PractiseMap()
	controller.PractiseSynicMap()
	//controller.PractiseList()
	//controller.Myfunc(1,2,3,4,5,67,8,"hello go",make(map[int]int))
	//controller.StartCar()
}
// stac pushes two values onto a fresh stack and prints them back in
// LIFO order (3443 first, then 1313).
func stac() {
	s := stack.CreateStack()
	s.Push(1313)
	s.Push(3443)
	fmt.Println(s.Pop(), s.Pop())
}
|
package gofile
import "testing"
// TestNew verifies the driver registry: registered builders yield the
// expected concrete types, and an unknown name yields an error.
//
// Fixes: failure messages now include the underlying error, and the
// "non-existant" typo is corrected.
func TestNew(t *testing.T) {
	Register("one", buildOne)
	Register("two", buildTwo)
	emptyConfig := map[string]string{}

	one, err := New("one", emptyConfig)
	if err != nil {
		t.Errorf("Could not create driver 'one': %v", err)
	}
	switch v := one.(type) {
	default:
		t.Errorf("'one' not a 'driverOne', is %T instead", v)
	case *driverOne:
	}

	two, err := New("two", emptyConfig)
	if err != nil {
		t.Errorf("Could not create driver 'two': %v", err)
	}
	switch v := two.(type) {
	default:
		t.Errorf("'two' not a 'driverTwo', is %T instead", v)
	case *driverTwo:
	}

	_, err = New("three", emptyConfig)
	if err == nil {
		t.Errorf("Expected error when trying to create non-existent driver 'three'")
	}
}
// driverOne is a stub driver used to exercise registry lookups.
type driverOne struct {
	Driver
}

// driverTwo is a second stub driver, distinguishable from driverOne by type.
type driverTwo struct {
	Driver
}

// buildOne is the builder registered under "one"; the config is ignored.
func buildOne(config map[string]string) (Driver, error) {
	return &driverOne{}, nil
}

// buildTwo is the builder registered under "two"; the config is ignored.
func buildTwo(config map[string]string) (Driver, error) {
	return &driverTwo{}, nil
}
|
// Copyright © 2019 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
// equipmentCmd represents the equipment command. Running it currently
// only prints a placeholder; note it is not attached to the root command
// (the AddCommand call in init below is commented out).
var equipmentCmd = &cobra.Command{
	Use: "equipment",
	Short: "Commands for equipment",
	Long: `Equipment are items that can be purchased, wielded, or worn.
Commands:
ls, list Lists equipment
describe Shows more details of item
Examples:
dnd equipment ls --name musket
dnd equipment describe musket`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("equipment called")
	},
}
func init() {
	// Registration with the root command is disabled; uncomment to expose
	// the command in the CLI.
	//rootCmd.AddCommand(equipmentCmd)
	// Here you will define your flags and configuration settings.
	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// equipmentCmd.PersistentFlags().String("foo", "", "A help for foo")
	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// equipmentCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
|
package model
import (
"time"
"github.com/williammfu/vip-management-system/utils"
"gorm.io/gorm"
)
// Vip is the persistence/API model for a VIP guest.
type Vip struct {
	ID int `json:"id" gorm:"primaryKey"`
	Name string `json:"name"`
	CountryOfOrigin string `json:"country_of_origin"`
	ETA time.Time `json:"eta"`
	Photo string `json:"photo"`
	Arrived bool `json:"arrived"`
	Attributes []string `json:"attributes"`
}

// db is the package-wide gorm handle.
// NOTE(review): it is never assigned within this file's visible code —
// confirm it is initialized elsewhere before use.
var db *gorm.DB
// GetGuestInfo converts the Vip into a Guest record. The guest ID is a
// hash of the name rather than the Vip's numeric ID.
// NOTE(review): Arrived is hard-coded to false instead of copying
// v.Arrived — presumably deliberate for freshly-announced guests, but
// confirm against the callers.
func (v Vip) GetGuestInfo() (g Guest) {
	return Guest{
		ID: int(utils.HashID(v.Name)),
		Name: v.Name,
		CountryOfOrigin: v.CountryOfOrigin,
		ETA: v.ETA,
		Photo: v.Photo,
		Arrived: false}
}
// GetDescriptions expands the Vip's attribute strings into Description
// rows keyed by the given guest id. A Vip with no attributes yields nil
// (preserved so the JSON encoding stays `null`).
//
// Cleanup: the original declared a named result `d` it never used and
// accumulated into a separate local; the unused name is gone.
func (v Vip) GetDescriptions(id int) []Description {
	var descriptions []Description
	for _, attribute := range v.Attributes {
		descriptions = append(descriptions, Description{id, attribute})
	}
	return descriptions
}
// CreateVip assembles a Vip from a Guest plus its Description rows,
// flattening the descriptions back into attribute strings (nil when d is
// empty, matching GetDescriptions).
func CreateVip(g Guest, d []Description) Vip {
	var attributes []string
	for _, desc := range d {
		attributes = append(attributes, desc.Attribute)
	}
	// Keyed literal: the positional form silently breaks if Vip's field
	// order ever changes.
	return Vip{
		ID:              g.ID,
		Name:            g.Name,
		CountryOfOrigin: g.CountryOfOrigin,
		ETA:             g.ETA,
		Photo:           g.Photo,
		Arrived:         g.Arrived,
		Attributes:      attributes,
	}
}
|
package main
import (
"github.com/golang-collections/collections/stack"
)
// calc is a package-level Calculator instance.
var calc Calculator

// main is intentionally empty.
func main() {}
// Operator is used to specify calculator operator.
type Operator int

const (
	// UNKNOWN operator is used for unknown operators
	UNKNOWN Operator = iota
	// ADD operator is used to add two numbers
	ADD
	// SUBTRACT operator is used to subtract two numbers
	SUBTRACT
	// MULTIPLY operator is used to multiply two numbers
	MULTIPLY
	// DIVIDE operator is used to divide two numbers
	DIVIDE
)

// String returns the lowercase name of the operator, or "" for UNKNOWN
// and any other unmapped value.
func (o Operator) String() string {
	names := map[Operator]string{
		ADD:      "add",
		SUBTRACT: "subtract",
		MULTIPLY: "multiply",
		DIVIDE:   "divide",
	}
	return names[o]
}

// ParseOperator is the inverse of String: it maps an operator name to its
// Operator value, yielding UNKNOWN for unrecognized input.
func ParseOperator(operator string) Operator {
	for _, op := range []Operator{ADD, SUBTRACT, MULTIPLY, DIVIDE} {
		if op.String() == operator {
			return op
		}
	}
	return UNKNOWN
}
// Calculator object performs calculations. Operands are entered onto an
// internal stack and the outcome of press is stored in result.
type Calculator struct {
	numbers stack.Stack
	result int
}
// enterNumber pushes an operand onto the calculator's stack.
func (c *Calculator) enterNumber(n int) {
	c.numbers.Push(n)
}
// press pops the two most recent operands (n2 is the last entered, n1
// the one before) and stores the result of applying operator to them.
//
// NOTE(review): popping an empty stack makes the .(int) assertion panic,
// DIVIDE panics when n2 == 0, and an unknown operator leaves c.result
// unchanged — confirm these are acceptable for this exercise.
func (c *Calculator) press(operator Operator) {
	n2 := c.numbers.Pop().(int)
	n1 := c.numbers.Pop().(int)
	switch operator {
	case ADD:
		c.result = n2 + n1
	case SUBTRACT:
		c.result = n1 - n2
	case MULTIPLY:
		c.result = n1 * n2
	case DIVIDE:
		c.result = n1 / n2
	}
}
|
package config
import (
"github.com/bradfitz/gomemcache/memcache"
"github.com/gin-contrib/sessions"
"github.com/gin-gonic/gin"
"os"
)
// InitializeSession builds the gin session middleware according to the
// SESSION_STORE_DRIVER environment variable ("cookie", "redis" or
// "memcache"); any other value falls back to a cookie store. The store
// is keyed with APP_KEY and registered under SESSION_STORE_NAME.
func InitializeSession() gin.HandlerFunc {
	sessionDriver := os.Getenv("SESSION_STORE_DRIVER")
	sessionName := os.Getenv("SESSION_STORE_NAME")
	appKey := os.Getenv("APP_KEY")
	switch sessionDriver {
	case "redis":
		sessionHost := os.Getenv("SESSION_HOST") + ":" + os.Getenv("SESSION_PORT")
		store, err := sessions.NewRedisStore(10, "tcp", sessionHost, "", []byte(appKey))
		if err != nil {
			// Bug fix: the error was previously discarded, allowing a nil
			// store to reach the middleware. Degrade to a cookie store so the
			// app still serves sessions.
			cookieStore := sessions.NewCookieStore([]byte(appKey))
			return sessions.Sessions(sessionName, cookieStore)
		}
		return sessions.Sessions(sessionName, store)
	case "memcache":
		client := memcache.New(os.Getenv("SESSION_HOST") + ":" + os.Getenv("SESSION_PORT"))
		store := sessions.NewMemcacheStore(client, "", []byte(appKey))
		return sessions.Sessions(sessionName, store)
	default:
		// Covers the explicit "cookie" driver and anything unrecognized.
		store := sessions.NewCookieStore([]byte(appKey))
		return sessions.Sessions(sessionName, store)
	}
}
|
package cli
import (
"context"
"time"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/tilt-dev/tilt/internal/analytics"
engineanalytics "github.com/tilt-dev/tilt/internal/engine/analytics"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/model"
)
// A human-friendly CLI for creating extension repos.
type createRepoCmd struct {
	// helper owns the I/O streams and shared object-creation flags.
	helper *createHelper
	// ref holds the value of the --ref flag: the git reference to sync to.
	ref string
}

// Compile-time check that createRepoCmd satisfies tiltCmd.
var _ tiltCmd = &createRepoCmd{}
// newCreateRepoCmd wires a createRepoCmd to a create helper built from
// the given I/O streams.
func newCreateRepoCmd(streams genericclioptions.IOStreams) *createRepoCmd {
	return &createRepoCmd{helper: newCreateHelper(streams)}
}
// name reports the subcommand name, implementing tiltCmd.
func (c *createRepoCmd) name() model.TiltSubcommand { return "create" }
// register builds the cobra command definition: usage text, examples,
// the --ref flag, and the flags shared via the create helper. Cobra
// enforces at least two positional args (NAME and URL).
func (c *createRepoCmd) register() *cobra.Command {
	cmd := &cobra.Command{
		Use: "repo NAME URL [ARG...]",
		DisableFlagsInUseLine: true,
		Short: "Register an extension repository.",
		Long: `Register a repository for loading Tilt extensions.
Tilt supports both git-hosted and local filesystem repositories.
`,
		Args: cobra.MinimumNArgs(2),
		Example: `
tilt create repo default https://github.com/tilt-dev/tilt-extensions
tilt create repo default file:///home/user/src/tilt-extensions
tilt create repo default https://github.com/tilt-dev/tilt-extensions --ref=SHA
`,
	}
	cmd.Flags().StringVar(&c.ref, "ref", "",
		"Git reference to sync the repository to.")
	c.helper.addFlags(cmd)
	return cmd
}
// run reports the invocation to analytics, resolves the helper's flags,
// and creates the ExtensionRepo object assembled from the CLI args.
func (c *createRepoCmd) run(ctx context.Context, args []string) error {
	a := analytics.Get(ctx)
	cmdTags := engineanalytics.CmdTags(map[string]string{})
	a.Incr("cmd.create-repo", cmdTags.AsMap())
	// Allow the analytics client up to one second to deliver the event.
	defer a.Flush(time.Second)
	err := c.helper.interpretFlags(ctx)
	if err != nil {
		return err
	}
	return c.helper.create(ctx, c.object(args))
}
// object assembles the ExtensionRepo API object from the positional args:
// args[0] is the repo name, args[1] its URL. Indexing is safe because
// cobra.MinimumNArgs(2) is enforced in register.
func (c *createRepoCmd) object(args []string) *v1alpha1.ExtensionRepo {
	name := args[0]
	url := args[1]
	return &v1alpha1.ExtensionRepo{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1alpha1.ExtensionRepoSpec{
			URL: url,
			Ref: c.ref,
		},
	}
}
|
package main
import "fmt"
// main demonstrates defer ordering: test_defer prints its value and
// returns it, the deferred closure prints "222", and only then does the
// result reach the caller.
func main() {
	result := test_defer()
	fmt.Println("a:", result)
}

// test_defer returns 1; its deferred closure runs after the return value
// has already been set.
func test_defer() int {
	defer func() {
		fmt.Println("222")
	}()
	value := 1
	fmt.Println("ret:", value)
	return value
}
package database
import (
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
// ConnectToDatabase opens a gorm MySQL connection to the local "tivis"
// database and panics if the connection cannot be established.
//
// NOTE(review): credentials are hard-coded in the DSN — move them to
// configuration/environment variables before this ships.
func ConnectToDatabase() *gorm.DB {
	dsn := "root:ngochd246@/tivis?charset=utf8&parseTime=True&loc=Local"
	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		panic("Connected fail")
	}
	return db
}
package msgpack
import (
"encoding/hex"
"fmt"
"reflect"
"testing"
)
// TestPack checks pack against golden msgpack encodings: each case packs
// a single value and compares the hex dump of the produced bytes with
// the expected encoding. Case names encode the input type and value, and
// the boundary values exercise every msgpack format-width transition.
func TestPack(t *testing.T) {
	t.Parallel()
	packTests := map[string]struct {
		// Expected value
		v interface{}
		// Hex encodings of typ, v
		hs string
	}{
		"Bool/True": {
			v: true,
			hs: "c3",
		},
		"Bool/False": {
			v: false,
			hs: "c2",
		},
		"Int64/0x0": {
			v: int64(0x0),
			hs: "00",
		},
		"Int64/0x1": {
			v: int64(0x1),
			hs: "01",
		},
		"Int64/0x7f": {
			v: int64(0x7f),
			hs: "7f",
		},
		"Int64/0x80": {
			v: int64(0x80),
			hs: "cc80",
		},
		"Int64/0x7fff": {
			v: int64(0x7fff),
			hs: "cd7fff",
		},
		"Int64/0x8000": {
			v: int64(0x8000),
			hs: "cd8000",
		},
		"Int64/0x7fffffff": {
			v: int64(0x7fffffff),
			hs: "ce7fffffff",
		},
		"Int64/0x80000000": {
			v: int64(0x80000000),
			hs: "ce80000000",
		},
		"Int64/0x7fffffffffffffff": {
			v: int64(0x7fffffffffffffff),
			hs: "cf7fffffffffffffff",
		},
		"Int64/-0x1": {
			v: int64(-0x1),
			hs: "ff",
		},
		"Int64/-0x20": {
			v: int64(-0x20),
			hs: "e0",
		},
		"Int64/-0x21": {
			v: int64(-0x21),
			hs: "d0df",
		},
		"Int64/-0x80": {
			v: int64(-0x80),
			hs: "d080",
		},
		"Int64/-0x81": {
			v: int64(-0x81),
			hs: "d1ff7f",
		},
		"Int64/-0x8000": {
			v: int64(-0x8000),
			hs: "d18000",
		},
		"Int64/-0x8001": {
			v: int64(-0x8001),
			hs: "d2ffff7fff",
		},
		"Int64/-0x80000000": {
			v: int64(-0x80000000),
			hs: "d280000000",
		},
		"Int64/-0x80000001": {
			v: int64(-0x80000001),
			hs: "d3ffffffff7fffffff",
		},
		"Int64/-0x8000000000000000": {
			v: int64(-0x8000000000000000),
			hs: "d38000000000000000",
		},
		"Uint64/0x0": {
			v: uint64(0x0),
			hs: "00",
		},
		"Uint64/0x1": {
			v: uint64(0x1),
			hs: "01",
		},
		"Uint64/0x7f": {
			v: uint64(0x7f),
			hs: "7f",
		},
		"Uint64/0xff": {
			v: uint64(0xff),
			hs: "ccff",
		},
		"Uint64/0x100": {
			v: uint64(0x100),
			hs: "cd0100",
		},
		"Uint64/0xffff": {
			v: uint64(0xffff),
			hs: "cdffff",
		},
		"Uint64/0x10000": {
			v: uint64(0x10000),
			hs: "ce00010000",
		},
		"Uint64/0xffffffff": {
			v: uint64(0xffffffff),
			hs: "ceffffffff",
		},
		"Uint64/0x100000000": {
			v: uint64(0x100000000),
			hs: "cf0000000100000000",
		},
		"Uint64/0xffffffffffffffff": {
			v: uint64(0xffffffffffffffff),
			hs: "cfffffffffffffffff",
		},
		"Float64/1.23456": {
			v: float64(1.23456),
			hs: "cb3ff3c0c1fc8f3238",
		},
		"String/Empty": {
			v: string(""),
			hs: "a0",
		},
		"String/1": {
			v: string("1"),
			hs: "a131",
		},
		"String/1234567890123456789012345678901": {
			v: string("1234567890123456789012345678901"),
			hs: "bf31323334353637383930313233343536373839303132333435363738393031",
		},
		"String/12345678901234567890123456789012": {
			v: string("12345678901234567890123456789012"),
			hs: "d9203132333435363738393031323334353637383930313233343536373839303132",
		},
		"Binary/Empty": {
			v: []byte(""),
			hs: "c400",
		},
		"Binary/1": {
			v: []byte("1"),
			hs: "c40131",
		},
		"MapLen/0x0": {
			v: mapLen(0x0),
			hs: "80",
		},
		"MapLen/0x1": {
			v: mapLen(0x1),
			hs: "81",
		},
		"MapLen/0xf": {
			v: mapLen(0xf),
			hs: "8f",
		},
		"MapLen/0x10": {
			v: mapLen(0x10),
			hs: "de0010",
		},
		"MapLen/0xffff": {
			v: mapLen(0xffff),
			hs: "deffff",
		},
		"MapLen/0x10000": {
			v: mapLen(0x10000),
			hs: "df00010000",
		},
		"MapLen/0xffffffff": {
			v: mapLen(0xffffffff),
			hs: "dfffffffff",
		},
		"ArrayLen/0x0": {
			v: arrayLen(0x0),
			hs: "90",
		},
		"ArrayLen/0x1": {
			v: arrayLen(0x1),
			hs: "91",
		},
		"ArrayLen/0xf": {
			v: arrayLen(0xf),
			hs: "9f",
		},
		"ArrayLen/0x10": {
			v: arrayLen(0x10),
			hs: "dc0010",
		},
		"ArrayLen/0xffff": {
			v: arrayLen(0xffff),
			hs: "dcffff",
		},
		"ArrayLen/0x10000": {
			v: arrayLen(0x10000),
			hs: "dd00010000",
		},
		"ArrayLen/0xffffffff": {
			v: arrayLen(0xffffffff),
			hs: "ddffffffff",
		},
		"Extension/1/Empty": {
			v: extension{1, ""},
			hs: "c70001",
		},
		"Extension/2/1": {
			v: extension{2, "1"},
			hs: "d40231",
		},
		"Extension/3/12": {
			v: extension{3, "12"},
			hs: "d5033132",
		},
		"Extension/4/1234": {
			v: extension{4, "1234"},
			hs: "d60431323334",
		},
		"Extension/5/12345678": {
			v: extension{5, "12345678"},
			hs: "d7053132333435363738",
		},
		"Extension/6/1234567890123456": {
			v: extension{6, "1234567890123456"},
			hs: "d80631323334353637383930313233343536",
		},
		"Extension/7/12345678901234567": {
			v: extension{7, "12345678901234567"},
			hs: "c711073132333435363738393031323334353637",
		},
		"Nil": {
			v: nil,
			hs: "c0",
		},
	}
	for name, tt := range packTests {
		// Capture the range variable for the parallel subtest closure.
		tt := tt
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			var arg string
			// Print integers in hex so failure output lines up with the
			// expected hex-encoded value.
			switch reflect.ValueOf(tt.v).Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				arg = fmt.Sprintf("%T %x", tt.v, tt.v)
			default:
				arg = fmt.Sprintf("%T %v", tt.v, tt.v)
			}
			p, err := pack(tt.v)
			if err != nil {
				t.Fatalf("pack %s returned error %v", arg, err)
			}
			h := hex.EncodeToString(p)
			if h != tt.hs {
				t.Fatalf("pack %s returned %s, want %s", arg, h, tt.hs)
			}
		})
	}
}
|
package main
import (
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
)
// Settings carries the GitHub credentials and merge policy used by the
// webhook handler.
// NOTE(review): the yaml tags suggest these were once read from a YAML
// file, but main populates them from environment variables — confirm the
// tags are still needed.
type Settings struct {
	GitHubUserName string `yaml:"gitHubUserName"`
	GitHubToken string `yaml:"gitHubToken"`
	RestrictMergeRequester string `yaml:"restrictMergeRequester"`
}

// settings is the process-wide configuration, populated by main.
var settings Settings
// EventPullRequest is the pull_request stub attached to an issue event;
// a non-empty URL marks the issue as a pull request.
type EventPullRequest struct {
	URL string `json:"url"`
}

// User identifies a GitHub account by login name.
type User struct {
	Login string `json:"login"`
}

// Comment is the issue comment that triggered the webhook.
type Comment struct {
	HTMLurl string `json:"html_url"`
	Body string `json:"body"`
	User User `json:"user"`
}

// Issue is the issue (or pull request) the comment was made on.
type Issue struct {
	Number int `json:"number"`
	State string `json:"state"`
	EventPullRequest EventPullRequest `json:"pull_request"`
	HTMLurl string `json:"html_url"`
}

// Repository identifies the repo by its "owner/name" full name.
type Repository struct {
	FullName string `json:"full_name"`
}

// IssueCommentWebhookEvent is the subset of GitHub's issue_comment
// webhook payload that this service consumes.
type IssueCommentWebhookEvent struct {
	Issue Issue `json:"issue"`
	Repository Repository `json:"repository"`
	Comment Comment `json:"comment"`
}

// Head carries the SHA of the pull request's head commit.
type Head struct {
	Sha string `json:"sha"`
}

// PullRequest is the subset of the PR details API response used for the
// mergeability and authorship checks.
type PullRequest struct {
	URL string `json:"url"`
	Head Head `json:"head"`
	Mergeable bool `json:"mergeable"`
	Title string `json:"title"`
	User User `json:"user"`
}
type ApiResponse struct {
Body []byte
StatusCode int
Error error
}
type stop struct {
error
}
const mergeComment = "please merge"
const gitHubApiBaseUrl = "https://api.github.com"
func retry(attempts int, sleep time.Duration, f func() ApiResponse) ApiResponse {
apiResponse := f()
if apiResponse.Error != nil {
if s, ok := apiResponse.Error.(stop); ok {
// if it's a stop return the original error for later checking
apiResponse.Error = s.error
return apiResponse
}
if attempts--; attempts > 0 {
time.Sleep(sleep)
return retry(attempts, 2*sleep, f)
}
return apiResponse
}
return apiResponse
}
// ApiCall is the signature of a GitHub REST call, abstracted so tests
// can substitute a fake transport (see autoMerge).
type ApiCall func(url string, method string, payload string, settings Settings) ApiResponse

// apiCall performs an authenticated GitHub REST request, retrying up to
// three times with exponential backoff on transport failures and 5xx
// responses. 4xx responses are treated as permanent (stop) errors.
func apiCall(url string, method string, payload string, settings Settings) ApiResponse {
	basicAuthToken := base64.StdEncoding.EncodeToString([]byte(settings.GitHubUserName + ":" + settings.GitHubToken))
	return retry(3, time.Second, func() ApiResponse {
		// Bug fix: build a fresh request on every attempt. The old code
		// created one request outside the retry loop; its body reader is
		// consumed by the first attempt, so every retry went out with an
		// empty body.
		req, err := http.NewRequest(method, url, strings.NewReader(payload))
		if err != nil {
			// A malformed request can never succeed; stop retrying. retry
			// unwraps the stop, so callers still see the original error.
			return ApiResponse{Body: nil, StatusCode: -1, Error: stop{err}}
		}
		req.Header.Add("Authorization", "Basic "+basicAuthToken)
		req.Header.Add("content-type", "application/json")
		res, err := http.DefaultClient.Do(req)
		if err != nil {
			return ApiResponse{Body: nil, StatusCode: -1, Error: err}
		}
		defer res.Body.Close()
		body, err := ioutil.ReadAll(res.Body)
		if err != nil {
			// this results in a retry as we're passing back
			return ApiResponse{Body: body, StatusCode: res.StatusCode, Error: err}
		}
		s := res.StatusCode
		switch {
		case s >= 500:
			// Retry
			return ApiResponse{Body: body, StatusCode: res.StatusCode, Error: fmt.Errorf("server error: %v", s)}
		case s >= 400:
			// Don't retry, it was client's fault
			return ApiResponse{Body: body, StatusCode: res.StatusCode, Error: stop{fmt.Errorf("client error: %v", s)}}
		default:
			// Happy
			return ApiResponse{Body: body, StatusCode: res.StatusCode, Error: nil}
		}
	})
}
// autoMerge attempts to squash-merge the pull request referenced by an
// issue-comment event. It returns "" on success, or a human-readable
// message (suitable for posting back on the PR) explaining why the
// merge did not happen.
func autoMerge(event IssueCommentWebhookEvent, apiCall ApiCall) string {
	if event.Issue.State != "open" {
		return "Pull request is not open."
	}
	// get info about the pull request
	urlPR := fmt.Sprintf("%s/repos/%s/pulls/%d", gitHubApiBaseUrl, event.Repository.FullName, event.Issue.Number)
	prApiResponse := apiCall(urlPR, "GET", "", settings)
	if prApiResponse.Error != nil {
		log.Printf("Failed to get the pull request details: %s", prApiResponse.Error)
		return "Error fetching pull request details. Try again."
	}
	var pr PullRequest
	err := json.Unmarshal(prApiResponse.Body, &pr)
	if err != nil {
		log.Println(err)
		return "Error fetching pull request details. Try again."
	}
	if !pr.Mergeable {
		return "Pull Request is not mergeable. Make sure there is approval and status checks have passed."
	}
	// by default, the request to merge comment will only be honored if the opener of the PR makes the comment
	// if merging is restricted to the requester, check comment user
	var restrictBool bool
	if settings.RestrictMergeRequester != "" {
		// NOTE(review): a ParseBool error is ignored here, leaving
		// restrictBool false (unrestricted) — confirm that fallback is
		// intended.
		restrictBool, err = strconv.ParseBool(settings.RestrictMergeRequester)
	} else {
		// env not set, default to true
		restrictBool = true
	}
	if restrictBool == true && pr.User.Login != event.Comment.User.Login {
		return "Merge request comment must be made by the pull request author."
	}
	// try to merge the pr
	urlMerge := fmt.Sprintf("%s/repos/%s/pulls/%d/merge", gitHubApiBaseUrl, event.Repository.FullName, event.Issue.Number)
	payload := fmt.Sprintf(`{
"commit_title": "%s",
"commit_message": "PR automatically merged",
"sha": "%s",
"merge_method": "squash"
}`, pr.Title, pr.Head.Sha)
	mergeApiResponse := apiCall(urlMerge, "PUT", payload, settings)
	log.Printf("Response: %d %s", mergeApiResponse.StatusCode, mergeApiResponse.Body)
	type Body struct {
		Message string `json:"message"`
	}
	var responseMessage Body
	err = json.Unmarshal(mergeApiResponse.Body, &responseMessage)
	if err != nil {
		log.Println(err)
		return "Error fetching merge request response details."
	}
	// Swap double quotes for single quotes so the message can be embedded
	// in the JSON comment payload the caller builds.
	message := strings.Replace(responseMessage.Message, `"`, "'", -1)
	switch mergeApiResponse.StatusCode {
	case 200:
		log.Printf("Merged pull request: %s", pr.URL)
		return ""
	case 405, 409:
		// Merge blocked (405) or head changed (409): relay GitHub's reason.
		return message
	default:
		log.Printf("Unexpected response from pull request merge api, %d %s", mergeApiResponse.StatusCode, mergeApiResponse.Body)
		return message
	}
}
// handleRequest is the webhook endpoint. It accepts a POST issue-comment
// event, acknowledges immediately with 200/"OK", and then — when the
// comment is exactly the merge trigger and the issue is a pull request —
// attempts the merge, posting any failure reason back as a PR comment.
func handleRequest(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, fmt.Sprintf("Method not allowed"), http.StatusMethodNotAllowed)
		return
	}
	b, err := ioutil.ReadAll(r.Body)
	defer r.Body.Close()
	if err != nil {
		http.Error(w, fmt.Sprintf("Request body could not be read, %s", err.Error()), http.StatusInternalServerError)
		return
	}
	var event IssueCommentWebhookEvent
	err = json.Unmarshal(b, &event)
	if err != nil {
		http.Error(w, fmt.Sprintf("Could not unmarshal body, %s", err.Error()), http.StatusInternalServerError)
		return
	}
	// no errors with request, so send a 200 and then do stuff
	_, err = io.WriteString(w, "OK")
	if err != nil {
		// log an error, but keep going, doesn't really matter if a response makes it back
		log.Println(fmt.Errorf("Error sending response back to GitHub webhook, %s", err))
	}
	// check if comment is what we're looking for, otherwise do nothing
	if strings.ToLower(event.Comment.Body) != mergeComment {
		log.Printf("Comment was not '%s', url: %s.", mergeComment, event.Comment.HTMLurl)
		return
	}
	// if it's an issue and not a pull request, do nothing
	if event.Issue.EventPullRequest.URL == "" {
		log.Printf("Event triggered on issue and not pull request, url: %s.", event.Comment.HTMLurl)
		return
	}
	comment := autoMerge(event, apiCall)
	if comment != "" {
		// comment back on the pr
		log.Printf("Commenting on PR #%d in: %s with comment: %s, url: %s", event.Issue.Number, event.Repository.FullName, comment, event.Issue.HTMLurl)
		urlComment := fmt.Sprintf("%s/repos/%s/issues/%d/comments", gitHubApiBaseUrl, event.Repository.FullName, event.Issue.Number)
		payload := fmt.Sprintf(`{
"body": "%s"
}`, comment)
		commentApiResponse := apiCall(urlComment, "POST", payload, settings)
		if commentApiResponse.Error != nil {
			log.Printf("Failed to comment on the pull request: %s with failure reason: %s %s", event.Issue.HTMLurl, commentApiResponse.Error, commentApiResponse.Body)
		}
	}
}
func health(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, fmt.Sprintf("Method not allowed"), http.StatusMethodNotAllowed)
return
}
_, err := io.WriteString(w, "OK")
if err != nil {
log.Println(fmt.Errorf("Error sending response to health check, %s", err))
return
}
log.Println("Request made to /health")
}
// main wires the webhook and health endpoints and serves HTTP on port
// 8080. GitHub credentials come from the environment; the process exits
// if either is missing.
func main() {
	// don't verify when calling to GitHub, otherwise we need a cert bundle
	// NOTE(review): InsecureSkipVerify disables TLS certificate checks for
	// ALL outbound requests through the default transport — prefer shipping
	// a CA bundle instead.
	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	log.Println("Server starting...")
	settings.GitHubUserName = os.Getenv("GITHUB_USERNAME")
	settings.GitHubToken = os.Getenv("GITHUB_TOKEN")
	settings.RestrictMergeRequester = os.Getenv("RESTRICT_MERGE_REQUESTER")
	if settings.GitHubToken == "" || settings.GitHubUserName == "" {
		log.Fatalf("GitHub username or token not set, cannot start application.")
	}
	port := "8080"
	http.HandleFunc("/", handleRequest)
	http.HandleFunc("/health", health)
	log.Printf("Server started, listening on port %s", port)
	log.Print(http.ListenAndServe(":"+port, nil))
}
|
package login
import (
"encoding/json"
"github.com/llr104/LiFrame/core/liFace"
"github.com/llr104/LiFrame/core/liNet"
"github.com/llr104/LiFrame/dbobject"
"github.com/llr104/LiFrame/proto"
"github.com/llr104/LiFrame/server/app"
"github.com/llr104/LiFrame/utils"
"time"
)
// Enter is the package-level login handler instance.
var Enter EnterLogin

func init() {
	Enter = EnterLogin{}
}
// ClientConnStart is the connection-open hook: it increments the client
// counter and logs the peer address.
func ClientConnStart(conn liFace.IConnection) {
	app.MClientData.Inc()
	utils.Log.Info("ClientConnStart:%s", conn.RemoteAddr().String())
}
// ClientConnStop is the connection-close hook: it decrements the client
// counter and removes any session bound to the connection.
func ClientConnStop(conn liFace.IConnection) {
	app.MClientData.Dec()
	SessLoginMgr.SessionExitByConn(conn)
	utils.Log.Info("ClientConnStop:%s", conn.RemoteAddr().String())
}
// ShutDown is the server shutdown hook; it only logs the event.
func ShutDown(){
	utils.Log.Info("ShutDown")
}
// EnterLogin routes the login-server messages (ping, login, register,
// session checks) on top of the framework's base router.
type EnterLogin struct {
	liNet.BaseRouter
}

// NameSpace returns the message namespace this router handles.
func (s *EnterLogin) NameSpace() string {
	return "EnterLogin"
}
// Ping answers a client keepalive probe with the current Unix time.
func (s* EnterLogin) Ping(req liFace.IRequest){
	utils.Log.Info("Ping req: %s", req.GetMsgName())
	info := proto.PingPong{}
	info.CurTime = time.Now().Unix()
	data, _ := json.Marshal(info)
	req.GetConnection().SendMsg(proto.LoginClientPong, data)
}
// LoginReq handles a client login: it validates the name/password pair,
// refuses disabled accounts, records the login (time, IP, counter) and
// hands back a session token in the ack.
func (s *EnterLogin) LoginReq(req liFace.IRequest) {
	begin := time.Now()
	utils.Log.Info("LoginReq begin: %s", req.GetMsgName())
	reqInfo := proto.LoginReq{}
	ackInfo := proto.LoginAck{}
	err := json.Unmarshal(req.GetData(), &reqInfo)
	if err != nil {
		ackInfo.Code = proto.Code_Illegal
		utils.Log.Info("LoginReq error:%s", err.Error())
	} else {
		user := dbobject.User{}
		user.Name = reqInfo.Name
		user.Password = reqInfo.Password
		user.LastLoginIp = reqInfo.Ip
		if err := dbobject.FindUserByNP(&user); err != nil {
			ackInfo.Code = proto.Code_User_Error
			utils.Log.Info("LoginReq FindByNamePassword error:%s", err.Error())
		} else {
			if user.State != dbobject.UserStateNormal {
				// Account exists but is disabled/banned.
				ackInfo.Code = proto.Code_User_Forbid
			} else {
				user.LastLoginTime = time.Now().Unix()
				user.IsOnline = true
				user.LoginTimes += 1
				dbobject.UpdateUserToDB(&user)
				session := s.login(&user, req.GetConnection())
				ackInfo.Code = proto.Code_Success
				ackInfo.Password = user.Password
				ackInfo.Name = user.Name
				ackInfo.Id = user.Id
				ackInfo.Session = session
			}
		}
	}
	data, _ := json.Marshal(ackInfo)
	req.GetConnection().SendMsg(proto.EnterLoginLoginAck, data)
	// Bug fix: the old code diffed time.Now().Nanosecond() values, which
	// wrap every second and can go negative (then overflow the uint64
	// conversion). time.Since gives the real elapsed duration.
	elapsed := time.Since(begin) / time.Millisecond
	utils.Log.Info("LoginReq end: %v,time:%d", reqInfo, elapsed)
}
// RegisterReq handles account creation: it rejects names that already
// exist, otherwise stores the new user (marked online, first login) and
// echoes the credentials back in the ack.
func (s *EnterLogin) RegisterReq(req liFace.IRequest) {
	begin := time.Now()
	utils.Log.Info("RegisterReq begin: %s", req.GetMsgName())
	reqInfo := proto.RegisterReq{}
	ackInfo := proto.RegisterAck{}
	err := json.Unmarshal(req.GetData(), &reqInfo)
	if err != nil {
		ackInfo.Code = proto.Code_Illegal
		// Bug fix: the format string was missing its %s verb.
		utils.Log.Info("RegisterReq error:%s", err.Error())
	} else {
		user := dbobject.User{}
		user.Name = reqInfo.Name
		user.Password = reqInfo.Password
		user.LastLoginIp = reqInfo.Ip
		if err := dbobject.FindUserByName(&user); err == nil {
			ackInfo.Code = proto.Code_User_Exist
			// Bug fix: logged ackInfo.Name, which is never set on this path
			// and was always empty; log the requested name instead.
			utils.Log.Info("RegisterReq FindByName:%s Exist", user.Name)
		} else {
			user.LastLoginTime = time.Now().Unix()
			user.IsOnline = true
			user.LoginTimes = 1
			user.State = dbobject.UserStateNormal
			dbobject.InsertUserToDB(&user)
			ackInfo.Code = proto.Code_Success
			ackInfo.Password = user.Password
			ackInfo.Name = user.Name
			ackInfo.Id = user.Id
		}
	}
	data, _ := json.Marshal(ackInfo)
	req.GetConnection().SendMsg(proto.EnterLoginRegisterAck, data)
	// Bug fix: Nanosecond() diffs wrap each second; use a monotonic duration.
	elapsed := time.Since(begin) / time.Millisecond
	utils.Log.Info("RegisterReq end: %v,%d", reqInfo, elapsed)
}
// CheckSessionReq verifies that a (userId, session) pair is still alive,
// echoing the request identifiers back so the caller can correlate the
// answer.
func (s *EnterLogin) CheckSessionReq(req liFace.IRequest) {
	utils.Log.Info("CheckSessionReq begin")
	reqInfo := proto.CheckSessionReq{}
	ackInfo := proto.CheckSessionAck{}
	err := json.Unmarshal(req.GetData(), &reqInfo)
	if err != nil {
		ackInfo.Code = proto.Code_Illegal
		// Bug fix: the format string was missing its %s verb.
		utils.Log.Info("CheckSessionReq error:%s", err.Error())
	} else {
		if SessLoginMgr.SessionIsLive(reqInfo.UserId, reqInfo.Session) {
			ackInfo.Code = proto.Code_Success
		} else {
			ackInfo.Code = proto.Code_Session_Error
		}
	}
	// Echo after unmarshaling so the real identifiers (or zero values on a
	// malformed request) are returned.
	ackInfo.Session = reqInfo.Session
	ackInfo.UserId = reqInfo.UserId
	ackInfo.ConnId = reqInfo.ConnId
	data, _ := json.Marshal(ackInfo)
	req.GetConnection().SendMsg(proto.EnterWorldCheckSessionAck, data)
	utils.Log.Info("CheckSessionReq end: %v", reqInfo)
}
// DistributeWorldReq picks a world server for the client based on load
// (via app.ServerMgr.Distribute) and returns its connection info in the
// ack.
func (s *EnterLogin) DistributeWorldReq(req liFace.IRequest) {
	utils.Log.Info("DistributeServerReq begin: %s", req.GetMsgName())
	reqInfo := proto.DistributeServerReq{}
	ackInfo := proto.DistributeServerAck{}
	if err := json.Unmarshal(req.GetData(), &reqInfo); err != nil {
		ackInfo.Code = proto.Code_Illegal
		utils.Log.Info("DistributeWorldReq error:%s", err.Error())
	} else {
		if serverInfo, err:= app.ServerMgr.Distribute(proto.ServerTypeWorld); err != nil {
			// No world server is currently available.
			ackInfo.Code = proto.Code_Not_Server
			utils.Log.Info("DistributeWorldReq error:%s", err.Error())
		}else{
			ackInfo.Code = proto.Code_Success
			ackInfo.ServerInfo = serverInfo
		}
	}
	data, _ := json.Marshal(ackInfo)
	req.GetConnection().SendMsg(proto.EnterLoginDistributeWorldAck, data)
	utils.Log.Info("DistributeWorldAck end: %v", reqInfo)
}
// SessionUpdateReq lets a trusted peer delete or keep-alive a session.
// The ack echoes the request identifiers and the operation type.
func (s *EnterLogin) SessionUpdateReq(req liFace.IRequest) {
	utils.Log.Info("SessionUpdateReq begin: %s", req.GetMsgName())
	reqInfo := proto.SessionUpdateReq{}
	ackInfo := proto.SessionUpdateAck{}
	if err := json.Unmarshal(req.GetData(), &reqInfo); err != nil {
		ackInfo.Code = proto.Code_Illegal
		utils.Log.Info("SessionUpdateReq error:%s", err.Error())
	} else {
		if reqInfo.OpType == proto.SessionOpDelete {
			s.logout(reqInfo.UserId, reqInfo.Session)
		} else if reqInfo.OpType == proto.SessionOpKeepLive {
			SessLoginMgr.SessionKeepLive(reqInfo.UserId, reqInfo.Session)
		}
		ackInfo.Code = proto.Code_Success
	}
	// Bug fix: these echo fields were previously copied from reqInfo
	// BEFORE it was unmarshaled, so the ack always carried zero values.
	ackInfo.Session = reqInfo.Session
	ackInfo.UserId = reqInfo.UserId
	ackInfo.ConnId = reqInfo.ConnId
	ackInfo.OpType = reqInfo.OpType
	data, _ := json.Marshal(ackInfo)
	req.GetConnection().SendMsg(proto.EnterLoginSessionUpdateAck, data)
	utils.Log.Info("SessionUpdateReq end: %v", reqInfo)
}
// login registers a fresh session for the authenticated user and tags the
// connection with the user id so later requests can be attributed to it.
func (s *EnterLogin) login(user *dbobject.User, conn liFace.IConnection) string {
	network := app.GetServer().(liFace.INetWork)
	sess := SessLoginMgr.MakeSession(network.GetId(), user.Id, conn)
	conn.SetProperty("userId", user.Id)
	return sess
}
// logout drops the user's session from the session manager and records the
// event in the log.
func (s *EnterLogin) logout(userId uint32, session string) {
	SessLoginMgr.RemoveSession(userId, session)
	utils.Log.Info("logout userId:%d", userId)
}
package access
import (
"encoding/json"
"errors"
"net/http"
//"gopkg.in/mgo.v2"
//"gopkg.in/mgo.v2/bson"
//"github.com/mgalela/akses/appserver/db"
)
const (
	// graphPrefixURI is the base URL for Facebook Graph API requests.
	graphPrefixURI = "https://graph.facebook.com"
)

var (
	// socialAccountMap maps lowercase provider keys to display names.
	// NOTE(review): not referenced in this chunk — presumably consumed by
	// other files in the package; confirm before removing.
	socialAccountMap = map[string]string{
		"facebook": "Facebook",
		"google": "Google",
		"twitter": "Twitter",
	}
)

// FacebookPicture represents the Facebook response when requesting a
// profile picture.
type FacebookPicture struct {
	Data facebookResponseData `json:"data"`
}

// facebookResponseData holds the picture URL inside the Graph API response.
type facebookResponseData struct {
	Url string `json:"url"`
}

// facebookUser is the subset of the Graph API /me response that gets
// decoded; FirstName/LastName are tagged "-" so JSON never touches them.
type facebookUser struct {
	ID string `json:"id"`
	Email string `json:"email"`
	FirstName string `json:"-"`
	LastName string `json:"-"`
}
// RequestPicturePath fetches profile-picture metadata (a 140x140,
// non-redirecting variant) for the given Facebook user id from the Graph
// API and decodes it into a FacebookPicture.
func RequestPicturePath(facebookID string) (*FacebookPicture, error) {
	uri := graphPrefixURI + "/v2.3/" + facebookID + "/picture?width=140&height=140&redirect=false"
	resp, err := http.Get(uri)
	if err != nil {
		return nil, err
	}
	// Bug fix: the response body was never closed, leaking the underlying
	// connection on every call (including the non-200 early return).
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, errors.New("failed to get uri")
	}
	fbPict := FacebookPicture{}
	if err := json.NewDecoder(resp.Body).Decode(&fbPict); err != nil {
		return nil, err
	}
	return &fbPict, nil
}
// getFacebookUser resolves an access token to the basic /me profile via the
// Facebook Graph API.
func getFacebookUser(token string) (*facebookUser, error) {
	fbUser := facebookUser{}
	url := "https://graph.facebook.com/v2.2/me?access_token=" + token
	r, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	// Bug fix: the response body was never closed, leaking the underlying
	// connection on every call.
	defer r.Body.Close()
	if r.StatusCode != 200 {
		return nil, errors.New("failed to sync Facebook account")
	}
	if err := json.NewDecoder(r.Body).Decode(&fbUser); err != nil {
		return nil, err
	}
	return &fbUser, nil
}
|
package gosqlite3-extension-functions
import (
"database/sql"
"testing"
)
// TestOpenReturnsWithoutError verifies that an in-memory database can be
// opened and pinged through the registered sqlite3-extension-functions driver.
func TestOpenReturnsWithoutError(t *testing.T) {
	db, err := sql.Open("sqlite3-extension-functions", ":memory:")
	if err != nil {
		// Fix: t.Fatalf(err.Error()) used a non-constant format string
		// (go vet printf warning, and % characters in the error would be
		// misinterpreted); t.Fatal is the safe form.
		t.Fatal(err)
	}
	// Fix: the pool was never closed, leaking the connection.
	defer db.Close()
	if err := db.Ping(); err != nil {
		t.Fatal(err)
	}
}
|
package level_ip
import (
"fmt"
)
const (
	// IPV4 is the value of the IP header version field for IPv4.
	IPV4 uint8 = 0x04
	// IPV4_TCP is the IPv4 protocol number for TCP.
	IPV4_TCP = 0x06
)
// IPHdr is an in-memory representation of an IPv4 header plus its payload.
// encode/decode convert between this form and network byte order.
type IPHdr struct {
	version uint8
	ihl uint8
	tos uint8
	len uint16
	id uint16
	flags uint16
	frag_offet uint16
	ttl uint8
	proto uint8
	csum uint16
	saddr uint32
	daddr uint32
	payload []byte
}
// encode serializes the header into a fresh byte slice in wire order:
// version/IHL, TOS, total length, id, flags+fragment offset, TTL, protocol,
// checksum, source and destination addresses, then the payload.
func (ip_hdr *IPHdr) encode() []byte {
	b := make([]byte, 0)
	// Version occupies the high nibble of the first byte, IHL the low one.
	b = append(b, (ip_hdr.version<<4)|(ip_hdr.ihl&0x0f))
	b = append(b, ip_hdr.tos)
	b = append(b, writeUint16ToNet(ip_hdr.len)...)
	b = append(b, writeUint16ToNet(ip_hdr.id)...)
	// Flags live in the top 3 bits, fragment offset in the low 13.
	flags_fragoff := (ip_hdr.flags << 13) | (ip_hdr.frag_offet & 0x1fff)
	b = append(b, writeUint16ToNet(flags_fragoff)...)
	b = append(b, ip_hdr.ttl)
	b = append(b, ip_hdr.proto)
	b = append(b, writeUint16ToNet(ip_hdr.csum)...)
	b = append(b, writeUint32ToNet(ip_hdr.saddr)...)
	b = append(b, writeUint32ToNet(ip_hdr.daddr)...)
	b = append(b, ip_hdr.payload...)
	return b
}
// decode parses the first 20 bytes of b as an IPv4 header and stores the
// remainder as the payload.
// NOTE(review): assumes len(b) >= 20 and treats everything past byte 20 as
// payload, i.e. IPv4 options (ihl > 5) are not separated out — confirm.
func (ip_hdr *IPHdr) decode(b []byte) {
	ip_hdr.version = b[0] >> 4
	// Bug fix: IHL is only the low nibble of the first byte. Masking with
	// 0xff kept the version bits too (e.g. 0x45 decoded as ihl=69 instead
	// of 5), which broke every ihl-derived length downstream. encode()
	// already masks with 0x0f; decode now matches it.
	ip_hdr.ihl = b[0] & 0x0f
	ip_hdr.tos = b[1]
	ip_hdr.len = readUint16FromNet(b[2:4])
	ip_hdr.id = readUint16FromNet(b[4:6])
	flags_fragoff := readUint16FromNet(b[6:8])
	ip_hdr.flags = flags_fragoff >> 13
	ip_hdr.frag_offet = flags_fragoff & 0x1fff
	ip_hdr.ttl = b[8]
	ip_hdr.proto = b[9]
	ip_hdr.csum = readUint16FromNet(b[10:12])
	ip_hdr.saddr = readUint32FromNet(b[12:16])
	ip_hdr.daddr = readUint32FromNet(b[16:20])
	ip_hdr.payload = b[20:]
}
// initIPV4Hdr decodes the ethernet frame's payload into a freshly
// allocated IPv4 header structure.
func initIPV4Hdr(eth_hdr *EthHdr) *IPHdr {
	ip := &IPHdr{}
	ip.decode(eth_hdr.payload)
	return ip
}
// ipv4_incoming validates an incoming IPv4 datagram (version, header
// length, TTL, checksum) and dispatches it to the ICMP or TCP handler
// based on the protocol field.
func ipv4_incoming(netdev *NetDev, eth_hdr *EthHdr, ifce *TunInterface) {
	DPrintf("Received A IP Datagram.\n%s\n", hexdump(eth_hdr.payload))
	ip_hdr := initIPV4Hdr(eth_hdr)
	if ip_hdr.version != IPV4 {
		fmt.Printf("Datagram version was not IPV4\n")
		return
	}
	// An IPv4 header is at least 5 32-bit words (20 bytes) long.
	if ip_hdr.ihl < 5 {
		fmt.Printf("IPV4 Header is at least 5 length long, but got %d\n", ip_hdr.ihl)
		return
	}
	if ip_hdr.ttl == 0 {
		fmt.Printf("Time to live of Datagram reached 0.\n")
		return
	}
	// A valid header checksums to zero when the stored checksum field is
	// included in the computation.
	csum := checksum(ip_hdr.encode(), int(ip_hdr.ihl*4))
	if csum != 0 {
		// data not valid
		return
	}
	switch ip_hdr.proto {
	case ICMPV4:
		icmp_incoming(netdev, eth_hdr, ip_hdr, ifce)
	case IPV4_TCP:
		tcp_incoming(netdev, eth_hdr, ip_hdr, ifce)
	default:
		DPrintf("IP Datagram's protocol: %d\n", ip_hdr.proto)
	}
}
// ipv4_outgoing echoes an IPv4 datagram back toward its sender: source and
// destination addresses are swapped, the header checksum is recomputed, and
// the packet is handed to the network device for transmission.
func ipv4_outgoing(netdev *NetDev, eth_hdr *EthHdr, ip_hdr *IPHdr, ifce *TunInterface) {
	// Swap source and destination so the reply goes back where it came from.
	ip_hdr.saddr, ip_hdr.daddr = ip_hdr.daddr, ip_hdr.saddr
	// The checksum is computed over the header with the checksum field zeroed.
	ip_hdr.csum = 0
	ip_hdr.csum = checksum(ip_hdr.encode(), int(ip_hdr.ihl*4))
	netdev.transimit(eth_hdr, eth_hdr.ethertype, eth_hdr.smac, ip_hdr, ifce)
}
|
package main
import "fmt"
// selectCaseSummary documents (in Chinese) the evaluation and priority
// rules of Go's select statement; it is a runtime string value, so its
// contents are left untranslated.
var selectCaseSummary = `
每个case都必须是一个执行 <- 运算的channel通信
所有channel表达式都会被求值,所有被发送的表达式都会被求值
case和default的路径优先级:case优先级大于default。如果所有case都阻塞,且有default子句,则执行default。如果没有default字句,select将阻塞,直到某个通信可以运行;Go不会重新对channel或值进行求值。
case和case间的路径优先级: case之间优先级相同,如果有多个case都不阻塞,select会随机公平地选出一个执行,其他不会执行。
`
// main demonstrates a non-blocking send: it tries to push onto an already
// full buffered channel and falls through to default instead of blocking.
func main() {
	c := make(chan int, 1)
	c <- 1 // fill the single-slot buffer
	select {
	case c <- 2: // would block: buffer is full
	default:
		fmt.Println("channel is full !")
	}
}
package iafon
import (
"net/http"
"testing"
"time"
)
// TestNewServer checks that the constructor never returns nil.
func TestNewServer(t *testing.T) {
	s := NewServer()
	if s == nil {
		t.Fatal("NewServer() == nil")
	}
}
// TestRunWithoutRoute verifies that running a server with no registered
// routes fails with an error.
func TestRunWithoutRoute(t *testing.T) {
	s := NewServer("127.0.0.1:")
	err := s.Run()
	if err == nil {
		t.Fatal("Run before add routes should return error")
	}
}
// TestRunWithRoute runs a server with one route and closes it from a
// background goroutine; the only acceptable Run error is "http: Server
// closed".
func TestRunWithRoute(t *testing.T) {
	s := NewServer("127.0.0.1:")
	s.Handle("GET", "/", func(http.ResponseWriter, *http.Request) {})
	// Close the server shortly after Run has had a chance to start.
	go func() {
		time.Sleep(time.Millisecond)
		time.Sleep(time.Millisecond)
		s.Close()
	}()
	err := s.Run()
	if err != nil && err.Error() != "http: Server closed" {
		t.Fatal("Run failed")
	}
}
// TestRunServers starts two servers through RunServers and closes both from
// background goroutines; a server-closed error is the expected outcome.
func TestRunServers(t *testing.T) {
	s1 := NewServer("127.0.0.1:")
	s1.Handle("GET", "/", func(http.ResponseWriter, *http.Request) {})
	s2 := NewServer("127.0.0.1:")
	s2.Handle("GET", "/", func(http.ResponseWriter, *http.Request) {})
	// auto close servers
	go func() {
		time.Sleep(time.Millisecond)
		time.Sleep(time.Millisecond)
		s1.Close()
	}()
	go func() {
		time.Sleep(time.Millisecond)
		time.Sleep(time.Millisecond)
		s2.Close()
	}()
	err := RunServers(s1, s2)
	if err != nil && err.Error() != "http: Server closed" {
		t.Fatalf("RunServers failed: %s", err)
	}
	// wait servers to be closed
	time.Sleep(time.Millisecond * 2)
}
// TestRunServersWaitAll is like TestRunServers but uses the variant that
// waits for every server to finish; both close errors are expected to be
// joined with a semicolon.
func TestRunServersWaitAll(t *testing.T) {
	s1 := NewServer("127.0.0.1:")
	s1.Handle("GET", "/", func(http.ResponseWriter, *http.Request) {})
	s2 := NewServer("127.0.0.1:")
	s2.Handle("GET", "/", func(http.ResponseWriter, *http.Request) {})
	// auto close servers
	go func() {
		time.Sleep(time.Millisecond)
		time.Sleep(time.Millisecond)
		s1.Close()
	}()
	// s2 is closed slightly later so WaitAll actually has to wait.
	go func() {
		time.Sleep(time.Millisecond)
		time.Sleep(time.Millisecond * 2)
		s2.Close()
	}()
	err := RunServersWaitAll(s1, s2)
	if err != nil && err.Error() != "http: Server closed;http: Server closed" {
		t.Fatalf("RunServersWaitAll failed: %s", err)
	}
	// wait servers to be closed
	time.Sleep(time.Millisecond * 4)
}
|
package helper
import (
"encoding/json"
"log"
)
// PrettyPrintJSON renders p as an indented (4-space) JSON string. On
// marshal failure it logs the error and returns the empty string.
func PrettyPrintJSON(p interface{}) string {
	out, err := json.MarshalIndent(p, "", "    ")
	if err != nil {
		log.Println("error:", err)
		return ""
	}
	return string(out)
}
|
package azure
import (
"errors"
"strings"
"testing"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/NeowayLabs/klb/tests/lib/azure/fixture"
)
// VM bundles the Azure virtual machines client with the test fixture it
// operates on.
type VM struct {
	client compute.VirtualMachinesClient
	f fixture.F
}
// NewVM creates a VM test helper bound to the fixture's subscription and
// authorized with the fixture's session token.
func NewVM(f fixture.F) *VM {
	vm := &VM{
		client: compute.NewVirtualMachinesClient(f.Session.SubscriptionID),
		f:      f,
	}
	vm.client.Authorizer = f.Session.Token
	return vm
}
// AssertExists checks if VM exists in the resource group and that its
// availability set, size, OS type and first NIC match the expected values.
// Fails the test otherwise. The whole check runs under the fixture's
// retrier, so transient API errors are retried.
func (vm *VM) AssertExists(t *testing.T, name, expectedAvailSet, expectedVMSize, expectedOsType, expectedNic string) {
	vm.f.Retrier.Run(newID("VM", "AssertExists", name), func() error {
		v, err := vm.client.Get(vm.f.ResGroupName, name, "")
		if err != nil {
			return err
		}
		// Every nested pointer is nil-checked before dereference because the
		// SDK leaves unset response fields nil.
		if v.VirtualMachineProperties == nil {
			return errors.New("Field VirtualMachineProperties is nil!")
		}
		properties := *v.VirtualMachineProperties
		if properties.AvailabilitySet == nil {
			return errors.New("Field AvailabilitySet is nil!")
		}
		if properties.AvailabilitySet.ID == nil {
			return errors.New("Field ID is nil!")
		}
		gotAvailSet := *properties.AvailabilitySet.ID
		// NOTE(review): the ID is matched against the upper-cased expected
		// name — presumably Azure returns availability-set IDs upper-cased;
		// confirm against the API.
		if !strings.Contains(gotAvailSet, strings.ToUpper(expectedAvailSet)) {
			return errors.New("AvailSet expected is " + expectedAvailSet + " but got " + gotAvailSet)
		}
		if properties.HardwareProfile == nil {
			return errors.New("Field HardwareProfile is nil!")
		}
		hardwareProfile := *properties.HardwareProfile
		gotVMSize := string(hardwareProfile.VMSize)
		if gotVMSize != expectedVMSize {
			return errors.New("VM Size expected is " + expectedVMSize + " but got " + gotVMSize)
		}
		if properties.StorageProfile == nil {
			return errors.New("Field StorageProfile is nil!")
		}
		if properties.StorageProfile.OsDisk == nil {
			return errors.New("Field OsDisk is nil!")
		}
		osDisk := *properties.StorageProfile.OsDisk
		gotOsType := string(osDisk.OsType)
		if gotOsType != expectedOsType {
			return errors.New("OS type expected is " + expectedOsType + " but got " + gotOsType)
		}
		if properties.NetworkProfile == nil {
			return errors.New("Field NetworkProfile is nil!")
		}
		network := *properties.NetworkProfile.NetworkInterfaces
		if len(network) == 0 {
			return errors.New("Field NetworkInterfaces is nil!")
		}
		// Only the first NIC is checked.
		net := network[0]
		if net.ID == nil {
			return errors.New("Field ID is nil!")
		}
		gotNic := string(*net.ID)
		if !strings.Contains(gotNic, expectedNic) {
			return errors.New("Nic expected is " + expectedNic + " but got " + gotNic)
		}
		return nil
	})
}
|
package thriftclient
import (
"errors"
)
const (
	// Exception type ids used with NewNodeException. Judging by the names,
	// NO_NODE_SERVICE means no node serves the requested service and
	// NO_AVAILABLE_NODE means no node is currently available — semantics
	// are defined by callers; confirm there.
	NO_NODE_SERVICE = 1
	NO_AVAILABLE_NODE = 2
)

// NodeException is a CException that additionally exposes a numeric type
// id and the underlying error.
type NodeException interface {
	CException
	TypeID() int
	Err() error
}
// cNodeException is the concrete NodeException implementation.
type cNodeException struct {
	typeID int
	err error
}

// TypeID returns the numeric exception type id.
func (c *cNodeException) TypeID() int {
	return c.typeID
}

// Err returns the wrapped error.
func (c *cNodeException) Err() error {
	return c.err
}

// Error implements the error interface by delegating to the wrapped error.
func (c *cNodeException) Error() string {
	return c.err.Error()
}

// NewNodeException builds a NodeException with the given type id and a
// fresh error created from message e.
func NewNodeException(t int, e string) NodeException {
	return &cNodeException{typeID: t, err: errors.New(e)}
}
|
package gdash
// Last returns the final element of slice, or nil when the slice is empty.
func Last(slice []interface{}) interface{} {
	n := len(slice)
	if n == 0 {
		return nil
	}
	return slice[n-1]
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shell
import (
"time"
"yunion.io/x/pkg/errors"
"yunion.io/x/onecloud/pkg/multicloud/aliyun"
"yunion.io/x/onecloud/pkg/util/shellutils"
)
// init registers the Aliyun MongoDB shell commands (list/show/delete,
// backup list and sku list) with shellutils.
func init() {
	type MongoDBListOptions struct {
		Id string
		Offset int
		Limit int
	}
	shellutils.R(&MongoDBListOptions{}, "mongodb-list", "List mongodb", func(cli *aliyun.SRegion, args *MongoDBListOptions) error {
		dbs, _, err := cli.GetMongoDBs(args.Limit, args.Offset)
		if err != nil {
			return err
		}
		printList(dbs, 0, 0, 0, []string{})
		return nil
	})
	type MongoDBIdOptions struct {
		ID string
	}
	shellutils.R(&MongoDBIdOptions{}, "mongodb-show", "Show mongodb", func(cli *aliyun.SRegion, args *MongoDBIdOptions) error {
		db, err := cli.GetMongoDB(args.ID)
		if err != nil {
			return errors.Wrapf(err, "GetMongoDB(%s)", args.ID)
		}
		printObject(db)
		return nil
	})
	shellutils.R(&MongoDBIdOptions{}, "mongodb-delete", "Delete mongodb", func(cli *aliyun.SRegion, args *MongoDBIdOptions) error {
		return cli.DeleteMongoDB(args.ID)
	})
	type MongoDBBackupListOptions struct {
		ID string
		START time.Time
		END time.Time
		PageSize int
		PageNumber int
	}
	shellutils.R(&MongoDBBackupListOptions{}, "mongodb-backup-list", "List mongodb backups", func(cli *aliyun.SRegion, args *MongoDBBackupListOptions) error {
		backups, _, err := cli.GetMongoDBBackups(args.ID, args.START, args.END, args.PageSize, args.PageNumber)
		if err != nil {
			return err
		}
		printList(backups, 0, 0, 0, nil)
		return nil
	})
	type MongoDBSkuListOptions struct {
	}
	shellutils.R(&MongoDBSkuListOptions{}, "mongodb-sku-list", "List mongodb skus", func(cli *aliyun.SRegion, args *MongoDBSkuListOptions) error {
		// NOTE(review): "GetchMongoSkus" looks like a typo for
		// "FetchMongoSkus"/"GetMongoSkus", but it is the SDK method's
		// actual name — renaming must happen there first.
		skus, err := cli.GetchMongoSkus()
		if err != nil {
			return err
		}
		printObject(skus)
		return nil
	})
}
|
package awsfirehose
import (
"reflect"
"strings"
firehosePool "github.com/gabrielperezs/streamspooler/firehose"
)
// AWSFirehose is an output adapter backed by a firehose connection pool.
type AWSFirehose struct {
	s *firehosePool.Server
}
// NewOrGet builds an AWSFirehose from a loosely-typed config map: keys are
// matched case-insensitively against firehosePool.Config field names and
// values are copied in when their dynamic type fits the field.
func NewOrGet(cfg map[string]interface{}) (*AWSFirehose, error) {
	c := firehosePool.Config{}
	v := reflect.ValueOf(&c)
	ps := v.Elem()
	typeOfS := ps.Type()

	// Lower-case the incoming keys once so lookups are case-insensitive.
	cpCfg := make(map[string]interface{})
	for k, v := range cfg {
		cpCfg[strings.ToLower(k)] = v
	}

	for i := 0; i < ps.NumField(); i++ {
		newValue, ok := cpCfg[strings.ToLower(typeOfS.Field(i).Name)]
		if !ok {
			continue
		}
		f := ps.Field(i)
		if !f.IsValid() || !f.CanSet() {
			continue
		}
		switch f.Type().String() {
		case "bool":
			// Bug fix: the bool and string cases used unchecked type
			// assertions and panicked on a wrong-typed config value, while
			// the numeric cases below silently skipped mismatches. All
			// cases now skip mismatched types consistently.
			if b, ok := newValue.(bool); ok {
				f.SetBool(b)
			}
		case "string":
			if s, ok := newValue.(string); ok {
				f.SetString(s)
			}
		case "float32", "float64":
			switch n := newValue.(type) {
			case float32:
				f.SetFloat(float64(n))
			case float64:
				f.SetFloat(n)
			}
		case "int":
			switch n := newValue.(type) {
			case int:
				f.SetInt(int64(n))
			case int64:
				f.SetInt(n)
			}
		}
	}

	o := &AWSFirehose{
		s: firehosePool.New(c),
	}
	return o, nil
}
// Send queues the raw payload on the pool's input channel; it blocks while
// the channel buffer is full.
func (o *AWSFirehose) Send(b []byte) {
	o.s.C <- b
}
// Exit shuts down the underlying firehose pool.
func (o *AWSFirehose) Exit() {
	o.s.Exit()
}
|
package main
import (
"github.com/G-Research/armada/cmd/armadactl/cmd"
"github.com/G-Research/armada/internal/common"
)
// main configures command-line logging and dispatches to the armadactl
// root command.
func main() {
	common.ConfigureCommandLineLogging()
	cmd.Execute()
}
|
package main
import "strings"
// Placeholder credentials, assembled character-by-character via
// strings.Join — presumably to dodge naive secret scanners. All values
// here are obvious dummies (example.com address, zeroed ids).
var DNSPOD_EMAIL = strings.Join([]string{"u", "s", "e", "r", "@", "e", "x", "a", "m", "p", "l", "e", ".", "c", "o", "m"}, "")
var DNSPOD_PASSWORD = strings.Join([]string{"e", "x", "a", "m", "p", "l", "e", "p", "a", "s", "s", "w", "o", "r", "d"}, "")
var GITHUB_TOKEN = strings.Join([]string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "f", "0", "1", "2", "3", "4", "5", "6", "7"}, "")
var GIST_ID = strings.Join([]string{"0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0"}, "")
// PROXY_URL is intentionally empty (no proxy).
var PROXY_URL = strings.Join([]string{}, "")
|
package doublePointData
import (
"AlgorithmPractice/src/common/Constant"
)
var (
	// Test fixtures for the two/three-pointer sum exercises: DemoArrayNN is
	// the input, TargetNN the desired sum, AnswerNN the expected triple.
	DemoArray01 = []int{1, 100, 22, 39, 43, 58, 64, 76, 79, 85, 96, 58}
	Target01 = 99
	Answer01 = []int{1, 22, 76}
	Target011 = 101
	Answer011 = []int{1, 22, 79}
	// C.Max / C.Min are sentinel bounds from the shared Constant package.
	Target012 = C.Max
	Answer012 = []int{85, 96, 100}
	Target013 = 187
	Answer013 = []int{1, 85, 100}
	Target014 = 157
	Answer014 = []int{1, 58, 96}
	// Arrays too short to form a triple expect the C.Min sentinel answer.
	DemoArray02 = []int{1}
	Target02 = 99
	Answer02 = []int{C.Min, C.Min, C.Min}
	DemoArray03 = []int{1, 23}
	Target03 = 99
	Answer03 = []int{C.Min, C.Min, C.Min}
)
|
package tests
import (
"sync"
"testing"
ravendb "github.com/ravendb/ravendb-go-client"
"github.com/stretchr/testify/assert"
)
// ravendb10566_shouldBeAvailable verifies that metadata set on a document
// before SaveChanges is visible to an after-save-changes listener.
func ravendb10566_shouldBeAvailable(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()

	var name string
	// The listener may run on another goroutine, so guard `name`.
	var mu sync.Mutex
	afterSaveChanges := func(event *ravendb.AfterSaveChangesEventArgs) {
		meta := event.GetDocumentMetadata()
		nameI, ok := meta.Get("Name")
		assert.True(t, ok)
		mu.Lock()
		defer mu.Unlock()
		name = nameI.(string)
	}
	store.AddAfterSaveChangesListener(afterSaveChanges)
	{
		session := openSessionMust(t, store)
		user := &User{}
		user.setName("Oren")
		err = session.StoreWithID(user, "users/oren")
		assert.NoError(t, err)
		metadata, err := session.Advanced().GetMetadataFor(user)
		assert.NoError(t, err)
		// This metadata entry is what the listener should observe.
		metadata.Put("Name", "FooBar")
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	assert.Equal(t, name, "FooBar")
}
// TestRavenDB10566 is the test entry point; it sets up and tears down the
// RavenDB test driver around the scenario above.
func TestRavenDB10566(t *testing.T) {
	driver := createTestDriver(t)
	destroy := func() { destroyDriver(t, driver) }
	defer recoverTest(t, destroy)

	// matches Java's order
	ravendb10566_shouldBeAvailable(t, driver)
}
|
package main
import (
"sync"
"time"
"github.com/go-redis/redis"
"github.com/mingjingc/redlock-go"
)
// main exercises the distributed lock: 100 goroutines race to acquire the
// "foo" lock across three redis nodes, hold it briefly, then release it.
func main() {
	dml := redlock.New(redis.NewClient(&redis.Options{
		Addr: ":6379",
	}), redis.NewClient(&redis.Options{
		Addr: ":6380",
	}), redis.NewClient(&redis.Options{
		Addr: ":6381",
	}))

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if mylock, ok := dml.Lock("foo", 50); ok {
				// Hold the lock for less than its TTL before releasing.
				time.Sleep(time.Millisecond * 40)
				dml.Unlock(mylock)
			}
		}()
	}
	wg.Wait()
}
|
package models
// BotUser holds the identity of a chat user the bot talks to: display
// names plus the chat id messages are sent to.
type BotUser struct {
	UserName string
	FirstName string
	ChatID int64
}
package main
import (
"fmt"
"time"
)
// main demonstrates converting between time.Time and epoch-based integers.
func main() {
	now := time.Now()

	// Unix() yields seconds since the epoch, UnixNano() nanoseconds.
	secs := now.Unix()
	nanos := now.UnixNano()
	fmt.Println(now)

	// Derive milliseconds from the nanosecond count.
	millis := nanos / int64(time.Millisecond)
	fmt.Println(secs)
	fmt.Println(millis)
	fmt.Println(nanos)

	// And back again: time.Unix builds a Time from seconds or nanoseconds.
	fmt.Println(time.Unix(secs, 0))
	fmt.Println(time.Unix(0, nanos))
}
|
package main
// Name is the program name.
const Name string = "crondoc"

// Version is the version string of this build.
const Version string = "0.1.1"
|
package leetcode
// getRow1 returns row rowIndex (0-based) of Pascal's triangle, built in
// place: each pass appends a trailing 1 and then updates the interior
// entries from right to left so earlier values are not clobbered.
func getRow1(rowIndex int) []int {
	row := make([]int, 0, rowIndex+1)
	for i := 0; i <= rowIndex; i++ {
		row = append(row, 1)
		for j := len(row) - 2; j >= 1; j-- {
			row[j] += row[j-1]
		}
	}
	return row
}
|
package h2mux
import (
"bytes"
"io"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// AssertIOReturnIsGood builds a checker for the (n, err) pair returned by
// Read/Write-style calls: the test fails unless n equals expected and err
// is nil.
func AssertIOReturnIsGood(t *testing.T, expected int) func(int, error) {
	return func(actual int, err error) {
		if actual != expected {
			t.Fatalf("Expected %d bytes, got %d", expected, actual)
		}
		if err != nil {
			t.Fatalf("Unexpected error %s", err)
		}
	}
}
// TestSharedBuffer performs a simple write-then-read round trip through the
// shared buffer.
func TestSharedBuffer(t *testing.T) {
	b := NewSharedBuffer()
	testData := []byte("Hello world")
	AssertIOReturnIsGood(t, len(testData))(b.Write(testData))
	bytesRead := make([]byte, len(testData))
	AssertIOReturnIsGood(t, len(testData))(b.Read(bytesRead))
}
// TestSharedBufferBlockingRead verifies that Read blocks until data is
// written, and returns whatever is available rather than waiting to fill
// the whole destination slice.
func TestSharedBufferBlockingRead(t *testing.T) {
	b := NewSharedBuffer()
	testData1 := []byte("Hello")
	testData2 := []byte(" world")
	result := make(chan []byte)
	go func() {
		// The destination is larger than either write; each Read should
		// return one write's worth of bytes.
		bytesRead := make([]byte, len(testData1)+len(testData2))
		nRead, err := b.Read(bytesRead)
		AssertIOReturnIsGood(t, len(testData1))(nRead, err)
		result <- bytesRead[:nRead]
		nRead, err = b.Read(bytesRead)
		AssertIOReturnIsGood(t, len(testData2))(nRead, err)
		result <- bytesRead[:nRead]
	}()
	// Nothing has been written yet, so the reader must still be blocked.
	time.Sleep(time.Millisecond * 250)
	select {
	case <-result:
		t.Fatalf("read returned early")
	default:
	}
	AssertIOReturnIsGood(t, len(testData1))(b.Write([]byte(testData1)))
	select {
	case r := <-result:
		assert.Equal(t, testData1, r)
	case <-time.After(time.Second):
		t.Fatalf("read timed out")
	}
	AssertIOReturnIsGood(t, len(testData2))(b.Write([]byte(testData2)))
	select {
	case r := <-result:
		assert.Equal(t, testData2, r)
	case <-time.After(time.Second):
		t.Fatalf("read timed out")
	}
}
// TestSharedBufferConcurrentReadWrite streams 256 blocks of every size from
// 1 to 256 bytes through the buffer with one writer and one reader running
// concurrently, then compares the full byte streams.
// This is quite slow under the race detector.
func TestSharedBufferConcurrentReadWrite(t *testing.T) {
	b := NewSharedBuffer()
	var expectedResult, actualResult bytes.Buffer
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		block := make([]byte, 256)
		for i := range block {
			block[i] = byte(i)
		}
		for blockSize := 1; blockSize <= 256; blockSize++ {
			for i := 0; i < 256; i++ {
				expectedResult.Write(block[:blockSize])
				n, err := b.Write(block[:blockSize])
				if n != blockSize || err != nil {
					t.Errorf("write error: %d %s", n, err)
					return
				}
			}
		}
		wg.Done()
	}()
	go func() {
		block := make([]byte, 256)
		// Change block sizes in opposition to the write thread, to test blocking for new data.
		for blockSize := 256; blockSize > 0; blockSize-- {
			for i := 0; i < 256; i++ {
				n, err := io.ReadFull(b, block[:blockSize])
				if n != blockSize || err != nil {
					t.Errorf("read error: %d %s", n, err)
					return
				}
				actualResult.Write(block[:blockSize])
			}
		}
		wg.Done()
	}()
	wg.Wait()
	if bytes.Compare(expectedResult.Bytes(), actualResult.Bytes()) != 0 {
		t.Fatal("Result diverged")
	}
}
// TestSharedBufferClose verifies that after Close the buffered data can
// still be drained and that a further Read yields io.EOF with no bytes.
func TestSharedBufferClose(t *testing.T) {
	b := NewSharedBuffer()
	testData := []byte("Hello world")
	AssertIOReturnIsGood(t, len(testData))(b.Write(testData))
	err := b.Close()
	if err != nil {
		t.Fatalf("unexpected error from Close: %s", err)
	}
	// Data written before Close must remain readable.
	bytesRead := make([]byte, len(testData))
	AssertIOReturnIsGood(t, len(testData))(b.Read(bytesRead))
	n, err := b.Read(bytesRead)
	if n != 0 {
		t.Fatalf("extra bytes received: %d", n)
	}
	if err != io.EOF {
		t.Fatalf("expected EOF, got %s", err)
	}
}
|
package rpc

// RawTX represents a raw transaction for creation using the RPC interface
// of a Methuselah node.
type RawTX struct {
	Data []byte `json:"data"`
	Version uint8 `json:"version"`
}
|
package main
import (
"cuthkv/cache"
"os"
"os/signal"
"syscall"
)
// main starts the cache in the background and blocks until an interrupt or
// terminate signal arrives, then exits with status 1.
func main() {
	interrupt := make(chan os.Signal, 1)
	// Fix: os.Kill (SIGKILL) cannot be caught or handled, so registering it
	// with signal.Notify was a no-op; only catchable signals remain.
	signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM)
	go cache.InitCache()
	<-interrupt
	os.Exit(1)
}
|
package plugins
import (
"encoding/json"
"fmt"
"github.com/pkg/errors"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog"
"strings"
)
const (
	// CustomConfigmapPluginName is the key this plugin registers under.
	CustomConfigmapPluginName = "CustomConfigmap"
)
// init registers the custom-configmap transform to run at pre-install time.
func init() {
	register(CustomConfigmapPluginName, &WalmPluginRunner{
		Run: CustomConfigmapTransform,
		Type: Pre_Install,
	})
}
// AddConfigmapObject describes one configmap to create and mount into a
// workload: which resource/container it targets (or all of them), the
// mount path, and the items it carries.
type AddConfigmapObject struct {
	ApplyAllResources bool `json:"applyAllResources"`
	Kind string `json:"kind"`
	ResourceName string `json:"resourceName"`
	VolumeMountPath string `json:"volumeMountPath"`
	ContainerName string `json:"containerName"`
	Items []*AddConfigItem `json:"items"`
}

// AddConfigItem is a single configmap entry: the data plus the volume-mount
// subPath it is exposed at and its file mode.
type AddConfigItem struct {
	ConfigMapData string `json:"configMapData"`
	ConfigMapVolumeMountsSubPath string `json:"configMapVolumeMountsSubPath"`
	ConfigMapMode int32 `json:"configMapMode"`
}

// CustomConfigmapArgs is the JSON argument payload for this plugin.
type CustomConfigmapArgs struct {
	ConfigmapToAdd map[string]*AddConfigmapObject `json:"configmapToAdd" description:"add extra configmap"`
	ConfigmapToSkipNames []string `json:"configmapToSkipNames" description:"upgrade skip to render configmap"`
	ConfigmapSkipAll bool `json:"configmapSkipAll" description:"upgrade skip all configmap resources"`
}
// CustomConfigmapTransform is the plugin entry point: it mounts the
// configured configmaps into workload resources, marks skipped configmaps
// with the upgrade-policy annotation, and appends the newly built configmap
// objects to the release's resources.
func CustomConfigmapTransform(context *PluginContext, args string) (err error) {
	if args == "" {
		klog.Infof("ignore labeling pod, because plugin args is empty")
		return nil
	} else {
		klog.Infof("label pod args : %s", args)
	}
	customConfigmapArgs := &CustomConfigmapArgs{}
	err = json.Unmarshal([]byte(args), customConfigmapArgs)
	if err != nil {
		klog.Infof("failed to unmarshal plugin args : %s", err.Error())
		return err
	}
	for _, resource := range context.Resources {
		unstructuredObj := resource.(*unstructured.Unstructured)
		switch resource.GetObjectKind().GroupVersionKind().Kind {
		case "Job", "Deployment", "DaemonSet", "StatefulSet":
			// Mount every configured configmap into matching workloads.
			for configMapName, addConfigMapObj := range customConfigmapArgs.ConfigmapToAdd {
				err = mountConfigMap(unstructuredObj, context.R.Name, configMapName, addConfigMapObj)
				if err != nil {
					klog.Errorf("mountConfigMap %s %s %v error %v", context.R.Name, configMapName, *addConfigMapObj, err)
					return err
				}
			}
		// Bug fix: the Kubernetes kind is spelled "ConfigMap" (as this file
		// itself emits via convertK8SConfigMap); the previous "Configmap"
		// case could never match, so skipped configmaps were never annotated.
		case "ConfigMap":
			if isSkippedConfigMap(unstructuredObj.GetName(), customConfigmapArgs) {
				err = addNestedStringMap(unstructuredObj.Object, map[string]string{ResourceUpgradePolicyAnno: UpgradePolicy}, "metadata", "annotations")
				if err != nil {
					klog.Errorf("failed add nested string map : %s", err.Error())
					return err
				}
			}
		}
	}
	// Build the new configmap objects and add them to the release.
	for configMapName, addObj := range customConfigmapArgs.ConfigmapToAdd {
		configMapObj, err := convertK8SConfigMap(context.R.Name, context.R.Namespace, configMapName, addObj)
		if err != nil {
			klog.Errorf("add configMap plugin error %v", err)
			continue
		}
		unstructuredObj, err := convertToUnstructured(configMapObj)
		if err != nil {
			klog.Infof("failed to convertToUnstructured : %v", *configMapObj)
			return err
		}
		context.Resources = append(context.Resources, unstructuredObj)
	}
	return
}
// isSkippedConfigMap reports whether the named configmap should skip
// re-rendering on upgrade: either all configmaps are skipped, or the name
// appears in the explicit skip list.
func isSkippedConfigMap(name string, args *CustomConfigmapArgs) bool {
	if args.ConfigmapSkipAll {
		return true
	}
	for _, skipped := range args.ConfigmapToSkipNames {
		if skipped == name {
			return true
		}
	}
	return false
}
// convertK8SConfigMap builds the v1.ConfigMap object backing an
// AddConfigmapObject. Its name is derived from the configmap and release
// names; each item with a subPath contributes one data entry keyed by the
// subPath's final path segment.
func convertK8SConfigMap(releaseName, releaseNamespace, configMapName string, addObj *AddConfigmapObject) (*v1.ConfigMap, error) {
	if len(configMapName) == 0 || len(configMapName) > qualifiedNameMaxLength || !qualifiedNameRegexp.MatchString(configMapName) {
		// Idiom fix: errors.Errorf replaces errors.New(fmt.Sprintf(...)) —
		// this package already uses errors.Errorf elsewhere — and the
		// "invaild" typo in the message is corrected.
		return nil, errors.Errorf("invalid configmap name %s", configMapName)
	}
	configMapObj := &v1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			Kind: "ConfigMap",
			APIVersion: "v1",
		},
	}
	configMapObj.SetName(fmt.Sprintf("walmplugin-%s-%s-cm", configMapName, releaseName))
	configMapObj.SetNamespace(releaseNamespace)
	configMapObj.SetAnnotations(map[string]string{
		"transwarp/walmplugin.custom.configmap": "true",
	})
	configMapObj.SetLabels(map[string]string{
		"release": releaseName,
		"heritage": "walmplugin",
	})
	configMapObj.Data = make(map[string]string, 0)
	for _, item := range addObj.Items {
		if item.ConfigMapVolumeMountsSubPath != "" {
			// Key the data by the last path segment of the subPath.
			token := strings.Split(item.ConfigMapVolumeMountsSubPath, "/")
			configMapObj.Data[token[len(token) - 1]] = item.ConfigMapData
		}
	}
	return configMapObj, nil
}
// splitConfigmapVolumes builds the pod Volume and the container VolumeMount
// for one configured configmap. Both are named after the generated
// configmap; each item maps its data key (last subPath segment) to the full
// subPath inside the mount.
func splitConfigmapVolumes(releaseName, configMapName string, addConfigMapObj *AddConfigmapObject) (v1.Volume, v1.VolumeMount, error) {
	// ToDo: Add Params Validate
	//ConfigMapVolumeSource
	configMapVolume := v1.Volume{
		Name: fmt.Sprintf("walmplugin-%s-%s-cm", configMapName, releaseName),
		VolumeSource: v1.VolumeSource{
			ConfigMap: &v1.ConfigMapVolumeSource{
				LocalObjectReference: v1.LocalObjectReference{
					Name: fmt.Sprintf("walmplugin-%s-%s-cm", configMapName, releaseName),
				},
			},
		},
	}
	for _, addConfigItem := range addConfigMapObj.Items {
		// Key must match the data key produced by convertK8SConfigMap.
		token := strings.Split(addConfigItem.ConfigMapVolumeMountsSubPath, "/")
		configMapVolume.VolumeSource.ConfigMap.Items = append(configMapVolume.VolumeSource.ConfigMap.Items, v1.KeyToPath{
			Key: token[len(token) - 1],
			Path: addConfigItem.ConfigMapVolumeMountsSubPath,
		})
	}
	configMapVolumeMounts := v1.VolumeMount{
		Name: fmt.Sprintf("walmplugin-%s-%s-cm", configMapName, releaseName),
		MountPath: addConfigMapObj.VolumeMountPath,
	}
	return configMapVolume, configMapVolumeMounts, nil
}
// mountConfigMap adds the configmap's volume to the workload's pod spec and
// its volume mount to every container, unless the addConfigMapObj targets a
// specific kind/name that this resource doesn't match. Fails when another
// volume mount already occupies the same mount path.
func mountConfigMap(unstructuredObj *unstructured.Unstructured, releaseName, configMapName string, addConfigMapObj *AddConfigmapObject) error {
	resourceKind := unstructuredObj.GetKind()
	resourceName := unstructuredObj.GetName()
	// Unless the config applies to all resources, only the exact kind+name
	// match is mutated.
	if !addConfigMapObj.ApplyAllResources {
		if addConfigMapObj.Kind != resourceKind || addConfigMapObj.ResourceName != resourceName {
			return nil
		}
	}
	configMapVolume, configMapVolumeMounts, err := splitConfigmapVolumes(releaseName, configMapName, addConfigMapObj)
	if err != nil {
		klog.Errorf("failed to split config map volumes : %s", err.Error())
		return err
	}
	err = addNestedSliceObj(unstructuredObj.Object, []interface{}{
		configMapVolume,
	}, "spec", "template", "spec", "volumes")
	if err != nil {
		klog.Errorf("failed to add nested slice objs : %s", err.Error())
		return err
	}
	containers, found, err := unstructured.NestedSlice(unstructuredObj.Object, "spec", "template", "spec", "containers")
	if err != nil {
		klog.Errorf("failed to get containers %s", err.Error())
		return err
	}
	// Round-trip the untyped containers through JSON into typed
	// []v1.Container so existing mounts can be inspected.
	var k8sContainers []v1.Container
	containersData, err := json.Marshal(containers)
	if err != nil {
		klog.Errorf("failed to marshal containers type interface to []byte : %s", err.Error())
		return err
	}
	err = json.Unmarshal(containersData, &k8sContainers)
	if err != nil {
		klog.Errorf("failed to unmarshal containers type []byte to []corev1.Container : %s", err.Error())
		return err
	}
	existMountPaths := getExistMountPaths(k8sContainers)
	if existMountPaths[configMapVolumeMounts.MountPath] != "" {
		// NOTE(review): the first %s receives the volume mount NAME and the
		// second the name occupying the path, while the wording suggests
		// the first should be the mount path — confirm intended message.
		return errors.Errorf("volumeMountPath %s already exist in containers, duplicated with volume mount name %s", configMapVolumeMounts.Name, existMountPaths[configMapVolumeMounts.MountPath])
	}
	if found {
		// Append the mount to every container, then write the slice back.
		for _, container := range containers {
			configMapVolumeMountsInterface := []interface{}{}
			configMapVolumeMountsInterface = append(configMapVolumeMountsInterface, configMapVolumeMounts)
			err = addNestedSliceObj(container.(map[string]interface{}), configMapVolumeMountsInterface, "volumeMounts")
			if err != nil {
				klog.Errorf("failed to add nested slice obj : %s", err.Error())
				return err
			}
		}
		err = unstructured.SetNestedSlice(unstructuredObj.Object, containers, "spec", "template", "spec", "containers")
		if err != nil {
			klog.Errorf("failed to set nested slice : %s", err.Error())
			return err
		}
	}
	return nil
}
// getExistMountPaths returns a map from mount path to the name of the
// volume mount occupying it, collected across all given containers.
func getExistMountPaths(containers []v1.Container) map[string]string {
	paths := map[string]string{}
	for i := range containers {
		for _, mount := range containers[i].VolumeMounts {
			paths[mount.MountPath] = mount.Name
		}
	}
	return paths
}
|
// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package security
import (
"context"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"syscall"
"chromiumos/tast/testing"
)
// init registers the LogPerms test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: LogPerms,
		Desc: "Checks permissions of logging-related files",
		Contacts: []string{
			"jorgelo@chromium.org", // Security team
			"chromeos-security@google.com",
		},
		Attr: []string{"group:mainline"},
	})
}
// LogPerms verifies the syslog user/group relationship and the ownership
// and mode bits of /var/log and /var/log/messages, then saves a directory
// listing to the test's output directory to aid debugging failures.
func LogPerms(ctx context.Context, s *testing.State) {
	u, err := user.Lookup("syslog")
	if err != nil {
		s.Fatal("No syslog user: ", err)
	}
	g, err := user.LookupGroup("syslog")
	if err != nil {
		s.Fatal("No syslog group: ", err)
	}
	if u.Gid != g.Gid {
		s.Errorf("syslog user's primary group (%s) isn't syslog (%s)", u.Gid, g.Gid)
	}
	if fi, err := os.Stat("/var/log"); err != nil {
		s.Error("Couldn't stat /var/log: ", err)
	} else {
		// The sticky bit prevents users deleting each other's log files.
		if fi.Mode()&os.ModeSticky == 0 {
			s.Error("/var/log doesn't have sticky bit set")
		}
		if gid := fi.Sys().(*syscall.Stat_t).Gid; strconv.Itoa(int(gid)) != g.Gid {
			s.Errorf("/var/log not owned by syslog group (got %d; want %s)", gid, g.Gid)
		}
	}
	if fi, err := os.Stat("/var/log/messages"); err != nil {
		// The file is briefly missing during log rotation.
		if !os.IsNotExist(err) {
			s.Error("Couldn't stat /var/log/messages: ", err)
		}
	} else {
		uid := fi.Sys().(*syscall.Stat_t).Uid
		// The file is sometimes owned by root for unknown reasons on DUTs in the lab: https://crbug.com/813579
		if strconv.Itoa(int(uid)) != u.Uid && uid != 0 {
			s.Errorf("/var/log/messages not owned by syslog or root user (got %d; syslog is %s)", uid, u.Uid)
		}
	}
	// Dump the listing to a file to help investigate failures.
	b, err := exec.Command("ls", "-la", "/var/log").CombinedOutput()
	if err != nil {
		s.Error("ls failed: ", err)
	}
	if err = ioutil.WriteFile(filepath.Join(s.OutDir(), "ls.txt"), b, 0644); err != nil {
		s.Error("Failed writing log listing: ", err)
	}
}
|
package test
import (
"fmt"
"go_training/model"
"testing"
)
// TestPerhitunganMap exercises the map-based calculators for a rectangular
// box (balok) and a triangle (segitiga) against known inputs and outputs.
// Fix: both subtests previously carried the identical (and misleading) name
// "test untuk fungsi penjumlahan ", which made failures ambiguous and forced
// the framework to auto-suffix the second name; they now describe what they
// actually test. The per-case fmt.Printf noise was also removed — t.Fatalf
// already reports the values on failure.
func TestPerhitunganMap(t *testing.T) {
	t.Run("test untuk fungsi perhitungan balok", func(t *testing.T) {
		var testPenjumlahan = []struct {
			s           string
			P           int
			L           int
			T           int
			hasilMaunya interface{}
		}{
			{s: "Keliling", P: 2, L: 2, T: 2, hasilMaunya: 24},
			{s: "Luas", P: 2, L: 3, T: 4, hasilMaunya: 52},
			{s: "Volume", P: 5, L: 4, T: 3, hasilMaunya: 60},
		}
		for _, input := range testPenjumlahan {
			dapatHasil, errorDapatnya := model.PerhitunganMapBalok(input.s, input.P, input.L, input.T)
			if dapatHasil != input.hasilMaunya || errorDapatnya != nil {
				t.Fatalf("Hasil Maunya %v, Hasil Dapatnya %v, errornya %v\n", input.hasilMaunya, dapatHasil, errorDapatnya)
			}
		}
	})
	t.Run("test untuk fungsi perhitungan segitiga", func(t *testing.T) {
		var testSegitiga = []struct {
			s           string
			Alasnya     int
			Sisinya     int
			hasilMaunya interface{}
		}{
			{s: "Keliling", Alasnya: 12, Sisinya: 10, hasilMaunya: 32},
			{s: "Luas", Alasnya: 12, Sisinya: 10, hasilMaunya: 48.0},
			{s: "Tinggi", Alasnya: 12, Sisinya: 10, hasilMaunya: 8.0},
		}
		for _, input := range testSegitiga {
			dapatHasil, errorDapatnya := model.PerhitunganMapSegitiga(input.s, input.Alasnya, input.Sisinya)
			if dapatHasil != input.hasilMaunya || errorDapatnya != nil {
				t.Fatalf("Hasil Maunya %v, Hasil Dapatnya %v, errornya %v\n", input.hasilMaunya, dapatHasil, errorDapatnya)
			}
		}
	})
}
// TestStringer verifies the Mahasiswa map formatter against expected strings.
func TestStringer(t *testing.T) {
	t.Run("Testing The Stringer ", func(t *testing.T) {
		cases := []struct {
			bio         map[string]model.Mahasiswa
			hasilMaunya string
		}{
			{
				bio:         map[string]model.Mahasiswa{"Nama": {Universitas: "", SMA: "", SMP: "", SD: ""}},
				hasilMaunya: "Nama Saya kuliah di , SMA saya di , SMP saya di , SD saya di ",
			},
			{
				bio:         map[string]model.Mahasiswa{"adae": {Universitas: "UA", SMA: "SMAN3", SMP: "SMPN5", SD: "SDAT"}},
				hasilMaunya: "adae Saya kuliah di UA, SMA saya di SMAN3, SMP saya di SMPN5, SD saya di SDAT",
			},
		}
		for _, tc := range cases {
			got := model.PerhitunganMapMahasiswa(tc.bio)
			if got != tc.hasilMaunya {
				t.Fatalf("Got: %v,\n Want: %v,\n", got, tc.hasilMaunya)
			}
			fmt.Printf("Got: %v, Want: %v\n", got, tc.hasilMaunya)
		}
	})
}
|
package models
import (
"context"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
// CustomerSubOrder is the persistent shape of one line item of a customer
// sales order (one product within an order).
type CustomerSubOrder struct {
	SubOrderId      int64   `json:"sub_order_id" bson:"sub_order_id"` // sub-order id
	SubOrderSn      string  `json:"sub_order_sn" bson:"sub_order_sn"` // sub-order serial number
	ComID           int64   `json:"com_id" bson:"com_id"`             // company id
	OrderSN         string  `json:"order_sn" bson:"order_sn"`         // parent order serial number
	OrderId         int64   `json:"order_id" bson:"order_id"`         // parent order id
	CustomerID      int64   `json:"customer_id" bson:"customer_id"`
	CustomerName    string  `json:"customer_name" bson:"customer_name"`
	ProductID       int64   `json:"product_id" bson:"product_id"`
	Product         string  `json:"product" bson:"product"`                   // product name
	Contacts        string  `json:"contacts" bson:"contacts"`                 // customer's contact person
	Receiver        string  `json:"receiver" bson:"receiver"`                 // receiver for this sub-order
	ReceiverPhone   string  `json:"receiver_phone" bson:"receiver_phone"`     // receiver's phone number
	Price           float64 `json:"price" bson:"price"`                       // unit price of this line item
	Amount          int64   `json:"amount" bson:"amount"`                     // total quantity purchased
	WarehouseAmount int64   `json:"warehouse_amount" bson:"warehouse_amount"` // quantity shipped from the warehouse
	SupplierAmount  int64   `json:"supplier_amount" bson:"supplier_amount"`   // quantity shipped by the supplier
	ExtraAmount     float64 `json:"extra_amount" bson:"extra_amount"`         // discount amount for this sub-order
	Delivery        string  `json:"delivery" bson:"delivery"`                 // courier / delivery method
	DeliveryCode    string  `json:"delivery_code" bson:"delivery_code"`       // tracking number
	OrderTime       int64   `json:"order_time" bson:"order_time"`             // time the order was placed
	ShipTime        int64   `json:"ship_time" bson:"ship_time"`               // shipping time
	ConfirmTime     int64   `json:"confirm_time" bson:"confirm_time"`         // order confirmation time
	PayTime         int64   `json:"pay_time" bson:"pay_time"`                 // order settlement time
	FinishTime      int64   `json:"finish_time" bson:"finish_time"`           // supply completion time
	Status          int64   `json:"status" bson:"status"`                     // order status
	IsPrepare       bool    `json:"is_prepare" bson:"is_prepare"`             // whether stock preparation is complete
}
// getCustomerSubOrderCollection returns the MongoDB collection holding
// customer sub-order documents.
func getCustomerSubOrderCollection() *mongo.Collection {
	const collectionName = "customer_suborder"
	return Client.Collection(collectionName)
}
// CustomerSubOrderResult wraps the list of sub-orders returned by queries.
type CustomerSubOrderResult struct {
	CustomerSubOrder []CustomerSubOrder `json:"customer_sub_order"`
}
// SelectCustomerSubOrderWithCondition fetches every sub-order matching the
// given BSON filter and collects them into a CustomerSubOrderResult.
// Fixes over the original: the cursor is now closed on every path, and the
// cursor's iteration error (cur.Err) is surfaced instead of being dropped.
func SelectCustomerSubOrderWithCondition(subOrderFilter bson.M) (*CustomerSubOrderResult, error) {
	ctx := context.TODO()
	cur, err := getCustomerSubOrderCollection().Find(ctx, subOrderFilter)
	if err != nil {
		return nil, err
	}
	// Release the server-side cursor even on early return.
	defer cur.Close(ctx)
	res := new(CustomerSubOrderResult)
	for cur.Next(ctx) {
		var c CustomerSubOrder
		if err := cur.Decode(&c); err != nil {
			return nil, err
		}
		res.CustomerSubOrder = append(res.CustomerSubOrder, c)
	}
	// Next returning false may mean exhaustion OR a network/decode failure.
	if err := cur.Err(); err != nil {
		return nil, err
	}
	return res, nil
}
// MultiplyInsertCustomerSubOrder bulk-inserts the given sub-orders (each
// element must be a CustomerSubOrder or a bson-marshalable equivalent).
// NOTE(review): the name probably means "MultiInsert"; kept for compatibility.
func MultiplyInsertCustomerSubOrder(subOrders []interface{}) error {
	_, err := getCustomerSubOrderCollection().InsertMany(context.TODO(), subOrders)
	return err
}
// SelectCustomerSubOrderByComIDAndOrderSN returns all sub-orders belonging to
// the given company and parent order serial number.
// Fix: removed the stray trailing semicolon and non-gofmt signature spacing.
func SelectCustomerSubOrderByComIDAndOrderSN(comID int64, orderSN string) (*CustomerSubOrderResult, error) {
	filter := bson.M{
		"com_id":   comID,
		"order_sn": orderSN,
	}
	return SelectCustomerSubOrderWithCondition(filter)
}
// UpdateCustomerSubOrderPrepared marks the sub-order identified by company id
// and sub-order id as stock-prepared (is_prepare = true).
func UpdateCustomerSubOrderPrepared(comID, subOrderID int64) (*mongo.UpdateResult, error) {
	filter := bson.M{"com_id": comID, "sub_order_id": subOrderID}
	update := bson.M{"$set": bson.M{"is_prepare": true}}
	return getCustomerSubOrderCollection().UpdateOne(context.TODO(), filter, update)
}
|
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * ceftb xir
 * ==================
 * This file defines the network modeling intermediate representation (xir)
 * data structures. xir is a simple network representation.
 *
 * The primary components are:
 *   - (sub)networks
 *   - nodes
 *   - links
 *
 * Everything is extensible through a property map member called Props.
 *
 * The interconnection model supports node-neighbor traversal as well as
 * link-endpoint traversal.
 *
 * Endpoints are the glue that binds nodes to links. Everything is also
 * upwards traversable. You can follow pointers from an endpoint to a
 * parent node, and then to a parent network, and then to another parent
 * network etc...
 *
 * Serialization to JSON is inherent. It does not include the traversal
 * mechanisms as this would create recursively repetitive output.
 *
 * Copyright ceftb 2018 - All Rights Reserved
 * License: Apache 2.0
 *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
package xir
import (
"encoding/json"
"fmt"
"github.com/satori/go.uuid"
"io/ioutil"
"sort"
)
// Data structures ------------------------------------------------------------
/*
guest : {
"name": "gx",
"props": {
"hardware": {
"memory=": "4G",
},
"software": [
{ "name=": "debian-stable" }
]
}
}
host: {
"name": "hx",
"props": {
"hardware": {
"memory+": "32G",
"arch=": "x86_64"
}
}
}
software: [
{
"kind": "os",
"name": "debian-stable",
"requirements": {
"arch?": ["x86_64", "x86"]
}
}
]
*/
// Props is a free-form property map used to extend nets, nodes, links and
// endpoints with arbitrary metadata.
type Props map[string]interface{}

// Prop is a single key/value pair extracted from a Props map.
type Prop struct {
	Key   string
	Value interface{}
}

// SortedProps is a key-ordered view of a Props map (see the sort.Interface
// methods below).
type SortedProps []Prop

// Vec2 holds a 2D position plus delta (DX/DY) and center (CX/CY) components.
type Vec2 struct {
	X, Y   float64
	DX, DY float64
	CX, CY float64
}

// Bounds is an axis-aligned bounding box.
type Bounds struct {
	Left, Right, Top, Bottom float64
}

// Net is a (sub)network: a collection of nodes, links and nested networks.
// Parent is excluded from JSON to avoid recursively repetitive output.
type Net struct {
	Id     string  `json:"id"`
	Nodes  []*Node `json:"nodes"`
	Links  []*Link `json:"links"`
	Nets   []*Net  `json:"nets"`
	Props  Props   `json:"props"`
	Parent *Net    `json:"-"`
	Loc    Vec2
}

// Node is a network element exposing endpoints that links attach to.
type Node struct {
	Id        string      `json:"id"`
	Endpoints []*Endpoint `json:"endpoints"`
	Parent    *Net        `json:"-"`
	Props     Props       `json:"props"`
	Visited   bool        `json:"-"` // scratch flag for traversal algorithms
	Loc       Vec2
	Pos       Point // Point is declared elsewhere in this package
	Vel       Point
}

// Link connects two or more endpoints.
type Link struct {
	Id        string      `json:"id"`
	Endpoints []*Endpoint `json:"endpoints"`
	Props     Props       `json:"props"`
}

// Endpoint glues a node to a link. Neighbors and Parent are traversal-only
// state, rebuilt after deserialization (see PrepNetwork/LinkNetwork).
type Endpoint struct {
	Id        string               `json:"id"`
	Props     Props                `json:"props"`
	Neighbors map[string]*Neighbor `json:"-"`
	Parent    *Node                `json:"-"`
	Loc       Vec2
}

// Software describes a software artifact and the properties it requires of
// its host.
type Software struct {
	Props        Props `json:"props"`
	Requirements Props `json:"target"`
}

// Neighbor is one hop from an endpoint: the link traversed and the endpoint
// on the far side.
type Neighbor struct {
	Link     *Link
	Endpoint *Endpoint
}
// Props methods --------------------------------------------------------------
// Json renders the property map as indented JSON; a marshal error yields an
// empty string.
func (p *Props) Json() string {
	encoded, _ := json.MarshalIndent(p, "", " ")
	return string(encoded)
}
// Net methods ----------------------------------------------------------------
// Factory methods ~~~
// NewNet creates an empty root network with a fresh UUID and an initialized
// property map.
func NewNet() *Net {
	return &Net{
		Id:    uuid.NewV4().String(),
		Props: make(Props),
	}
}
// Net creates a new empty subnetwork, attaches it to n and returns it.
func (n *Net) Net() *Net {
	child := &Net{
		Id:     uuid.NewV4().String(),
		Props:  make(Props),
		Parent: n,
	}
	n.Nets = append(n.Nets, child)
	return child
}
// AddNet attaches an existing network to n as a subnetwork.
func (n *Net) AddNet(net *Net) {
	n.Nets = append(n.Nets, net)
	net.Parent = n
}
// Node creates a new node with a fresh UUID, attaches it to n and returns it.
func (n *Net) Node() *Node {
	nd := &Node{
		Id:     uuid.NewV4().String(),
		Props:  make(Props),
		Parent: n,
	}
	n.Nodes = append(n.Nodes, nd)
	return nd
}
// Link creates a link over the given endpoints, wires up their neighbor
// pointers, attaches the link to n, and records whether the link stays inside
// a single subnet in its "local" property.
func (n *Net) Link(es ...*Endpoint) *Link {
	lnk := &Link{
		Id:        uuid.NewV4().String(),
		Props:     make(Props),
		Endpoints: es,
	}
	setNeighbors(lnk)
	n.Links = append(n.Links, lnk)
	lnk.Props["local"] = lnk.IsLocal()
	return lnk
}
// Size returns the total number of nodes in this network and all of its
// subnetworks, recursively.
func (n *Net) Size() int {
	total := len(n.Nodes)
	for _, sub := range n.Nets {
		total += sub.Size()
	}
	return total
}
// Bounds computes the bounding box of the network's nodes and subnetworks.
// Top and Left start at a large sentinel so any real coordinate shrinks them.
// NOTE(review): Right and Bottom start at the zero value, so a network whose
// coordinates are all negative reports Right/Bottom as 0 — confirm whether
// coordinates are assumed non-negative.
func (net *Net) Bounds() Bounds {
	b := Bounds{}
	b.Top = 1.0e22
	b.Left = 1.0e22
	for _, n := range net.Nets {
		b.Consume(n.Bounds(), n.Loc)
	}
	for _, n := range net.Nodes {
		if n.Loc.X < b.Left {
			b.Left = n.Loc.X
		}
		if n.Loc.X > b.Right {
			b.Right = n.Loc.X
		}
		if n.Loc.Y > b.Bottom {
			b.Bottom = n.Loc.Y
		}
		if n.Loc.Y < b.Top {
			b.Top = n.Loc.Y
		}
	}
	return b
}
// Consume grows b so that it also covers the bounds x translated by offset c.
func (b *Bounds) Consume(x Bounds, c Vec2) {
	left, right := x.Left+c.X, x.Right+c.X
	top, bottom := x.Top+c.Y, x.Bottom+c.Y
	if left < b.Left {
		b.Left = left
	}
	if right > b.Right {
		b.Right = right
	}
	if top < b.Top {
		b.Top = top
	}
	if bottom > b.Bottom {
		b.Bottom = bottom
	}
}
// Search methods ~~~
// GetElementProps returns the property map of the node or link carrying the
// given uuid, or nil when no such element exists.
func (n *Net) GetElementProps(uuid string) *Props {
	if _, _, node := n.GetNode(uuid); node != nil {
		return &node.Props
	}
	if _, _, link := n.GetLink(uuid); link != nil {
		return &link.Props
	}
	return nil
}
// GetSubnet returns the index and pointer of the direct subnetwork with the
// given uuid, or (-1, nil). The search is not recursive.
func (n *Net) GetSubnet(uuid string) (int, *Net) {
	for i := range n.Nets {
		if n.Nets[i].Id == uuid {
			return i, n.Nets[i]
		}
	}
	return -1, nil
}
// DeleteSubnet removes the direct subnetwork with the given uuid and returns
// it, or nil when absent. Subnet ordering is not preserved (swap-with-last
// removal).
func (n *Net) DeleteSubnet(uuid string) *Net {
	i, sub := n.GetSubnet(uuid)
	if sub == nil {
		return nil
	}
	last := len(n.Nets) - 1
	n.Nets[i] = n.Nets[last]
	n.Nets = n.Nets[:last]
	return sub
}
// GetNode finds the node carrying the given uuid (either as its own id or as
// one of its endpoints' ids) and returns its index within its parent network,
// the parent network, and the node itself. Subnetworks are searched
// recursively; (-1, nil, nil) means no match.
func (n *Net) GetNode(uuid string) (int, *Net, *Node) {
	for i, node := range n.Nodes {
		if node.Id == uuid {
			return i, n, node
		}
		for _, ep := range node.Endpoints {
			if ep.Id == uuid {
				return i, n, node
			}
		}
	}
	for _, sub := range n.Nets {
		if i, parent, node := sub.GetNode(uuid); node != nil {
			return i, parent, node
		}
	}
	return -1, nil, nil
}
// DeleteNode removes the node with the given uuid from whatever (sub)network
// holds it and returns it, or nil when not found. Node ordering is not
// preserved (swap-with-last removal).
func (n *Net) DeleteNode(uuid string) *Node {
	i, parent, node := n.GetNode(uuid)
	if node == nil {
		return nil
	}
	last := len(parent.Nodes) - 1
	parent.Nodes[i] = parent.Nodes[last]
	parent.Nodes = parent.Nodes[:last]
	return node
}
// GetLink finds the link carrying the given uuid (either as its own id or as
// one of its endpoints' ids) and returns its index within its parent network,
// the parent network, and the link itself. Subnetworks are searched
// recursively; (-1, nil, nil) means no match.
func (n *Net) GetLink(uuid string) (int, *Net, *Link) {
	for i, link := range n.Links {
		if link.Id == uuid {
			return i, n, link
		}
		for _, ep := range link.Endpoints {
			if ep.Id == uuid {
				return i, n, link
			}
		}
	}
	for _, sub := range n.Nets {
		if i, parent, link := sub.GetLink(uuid); link != nil {
			return i, parent, link
		}
	}
	return -1, nil, nil
}
// DeleteLink removes the link with the given uuid from whatever (sub)network
// holds it and returns it, or nil when not found. Link ordering is not
// preserved (swap-with-last removal).
func (n *Net) DeleteLink(uuid string) *Link {
	i, parent, link := n.GetLink(uuid)
	if link == nil {
		return nil
	}
	last := len(parent.Links) - 1
	parent.Links[i] = parent.Links[last]
	parent.Links = parent.Links[:last]
	return link
}
// GetNodeByName returns the first node in this network whose "name" property
// (or one of whose endpoints' "name" properties) equals name, or nil.
// NOTE(review): unlike GetNode/GetNodeEndpointById this does not recurse into
// subnetworks — confirm whether that is intentional.
func (n *Net) GetNodeByName(name string) *Node {
	for _, x := range n.Nodes {
		if x.Props["name"] == name {
			return x
		}
		for _, e := range x.Endpoints {
			if e.Props["name"] == name {
				return x
			}
		}
	}
	return nil
}
// GetNodeEndpointById returns the endpoint with the given id, searching this
// network's nodes first and then all subnetworks recursively; nil when absent.
func (n *Net) GetNodeEndpointById(id string) *Endpoint {
	for _, node := range n.Nodes {
		for _, ep := range node.Endpoints {
			if ep.Id == id {
				return ep
			}
		}
	}
	for _, sub := range n.Nets {
		if ep := sub.GetNodeEndpointById(id); ep != nil {
			return ep
		}
	}
	return nil
}
// Convenience methods ~~~
// String renders a human-readable summary of this network's nodes and links.
// Link endpoints are shown via their parent node's "name" property when one
// is set, falling back to the endpoint id.
func (n *Net) String() string {
	out := "nodes\n"
	out += "-----\n"
	for _, node := range n.Nodes {
		out += propString(node.Props) + "\n"
	}
	out += "links\n"
	out += "-----\n"
	for _, l := range n.Links {
		out += propString(l.Props) + " endpoints: "
		for _, ep := range l.Endpoints {
			if name, ok := ep.Parent.Props["name"]; ok {
				out += name.(string) + " "
			} else {
				out += ep.Id
			}
		}
		out += "\n\n"
	}
	return out
}
// FromFile reads a JSON-serialized network from filename and reconstructs it,
// including its traversal pointers.
func FromFile(filename string) (*Net, error) {
	raw, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return FromString(string(raw))
}
// FromString deserializes a network from JSON and rebuilds all traversal
// state (parent pointers, neighbor maps, canonical endpoint objects). On a
// linking failure the partially linked network is returned alongside the
// error. LiftNetConstraints is defined elsewhere in this package.
func FromString(s string) (*Net, error) {
	net := NewNet()
	err := json.Unmarshal([]byte(s), net)
	if err != nil {
		return nil, err
	}
	PrepNetwork(net)
	err = LinkNetwork(net)
	if err != nil {
		return net, err
	}
	LiftNetConstraints(net)
	return net, nil
}
// ToString serializes the network to indented JSON.
func (n *Net) ToString() (string, error) {
	encoded, err := json.MarshalIndent(*n, "", " ")
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
// ToFile serializes the network to indented JSON and writes it to filename
// with mode 0644.
func (n *Net) ToFile(filename string) error {
	encoded, err := json.MarshalIndent(*n, "", " ")
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filename, encoded, 0644)
}
// Json serializes the network to indented JSON bytes.
// (MarshalIndent already returns nil bytes on error, so no extra branch is
// needed.)
func (n *Net) Json() ([]byte, error) {
	return json.MarshalIndent(*n, "", " ")
}
// Clone deep-copies the network by serializing it to JSON and parsing it
// back.
// TODO a bit ghetto, we can do better than round trip serialization ...
// NOTE(review): both the marshal and parse errors are discarded here, so a
// failed round trip silently yields a nil clone.
func (n *Net) Clone() *Net {
	s, _ := n.ToString()
	clone, _ := FromString(s)
	return clone
}
// LinkNetwork restores the traversal pointers of a freshly deserialized
// network: every link endpoint is replaced by the canonical endpoint object
// found via the root network, and neighbor maps are rebuilt. Subnetworks are
// processed recursively. Returns an error when a link references an endpoint
// that no node owns.
// Fix: the error from the recursive call was previously discarded; it is now
// propagated to the caller.
func LinkNetwork(net *Net) error {
	for _, l := range net.Links {
		for i, e := range l.Endpoints {
			canonical := net.Root().GetNodeEndpointById(e.Id)
			if canonical == nil {
				return fmt.Errorf("endpoint %s from link %s has no parent", e.Id, l.Id)
			}
			l.Endpoints[i] = canonical
		}
		setNeighbors(l)
	}
	// Recurse into subnetworks, surfacing the first failure.
	for _, sub := range net.Nets {
		sub.Parent = net
		if err := LinkNetwork(sub); err != nil {
			return err
		}
	}
	return nil
}
// PrepNetwork initializes parent pointers and empty neighbor maps for every
// node and endpoint in net and all of its subnetworks.
func PrepNetwork(net *Net) {
	for _, node := range net.Nodes {
		node.Parent = net
		for _, ep := range node.Endpoints {
			ep.Parent = node
			ep.Neighbors = make(map[string]*Neighbor)
		}
	}
	for _, sub := range net.Nets {
		sub.Parent = net
		PrepNetwork(sub)
	}
}
// Root walks parent pointers up to the top-level network and returns it.
func (net *Net) Root() *Net {
	r := net
	for r.Parent != nil {
		r = r.Parent
	}
	return r
}
// ToFile serializes net to indented JSON and writes it to filename with mode
// 0660. (Unlike the Net.ToFile method, this marshals the pointer and uses
// group-writable permissions.)
func ToFile(net *Net, filename string) error {
	encoded, err := json.MarshalIndent(net, "", " ")
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filename, encoded, 0660)
}
// sort.Interface implementation so property lists can be ordered by key.
func (x SortedProps) Len() int           { return len(x) }
func (x SortedProps) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
func (x SortedProps) Less(i, j int) bool { return x[i].Key < x[j].Key }
// sortProps flattens a property map into key/value pairs sorted by key,
// giving deterministic iteration order.
func sortProps(p Props) SortedProps {
	pairs := make(SortedProps, 0, len(p))
	for key, val := range p {
		pairs = append(pairs, Prop{Key: key, Value: val})
	}
	sort.Sort(pairs)
	return pairs
}
// propString renders each property as an indented "key: value" line, sorted
// by key.
func propString(p Props) string {
	out := ""
	for _, kv := range sortProps(p) {
		out += fmt.Sprintf(" %s: %v\n", kv.Key, kv.Value)
	}
	return out
}
// Node methods ---------------------------------------------------------------
// Endpoint creates a new endpoint with a fresh UUID, attaches it to the node
// and returns it.
func (n *Node) Endpoint() *Endpoint {
	ep := &Endpoint{
		Id:        uuid.NewV4().String(),
		Props:     make(Props),
		Neighbors: make(map[string]*Neighbor),
		Parent:    n,
	}
	n.Endpoints = append(n.Endpoints, ep)
	return ep
}
// Set merges the given properties into the node's property map and returns
// the node for chaining.
func (n *Node) Set(p Props) *Node {
	for key, val := range p {
		n.Props[key] = val
	}
	return n
}
// AddSoftware appends a software property set to the node's "software"
// property, starting a fresh list when the property is missing or holds a
// value that is not a []Props. Returns the node for chaining.
// Fixes over the original: the unreachable duplicate `return n` (and the
// commented-out dead code before it) is gone, and the two-step
// lookup-then-assert collapses into a single type assertion — a missing key
// yields a nil interface, which fails the assertion exactly like a
// wrong-typed value did.
func (n *Node) AddSoftware(p Props) *Node {
	sw, ok := n.Props["software"].([]Props)
	if !ok {
		n.Props["software"] = []Props{p}
		return n
	}
	n.Props["software"] = append(sw, p)
	return n
}
// Clone returns a shallow copy of the node: Props maps are shared with the
// original, and copied endpoints carry no parent or neighbor links.
func (n *Node) Clone() *Node {
	cp := &Node{Id: n.Id, Props: n.Props}
	for _, ep := range n.Endpoints {
		cp.Endpoints = append(cp.Endpoints, &Endpoint{Id: ep.Id, Props: ep.Props})
	}
	return cp
}
// Valence returns the number of neighbor connections across all of the
// node's endpoints.
func (n *Node) Valence() int {
	total := 0
	for _, ep := range n.Endpoints {
		total += len(ep.Neighbors)
	}
	return total
}
// Neighbors collects the nodes reachable from any of this node's endpoints.
// A neighboring node appears once per connecting endpoint.
func (n *Node) Neighbors() []*Node {
	var peers []*Node
	for _, ep := range n.Endpoints {
		for _, nb := range ep.Neighbors {
			peers = append(peers, nb.Endpoint.Parent)
		}
	}
	return peers
}
// Label returns the node's "name" property when set, otherwise its id.
// A nil receiver yields the empty string.
func (n *Node) Label() string {
	if n == nil {
		return ""
	}
	if name, ok := n.Props["name"]; ok {
		return name.(string)
	}
	return n.Id
}
// Link methods ---------------------------------------------------------------
// IsLocal reports whether every endpoint of the link lives in the same
// parent network. All endpoints are equal iff each equals the first, so a
// single pass replaces the original pairwise comparison.
func (l *Link) IsLocal() bool {
	if len(l.Endpoints) == 0 {
		return true
	}
	first := l.Endpoints[0].Parent.Parent.Id
	for _, ep := range l.Endpoints[1:] {
		if ep.Parent.Parent.Id != first {
			return false
		}
	}
	return true
}
// Set merges the given properties into the link's property map and returns
// the link for chaining.
func (l *Link) Set(p Props) *Link {
	for key, val := range p {
		l.Props[key] = val
	}
	return l
}
// Clone returns a shallow copy of the link: Props maps are shared with the
// original, and copied endpoints carry no parent or neighbor links.
func (l *Link) Clone() *Link {
	cp := &Link{Id: l.Id, Props: l.Props}
	for _, ep := range l.Endpoints {
		cp.Endpoints = append(cp.Endpoints, &Endpoint{Id: ep.Id, Props: ep.Props})
	}
	return cp
}
// Endpoint methods -----------------------------------------------------------
// Set merges the given properties into the endpoint's property map and
// returns the endpoint for chaining.
func (e *Endpoint) Set(p Props) *Endpoint {
	for key, val := range p {
		e.Props[key] = val
	}
	return e
}
// helpers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// setNeighbors records, for every pair of distinct endpoints on the link,
// bidirectional neighbor entries pointing through the link.
func setNeighbors(link *Link) {
	for _, a := range link.Endpoints {
		for _, b := range link.Endpoints {
			if a == b {
				continue
			}
			a.Neighbors[b.Id] = &Neighbor{link, b}
			b.Neighbors[a.Id] = &Neighbor{link, a}
		}
	}
}
|
package responses
import (
"encoding/json"
"fmt"
"net/http"
)
func JSON(w http.ResponseWriter, statusCode int, data interface{}) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
w.Header().Set("Access-Control-Allow-Headers", "Origin,Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
w.WriteHeader(statusCode)
err := json.NewEncoder(w).Encode(data)
if err != nil {
fmt.Fprintf(w, "%s", err.Error())
}
}
// ERROR writes err as a JSON object {"error": "..."} with the given status
// code. A nil err yields an empty 400 Bad Request response.
func ERROR(w http.ResponseWriter, statusCode int, err error) {
	if err == nil {
		JSON(w, http.StatusBadRequest, nil)
		return
	}
	JSON(w, statusCode, struct {
		Error string `json:"error"`
	}{Error: err.Error()})
}
|
package user
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
log "github.com/sirupsen/logrus"
"github.com/opsbot/cli-go/utils"
"github.com/opsbot/zerotier/api"
"github.com/spf13/cobra"
)
// UpdateCommand returns a cobra command
// UpdateCommand returns a cobra command ("put") that updates a user record.
// Input precedence in PreRun: piped stdin first, then --file, and only when
// neither is present are the username/password/venue flags required (they are
// validated here, not read).
func UpdateCommand() *cobra.Command {
	var fileName string
	var fileData []byte
	cmd := &cobra.Command{
		Use:   "put",
		Short: "update user",
		PreRun: func(cmd *cobra.Command, args []string) {
			// get Stdin.Stat so we can check for piped input
			info, err := os.Stdin.Stat()
			if err != nil {
				log.Fatal(err)
			}
			// NOTE(review): Size() > 0 only detects piped input for regular
			// files/pipes with buffered data; interactive terminals report
			// 0 — confirm this detection is sufficient for all callers.
			if info.Size() > 0 { // fileData read from Stdin
				data, _ := ioutil.ReadAll(os.Stdin)
				fileData = []byte(string(data))
				return
			}
			if fileName != "" { // fileData read from file
				fileData = utils.FileRead(fileName)
				return
			}
			// No piped or file input: fall back to individual field flags.
			if cmd.Flag("username").Value.String() == "" {
				log.Fatal("you must provide a username")
			}
			if cmd.Flag("password").Value.String() == "" {
				log.Fatal("you must provide a password")
			}
			if cmd.Flag("venue").Value.String() == "" {
				log.Fatal("you must provide a venue id")
			}
		},
		Run: func(cmd *cobra.Command, args []string) {
			// channel := fmt.Sprintf("venue-%v", cmd.Flag("venue").Value.String())
			// disabled, _ := cmd.Flags().GetBool("disabled")
			var user api.User
			// Unmarshal errors are ignored; an unparsable payload sends the
			// zero-value user. NOTE(review): confirm this is intended.
			if fileData != nil {
				json.Unmarshal([]byte(fileData), &user)
			} else {
				user = api.User{}
			}
			data := api.UserPut(user)
			fmt.Println(data.Body)
		},
	}
	cmd.Flags().StringVarP(&fileName, "file", "f", "", "file path")
	return cmd
}
|
package main
import (
"fmt"
"github.com/gin-gonic/gin"
// "net/http"
)
// User is the demo view model rendered by the user list and create pages.
type User struct {
	FirstName string
	LastName  string
	Email     string
}
// listUsers renders the users.html template with a static demo user list.
func listUsers(c *gin.Context) {
	users := []User{
		{FirstName: "John", LastName: "Doe", Email: "john.doe@mail.com"},
		{FirstName: "Jane", LastName: "Doe", Email: "jane.doe@mail.com"},
	}
	c.HTML(200, "users.html", gin.H{"users": users})
}
// getCreateForm renders the empty user-creation form.
func getCreateForm(c *gin.Context) {
	c.HTML(200, "create.html", gin.H{})
}
// createUser prints the posted form fields and redirects back to the form.
// NOTE(review): the submitted user is only logged to stdout, never persisted.
func createUser(c *gin.Context) {
	for _, field := range []string{"FirstName", "LastName", "Email"} {
		fmt.Println(c.PostForm(field))
	}
	c.Redirect(302, c.FullPath())
}
func main(){
router := gin.Default()
router.LoadHTMLGlob("templates/*")
router.GET("/users",listUsers)
router.GET("/create",getCreateForm)
router.POST("/create",createUser)
router.Run()
} |
package main
// Sliding-window notes, kept as line comments so this file compiles: the
// original pasted the commentary and the Java template as bare text outside
// the comment block, which is a Go syntax error. Line comments are used
// (rather than /* */) because the Java code itself contains "*/".
//
// https://leetcode.com/problems/find-all-anagrams-in-a-string/discuss/92007/Sliding-Window-algorithm-template-to-solve-all-the-Leetcode-substring-search-problem.
// Sliding Window algorithm template to solve all the Leetcode substring search problem.
//
// Among all leetcode questions, I find that there are at least 5 substring
// search problem which could be solved by the sliding window algorithm.
// so I sum up the algorithm template here. wish it will help you!
//
// the template (Java):
//
//	public class Solution {
//	    public List<Integer> slidingWindowTemplateByHarryChaoyangHe(String s, String t) {
//	        //init a collection or int value to save the result according the question.
//	        List<Integer> result = new LinkedList<>();
//	        if(t.length()> s.length()) return result;
//	        //create a hashmap to save the Characters of the target substring.
//	        //(K, V) = (Character, Frequence of the Characters)
//	        Map<Character, Integer> map = new HashMap<>();
//	        for(char c : t.toCharArray()){
//	            map.put(c, map.getOrDefault(c, 0) + 1);
//	        }
//	        //maintain a counter to check whether match the target string.
//	        int counter = map.size();//must be the map size, NOT the string size because the char may be duplicate.
//	        //Two Pointers: begin - left pointer of the window; end - right pointer of the window
//	        int begin = 0, end = 0;
//	        //the length of the substring which match the target string.
//	        int len = Integer.MAX_VALUE;
//	        //loop at the begining of the source string
//	        while(end < s.length()){
//	            char c = s.charAt(end);//get a character
//	            if( map.containsKey(c) ){
//	                map.put(c, map.get(c)-1);// plus or minus one
//	                if(map.get(c) == 0) counter--;//modify the counter according the requirement(different condition).
//	            }
//	            end++;
//	            //increase begin pointer to make it invalid/valid again
//	            while(counter == 0 /* counter condition. different question may have different condition */){
//	                char tempc = s.charAt(begin);//***be careful here: choose the char at begin pointer, NOT the end pointer
//	                if(map.containsKey(tempc)){
//	                    map.put(tempc, map.get(tempc) + 1);//plus or minus one
//	                    if(map.get(tempc) > 0) counter++;//modify the counter according the requirement(different condition).
//	                }
//	                /* save / update(min/max) the result if find a target */
//	                // result collections or result int value
//	                begin++;
//	            }
//	        }
//	        return result;
//	    }
//	}
//
// Similar questions:
//	https://leetcode.com/problems/minimum-window-substring/
//	https://leetcode.com/problems/longest-substring-without-repeating-characters/
//	https://leetcode.com/problems/substring-with-concatenation-of-all-words/
//	https://leetcode.com/problems/longest-substring-with-at-most-two-distinct-characters/
//	https://leetcode.com/problems/find-all-anagrams-in-a-string/

// main is intentionally empty; this file only preserves the notes above.
func main() {
}
|
package Week_01
import "fmt"
// getHint scores a Bulls-and-Cows guess against the secret and formats the
// result as "xAyB": x bulls (exact positional matches) and y cows (digits
// present in both strings but misplaced).
func getHint(secret string, guess string) string {
	exact := 0
	counts := map[byte]int{}
	for idx, r := range secret {
		if byte(r) == guess[idx] {
			exact++
		}
		counts[byte(r)]++
	}
	// First count every guess digit still available in the secret (bulls
	// included), then subtract the bulls to leave only misplaced matches.
	matched := 0
	for _, r := range guess {
		if counts[byte(r)] > 0 {
			matched++
			counts[byte(r)]--
		}
	}
	return fmt.Sprintf("%dA%dB", exact, matched-exact)
}
|
package handler
import (
"github.com/GreenComb/margool-admin/usecases"
"github.com/GreenComb/margool-contrib/middleware"
"github.com/martini-contrib/render"
)
// DashboardCmsForm renders the CMS dashboard page inside the admin layout,
// exposing static-asset config and marking "dashboard" as the active sidebar
// entry.
func DashboardCmsForm(ctx *middleware.Context, ren render.Render) {
	ctx.Set("Sidebar", "dashboard")
	ctx.Set("StaticAssets", usecases.GetConfStaticAssets())
	ren.HTML(200, "dashboard/cms", ctx, render.HTMLOptions{Layout: "layout/layout_admin"})
}
|
package persistence
import (
"database/sql"
"errors"
"fmt"
structs3 "fp-dynamic-elements-manager-controller/internal/logging/structs"
"fp-dynamic-elements-manager-controller/internal/queue/structs"
structs2 "fp-dynamic-elements-manager-controller/internal/stats/structs"
"github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"strings"
"time"
)
const (
	// ElementsTable is the MySQL table holding list elements.
	ElementsTable = "list_elements"
	// MySqlErrDuplicateValue is MySQL error code 1062 (duplicate entry).
	MySqlErrDuplicateValue = 1062
)

// ErrDuplicateValue is returned when an element's value already exists.
var ErrDuplicateValue = errors.New("duplicate value")

// ElementRepo is the narrow read interface consumed by stats code.
type ElementRepo interface {
	GetTotalElementCount() (int64, error)
}

// ListElementRepo provides CRUD and query access to the list_elements table.
type ListElementRepo struct {
	db  *sqlx.DB
	log *structs3.AppLogger
}

// NewListElementRepo wires a repository to the given database handle and
// application logger.
func NewListElementRepo(appDb *sqlx.DB, logger *structs3.AppLogger) *ListElementRepo {
	return &ListElementRepo{db: appDb, log: logger}
}
// BatchInsertListElements inserts all elements in one multi-row INSERT inside
// a transaction; rows whose key already exists only get updated_at refreshed.
// Errors are logged rather than returned (best-effort bulk load).
// Fixes over the original: an empty batch now returns early (it previously
// produced "VALUES " with no tuples — invalid SQL), slices are pre-sized, one
// timestamp is shared by created_at/updated_at, and the dead trailing return
// is gone.
func (l *ListElementRepo) BatchInsertListElements(items []structs.ListElement) {
	if len(items) == 0 {
		return
	}
	valueStrings := make([]string, 0, len(items))
	valueArgs := make([]interface{}, 0, len(items)*10)
	now := time.Now()
	for _, element := range items {
		valueStrings = append(valueStrings, "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
		valueArgs = append(valueArgs,
			element.ID, now, now, element.DeletedAt, element.Source,
			element.ServiceName, element.Type, element.Value, element.Safe,
			element.UpdateBatchId)
	}
	smt := fmt.Sprintf(`INSERT INTO %s
	(id, created_at, updated_at, deleted_at, source, service_name, type, value, safe, update_batch_id)
	VALUES %s
	ON DUPLICATE KEY UPDATE updated_at = VALUES(updated_at)`,
		ElementsTable, strings.Join(valueStrings, ","))
	tx, err := l.db.Begin()
	if err != nil {
		l.log.SystemLogger.Error(err, "Error starting transaction to batch insert list elements")
		return
	}
	if _, err = tx.Exec(smt, valueArgs...); err != nil {
		l.log.SystemLogger.Error(err, "Error batch inserting list elements, rolling back")
		tx.Rollback()
		return
	}
	if err = tx.Commit(); err != nil {
		l.log.SystemLogger.Error(err, "Error committing batch insert list elements")
	}
}
// InsertListElement inserts a single element inside a transaction, after an
// application-level duplicate check on Value that maps to ErrDuplicateValue.
// (Note: the check-then-insert sequence is inherently racy under concurrent
// writers, as in the original.)
func (l *ListElementRepo) InsertListElement(item structs.ListElement) error {
	if l.exists(item.Value) {
		return ErrDuplicateValue
	}
	smt := fmt.Sprintf(`INSERT INTO %s (id, created_at, updated_at, deleted_at, source, service_name, type, value, safe, update_batch_id)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, ElementsTable)
	args := []interface{}{
		item.ID, time.Now(), time.Now(), item.DeletedAt, item.Source,
		item.ServiceName, item.Type, item.Value, item.Safe, item.UpdateBatchId,
	}
	tx, err := l.db.Begin()
	if err != nil {
		l.log.SystemLogger.Error(err, "Error starting transaction to insert list element")
		return err
	}
	if _, err = tx.Exec(smt, args...); err != nil {
		l.log.SystemLogger.Error(err, "Error inserting list element, rolling back")
		tx.Rollback()
		return err
	}
	if err = tx.Commit(); err != nil {
		l.log.SystemLogger.Error(err, "Error committing insert list element")
		return err
	}
	return nil
}
// UpdateListElement updates value/safe/updated_at for the element with the
// item's ID inside a transaction. MySQL duplicate-key violations are mapped
// to ErrDuplicateValue; other errors are returned as-is.
// Fix: the original used a bare type assertion err.(*mysql.MySQLError), which
// panics whenever the error is not a MySQLError (e.g. a driver connection
// error on Begin); errors.As is used instead.
func (l *ListElementRepo) UpdateListElement(item structs.ListElement) error {
	smt := fmt.Sprintf(`UPDATE %s SET updated_at = ?, value = ?, safe = ? WHERE id = ?`, ElementsTable)
	args := []interface{}{time.Now(), item.Value, item.Safe, item.ID}
	tx, err := l.db.Begin()
	if err != nil {
		l.log.SystemLogger.Error(err, "Error starting transaction to update list element")
		return mapDuplicateErr(err)
	}
	if _, err = tx.Exec(smt, args...); err != nil {
		l.log.SystemLogger.Error(err, "Error updating list element, rolling back")
		tx.Rollback()
		return mapDuplicateErr(err)
	}
	if err = tx.Commit(); err != nil {
		l.log.SystemLogger.Error(err, "Error committing update list element")
		return mapDuplicateErr(err)
	}
	return nil
}

// mapDuplicateErr converts MySQL duplicate-key errors (code 1062) to the
// package sentinel ErrDuplicateValue and passes every other error through.
// Unlike a direct type assertion, errors.As cannot panic on foreign errors.
func mapDuplicateErr(err error) error {
	var myErr *mysql.MySQLError
	if errors.As(err, &myErr) && myErr.Number == MySqlErrDuplicateValue {
		return ErrDuplicateValue
	}
	return err
}
// DeleteByValue soft-deletes the element with the given value by stamping
// deleted_at, inside a transaction. Errors are logged, not returned.
func (l *ListElementRepo) DeleteByValue(value string) {
	now := time.Now()
	smt := fmt.Sprintf(`UPDATE %s SET deleted_at = ? WHERE value = ?`, ElementsTable)
	tx, err := l.db.Begin()
	if err != nil {
		l.log.SystemLogger.Error(err, "Error starting transaction to delete list element")
		return
	}
	if _, err = tx.Exec(smt, now, value); err != nil {
		l.log.SystemLogger.Error(err, "Error deleting list element, rolling back")
		tx.Rollback()
		return
	}
	if err = tx.Commit(); err != nil {
		l.log.SystemLogger.Error(err, "Error committing delete list element")
	}
}
func (l *ListElementRepo) GetById(id int) (receiver []structs.ListElement, err error) {
err = l.db.Select(&receiver, fmt.Sprintf("SELECT * FROM %s WHERE id = ?;", ElementsTable), id)
return
}
func (l *ListElementRepo) GetAll() (receiver []structs.ListElement, err error) {
err = l.db.Select(&receiver, fmt.Sprintf("SELECT * FROM %s WHERE deleted_at IS NULL ORDER BY created_at DESC;", ElementsTable))
return
}
func (l *ListElementRepo) GetAllDeleted() (receiver []structs.ListElement, err error) {
err = l.db.Select(&receiver, fmt.Sprintf("SELECT * FROM %s WHERE deleted_at IS NOT NULL ORDER BY created_at DESC;", ElementsTable))
return
}
func (l *ListElementRepo) GetAllByBatchId(batchId int64, types []structs.ElementType) (receiver []structs.ListElement, err error) {
smt := fmt.Sprintf("SELECT * FROM %s WHERE update_batch_id = ? AND type IN (?) AND deleted_at IS NULL ORDER BY created_at DESC;", ElementsTable)
query, args, err := sqlx.In(smt, batchId, types)
if err != nil {
l.log.SystemLogger.Error(err, "Error binding args to query")
}
query = l.db.Rebind(query)
err = l.db.Select(&receiver, query, args...)
return
}
func (l *ListElementRepo) GetAllPaginated(offset, pageSize int, safe bool) (receiver []structs.ListElement, err error) {
err = l.db.Select(&receiver, fmt.Sprintf("SELECT * FROM %s WHERE safe = ? AND deleted_at IS NULL ORDER BY created_at DESC LIMIT ? OFFSET ?;", ElementsTable), safe, pageSize, offset)
return
}
func (l *ListElementRepo) GetAllLike(offset, pageSize int, searchTerm string, safe bool) (receiver []structs.ListElement, err error) {
err = l.db.Select(&receiver, fmt.Sprintf("SELECT * FROM %s WHERE value LIKE ? AND safe = ? AND deleted_at IS NULL ORDER BY created_at DESC LIMIT ? OFFSET ?;", ElementsTable), "%"+searchTerm+"%", safe, pageSize, offset)
return
}
func (l *ListElementRepo) GetAllEquals(searchTerm string) (receiver structs.ListElement, err error) {
err = l.db.Get(&receiver, fmt.Sprintf("SELECT * FROM %s WHERE value = ? AND deleted_at IS NULL;", ElementsTable), searchTerm)
return
}
func (l *ListElementRepo) GetTotalCountWhereLike(safe bool, like string) (total int64, err error) {
err = l.db.Get(&total, fmt.Sprintf("SELECT count(1) FROM %s WHERE value LIKE ? AND safe = ? AND deleted_at IS NULL", ElementsTable), "%"+like+"%", safe)
return
}
func (l *ListElementRepo) GetTotalCount(safe bool) (total int64, err error) {
err = l.db.Get(&total, fmt.Sprintf("SELECT count(1) FROM %s WHERE safe = ? AND deleted_at IS NULL", ElementsTable), safe)
return
}
func (l *ListElementRepo) GetTotalElementCount() (total int64, err error) {
err = l.db.Get(&total, fmt.Sprintf("SELECT count(1) FROM %s WHERE deleted_at IS NULL", ElementsTable))
return
}
func (l *ListElementRepo) GetLatestUpdate(svcName string) (receiver structs.ListElement, err error) {
err = l.db.Get(&receiver, fmt.Sprintf("SELECT * FROM %s WHERE service_name = ? AND deleted_at IS NULL ORDER BY created_at DESC LIMIT 1;", ElementsTable), svcName)
return
}
// GetUnpushedBatchIds returns the distinct update batch ids of elements with
// one of the given types and the given safe flag that have not yet been
// recorded in update_statuses for the given module.
//
// sqlx.In expands the `IN (?)` placeholder for the types slice; the resulting
// query is rebound to the driver's placeholder style before execution.
func (l *ListElementRepo) GetUnpushedBatchIds(moduleId int64, safe bool, types []structs.ElementType) (receiver []int64, err error) {
	smt := fmt.Sprintf("SELECT DISTINCT(update_batch_id) FROM %s WHERE type IN (?) AND safe = ? AND update_batch_id NOT IN (SELECT update_batch_id FROM update_statuses WHERE module_metadata_id = ?);", ElementsTable)
	query, args, err := sqlx.In(smt, types, safe, moduleId)
	if err != nil {
		// Do not execute a malformed query; surface the binding failure to the caller.
		l.log.SystemLogger.Error(err, "Error binding args to query")
		return
	}
	query = l.db.Rebind(query)
	err = l.db.Select(&receiver, query, args...)
	return
}
// GetStats aggregates per-type element counts. With an empty serviceName it
// aggregates over all rows; otherwise only rows belonging to that service are
// counted. A missing result (sql.ErrNoRows) yields the zero Stats value;
// other errors are logged and also yield the zero value.
func (l *ListElementRepo) GetStats(serviceName string) (result structs2.Stats) {
	var smt string
	var args []interface{}
	if serviceName == "" {
		smt = fmt.Sprintf(`SELECT
	count(1) AS total,
	sum(IF(type = 'IP', 1, 0)) AS ip,
	sum(IF(type = 'DOMAIN', 1, 0)) AS domain,
	sum(IF(type = 'URL', 1, 0)) AS url,
	sum(IF(type = 'RANGE', 1, 0)) AS ip_range,
	sum(IF(type = 'SNORT', 1, 0)) AS snort
	FROM %s
	GROUP BY service_name is not null`, ElementsTable)
	} else {
		// Bind serviceName as a query parameter instead of interpolating it
		// into the SQL text, so a crafted service name cannot inject SQL.
		smt = fmt.Sprintf(`SELECT
	count(1) AS total,
	sum(IF(type = 'IP', 1, 0)) AS ip,
	sum(IF(type = 'DOMAIN', 1, 0)) AS domain,
	sum(IF(type = 'URL', 1, 0)) AS url,
	sum(IF(type = 'RANGE', 1, 0)) AS ip_range,
	sum(IF(type = 'SNORT', 1, 0)) AS snort
	FROM %s
	WHERE service_name = ?
	GROUP BY service_name`, ElementsTable)
		args = append(args, serviceName)
	}
	stats := structs2.Stats{}
	err := l.db.Get(&stats, smt, args...)
	if err == sql.ErrNoRows {
		return
	}
	if err != nil {
		l.log.SystemLogger.Error(err, "Error getting stats for list elements")
		return
	}
	return stats
}
// exists reports whether any element row (including soft-deleted ones)
// currently stores the given value.
func (l *ListElementRepo) exists(value string) bool {
	query := fmt.Sprintf("SELECT * FROM %s WHERE value = ? LIMIT 1;", ElementsTable)
	var element structs.ListElement
	return l.db.Get(&element, query, value) == nil
}
|
package bpi_test
import (
"testing"
"github.com/dasfoo/bright-pi"
"github.com/dasfoo/i2c"
)
// i2cDevice is a fake I2C bus device for tests: it records register writes in
// an in-memory register file and fails the test on any unexpected bus traffic.
type i2cDevice struct {
	regs [256]byte // register file, indexed by register address
	address byte // the only device address accepted by WriteByteToReg
	t *testing.T
}
// Close marks the device closed by zeroing its address, so any later write
// fails the address check in WriteByteToReg.
func (d *i2cDevice) Close() error {
	d.address = 0
	return nil
}
// WriteByteToReg stores value in the fake register file. It fails the test
// when the write targets a zero address (closed device) or a different
// address than the one this fake was configured with.
func (d *i2cDevice) WriteByteToReg(addr, reg, value byte) error {
	if addr == 0 || addr != d.address {
		d.t.Error("Invalid address", addr, "expected", d.address)
	}
	d.regs[reg] = value
	return nil
}
// ReadByteFromReg is not expected to be called by the code under test; any
// call fails the test.
func (d *i2cDevice) ReadByteFromReg(addr, reg byte) (byte, error) {
	d.t.Error("Unexpected read (1) from", addr, ":", reg)
	return 0, nil
}

// ReadWordFromReg is not expected to be called; any call fails the test.
func (d *i2cDevice) ReadWordFromReg(addr, reg byte) (uint16, error) {
	d.t.Error("Unexpected read (2) from", addr, ":", reg)
	return 0, nil
}

// WriteSliceToReg is not expected to be called; any call fails the test.
func (d *i2cDevice) WriteSliceToReg(addr, reg byte, data []byte) (int, error) {
	d.t.Error("Unexpected write (", len(data), ") to ", addr, ":", reg)
	return 0, nil
}

// ReadSliceFromReg is not expected to be called; any call fails the test.
func (d *i2cDevice) ReadSliceFromReg(addr, reg byte, data []byte) (int, error) {
	d.t.Error("Unexpected read of ", len(data), "bytes from ", addr, ":", reg)
	return 0, nil
}

// SetLogger is a no-op; the fake does not log.
func (d *i2cDevice) SetLogger(_ i2c.Logger) {
}
// TestDim verifies that Dim(WhiteAll, MaxDim) writes MaxDim to registers
// 2, 4, 5 and 7 of the fake device — presumably the dim registers of the
// white LEDs in the BrightPI register map; confirm against the bpi package.
func TestDim(t *testing.T) {
	dev := &i2cDevice{address: bpi.DefaultAddress, t: t}
	b := bpi.NewBrightPI(dev, bpi.DefaultAddress)
	if b.Dim(bpi.WhiteAll, bpi.MaxDim) != nil {
		t.Error("Dimming the lights returned an error")
	}
	// reg+1 yields the checked registers: 2, 4, 5 and 7.
	for i, reg := range []byte{1, 3, 4, 6} {
		if dev.regs[reg+1] != bpi.MaxDim {
			t.Error("Incorrect Dim value for light", i,
				"expected", bpi.MaxDim, "got", dev.regs[reg+1])
		}
	}
}
// TestGain verifies that Gain(MaxGain) writes MaxGain to register 9 —
// presumably the gain register in the BrightPI register map.
func TestGain(t *testing.T) {
	dev := &i2cDevice{address: bpi.DefaultAddress, t: t}
	b := bpi.NewBrightPI(dev, bpi.DefaultAddress)
	if b.Gain(bpi.MaxGain) != nil {
		t.Error("Setting lights gain returned an error")
	}
	if dev.regs[9] != bpi.MaxGain {
		t.Error("Invalid Gain value, expected", bpi.MaxGain, "got", dev.regs[9])
	}
}
// TestPower verifies that Power(IRAll) writes 165 (0b10100101 — presumably
// the IR LED bit mask; confirm against the bpi package) to register 0, and
// that Sleep() afterwards clears the same register to 0.
func TestPower(t *testing.T) {
	dev := &i2cDevice{address: bpi.DefaultAddress, t: t}
	b := bpi.NewBrightPI(dev, bpi.DefaultAddress)
	if b.Power(bpi.IRAll) != nil {
		t.Error("Setting lights power returned an error")
	}
	if dev.regs[0] != 165 {
		t.Error("Invalid power value", dev.regs[0])
	}
	if b.Sleep() != nil {
		t.Error("Setting sleep mode failed")
	}
	if dev.regs[0] != 0 {
		t.Error("Expected Sleep() to shut down power, but got", dev.regs[0])
	}
}
|
package Split_Linked_List_in_Parts
// ListNode is a node of a singly linked list.
type ListNode struct {
	Val int
	Next *ListNode
}

// splitListToParts cuts root into k consecutive parts whose lengths differ by
// at most one, with earlier parts receiving the extra nodes. When the list has
// fewer than k nodes, the trailing entries are nil. The list is modified in
// place: the link after each part's last node is severed.
func splitListToParts(root *ListNode, k int) []*ListNode {
	// First pass: count the nodes so part sizes can be computed up front.
	total := 0
	for n := root; n != nil; n = n.Next {
		total++
	}
	base, extra := total/k, total%k

	parts := make([]*ListNode, k)
	cur := root
	for i := 0; i < k && cur != nil; i++ {
		parts[i] = cur
		// The first `extra` parts each absorb one additional node.
		want := base
		if i < extra {
			want++
		}
		// Walk to the last node of this part, then detach the remainder.
		for step := 1; step < want; step++ {
			cur = cur.Next
		}
		rest := cur.Next
		cur.Next = nil
		cur = rest
	}
	return parts
}
|
package services
import (
"encoding/json"
"fmt"
"github.com/h2non/filetype"
"image"
"image/draw"
"image/jpeg"
"image/png"
"io/ioutil"
"os"
"strings"
"unicode/utf8"
)
// GenerateCard composes the card: it renders the title, description, picture
// and two number badges to their own images, layers them onto source.png at
// fixed pixel offsets, and writes the result to output.png. Errors are
// printed and abort generation.
func GenerateCard() {
	source, err := os.Open("source.png")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer source.Close()
	sourceImg, err := png.Decode(source)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Algo:
	// Generate each fragment image first, then draw all of them onto the source.
	// 20 symbols per title
	titleDst := getTitleImage("Heck3rman")
	// 26 symbols per string
	descriptionDst := getDescriptionImage("Promises to solve rubick's cube but doesn't do this")
	imageDst := getImageImage("image3.jpg")
	numberImage1 := getNumberImage("99")
	numberImage2 := getNumberImage("1")
	sr := image.Rectangle{image.Point{0, 0}, image.Point{sourceImg.Bounds().Dx(), sourceImg.Bounds().Dy()}}
	rgba := image.NewRGBA(sr)
	// Fixed card layout (pixel offsets and size limits of each fragment).
	var titleOffsetX = 35
	var titleOffsetY = 15
	var maxTitleWidth = 465
	var descriptionOffsetX = 35
	var descriptionOffsetY = 500
	var maxDescriptionWidth = 465
	var maxDescriptionHeight = 230
	var imageOffsetX = 4
	var imageOffsetY = 89
	var number1ImageOffsetX = 437
	var number1ImageOffsetY = 799
	var number2ImageOffsetX = 117
	var number2ImageOffsetY = 799
	tr := image.Rectangle{image.Point{titleOffsetX, titleOffsetY}, image.Point{maxTitleWidth + titleOffsetX, titleDst.Bounds().Dy() + titleOffsetY}}
	// The description block is vertically centered inside its reserved area.
	dr := image.Rectangle{image.Point{descriptionOffsetX, descriptionOffsetY + (maxDescriptionHeight-descriptionDst.Bounds().Dy())/2}, image.Point{maxDescriptionWidth + descriptionOffsetX, descriptionDst.Bounds().Dy() + descriptionOffsetY + (maxDescriptionHeight-descriptionDst.Bounds().Dy())/2}}
	ir := image.Rectangle{image.Point{imageOffsetX, imageOffsetY}, image.Point{imageDst.Bounds().Dx() + imageOffsetX, imageDst.Bounds().Dy() + imageOffsetY}}
	n1r := image.Rectangle{image.Point{number1ImageOffsetX, number1ImageOffsetY}, image.Point{numberImage1.Bounds().Dx() + number1ImageOffsetX, number1ImageOffsetY + numberImage1.Bounds().Dy()}}
	n2r := image.Rectangle{image.Point{number2ImageOffsetX, number2ImageOffsetY}, image.Point{numberImage1.Bounds().Dx() + number2ImageOffsetX, number2ImageOffsetY + numberImage1.Bounds().Dy()}}
	draw.Draw(rgba, sourceImg.Bounds(), sourceImg, image.Point{0, 0}, draw.Over)
	draw.Draw(rgba, tr, titleDst, image.Point{0, 0}, draw.Over)
	draw.Draw(rgba, dr, descriptionDst, image.Point{0, 0}, draw.Over)
	draw.Draw(rgba, ir, imageDst, image.Point{0, 0}, draw.Over)
	draw.Draw(rgba, n1r, numberImage1, image.Point{0, 0}, draw.Over)
	draw.Draw(rgba, n2r, numberImage2, image.Point{0, 0}, draw.Over)
	out, err := os.Create("output.png")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer out.Close()
	if err := png.Encode(out, rgba); err != nil {
		fmt.Println(err)
	}
}
// getImageImage loads the picture from filename (JPEG or PNG, detected by
// content sniffing, not extension) and draws it into a fixed 519x390 RGBA
// canvas. Returns nil when the file cannot be read or is not a supported
// image type.
func getImageImage(filename string) *image.RGBA {
	var width = 519
	var height = 390
	imgSrc, err := ioutil.ReadFile(filename)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	imgOs, err := os.Open(filename)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	defer imgOs.Close()
	imageType, unknown := filetype.Match(imgSrc)
	if unknown != nil {
		fmt.Println(unknown)
		return nil
	}
	var imageData image.Image
	if filetype.IsImage(imgSrc) {
		switch imageType.Extension {
		case "jpg":
			imageData, err = jpeg.Decode(imgOs)
		case "png":
			imageData, err = png.Decode(imgOs)
		}
		if err != nil {
			fmt.Println(err)
			return nil
		}
	}
	if imageData == nil {
		// Not an image, or an unsupported format: nothing to draw.
		return nil
	}
	dstImage := image.NewRGBA(image.Rectangle{image.Point{0, 0}, image.Point{width, height}})
	ir := image.Rectangle{image.Point{0, 0}, image.Point{width, height}}
	draw.Draw(dstImage, ir, imageData, image.Point{0, 0}, draw.Over)
	return dstImage
}
// getDescriptionImage renders the description text using the bitmap font in
// description.png/description.json. The text is word-wrapped at 27 symbols
// per line and each line is horizontally centered in a 465px-wide canvas.
// Returns nil when the font image cannot be loaded.
func getDescriptionImage(description string) *image.RGBA {
	font, err := os.Open("description.png")
	if err != nil {
		fmt.Println(err)
		return nil
	}
	defer font.Close()
	fontImage, err := png.Decode(font)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	var maxSymbolsPerString = 27
	// Greedy word wrap: pack words into lines of at most maxSymbolsPerString runes.
	words := strings.Fields(description)
	var descriptionStrings []string
	var stringLength = 0
	var stringVal = ""
	for _, word := range words {
		if (stringLength + 1 + utf8.RuneCountInString(word)) < maxSymbolsPerString {
			stringLength += utf8.RuneCountInString(word) + 1
			stringVal += word + " "
		} else {
			descriptionStrings = append(descriptionStrings, stringVal)
			stringVal = word + " "
			stringLength = utf8.RuneCountInString(word) + 1
		}
	}
	// Flush the last pending line; an empty description yields no lines at all.
	if stringVal != "" {
		descriptionStrings = append(descriptionStrings, stringVal)
	}
	var whitespace = 1
	var verticalWhitespace = 5
	var strHeight = 42
	var width = 465
	var height = len(descriptionStrings)*strHeight + len(descriptionStrings)*verticalWhitespace
	dstDescription := image.NewRGBA(image.Rectangle{image.Point{0, 0}, image.Point{width, height}})
	for i, str := range descriptionStrings {
		var yOffset = i*strHeight + i*verticalWhitespace
		// First measure the line so it can be centered, then blit each glyph.
		var cDescriptionWidth = 0
		for _, char := range str {
			var letter = letterPoint(string(char), "description.json")
			cDescriptionWidth += (letter.Max.X - letter.Min.X) + whitespace
		}
		cDescriptionWidth = (width - cDescriptionWidth) / 2
		for _, char := range str {
			var letter = letterPoint(string(char), "description.json")
			st := image.Rectangle{image.Point{cDescriptionWidth, yOffset}, image.Point{cDescriptionWidth + letter.Max.X - letter.Min.X + whitespace, strHeight + yOffset}}
			cDescriptionWidth = cDescriptionWidth + letter.Max.X - letter.Min.X + whitespace
			draw.Draw(dstDescription, st, fontImage, image.Point{letter.Min.X, 0}, draw.Over)
		}
	}
	return dstDescription
}
// getNumberImage renders numberString using the bitmap font in
// numbers.png/numbers.json into a single-line image sized to fit the digits.
// Returns nil when the font image cannot be loaded.
func getNumberImage(numberString string) *image.RGBA {
	font, err := os.Open("numbers.png")
	if err != nil {
		fmt.Println(err)
		return nil
	}
	defer font.Close()
	fontImg, err := png.Decode(font)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	var whitespace = 1
	var titleWidth = 0
	var titleHeight = 62
	// Measure total width first so the destination canvas fits exactly.
	for _, char := range numberString {
		var letter = letterPoint(string(char), "numbers.json")
		titleWidth += letter.Max.X - letter.Min.X + whitespace
	}
	dstTitle := image.NewRGBA(image.Rectangle{image.Point{0, 0}, image.Point{titleWidth, titleHeight}})
	var cTitleWidth = 0
	for _, char := range numberString {
		var letter = letterPoint(string(char), "numbers.json")
		st := image.Rectangle{image.Point{cTitleWidth, 0}, image.Point{cTitleWidth + letter.Max.X - letter.Min.X + whitespace, titleHeight}}
		cTitleWidth = cTitleWidth + letter.Max.X - letter.Min.X + whitespace
		draw.Draw(dstTitle, st, fontImg, image.Point{letter.Min.X, 0}, draw.Over)
	}
	return dstTitle
}
// getTitleImage renders titleString using the bitmap font in
// title.png/title.json into a single-line image sized to fit the glyphs.
// Returns nil when the font image cannot be loaded.
func getTitleImage(titleString string) *image.RGBA {
	font, err := os.Open("title.png")
	if err != nil {
		fmt.Println(err)
		return nil
	}
	defer font.Close()
	engFontTitleImg, err := png.Decode(font)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	var whitespace = 1
	var titleWidth = 0
	var titleHeight = 62
	// Measure total width first so the destination canvas fits exactly.
	for _, char := range titleString {
		var letter = letterPoint(string(char), "title.json")
		titleWidth += letter.Max.X - letter.Min.X + whitespace
	}
	dstTitle := image.NewRGBA(image.Rectangle{image.Point{0, 0}, image.Point{titleWidth, titleHeight}})
	var cTitleWidth = 0
	for _, char := range titleString {
		var letter = letterPoint(string(char), "title.json")
		st := image.Rectangle{image.Point{cTitleWidth, 0}, image.Point{cTitleWidth + letter.Max.X - letter.Min.X + whitespace, titleHeight}}
		cTitleWidth = cTitleWidth + letter.Max.X - letter.Min.X + whitespace
		draw.Draw(dstTitle, st, engFontTitleImg, image.Point{letter.Min.X, 0}, draw.Over)
	}
	return dstTitle
}
// letterPoint returns the horizontal span of glyph s inside the font strip
// described by the JSON metrics file f. A space has no glyph and gets a fixed
// 10px-wide placeholder. Errors yield the zero Rectangle.
//
// NOTE(review): the metrics file is opened and parsed on every call — once
// per character of rendered text. Consider caching the parsed ImageFont per
// file if this becomes hot.
func letterPoint(s, f string) image.Rectangle {
	alphabetJSONFile, err := os.Open(f)
	if err != nil {
		fmt.Println(err)
		return image.Rectangle{}
	}
	defer alphabetJSONFile.Close()
	alphabetStr, err := ioutil.ReadAll(alphabetJSONFile)
	if err != nil {
		fmt.Println(err)
		return image.Rectangle{}
	}
	var alphabet ImageFont
	if err := json.Unmarshal(alphabetStr, &alphabet); err != nil {
		fmt.Println(err)
		return image.Rectangle{}
	}
	if s == " " {
		// Spaces have no glyph entry; the negative Min.X produces a fixed
		// 10px advance without blitting any font pixels.
		return image.Rectangle{
			image.Point{-10, 0},
			image.Point{0, 50},
		}
	}
	return image.Rectangle{
		image.Point{alphabet.Letters[s].StartPos, 0},
		image.Point{alphabet.Letters[s].EndPos, alphabet.Height},
	}
}
|
package main
import (
"context"
"fmt"
pb "github.com/tony-yang/gcp-cloud-native-stack/frontend/genproto"
)
// getProducts fetches the full product list from the product catalog service.
// On error it returns a nil slice rather than a partially populated result.
func (f *frontendServer) getProducts(ctx context.Context) ([]*pb.Product, error) {
	resp, err := pb.NewProductCatalogServiceClient(f.catalogConn).ListProducts(ctx, &pb.Empty{})
	if err != nil {
		return nil, err
	}
	return resp.GetProducts(), nil
}
// getProduct fetches a single product by id from the product catalog service.
// On error it returns a nil product.
func (f *frontendServer) getProduct(ctx context.Context, id string) (*pb.Product, error) {
	resp, err := pb.NewProductCatalogServiceClient(f.catalogConn).GetProduct(ctx, &pb.GetProductRequest{Id: id})
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// getRecommendations asks the recommendation service for product ids related
// to the user's current products, then resolves at most the first four of
// them to full Product records (the UI only shows four).
func (f *frontendServer) getRecommendations(ctx context.Context, userID string, productIDs []string) ([]*pb.Product, error) {
	resp, err := pb.NewRecommendationServiceClient(f.recommendationConn).ListRecommendations(ctx, &pb.ListRecommendationsRequest{UserId: userID, ProductIds: productIDs})
	if err != nil {
		return nil, err
	}
	// Take only first four product recommendations to fit the UI.
	ids := resp.GetProductIds()
	if len(ids) > 4 {
		ids = ids[:4]
	}
	out := make([]*pb.Product, 0, len(ids))
	for _, pid := range ids {
		p, err := f.getProduct(ctx, pid)
		if err != nil {
			return nil, fmt.Errorf("failed to get recommended product info (#%s): %w", pid, err)
		}
		out = append(out, p)
	}
	return out, nil
}
|
/*
* Npcf_SMPolicyControl API
*
* Session Management Policy Control Service © 2019, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
*
* API version: 1.0.4
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// QosNotificationControlInfo is a generated model of the Npcf_SMPolicyControl
// API (3GPP Session Management Policy Control Service). It carries a QoS
// notification for a set of PCC rules.
type QosNotificationControlInfo struct {
	// An array of PCC rule id references to the PCC rules associated with the QoS notification control info.
	RefPccRuleIds []string `json:"refPccRuleIds"`
	NotifType QosNotifType `json:"notifType"`
	// Represents the content version of some content.
	ContVer int32 `json:"contVer,omitempty"`
}
|
package controllers
import (
"github.com/revel/revel"
)
// Message is the Revel controller serving simple message endpoints.
type Message struct {
	*revel.Controller
}
// Hello responds with a fixed plain-text greeting.
func (c Message) Hello() revel.Result {
	const greeting = "Hello, ReactGo!"
	return c.RenderText(greeting)
}
|
package operator
import (
"testing"
"github.com/blang/semver/v4"
"github.com/openshift/oc-mirror/pkg/api/v1alpha2"
"github.com/operator-framework/operator-registry/alpha/declcfg"
"github.com/operator-framework/operator-registry/alpha/property"
"github.com/stretchr/testify/require"
)
// TestConvertDCToIncludeConfig checks that each IncludeConfigManager strategy
// converts a declarative catalog config into the expected IncludeConfig: the
// catalog strategy records the head version of every channel, while the
// package strategy keeps the versions/channels pinned in its current config.
func TestConvertDCToIncludeConfig(t *testing.T) {
	type spec struct {
		name string
		cfg declcfg.DeclarativeConfig
		strategy IncludeConfigManager
		exp v1alpha2.IncludeConfig
	}
	specs := []spec{
		// Catalog strategy: every package/channel head becomes a MinVersion entry.
		{
			name: "Success/HeadsOnlyCatalog",
			strategy: &catalogStrategy{},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "stable"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.0"},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.1.0"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.0",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.0"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.1.0",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.1.0"),
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
		},
		// Package strategy: pinned versions in curr survive conversion; packages
		// without pins fall back to the lowest version found in the catalog.
		{
			name: "Success/HeadsOnlyPackages",
			strategy: &packageStrategy{
				curr: v1alpha2.IncludeConfig{
					Packages: []v1alpha2.IncludePackage{
						{
							Name: "bar",
							Channels: []v1alpha2.IncludeChannel{
								{
									Name: "stable",
									IncludeBundle: v1alpha2.IncludeBundle{
										MinVersion: "0.1.2",
									},
								},
								{
									Name: "alpha",
								},
							},
						},
						{
							Name: "baz",
							IncludeBundle: v1alpha2.IncludeBundle{
								MinVersion: "0.1.2",
							},
						},
						{
							Name: "foo",
						},
					},
				},
			},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "baz", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "stable"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.1", Skips: []string{"bar.v0.1.0"}},
						{Name: "bar.v0.1.2", Skips: []string{"bar.v0.1.1"}},
						{Name: "bar.v0.1.3", Skips: []string{"bar.v0.1.2"}},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "baz", Entries: []declcfg.ChannelEntry{
						{Name: "baz.v0.1.1", Skips: []string{"baz.v0.1.0"}},
						{Name: "baz.v0.1.2", Skips: []string{"baz.v0.1.1"}},
						{Name: "baz.v0.1.3", Skips: []string{"baz.v0.1.2"}},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.0.1"},
						{Name: "foo.v0.0.2", Replaces: "foo.v0.0.1"},
						{Name: "foo.v0.0.3", Replaces: "foo.v0.0.2"},
						{Name: "foo.v0.2.0", Replaces: "foo.v0.0.3"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.1",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.1"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.2",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.3",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.3"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "baz.v0.1.1",
						Package: "baz",
						Image: "reg/baz:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("baz", "0.1.1"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "baz.v0.1.2",
						Package: "baz",
						Image: "reg/baz:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("baz", "0.1.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "baz.v0.1.3",
						Package: "baz",
						Image: "reg/baz:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("baz", "0.1.3"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.1",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.1"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.2",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.3",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.3"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.2.0",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.2.0"),
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.2",
								},
							},
						},
					},
					{
						Name: "baz",
						IncludeBundle: v1alpha2.IncludeBundle{
							MinVersion: "0.1.2",
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.0.1",
								},
							},
						},
					},
				},
			},
		},
	}
	for _, s := range specs {
		t.Run(s.name, func(t *testing.T) {
			ic, err := s.strategy.ConvertDCToIncludeConfig(s.cfg)
			require.NoError(t, err)
			require.Equal(t, s.exp, ic)
		})
	}
}
// TestUpdateIncludeConfig_Catalog checks how the catalog strategy updates an
// existing IncludeConfig against a newer declarative config: new packages and
// channels are added, and when a previously-pinned version no longer exists in
// a channel, MinVersion advances to the next available bundle (or the channel
// head when there is no next bundle).
func TestUpdateIncludeConfig_Catalog(t *testing.T) {
	type spec struct {
		name string
		cfg declcfg.DeclarativeConfig
		strategy IncludeConfigManager
		in v1alpha2.IncludeConfig
		exp v1alpha2.IncludeConfig
		expErr string
	}
	specs := []spec{
		// A package present in the catalog but absent from the input config is
		// appended with its channel head as MinVersion.
		{
			name: "Success/NewPackages",
			strategy: &catalogStrategy{},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "stable"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.0"},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.1.0"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.0",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.0"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.1.0",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.1.0"),
						},
					},
				},
			},
			in: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
		},
		// A channel newly added to a known package is appended alongside the
		// existing channel entries.
		{
			name: "Success/NewChannels",
			strategy: &catalogStrategy{},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "stable"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.0"},
						{Name: "bar.v0.1.1", Replaces: "bar.v0.1.0"},
					}},
					{Schema: "olm.channel", Name: "alpha", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.0"},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.1.0"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.1",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.1"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.0",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.0"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.1.0",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.1.0"),
						},
					},
				},
			},
			in: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "alpha",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
		},
		// When the pinned version has been pruned from the channel, MinVersion
		// moves to the oldest bundle still present (bar) or to the head that
		// replaced the pruned chain (foo).
		{
			name: "Success/PruneChannelHead",
			strategy: &catalogStrategy{},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "stable"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.1", Skips: []string{"bar.v0.1.0"}},
						{Name: "bar.v0.1.2", Skips: []string{"bar.v0.1.1"}},
						{Name: "bar.v0.1.3", Skips: []string{"bar.v0.1.2"}},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.0.1"},
						{Name: "foo.v0.0.2", Replaces: "foo.v0.0.1"},
						{Name: "foo.v0.0.3", Replaces: "foo.v0.0.2"},
						{Name: "foo.v0.2.0", Replaces: "foo.v0.0.3"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.1",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.1"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.2",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.3",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.3"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.1",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.1"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.2",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.3",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.3"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.2.0",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.2.0"),
						},
					},
				},
			},
			in: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.1",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.2.0",
								},
							},
						},
					},
				},
			},
		},
		// When no bundle newer than the pinned version remains, MinVersion jumps
		// to the channel head.
		{
			name: "Success/NoNextBundle",
			strategy: &catalogStrategy{},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "stable"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.0.2", Skips: []string{"bar.v0.0.1"}},
						{Name: "bar.v0.2.2", Skips: []string{"bar.v0.0.2"}},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.0.2"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.0.2",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.0.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "bar.v0.2.2",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.2.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.2",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.2"),
						},
					},
				},
			},
			in: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.2.2",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.0.2",
								},
							},
						},
					},
				},
			},
		},
	}
	for _, s := range specs {
		t.Run(s.name, func(t *testing.T) {
			ic, err := s.strategy.UpdateIncludeConfig(s.cfg, s.in)
			if s.expErr != "" {
				require.EqualError(t, err, s.expErr)
			} else {
				require.NoError(t, err)
				require.Equal(t, s.exp, ic)
			}
		})
	}
}
// TestUpdateIncludeConfig_Package exercises packageStrategy.UpdateIncludeConfig:
// given a declarative catalog and the previously recorded include config, the
// strategy must advance each package/channel MinVersion to the next bundle to
// mirror, add newly discovered packages and channels, and fall back to channel
// heads when the recorded version has been pruned from the catalog.
func TestUpdateIncludeConfig_Package(t *testing.T) {
	// spec is one table-driven case: `curr` (inside strategy) is the state from
	// the last run, `cfg` the catalog, `in` the candidate config, and `exp` the
	// expected merged result (or `expErr` the expected error string).
	type spec struct {
		name string
		cfg declcfg.DeclarativeConfig
		strategy IncludeConfigManager
		in v1alpha2.IncludeConfig
		exp v1alpha2.IncludeConfig
		expErr string
	}
	specs := []spec{
		{
			// A package ("foo") present in the catalog but absent from the
			// strategy's recorded config must be added with channel detail.
			name: "Success/NewPackages",
			strategy: &packageStrategy{
				curr: v1alpha2.IncludeConfig{
					Packages: []v1alpha2.IncludePackage{
						{
							Name: "bar",
							IncludeBundle: v1alpha2.IncludeBundle{
								MinVersion: "0.1.0",
							},
						},
						{
							Name: "foo",
						},
					},
				},
			},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "stable"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.0"},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.1.0"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.0",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.0"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.1.0",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.1.0"),
						},
					},
				},
			},
			in: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						IncludeBundle: v1alpha2.IncludeBundle{
							MinVersion: "0.1.0",
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
		},
		{
			// Channels newly present in the catalog (e.g. "alpha") must be
			// merged into the recorded per-package channel lists.
			name: "Success/NewChannels",
			strategy: &packageStrategy{
				curr: v1alpha2.IncludeConfig{
					Packages: []v1alpha2.IncludePackage{
						{
							Name: "bar",
							Channels: []v1alpha2.IncludeChannel{
								{
									Name: "stable",
									IncludeBundle: v1alpha2.IncludeBundle{
										MinVersion: "0.1.0",
									},
								},
								{
									Name: "alpha",
								},
							},
						},
						{
							Name: "foo",
							Channels: []v1alpha2.IncludeChannel{
								{
									Name: "alpha",
								},
							},
						},
					},
				},
			},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "alpha"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.0"},
						{Name: "bar.v0.1.1", Replaces: "bar.v0.1.0"},
					}},
					{Schema: "olm.channel", Name: "alpha", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.0"},
					}},
					{Schema: "olm.channel", Name: "alpha", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.1.0"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.1",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.1"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.0",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.0"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.1.0",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.1.0"),
						},
					},
				},
			},
			in: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "alpha",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "alpha",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
		},
		{
			// When the recorded MinVersion is (or has become) the channel head,
			// the head version must be kept/advanced rather than dropped.
			name: "Success/PruneChannelHead",
			strategy: &packageStrategy{
				curr: v1alpha2.IncludeConfig{
					Packages: []v1alpha2.IncludePackage{
						{
							Name: "bar",
							Channels: []v1alpha2.IncludeChannel{
								{
									Name: "stable",
									IncludeBundle: v1alpha2.IncludeBundle{
										MinVersion: "0.1.3",
									},
								},
							},
						},
						{
							Name: "foo",
						},
					},
				},
			},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "stable"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.1.1", Skips: []string{"bar.v0.1.0"}},
						{Name: "bar.v0.1.2", Skips: []string{"bar.v0.1.1"}},
						{Name: "bar.v0.1.3", Skips: []string{"bar.v0.1.2"}},
					}},
					{Schema: "olm.channel", Name: "alpha", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.2.0", Skips: []string{"foo.v0.1.0"}},
						{Name: "foo.v0.2.1", Skips: []string{"foo.v0.2.0"}},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.0.1"},
						{Name: "foo.v0.0.2", Replaces: "foo.v0.0.1"},
						{Name: "foo.v0.0.3", Replaces: "foo.v0.0.2"},
						{Name: "foo.v0.2.0", Replaces: "foo.v0.0.3"},
						{Name: "foo.v0.2.1", Replaces: "foo.v0.2.0"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.1",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.1"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.2",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "bar.v0.1.3",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.1.3"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.1",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.1"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.2",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.0.3",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.0.3"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.2.0",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.2.0"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.2.1",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.2.1"),
						},
					},
				},
			},
			in: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.2",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.3",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "alpha",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.2.1",
								},
							},
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.2.0",
								},
							},
						},
					},
				},
			},
		},
		{
			// When no bundle newer than the recorded version exists, the
			// channel head's version is used as-is.
			name: "Success/NoNextBundle",
			strategy: &packageStrategy{
				curr: v1alpha2.IncludeConfig{
					Packages: []v1alpha2.IncludePackage{
						{
							Name: "bar",
						},
						{
							Name: "foo",
						},
					},
				},
			},
			cfg: declcfg.DeclarativeConfig{
				Packages: []declcfg.Package{
					{Schema: "olm.package", Name: "bar", DefaultChannel: "stable"},
					{Schema: "olm.package", Name: "foo", DefaultChannel: "stable"},
				},
				Channels: []declcfg.Channel{
					{Schema: "olm.channel", Name: "stable", Package: "bar", Entries: []declcfg.ChannelEntry{
						{Name: "bar.v0.0.2", Skips: []string{"bar.v0.0.1"}},
					}},
					{Schema: "olm.channel", Name: "stable", Package: "foo", Entries: []declcfg.ChannelEntry{
						{Name: "foo.v0.1.0"},
					}},
				},
				Bundles: []declcfg.Bundle{
					{
						Schema: "olm.bundle",
						Name: "bar.v0.0.2",
						Package: "bar",
						Image: "reg/bar:latest",
						Properties: []property.Property{
							property.MustBuildGVKRequired("etcd.database.coreos.com", "v1", "EtcdBackup"),
							property.MustBuildPackage("bar", "0.0.2"),
						},
					},
					{
						Schema: "olm.bundle",
						Name: "foo.v0.1.0",
						Package: "foo",
						Image: "reg/foo:latest",
						Properties: []property.Property{
							property.MustBuildPackage("foo", "0.1.0"),
						},
					},
				},
			},
			in: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
			exp: v1alpha2.IncludeConfig{
				Packages: []v1alpha2.IncludePackage{
					{
						Name: "bar",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.0.2",
								},
							},
						},
					},
					{
						Name: "foo",
						Channels: []v1alpha2.IncludeChannel{
							{
								Name: "stable",
								IncludeBundle: v1alpha2.IncludeBundle{
									MinVersion: "0.1.0",
								},
							},
						},
					},
				},
			},
		},
	}
	// Run each case as a subtest so failures identify the scenario by name.
	for _, s := range specs {
		t.Run(s.name, func(t *testing.T) {
			ic, err := s.strategy.UpdateIncludeConfig(s.cfg, s.in)
			if s.expErr != "" {
				require.EqualError(t, err, s.expErr)
			} else {
				require.NoError(t, err)
				require.Equal(t, s.exp, ic)
			}
		})
	}
}
// TestSearch verifies the binary-search helper that locates the smallest
// version strictly greater than a target within a sorted version slice; the
// zero semver.Version signals that no such successor exists.
func TestSearch(t *testing.T) {
	type searchCase struct {
		name            string
		versions        []semver.Version
		target          semver.Version
		expectedVersion semver.Version
	}
	searchCases := []searchCase{
		{
			name: "Valid/TargetExistsInVersions",
			versions: []semver.Version{
				semver.MustParse("0.0.1"),
				semver.MustParse("0.0.2"),
				semver.MustParse("0.1.0"),
				semver.MustParse("0.2.0"),
				semver.MustParse("0.3.0"),
				semver.MustParse("0.4.0"),
			},
			target:          semver.MustParse("0.1.0"),
			expectedVersion: semver.MustParse("0.2.0"),
		},
		{
			name: "Valid/TargetDoesNotExistInVersions",
			versions: []semver.Version{
				semver.MustParse("0.0.1"),
				semver.MustParse("0.0.2"),
				semver.MustParse("0.2.0"),
				semver.MustParse("0.3.0"),
				semver.MustParse("0.4.0"),
			},
			target:          semver.MustParse("0.1.0"),
			expectedVersion: semver.MustParse("0.2.0"),
		},
		{
			name: "Valid/OneHigherAndOneLower",
			versions: []semver.Version{
				semver.MustParse("0.0.2"),
				semver.MustParse("0.2.0"),
			},
			target:          semver.MustParse("0.1.0"),
			expectedVersion: semver.MustParse("0.2.0"),
		},
		{
			name: "Valid/TargetIsLatestVersion",
			versions: []semver.Version{
				semver.MustParse("0.0.1"),
				semver.MustParse("0.0.2"),
				semver.MustParse("0.1.0"),
			},
			target:          semver.MustParse("0.1.0"),
			expectedVersion: semver.Version{},
		},
		{
			name: "Valid/OneBundleInVersions",
			versions: []semver.Version{
				semver.MustParse("0.0.1"),
			},
			target:          semver.MustParse("0.1.0"),
			expectedVersion: semver.MustParse("0.0.1"),
		},
		{
			name:            "Valid/NoBundlesInVersions",
			versions:        []semver.Version{},
			target:          semver.MustParse("0.1.0"),
			expectedVersion: semver.Version{},
		},
	}
	for _, sc := range searchCases {
		sc := sc
		t.Run(sc.name, func(t *testing.T) {
			// Search spans the whole slice: [0, len-1].
			found := search(sc.versions, sc.target, 0, len(sc.versions)-1)
			require.Equal(t, sc.expectedVersion, found)
		})
	}
}
|
package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"log"
"net/url"
"os"
"strings"
"time"
plugin_models "code.cloudfoundry.org/cli/plugin/models"
"code.cloudfoundry.org/cli/plugin"
"github.com/cloudfoundry/noaa/consumer"
"github.com/cloudfoundry/sonde-go/events"
)
// runAndWait is the CF CLI plugin implementation; it carries no state.
type runAndWait struct{}

// runTaskReq is the JSON request body for POST /v3/apps/:guid/tasks.
type runTaskReq struct {
	Command string `json:"command"`
}

// runTaskResp is the subset of the v3 task resource this plugin reads.
type runTaskResp struct {
	GUID string `json:"guid"`
	Name string `json:"name"`
}

// paginatedGetTasksResponse models the paginated GET /v3/apps/:guid/tasks
// response; only the total count and the task resources are decoded.
type paginatedGetTasksResponse struct {
	Pagination struct {
		TotalResults int `json:"total_results"`
	} `json:"pagination"`
	Resources []runTaskResp `json:"resources"`
}

// taskStatus holds the task state polled from GET /v3/tasks/:guid
// (e.g. "RUNNING", "SUCCEEDED", "FAILED").
type taskStatus struct {
	State string `json:"state"`
}
// doRunAndWait kicks off a v3 task running cmd on the named app via the CF
// API (through `cf curl`), then blocks until the task finishes.
// args is the raw plugin argv: [command-name, APPNAME, cmd].
// Returns an error on bad usage, API failure, or task failure.
func doRunAndWait(cliConnection plugin.CliConnection, args []string) error {
	if len(args) != 3 {
		return errors.New("Expected 2 args: APPNAME cmd")
	}
	appName := args[1]
	cmd := args[2]

	// Resolve the app name to its GUID.
	app, err := getApp(cliConnection, appName)
	if err != nil {
		return err
	}

	// Build the JSON request body for the task-create endpoint.
	b := &bytes.Buffer{}
	err = json.NewEncoder(b).Encode(&runTaskReq{Command: cmd})
	if err != nil {
		return err
	}

	log.Println("Kicking off task...")
	// b.String() is the idiomatic form of string(b.Bytes()) and avoids an
	// extra conversion.
	out, err := cliConnection.CliCommandWithoutTerminalOutput("curl", "-H", "Content-Type: application/json", "-d", b.String(), "-X", "POST", fmt.Sprintf("/v3/apps/%s/tasks", app.Guid))
	if err != nil {
		return err
	}

	log.Println("Task started...")
	// The CLI returns output as a slice of lines; rejoin and decode directly
	// from a string reader (no intermediate []byte copy needed).
	var tr runTaskResp
	err = json.NewDecoder(strings.NewReader(strings.Join(out, "\n"))).Decode(&tr)
	if err != nil {
		return err
	}
	if tr.GUID == "" {
		return errors.New("Empty task ID")
	}

	return waitForCompletion(cliConnection, app.Guid, tr.GUID, tr.Name)
}
// Run dispatches the plugin subcommand named in args[0] and exits the process
// with status 1 if the handler reports an error.
func (c *runAndWait) Run(cliConnection plugin.CliConnection, args []string) {
	var err error
	if args[0] == "run-and-wait" {
		err = doRunAndWait(cliConnection, args)
	} else if args[0] == "wait" {
		err = doWait(cliConnection, args)
	}
	if err == nil {
		return
	}
	log.Println(err)
	os.Exit(1)
}
// getApp looks up the application model (including its GUID) for appName via
// the CLI connection, logging progress along the way.
func getApp(cliConnection plugin.CliConnection, appName string) (*plugin_models.GetAppModel, error) {
	log.Println("Getting app id...")
	app, lookupErr := cliConnection.GetApp(appName)
	if lookupErr != nil {
		return nil, lookupErr
	}
	log.Println("App ID:", app.Guid)
	return &app, nil
}
func doWait(cliConnection plugin.CliConnection, args []string) error {
if len(args) != 3 {
return errors.New("Expected 2 args: APPNAME TASK")
}
appName := args[1]
task := args[2]
app, err := getApp(cliConnection, appName)
if err != nil {
return err
}
log.Println("Getting task id...")
out, err := cliConnection.CliCommandWithoutTerminalOutput("curl", "-H", "Content-Type: application/json", fmt.Sprintf("/v3/apps/%s/tasks?names=%s", app.Guid, url.QueryEscape(task)))
if err != nil {
return err
}
var gtr paginatedGetTasksResponse
err = json.NewDecoder(bytes.NewReader([]byte(strings.Join(out, "\n")))).Decode(>r)
if err != nil {
return err
}
if gtr.Pagination.TotalResults != 1 {
return fmt.Errorf("Invalid number of tasks found for name %s: %d", task, gtr.Pagination.TotalResults)
}
if gtr.Resources[0].GUID == "" {
return errors.New("Empty task ID")
}
return waitForCompletion(cliConnection, app.Guid, gtr.Resources[0].GUID, gtr.Resources[0].Name)
}
// waitForCompletion streams the task's log lines from the Doppler firehose
// while polling the v3 task endpoint with exponential backoff (capped at 30s)
// until the task reaches SUCCEEDED (nil) or FAILED (error).
func waitForCompletion(cliConnection plugin.CliConnection, appGUID, taskID, taskName string) error {
	log.Println("Task ID / Name:", taskID, " / ", taskName)

	// Task log lines arrive on the app's log stream tagged with this source type.
	targetSourceType := fmt.Sprintf("APP/TASK/%s", taskName)

	dopplerEndpoint, err := cliConnection.DopplerEndpoint()
	if err != nil {
		return err
	}
	token, err := cliConnection.AccessToken()
	if err != nil {
		return err
	}

	cons := consumer.New(dopplerEndpoint, nil, nil)
	defer cons.Close()

	messages, errorChannel := cons.TailingLogs(appGUID, token)

	// cancel stops the log-forwarding goroutine when this function returns.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go func() {
		for {
			select {
			case m := <-messages:
				// Forward only this task's lines, routing OUT/ERR to the
				// matching local stream.
				if m.GetSourceType() == targetSourceType {
					switch m.GetMessageType() {
					case events.LogMessage_OUT:
						os.Stdout.Write(m.GetMessage())
						os.Stdout.WriteString("\n")
					case events.LogMessage_ERR:
						os.Stderr.Write(m.GetMessage())
						os.Stderr.WriteString("\n")
					}
				}
			case e := <-errorChannel:
				// Log-stream errors are non-fatal; polling decides completion.
				log.Println("error reading logs:", e)
			case <-ctx.Done():
				return
			}
		}
	}()

	// Poll the task state with exponential backoff: 1s, 2s, 4s, ... up to 30s.
	sleepTime := time.Second
	maxSleep := time.Second * 30
	for {
		time.Sleep(sleepTime)

		out, err := cliConnection.CliCommandWithoutTerminalOutput("curl", fmt.Sprintf("/v3/tasks/%s", taskID))
		if err != nil {
			return err
		}
		fullS := strings.Join(out, "\n")

		var ts taskStatus
		err = json.NewDecoder(bytes.NewReader([]byte(fullS))).Decode(&ts)
		if err != nil {
			return err
		}

		switch ts.State {
		case "SUCCEEDED":
			return nil // happy
		case "FAILED":
			// Dump the raw response so the failure reason is visible.
			log.Println(fullS)
			return errors.New("task failed")
		default:
			// Still running: back off, capped at maxSleep.
			sleepTime *= 2
			if sleepTime > maxSleep {
				sleepTime = maxSleep
			}
		}
	}
}
// GetMetadata describes the plugin to the CF CLI: its name, version, minimum
// CLI version, and the two subcommands it registers (run-and-wait, wait).
func (c *runAndWait) GetMetadata() plugin.PluginMetadata {
	return plugin.PluginMetadata{
		Name: "Run and Wait",
		Version: plugin.VersionType{
			Major: 0,
			Minor: 3,
			Build: 0,
		},
		// Oldest CF CLI version the plugin API calls here are known to work with.
		MinCliVersion: plugin.VersionType{
			Major: 6,
			Minor: 7,
			Build: 0,
		},
		Commands: []plugin.Command{
			{
				Name:     "run-and-wait",
				HelpText: "Run task, and wait until complete.",
				UsageDetails: plugin.Usage{
					Usage: "run-and-wait\n   cf run-and-wait APPNAME \"cmd to run\"",
				},
			},
			{
				Name:     "wait",
				HelpText: "Wait for an existing task",
				UsageDetails: plugin.Usage{
					Usage: "wait\n   cf wait APPNAME TASK",
				},
			},
		},
	}
}
// main hands control to the CF plugin framework, which dispatches to
// runAndWait.Run for the registered subcommands.
func main() {
	plugin.Start(&runAndWait{})
}
|
package discern
import (
"fmt"
"github.com/hahnicity/go-discern/config"
"github.com/hahnicity/go-stringit"
)
// Requester enqueues one WikiRequest per company onto the work channel,
// throttling the number of in-flight requests to conf.Processes, then gathers
// every response and hands the full set to Analyze.
func Requester(conf *config.Config, companies map[string]string, work chan<- WikiRequest) {
	inFlight := 0
	responses := make(chan *WikiResponse)
	collected := make([]*WikiResponse, 0)
	for symbol, page := range companies {
		inFlight++
		work <- makeWikiRequest(conf.Year, page, symbol, conf.CloseReq, responses)
		collected = manageActiveProc(&inFlight, conf.Processes, collected, responses)
	}
	// Wait for all requests to finish
	for len(collected) < len(companies) {
		collected = append(collected, <-responses)
	}
	Analyze(collected, conf)
}
// Throttle number of active requests
// statsgrok.se is the problem here
// Throttle number of active requests
// statsgrok.se is the problem here
//
// manageActiveProc blocks and drains one response from c whenever the number
// of in-flight requests has reached maxRequests, appending it to ar and
// crediting the counter. Returns the (possibly grown) response slice.
//
// BUG FIX: the decrement previously ran unconditionally, so the counter was
// reset on every call (increment in Requester, decrement here) and the
// throttle never engaged for maxRequests > 1. The decrement must only happen
// when a response has actually been drained. The comparison is also `>=`
// rather than `==` as a defensive measure against overshoot.
func manageActiveProc(activeRequests *int,
	maxRequests int,
	ar []*WikiResponse,
	c chan *WikiResponse) []*WikiResponse {
	if *activeRequests >= maxRequests {
		resp := <-c
		ar = append(ar, resp)
		// One request has completed; free a slot.
		*activeRequests--
	}
	return ar
}
// Analyze runs both report passes over the collected responses: per-symbol
// mean views (against conf.MeanPercentile) and per-date spikes (against
// conf.ViewPercentile). Output goes to stdout.
func Analyze(ar []*WikiResponse, conf *config.Config) {
	analyzeMeans(ar, conf.MeanPercentile)
	analyzePercentiles(ar, conf.ViewPercentile)
}
// analyzePercentiles prints, for each response, the recent dates whose view
// counts fall within the given percentile.
//
// BUG FIX: the loop previously executed `return` when one response had no
// qualifying dates, silently skipping analysis of every remaining response.
// It now uses `continue` so each response is considered independently.
func analyzePercentiles(ar []*WikiResponse, viewPercentile float64) {
	for _, resp := range ar {
		dates := FindRecentDates(resp, viewPercentile)
		if len(dates) == 0 {
			// Nothing notable for this symbol; move on to the next one.
			continue
		}
		fmt.Println(
			stringit.Format(
				"Analyzed {} and found following dates within {} percentile",
				resp.Symbol,
				viewPercentile,
			),
		)
		for date, views := range dates {
			fmt.Println(stringit.Format("\t{}:{}", date, views))
		}
	}
}
// analyzeMeans computes the mean view count per symbol and prints the symbols
// whose means land within the given percentile.
func analyzeMeans(ar []*WikiResponse, meanPercentile float64) {
	meansBySymbol := make(map[string]int, len(ar))
	for _, resp := range ar {
		meansBySymbol[resp.Symbol] = FindMeanViews(resp)
	}
	fmt.Printf("Companies with mean views within the %f percentile were:\n", meanPercentile)
	top := FindHighestMeans(meansBySymbol, meanPercentile)
	for symbol, views := range top {
		fmt.Println(stringit.Format("\t{}:{}", symbol, views))
	}
}
|
package main
import (
"fmt"
"io"
"os"
)
// isErr prints the error and terminates the process with exit status 1;
// a nil error is a no-op.
func isErr(e error) {
	if e == nil {
		return
	}
	fmt.Println("Error: ", e)
	os.Exit(1)
}
// main copies the file named by the first CLI argument to stdout (a minimal
// `cat`). Fixes: previously it indexed os.Args[1] without checking length
// (panic when run with no argument), never closed the file, and ignored the
// io.Copy error.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("Error: ", "usage: expected a file path argument")
		os.Exit(1)
	}
	// file implements a Reader interface
	file, err := os.Open(os.Args[1])
	isErr(err)
	defer file.Close()
	_, err = io.Copy(os.Stdout, file)
	isErr(err)
}
|
package main
import (
"flag"
"fmt"
)
// main parses a -port flag and prints it, demonstrating flag.Int (which
// returns a *int).
func main() {
	var port = flag.Int("port", 8000, "port number to start the server on")
	flag.Parse()
	// log.SetFlags(log.LstdFlags | log.Lshortfile)
	fmt.Println("Starting server on port:", *port)
	// NOTE(review): the second verb receives `port` (a *int), so %d prints the
	// pointer's address in decimal, not the port value — presumably intentional
	// for demonstration; confirm, or pass *port if the value was meant.
	fmt.Printf("Option myFlag: %T, %d, %d\n", port, port, *port)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package verifier is a framework for running verification function in parallel
// to the actual test with option to re-run verification function in a loop until
// the primary test is finished.
//
// Usage:
// verF := func() (verifier.ResultType, error) {
// res := any_test_function_desirable(additional_params)
// return verifier.ResultType{Data: res, Timestamp: time.Now()}, nil
// }
// vf := verifier.NewVerifier(ctx, verF) // This only creates framework.
// defer vf.Finish() // This destroys framework.
// (...)
// vf.StartJob() // This triggers starting verification loop.
// (...test...)
// results, err := vf.StopJob()
// (analyze results slice)
//
// State machine for the verifier:
//
// *
// |
// V
// /-----------------\
// | Idle |- - - - - - - - -
// \-----------------/ \ verifyFinish
// / ^ \
// / \ v
// / \ /-----------------\
// verifyStart ( \ | Finished |
// \ ) verifyStop \-----------------/
// \ / ^
// \ / /
// v / /
// /-----------------\ / verifyFinish
// | Running |- - - - - - - - -
// \-----------------/
// / ^
// / \
// ( )
// \ /
// \___/
// verifyTimeout
package verifier
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package ip
import (
"context"
"io"
"net"
"os"
"reflect"
"testing"
)
// stubCmdRunner is a simple stub of CmdRunner which always returns the given content
// as command output. This is useful for testing some simple parsing that is not
// extracted as an independent function.
type stubCmdRunner struct {
out []byte
}
// Run is a noop stub which always returns nil.
func (r *stubCmdRunner) Run(ctx context.Context, cmd string, args ...string) error {
return nil
}
// Output is a stub which pretends the command is executed successfully and prints
// the pre-assigned output.
func (r *stubCmdRunner) Output(ctx context.Context, cmd string, args ...string) ([]byte, error) {
return r.out, nil
}
// CreateCmd is a stub function which does nothing.
func (r *stubCmdRunner) CreateCmd(ctx context.Context, cmd string, args ...string) {
return
}
// SetStdOut is a stub function which does nothing.
func (r *stubCmdRunner) SetStdOut(stdoutFile *os.File) {
return
}
// StderrPipe is a stub function which always returns nil.
func (r *stubCmdRunner) StderrPipe() (io.ReadCloser, error) {
return nil, nil
}
// StartCmd is a stub function which always returns nil.
func (r *stubCmdRunner) StartCmd() error {
return nil
}
// WaitCmd is a stub function which always returns nil.
func (r *stubCmdRunner) WaitCmd() error {
return nil
}
// CmdExists is a stub function which always returns false.
func (r *stubCmdRunner) CmdExists() bool {
return false
}
// ReleaseProcess is a stub function which always returns nil.
func (r *stubCmdRunner) ReleaseProcess() error {
return nil
}
// ResetCmd is a stub function which does nothing.
func (r *stubCmdRunner) ResetCmd() {
return
}
// TestGetMAC verifies Runner.MAC parses the hardware address for the named
// interface out of `ip link show` output, rejecting empty output and output
// for a differently-named interface.
func TestGetMAC(t *testing.T) {
	testcases := []struct {
		out        string
		expect     net.HardwareAddr
		shouldFail bool
	}{
		// Some invalid output.
		{
			out:        "",
			shouldFail: true, // Empty.
		},
		{
			out: `2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
    altname enp2s0`,
			shouldFail: true, // Unmatched name.
		},
		// Valid case.
		{
			out: `2: wlan0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 1a:2b:3c:4d:5e:6f ff:ff:ff:ff:ff:ff
`,
			expect:     net.HardwareAddr{0x1a, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f},
			shouldFail: false,
		},
	}
	// Reuse one stub/runner pair; only the canned output changes per case.
	stub := &stubCmdRunner{}
	r := NewRunner(stub)
	for i, tc := range testcases {
		stub.out = []byte(tc.out)
		// Test MAC function.
		got, err := r.MAC(context.Background(), "wlan0")
		if tc.shouldFail {
			if err == nil {
				t.Errorf("case#%d should have error", i)
			}
			continue
		}
		if err != nil {
			t.Errorf("case#%d failed with err=%v", i, err)
			continue
		}
		if !reflect.DeepEqual(got, tc.expect) {
			t.Errorf("case#%d got MAC: %v, want: %v", i, got, tc.expect)
		}
	}
}
// TestShowLink verifies Runner.showLink parses name, state, MAC, flags, and an
// optional "@alias" suffix from full-format `ip link show` output, and rejects
// empty, truncated, mismatched-name, and brief-format output.
func TestShowLink(t *testing.T) {
	testcases := []struct {
		shouldFail bool
		out        string
		expect     *showLinkIfaceResult
	}{
		// Invalid cases.
		{
			shouldFail: true, // Empty.
			out:        "",
		},
		{
			shouldFail: true, // Incomplete results.
			out:        `1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000`,
		},
		{
			shouldFail: true, // Unmatched name.
			out: `2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
    altname enp2s0`,
		},
		{
			shouldFail: true, // Brief format not supported.
			out:        "wlan0            UP             01:02:03:04:05:06 <BROADCAST,MULTICAST,UP,LOWER_UP> \n",
		},
		// Valid cases.
		{
			shouldFail: false,
			out: `2: wlan0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
    altname enp2s0`,
			expect: &showLinkIfaceResult{
				name:  "wlan0",
				state: "UP",
				MAC:   "01:02:03:04:05:06",
				flags: "<BROADCAST,MULTICAST,UP,LOWER_UP>",
			},
		},
		{
			shouldFail: false,
			out: `2: wlan0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
`,
			expect: &showLinkIfaceResult{
				name:  "wlan0",
				state: "UP",
				MAC:   "01:02:03:04:05:06",
				flags: "<BROADCAST,MULTICAST,UP,LOWER_UP>",
			},
		},
		{
			// "wlan0@" — an "@" with an empty alias is tolerated.
			shouldFail: false,
			out: `2: wlan0@: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
`,
			expect: &showLinkIfaceResult{
				name:  "wlan0",
				state: "UP",
				MAC:   "01:02:03:04:05:06",
				flags: "<BROADCAST,MULTICAST,UP,LOWER_UP>",
			},
		},
		{
			// "wlan0@someAlias" — the alias is split off into its own field.
			shouldFail: false,
			out: `2: wlan0@someAlias: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
`,
			expect: &showLinkIfaceResult{
				name:  "wlan0",
				state: "UP",
				MAC:   "01:02:03:04:05:06",
				flags: "<BROADCAST,MULTICAST,UP,LOWER_UP>",
				alias: "someAlias",
			},
		},
	}
	// Reuse one stub/runner pair; only the canned output changes per case.
	stub := &stubCmdRunner{}
	r := NewRunner(stub)
	for i, tc := range testcases {
		stub.out = []byte(tc.out)
		// Test showLink function.
		got, err := r.showLink(context.Background(), "wlan0")
		if tc.shouldFail {
			if err == nil {
				t.Errorf("case#%d should have error", i)
			}
			continue
		}
		if err != nil {
			t.Errorf("case#%d failed with err=%v", i, err)
			continue
		}
		if !reflect.DeepEqual(got, tc.expect) {
			t.Errorf("case#%d got MAC: %v, want: %v", i, got, tc.expect)
		}
	}
}
// TestLinkWithPrefix verifies Runner.LinkWithPrefix returns the names of all
// interfaces whose name starts with the given prefix (aliases after "@"
// stripped), across empty, brief-format, non-matching, and multi-interface
// `ip link show` output.
func TestLinkWithPrefix(t *testing.T) {
	testcases := []struct {
		prefix string
		out    string
		expect []string
	}{
		{
			prefix: "somePrefix",
			out:    "",
			expect: []string{},
		},
		{
			prefix: "somePrefix",
			out:    "wlan0            UP             01:02:03:04:05:06 <BROADCAST,MULTICAST,UP,LOWER_UP> \n",
			expect: []string{}, // Brief format not expected.
		},
		{
			prefix: "somePrefix",
			out: `2: wlan0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
    altname enp2s0`,
			expect: []string{}, // No matches.
		},
		{
			prefix: "somePrefix",
			out: `2: somePrefix: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
    altname enp2s0`,
			expect: []string{"somePrefix"}, // Name matches exactly.
		},
		{
			prefix: "somePrefix",
			out: `2: somePrefix123: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
    altname enp2s0`,
			expect: []string{"somePrefix123"}, // Name matches with prefix.
		},
		{
			prefix: "somePrefix",
			out: `2: somePrefix123@: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
    altname enp2s0`,
			expect: []string{"somePrefix123"}, // Can match even with empty alias.
		},
		{
			prefix: "somePrefix",
			out: `2: somePrefix123@someAlias: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 ff:ff:ff:ff:ff:ff
    altname enp2s0`,
			expect: []string{"somePrefix123"}, // Can match even with an alias.
		},
		{
			prefix: "veth",
			out: `
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: veth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 brd ff:ff:ff:ff:ff:ff
    altname enp2s0
3: eno2: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc fq_codel state DOWN mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 brd ff:ff:ff:ff:ff:ff
    altname enp0s31f6
`,
			expect: []string{"veth1"}, // Can match with multiple interfaces.
		},
		{
			prefix: "veth",
			out: `
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: veth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 brd ff:ff:ff:ff:ff:ff
    altname enp2s0
3: veth2: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc fq_codel state DOWN mode DEFAULT group default qlen 1000
    link/ether 01:02:03:04:05:06 brd ff:ff:ff:ff:ff:ff
    altname enp0s31f6
`,
			expect: []string{"veth1", "veth2"}, // Can match multiple ifaces.
		},
	}
	// Reuse one stub/runner pair; only the canned output changes per case.
	stub := &stubCmdRunner{}
	r := NewRunner(stub)
	for i, tc := range testcases {
		stub.out = []byte(tc.out)
		// Test showLink function.
		got, err := r.LinkWithPrefix(context.Background(), tc.prefix)
		if err != nil {
			t.Errorf("case#%d failed with err=%v", i, err)
			continue
		}
		// DeepEqual treats nil and empty slices as different, so the empty
		// expectation is checked by length instead.
		if len(tc.expect) == 0 {
			if len(got) != 0 {
				t.Errorf("case#%d got: %v, want: %v", i, got, tc.expect)
			}
		} else if !reflect.DeepEqual(got, tc.expect) {
			t.Errorf("case#%d got: %v, want: %v", i, got, tc.expect)
		}
	}
}
|
package lang
import "fmt"
// Run evaluates the given virtual module from the top in a fresh environment.
//
// Improvement: Run previously duplicated runVirtualModule's body but skipped
// its nil-bytecode guard, so calling Run on a not-yet-compiled module
// dereferenced a nil *Bytecode and panicked. Delegating to runVirtualModule
// keeps the two entry points consistent and compiles on demand.
func Run(mod *ModuleVirtual) {
	runVirtualModule(mod)
}
// loadModuleEnvironment evaluates a virtual module exactly once: native
// modules and already-evaluated virtual modules (environment != nil) are
// left untouched.
func loadModuleEnvironment(mod Module) {
	vm, isVirtual := mod.(*ModuleVirtual)
	// If the given module has already been evaluated, do nothing.
	if !isVirtual || vm.environment != nil {
		return
	}
	runVirtualModule(vm)
}
// runVirtualModule compiles the module if needed, installs a fresh root
// environment on it, and executes its bytecode. The environment is assigned
// before execution so the module counts as "evaluated" to re-entrant loads.
func runVirtualModule(mod *ModuleVirtual) {
	// Compile lazily: bytecode may not exist yet on first evaluation.
	if mod.bytecode == nil {
		Compile(mod)
	}

	env := makeEnvironment(nil)
	mod.environment = env
	runBlob(mod, env, *mod.bytecode)
}
// Environment is one lexical scope of the interpreter: an operand stack for
// expression evaluation plus named variable storage, chained to an enclosing
// scope via parent.
type Environment struct {
	// parent is the enclosing scope; nil for a module's root environment.
	parent *Environment
	// stack holds intermediate operand values for the current bytecode run.
	stack []Object
	// state maps variable names declared in this scope to their values.
	state map[string]Object
	// self is the closure currently executing in this environment (for InstrLoadSelf).
	self *ObjectClosure
}
// pushToStack pushes obj onto the environment's operand stack.
func (e *Environment) pushToStack(obj Object) {
	e.stack = append(e.stack, obj)
}
// popFromStack removes and returns the top operand. Popping an empty stack
// panics (a bytecode invariant violation, not an expected condition).
func (e *Environment) popFromStack() Object {
	last := len(e.stack) - 1
	top := e.stack[last]
	e.stack = e.stack[:last]
	return top
}
// alloc declares name in this scope, initialized to the none value.
func (e *Environment) alloc(name string) {
	e.state[name] = ObjectNone{}
}
// store assigns obj to the nearest declaration of name, walking outward
// through parent scopes.
//
// Improvement: the recursion previously fell off the top of the scope chain
// with a raw nil-pointer dereference when name was undeclared everywhere.
// It now panics with the same diagnostic message load uses, for consistency.
func (e *Environment) store(name string, obj Object) {
	if _, ok := e.state[name]; ok {
		e.state[name] = obj
		return
	}
	if e.parent == nil {
		panic(fmt.Sprintf("cannot find variable '%s'", name))
	}
	e.parent.store(name, obj)
}
// load resolves name against this scope and then outward through parents,
// panicking when the variable is declared nowhere in the chain.
func (e *Environment) load(name string) Object {
	if obj, ok := e.state[name]; ok {
		return obj
	}
	if e.parent == nil {
		panic(fmt.Sprintf("cannot find variable '%s'", name))
	}
	return e.parent.load(name)
}
// makeEnvironment creates an empty scope chained to parent (nil for a root
// scope). The operand stack starts nil and grows on first push.
func makeEnvironment(parent *Environment) *Environment {
	return &Environment{
		parent: parent,
		state:  make(map[string]Object),
	}
}
// runBlob executes blob's instructions from address 0 in env until it reaches
// a halt (returns nil) or a return instruction (returns the popped top of
// stack).
func runBlob(mod *ModuleVirtual, env *Environment, blob Bytecode) Object {
	for ip := uint32(0); ; {
		switch blob.Instructions[ip].(type) {
		case InstrHalt:
			return nil
		case InstrReturn:
			return env.popFromStack()
		}
		// runInstr returns the next instruction pointer (jumps included).
		ip = runInstr(mod, ip, env, blob.Instructions[ip])
	}
}
// runInstr executes a single instruction and returns the address of the next
// instruction to run. Control-flow instructions return their target address;
// every other instruction falls through to ip+1.
func runInstr(mod *ModuleVirtual, ip uint32, env *Environment, instr Instr) uint32 {
	switch instr := instr.(type) {
	case InstrHalt:
		// Halt is terminal; runBlob detects it before calling runInstr, so
		// returning ip just keeps the instruction pointer parked.
		return ip
	case InstrNOP:
		// do nothing
	case InstrJump:
		return uint32(instr.addr)
	case InstrJumpTrue:
		a := env.popFromStack().(*ObjectBool)
		if a.val {
			return uint32(instr.addr)
		}
	case InstrJumpFalse:
		a := env.popFromStack().(*ObjectBool)
		if !a.val {
			return uint32(instr.addr)
		}
	case InstrPush:
		env.pushToStack(instr.Val)
	case InstrPop:
		env.popFromStack()
	case InstrCopy:
		// Duplicate the top of the stack.
		a := env.popFromStack()
		env.pushToStack(a)
		env.pushToStack(a)
	case InstrReserve:
		env.alloc(instr.Name)
	case InstrStore:
		a := env.popFromStack()
		env.store(instr.Name, a)
	case InstrLoadMod:
		// Resolve the module path (top of stack) against this module's
		// dependency list, lazily running virtual modules before binding
		// their exports under the dependency's alias.
		path := env.popFromStack().(*ObjectStr).val
		var alias string
		var obj Object
		for _, dep := range mod.dependencies {
			if dep.relative == path {
				if !dep.module.IsNative() {
					runVirtualModule(dep.module.(*ModuleVirtual))
				}
				alias = dep.alias
				obj = dep.module.export()
				break
			}
		}
		if obj == nil {
			panic("could not load dependency")
		}
		env.alloc(alias)
		env.store(alias, obj)
	case InstrLoadAttr:
		a := env.popFromStack()
		env.pushToStack(a.(*ObjectStruct).Member(instr.Name))
	case InstrLoadSelf:
		env.pushToStack(env.self)
	case InstrLoad:
		a := env.load(instr.Name)
		env.pushToStack(a)
	case InstrDispatch:
		obj := env.popFromStack()
		switch fn := obj.(type) {
		case *ObjectClosure:
			// Pop arguments in parameter order into a fresh child scope.
			// NOTE(review): assumes the caller pushed exactly
			// len(fn.params) arguments — confirm against the compiler.
			child := makeEnvironment(fn.context)
			child.self = fn
			for _, sym := range fn.params {
				child.alloc(sym)
				obj := env.popFromStack()
				child.store(sym, obj)
			}
			ret := runBlob(mod, child, fn.bytecode)
			env.pushToStack(ret)
		case *ObjectBuiltin:
			var args []Object
			for i := 0; i < instr.args; i++ {
				args = append(args, env.popFromStack())
			}
			if ret, err := fn.val(args); err != nil {
				panic(err)
			} else {
				env.pushToStack(ret)
			}
		default:
			panic(fmt.Sprintf("cannot call %T", obj))
		}
	case InstrCreateClosure:
		// Capture the current environment as the closure's context.
		fn := env.popFromStack().(*ObjectFunction)
		clo := &ObjectClosure{
			context:  env,
			params:   fn.params,
			bytecode: fn.bytecode,
		}
		env.pushToStack(clo)
	case InstrAdd:
		b := env.popFromStack().(*ObjectInt)
		a := env.popFromStack().(*ObjectInt)
		sum := a.val + b.val
		env.pushToStack(&ObjectInt{sum})
	case InstrSub:
		b := env.popFromStack().(*ObjectInt)
		a := env.popFromStack().(*ObjectInt)
		diff := a.val - b.val
		env.pushToStack(&ObjectInt{diff})
	case InstrLT:
		b := env.popFromStack().(*ObjectInt)
		a := env.popFromStack().(*ObjectInt)
		test := a.val < b.val
		env.pushToStack(&ObjectBool{test})
	case InstrLTEquals:
		b := env.popFromStack().(*ObjectInt)
		a := env.popFromStack().(*ObjectInt)
		test := a.val <= b.val
		env.pushToStack(&ObjectBool{test})
	case InstrGT:
		b := env.popFromStack().(*ObjectInt)
		a := env.popFromStack().(*ObjectInt)
		test := a.val > b.val
		env.pushToStack(&ObjectBool{test})
	case InstrGTEquals:
		b := env.popFromStack().(*ObjectInt)
		a := env.popFromStack().(*ObjectInt)
		test := a.val >= b.val
		env.pushToStack(&ObjectBool{test})
	default:
		panic(fmt.Sprintf("cannot interpret %T instructions", instr))
	}
	return ip + 1
}
|
package main
import (
"bytes"
"encoding/csv"
"fmt"
"go/ast"
"go/format"
"go/token"
"io/ioutil"
"net/http"
"os"
"sort"
"strings"
)
// main downloads the ISO 639-2 CSV, parses it into language records, sorts
// and deduplicates them, and writes the generated Go source to code.gen.go.
func main() {
	const sourceURL = "https://raw.githubusercontent.com/haliaeetus/iso-639/master/data/iso_639-2.csv"
	inputData, err := httpGet(sourceURL)
	if err != nil {
		panic(err)
	}
	inputReader := bytes.NewReader(inputData)
	records, err := csv.NewReader(inputReader).ReadAll()
	if err != nil {
		panic(err)
	}
	// records[0] is the CSV header row; skip it.
	languageRecords := make(languageRecordList, 0, len(records)-1)
	for _, record := range records[1:] {
		// Column layout: 0=alpha-3 (terminology), 1=alpha-3 (bibliographic),
		// 2=alpha-2, 3=English name.
		languageRecords = append(languageRecords, languageRecord{
			Name:    record[3],
			Ref:     strings.ToUpper(record[0]),
			Alpha3:  record[0],
			Alpha3B: record[1],
			Alpha2:  record[2],
		})
	}
	sort.Stable(languageRecords)
	// the source data contains some duplicate entries
	languageRecords = languageRecords.Deduplicate()
	if err = writeASTFile("code.gen.go", languageRecords.GenerateAST()); err != nil {
		panic(err)
	}
}
// writeASTFile renders astFile with go/format and writes it to filename.
// A close error is reported only when no earlier error already occurred.
func writeASTFile(filename string, astFile *ast.File) (returnedErr error) {
	out, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer func() {
		closeErr := out.Close()
		if returnedErr == nil {
			returnedErr = closeErr
		}
	}()
	return format.Node(out, token.NewFileSet(), astFile)
}
// httpGet fetches urlStr and returns the response body. The body is always
// closed; a read error takes precedence over a close error.
//
// Bug fixed: the original checked the ReadAll error but returned the Close
// error — a failed read with a successful close yielded (nil, nil),
// silently masking the failure.
func httpGet(urlStr string) ([]byte, error) {
	res, err := http.Get(urlStr) // nolint: gosec
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(res.Body)
	closeErr := res.Body.Close()
	if err != nil {
		return nil, err
	}
	if closeErr != nil {
		return nil, closeErr
	}
	return body, nil
}
// languageRecord is one row of the ISO 639-2 table.
type languageRecord struct {
	Name    string // English language name
	Ref     string // generated Go identifier (upper-cased alpha-3 code)
	Alpha3  string // ISO 639-2/T three-letter code
	Alpha3B string // ISO 639-2/B bibliographic code, may be empty
	Alpha2  string // ISO 639-1 two-letter code, may be empty
}
// Alpha3Ref returns the alpha-3 code as a quoted string literal, or a
// reference to the shared empty3 identifier when the code is absent.
func (lr languageRecord) Alpha3Ref() ast.Expr {
	if lr.Alpha3 != "" {
		return &ast.BasicLit{Kind: token.STRING, Value: fmt.Sprintf("%q", lr.Alpha3)}
	}
	return &ast.Ident{Name: empty3Name}
}
// Alpha3BRef returns the bibliographic alpha-3 code as a quoted string
// literal, or a reference to the shared empty3 identifier when absent.
func (lr languageRecord) Alpha3BRef() ast.Expr {
	if lr.Alpha3B != "" {
		return &ast.BasicLit{Kind: token.STRING, Value: fmt.Sprintf("%q", lr.Alpha3B)}
	}
	return &ast.Ident{Name: empty3Name}
}
// Alpha2Ref returns the alpha-2 code as a quoted string literal, or a
// reference to the shared empty2 identifier when the code is absent.
func (lr languageRecord) Alpha2Ref() ast.Expr {
	if lr.Alpha2 != "" {
		return &ast.BasicLit{Kind: token.STRING, Value: fmt.Sprintf("%q", lr.Alpha2)}
	}
	return &ast.Ident{Name: empty2Name}
}
// languageRecordList implements sort.Interface ordered by alpha-3 code.
type languageRecordList []languageRecord

func (l languageRecordList) Len() int {
	return len(l)
}

func (l languageRecordList) Less(i, j int) bool {
	return l[i].Alpha3 < l[j].Alpha3
}

func (l languageRecordList) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}
// Deduplicate removes exact duplicates in place and returns the shortened
// slice. It assumes l is sorted so duplicates are adjacent (main sorts
// before calling). offset counts duplicates seen so far; each kept element
// shifts left by that amount.
func (l languageRecordList) Deduplicate() languageRecordList {
	offset := 0
	for i := 1; i < len(l); i++ {
		// Compare against the last element kept, not the raw predecessor.
		if l[i-offset-1] == l[i] {
			offset++
			continue
		}
		l[i-offset] = l[i]
	}
	return l[:len(l)-offset]
}
// GenerateAST builds the complete generated file: one var declaration per
// language followed by the codes lookup table.
func (l languageRecordList) GenerateAST() *ast.File {
	return &ast.File{
		Name: &ast.Ident{Name: pkgName},
		Decls: []ast.Decl{
			l.generateLanguages(),
			l.generateCodes(),
		},
	}
}
// generateLanguages emits one `REF = Language{N}` var spec per record, where
// N is the record's 1-based position (0 is reserved for the empty entry in
// the codes table).
func (l languageRecordList) generateLanguages() ast.Decl {
	specs := make([]ast.Spec, 0, len(l))
	for i, lr := range l {
		specs = append(specs, &ast.ValueSpec{
			Doc: &ast.CommentGroup{
				List: []*ast.Comment{
					{Text: fmt.Sprintf("\n// %s is %s.", lr.Ref, lr.Name)},
				},
			},
			Names: []*ast.Ident{{Name: lr.Ref}},
			Values: []ast.Expr{
				&ast.CompositeLit{
					Type: &ast.Ident{Name: languageType},
					Elts: []ast.Expr{
						&ast.BasicLit{Kind: token.INT, Value: fmt.Sprintf("%d", i+1)},
					},
				},
			},
		})
	}
	return &ast.GenDecl{
		Tok: token.VAR,
		Doc: &ast.CommentGroup{
			List: []*ast.Comment{
				{Text: "\n// Code generated. DO NOT EDIT."},
			},
		},
		Specs: specs,
	}
}
// generateCodes emits `var codes = [...][formatsCount]string{...}` with one
// row per language, indexed by the Language value. Row 0 holds the empty
// sentinels so the zero Language maps to no codes.
func (l languageRecordList) generateCodes() ast.Decl {
	elts := make([]ast.Expr, 0, len(l)+1)
	elts = append(elts, &ast.CompositeLit{
		Elts: []ast.Expr{
			&ast.Ident{Name: empty3Name},
			&ast.Ident{Name: empty3Name},
			&ast.Ident{Name: empty2Name},
		},
	})
	for _, lr := range l {
		elts = append(elts, &ast.CompositeLit{
			Elts: []ast.Expr{
				lr.Alpha3Ref(),
				lr.Alpha3BRef(),
				lr.Alpha2Ref(),
			},
		})
	}
	return &ast.GenDecl{
		Tok: token.VAR,
		Specs: []ast.Spec{
			&ast.ValueSpec{
				Names: []*ast.Ident{{Name: codes}},
				Values: []ast.Expr{
					&ast.CompositeLit{
						Type: &ast.ArrayType{
							Len: &ast.Ellipsis{},
							Elt: &ast.ArrayType{
								Len: &ast.Ident{Name: formatsCount},
								Elt: &ast.Ident{Name: stringType},
							},
						},
						Elts: elts,
					},
				},
			},
		},
	}
}
// Identifiers referenced by the generated code. They must match the
// hand-written declarations in the target package.
const pkgName = "languagecode"
const languageType = "Language"
const codes = "codes"
const formatsCount = "formatsCount"
const stringType = "string"
const empty3Name = "empty3"
const empty2Name = "empty2"
|
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lisafs
import (
"path"
"path/filepath"
"runtime/debug"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/flipcall"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/unet"
)
// Connection represents a connection between a mount point in the client and a
// mount point in the server. It is owned by the server on which it was started
// and facilitates communication with the client mount.
//
// Each connection is set up using a unix domain socket. One end is owned by
// the server and the other end is owned by the client. The connection may
// spawn additional communication channels for the same mount for increased
// RPC concurrency.
//
// Reference model:
//   - When any FD is created, the connection takes a ref on it which represents
//     the client's ref on the FD.
//   - The client can drop its ref via the Close RPC which will in turn make the
//     connection drop its ref.
type Connection struct {
	// server is the server on which this connection was created. It is immutably
	// associated with it for its entire lifetime.
	server *Server

	// mountPath is the path to a file inside the server that is served to this
	// connection as its root FD. IOW, this connection is mounted at this path.
	// mountPath is trusted because it is configured by the server (trusted) as
	// per the user's sandbox configuration. mountPath is immutable.
	mountPath string

	// maxMessageSize is the cached value of server.impl.MaxMessageSize().
	maxMessageSize uint32

	// readonly indicates if this connection is readonly. All write operations
	// will fail with EROFS.
	readonly bool

	// sockComm is the main socket by which this connection is established.
	sockComm *sockCommunicator

	// channelsMu protects channels.
	channelsMu sync.Mutex
	// channels keeps track of all open channels.
	channels []*channel

	// activeWg represents active channels.
	activeWg sync.WaitGroup

	// reqGate counts requests that are still being handled.
	reqGate sync.Gate

	// channelAlloc is used to allocate memory for channels.
	channelAlloc *flipcall.PacketWindowAllocator

	// fdsMu protects fds and nextFDID below.
	fdsMu sync.RWMutex
	// fds keeps tracks of open FDs on this server. It is protected by fdsMu.
	fds map[FDID]genericFD
	// nextFDID is the next available FDID. It is protected by fdsMu.
	nextFDID FDID
}
// CreateConnection initializes a new connection which will be mounted at
// mountPath. The connection must be started separately (see Run). The
// packet-window allocator created here is released in close().
func (s *Server) CreateConnection(sock *unet.Socket, mountPath string, readonly bool) (*Connection, error) {
	mountPath = path.Clean(mountPath)
	if !filepath.IsAbs(mountPath) {
		log.Warningf("mountPath %q is not absolute", mountPath)
		return nil, unix.EINVAL
	}

	c := &Connection{
		sockComm:       newSockComm(sock),
		server:         s,
		maxMessageSize: s.impl.MaxMessageSize(),
		mountPath:      mountPath,
		readonly:       readonly,
		channels:       make([]*channel, 0, maxChannels()),
		fds:            make(map[FDID]genericFD),
		// InvalidFDID is reserved; start handing out IDs after it.
		nextFDID: InvalidFDID + 1,
	}

	alloc, err := flipcall.NewPacketWindowAllocator()
	if err != nil {
		return nil, err
	}
	c.channelAlloc = alloc
	return c, nil
}
// ServerImpl returns the associated server implementation.
func (c *Connection) ServerImpl() ServerImpl {
	return c.server.impl
}
// Run defines the lifecycle of a connection: it serves RPCs off the main
// socket until a read or write fails, then tears the connection down.
func (c *Connection) Run() {
	defer c.close()

	// Start handling requests on this connection.
	for {
		m, payloadLen, err := c.sockComm.rcvMsg(0 /* wantFDs */)
		if err != nil {
			log.Debugf("sock read failed, closing connection: %v", err)
			return
		}

		respM, respPayloadLen, respFDs := c.handleMsg(c.sockComm, m, payloadLen)
		err = c.sockComm.sndPrepopulatedMsg(respM, respPayloadLen, respFDs)
		// Donated FDs were duplicated by the send; drop our copies regardless
		// of the send result.
		closeFDs(respFDs)
		if err != nil {
			log.Debugf("sock write failed, closing connection: %v", err)
			return
		}
	}
}
// service starts servicing the passed channel until the channel is shutdown.
// This is a blocking method and hence must be called in a separate goroutine.
func (c *Connection) service(ch *channel) error {
	// Block until the client sends its first request over the flipcall window.
	rcvDataLen, err := ch.data.RecvFirst()
	if err != nil {
		return err
	}
	// A zero-length receive indicates the channel was shut down.
	for rcvDataLen > 0 {
		m, payloadLen, err := ch.rcvMsg(rcvDataLen)
		if err != nil {
			return err
		}
		respM, respPayloadLen, respFDs := c.handleMsg(ch, m, payloadLen)
		numFDs := ch.sendFDs(respFDs)
		closeFDs(respFDs)

		ch.marshalHdr(respM, numFDs)
		// Send the response and wait for the next request in one round trip.
		rcvDataLen, err = ch.data.SendRecv(respPayloadLen + chanHeaderLen)
		if err != nil {
			return err
		}
	}
	return nil
}
// respondError writes an ErrorResp carrying err into comm's payload buffer
// and returns the triple to be sent back to the client (no FDs are donated).
func (c *Connection) respondError(comm Communicator, err unix.Errno) (MID, uint32, []int) {
	resp := &ErrorResp{errno: uint32(err)}
	respLen := uint32(resp.SizeBytes())
	resp.MarshalUnsafe(comm.PayloadBuf(respLen))
	return Error, respLen, nil
}
// handleMsg dispatches message m to the server's handler table and returns
// the response message ID, payload length and FDs to donate. All failure
// modes are converted into an ErrorResp so the client always gets a reply.
func (c *Connection) handleMsg(comm Communicator, m MID, payloadLen uint32) (retM MID, retPayloadLen uint32, retFDs []int) {
	if payloadLen > c.maxMessageSize {
		log.Warningf("received payload is too large: %d bytes", payloadLen)
		return c.respondError(comm, unix.EIO)
	}
	if !c.reqGate.Enter() {
		// c.close() has been called; the connection is shutting down.
		return c.respondError(comm, unix.ECONNRESET)
	}
	defer func() {
		c.reqGate.Leave()

		// Don't allow a panic to propagate.
		if err := recover(); err != nil {
			// Include a useful log message.
			log.Warningf("panic in handler: %v\n%s", err, debug.Stack())

			// Wrap in an EREMOTEIO error; we don't really have a better way to
			// describe this kind of error. EREMOTEIO is appropriate for a generic
			// failed RPC message.
			retM, retPayloadLen, retFDs = c.respondError(comm, unix.EREMOTEIO)
		}
	}()

	// Check if the message is supported for forward compatibility.
	if int(m) >= len(c.server.handlers) || c.server.handlers[m] == nil {
		log.Warningf("received request which is not supported by the server, MID = %d", m)
		return c.respondError(comm, unix.EOPNOTSUPP)
	}

	// Try handling the request.
	respPayloadLen, err := c.server.handlers[m](c, comm, payloadLen)
	fds := comm.ReleaseFDs()
	if err != nil {
		closeFDs(fds)
		return c.respondError(comm, p9.ExtractErrno(err))
	}
	if respPayloadLen > c.maxMessageSize {
		log.Warningf("handler for message %d responded with payload which is too large: %d bytes", m, respPayloadLen)
		closeFDs(fds)
		return c.respondError(comm, unix.EIO)
	}

	return m, respPayloadLen, fds
}
// close tears the connection down: drains in-flight requests, shuts down and
// destroys all channels, frees channel memory, closes the main socket, and
// drops the connection's ref on every tracked FD.
func (c *Connection) close() {
	// Wait for completion of all inflight requests. This is mostly so that if
	// a request is stuck, the sandbox supervisor has the opportunity to kill
	// us with SIGABRT to get a stack dump of the offending handler.
	c.reqGate.Close()

	// Shutdown and clean up channels.
	c.channelsMu.Lock()
	for _, ch := range c.channels {
		ch.shutdown()
	}
	// Wait for the service goroutines to exit before destroying the channels
	// they are using.
	c.activeWg.Wait()
	for _, ch := range c.channels {
		ch.destroy()
	}
	// This is to prevent additional channels from being created.
	c.channels = nil
	c.channelsMu.Unlock()

	// Free the channel memory.
	if c.channelAlloc != nil {
		c.channelAlloc.Destroy()
	}

	// Ensure the connection is closed.
	c.sockComm.destroy()

	// Cleanup all FDs.
	c.fdsMu.Lock()
	defer c.fdsMu.Unlock()
	for fdid := range c.fds {
		fd := c.stopTrackingFD(fdid)
		fd.DecRef(nil) // Drop the ref held by c.
	}
}
// lookupFD returns the FD tracked under id, taking a new reference on it.
//
// Postcondition: The caller gains a ref on the FD on success.
func (c *Connection) lookupFD(id FDID) (genericFD, error) {
	c.fdsMu.RLock()
	defer c.fdsMu.RUnlock()

	fd, ok := c.fds[id]
	if !ok {
		return nil, unix.EBADF
	}
	fd.IncRef()
	return fd, nil
}
// lookupControlFD retrieves the control FD identified by id on this
// connection. On success, the caller gains a ref on the FD.
func (c *Connection) lookupControlFD(id FDID) (*ControlFD, error) {
	fd, err := c.lookupFD(id)
	if err != nil {
		return nil, err
	}

	cfd, ok := fd.(*ControlFD)
	if !ok {
		// Wrong FD type: release the ref lookupFD took before failing.
		fd.DecRef(nil)
		return nil, unix.EINVAL
	}
	return cfd, nil
}
// lookupOpenFD retrieves the open FD identified by id on this
// connection. On success, the caller gains a ref on the FD.
func (c *Connection) lookupOpenFD(id FDID) (*OpenFD, error) {
	fd, err := c.lookupFD(id)
	if err != nil {
		return nil, err
	}
	if ofd, ok := fd.(*OpenFD); ok {
		return ofd, nil
	}
	// Wrong FD type: release the ref lookupFD took before failing.
	fd.DecRef(nil)
	return nil, unix.EINVAL
}
// lookupBoundSocketFD retrieves the BoundSocketFD identified by id on this
// connection. On success, the caller gains a ref on the FD.
func (c *Connection) lookupBoundSocketFD(id FDID) (*BoundSocketFD, error) {
	fd, err := c.lookupFD(id)
	if err != nil {
		return nil, err
	}

	bsfd, ok := fd.(*BoundSocketFD)
	if !ok {
		// Wrong FD type: release the ref lookupFD took before failing.
		fd.DecRef(nil)
		return nil, unix.EINVAL
	}
	return bsfd, nil
}
// insertFD inserts the passed fd into the internal datastructure to track FDs.
// The caller must hold a ref on fd which is transferred to the connection.
func (c *Connection) insertFD(fd genericFD) FDID {
	c.fdsMu.Lock()
	defer c.fdsMu.Unlock()

	res := c.nextFDID
	c.nextFDID++
	// Guard against FDID wrap-around; reusing IDs would corrupt the table.
	if c.nextFDID < res {
		panic("ran out of FDIDs")
	}
	c.fds[res] = fd
	return res
}
// removeFD makes c stop tracking the passed FDID and drops its ref on it.
func (c *Connection) removeFD(id FDID) {
	c.fdsMu.Lock()
	fd := c.stopTrackingFD(id)
	c.fdsMu.Unlock()
	if fd != nil {
		// Drop the ref held by c. This can take arbitrarily long. So do not hold
		// c.fdsMu while calling it.
		fd.DecRef(nil)
	}
}
// removeControlFDLocked is the same as removeFD with added preconditions.
//
// Preconditions:
//   - server's rename mutex must at least be read locked.
//   - id must be pointing to a control FD.
func (c *Connection) removeControlFDLocked(id FDID) {
	c.fdsMu.Lock()
	fd := c.stopTrackingFD(id)
	c.fdsMu.Unlock()
	if fd != nil {
		// Drop the ref held by c. This can take arbitrarily long. So do not hold
		// c.fdsMu while calling it.
		fd.(*ControlFD).decRefLocked()
	}
}
// stopTrackingFD makes c stop tracking the passed FDID. Note that the caller
// must drop ref on the returned fd (preferably without holding c.fdsMu).
//
// Precondition: c.fdsMu is locked.
func (c *Connection) stopTrackingFD(id FDID) genericFD {
	fd := c.fds[id]
	if fd == nil {
		// Fixed: the log message previously named "removeFDLocked", a
		// function that does not exist; report the real function name.
		log.Warningf("stopTrackingFD called on non-existent FDID %d", id)
		return nil
	}
	delete(c.fds, id)
	return fd
}
|
package cpu
// Flag instructions: each takes one tick and sets or clears a single
// processor status flag (C = carry, D = decimal, I = interrupt disable,
// V = overflow).

// clc clears the carry flag.
func (cpu *CPU) clc() {
	println("CLC")
	cpu.waitTick()
	cpu.C = false
}

// cld clears the decimal-mode flag.
func (cpu *CPU) cld() {
	println("CLD")
	cpu.waitTick()
	cpu.D = false
}

// cli clears the interrupt-disable flag.
func (cpu *CPU) cli() {
	println("CLI")
	cpu.waitTick()
	cpu.I = false
}

// clv clears the overflow flag.
func (cpu *CPU) clv() {
	println("CLV")
	cpu.waitTick()
	cpu.V = false
}

// sec sets the carry flag.
func (cpu *CPU) sec() {
	println("SEC")
	cpu.waitTick()
	cpu.C = true
}

// sed sets the decimal-mode flag.
func (cpu *CPU) sed() {
	println("SED")
	cpu.waitTick()
	cpu.D = true
}

// sei sets the interrupt-disable flag.
func (cpu *CPU) sei() {
	println("SEI")
	cpu.waitTick()
	cpu.I = true
}
// Register transfer and increment/decrement instructions. All except txs
// update the N and Z flags via loadFlag.

// tax transfers A to X.
func (cpu *CPU) tax() {
	println("TAX")
	cpu.waitTick()
	// Written this way so tax and tay differ by only one symbol.
	cpu.x = cpu.a
	loadFlag(cpu, cpu.a)
}

// tay transfers A to Y.
func (cpu *CPU) tay() {
	println("TAY")
	cpu.waitTick()
	cpu.y = cpu.a
	loadFlag(cpu, cpu.a)
}

// tya transfers Y to A.
func (cpu *CPU) tya() {
	println("TYA")
	cpu.waitTick()
	cpu.a = cpu.y
	loadFlag(cpu, cpu.y)
}

// tsx transfers the stack pointer to X.
func (cpu *CPU) tsx() {
	println("TSX")
	cpu.waitTick()
	cpu.x = cpu.sp
	loadFlag(cpu, cpu.sp)
}

// txa transfers X to A.
func (cpu *CPU) txa() {
	println("TXA")
	cpu.waitTick()
	cpu.a = cpu.x
	loadFlag(cpu, cpu.x)
}

// txs transfers X to the stack pointer. Unlike the other transfers it does
// not affect any flags.
func (cpu *CPU) txs() {
	println("TXS")
	cpu.waitTick()
	// fmt.Printf("TXS 0x%v to 0x%v", cpu.x, cpu.sp)
	cpu.sp = cpu.x
}

// inx increments X (wraps at 0xff).
func (cpu *CPU) inx() {
	println("INX")
	cpu.waitTick()
	cpu.x += 1
	loadFlag(cpu, cpu.x)
}

// iny increments Y (wraps at 0xff).
func (cpu *CPU) iny() {
	println("INY")
	cpu.waitTick()
	cpu.y += 1
	loadFlag(cpu, cpu.y)
}

// dex decrements X (wraps at 0x00).
func (cpu *CPU) dex() {
	println("DEX")
	cpu.waitTick()
	cpu.x -= 1
	loadFlag(cpu, cpu.x)
}

// dey decrements Y (wraps at 0x00).
func (cpu *CPU) dey() {
	println("DEY")
	cpu.waitTick()
	cpu.y -= 1
	loadFlag(cpu, cpu.y)
}

// nop does nothing for one tick.
func (cpu *CPU) nop() {
	println("NOP")
	cpu.waitTick()
}
// brk triggers a software interrupt: pushes PC+2 and the status register,
// then loads the IRQ/BRK vector from 0xfffe/0xffff.
func (cpu *CPU) brk() {
	println("BRK")
	cpu.waitTick()
	cpu.pc += 2
	cpu.push(cpu.pch())
	cpu.waitTick()
	cpu.push(cpu.pcl())
	cpu.waitTick()
	// thereis no b-flag , see https://wiki.nesdev.com/w/index.php/CPU_status_flag_behavior
	// cpu.B = true
	cpu.push(cpu.GetP())
	cpu.waitTick()
	adl := cpu.read(0xfffe)
	cpu.waitTick()
	adh := cpu.read(0xffff)
	cpu.pc = makeUint16(adh, adl)
	printf("BRK -> 0x%x\n", cpu.pc)
	cpu.skip1 = false
}

// pha pushes the accumulator onto the stack.
func (cpu *CPU) pha() {
	println("PHA")
	cpu.waitTick()
	cpu.push(cpu.a)
	cpu.skip1 = false
}

// pla pulls the accumulator from the stack and updates N/Z.
func (cpu *CPU) pla() {
	println("PLA")
	cpu.waitTick()
	cpu.waitTick()
	m := cpu.pull()
	cpu.waitTick()
	cpu.a = m
	loadFlag(cpu, cpu.a)
}

// php pushes the processor status onto the stack.
func (cpu *CPU) php() {
	println("PHP")
	cpu.waitTick()
	cpu.push(cpu.GetP())
	cpu.skip1 = false
}

// plp pulls the processor status from the stack.
func (cpu *CPU) plp() {
	println("PLP")
	cpu.waitTick()
	cpu.waitTick()
	m := cpu.pull()
	cpu.waitTick()
	cpu.SetP(m)
}

// rts pulls the return address pushed by jsr and adds 1 to reach the next
// instruction (jsr pushes the address of its own third byte).
func (cpu *CPU) rts() {
	pc := cpu.pc
	cpu.waitTick()
	cpu.waitTick()
	pcl := cpu.pull()
	cpu.waitTick()
	pch := cpu.pull()
	cpu.pc = makeUint16(pch, pcl)
	cpu.waitTick()
	cpu.pc++
	// fmt.Printf("RTS from 0x%x to 0x%x, stack is 0x%x\n", pc, cpu.pc, cpu.sp)
	printf("RTS from 0x%x to 0x%x\n", pc, cpu.pc)
	cpu.skip1 = false
}

// rti restores the status register and program counter pushed by an
// interrupt. Unlike rts, no +1 adjustment is applied.
func (cpu *CPU) rti() {
	// pc := cpu.pc
	cpu.waitTick()
	cpu.waitTick()
	cpu.SetP(cpu.pull())
	cpu.waitTick()
	pcl := cpu.pull()
	cpu.waitTick()
	pch := cpu.pull()
	cpu.pc = makeUint16(pch, pcl)
	// fmt.Printf("RTI from 0x%x to 0x%x\n", pc, cpu.pc)
	// fmt.Printf("0x%x 0x%x\n", cpu.pc, cpu.sp)
	// for i := cpu.sp; i != 0; i++ {
	// 	fmt.Printf("val 0x%x sp 0x%x\n", cpu.read(makeUint16(0x01, cpu.sp+i)), cpu.sp+i)
	// }
	Echo = false
	cpu.skip1 = false
}
// Branch predicates: each reports whether its branch should be taken based
// on a single status flag. The actual PC adjustment happens elsewhere.

// bcs branches if carry is set.
func (cpu *CPU) bcs() bool {
	printf("BCS")
	return cpu.C
}

// bcc branches if carry is clear.
func (cpu *CPU) bcc() bool {
	printf("BCC")
	return !cpu.C
}

// beq branches if the zero flag is set.
func (cpu *CPU) beq() bool {
	printf("BEQ")
	return cpu.Z
}

// bne branches if the zero flag is clear.
func (cpu *CPU) bne() bool {
	printf("BNE")
	return !cpu.Z
}

// bmi branches if the negative flag is set.
func (cpu *CPU) bmi() bool {
	printf("BMI")
	return cpu.N
}

// bpl branches if the negative flag is clear.
func (cpu *CPU) bpl() bool {
	printf("BPL")
	return !cpu.N
}

// bvs branches if the overflow flag is set.
func (cpu *CPU) bvs() bool {
	printf("BVS")
	return cpu.V
}

// bvc branches if the overflow flag is clear.
func (cpu *CPU) bvc() bool {
	printf("BVC")
	return !cpu.V
}
// Read-modify-write instructions: cpu.data was fetched by the addressing
// step; each modifies it, updates N/Z (and C for shifts/rotates), and writes
// it back to the effective address.

// inc increments the fetched memory value.
func (cpu *CPU) inc() {
	println("INC")
	cpu.waitTick()
	cpu.data++
	loadFlag(cpu, cpu.data)
	cpu.waitTick()
	cpu.write(cpu.addr(), cpu.data)
	cpu.skip1 = false
}

// dec decrements the fetched memory value.
func (cpu *CPU) dec() {
	println("DEC")
	cpu.waitTick()
	cpu.data--
	loadFlag(cpu, cpu.data)
	cpu.waitTick()
	cpu.write(cpu.addr(), cpu.data)
	cpu.skip1 = false
}

// lsr shifts right one bit; bit 0 falls into carry.
func (cpu *CPU) lsr() {
	println("LSR")
	cpu.waitTick()
	cpu.C = (cpu.data & 0x01) == 0x01
	cpu.data = cpu.data >> 1
	loadFlag(cpu, cpu.data)
	cpu.waitTick()
	cpu.write(cpu.addr(), cpu.data)
	cpu.skip1 = false
}

// asl shifts left one bit; bit 7 falls into carry.
func (cpu *CPU) asl() {
	println("ASL")
	cpu.waitTick()
	cpu.C = (cpu.data & 0x80) == 0x80
	cpu.data = cpu.data << 1
	loadFlag(cpu, cpu.data)
	cpu.waitTick()
	cpu.write(cpu.addr(), cpu.data)
	cpu.skip1 = false
}

// rol rotates left through carry: old carry enters bit 0, bit 7 exits into
// carry.
func (cpu *CPU) rol() {
	println("ROL")
	cpu.waitTick()
	cbit := boolToUint8(cpu.C)
	cpu.C = (cpu.data & 0x80) == 0x80
	cpu.data = cpu.data<<1 + uint8(cbit)
	loadFlag(cpu, cpu.data)
	cpu.waitTick()
	cpu.write(cpu.addr(), cpu.data)
	cpu.skip1 = false
}

// ror rotates right through carry: old carry enters bit 7, bit 0 exits into
// carry.
func (cpu *CPU) ror() {
	println("ROR")
	cpu.waitTick()
	cbit := boolToUint8(cpu.C)
	cpu.C = (cpu.data & 0x01) == 0x01
	cpu.data = (cpu.data >> 1) + (uint8(cbit) << 7)
	loadFlag(cpu, cpu.data)
	cpu.waitTick()
	cpu.write(cpu.addr(), cpu.data)
	cpu.skip1 = false
}
// Load, logical and compare instructions operating on the fetched cpu.data.

// lda loads the accumulator and updates N/Z.
func (cpu *CPU) lda() {
	println("LDA")
	cpu.waitTick()
	cpu.a = cpu.data
	loadFlag(cpu, cpu.a)
}

// ldx loads X and updates N/Z.
func (cpu *CPU) ldx() {
	println("LDX")
	cpu.waitTick()
	cpu.x = cpu.data
	loadFlag(cpu, cpu.x)
}

// ldy loads Y and updates N/Z.
func (cpu *CPU) ldy() {
	println("LDY")
	cpu.waitTick()
	cpu.y = cpu.data
	loadFlag(cpu, cpu.y)
}

// and performs A &= M.
func (cpu *CPU) and() {
	println("AND")
	cpu.waitTick()
	cpu.a = cpu.data & cpu.a
	loadFlag(cpu, cpu.a)
}

// ora performs A |= M.
func (cpu *CPU) ora() {
	println("ORA")
	cpu.waitTick()
	cpu.a = cpu.data | cpu.a
	loadFlag(cpu, cpu.a)
}

// cmp compares A with M: sets C when A >= M, and N/Z from A - M. The
// accumulator itself is not modified.
func (cpu *CPU) cmp() {
	println("CMP")
	cpu.waitTick()
	res, c, _ := subUint8V(cpu.a, cpu.data, 1)
	// printf("cmp: 0x%x - 0x%x = 0x%x, c is %v", cpu.a, cpu.data, res, c)
	cpu.C = c > 0
	loadFlag(cpu, res)
}

// cpx compares X with M, like cmp.
func (cpu *CPU) cpx() {
	println("CPX")
	cpu.waitTick()
	res, c, _ := subUint8V(cpu.x, cpu.data, 1)
	cpu.C = c > 0
	loadFlag(cpu, res)
}

// cpy compares Y with M, like cmp.
func (cpu *CPU) cpy() {
	println("CPY")
	cpu.waitTick()
	res, c, _ := subUint8V(cpu.y, cpu.data, 1)
	cpu.C = c > 0
	loadFlag(cpu, res)
}
// jsr pushes the address of the instruction's third byte (per 6502
// convention this is the return address minus one; rts adds 1 to reach the
// next instruction), then jumps to the absolute target.
func (cpu *CPU) jsr() {
	pc := cpu.pc
	adl := cpu.data
	cpu.waitTick()
	cpu.waitTick()
	cpu.push(cpu.pch())
	cpu.waitTick()
	cpu.push(cpu.pcl())
	cpu.waitTick()
	adh := cpu.read(cpu.pc)
	// The original incremented cpu.pc here; the value was immediately
	// overwritten by the assignment below, so the dead store is removed.
	cpu.pc = makeUint16(adh, adl)
	// fmt.Printf("JSR from 0x%x to 0x%x, sp is 0x%x\n", pc, cpu.pc, cpu.sp)
	printf("JSR from 0x%x to 0x%x\n", pc, cpu.pc)
	cpu.skip1 = false
}
// jump performs an absolute JMP: low byte was fetched into cpu.data, the
// high byte is read here.
func (cpu *CPU) jump() {
	pc := cpu.pc
	adl := cpu.data
	cpu.waitTick()
	adh := cpu.read(cpu.pc)
	cpu.pc = makeUint16(adh, adl)
	printf("JUMP from 0x%x to 0x%x\n", pc, cpu.pc)
	cpu.skip1 = false
}

// jumpIndirect performs JMP (addr): reads the pointer at addr, then jumps to
// the address it contains. Note bal+1 wraps within 8 bits, so a pointer at a
// page boundary reads its high byte from the start of the same page — this
// reproduces the documented 6502 indirect-jump page-wrap behavior.
func (cpu *CPU) jumpIndirect() {
	pc := cpu.pc
	bal := cpu.data
	cpu.waitTick()
	bah := cpu.read(cpu.pc)
	cpu.waitTick()
	adl := cpu.read(makeUint16(bah, bal))
	cpu.waitTick()
	adh := cpu.read(makeUint16(bah, bal+1))
	cpu.pc = makeUint16(adh, adl)
	printf("JUMPIND with 0x%x from 0x%x to 0x%x\n", makeUint16(bah, bal), pc, cpu.pc)
	// fmt.Printf("JUMPIND with 0x%x from 0x%x to 0x%x\n", makeUint16(bah, bal), pc, cpu.pc)
	cpu.skip1 = false
}
// adc adds M and the carry to A, setting C and V from the helper's carry and
// overflow outputs.
func (cpu *CPU) adc() {
	println("ADC")
	cpu.waitTick()
	res, c, v := addUint8V(cpu.a, cpu.data, boolToUint8(cpu.C))
	// printf("adc: prec is %v, 0x%x + 0x%x = 0x%x, c is %v, v is %v\n", cpu.C, cpu.a, cpu.data, res, c, v)
	cpu.a = res
	cpu.C = c > 0
	cpu.V = v > 0
	loadFlag(cpu, cpu.a)
}

// sbc subtracts M from A with borrow (carry acts as NOT borrow).
func (cpu *CPU) sbc() {
	println("SBC")
	cpu.waitTick()
	res, c, v := subUint8V(cpu.a, cpu.data, boolToUint8(cpu.C))
	// printf("sbc: prec is %v, 0x%x - 0x%x = 0x%x, c is %v, v is %v\n", cpu.C, cpu.a, cpu.data, res, c, v)
	cpu.a = res
	cpu.C = c > 0
	cpu.V = v > 0
	loadFlag(cpu, cpu.a)
}

// bit tests memory against A: Z from A & M, while N and V are copied
// directly from bits 7 and 6 of M.
func (cpu *CPU) bit() {
	println("BIT")
	cpu.waitTick()
	cpu.Z = (cpu.data & cpu.a) == 0
	cpu.N = ((cpu.data & 0x80) >> 7) > 0
	cpu.V = ((cpu.data & 0x40) >> 6) > 0
}
// eor performs A ^= M and updates N/Z. The original computed XOR the long
// way as (a|m) & ^(a&m); the ^ operator is equivalent and far clearer.
func (cpu *CPU) eor() {
	println("EOR")
	cpu.waitTick()
	cpu.a = cpu.a ^ cpu.data
	loadFlag(cpu, cpu.a)
}
// Store instructions stage the register value in cpu.buffer; the write to
// the effective address happens in a later step.

// sta stages A for storing.
func (cpu *CPU) sta() {
	printf("STA ")
	cpu.buffer = cpu.a
}

// stx stages X for storing.
func (cpu *CPU) stx() {
	printf("STX ")
	cpu.buffer = cpu.x
}

// sty stages Y for storing.
func (cpu *CPU) sty() {
	printf("STY ")
	cpu.buffer = cpu.y
}
// irqCounter is leftover debug state (only referenced by commented-out
// tracing below).
var irqCounter = 0

// irq services a maskable interrupt: pushes PC and the status register with
// bit 4 (the B flag) cleared, sets I, and loads the IRQ vector from
// 0xfffe/0xffff.
func (cpu *CPU) irq() {
	// Echo = true
	// if irqCounter < 100 {
	// 	irqCounter++
	// 	Echo = true
	// }
	// common.Echo = false
	// fmt.Println("IRQ##############################")
	// println("IRQ")
	// fmt.Println("IRQ")
	// fmt.Printf("0x%x 0x%x\n", cpu.pc, cpu.sp)
	cpu.fIRQ = false
	cpu.waitTick()
	cpu.waitTick()
	cpu.push(cpu.pch())
	cpu.waitTick()
	cpu.push(cpu.pcl())
	cpu.waitTick()
	// Mask out bit 4: hardware interrupts push P with the B flag clear.
	cpu.push(cpu.GetP() & 0xef)
	cpu.waitTick()
	adl := cpu.read(0xfffe)
	cpu.waitTick()
	adh := cpu.read(0xffff)
	cpu.pc = makeUint16(adh, adl)
	cpu.I = true
	cpu.skip1 = false
}

// nmi services a non-maskable interrupt; identical to irq except it loads
// the NMI vector from 0xfffa/0xfffb.
func (cpu *CPU) nmi() {
	// common.Echo = true
	// println("NMI!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
	// fmt.Println("NMI")
	// fmt.Printf("0x%x 0x%x\n", cpu.pc, cpu.sp)
	// for i := cpu.sp; i != 0; i++ {
	// 	fmt.Printf("val 0x%x sp 0x%x\n", cpu.read(makeUint16(0x01, cpu.sp+i)), cpu.sp+i)
	// }
	cpu.fNMI = false
	cpu.waitTick()
	cpu.waitTick()
	cpu.push(cpu.pch())
	cpu.waitTick()
	cpu.push(cpu.pcl())
	cpu.waitTick()
	// Mask out bit 4: hardware interrupts push P with the B flag clear.
	cpu.push(cpu.GetP() & 0xef)
	cpu.waitTick()
	adl := cpu.read(0xfffa)
	cpu.waitTick()
	adh := cpu.read(0xfffb)
	cpu.pc = makeUint16(adh, adl)
	cpu.I = true
	cpu.skip1 = false
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package video
import (
"context"
"fmt"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/input"
"chromiumos/tast/local/media/webmedia/vimeo"
"chromiumos/tast/local/media/webmedia/youtube"
"chromiumos/tast/local/mtbf"
"chromiumos/tast/testing"
)
// init registers the PlaybackSimultaneous test with one parameterization per
// browser type (ash-chrome and lacros).
func init() {
	testing.AddTest(&testing.Test{
		Func:         PlaybackSimultaneous,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Plays multiple videos simultaneously in different tabs",
		Contacts: []string{
			"abergman@google.com",
			"alfred.yu@cienet.com",
			"cj.tsai@cienet.com",
			"cienet-development@googlegroups.com",
		},
		SoftwareDeps: []string{"chrome"},
		// Purposely leave the empty Attr here. MTBF tests are not included in mainline or crosbolt for now.
		Attr:    []string{},
		Timeout: 5 * time.Minute,
		Params: []testing.Param{
			{
				Fixture: mtbf.LoginReuseFixture,
				Val:     browser.TypeAsh,
			}, {
				Name:              "lacros",
				ExtraSoftwareDeps: []string{"lacros"},
				Fixture:           mtbf.LoginReuseLacrosFixture,
				Val:               browser.TypeLacros,
			},
		},
	})
}
// Video URLs played simultaneously in separate tabs. The explicit string
// type was redundant with the initializers and has been dropped.
var (
	youtubeURL = "https://www.youtube.com/watch?v=kJQP7kiw5Fk"
	vimeoURL   = "https://vimeo.com/43401199"
)
// PlaybackSimultaneous verifies that multiple videos in different tabs can be played simultaneously.
func PlaybackSimultaneous(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()

	// Reserve the unshortened context for cleanup work that must run even
	// when the shortened test context has expired.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()

	recorder, err := mtbf.NewRecorder(ctx)
	if err != nil {
		s.Fatal("Failed to start record performance: ", err)
	}
	defer recorder.Record(cleanupCtx, s.OutDir())

	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to get test API connection: ", err)
	}

	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to find keyboard: ", err)
	}
	defer kb.Close()

	br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
	if err != nil {
		s.Fatal("Failed to open the browser: ", err)
	}
	defer closeBrowser(cleanupCtx)

	// videoPlayer is the minimal playback contract shared by the site
	// helpers below.
	type videoPlayer interface {
		Open(context.Context, *browser.Browser) error
		Close(context.Context)
		Play(ctx context.Context) error
		IsPlaying(context.Context) (bool, error)
		CurrentTime(ctx context.Context) (time float64, err error)
		GetURL() string
	}

	// videoSources is the map of tab-order to video-source.
	videoSources := map[int]videoPlayer{
		1: youtube.New(tconn, youtubeURL),
		2: vimeo.New(tconn, vimeoURL),
	}

	// Open video sources by certain order.
	for order := 1; order <= len(videoSources); order++ {
		video := videoSources[order]
		if err := video.Open(ctx, br); err != nil {
			s.Fatalf("Failed to open video source [%s]: %v", video.GetURL(), err)
		}
		// Deliberate defer-in-loop: every opened video stays open until the
		// test function returns, then is closed (with a failure screenshot).
		defer func(ctx context.Context) {
			faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, fmt.Sprintf("before_close_video_%d", order))
			video.Close(ctx)
		}(cleanupCtx)

		if err := video.Play(ctx); err != nil {
			s.Fatal("Failed to play video: ", err)
		}

		if playing, err := video.IsPlaying(ctx); err != nil {
			s.Fatal("Failed to check video is playing: ", err)
		} else if !playing {
			s.Fatal("Video isn't playing")
		}
	}

	// Close the empty lacros tab after all videos are opened.
	if err := br.CloseWithURL(ctx, chrome.NewTabURL); err != nil {
		s.Fatal("Failed to close empty tab: ", err)
	}

	// Switching between all tabs and verify all video sources are playing.
	for order := range videoSources {
		if err := kb.Accel(ctx, fmt.Sprintf("Ctrl+%d", order)); err != nil {
			s.Fatal("Failed to switch tab by shortcut: ", err)
		}
		s.Log("Switched to tab-", order)

		// Verify all video sources are playing.
		for order, video := range videoSources {
			if yt, ok := video.(*youtube.YouTube); ok {
				if err := yt.SkipAd(ctx); err != nil {
					s.Fatal("Failed to skip ads on YouTube: ", err)
				}
			}

			tBefore, err := video.CurrentTime(ctx)
			if err != nil {
				s.Fatalf("Failed to get video-%d current time: %v", order, err)
			}
			tAfter := tBefore

			// Checks the video is playing by monitoring the video time.
			pollOpt := testing.PollOptions{Timeout: 15 * time.Second, Interval: time.Second}
			if err := testing.Poll(ctx, func(ctx context.Context) error {
				if tAfter > tBefore {
					return nil
				}
				if tAfter, err = video.CurrentTime(ctx); err != nil {
					return testing.PollBreak(errors.Wrap(err, "failed to get video time"))
				}
				return errors.New("video isn't playing")
			}, &pollOpt); err != nil {
				s.Fatalf("Video-%d isn't playing within %v: %v", order, pollOpt.Timeout, err)
			}

			s.Logf("Video-%d is playing", order)
		}
	}
}
|
package game
import (
"time"
"github.com/nsf/termbox-go"
)
// CSnake is color of the snake
const CSnake = termbox.ColorCyan

// PlayState is the game state where the player controls the snake.
type PlayState struct {
	Width         int           // board width in cells (excluding walls)
	Height        int           // board height in cells (excluding walls)
	Snake         *Snake        // the player's snake
	Food          Food          // current food item
	SinceLastMove time.Duration // time accumulated since the last snake move
	MoveThreshold time.Duration // how often the snake advances one cell
	Direction     Direction     // current movement direction
}
// NewPlayState creates the initial play state: a 20x15 board, a five-segment
// snake at the center, one food item, and leftward movement at five moves
// per second. The board dimensions were previously duplicated as magic
// numbers in the Food constructor; they now come from a single source.
func NewPlayState() *PlayState {
	const (
		boardWidth  = 20
		boardHeight = 15
	)
	return &PlayState{
		Width:         boardWidth,
		Height:        boardHeight,
		Snake:         NewSnake(5, Vec2i{10, 10}),
		Food:          NewFood(boardWidth, boardHeight),
		MoveThreshold: time.Second / 5,
		Direction:     Left,
	}
}
// InBounds reports whether vec lies inside the Width x Height board.
func (s *PlayState) InBounds(vec Vec2i) bool {
	if vec.X < 0 || vec.Y < 0 {
		return false
	}
	return vec.X < s.Width && vec.Y < s.Height
}
// Update moves the snake in the current direction once MoveThreshold has
// elapsed, ends the game on wall or self collision, and grows the snake
// (spawning new food) when it eats.
func (s *PlayState) Update(g *Game, t time.Duration) error {
	s.SinceLastMove += t
	if s.SinceLastMove > s.MoveThreshold {
		s.SinceLastMove = 0
		s.Snake.Move(s.Direction)
	}

	if !s.InBounds(s.Snake.Head()) || s.Snake.KilledSelf() {
		g.State = NewEndState(false)
	}

	if s.Snake.Head() == s.Food.Pos {
		s.Snake.Expand()
		s.Food = NewFood(s.Width, s.Height)
	}
	return nil
}
// HandleInput steers the snake in the direction of the pressed key;
// unrecognized inputs are ignored.
func (s *PlayState) HandleInput(g *Game, input InputEvent) error {
	switch input.Input {
	case IUp:
		s.Direction = Up
	case IDown:
		s.Direction = Down
	case ILeft:
		s.Direction = Left
	case IRight:
		s.Direction = Right
	}
	return nil
}
// Render draws the walls, the food, and the snake, then flushes the
// terminal buffer. Cell coordinates are offset by 1 for the wall border.
func (s *PlayState) Render(g *Game, t time.Duration) error {
	wnd := g.Render()
	s.RenderWalls(g, t, wnd)
	// render food
	termbox.SetCell(wnd.Left+1+s.Food.Pos.X, wnd.Top+1+s.Food.Pos.Y, s.Food.Char, CFood, CBG)
	for _, v := range s.Snake.Body {
		termbox.SetCell(wnd.Left+1+v.X, wnd.Top+1+v.Y, 'S', CSnake, CBG)
	}
	return termbox.Flush()
}
// RenderWalls draws the rectangular '0' border that frames the
// (Width x Height) playing field inside wnd.
func (s *PlayState) RenderWalls(g *Game, t time.Duration, wnd Window) {
	topRow := wnd.Top
	bottomRow := wnd.Top + s.Height + 1
	for x := 0; x < s.Width+2; x++ {
		termbox.SetCell(wnd.Left+x, topRow, '0', CFG, CBG)
		termbox.SetCell(wnd.Left+x, bottomRow, '0', CFG, CBG)
	}
	leftCol := wnd.Left
	rightCol := wnd.Left + s.Width + 1
	for y := 0; y < s.Height+1; y++ {
		termbox.SetCell(leftCol, wnd.Top+y, '0', CFG, CBG)
		termbox.SetCell(rightCol, wnd.Top+y, '0', CFG, CBG)
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"strings"
"github.com/gin-gonic/gin"
"github.com/jackc/pgx"
)
// Database connectivity variables.
// db is the shared pgx connection pool used by every handler;
// db_err records the pool-setup error from init (nil on success).
// NOTE(review): db may be nil if the connection failed — handlers
// should be prepared for that.
var db *pgx.ConnPool
var db_err error
// init establishes the PostgreSQL connection pool shared by all
// request handlers.
//
// NOTE(review): the credentials are hard-coded; they should be read
// from configuration or the environment instead of living in source.
// A failed connection is only logged, so db stays nil and handlers
// must guard against that.
func init() {
	db, db_err = pgx.NewConnPool(pgx.ConnPoolConfig{
		ConnConfig: pgx.ConnConfig{
			Host:     "localhost",
			Database: "foodies",
			User:     "anil",
			Password: "205474",
			Port:     5432,
		},
		MaxConnections: 10,
	})
	if db_err != nil {
		// Include the underlying error so the failure can be diagnosed
		// (previously only a generic message was printed).
		fmt.Println("Can't connect to database:", db_err)
	}
}
// serveHTMLPage registers a GET handler on route that serves the HTML
// file at path. A missing file is reported as 404 instead of being
// silently served as an empty 200 response (the previous behavior).
func serveHTMLPage(r *gin.Engine, route, path string) {
	r.GET(route, func(c *gin.Context) {
		res, err := ioutil.ReadFile(path)
		if err != nil {
			fmt.Println(err)
			c.JSON(404, "error while fetching file")
			return
		}
		c.Data(200, "text/html", res)
	})
}

// main wires up all HTTP routes (static pages, JS/CSS/image assets,
// and the vendor/customer/menu API) and starts the server on :7070.
func main() {
	r := gin.Default()

	// Base directory for all static assets.
	// NOTE(review): path parameters are concatenated into file paths
	// below. Gin path params cannot contain '/', which limits
	// traversal, but serving these trees via r.Static would be safer.
	const base = "/home/anil/foodies/spicyX/base"

	// ------------------- static HTML pages -------------------
	// The ten hand-copied handlers are collapsed into one table.
	htmlPages := map[string]string{
		"/main":          base + "/main.html",
		"/mainHeader":    base + "/mainHeader.html",
		"/mainfooter":    base + "/mainfooter.html",
		"/menuItems":     base + "/dashboard/menuItems.html",
		"/dashboard":     base + "/dashboard/dashboard.html",
		"/dash":          base + "/dashboard/dash.html",
		"/table":         base + "/dashboard/table.html",
		"/user":          base + "/dashboard/user.html",
		"/delivery":      base + "/dashboard/delivery.html",
		"/viewMenuItems": base + "/dashboard/viewMenuItems.html",
	}
	for route, path := range htmlPages {
		serveHTMLPage(r, route, path)
	}

	// ------------------- JavaScript files -------------------
	r.GET("/js/:js_file", func(c *gin.Context) {
		res, err := ioutil.ReadFile(base + "/js/" + c.Param("js_file"))
		if err != nil {
			fmt.Println(err)
			// BUG FIX: previously fell through and wrote a second
			// response body (nil data, 200) after the 404.
			c.JSON(404, "error while fetching file")
			return
		}
		c.Data(200, "application/javascript", res)
	})

	// ------------------- CSS files -------------------
	r.GET("/css/:css_file", func(c *gin.Context) {
		res, err := ioutil.ReadFile(base + "/css/" + c.Param("css_file"))
		if err != nil {
			fmt.Println(err)
			c.JSON(404, "error while fetching file")
			return
		}
		c.Data(200, "text/css", res)
	})

	// ------------------- images -------------------
	r.GET("/img/:img_file", func(c *gin.Context) {
		imgFile := c.Param("img_file")
		res, err := ioutil.ReadFile(base + "/img/" + imgFile)
		if err != nil {
			fmt.Println(err)
			c.JSON(404, "error while fetching Image")
			return
		}
		// BUG FIX: Split(imgFile, ".")[1] panicked on names without a
		// dot and picked the wrong segment for names like "a.b.png";
		// use the final segment as the extension instead.
		parts := strings.Split(imgFile, ".")
		switch strings.ToLower(parts[len(parts)-1]) {
		case "jpg", "jpeg":
			// BUG FIX: jpeg was served as image/png and jpg as the
			// non-standard image/jpg; the correct type is image/jpeg.
			c.Data(200, "image/jpeg", res)
		case "png":
			c.Data(200, "image/png", res)
		}
	})

	// ------------------- vendor registration -------------------
	r.POST("/registervendor", func(c *gin.Context) {
		var ven vendor
		if err := c.BindJSON(&ven); err != nil {
			c.JSON(400, "invalid request body")
			return
		}
		fmt.Println("\n\nRequest Received for vendor registration: \n\n ")
		tx, err := db.Begin()
		if err != nil {
			// BUG FIX: the Begin error was discarded; a nil tx would
			// have panicked below.
			fmt.Println(err)
			c.JSON(500, "ERR")
			return
		}
		defer tx.Rollback() // no-op once the transaction is committed
		fmt.Println(ven.Owner, ven.Name, ven.Email, ven.Mobile, ven.Address, ven.Image, ven.Description, ven.Offer, ven.Password)
		var num int64 // vendorid assigned by the database
		err = tx.QueryRow(`
		INSERT INTO vendors (owner, vendorname, email ,mobile ,address ,imageaddress ,description,offer, password ) values ($1, $2, $3, $4, $5, $6, $7,$8,$9) returning vendorid
		`, ven.Owner, ven.Name, ven.Email, ven.Mobile, ven.Address, ven.Image, ven.Description, ven.Offer, ven.Password).Scan(&num)
		if err != nil {
			// BUG FIX: the insert error was only printed and the
			// transaction committed anyway, replying 200 with num=0.
			fmt.Println(err)
			c.JSON(500, "ERR")
			return
		}
		if commitErr := tx.Commit(); commitErr != nil {
			c.JSON(500, "ERR")
			return
		}
		fmt.Println("Vendor registered and his ID:", num)
		c.JSON(200, num)
	})

	// ------------------- item menu updation -------------------
	r.POST("/additems", func(c *gin.Context) {
		var val item
		if err := c.BindJSON(&val); err != nil {
			c.JSON(400, "invalid request body")
			return
		}
		fmt.Println("\n\nRequest Received for menu updation: \n\n ")
		tx, err := db.Begin()
		if err != nil {
			fmt.Println(err)
			c.JSON(500, "ERR")
			return
		}
		defer tx.Rollback()
		_, err = tx.Exec(`
		INSERT INTO itemmenu (vendor_id ,item_name ,item_type ,item_nature ,price , item_description ,imageaddress ,discount)
		values ($1,$2,$3,$4,$5,$6,$7,$8)`, val.Vendorid, val.Name, val.IType, val.Nature, val.Price,
			val.Description, val.Image, val.Discount)
		if err != nil {
			// BUG FIX: the insert error was only printed and the
			// transaction committed anyway.
			fmt.Println("error", err)
			c.JSON(500, "ERR")
			return
		}
		if commitErr := tx.Commit(); commitErr != nil {
			fmt.Println(commitErr)
			c.JSON(500, "ERR")
			return
		}
		c.JSON(200, 1)
	})

	// ------------------- customer registration -------------------
	// NOTE(review): this route reads a JSON body but is registered as
	// GET; it should almost certainly be POST. Left as GET so existing
	// clients keep working.
	r.GET("/registercustomer", func(c *gin.Context) {
		var cus customer
		if err := c.BindJSON(&cus); err != nil {
			c.JSON(400, "invalid request body")
			return
		}
		fmt.Println("\n\nRequest Received : \n\n")
		tx, err := db.Begin()
		if err != nil {
			fmt.Println(err)
			c.JSON(500, "ERR")
			return
		}
		defer tx.Rollback()
		var track CSID
		err = tx.QueryRow(`
		INSERT INTO customers (customer_name, emailid ,mobile ,address ,password ) values ($1, $2, $3, $4, $5) returning customer_id
		`, cus.Name, cus.Email, cus.Mobile, cus.Address, cus.Password).Scan(&track.Customerid)
		if err != nil {
			// BUG FIX: the scan error was silently ignored, committing
			// anyway and replying 200 with a zero customer ID.
			fmt.Println(err)
			c.JSON(500, "ERR")
			return
		}
		if commitErr := tx.Commit(); commitErr != nil {
			c.JSON(500, "ERR")
			return
		}
		fmt.Println("cutomer registered and his ID:", track.Customerid)
		c.JSON(200, track)
	})

	// ------------------- list vendors -------------------
	r.GET("/getvendors", func(c *gin.Context) {
		fmt.Println("\n\nRequest Received : \n\n")
		rows, err := db.Query(` SELECT vendorid, vendorname from vendors `)
		if err != nil {
			fmt.Println(err)
			// BUG FIX: previously fell through to rows.Close/iteration
			// (panicking on nil rows) and wrote a second response.
			c.JSON(500, "error while retreiving vendors data")
			return
		}
		defer rows.Close()
		ven := make([]VendorsToSend, 0)
		for rows.Next() {
			var t VendorsToSend
			if err := rows.Scan(&t.Vendorid, &t.Vendorname); err != nil {
				// BUG FIX: the zero-value row was appended even when
				// the scan failed, and iteration continued.
				fmt.Println(err)
				c.JSON(500, "error while retreiving vendors data")
				return
			}
			ven = append(ven, t)
		}
		c.JSON(200, ven)
		fmt.Println("Vendors names are sent")
	})

	// ------------------- menu of one vendor -------------------
	r.POST("/getvendorsmenu", func(c *gin.Context) {
		var id VID
		if err := c.BindJSON(&id); err != nil {
			c.JSON(400, "invalid request body")
			return
		}
		fmt.Println("\n\nRequest for retreiving vendors menu Received : \n\n")
		rows, err := db.Query(` SELECT item_no, item_name, item_type, item_nature, price, item_description, imageaddress, discount
		from itemmenu where vendor_id = $1 `, id.Vendorid)
		if err != nil {
			fmt.Println(err)
			c.JSON(500, "error while retreiving vendors menu")
			return
		}
		defer rows.Close()
		items := make([]item, 0)
		for rows.Next() {
			var t item
			if err := rows.Scan(&t.Itemno, &t.Name, &t.IType, &t.Nature, &t.Price, &t.Description, &t.Image, &t.Discount); err != nil {
				fmt.Println(err)
				c.JSON(500, "error while retreiving vendors menu")
				return
			}
			items = append(items, t)
		}
		c.JSON(200, items)
		fmt.Println("Vendors Menu sent")
	})

	fmt.Println("\n\n\t ##### Foodies server live on :7070 #####")
	// BUG FIX: r.Run's error was previously discarded; a failed bind
	// (e.g. port in use) exited silently.
	if err := r.Run(":7070"); err != nil {
		fmt.Println(err)
	}
}
// vendor holds the incoming request payload for vendor registration
// (POST /registervendor). Field tags mirror the client-side JSON keys.
type vendor struct {
	Vendorid    int      `json:"vendorid,omitempty"` // assigned by the database on insert
	Owner       string   `json:"owner"`
	Name        string   `json:"vendorname"`
	Email       string   `json:"email"`
	Mobile      []string `json:"mobile"` // a vendor may list several phone numbers
	Address     string   `json:"address"`
	Image       string   `json:"imageaddress,omitempty"`
	Description string   `json:"description,omitempty"`
	Offer       string   `json:"offer,omitempty"`
	Password    string   `json:"password"` // NOTE(review): stored/transmitted in plain text
}

// VID carries just a vendor id, used by POST /getvendorsmenu.
type VID struct {
	Vendorid int `json:"vendorid"`
}

// customer holds the incoming request payload for customer
// registration (GET /registercustomer).
type customer struct {
	Customerid int      `json:"customer_id,omitempty"` // assigned by the database on insert
	Name       string   `json:"customer_name"`
	Email      string   `json:"emailid"`
	Mobile     []string `json:"mobile"`
	Address    string   `json:"address"`
	Password   string   `json:"password"` // NOTE(review): stored/transmitted in plain text
}

// CSID is the registration response carrying the new customer id.
// NOTE(review): its tag is "customerid" while customer uses
// "customer_id" — presumably intentional wire format; confirm with the
// client before unifying, since changing the tag changes the JSON.
type CSID struct {
	Customerid int `json:"customerid,omitempty"`
}

// type MENU struct {
// 	ITEMS []item `json:"items"`
// }

// item is one menu entry, used both for inserts (POST /additems)
// and for query results (POST /getvendorsmenu).
type item struct {
	Vendorid    int     `json:"vendor_id"`
	Itemno      int     `json:"item_no,omitempty"` // assigned by the database
	Name        string  `json:"item_name"`
	IType       string  `json:"item_type"`
	Nature      bool    `json:"item_nature"` // presumably veg/non-veg flag — TODO confirm
	Description string  `json:"item_description"`
	Price       string  `json:"price"`
	Image       string  `json:"imageaddress,omitempty"`
	Discount    float64 `json:"discount,omitempty"`
}

// VendorsToSend is the compact (id, name) pair returned by
// GET /getvendors.
type VendorsToSend struct {
	Vendorid   int    `json:"vendor_id"`
	Vendorname string `json:"vendorname"`
}
|
package system
import (
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestCreateIdentity verifies that CreateIdentity returns a peer ID
// and a private key that match each other after round-tripping both
// through their string encodings.
func TestCreateIdentity(t *testing.T) {
	Convey("TestCreateIdentity", t, func() {
		Convey("return a peerID and a privateKey which should be matched with each other", func() {
			peerIDStr, privateKeyStr, err := CreateIdentity()
			// BUG FIX: this error was previously discarded — it was
			// immediately overwritten by peer.Decode's error without
			// ever being checked.
			if err != nil {
				t.Fatal(err)
			}
			// Use Fatal rather than Error below: continuing after a
			// decode failure would feed invalid values into the next
			// step and produce misleading secondary failures.
			peerID, err := peer.Decode(peerIDStr)
			if err != nil {
				t.Fatal(err)
			}
			privateKeyBytes, err := crypto.ConfigDecodeKey(privateKeyStr)
			if err != nil {
				t.Fatal(err)
			}
			privateKey, err := crypto.UnmarshalPrivateKey(privateKeyBytes)
			if err != nil {
				t.Fatal(err)
			}
			So(peerID.MatchesPrivateKey(privateKey), ShouldBeTrue)
		})
	})
}
|
package kubernetes
import (
"testing"
"github.com/stretchr/testify/assert"
core_v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestFilterPodsForEndpoints checks that FilterPodsForEndpoints keeps
// only the pods referenced by the endpoints' addresses with a TargetRef
// of kind "Pod", preserving order across subsets.
func TestFilterPodsForEndpoints(t *testing.T) {
	assert := assert.New(t)

	// ref builds an endpoint address pointing at the named object.
	ref := func(name, kind string) core_v1.EndpointAddress {
		return core_v1.EndpointAddress{
			TargetRef: &core_v1.ObjectReference{Name: name, Kind: kind},
		}
	}
	endpoints := core_v1.Endpoints{
		Subsets: []core_v1.EndpointSubset{
			{
				Addresses: []core_v1.EndpointAddress{
					ref("pod-1", "Pod"),
					ref("pod-2", "Pod"),
					ref("other", "Other"), // non-Pod kind must be ignored
					{},                    // address without TargetRef must be ignored
				},
			},
			{
				Addresses: []core_v1.EndpointAddress{
					ref("pod-3", "Pod"),
				},
			},
		},
	}

	// pod builds a minimal pod with just a name.
	pod := func(name string) core_v1.Pod {
		return core_v1.Pod{ObjectMeta: meta_v1.ObjectMeta{Name: name}}
	}
	pods := []core_v1.Pod{
		pod("pod-1"),
		pod("pod-2"),
		pod("pod-3"),
		pod("pod-999"), // not referenced by any endpoint
		pod("other"),   // referenced, but not as kind Pod
	}

	filtered := FilterPodsForEndpoints(&endpoints, pods)

	assert.Len(filtered, 3)
	for i, want := range []string{"pod-1", "pod-2", "pod-3"} {
		assert.Equal(want, filtered[i].Name)
	}
}
|
// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.
package hmacsha512_test
import (
"crypto/hmac"
"crypto/rand"
"crypto/sha512"
"encoding/binary"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/internal/hmacsha512"
)
// NodeID is a duplicate of storj.NodeID to avoid circular dependency.
type NodeID [32]byte

// PieceID is a duplicate of storj.PieceID to avoid circular dependency.
type PieceID [32]byte

// sinkSum and sinkBytes receive benchmark results so the compiler
// cannot dead-code-eliminate the hashing work being measured.
var sinkSum [hmacsha512.Size]byte
var sinkBytes []byte
// TestRandom cross-checks hmacsha512.Partial against the standard
// library's hmac.New(sha512.New, key) on random keys and messages.
func TestRandom(t *testing.T) {
	for i := 0; i < 10; i++ {
		// Vary the key length each round (21, 42, ... bytes) to cover
		// keys both shorter and longer than the SHA-512 block size.
		key := make([]byte, 21*(i+1))
		node1 := NodeID{}
		node2 := NodeID{}
		_, _ = rand.Read(key)
		_, _ = rand.Read(node1[:])
		_, _ = rand.Read(node2[:])
		var opt hmacsha512.Partial
		opt.Init(key)
		std := hmac.New(sha512.New, key)
		// Message 1: node1 || {1,0,0,0} — both implementations must agree.
		opt.Write(node1[:])
		opt.Write([]byte{1, 0, 0, 0})
		got := opt.SumAndReset()
		std.Reset()
		_, _ = std.Write(node1[:])
		_, _ = std.Write([]byte{1, 0, 0, 0})
		exp := std.Sum(nil)
		require.Equal(t, exp, got[:])
		// Writing the same message again must reproduce the same sum;
		// exp is intentionally reused here to prove SumAndReset really
		// reset the partial state back to just-after-Init.
		opt.Write(node1[:])
		opt.Write([]byte{1, 0, 0, 0})
		got = opt.SumAndReset()
		require.Equal(t, exp, got[:])
		// A different message: node2 || {2,0,0,0}.
		opt.Write(node2[:])
		opt.Write([]byte{2, 0, 0, 0})
		got = opt.SumAndReset()
		std.Reset()
		_, _ = std.Write(node2[:])
		_, _ = std.Write([]byte{2, 0, 0, 0})
		exp = std.Sum(nil)
		require.Equal(t, exp, got[:])
	}
}
// BenchmarkInlined measures the optimized hmacsha512.Partial: one key
// setup per piece, then 100 derivations reusing the partially-hashed
// key state. Results go into sinkSum to defeat dead-code elimination.
func BenchmarkInlined(b *testing.B) {
	for i := 1; i <= b.N; i++ {
		seed := byte(i)
		pieceID := PieceID{seed, seed, seed, seed}
		var mac hmacsha512.Partial
		mac.Init(pieceID[:])
		for k := 0; k < 100; k++ {
			kb := byte(k)
			nodeid := NodeID{kb, kb, kb, kb}
			mac.Write(nodeid[:])
			var num [4]byte
			binary.BigEndian.PutUint32(num[:], uint32(k))
			mac.Write(num[:])
			sinkSum = mac.SumAndReset()
		}
	}
}
// BenchmarkStandard measures the equivalent derivation using the
// standard library hmac package, as the baseline for BenchmarkInlined.
// Results go into sinkBytes to defeat dead-code elimination.
func BenchmarkStandard(b *testing.B) {
	for i := 1; i <= b.N; i++ {
		seed := byte(i)
		pieceID := PieceID{seed, seed, seed, seed}
		mac := hmac.New(sha512.New, pieceID[:])
		for k := 0; k < 100; k++ {
			kb := byte(k)
			nodeid := NodeID{kb, kb, kb, kb}
			mac.Reset()
			_, _ = mac.Write(nodeid[:])
			var num [4]byte
			binary.BigEndian.PutUint32(num[:], uint32(k))
			_, _ = mac.Write(num[:])
			sinkBytes = mac.Sum(nil)
		}
	}
}
|
package main
import "os"
// main demonstrates the panic built-in: the program aborts on the
// first panic, so everything after it is intentionally unreachable.
func main() {
	// This site uses panic to check for unexpected errors; this is the
	// only example designed around panic.
	panic("a problem")

	// A common use of panic is to abort when a function returns an
	// error value that we don't know how to (or don't want to) handle.
	// Here is an example of panicking if we get an unexpected error
	// when creating a new file. (Unreachable: the panic above fires first.)
	_, err := os.Create("/tmp/file")
	if err != nil {
		panic(err)
	}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.