text stringlengths 11 4.05M |
|---|
package pilgrims
// Players is the fixed array of the four players participating in a game.
type Players [4]Player
// GameState is the complete state of a game: the players, the shared
// card pools, and the board. All three are embedded so their fields are
// promoted onto GameState.
type GameState struct {
Players
Cards
Board
}
// Cards groups the bank's card supplies: resource cards, development
// cards, and the two special award cards.
type Cards struct {
Resources
DevelopmentCards
SpecialCards
}
// SpecialCards tracks the two award cards. Each byte presumably holds the
// index/ID of the player currently holding the award — TODO confirm against
// the code that assigns these.
type SpecialCards struct {
LongestRoad byte
LargestArmy byte
}
// Resources counts the resource cards of each kind.
type Resources struct {
Brick byte
Wool byte
Ore byte
Grain byte
Lumber byte
}
// DevelopmentCards counts the development cards of each kind.
type DevelopmentCards struct {
Knight byte
VictoryPoint byte
RoadBuild byte
Monopoly byte
YearOfPlenty byte
}
// Player is a single player's state: name, remaining pieces, held
// resources, and development cards (split into cards that are publicly
// visible and cards still hidden in hand).
type Player struct {
// Name is a fixed-size byte buffer, so names longer than 10 bytes
// cannot be represented.
Name [10]byte
Settlements byte
Cities byte
Roads byte
Resources Resources
VisibleCards DevelopmentCards
HiddenCards DevelopmentCards
}
// Terrain types for board tiles. Desert is the zero value, so an
// uninitialized Tile.Terrain reads as Desert.
const (
Desert byte = iota
Hills
Pasture
Mountains
Fields
Forest
)
// Resource kinds. Nothing is the zero value (no resource); the remaining
// values mirror the field order of the Resources struct above.
const (
Nothing byte = iota
Brick
Wool
Ore
Grain
Lumber
)
// Tile is one hexagonal board tile: its letter label, number chit,
// terrain type, and the IDs of its six edges and six vertices.
// NOTE(review): edge/vertex bytes look like indices into board-level
// arrays — confirm against the Board type, which is defined elsewhere.
type Tile struct {
Letter byte
Chit byte
Terrain byte
Edges [6]byte
Vertices [6]byte
}
|
package module
// Privilege is a permission entry (e.g. a menu/route node in a tree).
type Privilege struct {
BaseTable
// Display name shown in the UI.
Label string `json:"label"`
// Route path.
Path string `json:"path"`
// Functional name of the privilege.
Name string `json:"name"`
// Icon identifier.
Icon string `json:"icon"`
// Whether this node is a leaf (0: no, 1: yes).
IsLeaf uint `json:"is_leaf"`
// Whether this privilege is disabled (0: enabled, 1: disabled).
IsForbidden uint `json:"is_forbidden"`
// Name of the parent node.
ParentName string `json:"parent_name"`
// Path of the parent node.
ParentPath string `json:"parent_path"`
}
// TableName returns the database table name backing the Privilege model
// (GORM convention).
func (Privilege) TableName() string {
return "privilege"
}
|
package api
import (
"encoding/json"
"strconv"
"time"
"github.com/cloudfly/ecenter/tools"
"github.com/cloudfly/mowa"
log "github.com/sirupsen/logrus"
"github.com/valyala/fasthttp"
)
// init registers the block-management HTTP routes. The final 0 argument's
// meaning is defined by registerRoute elsewhere in this package — presumably
// a permission/flag value; confirm there.
func init() {
registerRoute("GET", "/v1/blocks", GetBlocks, 0)
registerRoute("POST", "/v1/blocks", AddBlock, 0)
registerRoute("DELETE", "/v1/blocks/:id", DeleteBlock, 0)
}
// GetBlocks responds with the mutes (blocks) belonging to the requesting user.
func GetBlocks(ctx *fasthttp.RequestCtx) (int, interface{}) {
	user := getUser(ctx)
	mutes := muteProvider.GetMutes(ctx, user.Username)
	return 200, mowa.Data(mutes)
}
// AddBlock creates one or more event blocks (mutes) for the requesting user.
//
// Query parameters:
//   - name (required): the event name to block.
//   - forAll: when "true", the block applies to all users (the stored user
//     name is left empty) instead of only the requester.
//   - deadline: optional unix timestamp (seconds) at which the block
//     expires; must lie in the future and within 24000 hours from now.
//   - fingerprint: fingerprints to block; used only when the JSON request
//     body (a string array) yields none.
//
// All outcomes are reported with HTTP 200; errors are encoded in the
// mowa response payload.
func AddBlock(ctx *fasthttp.RequestCtx) (int, interface{}) {
	userInfo := getUser(ctx)
	l := log.WithField("user", userInfo.Username)
	args := ctx.QueryArgs()
	name := string(args.Peek("name"))
	if len(name) == 0 {
		return 200, mowa.Error("name required")
	}
	// An empty block user name means the block applies to everyone.
	blockUserName := userInfo.Username
	if string(args.Peek("forAll")) == "true" {
		blockUserName = ""
	}
	var deadline time.Time
	if s := string(args.Peek("deadline")); s != "" {
		timestamp, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return 200, mowa.Error(err)
		}
		deadline = time.Unix(timestamp, 0)
		// Reject deadlines in the past or unreasonably far in the future.
		if dur := time.Until(deadline); dur < 0 || dur > time.Hour*24000 {
			return 200, mowa.Error("invalid deadline param")
		}
	}
	// Fingerprints come from the JSON body first, falling back to the
	// repeated "fingerprint" query parameter.
	var fingerprints []string
	if err := json.Unmarshal(ctx.Request.Body(), &fingerprints); err != nil {
		return 200, mowa.Error(err)
	}
	if len(fingerprints) == 0 {
		for _, item := range args.PeekMulti("fingerprint") {
			fingerprints = append(fingerprints, string(item))
		}
	}
	fingerprints = tools.DistinctString(fingerprints)
	/*
		if blockUserName == "" && (len(fingerprints) == 0 || fingerprints[0] == "") {
			t, err := manager.Get(tid)
			if err != nil {
				return 200, mowa.Errorf("task not found by name %s", name)
			}
			if !t.HasOwner(userInfo.Username) && !t.HasReceiver(userInfo.Username) && !userInfo.IsAdmin() {
				return 200, mowa.Errorf("you have no permission, can not mute alert for others")
			}
		}
	*/
	for _, fingerprint := range fingerprints {
		l.WithField("name", name).WithField("fingerprint", fingerprint).Infof("block event until %s", deadline)
		if _, err := muteProvider.Mute(ctx, name, fingerprint, blockUserName, deadline, ""); err != nil {
			return 200, mowa.Error(err)
		}
	}
	return 200, mowa.Data("OK")
}
// DeleteBlock removes a block (mute) identified by the :id path parameter.
// A missing/unparseable ID resolves to -1 and is left for UnmuteByID to
// reject. All outcomes are reported with HTTP 200.
func DeleteBlock(ctx *fasthttp.RequestCtx) (int, interface{}) {
	userInfo := getUser(ctx)
	l := log.WithField("user", userInfo.Username)
	// Resolve the ID once instead of parsing the path parameter twice.
	id := mowa.Int64Value(ctx, "id", -1)
	l.Infof("unmute by id %d", id)
	if err := muteProvider.UnmuteByID(ctx, id); err != nil {
		return 200, mowa.Error(err)
	}
	return 200, mowa.Data("DELETED")
}
|
package main
import (
"log"
"os"
"os/signal"
"github.com/gorilla/websocket"
)
// catchSig cleans up our websocket connection if we kill the program
// with a ctrl-c: it blocks until a signal arrives on ch, then sends a
// normal-closure close frame so the peer can shut down cleanly.
func catchSig(ch chan os.Signal, c *websocket.Conn) {
	// block on waiting for a signal
	<-ch
	msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
	if err := c.WriteMessage(websocket.CloseMessage, msg); err != nil {
		log.Println("write close:", err)
	}
	// No trailing return needed; the function ends here.
}
// main dials the local websocket server, arranges a clean shutdown on
// ctrl-c, and hands the connection to process.
func main() {
	// Deliver SIGINT notifications on a buffered channel.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt)

	// use the ws:// Scheme to connect to the websocket
	addr := "ws://localhost:8000/"
	log.Printf("connecting to %s", addr)
	conn, _, err := websocket.DefaultDialer.Dial(addr, nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer conn.Close()

	// dispatch our signal catcher
	go catchSig(sigCh, conn)
	process(conn)
}
|
// DO NOT EDIT. This file was generated by "github.com/frk/gosql".
package testdata
import (
"github.com/frk/gosql"
)
// Exec builds and runs a single bulk
// INSERT ... ON CONFLICT DO NOTHING statement covering every element of
// q.Data against the "test_onconflict" table. This file is generated;
// the string-concatenation style mirrors the generator's output and must
// not be hand-optimized here.
func (q *InsertOnConflictIgnoreSliceQuery) Exec(c gosql.Conn) error {
var queryString = `INSERT INTO "test_onconflict" AS k (
"key"
, "name"
, "fruit"
, "value"
) VALUES ` // `
// Four parameters per row, laid out in column order.
params := make([]interface{}, len(q.Data)*4)
for i, v := range q.Data {
pos := i * 4
params[pos+0] = v.Key
params[pos+1] = v.Name
params[pos+2] = v.Fruit
params[pos+3] = v.Value
// NULLIF maps each column's zero value to SQL NULL.
queryString += `(NULLIF(` + gosql.OrdinalParameters[pos+0] + `, 0)::integer` +
`, NULLIF(` + gosql.OrdinalParameters[pos+1] + `, '')::text` +
`, NULLIF(` + gosql.OrdinalParameters[pos+2] + `, '')::text` +
`, NULLIF(` + gosql.OrdinalParameters[pos+3] + `, 0)::double precision` +
`),`
}
// Drop the trailing comma left by the loop above.
queryString = queryString[:len(queryString)-1]
queryString += ` ON CONFLICT DO NOTHING` // `
_, err := c.Exec(queryString, params...)
return err
}
|
package admin
import (
"firstProject/app/dto"
"firstProject/app/models"
"firstProject/database"
)
// CreateAdmin persists a new admin record built from the given DTO and
// returns any database error.
// NOTE(review): the password is stored exactly as received — confirm it is
// hashed before reaching this layer.
func CreateAdmin(dto dto.AdminDto) error {
	var record models.Admin
	record.Username = dto.Username
	record.Password = dto.Password
	record.Name = dto.Name
	record.Phone = dto.Phone
	return database.DB.Create(&record).Error
}
// GetAdminByUsername looks up an admin by username. When no row matches
// (or the query fails), the zero-value Admin is returned — the query error
// is not surfaced to the caller.
func GetAdminByUsername(username string) models.Admin {
	var admin models.Admin
	database.DB.Where("username = ?", username).First(&admin)
	return admin
}
/* func AdminList(username string, page , limit int) models.Admin{
} */
|
package cloudconfig
import (
"context"
"fmt"
"strings"
"testing"
"github.com/giantswarm/apiextensions/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/certs"
ignition "github.com/giantswarm/k8scloudconfig/ignition/v_2_2_0"
k8scloudconfig "github.com/giantswarm/k8scloudconfig/v_4_4_0"
"github.com/giantswarm/microerror"
"github.com/giantswarm/micrologger/microloggertest"
"github.com/giantswarm/randomkeys"
"github.com/giantswarm/aws-operator/service/controller/legacy/v28/controllercontext"
"github.com/giantswarm/aws-operator/service/controller/legacy/v28/encrypter"
)
// Test_Service_CloudConfig_NewMasterTemplate renders a master cloud-config
// template for a minimal AWSConfig and asserts that it converts to valid
// ignition JSON and contains the expected encrypted-asset paths plus the
// base64-encoded EncryptionConfig derived from the test encryption key.
func Test_Service_CloudConfig_NewMasterTemplate(t *testing.T) {
t.Parallel()
testCases := []struct {
CustomObject v1alpha1.AWSConfig
ClusterKeys randomkeys.Cluster
}{
{
CustomObject: v1alpha1.AWSConfig{
Spec: v1alpha1.AWSConfigSpec{
Cluster: v1alpha1.Cluster{
ID: "al9qy",
Etcd: v1alpha1.ClusterEtcd{
Port: 2379,
},
},
},
},
ClusterKeys: randomkeys.Cluster{
APIServerEncryptionKey: randomkeys.RandomKey("fekhfiwoiqhoifhwqefoiqwefoikqhwef"),
},
},
}
for _, tc := range testCases {
ctlCtx := controllercontext.Context{}
ctx := controllercontext.NewContext(context.Background(), ctlCtx)
ccService, err := testNewCloudConfigService()
if err != nil {
t.Fatalf("expected %#v got %#v", nil, err)
}
template, err := ccService.NewMasterTemplate(ctx, tc.CustomObject, certs.Cluster{}, tc.ClusterKeys)
if err != nil {
t.Fatalf("expected %#v got %#v", nil, err)
}
// NOTE(review): leftover debug output — dumps the whole template to
// stdout on every run; consider t.Logf or removal (would require
// dropping the fmt import).
fmt.Printf("%s", template)
// The template must be convertible to ignition JSON.
templateBytes := []byte(template)
_, err = ignition.ConvertTemplatetoJSON(templateBytes)
if err != nil {
t.Fatalf("expected %#v got %#v", nil, err)
}
// The last entry is the base64 EncryptionConfig embedding the test key.
expectedStrings := []string{
"/etc/kubernetes/ssl/etcd/client-ca.pem.enc",
"/etc/kubernetes/ssl/etcd/client-crt.pem.enc",
"/etc/kubernetes/ssl/etcd/client-key.pem.enc",
"decrypt-tls-assets.service",
"a2luZDogRW5jcnlwdGlvbkNvbmZpZwphcGlWZXJzaW9uOiB2MQpyZXNvdXJjZXM6CiAgLSByZXNvdXJjZXM6CiAgICAtIHNlY3JldHMKICAgIHByb3ZpZGVyczoKICAgIC0gYWVzY2JjOgogICAgICAgIGtleXM6CiAgICAgICAgLSBuYW1lOiBrZXkxCiAgICAgICAgICBzZWNyZXQ6IGZla2hmaXdvaXFob2lmaHdxZWZvaXF3ZWZvaWtxaHdlZgogICAgLSBpZGVudGl0eToge30=",
}
for _, expectedString := range expectedStrings {
if !strings.Contains(template, expectedString) {
t.Fatalf("want ignition to contain %q", expectedString)
}
}
}
}
// Test_Service_CloudConfig_NewWorkerTemplate renders a worker cloud-config
// template for a minimal AWSConfig and asserts it contains the expected
// encrypted-asset paths and decrypt unit name.
func Test_Service_CloudConfig_NewWorkerTemplate(t *testing.T) {
t.Parallel()
testCases := []struct {
CustomObject v1alpha1.AWSConfig
}{
{
CustomObject: v1alpha1.AWSConfig{
Spec: v1alpha1.AWSConfigSpec{
AWS: v1alpha1.AWSConfigSpecAWS{
Region: "123456789-super-magic-aws-region",
},
Cluster: v1alpha1.Cluster{
ID: "al9qy",
},
},
},
},
}
for _, tc := range testCases {
ctlCtx := controllercontext.Context{}
ctx := controllercontext.NewContext(context.Background(), ctlCtx)
ccService, err := testNewCloudConfigService()
if err != nil {
t.Fatalf("expected %#v got %#v", nil, err)
}
template, err := ccService.NewWorkerTemplate(ctx, tc.CustomObject, certs.Cluster{})
if err != nil {
t.Fatalf("expected %#v got %#v", nil, err)
}
expectedStrings := []string{
"/etc/kubernetes/ssl/etcd/client-ca.pem.enc",
"/etc/kubernetes/ssl/etcd/client-crt.pem.enc",
"/etc/kubernetes/ssl/etcd/client-key.pem.enc",
"decrypt-tls-assets.service",
}
for _, expectedString := range expectedStrings {
if !strings.Contains(template, expectedString) {
t.Fatalf("want ignition to contain %q", expectedString)
}
}
}
}
// testNewCloudConfigService builds a CloudConfig service suitable for tests:
// a mock encrypter, a test logger, and the real k8scloudconfig ignition
// package path.
func testNewCloudConfigService() (*CloudConfig, error) {
var ccService *CloudConfig
{
packagePath, err := k8scloudconfig.GetPackagePath()
if err != nil {
return nil, microerror.Mask(err)
}
c := Config{
Encrypter: &encrypter.EncrypterMock{},
Logger: microloggertest.New(),
IgnitionPath: packagePath,
RegistryDomain: "quay.io",
}
ccService, err = New(c)
if err != nil {
return nil, err
}
}
return ccService, nil
}
|
package cryptographic
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"io/ioutil"
"log"
"io"
)
// EncryptFile encrypts the file at src with AES in CFB mode using key and
// writes the result (IV followed by ciphertext) to dest.
//
// The key must be 16, 24, or 32 bytes long (AES-128/192/256); any other
// length makes aes.NewCipher fail. A fresh random IV is generated on every
// call and stored in the first aes.BlockSize bytes of the output, so the
// same plaintext encrypts differently each time.
func EncryptFile(src, dest, key string) error {
	plaintext, err := ioutil.ReadFile(src)
	if err != nil {
		log.Println("Read file failed.")
		return err
	}
	block, err := aes.NewCipher([]byte(key))
	if err != nil {
		log.Println("Create cipher block failed.")
		return err
	}
	// Output layout: [IV (BlockSize bytes)][ciphertext (len(plaintext) bytes)].
	cipherText := make([]byte, aes.BlockSize+len(plaintext))
	iv := cipherText[:aes.BlockSize]
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		log.Println("Read rand failed.")
		return err
	}
	// plaintext is already []byte; the original re-converted it needlessly.
	cipher.NewCFBEncrypter(block, iv).XORKeyStream(cipherText[aes.BlockSize:], plaintext)
	// NOTE(review): 0755 marks the output file executable; 0600/0644 would be
	// more conventional for a data file — kept as-is to preserve behavior.
	if err := ioutil.WriteFile(dest, cipherText, 0755); err != nil {
		log.Println("Save encrypted file failed.")
		return err
	}
	return nil
}
|
package envoyconfig
import (
envoy_config_accesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
envoy_config_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
envoy_http_connection_manager "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"github.com/pomerium/pomerium/config"
)
func (b *Builder) buildVirtualHost(
options *config.Options,
name string,
host string,
) (*envoy_config_route_v3.VirtualHost, error) {
vh := &envoy_config_route_v3.VirtualHost{
Name: name,
Domains: []string{host},
}
// these routes match /.pomerium/... and similar paths
rs, err := b.buildPomeriumHTTPRoutes(options, host)
if err != nil {
return nil, err
}
vh.Routes = append(vh.Routes, rs...)
return vh, nil
}
// buildLocalReplyConfig builds the local reply config: the config used to modify "local" replies, that is replies
// coming directly from envoy
func (b *Builder) buildLocalReplyConfig(
options *config.Options,
) *envoy_http_connection_manager.LocalReplyConfig {
// add global headers for HSTS headers (#2110)
var headers []*envoy_config_core_v3.HeaderValueOption
// if we're the proxy or authenticate service, add our global headers
if config.IsProxy(options.Services) || config.IsAuthenticate(options.Services) {
headers = toEnvoyHeaders(options.GetSetResponseHeaders())
}
return &envoy_http_connection_manager.LocalReplyConfig{
Mappers: []*envoy_http_connection_manager.ResponseMapper{{
Filter: &envoy_config_accesslog_v3.AccessLogFilter{
FilterSpecifier: &envoy_config_accesslog_v3.AccessLogFilter_ResponseFlagFilter{
ResponseFlagFilter: &envoy_config_accesslog_v3.ResponseFlagFilter{},
},
},
HeadersToAdd: headers,
}},
}
}
|
package server
func (h *httpInteractor) routes() {
//regular endpoints
h.router.HandleFunc("/", h.indexPage()).Methods("GET")
h.router.HandleFunc("/upload", h.uploadEndpoint()).Methods("POST")
//static files
h.router.HandleFunc("/static/css/bulma.min.css", h.bulmaCss()).Methods("GET")
h.router.HandleFunc("/static/css/dropzone.min.css", h.dropzoneCss()).Methods("GET")
h.router.HandleFunc("/static/css/custom.css", h.customCss()).Methods("GET")
h.router.HandleFunc("/static/js/dropzone.min.js", h.dropzoneJs()).Methods("GET")
}
|
/**
Merge two sorted linked lists and return it as a sorted list.
The list should be made by splicing together the nodes of the first two lists.
**/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
func mergeTwoLists(l1 *ListNode, l2 *ListNode) *ListNode {
head := &ListNode{}
pre := head
for l1 != nil && l2 != nil {
if l1.Val <= l2.Val {
head.Next = l1
l1 = l1.Next
}
else if l1.Val > l2.Val {
head.Next = l2
l2 = l2.Next
}
head = head.Next
}
for l1 != nil {
head.Next = l1
l1 = l1.Next
head = head.Next
}
for l2 != nil {
head.Next = l2
l2 = l2.Next
head = head.Next
}
return pre.Next
} |
package minilock
import (
"github.com/sycamoreone/base58"
"os"
"testing"
)
// TestID decodes a known minilock ID, checks its length (33 bytes: 32-byte
// public key plus checksum byte), and verifies that re-encoding the public
// key reproduces the original ID.
func TestID(t *testing.T) {
	const idBase58 = "radFxzH6yDYDyHiaZpvUr8UhqbpEzjQdfSF3XeZi9Py72"
	idBytes, err := base58.Decode(idBase58)
	if err != nil {
		t.Fatal(err)
	}
	if n := len(idBytes); n != 33 {
		t.Logf("minilock ID should have 33 bytes, but has %d\n", n)
		t.Fail()
	}
	var publicKey [32]byte
	copy(publicKey[:], idBytes[:32])
	if got := ID(&publicKey); got != idBase58 {
		t.Fatalf("wrong ID: expected %s but got %s\n", idBase58, got)
	}
}
// TestDeriveKeys1 verifies that deriving keys from a known passphrase and
// mail address yields the expected minilock ID.
// passphrase and expectedID were generated with the minilock plugin for Chrome.
//
// Fixes: the original discarded the error from DeriveKeys (a failed
// derivation would surface as a confusing ID mismatch instead) and wrapped
// the already-string gotID in a redundant string() conversion.
func TestDeriveKeys1(t *testing.T) {
	mailaddr := []byte("mustermann@example.com")
	passphrase := []byte("enumeration snapped unwarily distempers lovemaking taciturn sociological")
	expectedID := "radFxzH6yDYDyHiaZpvUr8UhqbpEzjQdfSF3XeZi9Py72"
	pk, _, err := DeriveKeys(passphrase, mailaddr)
	if err != nil {
		t.Fatal(err)
	}
	gotID := ID(pk)
	if gotID != expectedID {
		t.Fatalf("expected minilock ID %s, but got %s\n", expectedID, gotID)
	}
}
// testFileContent is the expected plaintext of testdata/test.txt.minilock,
// used by TestOpen to verify decryption.
var testFileContent = `Some text: Test 234.
Email address: mustermann@example.com
Passphrase: enumeration snapped unwarily distempers lovemaking taciturn sociological
minilock ID: radFxzH6yDYDyHiaZpvUr8UhqbpEzjQdfSF3XeZi9Py72`
// TestOpen decrypts testdata/test.txt.minilock with keys derived from a
// known passphrase and checks both the embedded filename and the plaintext.
//
// Fixes: the filename check called t.Fatal with a format string and
// argument — Fatal does not interpret format verbs (go vet flags this);
// it must be t.Fatalf.
func TestOpen(t *testing.T) {
	f, err := os.Open("testdata/test.txt.minilock")
	if err != nil {
		t.Fatal("open testdata/test.txt.minilock: ", err)
	}
	defer f.Close()
	// Now Open the file and check if the header is parsed correctly.
	mailaddr := []byte("mustermann@example.com")
	passphrase := []byte("enumeration snapped unwarily distempers lovemaking taciturn sociological")
	pk, sk, err := DeriveKeys(passphrase, mailaddr)
	if err != nil {
		t.Fatal(err)
	}
	filename, m, err := Open(f, pk, sk)
	if err != nil {
		t.Fatal(err)
	}
	if filename != "test.txt" {
		t.Fatalf("wrong filename: expected test.txt, but got %s\n", filename)
	}
	if string(m) != testFileContent {
		t.Fatalf("wrong decrypted content: expected testdata/test.txt but got \n%s\n", m)
	}
}
|
package handlers
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
. "github.com/smartystreets/goconvey/convey"
"github.com/stellar/gateway/bridge/config"
"github.com/stellar/gateway/horizon"
"github.com/stellar/gateway/mocks"
"github.com/stellar/gateway/net"
"github.com/stellar/gateway/test"
"github.com/stretchr/testify/assert"
)
// TestRequestHandlerBuilder exercises the /builder endpoint: for each Stellar
// operation type it POSTs a JSON transaction description and asserts the
// handler returns HTTP 200 with the exact expected signed transaction
// envelope (base64 XDR). The envelopes are golden values; they depend on the
// test network passphrase, the fixed source account/signer seed, and the
// sequence number, so any change to those inputs invalidates them.
func TestRequestHandlerBuilder(t *testing.T) {
c := &config.Config{NetworkPassphrase: "Test SDF Network ; September 2015"}
mockHorizon := new(mocks.MockHorizon)
mockHTTPClient := new(mocks.MockHTTPClient)
mockTransactionSubmitter := new(mocks.MockTransactionSubmitter)
mockFederationResolver := new(mocks.MockFederationResolver)
mockStellartomlResolver := new(mocks.MockStellartomlResolver)
requestHandler := RequestHandler{
Config: c,
Client: mockHTTPClient,
Horizon: mockHorizon,
TransactionSubmitter: mockTransactionSubmitter,
FederationResolver: mockFederationResolver,
StellarTomlResolver: mockStellartomlResolver,
}
testServer := httptest.NewServer(http.HandlerFunc(requestHandler.Builder))
defer testServer.Close()
Convey("Builder", t, func() {
// An empty sequence_number must be resolved by loading the account
// from horizon (mocked to return sequence 123).
Convey("Empty Sequence Number", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "",
"operations": [
{
"type": "create_account",
"body": {
"destination": "GCOEGO43PFSLE4K7WRZQNRO3PIOTRLKRASP32W7DSPBF65XFT4V6PSV3",
"starting_balance": "50"
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
// Loading sequence number
mockHorizon.On(
"LoadAccount",
"GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
).Return(
horizon.AccountResponse{
SequenceNumber: "123",
},
nil,
).Once()
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB8AAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAnEM7m3lksnFftHMGxdt6HTitUQSfvVvjk8JfduWfK+cAAAAAHc1lAAAAAAAAAAABn420/AAAAECZTxo7tUr19fExL97C9wjIjRj0A7NK6gUVt7LwUrKqGsVxM6Un1L907brqp6hEjrqWlfvZchwgFv6syME3rXQE"
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// create_account with an explicit sequence number.
Convey("CreateAccount", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "create_account",
"body": {
"destination": "GCOEGO43PFSLE4K7WRZQNRO3PIOTRLKRASP32W7DSPBF65XFT4V6PSV3",
"starting_balance": "50"
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAnEM7m3lksnFftHMGxdt6HTitUQSfvVvjk8JfduWfK+cAAAAAHc1lAAAAAAAAAAABn420/AAAAECXY+neSolhAeHUXf+UrOV6PjeJnvLM/HqjOlOEWD3hmu/z9aBksDu9zqa26jS14eMpZzq8sofnnvt248FUO+cP"
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// payment with a credit asset.
Convey("Payment", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "payment",
"body": {
"destination": "GCOEGO43PFSLE4K7WRZQNRO3PIOTRLKRASP32W7DSPBF65XFT4V6PSV3",
"amount": "100",
"asset": {
"code": "USD",
"issuer": "GACETOPHMOLSZLG5IQ3D6KQDKCAAYUYTTQHIEY6IGZE4VOBDD2YY6YAO"
}
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAnEM7m3lksnFftHMGxdt6HTitUQSfvVvjk8JfduWfK+cAAAABVVNEAAAAAAAESbnnY5csrN1ENj8qA1CADFMTnA6CY8g2Scq4Ix6xjwAAAAA7msoAAAAAAAAAAAGfjbT8AAAAQGlQbmCv74lzQpjUOn8dsQ9/BFCKHSev6DLo4lS2wcS20GpfIjGZSXIAry/3porFM+3xrvBWlIH9Tr/QFKjqRAU="
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// path_payment including an empty (native) asset in the path.
Convey("PathPayment", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "path_payment",
"body": {
"source": "GBYLUAJBHGZMVAYCALM4ZRTAOY74NTSSULG42VKVY2EOWS5X2HFBB2VL",
"destination": "GCOEGO43PFSLE4K7WRZQNRO3PIOTRLKRASP32W7DSPBF65XFT4V6PSV3",
"destination_amount": "500",
"destination_asset": {
"code": "EUR",
"issuer": "GDOJMKTDLGGLROSSM5BV5MXIAQ3JZHASQFUV55WBJ45AFOUXSVVFGPTJ"
},
"send_max": "100",
"send_asset": {
"code": "USD",
"issuer": "GACETOPHMOLSZLG5IQ3D6KQDKCAAYUYTTQHIEY6IGZE4VOBDD2YY6YAO"
},
"path": [
{
"code": "ABCDEFG",
"issuer": "GD4RIHH2HWB4MPJN72G2VGLRPUXDODFNQG6DVU47HMSSSF3RIQ4UXALD"
},
{}
]
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAQAAAABwugEhObLKgwIC2czGYHY/xs5Sos3NVVXGiOtLt9HKEAAAAAIAAAABVVNEAAAAAAAESbnnY5csrN1ENj8qA1CADFMTnA6CY8g2Scq4Ix6xjwAAAAA7msoAAAAAAJxDO5t5ZLJxX7RzBsXbeh04rVEEn71b45PCX3blnyvnAAAAAUVVUgAAAAAA3JYqY1mMuLpSZ0NesugENpycEoFpXvbBTzoCupeValMAAAABKgXyAAAAAAIAAAACQUJDREVGRwAAAAAAAAAAAPkUHPo9g8Y9Lf6NqplxfS43DK2BvDrTnzslKRdxRDlLAAAAAAAAAAAAAAABn420/AAAAEA9DEvKZhLwLcStP8/ZsqaEAdlNc91Eyz5mLUiN19etsIYaTPNugsVEWYJOiulXXSIwwitoyxQ1t2jr6VS0mXcB"
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// manage_offer updating an existing offer (offer_id 100).
Convey("ManageOffer", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "manage_offer",
"body": {
"selling": {
"code": "EUR",
"issuer": "GDOJMKTDLGGLROSSM5BV5MXIAQ3JZHASQFUV55WBJ45AFOUXSVVFGPTJ"
},
"buying": {
"code": "USD",
"issuer": "GACETOPHMOLSZLG5IQ3D6KQDKCAAYUYTTQHIEY6IGZE4VOBDD2YY6YAO"
},
"amount": "123456",
"price": "2.93850088",
"offer_id": "100"
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAADclipjWYy4ulJnQ16y6AQ2nJwSgWle9sFPOgK6l5VqUwAAAAFVU0QAAAAAAARJuedjlyys3UQ2PyoDUIAMUxOcDoJjyDZJyrgjHrGPAAABH3GCoAACMHl9AL68IAAAAAAAAABkAAAAAAAAAAGfjbT8AAAAQEpMML2mghfM2Dzkpw6eT1N00rrIC7v3xe8zy7yc8rcGzFxIw/4/E69uq+rst+xDoeMTn0b3iBtjr2DEV52o/wE="
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// create_passive_offer (no offer_id).
Convey("CreatePassiveOffer", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "create_passive_offer",
"body": {
"selling": {
"code": "EUR",
"issuer": "GDOJMKTDLGGLROSSM5BV5MXIAQ3JZHASQFUV55WBJ45AFOUXSVVFGPTJ"
},
"buying": {
"code": "USD",
"issuer": "GACETOPHMOLSZLG5IQ3D6KQDKCAAYUYTTQHIEY6IGZE4VOBDD2YY6YAO"
},
"amount": "123456",
"price": "2.93850088"
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAQAAAABRVVSAAAAAADclipjWYy4ulJnQ16y6AQ2nJwSgWle9sFPOgK6l5VqUwAAAAFVU0QAAAAAAARJuedjlyys3UQ2PyoDUIAMUxOcDoJjyDZJyrgjHrGPAAABH3GCoAACMHl9AL68IAAAAAAAAAABn420/AAAAEAtK8juIThYp4LXtgpN8gVNRR42iiR6tz8euSKqqqzKGELCHcPrmFUuYqtecrJi8CyPCYTp0nqGY9mtJCHFYpsC"
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// set_options exercising every option field at once.
Convey("SetOptions", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "set_options",
"body": {
"inflation_dest": "GBMPZVOMJ67WQBTBCVURDKTGL4557272EGQMAJCXPSMLOE63XPLL6SVA",
"set_flags": [1, 2],
"clear_flags": [4],
"master_weight": 100,
"low_threshold": 1,
"medium_threshold": 2,
"high_threshold": 3,
"home_domain": "stellar.org",
"signer": {
"public_key": "GA6VMJJQM2QBPPIXK2UVTAOS4XSSSAKSCOGFQE55IMRBQR65GIVDTTQV",
"weight": 5
}
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAUAAAABAAAAAFj81cxPv2gGYRVpEapmXzvf6/ohoMAkV3yYtxPbu9a/AAAAAQAAAAQAAAABAAAAAwAAAAEAAABkAAAAAQAAAAEAAAABAAAAAgAAAAEAAAADAAAAAQAAAAtzdGVsbGFyLm9yZwAAAAABAAAAAD1WJTBmoBe9F1apWYHS5eUpAVITjFgTvUMiGEfdMio5AAAABQAAAAAAAAABn420/AAAAEAtQAlVOLBR6sb/YHRg7XcSEPSJ07irs6cCSDpK95rYE7Ga5ghiLXHqRJQ2B9cMmf8FYqzeaHdYPiESZqowhb0F"
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// change_trust with the default (maximum) limit.
Convey("ChangeTrust", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "change_trust",
"body": {
"asset": {
"code": "USD",
"issuer": "GCHGRVNTXAV3OXNMCSA63BUCD6AZZX6PN2542QB6GIVTXGHQ65XS35DS"
}
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAACOaNWzuCu3XawUge2Ggh+BnN/PbrvNQD4yKzuY8PdvLX//////////AAAAAAAAAAGfjbT8AAAAQFftcSiqTvZOQwDJnoJ7buLgYXyjRacggCZ7yEhnPN4eXxlpQycvLLFa3U8xv0Mcnx5frSNKxu0sDIOm88Iicw8="
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// allow_trust with a 12-character asset code.
Convey("AllowTrust", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "allow_trust",
"body": {
"asset_code": "USDUSD",
"trustor": "GBLH67TQHRNRLERQEIQJDNBV2DSWPHAPP43MBIF7DVKA7X55APUNS4LL",
"authorize": true
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAVn9+cDxbFZIwIiCRtDXQ5WecD382wKC/HVQP370D6NkAAAACVVNEVVNEAAAAAAAAAAAAAQAAAAAAAAABn420/AAAAEA9Ht9mJaKdYoRg/rAX/cl/Q89Juhmi8f7iGBdCrSVAs+VN7NVJXR+0aZpoZIjcJD/QBPiuzZIK1ea2fN7I0I8J"
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// account_merge into another account.
Convey("AccountMerge", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "account_merge",
"body": {
"destination": "GBLH67TQHRNRLERQEIQJDNBV2DSWPHAPP43MBIF7DVKA7X55APUNS4LL"
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAgAAAAAVn9+cDxbFZIwIiCRtDXQ5WecD382wKC/HVQP370D6NkAAAAAAAAAAZ+NtPwAAABALCyRn/E/CgLdPWGgP+1pd2Lkf3jWgNANKQ4QeGgUxgROhqkTUXaPA6XzOWS8yUpzZMufl6nkh8UFqa6Hc1emCA=="
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// inflation (no body fields).
Convey("Inflation", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "inflation",
"body": {}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAAAZ+NtPwAAABAlBFCwJ3VzBd+CE+n3mA4t71SVrDIjSgRyBnz9zYLN7qkqu8AD6cyvMRj8/alSozSPAZcSe+qBEO7E5biR+YrAA=="
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
// manage_data with base64-encoded data bytes.
Convey("ManageData", func() {
data := test.StringToJSONMap(`{
"source": "GBWJES3WOKK7PRLJKZVGIPVFGQSSGCRMY7H3GCZ7BEG6ZTDB4FZXTPJ5",
"sequence_number": "123",
"operations": [
{
"type": "manage_data",
"body": {
"name": "test_data",
"data": "AQIDBAUG"
}
}
],
"signers": ["SABY7FRMMJWPBTKQQ2ZN43AUJQ3Z2ZAK36VYSG2SPE2ABNQXA66H5E5G"]
}`)
Convey("it should return correct XDR", func() {
statusCode, response := net.JSONGetResponse(testServer, data)
responseString := strings.TrimSpace(string(response))
assert.Equal(t, 200, statusCode)
expected := test.StringToJSONMap(`{
"transaction_envelope": "AAAAAGySS3ZylffFaVZqZD6lNCUjCizHz7MLPwkN7Mxh4XN5AAAAZAAAAAAAAAB7AAAAAAAAAAAAAAABAAAAAAAAAAoAAAAJdGVzdF9kYXRhAAAAAAAAAQAAAAYBAgMEBQYAAAAAAAAAAAABn420/AAAAEBkO27ebDbsn1WzzLH5lUfJH3Y0Pgd1dlRx3Ip1dEZkvRPFFDLZuXi5DlW9uxNgeqThNsqnK7PPHfhyuWBVQpgN"
}`)
assert.Equal(t, expected, test.StringToJSONMap(responseString))
})
})
})
}
|
package podprocess
import (
"database/sql"
"time"
"github.com/square/p2/pkg/launch"
"github.com/square/p2/pkg/logging"
"github.com/square/p2/pkg/types"
"github.com/square/p2/pkg/util"
_ "github.com/mattn/go-sqlite3"
)
// FinishService stores and queries records of runit process exits
// ("finishes"). The default implementation is backed by sqlite.
type FinishService interface {
// Closes any resources such as database connection
Close() error
// Inserts a finish row corresponding to a process exit
Insert(finish FinishOutput) error
// Runs any outstanding migrations
Migrate() error
// Reads all finish data after the given ID
GetLatestFinishes(lastID int64) ([]FinishOutput, error)
// Gets the last finish result for a given PodUniqueKey
LastFinishForPodUniqueKey(podUniqueKey types.PodUniqueKey) (FinishOutput, error)
// Deletes any rows with dates before the specified time
PruneRowsBefore(time.Time) error
// LastFinishID() returns the highest ID in the finishes table. It is
// useful for repairing the workspace file which is meant to contain
// the last processed ID.
LastFinishID() (int64, error)
}
// sqliteFinishService implements FinishService on top of a local sqlite
// database.
type sqliteFinishService struct {
db *sql.DB
logger logging.Logger
}
// NewSQLiteFinishService opens (or lazily creates) the sqlite database at
// sqliteDBPath and returns a FinishService backed by it. Note that
// sql.Open does not touch the file yet, so a bad path surfaces on first use.
func NewSQLiteFinishService(sqliteDBPath string, logger logging.Logger) (FinishService, error) {
	handle, err := sql.Open("sqlite3", sqliteDBPath)
	if err != nil {
		return nil, util.Errorf("Could not open database: %s", err)
	}
	service := sqliteFinishService{
		db:     handle,
		logger: logger,
	}
	return service, nil
}
// FinishOutput represents a row in the sqlite database indicating the exit
// of a runit process.
type FinishOutput struct {
PodID types.PodID `json:"pod_id"`
LaunchableID launch.LaunchableID `json:"launchable_id"`
EntryPoint string `json:"entry_point"`
PodUniqueKey types.PodUniqueKey `json:"pod_unique_key"`
// These two model the arguments given to the ./finish script under runit:
// (http://smarden.org/runit/runsv.8.html)
ExitCode int `json:"exit_code"`
ExitStatus int `json:"exit_status"`
// This is never written explicitly and is determined automatically by
// sqlite (via AUTOINCREMENT)
ID int64
// This is never written explicitly, it's determined automatically by
// sqlite (via DEFAULT CURRENT_TIMESTAMP)
ExitTime time.Time `json:"exit_time"`
}
// Insert writes a single finish row recording a process exit.
func (s sqliteFinishService) Insert(finish FinishOutput) error {
	const insertStatement = `insert into finishes(
	pod_id,
	pod_unique_key,
	launchable_id,
	entry_point,
	exit_code,
	exit_status
) VALUES(?, ?, ?, ?, ?, ?)`
	_, execErr := s.db.Exec(insertStatement,
		finish.PodID.String(),
		finish.PodUniqueKey.String(),
		finish.LaunchableID.String(),
		finish.EntryPoint,
		finish.ExitCode,
		finish.ExitStatus,
	)
	if execErr != nil {
		return util.Errorf("Couldn't insert finish line into sqlite database: %s", execErr)
	}
	return nil
}
// Statements for maintaining the schema_version bookkeeping table.
// Not considered a migration.
const (
	getSchemaVersionQuery        = `select version from schema_version;`
	updateSchemaVersionStatement = `update schema_version set version = ?;`
	// This will always be run, and is idempotent
	sqliteCreateSchemaVersionTable = `create table if not exists schema_version ( version integer );`
	// This should only be run if no rows are returned when checking for the
	// written schema_version, which should only happen if the schema
	// version table was just created
	sqliteInitializeSchemaVersionTable = `insert into schema_version(version) values ( 0 );`
)
var (
	// sqliteMigrations is the ordered list of schema migrations; the index
	// of the last applied entry (plus one) is stored in schema_version.
	// Entries must only ever be appended, never reordered or edited.
	sqliteMigrations = []string{
		`create table finishes (
	id integer not null primary key autoincrement,
	date datetime default current_timestamp,
	pod_id text,
	pod_unique_key text,
	launchable_id text,
	entry_point text,
	exit_code integer,
	exit_status integer
);`,
		"create index finish_date on finishes(date);",
		// FUTURE MIGRATIONS GO HERE
	}
)
// Migrate brings the sqlite schema up to date by applying, in order, any
// entries of sqliteMigrations not yet recorded in the schema_version table.
// All pending migrations are applied in a single transaction.
//
// The named return value is load-bearing: the deferred function assigns the
// commit (or original) error to err so it propagates to the caller.
func (s sqliteFinishService) Migrate() (err error) {
	// idempotent
	_, err = s.db.Exec(sqliteCreateSchemaVersionTable)
	if err != nil {
		return util.Errorf("Could not set up schema_version table: %s", err)
	}
	var lastSchemaVersion int64
	err = s.db.QueryRow(getSchemaVersionQuery).Scan(&lastSchemaVersion)
	switch {
	case err == sql.ErrNoRows:
		// We just created the table, insert a row with 0
		_, err = s.db.Exec(sqliteInitializeSchemaVersionTable)
		if err != nil {
			return util.Errorf("Could not initialize schema_version table: %s", err)
		}
	case err != nil:
		return util.Errorf("Error checking schema version: %s", err)
	}
	if lastSchemaVersion == int64(len(sqliteMigrations)) {
		// we're caught up
		return nil
	}
	tx, err := s.db.Begin()
	if err != nil {
		return util.Errorf("Could not start transaction for migrations: %s", err)
	}
	defer func() {
		if err == nil {
			// return the commit error by assigning to return variable
			err = tx.Commit()
		} else {
			// return the original error not the rollback error
			_ = tx.Rollback()
		}
	}()
	// Apply only the migrations past the recorded version, in order.
	for i := lastSchemaVersion; i < int64(len(sqliteMigrations)); i++ {
		statement := sqliteMigrations[i]
		_, err = tx.Exec(statement)
		if err != nil {
			return util.Errorf("Could not apply migration %d: %s", i+1, err)
		}
	}
	_, err = tx.Exec(updateSchemaVersionStatement, int64(len(sqliteMigrations)))
	if err != nil {
		s.logger.WithError(err).Errorln("Could not update schema_version table")
	}
	return err
}
// PruneRowsBefore deletes all finish rows whose date is earlier than cutoff.
//
// FIX: the parameter was previously named "time", which shadowed the time
// package inside the function body; renamed to cutoff (callers are
// unaffected — parameter names are not part of the interface in Go).
func (s sqliteFinishService) PruneRowsBefore(cutoff time.Time) error {
	_, err := s.db.Exec(`
	DELETE
	FROM finishes
	WHERE date < ?
	`, cutoff)
	return err
}
// Close shuts down the underlying sqlite database handle.
func (s sqliteFinishService) Close() error {
	err := s.db.Close()
	return err
}
// GetLatestFinishes returns all finish rows with an ID strictly greater
// than lastID, in whatever order sqlite yields them.
//
// FIX: the original ignored rows.Err(), silently dropping iteration errors;
// receiver renamed f -> s for consistency with the other methods.
func (s sqliteFinishService) GetLatestFinishes(lastID int64) ([]FinishOutput, error) {
	rows, err := s.db.Query(`
	SELECT id, date, pod_id, pod_unique_key, launchable_id, entry_point, exit_code, exit_status
	FROM finishes
	WHERE id > ?
	`, lastID)
	if err != nil {
		s.logger.WithError(err).Errorln("Could not query for latest process exits")
		return nil, err
	}
	defer rows.Close()
	var finishes []FinishOutput
	for rows.Next() {
		finishOutput, err := scanRow(rows)
		if err != nil {
			s.logger.WithError(err).Errorln("Could not scan row")
			return nil, err
		}
		finishes = append(finishes, finishOutput)
	}
	// Surface any error encountered during iteration (e.g. a connection
	// failure mid-scan) instead of returning a silently truncated slice.
	if err := rows.Err(); err != nil {
		s.logger.WithError(err).Errorln("Error iterating over finish rows")
		return nil, err
	}
	return finishes, nil
}
// LastFinishForPodUniqueKey returns the most recent finish row recorded for
// podUniqueKey.
//
// FIX: without an ORDER BY, QueryRow returned an arbitrary matching row —
// not necessarily the *last* finish the method name promises. Row order
// without ORDER BY is unspecified in SQL, so make it explicit.
func (s sqliteFinishService) LastFinishForPodUniqueKey(podUniqueKey types.PodUniqueKey) (FinishOutput, error) {
	row := s.db.QueryRow(`
	SELECT id, date, pod_id, pod_unique_key, launchable_id, entry_point, exit_code, exit_status
	FROM finishes
	WHERE pod_unique_key = ?
	ORDER BY id DESC
	LIMIT 1
	`, podUniqueKey.String())
	return scanRow(row)
}
// LastFinishID returns the highest ID in the finishes table. It is useful
// for repairing the workspace file which is meant to contain the last
// processed ID. Note that an empty table yields an error (sql.ErrNoRows
// wrapped by util.Errorf), matching the original behavior.
func (s sqliteFinishService) LastFinishID() (int64, error) {
	var lastID int64
	scanErr := s.db.QueryRow("SELECT id FROM finishes ORDER BY id DESC LIMIT 1;").Scan(&lastID)
	if scanErr != nil {
		return 0, util.Errorf("could not read last ID from database: %s", scanErr)
	}
	return lastID, nil
}
// Scanner abstracts the Scan method so scanRow can accept either a single
// row or a row iterator. Implemented by both *sql.Row and *sql.Rows.
type Scanner interface {
	Scan(...interface{}) error
}
// scanRow runs Scan() once on the passed Scanner and converts the result to
// a FinishOutput. Column order must match the SELECT lists used by the
// callers: id, date, pod_id, pod_unique_key, launchable_id, entry_point,
// exit_code, exit_status.
func scanRow(scanner Scanner) (FinishOutput, error) {
	var (
		rowID                int64
		exitedAt             time.Time
		podID, uniqueKey     string
		launchID, entryPoint string
		code, status         int
	)
	if err := scanner.Scan(&rowID, &exitedAt, &podID, &uniqueKey, &launchID, &entryPoint, &code, &status); err != nil {
		return FinishOutput{}, err
	}
	out := FinishOutput{
		ID:           rowID,
		PodID:        types.PodID(podID),
		LaunchableID: launch.LaunchableID(launchID),
		EntryPoint:   entryPoint,
		PodUniqueKey: types.PodUniqueKey(uniqueKey),
		ExitCode:     code,
		ExitStatus:   status,
		ExitTime:     exitedAt,
	}
	return out, nil
}
|
package tzdb
import (
"testing"
"time"
)
const (
	baseURL = "http://api.timezonedb.com"
	// NOTE(review): a live API key is committed in source — consider
	// loading it from an environment variable instead.
	apiKey = "Q34227MHXHAF"
	spath  = "v2.1/get-time-zone"
)
// Test_GetTimezone exercises GetTimezone against the live timezonedb API for
// Tokyo (fixed UTC+9) and Los Angeles (PST/PDT), including explicit winter
// and summer dates to pin down DST handling. Sleeps between calls avoid the
// API's rate limit.
func Test_GetTimezone(t *testing.T) {
	now := time.Now().Unix()
	t.Logf("now: %d", now)
	client, err := NewTzdbClient(baseURL, apiKey, 3)
	if err != nil {
		t.Fatal(err)
	}
	// Test 1: Tokyo (JST, UTC+9 year-round)
	lon := 139.767125
	lat := 35.681236
	jisa, err := client.GetTimezone(lon, lat, now)
	if err != nil {
		t.Fatal(err)
	}
	if jisa != 32400 {
		t.Errorf("Wrong jisa, expected: %d, actual: %d", 32400, jisa)
	}
	// Avoid call limit
	time.Sleep(500 * time.Millisecond)
	// Test 2: Los Angeles ("now" may fall in either PST or PDT)
	lon = -122.419416
	lat = 37.77493
	jisa, err = client.GetTimezone(lon, lat, now)
	if err != nil {
		t.Fatal(err)
	}
	if jisa != -28800 && jisa != -25200 {
		t.Errorf("Wrong jisa, expected: %d or %d, actual: %d", -28800, -25200, jisa)
	}
	time.Sleep(500 * time.Millisecond)
	// Test 3: Los Angeles in winter (PST, UTC-8)
	pt, _ := time.LoadLocation("America/Los_Angeles")
	winterDay := time.Date(2019, 12, 1, 0, 0, 0, 0, pt).Unix()
	jisa, err = client.GetTimezone(lon, lat, winterDay)
	if err != nil {
		t.Fatal(err)
	}
	if jisa != -28800 {
		t.Errorf("Wrong jisa, expected: %d, actual: %d", -28800, jisa)
	}
	time.Sleep(500 * time.Millisecond)
	// Test 4: Los Angeles in summer (PDT, UTC-7)
	summerDay := time.Date(2019, 7, 1, 0, 0, 0, 0, pt).Unix()
	jisa, err = client.GetTimezone(lon, lat, summerDay)
	if err != nil {
		t.Fatal(err)
	}
	if jisa != -25200 {
		// FIX: the failure message previously reported -28800 as the
		// expected value although the assertion checks for -25200.
		t.Errorf("Wrong jisa, expected: %d, actual: %d", -25200, jisa)
	}
}
|
/*
Given what is supposed to be typed and what is actually typed, write a function that returns the broken key(s). The function looks like:
findBrokenKeys(correct phrase, what you actually typed)
Examples
findBrokenKeys("happy birthday", "hawwy birthday") ➞ ["p"]
findBrokenKeys("starry night", "starrq light") ➞ ["y", "n"]
findBrokenKeys("beethoven", "affthoif5") ➞ ["b", "e", "v", "n"]
Notes
Broken keys should be ordered by when they first appear in the sentence.
Only one broken key per letter should be listed.
Letters will all be in lower case.
*/
package main
import (
"unicode/utf8"
)
// main runs the inline test cases for brokenkeys; each eq call panics on
// mismatch.
func main() {
	cases := []struct {
		correct, typed string
		want           []rune
	}{
		{"happy birthday", "hawwy birthday", []rune{'p'}},
		{"starry night", "starrq light", []rune{'y', 'n'}},
		{"beethoven", "affthoif5", []rune{'b', 'e', 'v', 'n'}},
		{"mozart", "aiwgvx", []rune{'m', 'o', 'z', 'a', 'r', 't'}},
		{"5678", "4678", []rune{'5'}},
		{"!!??$$", "$$!!??", []rune{'!', '?', '$'}},
	}
	for _, c := range cases {
		eq(brokenkeys(c.correct, c.typed), c.want)
	}
}
// brokenkeys compares the intended string s with the typed string t and
// returns, ordered by first appearance in s, each distinct rune of s that
// was typed incorrectly. If t is shorter than s, the missing positions are
// treated as mismatches (DecodeRuneInString yields RuneError on "").
func brokenkeys(s, t string) []rune {
	seen := make(map[rune]bool)
	var broken []rune
	for len(s) > 0 {
		want, wn := utf8.DecodeRuneInString(s)
		got, gn := utf8.DecodeRuneInString(t)
		if want != got && !seen[want] {
			seen[want] = true
			broken = append(broken, want)
		}
		s, t = s[wn:], t[gn:]
	}
	return broken
}
// assert panics when x is false; minimal check helper for the inline tests.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// eq asserts that the rune slices a and b have identical length and content.
func eq(a, b []rune) {
	assert(len(a) == len(b))
	for i, r := range a {
		assert(r == b[i])
	}
}
|
package osbuild2
import (
"encoding/json"
"fmt"
)
// Stage is a single stage of a pipeline executing one step.
type Stage struct {
	// Well-known name in reverse domain-name notation, uniquely identifying
	// the stage type.
	Type string `json:"type"`
	// Inputs, Options, Devices and Mounts are stage-type specific; the
	// concrete types are selected by name in UnmarshalJSON below.
	// Options fully determine the operations of the stage.
	Inputs  Inputs       `json:"inputs,omitempty"`
	Options StageOptions `json:"options,omitempty"`
	Devices Devices      `json:"devices,omitempty"`
	Mounts  Mounts       `json:"mounts,omitempty"`
}
// StageOptions specify the operations of a given stage-type. It is a marker
// interface; each stage's options struct implements isStageOptions.
type StageOptions interface {
	isStageOptions()
}
// rawStage is the intermediate decoding form of a Stage: the type name is
// decoded eagerly while the type-dependent sections are kept as raw JSON
// until UnmarshalJSON selects the concrete Go types for them.
type rawStage struct {
	Type    string          `json:"type"`
	Options json.RawMessage `json:"options"`
	Inputs  json.RawMessage `json:"inputs"`
	Devices json.RawMessage `json:"devices"`
	Mounts  json.RawMessage `json:"mounts"`
}
// UnmarshalJSON unmarshals JSON into a Stage object. Each type of stage has
// a custom unmarshaller for its options, selected based on the stage name.
//
// FIX: the original captured rawStage.Devices and rawStage.Mounts but never
// unmarshalled them, so stage.Devices/stage.Mounts were left as empty
// freshly-allocated values; they are now decoded the same way inputs are.
func (stage *Stage) UnmarshalJSON(data []byte) error {
	var rawStage rawStage
	if err := json.Unmarshal(data, &rawStage); err != nil {
		return err
	}
	var options StageOptions
	var inputs Inputs
	var devices Devices
	var mounts Mounts
	switch rawStage.Type {
	case "org.osbuild.authselect":
		options = new(AuthselectStageOptions)
	case "org.osbuild.fix-bls":
		options = new(FixBLSStageOptions)
	case "org.osbuild.fstab":
		options = new(FSTabStageOptions)
	case "org.osbuild.grub2":
		options = new(GRUB2StageOptions)
	case "org.osbuild.locale":
		options = new(LocaleStageOptions)
	case "org.osbuild.selinux":
		options = new(SELinuxStageOptions)
	case "org.osbuild.hostname":
		options = new(HostnameStageOptions)
	case "org.osbuild.users":
		options = new(UsersStageOptions)
	case "org.osbuild.groups":
		options = new(GroupsStageOptions)
	case "org.osbuild.timezone":
		options = new(TimezoneStageOptions)
	case "org.osbuild.cloud-init":
		options = new(CloudInitStageOptions)
	case "org.osbuild.chrony":
		options = new(ChronyStageOptions)
	case "org.osbuild.dracut":
		options = new(DracutStageOptions)
	case "org.osbuild.dracut.conf":
		options = new(DracutConfStageOptions)
	case "org.osbuild.keymap":
		options = new(KeymapStageOptions)
	case "org.osbuild.modprobe":
		options = new(ModprobeStageOptions)
	case "org.osbuild.firewall":
		options = new(FirewallStageOptions)
	case "org.osbuild.rhsm":
		options = new(RHSMStageOptions)
	case "org.osbuild.systemd":
		options = new(SystemdStageOptions)
	case "org.osbuild.systemd.unit":
		options = new(SystemdUnitStageOptions)
	case "org.osbuild.systemd-logind":
		options = new(SystemdLogindStageOptions)
	case "org.osbuild.script":
		options = new(ScriptStageOptions)
	case "org.osbuild.sysconfig":
		options = new(SysconfigStageOptions)
	case "org.osbuild.kernel-cmdline":
		options = new(KernelCmdlineStageOptions)
	case "org.osbuild.rpm":
		options = new(RPMStageOptions)
		inputs = new(RPMStageInputs)
	case "org.osbuild.oci-archive":
		options = new(OCIArchiveStageOptions)
		inputs = new(OCIArchiveStageInputs)
	case "org.osbuild.ostree.commit":
		options = new(OSTreeCommitStageOptions)
		inputs = new(OSTreeCommitStageInputs)
	case "org.osbuild.ostree.pull":
		options = new(OSTreePullStageOptions)
		inputs = new(OSTreePullStageInputs)
	case "org.osbuild.ostree.init":
		options = new(OSTreeInitStageOptions)
	case "org.osbuild.ostree.preptree":
		options = new(OSTreePrepTreeStageOptions)
	case "org.osbuild.truncate":
		options = new(TruncateStageOptions)
	case "org.osbuild.sfdisk":
		options = new(SfdiskStageOptions)
		devices = new(SfdiskStageDevices)
	case "org.osbuild.copy":
		options = new(CopyStageOptions)
		inputs = new(CopyStageInputs)
		devices = new(CopyStageDevices)
		mounts = new(CopyStageMounts)
	case "org.osbuild.mkfs.btrfs":
		options = new(MkfsBtrfsStageOptions)
		devices = new(MkfsBtrfsStageDevices)
	case "org.osbuild.mkfs.ext4":
		options = new(MkfsExt4StageOptions)
		devices = new(MkfsExt4StageDevices)
	case "org.osbuild.mkfs.fat":
		options = new(MkfsFATStageOptions)
		devices = new(MkfsFATStageDevices)
	case "org.osbuild.mkfs.xfs":
		options = new(MkfsXfsStageOptions)
		devices = new(MkfsXfsStageDevices)
	case "org.osbuild.qemu":
		options = new(QEMUStageOptions)
		inputs = new(QEMUStageInputs)
	case "org.osbuild.xz":
		options = new(XzStageOptions)
		// TODO: Unmarshalling inputs should be moved to a separate method and struct should be determined by its Type
		// The stage accepts also source input, but we need to rework all inputs first to handle this nicely here.
		// Only files input is used by the XZ stage at this moment.
		inputs = new(FilesInputs)
	default:
		return fmt.Errorf("unexpected stage type: %s", rawStage.Type)
	}
	if err := json.Unmarshal(rawStage.Options, options); err != nil {
		return err
	}
	if inputs != nil && rawStage.Inputs != nil {
		if err := json.Unmarshal(rawStage.Inputs, inputs); err != nil {
			return err
		}
	}
	// Decode devices and mounts for the stage types that declare them
	// (previously dropped on the floor).
	if devices != nil && rawStage.Devices != nil {
		if err := json.Unmarshal(rawStage.Devices, devices); err != nil {
			return err
		}
	}
	if mounts != nil && rawStage.Mounts != nil {
		if err := json.Unmarshal(rawStage.Mounts, mounts); err != nil {
			return err
		}
	}
	stage.Type = rawStage.Type
	stage.Options = options
	stage.Inputs = inputs
	stage.Devices = devices
	stage.Mounts = mounts
	return nil
}
|
package main
import (
"context"
_ "github.com/lib/pq"
"github.com/the-gigi/delinkcious/pkg/db_util"
"github.com/the-gigi/delinkcious/pkg/link_manager_client"
om "github.com/the-gigi/delinkcious/pkg/object_model"
"log"
"os"
"os/exec"
)
// check panics on any non-nil error; this is a test driver, so crashing
// loudly is the desired failure mode.
func check(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// initDB connects to the local link_manager database and empties the tables
// used by the tests, so every run starts from a clean slate.
func initDB() {
	db, err := db_util.RunLocalDB("link_manager")
	check(err)
	for _, tableName := range []string{"tags", "links"} {
		check(db_util.DeleteFromTableIfExist(db, tableName))
	}
}
// runService builds (if necessary) and starts a service binary located in
// targetDir. The caller's working directory is saved and restored.
//
// FIX: the original saved/restored the working directory but never actually
// changed into targetDir, so it always built and ran from the caller's
// directory; it also printed the build output as a raw byte slice.
func runService(ctx context.Context, targetDir string, service string) {
	// Save and restore later the current working dir
	wd, err := os.Getwd()
	check(err)
	defer os.Chdir(wd)
	check(os.Chdir(targetDir))
	// Build the server if needed
	_, err = os.Stat("./" + service)
	if os.IsNotExist(err) {
		out, err := exec.Command("go", "build", ".").CombinedOutput()
		log.Println(string(out)) // print compiler output as text
		check(err)
	}
	cmd := exec.CommandContext(ctx, "./"+service)
	err = cmd.Start()
	check(err)
}
// runLinkService starts the link service from the current directory.
func runLinkService(ctx context.Context) {
	const serviceName = "link_service"
	runService(ctx, ".", serviceName)
}
// runSocialGraphService starts the social graph service from its own
// directory.
//
// FIX: the original passed "link_service" as the binary name — a copy-paste
// bug; the service living in ../social_graph_service is the social graph
// service.
func runSocialGraphService(ctx context.Context) {
	runService(ctx, "../social_graph_service", "social_graph_service")
}
// killServer is intended to stop the spawned services.
// NOTE(review): ctx.Done() only returns the context's done channel — it does
// not cancel anything. As written this is a no-op; to actually terminate the
// exec.CommandContext children, main would need context.WithCancel and the
// cancel function would have to be called here. TODO confirm intent.
func killServer(ctx context.Context) {
	ctx.Done()
}
// main is an end-to-end smoke test driver: it resets the local database,
// launches the social graph and link services, then exercises the link
// manager client (get, add, update) printing results along the way.
// Any failure panics via check.
func main() {
	initDB()
	ctx := context.Background()
	// NOTE(review): killServer is deferred but (see its comment) does not
	// actually cancel the background context, so the child processes are
	// not terminated here — verify against the services' own lifecycle.
	defer killServer(ctx)
	runSocialGraphService(ctx)
	runLinkService(ctx)
	// Run some tests with the client
	cli, err := link_manager_client.NewClient("localhost:8080")
	check(err)
	links, err := cli.GetLinks(om.GetLinksRequest{Username: "gigi"})
	check(err)
	log.Print("gigi's links:", links)
	err = cli.AddLink(om.AddLinkRequest{Username: "gigi",
		Url:   "https://github.com/the-gigi",
		Title: "Gigi on Github",
		Tags:  map[string]bool{"programming": true}})
	check(err)
	links, err = cli.GetLinks(om.GetLinksRequest{Username: "gigi"})
	check(err)
	log.Print("gigi's links:", links)
	err = cli.UpdateLink(om.UpdateLinkRequest{Username: "gigi",
		Url:         "https://github.com/the-gigi",
		Description: "Most of my open source code is here"},
	)
	check(err)
	links, err = cli.GetLinks(om.GetLinksRequest{Username: "gigi"})
	check(err)
	log.Print("gigi's links:", links)
}
|
package counters
type alertCounter int
// New created exported function
// type alertCounter
func New(value int) alertCounter {
return alertCounter(value)
}
|
package main
import (
"fmt"
"math/big"
)
// main prints the last ten digits of 1^1 + 2^2 + ... + 1000^1000
// (Project Euler problem 48).
func main() {
	fmt.Println(lastTenDigits(1000))
}

// lastTenDigits computes sum_{x=1..limit} x^x with arbitrary-precision
// integers and returns its last (up to) ten decimal digits.
// Extracted from main so the computation is testable, and the original
// snake_case locals (sum_str, last_ten) are renamed to Go-style camelCase.
func lastTenDigits(limit int64) string {
	sum := big.NewInt(1) // accounts for 1^1
	for x := int64(2); x <= limit; x++ {
		term := big.NewInt(x)
		term.Exp(term, term, nil)
		sum.Add(sum, term)
	}
	digits := sum.String()
	if len(digits) <= 10 {
		return digits
	}
	return digits[len(digits)-10:]
}
|
package accesslist
import (
"errors"
"fmt"
"testing"
"github.com/10gen/realm-cli/internal/cli"
"github.com/10gen/realm-cli/internal/cloud/realm"
"github.com/10gen/realm-cli/internal/utils/test/assert"
"github.com/10gen/realm-cli/internal/utils/test/mock"
"github.com/Netflix/go-expect"
)
// TestAllowedIPCreateHandler verifies CommandCreate.Handler: the happy path
// (app resolved, allowed IP created, success message printed, and every
// argument passed through to the Realm client unchanged) plus the error
// paths when app resolution or IP creation fails.
func TestAllowedIPCreateHandler(t *testing.T) {
	projectID := "projectID"
	appID := "appID"
	allowedIPID := "allowedIPID"
	allowedIPAddress := "allowedIPAddress"
	allowedIPComment := "allowedIPComment"
	allowedIPUseCurrent := false
	allowedIPAllowAll := false
	app := realm.App{
		ID:          appID,
		GroupID:     projectID,
		ClientAppID: "eggcorn-abcde",
		Name:        "eggcorn",
	}
	t.Run("should create an allowed ip", func(t *testing.T) {
		out, ui := mock.NewUI()
		realmClient := mock.RealmClient{}
		// Capture the arguments the handler forwards to the mocked client
		// so they can be asserted on after the call.
		var capturedFilter realm.AppFilter
		var capturedGroupID, capturedAppID, capturedIPAddress, capturedComment string
		var capturedUseCurrent bool
		realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
			capturedFilter = filter
			return []realm.App{app}, nil
		}
		realmClient.AllowedIPCreateFn = func(groupID, appID, ipAddress, comment string, useCurrent bool) (realm.AllowedIP, error) {
			capturedGroupID = groupID
			capturedAppID = appID
			capturedIPAddress = ipAddress
			capturedComment = comment
			capturedUseCurrent = useCurrent
			// Positional realm.AllowedIP literal: ID, address, comment, useCurrent.
			return realm.AllowedIP{allowedIPID, allowedIPAddress, allowedIPComment, allowedIPUseCurrent}, nil
		}
		cmd := &CommandCreate{createInputs{
			ProjectInputs: cli.ProjectInputs{
				Project: projectID,
				App:     appID,
			},
			Address:    allowedIPAddress,
			Comment:    allowedIPComment,
			UseCurrent: allowedIPUseCurrent,
			AllowAll:   allowedIPAllowAll,
		}}
		assert.Nil(t, cmd.Handler(nil, ui, cli.Clients{Realm: realmClient}))
		assert.Equal(t, fmt.Sprintf("Successfully created allowed IP, id: %s\n", "allowedIPID"), out.String())
		t.Log("and should properly pass through the expected inputs")
		assert.Equal(t, realm.AppFilter{projectID, appID, nil}, capturedFilter)
		assert.Equal(t, projectID, capturedGroupID)
		assert.Equal(t, appID, capturedAppID)
		assert.Equal(t, allowedIPAddress, capturedIPAddress)
		assert.Equal(t, allowedIPComment, capturedComment)
		assert.Equal(t, allowedIPUseCurrent, capturedUseCurrent)
	})
	t.Run("should return an error", func(t *testing.T) {
		// Table-driven error cases: each setupClient returns a mock whose
		// failing call should surface unchanged from the handler.
		for _, tc := range []struct {
			description string
			setupClient func() realm.Client
			expectedErr error
		}{
			{
				description: "when resolving the app fails",
				setupClient: func() realm.Client {
					realmClient := mock.RealmClient{}
					realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
						return nil, errors.New("something bad happened")
					}
					return realmClient
				},
				expectedErr: errors.New("something bad happened"),
			},
			{
				description: "when creating an allowed IP fails",
				setupClient: func() realm.Client {
					realmClient := mock.RealmClient{}
					realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
						return []realm.App{app}, nil
					}
					realmClient.AllowedIPCreateFn = func(groupID, appID, ipAddress, comment string, useCurrent bool) (realm.AllowedIP, error) {
						return realm.AllowedIP{}, errors.New("something bad happened")
					}
					return realmClient
				},
				expectedErr: errors.New("something bad happened"),
			},
		} {
			t.Run(tc.description, func(t *testing.T) {
				realmClient := tc.setupClient()
				cmd := &CommandCreate{}
				err := cmd.Handler(nil, nil, cli.Clients{Realm: realmClient})
				assert.Equal(t, tc.expectedErr, err)
			})
		}
	})
}
// TestAllowedIPCreateInputs verifies createInputs.Resolve: flag-provided
// data suppresses prompting, allow-all/use-current imply or replace the
// address, a missing address triggers an interactive prompt (driven via a
// virtual terminal), and mutually exclusive address sources are rejected.
func TestAllowedIPCreateInputs(t *testing.T) {
	for _, tc := range []struct {
		description string
		inputs      createInputs
		test        func(t *testing.T, i createInputs)
	}{
		{
			description: "should not prompt for inputs when flags provide the data",
			inputs: createInputs{
				Address: "0.0.0.0",
				Comment: "comment",
			},
			test: func(t *testing.T, i createInputs) {
				assert.Equal(t, createInputs{Address: "0.0.0.0", Comment: "comment"}, i)
			},
		},
		{
			description: "should not prompt for address when allow-all flag set",
			inputs: createInputs{
				AllowAll: true,
			},
			test: func(t *testing.T, i createInputs) {
				assert.Equal(t, createInputs{Address: "0.0.0.0", AllowAll: true}, i)
			},
		},
		{
			description: "should not prompt for address when use-current flag set",
			inputs: createInputs{
				UseCurrent: true,
			},
			test: func(t *testing.T, i createInputs) {
				assert.Equal(t, createInputs{UseCurrent: true}, i)
			},
		},
	} {
		t.Run(tc.description, func(t *testing.T) {
			profile := mock.NewProfile(t)
			// nil UI: these cases must resolve without any prompting.
			assert.Nil(t, tc.inputs.Resolve(profile, nil))
			tc.test(t, tc.inputs)
		})
	}
	t.Run("should prompt for address when none provided", func(t *testing.T) {
		_, console, _, ui, consoleErr := mock.NewVT10XConsole()
		assert.Nil(t, consoleErr)
		defer console.Close()
		profile := mock.NewProfile(t)
		// Scripted terminal interaction answering the address prompt.
		procedure := func(c *expect.Console) {
			c.ExpectString("IP Address")
			c.SendLine("0.0.0.0")
			c.ExpectEOF()
		}
		doneCh := make(chan (struct{}))
		go func() {
			defer close(doneCh)
			procedure(console)
		}()
		inputs := createInputs{}
		assert.Nil(t, inputs.Resolve(profile, ui))
		console.Tty().Close() // flush the writers
		<-doneCh              // wait for procedure to complete
		assert.Equal(t, createInputs{Address: "0.0.0.0"}, inputs)
	})
	t.Run("should error when more than one address given", func(t *testing.T) {
		for _, tc := range []struct {
			description string
			inputs      createInputs
		}{
			{
				description: "with allow all specified and an address provided",
				inputs:      createInputs{Address: "0.0.0.0", AllowAll: true},
			},
			{
				description: "with use current specified and an address provided",
				inputs:      createInputs{Address: "0.0.0.0", UseCurrent: true},
			},
			{
				description: "with both allow all and use current specified",
				inputs:      createInputs{AllowAll: true, UseCurrent: true},
			},
			{
				description: "with both allow all and use current specified and an address provided",
				inputs:      createInputs{Address: "0.0.0.0", AllowAll: true, UseCurrent: true},
			},
		} {
			t.Run(tc.description, func(t *testing.T) {
				profile := mock.NewProfile(t)
				err := tc.inputs.Resolve(profile, nil)
				assert.Equal(t, errTooManyAddressess, err)
			})
		}
	})
}
|
package go_workerpool
// Dispatcher fans incoming jobs out to a fixed-size pool of workers.
type Dispatcher struct {
	// WorkerPool receives each idle worker's job channel when that worker
	// is ready for more work.
	WorkerPool chan chan Job
	// Len is the number of workers started by Run.
	Len int
}
// NewDispatcher builds a Dispatcher that will manage a pool of n workers.
func NewDispatcher(n int) *Dispatcher {
	pool := make(chan chan Job, n)
	d := &Dispatcher{WorkerPool: pool, Len: n}
	return d
}
// Run starts Len workers and then blocks forever dispatching jobs to them.
func (d *Dispatcher) Run() {
	for n := 0; n < d.Len; n++ {
		NewWorker(d.WorkerPool).Start()
	}
	d.dispatch()
}
// dispatch is the scheduler loop: it queues incoming jobs and idle worker
// channels, and pairs them up as both become available.
// (Original comment, translated: "changed this into a queue".)
func (d *Dispatcher) dispatch() {
	var jobQ []Job
	var jobChannelQ []chan Job
	for {
		var activeJob Job
		var activeJobChannel chan Job
		if len(jobQ) > 0 && len(jobChannelQ) > 0 {
			activeJob = jobQ[0]
			activeJobChannel = jobChannelQ[0]
		}
		select {
		// JobChannel is a package-level channel (declared elsewhere in
		// this package) on which new jobs arrive.
		case job := <-JobChannel:
			jobQ = append(jobQ, job)
			//fmt.Printf("receive job, current jobQ size is %d\n", len(jobQ))
		case jobChannel := <-d.WorkerPool:
			jobChannelQ = append(jobChannelQ, jobChannel)
			//fmt.Printf("receive ready worker, current jobChannelQ size is %d\n", len(jobChannelQ))
		// When either queue is empty, activeJobChannel is nil and a send
		// on a nil channel blocks forever, so this case is effectively
		// disabled until both a job and an idle worker exist.
		case activeJobChannel <- activeJob:
			jobQ = jobQ[1:]
			jobChannelQ = jobChannelQ[1:]
		}
	}
}
|
package handler
import (
"encoding/json"
"fmt"
"github.com/gorilla/websocket"
"github.com/tiagorlampert/CHAOS/client/app/entities"
"github.com/tiagorlampert/CHAOS/client/app/environment"
"github.com/tiagorlampert/CHAOS/client/app/gateways"
ws "github.com/tiagorlampert/CHAOS/client/app/infrastructure/websocket"
"github.com/tiagorlampert/CHAOS/client/app/services"
"github.com/tiagorlampert/CHAOS/client/app/utils/encode"
"net/http"
"time"
)
// Handler owns the client's websocket connection to the server and
// dispatches incoming commands to the appropriate services.
type Handler struct {
	Connection    *websocket.Conn
	Configuration *environment.Configuration
	Gateway       gateways.Gateway
	Services      *services.Services
	ClientID      string
	// Connected reflects the last known connection state; it is flipped by
	// KeepConnection/Reconnect and read by HandleCommand.
	Connected bool
}
// NewHandler builds a Handler with the given dependencies. The websocket
// connection is not established here; see Reconnect.
func NewHandler(
	configuration *environment.Configuration,
	gateway gateways.Gateway,
	services *services.Services,
	clientID string,
) *Handler {
	h := Handler{
		Configuration: configuration,
		Gateway:       gateway,
		Services:      services,
		ClientID:      clientID,
	}
	return &h
}
// KeepConnection loops forever, periodically verifying that the server is
// reachable and re-sending this device's specs; it updates h.Connected so
// HandleCommand knows when to reconnect. Each failed attempt waits
// sleepTime before retrying.
func (h *Handler) KeepConnection() {
	sleepTime := 30 * time.Second
	for {
		// While connected, only re-check every sleepTime.
		if h.Connected {
			time.Sleep(sleepTime)
		}
		err := h.ServerIsAvailable()
		if err != nil {
			h.Log("[!] Error connecting with server: " + err.Error())
			h.Connected = false
			time.Sleep(sleepTime)
			continue
		}
		err = h.SendDeviceSpecs()
		if err != nil {
			h.Log("[!] Error connecting with server: " + err.Error())
			h.Connected = false
			time.Sleep(sleepTime)
			continue
		}
		h.Connected = true
	}
}
// Log writes the given values to stdout; thin wrapper kept as the single
// logging point for the handler.
func (h *Handler) Log(v ...any) {
	fmt.Println(v...)
}
// ServerIsAvailable checks the server's health endpoint, returning nil when
// it responds with HTTP 200 and an error (carrying the response body)
// otherwise.
//
// FIX: the original passed the response body to fmt.Errorf as the format
// string; any '%' in the body would be misinterpreted as a verb (and go vet
// flags non-constant format strings). Use an explicit "%s" verb instead.
func (h *Handler) ServerIsAvailable() error {
	url := fmt.Sprint(h.Configuration.Server.Url, "health")
	res, err := h.Gateway.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("%s", res.ResponseBody)
	}
	return nil
}
// SendDeviceSpecs posts this device's specifications, JSON-encoded, to the
// server's "device" endpoint; any non-200 response is reported as an error.
func (h *Handler) SendDeviceSpecs() error {
	specs, err := h.Services.Information.LoadDeviceSpecs()
	if err != nil {
		return err
	}
	payload, err := json.Marshal(specs)
	if err != nil {
		return err
	}
	endpoint := fmt.Sprint(h.Configuration.Server.Url, "device")
	res, err := h.Gateway.NewRequest(http.MethodPost, endpoint, payload)
	if err != nil {
		return err
	}
	if res.StatusCode == http.StatusOK {
		return nil
	}
	return fmt.Errorf("error with status code %d", res.StatusCode)
}
// Reconnect marks the handler disconnected and retries the websocket
// connection every 10 seconds until it succeeds, then stores the new
// connection and marks the handler connected.
func (h *Handler) Reconnect() {
	h.Connected = false
	const retryDelay = time.Second * 10
	for {
		conn, dialErr := ws.NewConnection(h.Configuration, h.ClientID)
		if dialErr == nil {
			h.Connection = conn
			h.Connected = true
			h.Log("[*] Successfully connected")
			return
		}
		h.Log("[!] Error connecting on WS: ", dialErr.Error())
		time.Sleep(retryDelay)
	}
}
// HandleCommand reads command requests from the websocket connection in an
// endless loop, dispatches each to the matching service, and writes the
// result (or the error text, with HasError set) back as a binary message.
// Lost connections are re-established via Reconnect.
//
// FIX: the "getos" error path previously used `continue`, which skipped
// sending any response for the failed command — unlike every other
// command's error path. It now breaks out of the switch so the error is
// reported to the server. The redundant trailing `break`s (implicit in Go
// switches) were removed.
func (h *Handler) HandleCommand() {
	for {
		if !h.Connected {
			h.Reconnect()
			continue
		}
		_, message, err := h.Connection.ReadMessage()
		if err != nil {
			h.Log("[!] Error reading from connection:", err)
			h.Reconnect()
			continue
		}
		var request entities.Command
		if err := json.Unmarshal(message, &request); err != nil {
			// Unparseable request: nothing sensible to answer.
			continue
		}
		var response []byte
		var hasError bool
		switch request.Command {
		case "getos":
			deviceSpecs, err := h.Services.Information.LoadDeviceSpecs()
			if err != nil {
				hasError = true
				response = encode.StringToByte(err.Error())
				break
			}
			response = encode.StringToByte(encode.PrettyJson(deviceSpecs))
		case "screenshot":
			screenshot, err := h.Services.Screenshot.TakeScreenshot()
			if err != nil {
				hasError = true
				response = encode.StringToByte(err.Error())
				break
			}
			response = screenshot
		case "restart":
			if err := h.Services.OS.Restart(); err != nil {
				hasError = true
				response = encode.StringToByte(err.Error())
			}
		case "shutdown":
			if err := h.Services.OS.Shutdown(); err != nil {
				hasError = true
				response = encode.StringToByte(err.Error())
			}
		case "lock":
			if err := h.Services.OS.Lock(); err != nil {
				hasError = true
				response = encode.StringToByte(err.Error())
			}
		case "sign-out":
			if err := h.Services.OS.SignOut(); err != nil {
				hasError = true
				response = encode.StringToByte(err.Error())
			}
		case "explore":
			fileExplorer, err := h.Services.Explorer.ExploreDirectory(request.Parameter)
			if err != nil {
				response = encode.StringToByte(err.Error())
				hasError = true
				break
			}
			explorerBytes, _ := json.Marshal(fileExplorer)
			response = explorerBytes
		case "download":
			res, err := h.Services.Upload.UploadFile(request.Parameter)
			if err != nil {
				response = encode.StringToByte(err.Error())
				hasError = true
				break
			}
			response = res
		case "delete":
			if err := h.Services.Delete.DeleteFile(request.Parameter); err != nil {
				response = encode.StringToByte(err.Error())
				hasError = true
			}
		case "upload":
			res, err := h.Services.Download.DownloadFile(request.Parameter)
			if err != nil {
				response = encode.StringToByte(err.Error())
				hasError = true
				break
			}
			response = res
		case "open-url":
			if err := h.Services.URL.OpenURL(request.Parameter); err != nil {
				response = encode.StringToByte(err.Error())
				hasError = true
			}
		default:
			// Anything unrecognized is executed as a terminal command.
			response, err = h.RunCommand(request.Command)
			if err != nil {
				hasError = true
				response = encode.StringToByte(err.Error())
			}
		}
		body, err := json.Marshal(entities.Command{
			ClientID: h.ClientID,
			Response: response,
			HasError: hasError,
		})
		if err != nil {
			continue
		}
		if err := h.Connection.WriteMessage(websocket.BinaryMessage, body); err != nil {
			continue
		}
	}
}
// RunCommand executes command through the terminal service and returns its
// raw output.
func (h *Handler) RunCommand(command string) ([]byte, error) {
	return h.Services.Terminal.Run(command)
}
|
package main
import (
"database/sql"
"fmt"
"log"
_ "github.com/lib/pq"
)
// Connection parameters for the local Postgres/CockroachDB instance.
// NOTE(review): root user with an empty password is fine for a local demo
// but must not be used against a shared database.
const (
	dbHost     = "localhost"
	dbPort     = "5432"
	dbUser     = "root"
	dbPassword = ""
	dbName     = "postgres"
)
// SQLDB - Books DB Object / Bookshelf
// SQLDB - Books DB Object / Bookshelf: wraps a database handle plus the
// name of the table all CRUD methods operate on.
type SQLDB struct {
	db    *sql.DB
	table string
}
// Book - A book info; fields mirror the table columns (_id, title, author).
type Book struct {
	_id    int
	title  string
	author string
}
// createTable creates the bookshelf table if it does not already exist and
// returns any execution error (which is also printed).
func (bookshelf *SQLDB) createTable() (err error) {
	ddl := `
	CREATE TABLE IF NOT EXISTS "` + bookshelf.table + `"
	(
	"_id" serial NOT NULL,
	"title" character varying(255) NOT NULL,
	"author" character varying(255) NOT NULL,
	-- "created" date,
	-- created_at timestamp with time zone DEFAULT current_timestamp,
	CONSTRAINT userinfo_pkey PRIMARY KEY ("_id")
	)
	-- ) WITH (OIDS=FALSE); // Not work with CockroachDB`
	var result sql.Result
	result, err = bookshelf.db.Exec(ddl)
	if err != nil {
		fmt.Println("Table Creation Error: ", result, err)
	}
	return
}
// getBook fetches the single book whose "_id" equals bookID (cRud). When no
// row matches, the zero Book is returned with a nil error.
//
// FIX: the original never closed rows, leaking the result set/cursor on
// every call; rows are now closed via defer once the query succeeds.
func (bookshelf *SQLDB) getBook(bookID int) (Book, error) {
	result := Book{}
	rows, err := bookshelf.db.Query(`SELECT * FROM "`+bookshelf.table+`" where "_id"=$1`, bookID)
	if err != nil {
		return result, err
	}
	defer rows.Close()
	for rows.Next() {
		err = rows.Scan(&result._id, &result.title, &result.author)
		if err != nil {
			fmt.Println("Get Book Error: ", err)
		}
	}
	return result, err
}
// Select - cRud (All)
func (bookshelf *SQLDB) allBooks() ([]Book, error) {
books := []Book{}
rows, err := bookshelf.db.Query(`SELECT * FROM "` + bookshelf.table + `" order by "_id"`)
defer rows.Close()
if err == nil {
for rows.Next() {
currentBook := Book{}
err = rows.Scan(¤tBook._id, ¤tBook.title, ¤tBook.author)
if err == nil {
books = append(books, currentBook)
} else {
fmt.Println("Get All Books Error: ", err)
}
}
} else {
return books, err
}
return books, err
}
// insertBook adds book to the table (Crud) and returns the generated "_id".
func (bookshelf *SQLDB) insertBook(book Book) (int, error) {
	var newID int
	row := bookshelf.db.QueryRow(
		`INSERT INTO "`+bookshelf.table+`"("title","author") VALUES($1,$2) RETURNING _id`,
		book.title, book.author)
	if scanErr := row.Scan(&newID); scanErr != nil {
		return 0, scanErr
	}
	return newID, nil
}
// updateBook overwrites the title/author of the row with the given _id
// (crUd) and returns the number of rows affected.
func (bookshelf *SQLDB) updateBook(_id int, book Book) (int, error) {
	res, execErr := bookshelf.db.Exec(
		`UPDATE "`+bookshelf.table+`" SET "title"=$1,"author"=$2 WHERE "_id"=$3 RETURNING "_id"`,
		book.title, book.author, _id)
	if execErr != nil {
		return 0, execErr
	}
	count, raErr := res.RowsAffected()
	if raErr != nil {
		return 0, raErr
	}
	return int(count), nil
}
// removeBook deletes the row with the given "_id" (cruD) and returns the
// number of rows removed.
func (bookshelf *SQLDB) removeBook(bookID int) (int, error) {
	res, execErr := bookshelf.db.Exec(`DELETE FROM "`+bookshelf.table+`" WHERE "_id"=$1`, bookID)
	if execErr != nil {
		return 0, execErr
	}
	count, raErr := res.RowsAffected()
	if raErr != nil {
		return 0, raErr
	}
	return int(count), nil
}
// main demonstrates the full CRUD cycle against the "novel" table: create
// the table, insert a book, read it back, correct the author, list all
// rows, and delete the row. Any failure aborts via log.Fatal.
//
// FIX: the createTable error was previously discarded at the call site;
// the program now stops instead of running CRUD against a missing table.
func main() {
	bookshelf := SQLDB{}
	dbinfo := fmt.Sprintf(
		"host='%s' port='%s' user='%s' password='%s' dbname='%s' sslmode='disable'",
		dbHost, dbPort, dbUser, dbPassword, dbName)
	var err error
	bookshelf.db, err = sql.Open("postgres", dbinfo)
	if err != nil {
		log.Fatal(err)
	}
	defer bookshelf.db.Close()
	// Create table
	bookshelf.table = "novel"
	if err = bookshelf.createTable(); err != nil {
		log.Fatal(err)
	}
	// Insert
	newBook := Book{title: "표본실의 청개구리", author: "현진건"}
	newBookID, err := bookshelf.insertBook(newBook)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Inserted book ID: ", newBookID)
	// Select an item
	book, _ := bookshelf.getBook(newBookID)
	fmt.Println("Inserted Book: ", book)
	fmt.Println("---- Wrong author ----")
	// Update item
	book.author = "염상섭"
	updateCount, err := bookshelf.updateBook(newBookID, book)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Updated count: ", updateCount)
	fmt.Println("---- Author corrected ----")
	// Select all
	books, _ := bookshelf.allBooks()
	fmt.Println("Rest of books: ", books)
	// Delete an item
	deleted, err := bookshelf.removeBook(newBookID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Deleted count: ", deleted)
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resourcegrouptest_test
import (
"context"
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/pingcap/failpoint"
rmpb "github.com/pingcap/kvproto/pkg/resource_manager"
"github.com/pingcap/tidb/ddl/resourcegroup"
"github.com/pingcap/tidb/ddl/util/callback"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
mysql "github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
// TestResourceGroupBasic exercises the resource group DDL surface end to end:
// the tidb_enable_resource_control switch, CREATE/ALTER/DROP (including the
// IF [NOT] EXISTS variants), option and QUERY_LIMIT validation, the
// information_schema.resource_groups view, SHOW CREATE RESOURCE GROUP, and
// user-to-group bindings.
func TestResourceGroupBasic(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	re := require.New(t)

	// Capture the schema ID assigned to groups "x"/"y" while the create DDL
	// job runs so it can be compared with what the info schema reports.
	hook := &callback.TestDDLCallback{Do: dom}
	var groupID atomic.Int64
	onJobUpdatedExportedFunc := func(job *model.Job) {
		// job.SchemaID will be assigned when the group is created.
		if (job.SchemaName == "x" || job.SchemaName == "y") && job.Type == model.ActionCreateResourceGroup && job.SchemaID != 0 {
			groupID.Store(job.SchemaID)
			return
		}
	}
	hook.OnJobUpdatedExported.Store(&onJobUpdatedExportedFunc)
	dom.DDL().SetHook(hook)

	// All resource-group DDL must be rejected while the feature switch is off.
	tk.MustExec("set global tidb_enable_resource_control = 'off'")
	tk.MustGetErrCode("create user usr1 resource group rg1", mysql.ErrResourceGroupSupportDisabled)
	tk.MustExec("create user usr1")
	tk.MustGetErrCode("alter user usr1 resource group rg1", mysql.ErrResourceGroupSupportDisabled)
	tk.MustGetErrCode("create resource group x RU_PER_SEC=1000 ", mysql.ErrResourceGroupSupportDisabled)
	tk.MustExec("set global tidb_enable_resource_control = 'on'")

	// test default resource group.
	tk.MustQuery("select * from information_schema.resource_groups where name = 'default'").Check(testkit.Rows("default UNLIMITED MEDIUM YES <nil> <nil>"))
	tk.MustExec("alter resource group `default` PRIORITY=LOW")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'default'").Check(testkit.Rows("default UNLIMITED LOW YES <nil> <nil>"))
	tk.MustExec("alter resource group `default` ru_per_sec=1000")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'default'").Check(testkit.Rows("default 1000 LOW YES <nil> <nil>"))
	tk.MustContainErrMsg("drop resource group `default`", "can't drop reserved resource group")

	tk.MustExec("create resource group x RU_PER_SEC=1000")
	checkFunc := func(groupInfo *model.ResourceGroupInfo) {
		require.Equal(t, true, groupInfo.ID != 0)
		require.Equal(t, "x", groupInfo.Name.L)
		require.Equal(t, groupID.Load(), groupInfo.ID)
		require.Equal(t, uint64(1000), groupInfo.RURate)
		require.Nil(t, groupInfo.Runaway)
	}
	// Check the group is correctly reloaded in the information schema.
	g := testResourceGroupNameFromIS(t, tk.Session(), "x")
	checkFunc(g)

	// test create if not exists
	tk.MustExec("create resource group if not exists x RU_PER_SEC=10000")
	// Check the resource group is not changed
	g = testResourceGroupNameFromIS(t, tk.Session(), "x")
	checkFunc(g)
	// Check warning message
	res := tk.MustQuery("show warnings")
	res.Check(testkit.Rows("Note 8248 Resource group 'x' already exists"))

	tk.MustExec("set global tidb_enable_resource_control = off")
	tk.MustGetErrCode("alter resource group x RU_PER_SEC=2000 ", mysql.ErrResourceGroupSupportDisabled)
	tk.MustGetErrCode("drop resource group x ", mysql.ErrResourceGroupSupportDisabled)
	tk.MustExec("set global tidb_enable_resource_control = DEFAULT")
	tk.MustGetErrCode("create resource group x RU_PER_SEC=1000 ", mysql.ErrResourceGroupExists)

	tk.MustExec("alter resource group x RU_PER_SEC=2000 BURSTABLE QUERY_LIMIT=(EXEC_ELAPSED='15s' ACTION DRYRUN WATCH SIMILAR DURATION '10m0s')")
	g = testResourceGroupNameFromIS(t, tk.Session(), "x")
	re.Equal(uint64(2000), g.RURate)
	re.Equal(int64(-1), g.BurstLimit)
	re.Equal(uint64(time.Second*15/time.Millisecond), g.Runaway.ExecElapsedTimeMs)
	re.Equal(model.RunawayActionDryRun, g.Runaway.Action)
	re.Equal(model.WatchSimilar, g.Runaway.WatchType)
	re.Equal(int64(time.Minute*10/time.Millisecond), g.Runaway.WatchDurationMs)

	tk.MustExec("alter resource group x QUERY_LIMIT=(EXEC_ELAPSED='20s' ACTION DRYRUN WATCH SIMILAR)")
	g = testResourceGroupNameFromIS(t, tk.Session(), "x")
	re.Equal(uint64(2000), g.RURate)
	re.Equal(int64(-1), g.BurstLimit)
	re.Equal(uint64(time.Second*20/time.Millisecond), g.Runaway.ExecElapsedTimeMs)
	re.Equal(model.RunawayActionDryRun, g.Runaway.Action)
	re.Equal(model.WatchSimilar, g.Runaway.WatchType)
	re.Equal(int64(0), g.Runaway.WatchDurationMs)
	tk.MustQuery("select * from information_schema.resource_groups where name = 'x'").Check(testkit.Rows("x 2000 MEDIUM YES EXEC_ELAPSED='20s', ACTION=DRYRUN, WATCH=SIMILAR DURATION=UNLIMITED <nil>"))

	tk.MustExec("drop resource group x")
	g = testResourceGroupNameFromIS(t, tk.Session(), "x")
	re.Nil(g)

	tk.MustExec("alter resource group if exists not_exists RU_PER_SEC=2000")
	// Check warning message
	res = tk.MustQuery("show warnings")
	res.Check(testkit.Rows("Note 8249 Unknown resource group 'not_exists'"))

	tk.MustExec("create resource group y RU_PER_SEC=4000")
	checkFunc = func(groupInfo *model.ResourceGroupInfo) {
		re.Equal(true, groupInfo.ID != 0)
		re.Equal("y", groupInfo.Name.L)
		re.Equal(groupID.Load(), groupInfo.ID)
		re.Equal(uint64(4000), groupInfo.RURate)
		re.Equal(int64(4000), groupInfo.BurstLimit)
	}
	g = testResourceGroupNameFromIS(t, tk.Session(), "y")
	checkFunc(g)
	tk.MustExec("alter resource group y BURSTABLE RU_PER_SEC=5000 QUERY_LIMIT=(EXEC_ELAPSED='15s' ACTION KILL)")
	checkFunc = func(groupInfo *model.ResourceGroupInfo) {
		re.Equal(true, groupInfo.ID != 0)
		re.Equal("y", groupInfo.Name.L)
		re.Equal(groupID.Load(), groupInfo.ID)
		re.Equal(uint64(5000), groupInfo.RURate)
		re.Equal(int64(-1), groupInfo.BurstLimit)
		re.Equal(uint64(time.Second*15/time.Millisecond), groupInfo.Runaway.ExecElapsedTimeMs)
		re.Equal(model.RunawayActionKill, groupInfo.Runaway.Action)
		re.Equal(int64(0), groupInfo.Runaway.WatchDurationMs)
	}
	g = testResourceGroupNameFromIS(t, tk.Session(), "y")
	checkFunc(g)
	tk.MustQuery("select * from information_schema.resource_groups where name = 'y'").Check(testkit.Rows("y 5000 MEDIUM YES EXEC_ELAPSED='15s', ACTION=KILL <nil>"))
	tk.MustExec("drop resource group y")
	g = testResourceGroupNameFromIS(t, tk.Session(), "y")
	re.Nil(g)

	// Invalid or duplicated option combinations must be rejected.
	tk.MustGetErrCode("create resource group x ru_per_sec=1000 ru_per_sec=200", mysql.ErrParse)
	tk.MustContainErrMsg("create resource group x ru_per_sec=1000 ru_per_sec=200, ru_per_sec=300", "Dupliated options specified")
	tk.MustGetErrCode("create resource group x burstable, burstable", mysql.ErrParse)
	tk.MustContainErrMsg("create resource group x burstable, burstable", "Dupliated options specified")
	tk.MustGetErrCode("create resource group x ru_per_sec=1000, burstable, burstable", mysql.ErrParse)
	tk.MustContainErrMsg("create resource group x ru_per_sec=1000, burstable, burstable", "Dupliated options specified")
	tk.MustGetErrCode("create resource group x burstable, ru_per_sec=1000, burstable", mysql.ErrParse)
	tk.MustContainErrMsg("create resource group x burstable, ru_per_sec=1000, burstable", "Dupliated options specified")
	tk.MustContainErrMsg("create resource group x ru_per_sec=1000 burstable QUERY_LIMIT=(EXEC_ELAPSED='15s' action kill action cooldown)", "Dupliated runaway options specified")
	tk.MustContainErrMsg("create resource group x ru_per_sec=1000 QUERY_LIMIT=(EXEC_ELAPSED='15s') burstable priority=Low, QUERY_LIMIT=(EXEC_ELAPSED='15s')", "Dupliated options specified")
	tk.MustContainErrMsg("create resource group x ru_per_sec=1000 QUERY_LIMIT=(EXEC_ELAPSED='15s') QUERY_LIMIT=(EXEC_ELAPSED='15s')", "Dupliated options specified")
	tk.MustContainErrMsg("create resource group x ru_per_sec=1000 QUERY_LIMIT=(action kill)", "invalid exec elapsed time")
	tk.MustGetErrCode("create resource group x ru_per_sec=1000 QUERY_LIMIT=(EXEC_ELAPSED='15s' action kil)", mysql.ErrParse)
	tk.MustContainErrMsg("create resource group x ru_per_sec=1000 QUERY_LIMIT=(EXEC_ELAPSED='15s')", "unknown resource group runaway action")
	tk.MustGetErrCode("create resource group x ru_per_sec=1000 EXEC_ELAPSED='15s' action kill", mysql.ErrParse)
	tk.MustContainErrMsg("create resource group x ru_per_sec=1000 QUERY_LIMIT=(EXEC_ELAPSED='15d' action kill)", "unknown unit \"d\"")

	groups, err := infosync.ListResourceGroups(context.TODO())
	// Fix: check the error before using `groups`; on failure `groups` would
	// be meaningless and the length assertion would fail confusingly.
	re.NoError(err)
	re.Equal(1, len(groups))

	// Check information schema table information_schema.resource_groups
	tk.MustExec("create resource group x RU_PER_SEC=1000 PRIORITY=LOW")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'x'").Check(testkit.Rows("x 1000 LOW NO <nil> <nil>"))
	tk.MustExec("alter resource group x RU_PER_SEC=2000 BURSTABLE QUERY_LIMIT=(EXEC_ELAPSED='15s' action kill)")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'x'").Check(testkit.Rows("x 2000 LOW YES EXEC_ELAPSED='15s', ACTION=KILL <nil>"))
	tk.MustQuery("show create resource group x").Check(testkit.Rows("x CREATE RESOURCE GROUP `x` RU_PER_SEC=2000, PRIORITY=LOW, BURSTABLE, QUERY_LIMIT=(EXEC_ELAPSED=\"15s\" ACTION=KILL)"))

	tk.MustExec("CREATE RESOURCE GROUP `x_new` RU_PER_SEC=2000 PRIORITY=LOW BURSTABLE=true QUERY_LIMIT=(EXEC_ELAPSED=\"15s\" ACTION=KILL)")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'x_new'").Check(testkit.Rows("x_new 2000 LOW YES EXEC_ELAPSED='15s', ACTION=KILL <nil>"))

	tk.MustExec("alter resource group x BURSTABLE=false RU_PER_SEC=3000")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'x'").Check(testkit.Rows("x 3000 LOW NO EXEC_ELAPSED='15s', ACTION=KILL <nil>"))
	tk.MustQuery("show create resource group x").Check(testkit.Rows("x CREATE RESOURCE GROUP `x` RU_PER_SEC=3000, PRIORITY=LOW, QUERY_LIMIT=(EXEC_ELAPSED=\"15s\" ACTION=KILL)"))

	tk.MustExec("create resource group y BURSTABLE RU_PER_SEC=2000 QUERY_LIMIT=(EXEC_ELAPSED='1s' action COOLDOWN WATCH EXACT duration '1h')")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'y'").Check(testkit.Rows("y 2000 MEDIUM YES EXEC_ELAPSED='1s', ACTION=COOLDOWN, WATCH=EXACT DURATION='1h0m0s' <nil>"))
	tk.MustQuery("show create resource group y").Check(testkit.Rows("y CREATE RESOURCE GROUP `y` RU_PER_SEC=2000, PRIORITY=MEDIUM, BURSTABLE, QUERY_LIMIT=(EXEC_ELAPSED=\"1s\" ACTION=COOLDOWN WATCH=EXACT DURATION=\"1h0m0s\")"))
	tk.MustExec("CREATE RESOURCE GROUP `y_new` RU_PER_SEC=2000 PRIORITY=MEDIUM QUERY_LIMIT=(EXEC_ELAPSED=\"1s\" ACTION=COOLDOWN WATCH EXACT DURATION=\"1h0m0s\")")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'y_new'").Check(testkit.Rows("y_new 2000 MEDIUM NO EXEC_ELAPSED='1s', ACTION=COOLDOWN, WATCH=EXACT DURATION='1h0m0s' <nil>"))
	tk.MustExec("alter resource group y_new RU_PER_SEC=3000")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'y_new'").Check(testkit.Rows("y_new 3000 MEDIUM NO EXEC_ELAPSED='1s', ACTION=COOLDOWN, WATCH=EXACT DURATION='1h0m0s' <nil>"))
	tk.MustExec("CREATE RESOURCE GROUP `z` RU_PER_SEC=2000 PRIORITY=MEDIUM QUERY_LIMIT=(EXEC_ELAPSED=\"1s\" ACTION=COOLDOWN WATCH PLAN DURATION=\"1h0m0s\")")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'z'").Check(testkit.Rows("z 2000 MEDIUM NO EXEC_ELAPSED='1s', ACTION=COOLDOWN, WATCH=PLAN DURATION='1h0m0s' <nil>"))

	tk.MustExec("alter resource group y RU_PER_SEC=4000")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'y'").Check(testkit.Rows("y 4000 MEDIUM YES EXEC_ELAPSED='1s', ACTION=COOLDOWN, WATCH=EXACT DURATION='1h0m0s' <nil>"))
	tk.MustQuery("show create resource group y").Check(testkit.Rows("y CREATE RESOURCE GROUP `y` RU_PER_SEC=4000, PRIORITY=MEDIUM, BURSTABLE, QUERY_LIMIT=(EXEC_ELAPSED=\"1s\" ACTION=COOLDOWN WATCH=EXACT DURATION=\"1h0m0s\")"))
	tk.MustExec("alter resource group y RU_PER_SEC=4000 PRIORITY=HIGH BURSTABLE")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'y'").Check(testkit.Rows("y 4000 HIGH YES EXEC_ELAPSED='1s', ACTION=COOLDOWN, WATCH=EXACT DURATION='1h0m0s' <nil>"))
	tk.MustQuery("show create resource group y").Check(testkit.Rows("y CREATE RESOURCE GROUP `y` RU_PER_SEC=4000, PRIORITY=HIGH, BURSTABLE, QUERY_LIMIT=(EXEC_ELAPSED=\"1s\" ACTION=COOLDOWN WATCH=EXACT DURATION=\"1h0m0s\")"))
	tk.MustQuery("select count(*) from information_schema.resource_groups").Check(testkit.Rows("6"))

	// Users may only reference existing resource groups.
	tk.MustGetErrCode("create user usr_fail resource group nil_group", mysql.ErrResourceGroupNotExists)
	tk.MustContainErrMsg("create user usr_fail resource group nil_group", "Unknown resource group 'nil_group'")
	tk.MustExec("create user user2")
	tk.MustGetErrCode("alter user user2 resource group nil_group", mysql.ErrResourceGroupNotExists)
	tk.MustContainErrMsg("alter user user2 resource group nil_group", "Unknown resource group 'nil_group'")

	tk.MustExec("create resource group do_not_delete_rg ru_per_sec=100")
	tk.MustExec("create user usr3 resource group do_not_delete_rg")
	tk.MustQuery("select user_attributes from mysql.user where user = 'usr3'").Check(testkit.Rows(`{"resource_group": "do_not_delete_rg"}`))
	tk.MustContainErrMsg("drop resource group do_not_delete_rg", "user [usr3] depends on the resource group to drop")
	tk.MustExec("alter user usr3 resource group `default`")
	tk.MustExec("alter user usr3 resource group ``")
	tk.MustExec("alter user usr3 resource group `DeFault`")
	tk.MustQuery("select user_attributes from mysql.user where user = 'usr3'").Check(testkit.Rows(`{"resource_group": "default"}`))

	// Background task settings are accepted on the default group only.
	tk.MustExec("alter resource group default ru_per_sec = 1000, priority = medium, background = (task_types = 'lightning, BR');")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'default'").Check(testkit.Rows("default 1000 MEDIUM YES <nil> TASK_TYPES='lightning,br'"))
	tk.MustQuery("show create resource group default").Check(testkit.Rows("default CREATE RESOURCE GROUP `default` RU_PER_SEC=1000, PRIORITY=MEDIUM, BURSTABLE, BACKGROUND=(TASK_TYPES='lightning,br')"))
	g = testResourceGroupNameFromIS(t, tk.Session(), "default")
	require.EqualValues(t, g.Background.JobTypes, []string{"lightning", "br"})
	tk.MustContainErrMsg("create resource group bg ru_per_sec = 1000 background = (task_types = 'lightning')", "unsupported operation")
	tk.MustContainErrMsg("alter resource group x background=(task_types='')", "unsupported operation")
	tk.MustGetErrCode("alter resource group default background=(task_types='a,b,c')", mysql.ErrResourceGroupInvalidBackgroundTaskName)
}
// testResourceGroupNameFromIS reloads the info schema and looks up the
// resource group with the given name, returning nil when it does not exist.
func testResourceGroupNameFromIS(t *testing.T, ctx sessionctx.Context, name string) *model.ResourceGroupInfo {
	dom := domain.GetDomain(ctx)
	// Force a reload so the lookup observes the latest schema version.
	require.NoError(t, dom.Reload())
	group, _ := dom.InfoSchema().ResourceGroupByName(model.NewCIStr(name))
	return group
}
// TestResourceGroupRunaway checks runaway-query handling: queries exceeding a
// group's EXEC_ELAPSED limit are interrupted (for ACTION=KILL), recorded in
// mysql.tidb_runaway_queries, and — when WATCH is configured — subsequent
// identical queries are rejected via the mysql.tidb_runaway_watch list.
func TestResourceGroupRunaway(t *testing.T) {
	// Speed up GC of runaway records so the eventually-empty checks finish fast.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/domain/FastRunawayGC", `return(true)`))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/domain/FastRunawayGC"))
	}()
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil))
	tk.MustExec("use test")
	tk.MustExec("create table t(a int)")
	tk.MustExec("insert into t values(1)")
	tk.MustExec("set global tidb_enable_resource_control='on'")
	// rg1: kill only; rg2: kill + exact-match watch for 1s; rg3: kill +
	// exact-match watch with unlimited duration.
	tk.MustExec("create resource group rg1 RU_PER_SEC=1000 QUERY_LIMIT=(EXEC_ELAPSED='50ms' ACTION=KILL)")
	tk.MustExec("create resource group rg2 BURSTABLE RU_PER_SEC=2000 QUERY_LIMIT=(EXEC_ELAPSED='50ms' action KILL WATCH EXACT duration '1s')")
	tk.MustExec("create resource group rg3 BURSTABLE RU_PER_SEC=2000 QUERY_LIMIT=(EXEC_ELAPSED='50ms' action KILL WATCH EXACT)")
	tk.MustQuery("select * from information_schema.resource_groups where name = 'rg2'").Check(testkit.Rows("rg2 2000 MEDIUM YES EXEC_ELAPSED='50ms', ACTION=KILL, WATCH=EXACT DURATION='1s' <nil>"))
	tk.MustQuery("select * from information_schema.resource_groups where name = 'rg3'").Check(testkit.Rows("rg3 2000 MEDIUM YES EXEC_ELAPSED='50ms', ACTION=KILL, WATCH=EXACT DURATION=UNLIMITED <nil>"))
	// Fast queries succeed in every group before the slowdown is injected.
	tk.MustQuery("select /*+ resource_group(rg1) */ * from t").Check(testkit.Rows("1"))
	tk.MustQuery("select /*+ resource_group(rg2) */ * from t").Check(testkit.Rows("1"))
	tk.MustQuery("select /*+ resource_group(rg3) */ * from t").Check(testkit.Rows("1"))
	// Slow down coprocessor requests (failpoint arg 60 — presumably ms) so
	// the 50ms EXEC_ELAPSED limit trips.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/copr/sleepCoprRequest", fmt.Sprintf("return(%d)", 60)))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/store/copr/sleepCoprRequest"))
	}()
	err := tk.QueryToErr("select /*+ resource_group(rg1) */ * from t")
	require.ErrorContains(t, err, "[executor:8253]Query execution was interrupted, identified as runaway query")
	tryInterval := time.Millisecond * 200
	maxWaitDuration := time.Second * 5
	// The killed query shows up in the record table, then is GC'ed away.
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, original_sql, match_type from mysql.tidb_runaway_queries", nil,
		testkit.Rows("rg1 select /*+ resource_group(rg1) */ * from t identify"), maxWaitDuration, tryInterval)
	// require.Len(t, tk.MustQuery("select SQL_NO_CACHE resource_group_name, original_sql, time from mysql.tidb_runaway_queries").Rows(), 0)
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, original_sql, time from mysql.tidb_runaway_queries", nil,
		nil, maxWaitDuration, tryInterval)
	// COOLDOWN and DRYRUN actions do not interrupt the query.
	tk.MustExec("alter resource group rg1 RU_PER_SEC=1000 QUERY_LIMIT=(EXEC_ELAPSED='100ms' ACTION=COOLDOWN)")
	tk.MustQuery("select /*+ resource_group(rg1) */ * from t").Check(testkit.Rows("1"))
	tk.MustExec("alter resource group rg1 RU_PER_SEC=1000 QUERY_LIMIT=(EXEC_ELAPSED='100ms' ACTION=DRYRUN)")
	tk.MustQuery("select /*+ resource_group(rg1) */ * from t").Check(testkit.Rows("1"))
	// rg2: the first run is identified as runaway; the repeat is rejected by
	// the watch list (quarantine) before execution.
	err = tk.QueryToErr("select /*+ resource_group(rg2) */ * from t")
	require.ErrorContains(t, err, "Query execution was interrupted, identified as runaway query")
	tk.MustGetErrCode("select /*+ resource_group(rg2) */ * from t", mysql.ErrResourceGroupQueryRunawayQuarantine)
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, original_sql, match_type from mysql.tidb_runaway_queries", nil,
		testkit.Rows("rg2 select /*+ resource_group(rg2) */ * from t identify",
			"rg2 select /*+ resource_group(rg2) */ * from t watch"), maxWaitDuration, tryInterval)
	tk.MustQuery("select SQL_NO_CACHE resource_group_name, watch_text from mysql.tidb_runaway_watch").
		Check(testkit.Rows("rg2 select /*+ resource_group(rg2) */ * from t"))
	// Records and rg2's 1s watch entry are eventually GC'ed.
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, original_sql, time from mysql.tidb_runaway_queries", nil,
		nil, maxWaitDuration, tryInterval)
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, watch_text, end_time from mysql.tidb_runaway_watch", nil,
		nil, maxWaitDuration, tryInterval)
	// rg3's watch has unlimited duration, so its entry persists.
	err = tk.QueryToErr("select /*+ resource_group(rg3) */ * from t")
	require.ErrorContains(t, err, "Query execution was interrupted, identified as runaway query")
	tk.MustGetErrCode("select /*+ resource_group(rg3) */ * from t", mysql.ErrResourceGroupQueryRunawayQuarantine)
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, watch_text from mysql.tidb_runaway_watch", nil,
		testkit.Rows("rg3 select /*+ resource_group(rg3) */ * from t"), maxWaitDuration, tryInterval)
	tk.MustExec("alter resource group rg2 RU_PER_SEC=1000 QUERY_LIMIT=(EXEC_ELAPSED='50ms' ACTION=COOLDOWN)")
	tk.MustQuery("select /*+ resource_group(rg2) */ * from t").Check(testkit.Rows("1"))
	tk.MustExec("alter resource group rg2 RU_PER_SEC=1000 QUERY_LIMIT=(EXEC_ELAPSED='50ms' ACTION=DRYRUN)")
	tk.MustQuery("select /*+ resource_group(rg2) */ * from t").Check(testkit.Rows("1"))
	// rg3 keeps rejecting the quarantined statement even after rg2 relaxed.
	tk.MustGetErrCode("select /*+ resource_group(rg3) */ * from t", mysql.ErrResourceGroupQueryRunawayQuarantine)
}
// TestResourceGroupHint verifies the RESOURCE_GROUP() optimizer hint: it sets
// the per-statement group, the last duplicate wins with a warning, and it is
// ignored (with a warning) when resource control is disabled.
func TestResourceGroupHint(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t1(c1 int)")
	tk.MustExec("insert into t1 values(1)")
	tk.MustExec("set global tidb_enable_resource_control='on'")
	tk.MustExec("create resource group rg1 ru_per_sec=1000")
	tk.MustQuery("select /*+ resource_group(default) */ * from t1")
	tk.MustQuery("select /*+ resource_group(rg1) */ * from t1")
	// When the hint is repeated only the last occurrence takes effect.
	tk.MustQuery("select /*+ resource_group(rg1) resource_group(default) */ * from t1")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 RESOURCE_GROUP() is defined more than once, only the last definition takes effect: RESOURCE_GROUP(default)"))
	// The hinted group is visible for the current statement via processlist.
	tk.MustQuery("select /*+ resource_group(rg1) */ DB, RESOURCE_GROUP from information_schema.processlist").Check(testkit.Rows("test rg1"))
	tk.MustQuery("select DB, RESOURCE_GROUP from information_schema.processlist").Check(testkit.Rows("test default"))
	// With the feature switched off the hint is ignored and warned about.
	tk.MustExec("set global tidb_enable_resource_control='off'")
	tk.MustQuery("select /*+ resource_group(rg1) */ DB, RESOURCE_GROUP from information_schema.processlist").Check(testkit.Rows("test default"))
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 8250 Resource control feature is disabled. Run `SET GLOBAL tidb_enable_resource_control='on'` to enable the feature"))
}
// TestAlreadyExistsDefaultResourceGroup bootstraps a domain while a failpoint
// simulates a resource manager that already created some groups, then checks
// that exactly two groups are listed afterwards (i.e. bootstrap tolerates the
// pre-existing groups instead of failing).
func TestAlreadyExistsDefaultResourceGroup(t *testing.T) {
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/domain/infosync/managerAlreadyCreateSomeGroups", `return(true)`))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/domain/infosync/managerAlreadyCreateSomeGroups"))
	}()
	testkit.CreateMockStoreAndDomain(t)
	groups, _ := infosync.ListResourceGroups(context.TODO())
	require.Equal(t, 2, len(groups))
}
// TestNewResourceGroupFromOptions checks the conversion from parser-level
// resource group settings to the resource-manager protobuf representation,
// including error paths for unknown/duplicated modes and over-long names.
func TestNewResourceGroupFromOptions(t *testing.T) {
	type testCase struct {
		name      string
		groupName string
		input     *model.ResourceGroupSettings
		output    *rmpb.ResourceGroup
		err       error
	}
	const defaultName = "test"
	cases := []testCase{
		{
			name:  "empty 1",
			input: &model.ResourceGroupSettings{},
			err:   resourcegroup.ErrUnknownResourceGroupMode,
		},
		{
			name:  "empty 2",
			input: nil,
			err:   resourcegroup.ErrInvalidGroupSettings,
		},
		{
			name:  "normal case: ru case 1",
			input: &model.ResourceGroupSettings{RURate: 2000, Priority: 0},
			output: &rmpb.ResourceGroup{
				Name:     defaultName,
				Mode:     rmpb.GroupMode_RUMode,
				Priority: 0,
				RUSettings: &rmpb.GroupRequestUnitSettings{
					RU: &rmpb.TokenBucket{Settings: &rmpb.TokenLimitSettings{FillRate: 2000}},
				},
			},
		},
		{
			name:  "normal case: ru case 2",
			input: &model.ResourceGroupSettings{RURate: 5000, Priority: 8},
			output: &rmpb.ResourceGroup{
				Name:     defaultName,
				Priority: 8,
				Mode:     rmpb.GroupMode_RUMode,
				RUSettings: &rmpb.GroupRequestUnitSettings{
					RU: &rmpb.TokenBucket{Settings: &rmpb.TokenLimitSettings{FillRate: 5000}},
				},
			},
		},
		{
			name: "error case: native case 1",
			input: &model.ResourceGroupSettings{
				CPULimiter:       "8",
				IOReadBandwidth:  "3000MB/s",
				IOWriteBandwidth: "3000Mi",
			},
			err: resourcegroup.ErrUnknownResourceGroupMode,
		},
		{
			name: "error case: native case 2",
			input: &model.ResourceGroupSettings{
				CPULimiter:       "8c",
				IOReadBandwidth:  "3000Mi",
				IOWriteBandwidth: "3000Mi",
			},
			err: resourcegroup.ErrUnknownResourceGroupMode,
		},
		{
			name: "error case: native case 3",
			input: &model.ResourceGroupSettings{
				CPULimiter:       "8",
				IOReadBandwidth:  "3000G",
				IOWriteBandwidth: "3000MB",
			},
			err: resourcegroup.ErrUnknownResourceGroupMode,
		},
		{
			name: "error case: duplicated mode",
			input: &model.ResourceGroupSettings{
				CPULimiter:       "8",
				IOReadBandwidth:  "3000Mi",
				IOWriteBandwidth: "3000Mi",
				RURate:           1000,
			},
			err: resourcegroup.ErrInvalidResourceGroupDuplicatedMode,
		},
		{
			name:      "error case: duplicated mode",
			groupName: "test_group_too_looooooooooooooooooooooooooooooooooooooooooooooooong",
			input: &model.ResourceGroupSettings{
				CPULimiter:       "8",
				IOReadBandwidth:  "3000Mi",
				IOWriteBandwidth: "3000Mi",
				RURate:           1000,
			},
			err: resourcegroup.ErrTooLongResourceGroupName,
		},
	}
	for _, tc := range cases {
		// A case may override the default group name (e.g. too-long name).
		name := defaultName
		if len(tc.groupName) > 0 {
			name = tc.groupName
		}
		group, err := resourcegroup.NewGroupFromOptions(name, tc.input)
		comment := fmt.Sprintf("[%s]\nerr1 %s\nerr2 %s", tc.name, err, tc.err)
		if tc.err != nil {
			require.ErrorIs(t, err, tc.err, comment)
		} else {
			require.NoError(t, err, comment)
			require.Equal(t, tc.output, group)
		}
	}
}
// TestBindHints checks that a RESOURCE_GROUP() hint carried by a global SQL
// binding applies only to the bound statement (StmtCtx.ResourceGroup) and
// never changes the session-level resource group.
func TestBindHints(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	re := require.New(t)
	tk.MustExec("drop resource group if exists rg1")
	tk.MustExec("create resource group rg1 RU_PER_SEC=1000")
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int)")
	tk.MustExec("create global binding for select * from t using select /*+ resource_group(rg1) */ * from t")
	// The binding matches this statement, so the hint's group takes effect
	// for the statement only.
	tk.MustQuery("select * from t")
	re.Equal("rg1", tk.Session().GetSessionVars().StmtCtx.ResourceGroup)
	re.Equal("default", tk.Session().GetSessionVars().ResourceGroupName)
	// A non-matching statement gets no statement-level group.
	tk.MustQuery("select a, b from t")
	re.Equal("", tk.Session().GetSessionVars().StmtCtx.ResourceGroup)
	re.Equal("default", tk.Session().GetSessionVars().ResourceGroupName)
}
|
package systemed
// Reboot restarts the system by running `systemctl reboot`, returning the
// command's combined output and error.
func Reboot() ([]byte, error) {
	cmd := NewSystemed("systemctl")
	cmd.SetArgs("reboot")
	return cmd.Exec()
}
// PowerOff shuts the system down and cuts power via `systemctl poweroff`.
func PowerOff() ([]byte, error) {
	cmd := NewSystemed("systemctl")
	cmd.SetArgs("poweroff")
	return cmd.Exec()
}
// Halt stops the CPU via `systemctl halt` (halts without powering off).
func Halt() ([]byte, error) {
	cmd := NewSystemed("systemctl")
	cmd.SetArgs("halt")
	return cmd.Exec()
}
// Suspend suspends the system (suspend-to-RAM) via `systemctl suspend`.
func Suspend() ([]byte, error) {
	cmd := NewSystemed("systemctl")
	cmd.SetArgs("suspend")
	return cmd.Exec()
}
// Hibernate puts the system into hibernation via `systemctl hibernate`.
func Hibernate() ([]byte, error) {
	cmd := NewSystemed("systemctl")
	cmd.SetArgs("hibernate")
	return cmd.Exec()
}
// HybridSleep puts the system into hybrid sleep via `systemctl hybrid-sleep`.
func HybridSleep() ([]byte, error) {
	cmd := NewSystemed("systemctl")
	cmd.SetArgs("hybrid-sleep")
	return cmd.Exec()
}
// Rescue boots the system into rescue (single-user) mode via
// `systemctl rescue`.
func Rescue() ([]byte, error) {
	cmd := NewSystemed("systemctl")
	cmd.SetArgs("rescue")
	return cmd.Exec()
}
|
package PDU
import (
"github.com/andrewz1/gosmpp/Data"
"github.com/andrewz1/gosmpp/Exception"
"github.com/andrewz1/gosmpp/Utils"
)
// UnbindResp represents the SMPP UNBIND_RESP PDU. It carries no mandatory
// body (see SetBody/GetBody), acknowledging an Unbind request with the
// header alone.
type UnbindResp struct {
	Response
}

// NewUnbindResp allocates and fully constructs an UnbindResp PDU.
func NewUnbindResp() *UnbindResp {
	a := &UnbindResp{}
	a.Construct()
	return a
}

// Construct initializes the embedded Response and sets the command id.
// SetRealReference is deferred so it runs after the base construction —
// presumably it wires up c.This, which SetBody/GetBody assert to IPDU.
func (c *UnbindResp) Construct() {
	defer c.SetRealReference(c)
	c.Response.Construct()
	c.SetCommandId(Data.UNBIND_RESP)
}

// GetInstance returns a fresh UnbindResp; used by PDU factory dispatch.
func (c *UnbindResp) GetInstance() (IPDU, error) {
	return NewUnbindResp(), nil
}

// SetBody is a no-op: UNBIND_RESP has no body, so nothing is read from buf.
func (c *UnbindResp) SetBody(buf *Utils.ByteBuffer) (err *Exception.Exception, source IPDU) {
	source = c.This.(IPDU)
	return nil, source
}

// GetBody returns a nil buffer since UNBIND_RESP serializes no body.
func (c *UnbindResp) GetBody() (buf *Utils.ByteBuffer, err *Exception.Exception, source IPDU) {
	source = c.This.(IPDU)
	return nil, nil, source
}
|
// This file was generated for SObject ContentDocument, API Version v43.0 at 2018-07-30 03:47:59.979096702 -0400 EDT m=+46.323441317
package sobjects
import (
"fmt"
"strings"
)
// ContentDocument mirrors the Salesforce ContentDocument SObject (generated
// for API v43.0 — see the file header). The `force` tags drive serialization;
// omitempty keeps unset fields out of API payloads.
type ContentDocument struct {
	BaseSObject
	ArchivedById             string `force:",omitempty"`
	ArchivedDate             string `force:",omitempty"`
	ContentAssetId           string `force:",omitempty"`
	ContentModifiedDate      string `force:",omitempty"`
	ContentSize              int    `force:",omitempty"`
	CreatedById              string `force:",omitempty"`
	CreatedDate              string `force:",omitempty"`
	Description              string `force:",omitempty"`
	FileExtension            string `force:",omitempty"`
	FileType                 string `force:",omitempty"`
	Id                       string `force:",omitempty"`
	IsArchived               bool   `force:",omitempty"`
	IsDeleted                bool   `force:",omitempty"`
	LastModifiedById         string `force:",omitempty"`
	LastModifiedDate         string `force:",omitempty"`
	LastReferencedDate       string `force:",omitempty"`
	LastViewedDate           string `force:",omitempty"`
	LatestPublishedVersionId string `force:",omitempty"`
	OwnerId                  string `force:",omitempty"`
	ParentId                 string `force:",omitempty"`
	PublishStatus            string `force:",omitempty"`
	SharingOption            string `force:",omitempty"`
	SharingPrivacy           string `force:",omitempty"`
	SystemModstamp           string `force:",omitempty"`
	Title                    string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type.
func (t *ContentDocument) ApiName() string {
	return "ContentDocument"
}
// String renders the document as a multi-line human-readable dump: a header
// with Id and Name followed by one tab-indented "Field: value" line per
// column, each value formatted with %v.
func (t *ContentDocument) String() string {
	fields := []struct {
		label string
		value interface{}
	}{
		{"ArchivedById", t.ArchivedById},
		{"ArchivedDate", t.ArchivedDate},
		{"ContentAssetId", t.ContentAssetId},
		{"ContentModifiedDate", t.ContentModifiedDate},
		{"ContentSize", t.ContentSize},
		{"CreatedById", t.CreatedById},
		{"CreatedDate", t.CreatedDate},
		{"Description", t.Description},
		{"FileExtension", t.FileExtension},
		{"FileType", t.FileType},
		{"Id", t.Id},
		{"IsArchived", t.IsArchived},
		{"IsDeleted", t.IsDeleted},
		{"LastModifiedById", t.LastModifiedById},
		{"LastModifiedDate", t.LastModifiedDate},
		{"LastReferencedDate", t.LastReferencedDate},
		{"LastViewedDate", t.LastViewedDate},
		{"LatestPublishedVersionId", t.LatestPublishedVersionId},
		{"OwnerId", t.OwnerId},
		{"ParentId", t.ParentId},
		{"PublishStatus", t.PublishStatus},
		{"SharingOption", t.SharingOption},
		{"SharingPrivacy", t.SharingPrivacy},
		{"SystemModstamp", t.SystemModstamp},
		{"Title", t.Title},
	}
	var b strings.Builder
	b.WriteString(fmt.Sprintf("ContentDocument #%s - %s\n", t.Id, t.Name))
	for _, f := range fields {
		b.WriteString(fmt.Sprintf("\t%s: %v\n", f.label, f.value))
	}
	return b.String()
}
// ContentDocumentQueryResponse is the SOQL query-result envelope whose
// records decode into ContentDocument rows.
type ContentDocumentQueryResponse struct {
	BaseQuery
	Records []ContentDocument `json:"Records" force:"records"`
}
|
package util_test
import (
"testing"
"github.com/tvacare/web-crawler/util"
)
// TestSliceContains checks SliceContains with one input expected to match an
// entry of the slice and one expected to match nothing.
func TestSliceContains(t *testing.T) {
	fruits := []string{"apple", "banana", "orange", "pear"}
	if ok, match := util.SliceContains("pineapple", fruits); !ok || match == "" {
		t.Errorf("Slice contains should have matched - %s, %v", "pineapple", fruits)
	}
	vehicles := []string{"bus", "subway", "airplane", "train"}
	if ok, match := util.SliceContains("car", vehicles); ok || match != "" {
		t.Errorf("Slice contains should not match - %s, %v", "car", vehicles)
	}
}
|
package files
import (
"bufio"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
)
// Paths of the tool's state files, all relative to $HOME.
const (
	// PROGRAM_HOME is the working directory holding fs_sync state files.
	PROGRAM_HOME = "/.fs_sync/"
	// SYNC_LIST_FILE stores one path per line to be synced.
	SYNC_LIST_FILE = "/.fs_sync/.synclist"
	// WHITE_LIST_FILE stores whitelist entries — presumably paths excluded
	// from syncing; confirm against the consumer of this file.
	WHITE_LIST_FILE = "/.fs_sync/.whitelist"
)

// SUFFIX_WHITELIST lists extensions that are skipped when adding files to
// the sync list (Go module metadata — see addAllPathInDirToFile).
var SUFFIX_WHITELIST = [...]string{".mod", ".sum"}
// FileSyncManager owns the open (append-mode) handles to the sync list and
// white list files.
type FileSyncManager struct {
	SyncListFile  *os.File
	WhiteListFile *os.File
	// NOTE(review): these counters are never updated in this file — confirm
	// they are maintained by callers.
	NumOfSyncFiles      int
	NumOfWhiteListFiles int
}
// init ensures the state directory and list files exist before any other
// package code runs.
func init() {
	log.Println("Initializing files...")
	createFileIfNotExist()
}
// createFileIfNotExist creates $HOME/.fs_sync plus the sync list and white
// list files when they are missing. Any failure is fatal because the program
// cannot operate without its state files.
func createFileIfNotExist() {
	home := os.Getenv("HOME")
	if err := os.MkdirAll(fmt.Sprintf("%s%s", home, PROGRAM_HOME), 0755); err != nil {
		log.Fatal(err)
	}
	createEmptyFileIfMissing(fmt.Sprintf("%s%s", home, SYNC_LIST_FILE))
	createEmptyFileIfMissing(fmt.Sprintf("%s%s", home, WHITE_LIST_FILE))
}

// createEmptyFileIfMissing creates an empty file at path when it does not
// already exist, closing the handle. (Fixes a leak: the original dropped the
// *os.File returned by os.Create without closing it.)
func createEmptyFileIfMissing(path string) {
	if _, err := os.Stat(path); !os.IsNotExist(err) {
		return
	}
	f, err := os.Create(path)
	if err != nil {
		log.Fatal(err)
	}
	f.Close()
}
// NewFileSyncManager returns a manager with both backing list files opened
// for appending.
func NewFileSyncManager() *FileSyncManager {
	return &FileSyncManager{
		SyncListFile:  getFile(SYNC_LIST_FILE),
		WhiteListFile: getFile(WHITE_LIST_FILE),
	}
}
// getFile opens (creating if necessary) the given $HOME-relative path in
// append-only, write-only mode. Failure to open is fatal.
func getFile(filePath string) *os.File {
	fullPath := fmt.Sprintf("%s%s", os.Getenv("HOME"), filePath)
	f, err := os.OpenFile(fullPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		log.Fatal(err)
	}
	return f
}
// GetFilesToSync reads the sync-list file from disk and returns its lines
// (one recorded path per line). It returns nil when the file cannot be
// opened or scanned.
//
// The file is re-opened by name because the manager's handle is opened
// write-only in getFile (O_WRONLY), so it cannot be read directly.
func (fsm *FileSyncManager) GetFilesToSync() []string {
	file, err := os.Open(fsm.SyncListFile.Name())
	if err != nil {
		return nil
	}
	// The original leaked this handle; close it once scanning is done.
	defer file.Close()

	lines := make([]string, 0)
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		// Keep the original contract (nil on failure) rather than
		// returning a partially read list.
		return nil
	}
	return lines
}
// AddFileToSynclist records filePath (recursively for directories) in the
// sync list, subject to the suffix and binary-file filters.
func (fsm *FileSyncManager) AddFileToSynclist(filePath string) error {
	return addAllPathInDirToFile(filePath, fsm.SyncListFile)
}
// AddFileToWhitelist records filePath (recursively for directories) in the
// white list, subject to the same filters as the sync list.
func (fsm *FileSyncManager) AddFileToWhitelist(filePath string) error {
	return addAllPathInDirToFile(filePath, fsm.WhiteListFile)
}
// addAllPathInDirToFile appends path to file, recursing into directories so
// every contained file is recorded too. Regular files are skipped when their
// extension appears in SUFFIX_WHITELIST or when they look binary. Errors
// from individual children are collected and returned joined, so one bad
// entry does not abort the rest of the walk.
func addAllPathInDirToFile(path string, file *os.File) error {
	stat, err := os.Stat(path)
	if err != nil {
		return err
	}
	if !stat.IsDir() {
		for _, suffix := range SUFFIX_WHITELIST {
			// NOTE(review): "whitelisted" suffixes are skipped — confirm intent.
			if filepath.Ext(path) == suffix {
				return nil
			}
		}
		if isBinary(path) {
			return nil
		}
		log.Printf("Syncing file %s\n", path)
		_, err = file.WriteString(path + "\n")
		return err
	}
	log.Printf("Syncing dir %s\n", path)
	// The original dropped this write error; report it instead.
	if _, err = file.WriteString(path + "\n"); err != nil {
		return err
	}
	filesInfo, err := ioutil.ReadDir(path)
	if err != nil {
		return err
	}
	errorStrings := make([]string, 0)
	for _, fileInfo := range filesInfo {
		newPath := filepath.Join(path, fileInfo.Name())
		if err = addAllPathInDirToFile(newPath, file); err != nil {
			errorStrings = append(errorStrings, err.Error())
		}
	}
	if len(errorStrings) == 0 {
		return nil
	}
	// Constant format string: the original passed the joined errors as the
	// format itself, which go vet flags and which mangles any "%" in paths.
	return fmt.Errorf("%s", strings.Join(errorStrings, "\n"))
}
// Close closes both list files, collecting any failures into one error.
//
// Fix: the original called err.Error() unconditionally after each Close,
// which panicked with a nil dereference whenever a Close succeeded; only
// actual errors are recorded now.
func (fsm *FileSyncManager) Close() error {
	errorStrings := make([]string, 0)
	if err := fsm.SyncListFile.Close(); err != nil {
		errorStrings = append(errorStrings, err.Error())
	}
	if err := fsm.WhiteListFile.Close(); err != nil {
		errorStrings = append(errorStrings, err.Error())
	}
	if len(errorStrings) == 0 {
		return nil
	}
	return fmt.Errorf("%s", strings.Join(errorStrings, "\n"))
}
// isBinary reports whether the file at filePath looks binary, using the
// common heuristic of scanning the first 1 KiB for a NUL byte.
//
// Fixes over the original: the handle is closed (was leaked), an empty file
// no longer panics on io.EOF, and only the n bytes actually read are
// examined — previously the zero-filled tail of the 1024-byte buffer made
// every file shorter than 1 KiB look binary.
func isBinary(filePath string) bool {
	file, err := os.Open(filePath)
	if err != nil {
		panic(err)
	}
	defer file.Close()

	buf := make([]byte, 1024)
	n, err := file.Read(buf)
	if err == io.EOF {
		return false // empty file: nothing to inspect, treat as text
	}
	if err != nil {
		panic(err)
	}
	if n < len(buf) {
		log.Printf("file %s has less than 1024 bytes", filePath)
	}
	for _, b := range buf[:n] {
		if b == 0 {
			return true
		}
	}
	return false
}
|
package base
import (
"errors"
"fmt"
"gengine/context"
"reflect"
"sync"
)
// ConcStatement groups assignments, function calls and method calls that
// the rule engine may execute concurrently as one statement block.
type ConcStatement struct {
	Assignments   []*Assignment
	FunctionCalls []*FunctionCall
	MethodCalls   []*MethodCall
}
// AcceptAssignment registers an assignment for execution; it always
// returns nil (the error return matches the parser's visitor contract).
func (cs *ConcStatement) AcceptAssignment(assignment *Assignment) error {
	cs.Assignments = append(cs.Assignments, assignment)
	return nil
}

// AcceptFunctionCall registers a function call for execution; always nil.
func (cs *ConcStatement) AcceptFunctionCall(funcCall *FunctionCall) error {
	cs.FunctionCalls = append(cs.FunctionCalls, funcCall)
	return nil
}

// AcceptMethodCall registers a method call for execution; always nil.
func (cs *ConcStatement) AcceptMethodCall(methodCall *MethodCall) error {
	cs.MethodCalls = append(cs.MethodCalls, methodCall)
	return nil
}
// Evaluate runs every collected statement. A single statement is evaluated
// inline; with more than one, all statements run concurrently and their
// error messages are gathered under a mutex. The returned reflect.Value is
// always the zero value except in the single-statement case.
func (cs *ConcStatement) Evaluate(dc *context.DataContext, Vars map[string]reflect.Value) (reflect.Value, error) {
	aLen, fLen, mLen := len(cs.Assignments), len(cs.FunctionCalls), len(cs.MethodCalls)
	total := aLen + fLen + mLen
	if total <= 0 {
		return reflect.ValueOf(nil), nil
	}
	if total == 1 {
		// Exactly one statement: no goroutines needed.
		if aLen > 0 {
			return cs.Assignments[0].Evaluate(dc, Vars)
		}
		if fLen > 0 {
			return cs.FunctionCalls[0].Evaluate(dc, Vars)
		}
		return cs.MethodCalls[0].Evaluate(dc, Vars)
	}

	var (
		errLock sync.Mutex
		eMsg    []string
		wg      sync.WaitGroup
	)
	// launch runs one statement in its own goroutine, recording any error
	// message under the shared lock.
	launch := func(eval func() (reflect.Value, error)) {
		go func() {
			defer wg.Done()
			if _, e := eval(); e != nil {
				errLock.Lock()
				eMsg = append(eMsg, fmt.Sprintf("%+v", e))
				errLock.Unlock()
			}
		}()
	}
	wg.Add(total)
	for _, assign := range cs.Assignments {
		assignment := assign
		launch(func() (reflect.Value, error) { return assignment.Evaluate(dc, Vars) })
	}
	for _, fu := range cs.FunctionCalls {
		fun := fu
		launch(func() (reflect.Value, error) { return fun.Evaluate(dc, Vars) })
	}
	for _, me := range cs.MethodCalls {
		meth := me
		launch(func() (reflect.Value, error) { return meth.Evaluate(dc, Vars) })
	}
	wg.Wait()
	if len(eMsg) > 0 {
		return reflect.ValueOf(nil), errors.New(fmt.Sprintf("%+v", eMsg))
	}
	return reflect.ValueOf(nil), nil
}
|
package main
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/fatih/color"
_ "github.com/go-sql-driver/mysql"
"github.com/pingcap/tidiff/config"
"github.com/pingcap/tidiff/executor"
"github.com/pingcap/tidiff/history"
"github.com/pingcap/tidiff/uimode"
"github.com/sergi/go-diff/diffmatchpatch"
"gopkg.in/urfave/cli.v2"
)
// main wires up the CLI app with connection flags for both MySQL and TiDB
// and dispatches to serve.
func main() {
	app := cli.App{}
	app.Name = "tidiff"
	app.Usage = "Execute SQL in TiDB and MySQL and returns the results"
	app.Description = "Used to compare the result different in MySQL and TiDB for the same SQL statement"
	app.Version = "0.0.2"
	app.Flags = []cli.Flag{
		&cli.StringFlag{
			Name:  "mysql.host",
			Value: "127.0.0.1",
			Usage: "MySQL host",
		},
		&cli.IntFlag{
			Name:  "mysql.port",
			Value: 3306,
			Usage: "MySQL port",
		},
		&cli.StringFlag{
			Name:  "mysql.user",
			Value: "root",
			Usage: "MySQL username",
		},
		&cli.StringFlag{
			Name:  "mysql.password",
			Value: "",
			Usage: "MySQL password",
		},
		&cli.StringFlag{
			Name:  "mysql.db",
			Value: "",
			Usage: "MySQL database",
		},
		&cli.StringFlag{
			Name:  "mysql.options",
			Value: "charset=utf8mb4",
			Usage: "MySQL DSN options",
		},
		&cli.StringFlag{
			Name:  "tidb.host",
			Value: "127.0.0.1",
			Usage: "TiDB host",
		},
		&cli.IntFlag{
			Name:  "tidb.port",
			Value: 4000,
			Usage: "TiDB port",
		},
		&cli.StringFlag{
			Name:  "tidb.user",
			Value: "root",
			Usage: "TiDB username",
		},
		&cli.StringFlag{
			Name:  "tidb.password",
			Value: "",
			Usage: "TiDB password",
		},
		&cli.StringFlag{
			Name:  "tidb.db",
			Value: "",
			Usage: "TiDB database",
		},
		&cli.StringFlag{
			Name:  "tidb.options",
			Value: "charset=utf8mb4",
			Usage: "TiDB DSN options",
		},
		&cli.StringFlag{
			Name:  "log.diff",
			Value: "",
			Usage: "Log all query diff to file",
		},
	}
	app.Action = serve
	if err := app.Run(os.Args); err != nil {
		// Fix: the builtin println renders an error interface as two raw
		// pointers; print the actual message to stderr instead.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// dbConfig assembles an executor.Config from the CLI flags that share the
// given dialect prefix ("mysql" or "tidb").
func dbConfig(dialect string, ctx *cli.Context) *executor.Config {
	str := func(name string) string { return ctx.String(dialect + "." + name) }
	return &executor.Config{
		Host:     str("host"),
		Port:     ctx.Int(dialect + "." + "port"),
		User:     str("user"),
		Password: str("password"),
		DB:       str("db"),
		Options:  str("options"),
	}
}
// initConfig creates the tidiff config file if missing and loads default
// flag values from it (one "key = value" per line). Flags set explicitly on
// the command line take precedence and are not overwritten.
func initConfig(ctx *cli.Context) error {
	if err := os.MkdirAll(filepath.Join(config.TiDiffPath), os.ModePerm); err != nil {
		return err
	}
	if _, err := os.Stat(config.TiDiffConfigPath); err != nil && os.IsNotExist(err) {
		file, err := os.Create(config.TiDiffConfigPath)
		if err != nil {
			return err
		}
		if err := file.Close(); err != nil {
			return err
		}
	}
	b, err := ioutil.ReadFile(config.TiDiffConfigPath)
	if err != nil {
		return err
	}
	for _, line := range strings.Split(string(b), "\n") {
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			continue
		}
		key := strings.TrimSpace(parts[0])
		value := strings.TrimSpace(parts[1])
		// Command-line flags win over config-file defaults. (The original
		// performed this exact check twice in a row.)
		if ctx.IsSet(key) {
			continue
		}
		if err := ctx.Set(key, value); err != nil {
			return err
		}
	}
	return nil
}
// serveCLIMode executes the SQL given on the command line against both
// databases, prints both result sets with differences colorized (red =
// MySQL-only, green = TiDB-only), and returns an error when they diverge.
func serveCLIMode(ctx *cli.Context, exec *executor.Executor) error {
	query := strings.Join(ctx.Args().Slice(), " ")
	mysqlResult, tidbResult, err := exec.Query(query)
	if err != nil {
		return err
	}
	defer mysqlResult.Close()
	defer tidbResult.Close()
	mysqlContent, tidbContent := mysqlResult.Content(), tidbResult.Content()
	var containsDiff bool
	// Only diff when both sides executed cleanly; otherwise print raw output.
	if mysqlResult.Error == nil && tidbResult.Error == nil {
		green := color.New(color.FgGreen).SprintFunc()
		red := color.New(color.FgRed).SprintFunc()
		patch := diffmatchpatch.New()
		diff := patch.DiffMain(mysqlContent, tidbContent, false)
		var newMySQLContent, newTiDBContent bytes.Buffer
		for _, d := range diff {
			switch d.Type {
			case diffmatchpatch.DiffEqual:
				newMySQLContent.WriteString(d.Text)
				newTiDBContent.WriteString(d.Text)
			case diffmatchpatch.DiffDelete:
				newMySQLContent.WriteString(red(d.Text))
				containsDiff = true
			case diffmatchpatch.DiffInsert:
				newTiDBContent.WriteString(green(d.Text))
				containsDiff = true
			}
		}
		mysqlContent = newMySQLContent.String()
		tidbContent = newTiDBContent.String()
	}
	// "!!"-prefixed queries are templates; log the rendered form instead.
	logQuery := query
	if strings.HasPrefix(query, "!!") {
		logQuery = mysqlResult.Rendered
	}
	fmt.Printf("MySQL(%s)> %s\n", exec.MySQLConfig.Address(), logQuery)
	if mysqlContent != "" {
		fmt.Println(mysqlContent)
	}
	fmt.Println(mysqlResult.Stat() + "\n")
	fmt.Printf("TiDB(%s)> %s\n", exec.TiDBConfig.Address(), logQuery)
	if tidbContent != "" {
		fmt.Println(tidbContent)
	}
	fmt.Println(tidbResult.Stat() + "\n")
	if containsDiff {
		// Typo fix: was "inconsistant".
		return errors.New("inconsistent result between TiDB and MySQL")
	}
	return nil
}
// serve opens connections to both databases, then either runs one-shot CLI
// mode (when SQL arguments are given) or starts the interactive UI.
func serve(ctx *cli.Context) error {
	if err := initConfig(ctx); err != nil {
		return err
	}
	exec := executor.NewExecutor(dbConfig("mysql", ctx), dbConfig("tidb", ctx))
	if err := exec.Open(executor.DefaultRetryCnt); err != nil {
		return err
	}
	// Command line mode
	if args := ctx.Args(); args.Len() > 0 {
		return serveCLIMode(ctx, exec)
	}
	// User interface mode
	recorder := history.NewRecorder()
	if err := recorder.Open(); err != nil {
		return err
	}
	defer func() {
		if err := recorder.Close(); err != nil {
			log.Println(err.Error())
		}
	}()
	if logDiff := ctx.String("log.diff"); logDiff != "" {
		diff, err := os.OpenFile(logDiff, os.O_CREATE|os.O_WRONLY, os.ModePerm)
		if err != nil {
			// Fix: log.Fatal here would exit without running the deferred
			// recorder.Close above; return the error instead.
			return err
		}
		recorder.SetDiff(diff)
		defer diff.Close()
	}
	return uimode.New(recorder, exec).Serve()
}
|
package tasks_test
import (
"testing"
"github.com/benjlevesque/task/mocks"
"github.com/benjlevesque/task/pkg/tasks"
"github.com/benjlevesque/task/types"
"github.com/stretchr/testify/mock"
)
// TestEditMock verifies the edit flow with mocks: EditTask should open the
// stored title ("toto") in the editor and persist the editor's result
// ("titi") back to the store.
func TestEditMock(t *testing.T) {
	store := &mocks.TaskEditer{}
	editor := &mocks.TextEditer{}
	// Stub the store and editor interactions.
	store.On("GetTask", 1).Return(types.Task{Title: "toto"}, nil)
	store.On("EditTask", 1, mock.AnythingOfType("string")).Return(nil)
	editor.On("EditText", mock.AnythingOfType("string")).Return("titi", nil)
	tasks.EditTask(store, editor, []string{"1"})
	editor.AssertCalled(t, "EditText", "toto")
	store.AssertCalled(t, "EditTask", 1, "titi")
}
|
package article_model
import (
"go_web/app/http/models"
"go_web/pkg/logger"
"go_web/pkg/model"
"go_web/pkg/util"
)
// Article is the article model (title plus body) persisted via gorm.
type Article struct {
	models.BaseModel
	Title string `json:"title"`
	Body  string `json:"body"`
}
// Get looks up a single article by its string-encoded ID.
func Get(idstr string) (Article, error) {
	var article Article
	err := model.DB.First(&article, util.StringToInt64(idstr)).Error
	return article, err
}
// GetAll returns every article in the database.
func GetAll() ([]Article, error) {
	var articles []Article
	err := model.DB.Find(&articles).Error
	return articles, err
}
// Create inserts the article, logging and returning any database error.
func (article *Article) Create() error {
	if err := model.DB.Create(&article).Error; err != nil {
		logger.LogError(err)
		return err
	}
	return nil
}
// Update persists the article's changed fields and returns how many rows
// were affected.
func (article *Article) Update() (int64, error) {
	result := model.DB.Updates(&article)
	if result.Error != nil {
		logger.LogError(result.Error)
		return 0, result.Error
	}
	return result.RowsAffected, nil
}
// Delete removes the article and returns how many rows were affected.
func (article *Article) Delete() (int64, error) {
	result := model.DB.Delete(&article)
	if result.Error != nil {
		logger.LogError(result.Error)
		return 0, result.Error
	}
	return result.RowsAffected, nil
}
|
package engine
import (
"context"
"errors"
"fmt"
"log"
"sort"
"strconv"
"strings"
"time"
"github.com/agext/levenshtein"
"github.com/eve-spyglass/spyglass2/feeds"
"github.com/sirupsen/logrus"
"gonum.org/v1/gonum/graph/simple"
)
type (
	// IntelEngine aggregates intel and location reports for the systems of
	// the currently selected map and answers status queries about them.
	IntelEngine struct {
		Galaxy     NewEden
		CurrentMap string

		// systems to alarm upon and words that mark a report as "clear"
		monitoredSystems []int32
		clearWords       []string

		reportHistory   []*feeds.Report
		locationHistory []*feeds.Locstat

		// per-system status (0 unknown, 1 clear, 2 hostile) and the last
		// time information arrived for each system
		currentStatus map[int32]uint8
		lastUpdated   map[int32]time.Time

		// input channels drained by the listener goroutine
		locationInput chan feeds.Locstat
		intelInput    chan feeds.Report

		// jump topology of the currently selected map
		mapGraph *simple.UndirectedGraph
	}

	// IntelResource is the read/feed interface the engine exposes.
	IntelResource interface {
		// Status returns a map of systems to status, where 0 is unknown 1 is clear and 2 is hostile
		Status() map[int32]uint8
		// LastUpdated returns the time since any information was received about a system
		LastUpdated() map[int32]time.Time
		// SetMonitoredSystems will notify the IntelResource which systems to alarm upon
		SetMonitoredSystems(systems []int32) error
		// GetJumps will return the connections between the given systems
		// it returns a string array where each string represents a connection
		// it will be formatted as "1234-5678" and is directional from source to sink
		GetJumps() []string
		// GetFeeders will return the two channels that can be used to feed information into the resource
		GetFeeders() (chan<- feeds.Report, chan<- feeds.Locstat, error)
	}
)
// NewIntelEngine loads the static galaxy data, builds the jump graph for the
// default map ("Delve"), and starts the background listener goroutine that
// consumes intel and location reports until ctx is cancelled.
func NewIntelEngine(ctx context.Context) (*IntelEngine, error) {
	galaxy := make(NewEden)
	if err := galaxy.LoadData(); err != nil {
		return nil, fmt.Errorf("failed to load galaxy data: %w", err)
	}
	ie := &IntelEngine{
		Galaxy:     galaxy,
		CurrentMap: "Delve",
	}
	if err := ie.updateMapGraph(); err != nil {
		return nil, fmt.Errorf("failed to update map graph: %w", err)
	}
	ie.intelInput = make(chan feeds.Report, 64)
	ie.locationInput = make(chan feeds.Locstat, 64)
	if err := ie.startListeners(ctx); err != nil {
		// Fix: the original discarded the underlying error here.
		return nil, fmt.Errorf("failed to start intel listeners: %w", err)
	}
	return ie, nil
}
// SetCurrentMap switches the engine to map m and rebuilds the jump graph.
// Note: CurrentMap is updated even when the rebuild fails.
func (ie *IntelEngine) SetCurrentMap(m string) error {
	ie.CurrentMap = m
	return ie.updateMapGraph()
}
// SetClearWords replaces the list of words that mark a report as "clear".
// The slice is stored as-is (not copied).
func (ie *IntelEngine) SetClearWords(words []string) {
	ie.clearWords = words
}
// updateMapGraph rebuilds mapGraph from the stargate topology of the region
// whose name matches CurrentMap; it errors when no region matches.
func (ie *IntelEngine) updateMapGraph() error {
	// TODO change this to account for non region mapdefs
	// Find the correct region based on the current selected map
	for _, r := range ie.Galaxy {
		if r.Name == ie.CurrentMap {
			// This is us!
			ie.mapGraph = simple.NewUndirectedGraph()
			for _, c := range r.Constellations {
				for _, s := range c.Systems {
					for _, g := range s.Stargates {
						// Cant have a system link to itself
						if s.SystemID == g.Destination.SystemID {
							continue
						}
						ie.mapGraph.SetEdge(ie.mapGraph.NewEdge(simple.Node(s.SystemID), simple.Node(g.Destination.SystemID)))
					}
				}
			}
			return nil
		}
	}
	return errors.New("map not found")
}
// startListeners launches the background goroutine that drains the intel and
// location channels until ctx is cancelled.
//
// Fix: the original called panic(1) on ctx.Done (with the intended `return`
// commented out), crashing the process on a normal shutdown.
func (ie *IntelEngine) startListeners(ctx context.Context) error {
	go func() {
		log.Println("DEBUG: IE: Starting to Listen")
		for {
			select {
			case rep := <-ie.intelInput:
				// Received a new intel report
				ie.reportHistory = append(ie.reportHistory, &rep)
				ie.checkReport(&rep)
				log.Printf("IE: Got Intel - %s", rep.Message)
			case loc := <-ie.locationInput:
				// Received a new location report
				log.Printf("IE - Got locstat - %s", loc.Character)
			case <-ctx.Done():
				// Normal shutdown: stop listening.
				return
			}
		}
	}()
	return nil
}
// GetIntelMessages returns the report history formatted as strings, sorted
// by the ReportList ordering.
func (ie *IntelEngine) GetIntelMessages() []string {
	reports := feeds.ReportList(ie.reportHistory)
	sort.Sort(reports)
	messages := make([]string, reports.Len())
	for i := range reports {
		messages[i] = reports[i].String()
	}
	return messages
}
// checkReport scans each token of a report for fuzzy matches against the
// monitored system names and for exact matches against the clear words, then
// records the resulting status (1 clear, 2 hostile) for matched systems.
func (ie *IntelEngine) checkReport(rep *feeds.Report) {
	msgParts := strings.Split(rep.Message, " ")
	// TODO: Make these configurable
	const dist = 0.8
	var ignores = []string{"in", "as", "is"}
	var systems []int32
	rep.Status = 2 // assume hostile unless a clear word appears
	for _, word := range msgParts {
		lowerWord := strings.ToLower(word)
		// Fix: the original `continue` only restarted the inner ignores
		// loop, so filler words were still matched against system names.
		ignored := false
		for _, i := range ignores {
			if lowerWord == strings.ToLower(i) {
				ignored = true
				break
			}
		}
		if ignored {
			continue
		}
		for _, s := range ie.monitoredSystems {
			system, err := ie.Galaxy.GetSystem(s)
			if err != nil {
				continue
			}
			// D will be in a range of 0 to 1, where 1 is a perfect match
			d := levenshtein.Match(lowerWord, strings.ToLower(system.Name), levenshtein.NewParams().BonusPrefix(3).BonusThreshold(0.3).BonusScale(0.21))
			log.Printf("DEBUG: IE: MATCHER %s to %s with a distance of %.2f", word, system.Name, d)
			if d >= dist {
				// We have a system match here! Yay, intel!
				log.Printf("DEBUG: IE: Matched %s to %s with a distance of %.2f", word, system.Name, d)
				systems = append(systems, system.SystemID)
				break
			}
		}
		for _, cw := range ie.clearWords {
			// Fix: logrus.Debug does not interpret format verbs; Debugf does.
			logrus.Debugf("IE - Checking %s vs %s for CW", lowerWord, strings.ToLower(cw))
			if lowerWord == strings.ToLower(cw) {
				logrus.Debugf("MATCHED CLEAR WORD %s to %s", lowerWord, strings.ToLower(cw))
				rep.Status = 1
			}
		}
	}
	// NOTE(review): these maps are allocated in SetMonitoredSystems; confirm
	// reports cannot arrive before that is called (nil-map write panics).
	for _, sys := range systems {
		ie.currentStatus[sys] = rep.Status
		ie.lastUpdated[sys] = rep.Time
	}
}
// IsSystemMonitored reports whether sys is one of the monitored systems.
func (ie *IntelEngine) IsSystemMonitored(sys int32) bool {
	for _, monitored := range ie.monitoredSystems {
		if monitored == sys {
			return true
		}
	}
	return false
}
// The following methods satisfy the IntelResource interface.

// Status returns the per-system status map, where 0 is unknown, 1 is clear
// and 2 is hostile.
func (ie *IntelEngine) Status() map[int32]uint8 {
	return ie.currentStatus
}
// LastUpdated returns, for each system, the last time any information was
// received about it.
func (ie *IntelEngine) LastUpdated() map[int32]time.Time {
	return ie.lastUpdated
}
// SetMonitoredSystems tells the engine which systems to monitor for intel;
// IDs unknown to the galaxy data are silently dropped.
func (ie *IntelEngine) SetMonitoredSystems(systems []int32) error {
	// Fix: allocate with zero length. The original used
	// make([]int32, len(systems)) and then appended, leaving a leading run
	// of zero-value system IDs in the monitored list.
	ie.monitoredSystems = make([]int32, 0, len(systems))
	ie.currentStatus = make(map[int32]uint8, len(systems))
	ie.lastUpdated = make(map[int32]time.Time, len(systems))
	for _, system := range systems {
		sys, err := ie.Galaxy.GetSystem(system)
		if err != nil {
			continue
		}
		ie.monitoredSystems = append(ie.monitoredSystems, sys.SystemID)
	}
	return nil
}
// GetJumps returns the gate connections between monitored systems, each
// formatted "source-destination". Both directions appear (1-2 and 2-1).
func (ie *IntelEngine) GetJumps() []string {
	// TODO find a way to preallocate this to some extent
	jumps := make([]string, 0)
	for _, id := range ie.monitoredSystems {
		source, err := ie.Galaxy.GetSystem(id)
		if err != nil {
			// TODO this shouldnt ever happen so I probably shouldnt be silent here but will do for now
			continue
		}
		for _, gate := range source.Stargates {
			dest := gate.Destination.SystemID
			if !ie.IsSystemMonitored(dest) {
				continue
			}
			jumps = append(jumps, strconv.Itoa(int(source.SystemID))+"-"+strconv.Itoa(int(dest)))
		}
	}
	return jumps
}
// GetFeeders exposes the send sides of the intel and location channels so
// feed sources can push reports into the engine. The error is always nil.
func (ie *IntelEngine) GetFeeders() (chan<- feeds.Report, chan<- feeds.Locstat, error) {
	return ie.intelInput, ie.locationInput, nil
}
|
package catp
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01500101 is the catp.015.001.01 XML document wrapper around an
// ATMDepositCompletionAcknowledgementV01 message.
type Document01500101 struct {
	XMLName xml.Name                                `xml:"urn:iso:std:iso:20022:tech:xsd:catp.015.001.01 Document"`
	Message *ATMDepositCompletionAcknowledgementV01 `xml:"ATMDpstCmpltnAck"`
}
// AddMessage allocates the document's message payload and returns it so the
// caller can populate it.
func (d *Document01500101) AddMessage() *ATMDepositCompletionAcknowledgementV01 {
	msg := &ATMDepositCompletionAcknowledgementV01{}
	d.Message = msg
	return msg
}
// The ATMDepositCompletionAcknowledgement message is sent by an acquirer or its agent to an ATM to acknowledge the receipt of an ATMDepositCompletionAdvice message.
// Exactly one of the protected (encrypted) or clear-text acknowledgement
// bodies is expected to be present.
type ATMDepositCompletionAcknowledgementV01 struct {
	// Information related to the protocol management on a segment of the path from the ATM to the acquirer.
	Header *iso20022.Header32 `xml:"Hdr"`
	// Encrypted body of the message.
	ProtectedATMDepositCompletionAcknowledgement *iso20022.ContentInformationType10 `xml:"PrtctdATMDpstCmpltnAck,omitempty"`
	// Information related to the acknowledgement of an ATM deposit transaction from the ATM manager.
	ATMDepositCompletionAcknowledgement *iso20022.ATMDepositCompletionAcknowledgement1 `xml:"ATMDpstCmpltnAck,omitempty"`
	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType15 `xml:"SctyTrlr,omitempty"`
}
// AddHeader allocates and returns the message header.
func (a *ATMDepositCompletionAcknowledgementV01) AddHeader() *iso20022.Header32 {
	hdr := &iso20022.Header32{}
	a.Header = hdr
	return hdr
}

// AddProtectedATMDepositCompletionAcknowledgement allocates and returns the
// encrypted acknowledgement body.
func (a *ATMDepositCompletionAcknowledgementV01) AddProtectedATMDepositCompletionAcknowledgement() *iso20022.ContentInformationType10 {
	body := &iso20022.ContentInformationType10{}
	a.ProtectedATMDepositCompletionAcknowledgement = body
	return body
}

// AddATMDepositCompletionAcknowledgement allocates and returns the clear-text
// acknowledgement body.
func (a *ATMDepositCompletionAcknowledgementV01) AddATMDepositCompletionAcknowledgement() *iso20022.ATMDepositCompletionAcknowledgement1 {
	body := &iso20022.ATMDepositCompletionAcknowledgement1{}
	a.ATMDepositCompletionAcknowledgement = body
	return body
}

// AddSecurityTrailer allocates and returns the MAC trailer.
func (a *ATMDepositCompletionAcknowledgementV01) AddSecurityTrailer() *iso20022.ContentInformationType15 {
	trailer := &iso20022.ContentInformationType15{}
	a.SecurityTrailer = trailer
	return trailer
}
|
//https://leetcode-cn.com/problems/3sum/
package main
import "fmt"
// main exercises threeSum with one sample input; the other fixtures are
// kept for quick manual testing.
func main() {
	// nums := []int{-1, 0, 1, 2, -1, -4}
	// nums := []int{3, -2, 1, 0}
	// nums := []int{-2, 0, 0, 2, 2}
	input := []int{-4, -2, -2, -2, 0, 1, 2, 2, 2, 3, 3, 4, 4, 6, 6}
	fmt.Println(threeSum(input))
}
// threeSum returns all unique triplets from nums that sum to zero, using
// sort + two pointers (LeetCode 15).
func threeSum(nums []int) [][]int {
	sortNums(nums)
	result := make([][]int, 0)
	for k := 0; k < len(nums)-2; k++ {
		if nums[k] > 0 {
			break // anchor is positive, so every remaining sum is positive
		}
		if k > 0 && nums[k] == nums[k-1] {
			continue // skip duplicate anchors
		}
		lo, hi := k+1, len(nums)-1
		for lo < hi {
			sum := nums[k] + nums[lo] + nums[hi]
			switch {
			case lo > k+1 && nums[lo] == nums[lo-1] || sum < 0:
				lo++ // duplicate low value, or sum too small
			case hi < len(nums)-1 && nums[hi] == nums[hi+1] || sum > 0:
				hi-- // duplicate high value, or sum too large
			default: // sum == 0 and both endpoints are fresh
				result = append(result, []int{nums[k], nums[lo], nums[hi]})
				lo++
				hi--
			}
		}
	}
	return result
}

// sortNums sorts nums ascending in place using a simple selection-style
// swap sort.
func sortNums(nums []int) {
	for i := 0; i < len(nums); i++ {
		for j := len(nums) - 1; j > i; j-- {
			if nums[i] > nums[j] {
				nums[i], nums[j] = nums[j], nums[i]
			}
		}
	}
}
// threeSumMap is an abandoned map-based attempt at the same problem; the
// original author's note ("没做出来") says it does not work. Kept for
// reference; main uses threeSum instead.
// NOTE(review): only adjacent pairs (num, nums[k+1]) are considered, so
// triplets whose first two members are not adjacent after input order are
// missed — presumably the reason it was abandoned.
func threeSumMap(nums []int) [][]int {
	if len(nums) < 3 {
		return [][]int{}
	}
	returnArray := make([][]int, 0)
	// Count occurrences of each value.
	mapping := make(map[int]int)
	for _, num := range nums {
		mapping[num] = mapping[num] + 1
	}
	// chkMapping deduplicates result triplets by their sorted string key.
	chkMapping := make(map[string]string)
	// Leftover debug output of the occurrence counts.
	for mk, mv := range mapping {
		fmt.Println(mk, mv)
	}
	for k, num := range nums {
		if k+1 == len(nums) {
			break
		}
		// val is the third member needed so the triplet sums to zero.
		val := 0 - (num + nums[k+1])
		if _, ok := mapping[val]; ok {
			// Guard against reusing the same element more times than it occurs.
			if (num == val && mapping[num] < 2) || (nums[k+1] == val && mapping[nums[k+1]] < 2) || (num == val && nums[k+1] == val && mapping[num] < 3) {
				continue
			}
			tmpArray := []int{num, nums[k+1], val}
			mKey := sortKey(tmpArray)
			if _, okk := chkMapping[mKey]; !okk {
				chkMapping[mKey] = mKey
				returnArray = append(returnArray, tmpArray)
			}
		}
	}
	return returnArray
}
// sortKey sorts nums ascending in place and returns a canonical string key
// used by threeSumMap for duplicate detection.
//
// Fix: the original concatenated bare digits, so e.g. [1, 12] and [11, 2]
// both produced "112" and distinct triplets could be wrongly deduplicated.
// A "," separator makes the key unambiguous.
func sortKey(nums []int) string {
	for i := 0; i < len(nums); i++ {
		for j := len(nums) - 1; j > i; j-- {
			if nums[i] > nums[j] {
				nums[i], nums[j] = nums[j], nums[i]
			}
		}
	}
	key := ""
	for _, num := range nums {
		if key != "" {
			key += ","
		}
		key += fmt.Sprintf("%d", num)
	}
	return key
}
|
/*
* Pause a server.
*
* When a virtual machine is paused, its state is frozen (e.g. memory, open applications)
* and monitoring ceases. Billing charges for CPU and memory stop. A paused machine can be
* quickly brought back to life by issuing the "On" power command.
* Any applicable licensing charges continue to accrue while a machine is paused.
*/
package main
import (
"flag"
"fmt"
"os"
"path"
"github.com/grrtrr/clcv2/clcv2cli"
"github.com/grrtrr/exit"
)
// main parses the single <server-name> argument, asks CLC to pause that
// server, and prints the resulting request (status) ID.
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s [options] <server-name>\n", path.Base(os.Args[0]))
		flag.PrintDefaults()
	}
	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}

	client, err := clcv2cli.NewCLIClient()
	if err != nil {
		exit.Fatal(err.Error())
	}

	server := flag.Arg(0)
	statusId, err := client.PauseServer(server)
	if err != nil {
		exit.Fatalf("failed to pause server %s: %s", server, err)
	}
	fmt.Printf("Request ID for pausing server: %s\n", statusId)
}
|
package cats
import (
"context"
"encoding/json"
"io/ioutil"
"net/http"
"github.com/NYTimes/marvin"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine/log"
)
// addCat stores the decoded Cat via the injected DB layer and responds with
// the cat under a 201 Created status.
func (s *service) addCat(ctx context.Context, r interface{}) (interface{}, error) {
	// make type conversion to the expected Cat pointer
	req := r.(*PostAddFormatRequest)
	// hit the injected DB layer
	err := s.db.AddCat(ctx, req.Cat)
	if err != nil {
		// Fix: the original message ("unable to get count") was copied from
		// a different handler; log what actually failed here.
		log.Errorf(ctx, "unable to add cat: %s", err)
		return nil, marvin.NewProtoStatusResponse(&ErrorResponse{
			Error: "server error"}, http.StatusInternalServerError)
	}
	// wrap responses so we can return with a 201 code
	return marvin.NewProtoStatusResponse(req.Cat, http.StatusCreated), nil
}
// decodeCat parses a JSON-encoded Cat out of the request body.
func decodeCat(ctx context.Context, r *http.Request) (interface{}, error) {
	cat := new(Cat)
	if err := json.NewDecoder(r.Body).Decode(cat); err != nil {
		return nil, marvin.NewJSONStatusResponse(&ErrorResponse{
			Error: "bad request"}, http.StatusBadRequest)
	}
	return &PostAddFormatRequest{Cat: cat}, nil
}
// decodeCatProto parses a protobuf-encoded Cat out of the request body.
func decodeCatProto(ctx context.Context, r *http.Request) (interface{}, error) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, marvin.NewProtoStatusResponse(&ErrorResponse{
			Error: "unable to read request"}, http.StatusBadRequest)
	}
	cat := new(Cat)
	if err := proto.Unmarshal(body, cat); err != nil {
		return nil, marvin.NewProtoStatusResponse(&ErrorResponse{
			Error: "bad request"}, http.StatusBadRequest)
	}
	return &PostAddFormatRequest{Cat: cat}, nil
}
|
package main
import (
"context"
"github.com/aws/aws-lambda-go/events"
"testing"
)
// ctx is deliberately nil; the handler under test is expected to tolerate it.
var ctx context.Context = nil

// in is a canned API Gateway request whose body asks the NDFD SOAP service
// for GML lat/lon temperature data.
var in = events.APIGatewayProxyRequest{
	Body: "{\"service\": \"https://graphical.weather.gov/xml/SOAP_server/ndfdXMLserver.php\", \"requestBody\": \"https://graphical.weather.gov/xml/docs/SOAP_Requests/GmlLatLonList.xml\", \"requestMap\": { \"requestedTime\": \"2019-06-22T23:59:59\" }, \"responseMap\": { \"gml:boundedBy\": { \"gml:coordinates\": \"\" }, \"gml:featureMember\": { \"gml:coordinates\": \"\", \"app:validTime\": \"\", \"app:maximumTemperature\": \"\" } }}",
}
// TestHandler verifies that Handler processes the canned API Gateway request
// without returning an error.
func TestHandler(t *testing.T) {
	_, err := Handler(ctx, in)
	if err != nil {
		// Fix: include the actual error so a failure is diagnosable
		// (the original only printed the literal "ERROR").
		t.Errorf("Handler returned an unexpected error: %v", err)
	}
}
|
package model
import (
"time"
"golang.org/x/oauth2"
"google.golang.org/api/calendar/v3"
)
// Event pairs a summary with Google Calendar start/end times, mirroring the
// calendar API's EventDateTime fields.
type Event struct {
	Summary string
	Start   calendar.EventDateTime
	End     calendar.EventDateTime
}

// Event2 is the JSON payload variant of Event: it carries the OAuth token
// and target calendar ID alongside plain time.Time bounds.
type Event2 struct {
	Token      oauth2.Token `json:"token"`
	CalendarId string       `json:"calendarId"`
	Summary    string       `json:"summary"`
	Start      time.Time    `json:"start"`
	End        time.Time    `json:"end"`
}
|
package graphql_test
import (
"testing"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/gqlerrors"
"github.com/graphql-go/graphql/testutil"
)
// Passing case: every variable is declared directly on the operation.
func TestValidate_NoUndefinedVariables_AllVariablesDefined(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String, $b: String, $c: String) {
        field(a: $a, b: $b, c: $c)
      }
    `)
}

// Passing case: variables used in nested selections are still defined.
func TestValidate_NoUndefinedVariables_AllVariablesDeeplyDefined(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String, $b: String, $c: String) {
        field(a: $a) {
          field(b: $b) {
            field(c: $c)
          }
        }
      }
    `)
}

// Passing case: variables used inside inline fragments are defined.
func TestValidate_NoUndefinedVariables_AllVariablesDeeplyDefinedInInlineFragmentsDefined(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String, $b: String, $c: String) {
        ... on Type {
          field(a: $a) {
            field(b: $b) {
              ... on Type {
                field(c: $c)
              }
            }
          }
        }
      }
    `)
}

// Passing case: variables reached through a chain of fragments are defined.
func TestValidate_NoUndefinedVariables_AllVariablesInFragmentsDeeplyDefined(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String, $b: String, $c: String) {
        ...FragA
      }
      fragment FragA on Type {
        field(a: $a) {
          ...FragB
        }
      }
      fragment FragB on Type {
        field(b: $b) {
          ...FragC
        }
      }
      fragment FragC on Type {
        field(c: $c)
      }
    `)
}

// Passing case: a shared fragment's variable is defined by both operations.
func TestValidate_NoUndefinedVariables_VariablesWithinSingleFragmentDefinedInMultipleOperations(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String) {
        ...FragA
      }
      query Bar($a: String) {
        ...FragA
      }
      fragment FragA on Type {
        field(a: $a)
      }
    `)
}

// Passing case: each fragment's variable is defined by its own operation.
func TestValidate_NoUndefinedVariables_VariableWithinFragmentsDefinedInOperations(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String) {
        ...FragA
      }
      query Bar($b: String) {
        ...FragB
      }
      fragment FragA on Type {
        field(a: $a)
      }
      fragment FragB on Type {
        field(b: $b)
      }
    `)
}

// Passing case: a recursive fragment's variable is defined by the operation.
func TestValidate_NoUndefinedVariables_VariableWithinRecursiveFragmentDefined(t *testing.T) {
	testutil.ExpectPassesRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String) {
        ...FragA
      }
      fragment FragA on Type {
        field(a: $a) {
          ...FragA
        }
      }
    `)
}
// Failing case: $d is used but not declared on operation Foo.
func TestValidate_NoUndefinedVariables_VariableNotDefined(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String, $b: String, $c: String) {
        field(a: $a, b: $b, c: $c, d: $d)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$d" is not defined by operation "Foo".`, 3, 39, 2, 7),
	})
}

// Failing case: an anonymous query cannot define variables, so $a is undefined.
func TestValidate_NoUndefinedVariables_VariableNotDefinedByUnnamedQuery(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      {
        field(a: $a)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$a" is not defined.`, 3, 18, 2, 7),
	})
}

// Failing case: two of the three variables are undefined on Foo.
func TestValidate_NoUndefinedVariables_MultipleVariablesNotDefined(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($b: String) {
        field(a: $a, b: $b, c: $c)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$a" is not defined by operation "Foo".`, 3, 18, 2, 7),
		testutil.RuleError(`Variable "$c" is not defined by operation "Foo".`, 3, 32, 2, 7),
	})
}

// Failing case: a fragment variable used by an anonymous query is undefined.
func TestValidate_NoUndefinedVariables_VariableInFragmentNotDefinedByUnnamedQuery(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      {
        ...FragA
      }
      fragment FragA on Type {
        field(a: $a)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$a" is not defined.`, 6, 18, 2, 7),
	})
}

// Failing case: deep in a fragment chain, $c is not defined by operation Foo.
func TestValidate_NoUndefinedVariables_VariableInFragmentNotDefinedByOperation(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String, $b: String) {
        ...FragA
      }
      fragment FragA on Type {
        field(a: $a) {
          ...FragB
        }
      }
      fragment FragB on Type {
        field(b: $b) {
          ...FragC
        }
      }
      fragment FragC on Type {
        field(c: $c)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$c" is not defined by operation "Foo".`, 16, 18, 2, 7),
	})
}

// Failing case: several variables across the fragment chain are undefined.
func TestValidate_NoUndefinedVariables_MultipleVariablesInFragmentsNotDefined(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($b: String) {
        ...FragA
      }
      fragment FragA on Type {
        field(a: $a) {
          ...FragB
        }
      }
      fragment FragB on Type {
        field(b: $b) {
          ...FragC
        }
      }
      fragment FragC on Type {
        field(c: $c)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$a" is not defined by operation "Foo".`, 6, 18, 2, 7),
		testutil.RuleError(`Variable "$c" is not defined by operation "Foo".`, 16, 18, 2, 7),
	})
}
// Failing case: $b in a shared fragment is undefined for both operations.
func TestValidate_NoUndefinedVariables_SingleVariableInFragmentNotDefinedByMultipleOperations(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($a: String) {
        ...FragAB
      }
      query Bar($a: String) {
        ...FragAB
      }
      fragment FragAB on Type {
        field(a: $a, b: $b)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$b" is not defined by operation "Foo".`, 9, 25, 2, 7),
		testutil.RuleError(`Variable "$b" is not defined by operation "Bar".`, 9, 25, 5, 7),
	})
}

// Failing case: each operation lacks a different variable of the shared fragment.
func TestValidate_NoUndefinedVariables_VariablesInFragmentNotDefinedByMultipleOperations(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($b: String) {
        ...FragAB
      }
      query Bar($a: String) {
        ...FragAB
      }
      fragment FragAB on Type {
        field(a: $a, b: $b)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$a" is not defined by operation "Foo".`, 9, 18, 2, 7),
		testutil.RuleError(`Variable "$b" is not defined by operation "Bar".`, 9, 25, 5, 7),
	})
}

// Failing case: a variable defined only by the *other* operation is undefined.
func TestValidate_NoUndefinedVariables_VariableInFragmentUsedByOtherOperation(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($b: String) {
        ...FragA
      }
      query Bar($a: String) {
        ...FragB
      }
      fragment FragA on Type {
        field(a: $a)
      }
      fragment FragB on Type {
        field(b: $b)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$a" is not defined by operation "Foo".`, 9, 18, 2, 7),
		testutil.RuleError(`Variable "$b" is not defined by operation "Bar".`, 12, 18, 5, 7),
	})
}

// Failing case: multiple undefined variables produce one error per use/operation.
func TestValidate_NoUndefinedVariables_VaMultipleUndefinedVariablesProduceMultipleErrors(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.NoUndefinedVariablesRule, `
      query Foo($b: String) {
        ...FragAB
      }
      query Bar($a: String) {
        ...FragAB
      }
      fragment FragAB on Type {
        field1(a: $a, b: $b)
        ...FragC
        field3(a: $a, b: $b)
      }
      fragment FragC on Type {
        field2(c: $c)
      }
    `, []gqlerrors.FormattedError{
		testutil.RuleError(`Variable "$a" is not defined by operation "Foo".`, 9, 19, 2, 7),
		testutil.RuleError(`Variable "$c" is not defined by operation "Foo".`, 14, 19, 2, 7),
		testutil.RuleError(`Variable "$a" is not defined by operation "Foo".`, 11, 19, 2, 7),
		testutil.RuleError(`Variable "$b" is not defined by operation "Bar".`, 9, 26, 5, 7),
		testutil.RuleError(`Variable "$c" is not defined by operation "Bar".`, 14, 19, 5, 7),
		testutil.RuleError(`Variable "$b" is not defined by operation "Bar".`, 11, 26, 5, 7),
	})
}
|
package entity
import (
"log"
"posthis/storage"
"time"
"gorm.io/gorm"
)
// Reply is a user's reply to a post, persisted by GORM. The first three
// fields mirror gorm.Model (minus DeletedAt); attached media rows cascade
// on delete at the database level.
type Reply struct {
ID uint `gorm:"primarykey"`
CreatedAt time.Time
UpdatedAt time.Time
Content string `gorm:"default:''"` // reply body text; empty string by default
UserID uint //User.ID
PostID uint //Post.ID
Media []*Media `gorm:"foreignKey:ReplyOwnerID;constraint:OnDelete:CASCADE;"` // attachments owned by this reply
}
// BeforeDelete is a GORM hook that runs before a Reply row is removed.
// It deletes every attached media object from firebase storage, aborting
// on the first storage failure so the row delete does not proceed.
// NOTE(review): assumes reply.Media is populated (preloaded) when the
// delete fires — confirm against callers.
func (reply *Reply) BeforeDelete(tx *gorm.DB) (err error) {
	log.Println(reply.ID, "REPLY Getting Deleted")
	for _, attachment := range reply.Media {
		// Remove the backing object from firebase storage first.
		if deleteErr := storage.DeleteFile(attachment.Name); deleteErr != nil {
			return deleteErr
		}
	}
	return nil
}
|
package group
import (
"Open_IM/pkg/common/db/mysql_model/im_mysql_model"
"Open_IM/pkg/common/log"
"Open_IM/pkg/proto/group"
"context"
)
// GetGroupApplicationList returns the pending group applications for the
// user identified by pb.UID.
// On a storage failure it logs and returns a response carrying ErrCode 701
// with a nil transport error — callers must inspect ErrCode, not just err.
func (s *groupServer) GetGroupApplicationList(_ context.Context, pb *group.GetGroupApplicationListReq) (*group.GetGroupApplicationListResp, error) {
log.Info("", "", "rpc GetGroupApplicationList call start..., [pb: %s]", pb.String())
reply, err := im_mysql_model.GetGroupApplicationList(pb.UID)
if err != nil {
log.Error("", "", "rpc GetGroupApplicationList call..., im_mysql_model.GetGroupApplicationList fail [uid: %s] [err: %s]", pb.UID, err.Error())
// 701: project-specific error code for a failed application-list lookup.
return &group.GetGroupApplicationListResp{ErrCode: 701, ErrMsg: "GetGroupApplicationList failed"}, nil
}
log.Info("", "", "rpc GetGroupApplicationList call..., im_mysql_model.GetGroupApplicationList")
return reply, nil
}
|
package main
import (
"github.com/funkygao/gobench/util"
"sync"
"testing"
)
// main runs each benchmark through testing.Benchmark and prints its
// result under the matching label, in the same order as before.
func main() {
	benchmarks := []struct {
		label string
		fn    func(*testing.B)
	}{
		{"defer", benchmarkDefer},
		{"defer mutex unlock", benchmarkDeferUnlock},
		{"no defer mutex unlock", benchmarkNodeferUnlock},
	}
	for _, bm := range benchmarks {
		util.ShowBenchResult(bm.label, testing.Benchmark(bm.fn))
	}
}
// benchmarkDefer measures the raw cost of setting up and invoking an
// empty deferred call.
func benchmarkDefer(b *testing.B) {
	b.ReportAllocs()
	emptyDefer := func() {
		defer func() {}()
	}
	for n := 0; n < b.N; n++ {
		emptyDefer()
	}
}
func benchmarkDeferUnlock(b *testing.B) {
b.ReportAllocs()
var mu sync.Mutex
for i := 0; i < b.N; i++ {
deferUnlock(mu)
}
}
func benchmarkNodeferUnlock(b *testing.B) {
b.ReportAllocs()
var mu sync.Mutex
for i := 0; i < b.N; i++ {
nodeferUnlock(mu)
}
}
func deferUnlock(mu sync.Mutex) {
mu.Lock()
defer mu.Unlock()
}
func nodeferUnlock(mu sync.Mutex) {
mu.Lock()
mu.Unlock()
}
|
package accesslist
import (
"errors"
"strings"
"testing"
"github.com/10gen/realm-cli/internal/cli"
"github.com/10gen/realm-cli/internal/cloud/realm"
"github.com/10gen/realm-cli/internal/utils/test/assert"
"github.com/10gen/realm-cli/internal/utils/test/mock"
)
// TestAllowedIPDeleteHandler covers CommandDelete.Handler: the empty-state
// error, successful and partially-failing deletes (table-driven), and the
// error paths for app lookup, allowed-IP listing, and unknown addresses.
func TestAllowedIPDeleteHandler(t *testing.T) {
projectID := "projectID"
appID := "appID"
app := realm.App{
ID: appID,
GroupID: projectID,
ClientAppID: "test-abcde",
Name: "test",
}
// Fixture access list shared by all sub-tests below.
allowedIPs := []realm.AllowedIP{
{ID: "address1", Address: "0.0.0.0", Comment: "comment"},
{ID: "address2", Address: "192.1.1.1"},
{ID: "address3", Address: "192.158.1.38", Comment: "cool comment"},
}
t.Run("should show empty state message if no allowed ips are found", func(t *testing.T) {
_, ui := mock.NewUI()
realmClient := mock.RealmClient{}
realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
return []realm.App{app}, nil
}
// An empty access list must surface as an error, not a silent no-op.
realmClient.AllowedIPsFn = func(groupID, appID string) ([]realm.AllowedIP, error) {
return nil, nil
}
cmd := &CommandDelete{deleteInputs{
ProjectInputs: cli.ProjectInputs{
Project: projectID,
App: appID,
},
}}
assert.Equal(t, errors.New("no IP addresses or CIDR blocks to delete"), cmd.Handler(nil, ui, cli.Clients{Realm: realmClient}))
})
// Table-driven: each case feeds addresses to delete, an optional per-delete
// error from the mock, and the exact table the UI is expected to print.
for _, tc := range []struct {
description string
testInput []string
expectedOutput string
deleteErr error
expectedAllowedIPIDs []string
}{
{
description: "should return successful outputs for proper inputs",
testInput: []string{"0.0.0.0", "192.1.1.1"},
expectedOutput: strings.Join([]string{
"Deleted 2 IP address(es) and/or CIDR block(s)",
" IP Address Comment Deleted Details",
" ---------- ------- ------- -------",
" 0.0.0.0 comment true ",
" 192.1.1.1 true ",
"",
}, "\n"),
expectedAllowedIPIDs: []string{"address1", "address2"},
},
{
description: "should output the errors for deletes on individual allowed ips",
testInput: []string{"0.0.0.0", "192.158.1.38"},
expectedOutput: strings.Join([]string{
"Deleted 2 IP address(es) and/or CIDR block(s)",
" IP Address Comment Deleted Details ",
" ------------ ------------ ------- ------------------",
" 0.0.0.0 comment false something happened",
" 192.158.1.38 cool comment false something happened",
"",
}, "\n"),
deleteErr: errors.New("something happened"),
expectedAllowedIPIDs: []string{"address1", "address3"},
},
} {
t.Run(tc.description, func(t *testing.T) {
realmClient := mock.RealmClient{}
// Capture the arguments of the LAST delete call plus every deleted ID.
var deleteArgs struct {
groupID, appID, allowedIPID string
}
var allowedIPIDs []string
realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
return []realm.App{app}, nil
}
realmClient.AllowedIPsFn = func(groupID, appID string) ([]realm.AllowedIP, error) {
return allowedIPs, nil
}
realmClient.AllowedIPDeleteFn = func(groupID, appID, allowedIPID string) error {
deleteArgs = struct {
groupID, appID, allowedIPID string
}{groupID, appID, allowedIPID}
allowedIPIDs = append(allowedIPIDs, allowedIPID)
return tc.deleteErr
}
cmd := &CommandDelete{deleteInputs{
cli.ProjectInputs{projectID, appID, nil},
tc.testInput,
}}
out, ui := mock.NewUI()
// Per-address delete failures are reported in the table, not via err.
assert.Nil(t, cmd.Handler(nil, ui, cli.Clients{Realm: realmClient}))
assert.Equal(t, tc.expectedOutput, out.String())
assert.Equal(t, "projectID", deleteArgs.groupID)
assert.Equal(t, "appID", deleteArgs.appID)
assert.Equal(t, tc.expectedAllowedIPIDs, allowedIPIDs)
})
}
t.Run("should return an error", func(t *testing.T) {
for _, tc := range []struct {
description string
realmClient func() realm.Client
testInput []string
expectedErr error
}{
{
description: "if there is an issue with finding allowed ips",
realmClient: func() realm.Client {
return mock.RealmClient{
FindAppsFn: func(filter realm.AppFilter) ([]realm.App, error) {
return []realm.App{app}, nil
},
AllowedIPsFn: func(groupID, appID string) ([]realm.AllowedIP, error) {
return nil, errors.New("something happened with allowed ips")
},
}
},
expectedErr: errors.New("something happened with allowed ips"),
},
{
description: "if there is no app",
realmClient: func() realm.Client {
return mock.RealmClient{
FindAppsFn: func(filter realm.AppFilter) ([]realm.App, error) {
return nil, errors.New("something went wrong with the app")
},
AllowedIPsFn: func(groupID, appID string) ([]realm.AllowedIP, error) {
return allowedIPs, nil
},
}
},
expectedErr: errors.New("something went wrong with the app"),
},
{
description: "if provided ip address or cidr block does not exist in access list",
realmClient: func() realm.Client {
return mock.RealmClient{
FindAppsFn: func(filter realm.AppFilter) ([]realm.App, error) {
return []realm.App{app}, nil
},
AllowedIPsFn: func(groupID, appID string) ([]realm.AllowedIP, error) {
return allowedIPs, nil
},
}
},
testInput: []string{"0.0.0.0", "0.0.0.1", "192.1.2.1"},
expectedErr: errors.New("unable to find IP address(es) and/or CIDR block(s): 0.0.0.1, 192.1.2.1"),
},
} {
t.Run(tc.description, func(t *testing.T) {
cmd := &CommandDelete{deleteInputs{
cli.ProjectInputs{projectID, appID, nil},
tc.testInput,
}}
err := cmd.Handler(nil, nil, cli.Clients{Realm: tc.realmClient()})
assert.Equal(t, tc.expectedErr, err)
})
}
})
}
// TestAllowedIPDeleteInputs covers deleteInputs.resolveAllowedIP: direct
// resolution when addresses are supplied, and the interactive multi-select
// prompt (driven through a VT10X console) when none are.
func TestAllowedIPDeleteInputs(t *testing.T) {
allowedIPs := []realm.AllowedIP{
{ID: "address1", Address: "0.0.0.0", Comment: "comment"},
{ID: "address2", Address: "192.1.1.1"},
{ID: "address3", Address: "192.158.1.38", Comment: "cool comment"},
}
t.Run("should return allowed ips when specified by address", func(t *testing.T) {
inputs := deleteInputs{
Addresses: []string{"0.0.0.0"},
}
// With explicit addresses no UI is needed, hence the nil ui argument.
allowedIPsResult, err := inputs.resolveAllowedIP(nil, allowedIPs)
assert.Nil(t, err)
assert.Equal(t, []realm.AllowedIP{allowedIPs[0]}, allowedIPsResult)
})
for _, tc := range []struct {
description string
selectedAllowedIPs []string
expectedOutput []realm.AllowedIP
}{
{
description: "allow single selection",
selectedAllowedIPs: []string{"192.1.1.1"},
expectedOutput: []realm.AllowedIP{allowedIPs[1]},
},
{
description: "allow multiple selections",
selectedAllowedIPs: []string{"0.0.0.0", "192.1.1.1", "192.158.1.38"},
expectedOutput: []realm.AllowedIP{allowedIPs[0], allowedIPs[1], allowedIPs[2]},
},
} {
t.Run("should prompt for allowed ips with no input: "+tc.description, func(t *testing.T) {
inputs := deleteInputs{}
_, console, _, ui, consoleErr := mock.NewVT10XConsole()
assert.Nil(t, consoleErr)
defer console.Close()
// Drive the prompt from a goroutine: type each address then a space
// (multi-select toggle), confirm with Enter, and wait for EOF.
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
console.ExpectString("Which IP Address(es) and/or CIDR block(s) would you like to delete?")
for _, selected := range tc.selectedAllowedIPs {
console.Send(selected)
console.Send(" ")
}
console.SendLine("")
console.ExpectEOF()
}()
allowedIPsResult, err := inputs.resolveAllowedIP(ui, allowedIPs)
// Close the tty so the console goroutine sees EOF before we join it.
console.Tty().Close()
<-doneCh
assert.Nil(t, err)
assert.Equal(t, tc.expectedOutput, allowedIPsResult)
})
}
}
|
package main
import (
"fmt"
"strings"
"os"
"image"
"image/gif"
"image/draw"
"golang.org/x/crypto/ssh/terminal"
"github.com/nfnt/resize"
"github.com/ivolo/go-image-to-ascii"
"image/color/palette"
"github.com/ivolo/go-giphy"
"errors"
"net/http"
)
func check(err error) {
if err != nil {
panic(err)
}
}
// main: search Giphy for the query given on the command line and render
// every frame of every matching GIF as ASCII art sized to the terminal.
func main() {
query := strings.Join(os.Args[1:], " ")
if len(query) == 0 {
fmt.Println("usage: giffy <query>")
os.Exit(1)
}
// Public beta API key that Giphy publishes for testing.
c := giphy.New("dc6zaTOxFJmzC")
gifs, err := c.Search(query)
check(err)
fmt.Printf("Found %d gifs for '%s'.\n", len(gifs), query)
if len(gifs) == 0 {
return
}
for _, g := range(gifs) {
// NOTE(review): this local `gif` shadows the image/gif package for the
// rest of the loop body; consider renaming if package access is needed.
gif, err := download(g.Images["original"].URL)
check(err)
// 1 = stdout file descriptor; frames are scaled to the terminal size.
ttyWidth, ttyHeight, err := terminal.GetSize(1)
check(err)
// fix inconsistent frame sizing with dealising
dealias(gif, uint(ttyWidth), uint(ttyHeight))
for _, img := range(gif.Image) {
resized := resize.Resize(uint(ttyWidth), uint(ttyHeight), img, resize.NearestNeighbor)
str := ascii.Convert(resized)
// NOTE(review): frames print back-to-back with no per-frame delay, so
// animations scroll rather than animate in place (see cursor-move
// escapes left commented out below).
fmt.Print(str)
// fmt.Printf("\x1b[%dA", ttyHeight) // move cursor up
// fmt.Printf("\x1b[%dD", ttyWidth) // move cursor left
// fmt.Printf("\x1b[%dF", ttyHeight) // move cursor to the beginning of the line
}
}
}
func download(url string) (*gif.GIF, error) {
//log.Printf("GET %s ..", url)
res, err := http.Get(url)
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return nil, errors.New(fmt.Sprintf("error response '%d'", res.StatusCode))
}
return gif.DecodeAll(res.Body)
}
func dealias(gif *gif.GIF, width uint, height uint) {
// TODO: add better dealiasing algorithm: http://stackoverflow.com/questions/9988517/resize-gif-animation-pil-imagemagick-python
// credit: https://github.com/dpup/go-scratch/blob/master/gif-resize/gif-resize.go#L3
// This demonstrates a solution to resizing animated gifs.
//
// Frames in an animated gif aren't necessarily the same size, subsequent
// frames are overlayed on previous frames. Therefore, resizing the frames
// individually may cause problems due to aliasing of transparent pixels. This
// example tries to avoid this by building frames from all previous frames and
// resizing the frames as RGB.
// Create a new RGBA image to hold the incremental frames.
firstFrame := gif.Image[0].Bounds()
b := image.Rect(0, 0, firstFrame.Dx(), firstFrame.Dy())
img := image.NewRGBA(b)
for index, frame := range gif.Image {
bounds := frame.Bounds()
draw.Draw(img, bounds, frame, bounds.Min, draw.Over)
resized := resize.Resize(width, height, img, resize.NearestNeighbor)
b2 := resized.Bounds()
pm := image.NewPaletted(b2, palette.Plan9)
draw.FloydSteinberg.Draw(pm, b2, resized, image.ZP)
gif.Image[index] = pm
}
} |
package profile
import (
"testing"
"reflect"
"s3-web-browser/server/go/domain/db"
)
// TestTransaction exercises DeleteByID, Insert, UpdateByID, SelectAll and
// SelectByID for the profile package inside a single transaction, which is
// rolled back at the end so the test leaves no rows behind.
func TestTransaction(t *testing.T) {
	conn, err := db.ConnectionForTest()
	if err != nil {
		t.Fatalf("failed test %#v", err)
		return
	}
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatalf("failed test %#v", err)
		return
	}
	baseProfiles := []Profile{
		{Profileid: "profile1", Profilename: "profname1", Connjson: `{"type":"accesskey"}`, Bucket: "bucket1", Basepath: "/1"},
		{Profileid: "profile2", Profilename: "profname2", Connjson: `{"type":"accesskey"}`, Bucket: "bucket2", Basepath: "/2"},
		{Profileid: "profile3", Profilename: "profname3", Connjson: `{"type":"accesskey"}`, Bucket: "bucket3", Basepath: "/3"},
	}
	// Clear leftovers from earlier runs, then insert fresh fixture rows.
	// Insert receives a per-iteration copy, as in the original, so any
	// mutation by Insert does not touch the slice used for comparison.
	for _, base := range baseProfiles {
		base := base
		if _, err := DeleteByID(tx, base.Profileid); err != nil {
			tx.Rollback()
			t.Fatalf("failed test %#v", err)
			return
		}
		if _, err := Insert(tx, &base); err != nil {
			tx.Rollback()
			t.Fatalf("failed test %#v", err)
			return
		}
	}
	// Update the second profile and verify the change round-trips.
	baseProfiles[1].Profilename = "profnamemod"
	baseProfiles[1].Connjson = `{"type":"ec2attached"}`
	baseProfiles[1].Bucket = "bucket2alpha"
	baseProfiles[1].Basepath = "/update"
	if _, err = UpdateByID(tx, &baseProfiles[1]); err != nil {
		tx.Rollback()
		t.Fatalf("failed test %#v", err)
		return
	}
	profiles, err := SelectAll(tx)
	if err != nil {
		tx.Rollback()
		t.Fatalf("failed test %#v", err)
		return
	}
	for _, profile := range profiles {
		var foundProfile *Profile
		// Index into the slice rather than taking the address of the range
		// variable, which aliased a single shared loop variable (gosec G601).
		for i := range baseProfiles {
			if profile.Profileid == baseProfiles[i].Profileid {
				foundProfile = &baseProfiles[i]
				break
			}
		}
		if foundProfile == nil {
			// SelectAll may see rows created outside this test; skip them.
			continue
		}
		if !reflect.DeepEqual(profile, *foundProfile) {
			t.Errorf("Actual: %s, but expected: %s", profile.String(), foundProfile.String())
		}
	}
	p1, err := SelectByID(tx, baseProfiles[1].Profileid)
	// The original dropped this error and dereferenced p1 unconditionally,
	// which panics when the lookup fails.
	if err != nil {
		tx.Rollback()
		t.Fatalf("failed test %#v", err)
		return
	}
	if !reflect.DeepEqual(p1, &baseProfiles[1]) {
		t.Errorf("Actual: %s, but expected: %s", p1.String(), baseProfiles[1].String())
	}
	tx.Rollback()
}
|
package assert
// ASSERT is a build-time switch intended to gate assertion checking; it is
// currently hard-wired off.
const ASSERT bool = false
// Assert is a no-op placeholder: it ignores b entirely. Presumably intended
// to act (e.g. panic) when ASSERT is true and b is false — TODO confirm.
func Assert(b bool) {}
|
package grpcutil
import (
"fmt"
xtr "github.com/brown-csci1380/tracing-framework-go/xtrace/client"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"os"
)
// XTraceServerInterceptor handles propagation of x-trace metadata around
// grpc unary server requests (pass as the ServerOption to grpc.NewServer).
// It extracts incoming metadata, logs entry/exit via the tracing client,
// and sends the updated trace metadata back in the response header.
// NOTE(review): the misspelled "Recieved" appears both in project helper
// names (GRPCRecieved) and in runtime log strings; fixing it requires a
// repo-wide rename, not a local edit.
var XTraceServerInterceptor grpc.UnaryServerInterceptor = func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
md, ok := metadata.FromIncomingContext(ctx)
//md, ok := metadata.FromContext(ctx)
if !ok {
// Missing metadata is tolerated: tracing degrades, the RPC still runs.
fmt.Fprintln(os.Stderr, "no metadata in request context.")
}
GRPCRecieved(md, fmt.Sprintf("Recieved %s, args: %s", info.FullMethod, req))
resp, err := handler(ctx, req)
if err != nil {
xtr.Logf("Returning from %s, error: %s", info.FullMethod, err.Error())
} else {
xtr.Logf("Returning from %s, response: %s", info.FullMethod, resp)
}
// NOTE(review): SetHeader's error is ignored here — best-effort tracing.
grpc.SetHeader(ctx, metadata.Pairs(GRPCMetadata()...))
return resp, err
}
// XTraceStreamServerInterceptor handles propagation of x-trace metadata
// around grpc server stream RPCs (pass as a ServerOption to grpc.NewServer).
var XTraceStreamServerInterceptor grpc.StreamServerInterceptor = func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
md, ok := metadata.FromIncomingContext(ss.Context())
if !ok {
fmt.Fprintln(os.Stderr, "no metadata in request context.")
}
GRPCRecieved(md, fmt.Sprintf("Recieved %s", info.FullMethod))
err := handler(srv, ss)
if err != nil {
xtr.Logf("Failed to create remote stream for %s, error: %s", info.FullMethod, err.Error())
} else {
xtr.Logf("Cread remote stream for %s, successful", info.FullMethod)
}
// Best-effort: SetHeader's error is intentionally not checked.
ss.SetHeader(metadata.Pairs(GRPCMetadata()...))
return err
}
// XTraceClientInterceptor handles propagation of x-trace metadata around
// grpc unary remote calls (pass as the argument to grpc.WithUnaryInterceptor).
// Trace metadata travels out in the call context and comes back in md.
var XTraceClientInterceptor grpc.UnaryClientInterceptor = func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
xtr.Logf("Calling %s, arg: %v", method, req)
var md metadata.MD
err := invoker(metadata.NewOutgoingContext(ctx, metadata.Pairs(GRPCMetadata()...)), method, req, reply, cc, append(opts, grpc.Header(&md))...)
GRPCReturned(md, fmt.Sprintf("Returned from remote %s, error: %v, value: %v", method, err, reply))
return err
}
// XTraceStreamClientInterceptor handles propagation of x-trace metadata
// around grpc stream calls (pass as the argument to grpc.WithStreamInterceptor).
var XTraceStreamClientInterceptor grpc.StreamClientInterceptor = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
xtr.Logf("Calling %s, desc: %v", method, desc)
var md metadata.MD
cs, err := streamer(metadata.NewOutgoingContext(ctx, metadata.Pairs(GRPCMetadata()...)), desc, cc, method, append(opts, grpc.Header(&md))...)
GRPCReturned(md, fmt.Sprintf("Recieved remote stream for %v: error: %v, stream: %v", method, err, cs))
return cs, err
}
|
package pathfileops
import (
"errors"
"fmt"
"os"
"time"
)
// FileInfoPlus - Conforms to the os.FileInfo interface. This structure will store
// os.FileInfo information plus additional information related to a file or directory.
//
type FileInfoPlus struct {
// isFInfoInitialized - Not part of FileInfo interface.
// 'true' = structure fields have been properly initialized
isFInfoInitialized bool
// isDirPathInitialized - Not part of FileInfo interface.
// 'true' = structure field 'dirPath' has been successfully initialized
isDirPathInitialized bool
// CreateTimeStamp - Not part of FileInfo interface.
// Date time at which this instance of Type 'FileInfoPlus' was initialized
CreateTimeStamp time.Time
dirPath string // Not part of FileInfo interface. Directory path associated with file name
fName string // FileInfo.Name() base name of the file
fSize int64 // FileInfo.Size() length in bytes for regular files; system-dependent for others
fMode os.FileMode // FileInfo.Mode() file mode bits
fModTime time.Time // FileInfo.ModTime() file modification time
isDir bool // FileInfo.IsDir() 'true'= this is a directory not a file
dataSrc interface{} // FileInfo.Sys() underlying data source (can return nil)
origFileInfo os.FileInfo // original os.FileInfo this instance was built from, if any
}
// Name - base name of the file.
// Example:
// Complete File Name: "newerFileForTest_01.txt"
// Base Name returned by Name(): "newerFileForTest_01.txt"
//
func (fip FileInfoPlus) Name() string {
return fip.fName
}
// Size - file length in bytes for regular files; system-dependent for others.
func (fip FileInfoPlus) Size() int64 {
return fip.fSize
}
// Mode - file mode bits. See os.FileMode
// A FileMode represents a file's mode and permission bits.
// The bits have the same definition on all systems, so that
// information about files can be moved from one system
// to another as a portable. Not all bits apply to all systems.
// The only required bit is ModeDir for directories.
//
// type FileMode uint32
//
// The defined file mode bits are the most significant bits of the FileMode.
// The nine least-significant bits are the standard Unix rwxrwxrwx permissions.
// The values of these bits should be considered part of the public API and
// may be used in wire protocols or disk representations: they must not be
// changed, although new bits might be added.
// const (
// // The single letters are the abbreviations
// // used by the String method's formatting.
// ModeDir FileMode = 1 << (32 - 1 - iota) // d: is a directory
// ModeAppend // a: append-only
// ModeExclusive // l: exclusive use
// ModeTemporary // T: temporary file; Plan 9 only
// ModeSymlink // L: symbolic link
// ModeDevice // D: device file
// ModeNamedPipe // p: named pipe (FIFO)
// ModeSocket // S: Unix domain socket
// ModeSetuid // u: setuid
// ModeSetgid // g: setgid
// ModeCharDevice // c: Unix character device, when ModeDevice is set
// ModeSticky // t: sticky
//
// // Mask for the type bits. For regular files, none will be set.
// ModeType = ModeDir | ModeSymlink | ModeNamedPipe | ModeSocket | ModeDevice
//
// ModePerm FileMode = 0777 // Unix permission bits
// )
//
func (fip FileInfoPlus) Mode() os.FileMode {
return fip.fMode
}
// ModTime - file modification time.
func (fip FileInfoPlus) ModTime() time.Time {
return fip.fModTime
}
// IsDir - 'true' = this is a directory,
// not a file.
//
// abbreviation for Mode().IsDir()
//
func (fip FileInfoPlus) IsDir() bool {
return fip.isDir
}
// Sys - underlying data source (can return nil).
func (fip FileInfoPlus) Sys() interface{} {
return fip.dataSrc
}
// SysAsString returns the underlying data source formatted with %v.
// When Sys is nil it returns the empty string.
//
// This method is NOT part of the FileInfo interface.
//
func (fip FileInfoPlus) SysAsString() string {
	if sysData := fip.dataSrc; sysData != nil {
		return fmt.Sprintf("%v", sysData)
	}
	return ""
}
// CopyOut - Creates a deep copy of the current FileInfoPlus
// instance and returns it.
//
// This method is NOT part of the FileInfo interface.
//
func (fip *FileInfoPlus) CopyOut() FileInfoPlus {
newInfo := FileInfoPlus{}
newInfo.SetName(fip.Name())
newInfo.SetSize(fip.Size())
newInfo.SetMode(fip.Mode())
newInfo.SetModTime(fip.ModTime())
newInfo.SetIsDir(fip.IsDir())
newInfo.SetSysDataSrc(fip.Sys())
// SetDirectoryPath rejects an empty path; the error is deliberately
// ignored so an unset dirPath simply stays unset on the copy.
_ = newInfo.SetDirectoryPath(fip.DirPath())
newInfo.isFInfoInitialized = fip.isFInfoInitialized
// Copy the original timestamp rather than stamping a new one.
newInfo.CreateTimeStamp = fip.CreateTimeStamp
newInfo.origFileInfo = fip.origFileInfo
return newInfo
}
// DirPath - Returns the directory path. This field, FileInfoPlus.dirPath,
// is not part of the standard FileInfo interface.
//
// This method is NOT part of the FileInfo interface.
//
func (fip *FileInfoPlus) DirPath() string {
return fip.dirPath
}
// Equal reports whether fip and fip2 describe the same file information:
// name, size, mode, modification time, directory flag, directory path and
// (string-formatted) Sys data must all match. Sys values are compared by
// their %v representation; both nil counts as equal, exactly one nil does not.
//
// This method is NOT part of the FileInfo interface.
//
func (fip *FileInfoPlus) Equal(fip2 *FileInfoPlus) bool {
	basicMatch := fip.Name() == fip2.Name() &&
		fip.Size() == fip2.Size() &&
		fip.Mode() == fip2.Mode() &&
		fip.ModTime() == fip2.ModTime() &&
		fip.IsDir() == fip2.IsDir() &&
		fip.DirPath() == fip2.DirPath()
	if !basicMatch {
		return false
	}
	sys1, sys2 := fip.Sys(), fip2.Sys()
	if sys1 == nil || sys2 == nil {
		// Equal only when BOTH are nil.
		return sys1 == sys2
	}
	return fmt.Sprintf("%v", sys1) == fmt.Sprintf("%v", sys2)
}
// Empty resets every field of the current FileInfoPlus instance to its
// zero value (false flags, empty strings, zero times, 0 size/mode, nil
// data source and nil original FileInfo).
//
// This method is NOT part of the FileInfo interface.
//
func (fip *FileInfoPlus) Empty() {
	// Assigning the zero-value struct clears every field at once; each
	// field's zero value matches the explicit resets used previously.
	*fip = FileInfoPlus{}
}
// GetOriginalFileInfo - If the FileInfoPlus instance was initialized
// with an os.FileInfo value, this method will return that original
// os.FileInfo value. This is useful for passing parameters to some
// low level go routines such as os.SameFile().
//
// This method is NOT part of the FileInfo interface.
//
func (fip *FileInfoPlus) GetOriginalFileInfo() os.FileInfo {
return fip.origFileInfo
}
// IsFileInfoInitialized - Returns a boolean value signaling whether
// this instance of FileInfoPlus has been initialized.
//
// A FileInfoPlus instance is properly initialized only if one of the
// following three methods is called:
//
// 1. FileInfoPlus.NewFromFileInfo()
// 2. FileInfoPlus.NewFromPathFileInfo()
// 3. FileInfoPlus.SetIsFInfoInitialized()
//
// This method is NOT part of the FileInfo interface.
//
func (fip *FileInfoPlus) IsFileInfoInitialized() bool {
return fip.isFInfoInitialized
}
// IsDirectoryPathInitialized - Returns a boolean value signaling whether
// the directory path has been initialized for this instance of the
// FileInfoPlus instance. FYI, the fields FileInfoPlus.isDirPathInitialized
// and FileInfoPlus.dirPath do NOT exist in a standard os.FileInfo object.
//
// A FileInfoPlus directory path is properly initialized only if one of
// the following two methods is called:
//
// 1. FileInfoPlus.NewFromPathFileInfo()
// 2. FileInfoPlus.SetDirectoryPath
//
// This method is NOT part of the FileInfo interface.
//
func (fip *FileInfoPlus) IsDirectoryPathInitialized() bool {
return fip.isDirPathInitialized
}
// NewFromDirMgrFileInfo - Creates and returns a new FileInfoPlus object
// populated with a Directory Manager (DirMgr) and File Info data (os.FileInfo)
// received from the input parameters 'dMgr' and 'info'.
//
// Returns an error when 'dMgr' fails its own validity check or 'info' is nil.
//
// This method is NOT part of the FileInfo interface.
//
// ------------------------------------------------------------------------
//
// Example Usage:
//
// fip, err := FileInfoPlus{}.NewFromDirMgrFileInfo(dMgr, info)
// fip is now configured as a newly populated FileInfoPlus instance.
//
func (fip FileInfoPlus) NewFromDirMgrFileInfo(
dMgr DirMgr,
info os.FileInfo) (FileInfoPlus, error) {
ePrefix := "FileInfoPlus.NewFromDirMgrFileInfo() "
err := dMgr.IsDirMgrValid("")
if err != nil {
return FileInfoPlus{},
fmt.Errorf(ePrefix + "ERROR: Input Parameter 'dMgr' is INVALID!\n" +
"%v", err.Error())
}
if info == nil {
return FileInfoPlus{},
errors.New(ePrefix + "ERROR: Input Parameter 'info' is nil !\n")
}
// Build from the FileInfo, then attach the DirMgr's absolute path.
newInfo := FileInfoPlus{}.NewFromFileInfo(info)
newInfo.dirPath = dMgr.GetAbsolutePath()
newInfo.isDirPathInitialized = true
return newInfo, nil
}
// NewFromFileInfo builds a new FileInfoPlus populated from the supplied
// os.FileInfo. The directory path is NOT set by this constructor; a nil
// 'info' yields an empty (uninitialized) FileInfoPlus.
//
// This method is NOT part of the FileInfo interface.
//
// ------------------------------------------------------------------------
//
// Example Usage:
//
// fip := FileInfoPlus{}.NewFromFileInfo(info)
// fip is now a newly populated FileInfoPlus instance.
//
func (fip FileInfoPlus) NewFromFileInfo(info os.FileInfo) FileInfoPlus {
	if info == nil {
		return FileInfoPlus{}
	}
	newInfo := FileInfoPlus{
		fName:        info.Name(),
		fSize:        info.Size(),
		fMode:        info.Mode(),
		fModTime:     info.ModTime(),
		isDir:        info.IsDir(),
		dataSrc:      info.Sys(),
		origFileInfo: info,
	}
	// Marks the instance initialized and stamps CreateTimeStamp.
	newInfo.SetIsFInfoInitialized(true)
	return newInfo
}
// NewFromPathFileInfo - Creates and returns a new FileInfoPlus object
// populated with directory path and FileInfo data received from
// the input parameters.
//
// Returns an error when 'dirPath' is empty/blank or 'info' is nil.
//
// This method is NOT part of the FileInfo interface.
//
// ------------------------------------------------------------------------
//
// Example Usage:
//
// fip, err := FileInfoPlus{}.NewFromPathFileInfo(dirPath, info)
// fip is now a newly populated FileInfoPlus instance.
//
func (fip FileInfoPlus) NewFromPathFileInfo(
dirPath string,
info os.FileInfo) (FileInfoPlus, error) {
ePrefix := "FileInfoPlus.NewFromPathFileInfo() "
errCode := 0
errCode,
_,
dirPath = FileHelper{}.isStringEmptyOrBlank(dirPath)
if errCode < 0 {
return FileInfoPlus{},
fmt.Errorf(ePrefix +
"\nError: Input parameter 'dirPath' is an EMPTY String!\n")
}
if info == nil {
return FileInfoPlus{},
errors.New(ePrefix + "ERROR: Input Parameter 'info' is nil !\n")
}
newInfo := FileInfoPlus{}.NewFromFileInfo(info)
newInfo.dirPath = dirPath
newInfo.isDirPathInitialized = true
return newInfo, nil
}
// SetDirectoryPath - Sets the dirPath field. This
// field is not part of the standard FileInfo data structure.
// The path is rejected (error returned) when empty or blank, and any
// trailing path separator is stripped before storing.
func (fip *FileInfoPlus) SetDirectoryPath(dirPath string) error {
fh := FileHelper{}
errCode := 0
errCode,
_,
dirPath = fh.isStringEmptyOrBlank(dirPath)
if errCode < 0 {
return fmt.Errorf("FileInfoPlus.SetDirectoryPath()\n" +
"Error: Input parameter 'dirPath' is an EMPTY String!\n")
}
dirPath = fh.RemovePathSeparatorFromEndOfPathString(dirPath)
fip.dirPath = dirPath
fip.isDirPathInitialized = true
return nil
}
// SetName - Sets the file name field.
func (fip *FileInfoPlus) SetName(name string) {
fip.fName = name
}
// SetSize - Sets the file size field.
func (fip *FileInfoPlus) SetSize(fileSize int64) {
fip.fSize = fileSize
}
// SetMode - Sets the file Mode.
func (fip *FileInfoPlus) SetMode(fileMode os.FileMode) {
fip.fMode = fileMode
}
// SetModTime - Sets the file modification time.
func (fip *FileInfoPlus) SetModTime(fileModTime time.Time) {
fip.fModTime = fileModTime
}
// SetIsDir - Sets the is-directory flag.
func (fip *FileInfoPlus) SetIsDir(isDir bool) {
fip.isDir = isDir
}
// SetSysDataSrc - Sets the dataSrc field (FileInfo.Sys() value).
func (fip *FileInfoPlus) SetSysDataSrc(sysDataSrc interface{}) {
fip.dataSrc = sysDataSrc
}
// SetIsFInfoInitialized sets the 'Is File Info Initialized' flag.
// Passing true records that all File Info fields are populated and stamps
// CreateTimeStamp with the current local time; passing false clears both
// the flag and the timestamp.
func (fip *FileInfoPlus) SetIsFInfoInitialized(isInitialized bool) {
	fip.isFInfoInitialized = isInitialized
	if isInitialized {
		fip.CreateTimeStamp = time.Now().Local()
	} else {
		fip.CreateTimeStamp = time.Time{}
	}
}
|
package main
import (
"flag"
"log"
"net/http"
"os"
"github.com/gorilla/context"
"github.com/gorilla/mux"
"github.com/matscus/Hamster/Mock/dadata/cache"
"github.com/matscus/Hamster/Mock/dadata/handlers"
)
// init warms the dadata response cache before main runs, so the first
// request does not pay the load cost.
func init() {
cache.LoadCache()
}
// Command-line options, bound to flags in main.
var (
listenport string // address to listen on (default ":9443")
mode string // "http" or "https" (default "https")
)
// main wires the dadata mock endpoints (fio / address / party search,
// each reachable via POST or a GET with meta_chanel+query params) onto a
// gorilla/mux router and serves them over HTTP or HTTPS depending on -mode.
func main() {
	flag.StringVar(&mode, "mode", "https", "server mode")
	flag.StringVar(&listenport, "port", ":9443", "port to Listen")
	flag.Parse()
	r := mux.NewRouter()
	r.HandleFunc("/api/v1/dadata/search/fio", handlers.GetFIO).Methods("POST")
	r.HandleFunc("/api/v1/dadata/search/fio", handlers.GetFIO).Methods("GET").Queries("meta_chanel", "{meta_chanel}", "query", "{query}")
	r.HandleFunc("/api/v1/dadata/search/address", handlers.GetAddress).Methods("POST")
	r.HandleFunc("/api/v1/dadata/search/address", handlers.GetAddress).Methods("GET").Queries("meta_chanel", "{meta_chanel}", "query", "{query}")
	r.HandleFunc("/api/v1/dadata/search/party", handlers.GetOrganization).Methods("POST")
	r.HandleFunc("/api/v1/dadata/search/party", handlers.GetOrganization).Methods("GET").Queries("meta_chanel", "{meta_chanel}", "query", "{query}")
	// Also mount on the default mux; the https branch serves through it.
	http.Handle("/api/v1/dadata/search/", r)
	switch mode {
	case "http":
		log.Printf("Listen to http on port %s", listenport)
		log.Fatal(http.ListenAndServe(listenport, r))
	case "https":
		log.Printf("Listen to https on port %s", listenport)
		// NOTE(review): cert path comes from env var SERVERREM — looks like it
		// may have been meant to read SERVERCERT; confirm against deployment.
		log.Fatal(http.ListenAndServeTLS(listenport, os.Getenv("SERVERREM"), os.Getenv("SERVERKEY"), context.ClearHandler(http.DefaultServeMux)))
	default:
		// Previously an unrecognized mode fell through the switch and the
		// process exited silently with status 0; fail loudly instead.
		log.Fatalf("unsupported mode %q: expected \"http\" or \"https\"", mode)
	}
}
|
package main
import (
"flag"
"fmt"
"github.com/APTrust/exchange/context"
"github.com/APTrust/exchange/models"
"github.com/APTrust/exchange/workers"
"os"
)
// apt_fetch receives messages from nsqd describing
// items in the S3 receiving buckets. It fetches and and validates
// tar files, then queues them for storage, if they validate successfully.
// main wires apt_fetch together: load the config named on the command
// line, build the worker context, create the NSQ consumer, attach the
// fetch handler, and block until the consumer stops.
func main() {
	pathToConfigFile := parseCommandLine()
	config, err := models.LoadConfigFile(pathToConfigFile)
	if err != nil {
		// Fprintln, not Fprintf(os.Stderr, err.Error()): the error text is
		// data, not a format string — a '%' in it would garble the output
		// (go vet printf check).
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	_context := context.NewContext(config)
	_context.MessageLog.Info("Connecting to NSQLookupd at %s", _context.Config.NsqLookupd)
	_context.MessageLog.Info("NSQDHttpAddress is %s", _context.Config.NsqdHttpAddress)
	consumer, err := workers.CreateNsqConsumer(_context.Config, &_context.Config.FetchWorker)
	if err != nil {
		// Same printf-safety fix: never pass error text as the format string.
		_context.MessageLog.Fatalf("%s", err.Error())
	}
	_context.MessageLog.Info("apt_fetch started")
	fetcher := workers.NewAPTFetcher(_context)
	consumer.AddHandler(fetcher)
	consumer.ConnectToNSQLookupd(_context.Config.NsqLookupd)
	// This read blocks until the consumer shuts down, keeping the process alive.
	<-consumer.StopChan
}
// parseCommandLine returns the value of the required -config flag.
// When the flag is absent it prints usage information and exits
// with a non-zero status.
func parseCommandLine() (configFile string) {
	configPath := flag.String("config", "", "Path to APTrust config file")
	flag.Parse()
	if *configPath == "" {
		printUsage()
		os.Exit(1)
	}
	return *configPath
}
// printUsage tells the user what this program does and how to invoke it.
// The binary is apt_fetch (see the usage line and the startup log message),
// so the heading uses that name rather than the stale "apt_fetch_and_validate".
func printUsage() {
	message := `
apt_fetch: Reads from NSQ to find which tar files are waiting
in the receiving buckets to be ingested. It fetches those files into the
local staging area, validates them, and pushes them into the record queue
if they are valid.
Usage: apt_fetch -config=<path to APTrust config file>
Param -config is required.
`
	fmt.Println(message)
}
|
package protocol
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// header is the shared fixture used by the encode/decode tests below.
// The field values are arbitrary but non-zero so that any field dropped
// or transposed during a round trip is visible in the assertions.
var header = &Header{
	OpCode:        OpCodeQuery,
	ResponseTo:    1,
	RequestID:     2,
	MessageLength: 300,
}
// TestHeader_Encode repeatedly encodes the shared header fixture, failing
// on any encode error, and prints a rough encodes-per-second figure.
func TestHeader_Encode(t *testing.T) {
	const iterations = 1000000
	begin := time.Now().UnixNano()
	for n := 0; n < iterations; n++ {
		if _, err := header.Encode(); err != nil {
			t.Error(err)
		}
	}
	elapsed := time.Now().UnixNano() - begin
	fmt.Println("encode ops:", 1e9*int64(iterations)/elapsed)
}
// TestHeader_Decode round-trips the shared header fixture through
// Encode/Decode, verifies every field survives, and then prints a rough
// decodes-per-second figure.
func TestHeader_Decode(t *testing.T) {
	bs, err := header.Encode()
	if err != nil {
		t.Error(err)
	}
	header2 := &Header{}
	// The original code ran assert.Error(t, err, ...) here, which PASSES
	// when err is non-nil — a decode failure was silently accepted.
	// A decode error must fail the test immediately.
	if err := header2.Decode(bs); err != nil {
		t.Fatalf("decode header failed: %v", err)
	}
	assert.Equal(t, header.OpCode, header2.OpCode, "decode OpCode failed")
	assert.Equal(t, header.ResponseTo, header2.ResponseTo, "decode ResponseTo failed")
	assert.Equal(t, header.RequestID, header2.RequestID, "decode RequestID failed")
	assert.Equal(t, header.MessageLength, header2.MessageLength, "decode MessageLength failed")
	totals := 1000000
	start := time.Now().UnixNano()
	for i := 0; i < totals; i++ {
		header2.Decode(bs)
	}
	cost := time.Now().UnixNano() - start
	fmt.Println("decode ops:", 1e9*int64(totals)/cost)
}
|
package olm
import (
"testing"
opregistry "github.com/operator-framework/operator-registry/pkg/registry"
"github.com/stretchr/testify/require"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
// TestLabelSetsFor exercises LabelSetsFor against nil input, a value that is
// neither an OperatorSurface nor a CRD, a CustomResourceDefinition, and
// operatorSurface values with provided and/or required APIs, checking the
// label sets produced in each case.
func TestLabelSetsFor(t *testing.T) {
	tests := []struct {
		name     string
		obj      interface{}
		expected []labels.Set
	}{
		{
			name:     "Nil/Nil",
			obj:      nil,
			expected: nil,
		},
		{
			// Unrecognized types yield no label sets rather than an error.
			name:     "NotOperatorSurfaceOrCRD/Nil",
			obj:      struct{ data string }{"some-data"},
			expected: nil,
		},
		{
			// A CRD produces both a "provided" and a "required" label set
			// for the API it defines.
			name: "CRD/ProvidedAndRequired",
			obj: &v1beta1.CustomResourceDefinition{
				TypeMeta: metav1.TypeMeta{
					Kind:       "CustomResourceDefinition",
					APIVersion: v1beta1.SchemeGroupVersion.String(),
				},
				ObjectMeta: metav1.ObjectMeta{
					Name: "Ghosts.ghouls",
				},
				Spec: v1beta1.CustomResourceDefinitionSpec{
					Group: "ghouls",
					Versions: []v1beta1.CustomResourceDefinitionVersion{
						{
							Name:    "v1alpha1",
							Storage: true,
							Served:  true,
						},
					},
					Names: v1beta1.CustomResourceDefinitionNames{
						Kind:   "Ghost",
						Plural: "Ghosts",
					},
				},
			},
			// The hex suffix is presumably the hash LabelSetsFor derives for
			// the ghouls/v1alpha1/Ghost API key — confirm against the hasher.
			expected: []labels.Set{
				{
					APILabelKeyPrefix + "6435ab0d7c6bda64": "provided",
				},
				{
					APILabelKeyPrefix + "6435ab0d7c6bda64": "required",
				},
			},
		},
		{
			name: "OperatorSurface/Provided",
			obj: operatorSurface{
				ProvidedAPIs: map[opregistry.APIKey]struct{}{
					{Group: "ghouls", Version: "v1alpha1", Kind: "Ghost", Plural: "Ghosts"}: {},
				},
			},
			expected: []labels.Set{
				{
					APILabelKeyPrefix + "6435ab0d7c6bda64": "provided",
				},
			},
		},
		{
			// Provided and required APIs of one surface are merged into a
			// single label set with one key per API.
			name: "OperatorSurface/ProvidedAndRequired",
			obj: operatorSurface{
				ProvidedAPIs: map[opregistry.APIKey]struct{}{
					{Group: "ghouls", Version: "v1alpha1", Kind: "Ghost", Plural: "Ghosts"}: {},
				},
				RequiredAPIs: map[opregistry.APIKey]struct{}{
					{Group: "ghouls", Version: "v1alpha1", Kind: "Goblin", Plural: "Goblins"}: {},
				},
			},
			expected: []labels.Set{
				{
					APILabelKeyPrefix + "6435ab0d7c6bda64": "provided",
					APILabelKeyPrefix + "557c9f42470aa352": "required",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			labelSets, err := LabelSetsFor(tt.obj)
			require.NoError(t, err)
			// Order of the returned sets is not significant.
			require.ElementsMatch(t, tt.expected, labelSets)
		})
	}
}
|
package spool
import (
"bytes"
"context"
"crypto/sha256"
"fmt"
"strings"
"time"
"cloud.google.com/go/spanner"
admin "cloud.google.com/go/spanner/admin/database/apiv1"
"github.com/cloudspannerecosystem/spool/model"
databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1"
)
// State represents a state of the database.
type State int64

const (
	// StateIdle represents a idle state.
	StateIdle State = iota
	// StateBusy represents a busy state.
	StateBusy
)

// Int64 returns s as int64.
func (s State) Int64() int64 {
	return int64(s)
}

// String returns a string representing the state.
// Unrecognized values map to "unknown".
func (s State) String() string {
	names := map[State]string{
		StateIdle: "idle",
		StateBusy: "busy",
	}
	if name, ok := names[s]; ok {
		return name
	}
	return "unknown"
}
// Pool represents a Spanner database pool.
type Pool struct {
	client      *spanner.Client             // data client for the pool's metadata database
	adminClient *admin.DatabaseAdminClient  // admin client used to create/drop pooled databases
	conf        *Config                     // instance/database configuration
	// ddlStatements are the schema statements applied to every pooled database.
	ddlStatements []string
	// checksum is the SHA-256 hex digest of the raw DDL; pooled databases are
	// matched by this value so schema changes never reuse stale databases.
	checksum string
}
// NewPool creates a new Pool.
// It establishes both a data client (for pool metadata) and a database
// admin client (for creating/dropping pooled databases), and precomputes
// the DDL statement list and its checksum from the raw ddl bytes.
func NewPool(ctx context.Context, conf *Config, ddl []byte) (*Pool, error) {
	client, err := spanner.NewClient(ctx, conf.Database(), conf.ClientOptions()...)
	if err != nil {
		return nil, err
	}
	adminClient, err := admin.NewDatabaseAdminClient(ctx, conf.ClientOptions()...)
	if err != nil {
		return nil, err
	}
	pool := &Pool{
		client:        client,
		adminClient:   adminClient,
		conf:          conf,
		ddlStatements: ddlToStatements(ddl),
		checksum:      checksum(ddl),
	}
	return pool, nil
}
// ddlToStatements splits raw DDL on ';' into individual statements,
// trimming surrounding whitespace and dropping empty fragments.
func ddlToStatements(ddl []byte) []string {
	chunks := bytes.Split(ddl, []byte(";"))
	stmts := make([]string, 0, len(chunks))
	for _, chunk := range chunks {
		trimmed := strings.TrimSpace(string(chunk))
		if trimmed == "" {
			continue
		}
		stmts = append(stmts, trimmed)
	}
	return stmts
}
// checksum returns the lowercase hex SHA-256 digest of ddl.
func checksum(ddl []byte) string {
	digest := sha256.Sum256(ddl)
	return fmt.Sprintf("%x", digest)
}
// Create creates a new database and adds to the pool.
// The database name is the given prefix plus the current Unix timestamp,
// and the new database starts in the idle state so Get can claim it.
func (p *Pool) Create(ctx context.Context, dbNamePrefix string) (*model.SpoolDatabase, error) {
	dbName := fmt.Sprintf("%s-%d", dbNamePrefix, time.Now().Unix())
	sdb := &model.SpoolDatabase{
		DatabaseName: dbName,
		Checksum:     p.checksum,
		State:        StateIdle.Int64(),
		// CommitTimestamp placeholders; create() replaces them with the
		// actual commit time once the metadata row is written.
		CreatedAt: spanner.CommitTimestamp,
		UpdatedAt: spanner.CommitTimestamp,
	}
	return p.create(ctx, sdb)
}
// create provisions the Spanner database described by sdb, applying the
// pool's DDL statements, then records it in the pool's metadata table.
// If recording fails, the freshly created database is dropped again
// (best effort) so no orphaned database is left behind.
func (p *Pool) create(ctx context.Context, sdb *model.SpoolDatabase) (*model.SpoolDatabase, error) {
	op, err := p.adminClient.CreateDatabase(ctx, &databasepb.CreateDatabaseRequest{
		Parent:          p.conf.Instance(),
		CreateStatement: fmt.Sprintf("CREATE DATABASE `%s`", sdb.DatabaseName),
		ExtraStatements: p.ddlStatements,
	})
	if err != nil {
		return nil, err
	}
	// CreateDatabase is a long-running operation; block until it completes.
	if _, err := op.Wait(ctx); err != nil {
		return nil, err
	}
	ts, err := p.client.Apply(ctx, []*spanner.Mutation{sdb.Insert(ctx)})
	if err != nil {
		// Roll back the database we just created; the drop error is
		// deliberately ignored because the insert error is more useful.
		_ = dropDatabase(ctx, p.conf.WithDatabaseID(sdb.DatabaseName))
		return nil, err
	}
	// Replace the CommitTimestamp placeholders with the real commit time.
	sdb.CreatedAt = ts
	sdb.UpdatedAt = ts
	return sdb, nil
}
// Get gets a idle database from the pool.
// The lookup and the idle->busy transition happen inside a single
// read-write transaction so two concurrent callers cannot claim the
// same database.
func (p *Pool) Get(ctx context.Context) (*model.SpoolDatabase, error) {
	var sdb *model.SpoolDatabase
	if _, err := p.client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
		var err error
		// Only databases created from the same DDL (same checksum) qualify.
		sdb, err = model.FindSpoolDatabaseByChecksumState(ctx, txn, p.checksum, StateIdle.Int64())
		if err != nil {
			return err
		}
		sdb.ChangeState(StateBusy.Int64())
		if err := txn.BufferWrite([]*spanner.Mutation{sdb.Update(ctx)}); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return nil, err
	}
	return sdb, nil
}
// GetOrCreate gets a idle database or creates a new database.
// Only a "not found" error from Get falls through to creation; any other
// error is returned as-is. Unlike Create, the new database starts in the
// busy state because it is handed straight to the caller.
func (p *Pool) GetOrCreate(ctx context.Context, dbNamePrefix string) (*model.SpoolDatabase, error) {
	sdb, err := p.Get(ctx)
	if err == nil {
		return sdb, nil
	}
	if !isErrNotFound(err) {
		return nil, err
	}
	dbName := fmt.Sprintf("%s-%d", dbNamePrefix, time.Now().Unix())
	sdb = &model.SpoolDatabase{
		DatabaseName: dbName,
		Checksum:     p.checksum,
		State:        StateBusy.Int64(),
		// CommitTimestamp placeholders, replaced by create() on success.
		CreatedAt: spanner.CommitTimestamp,
		UpdatedAt: spanner.CommitTimestamp,
	}
	return p.create(ctx, sdb)
}
// List gets all databases from the pool.
// The read-only transaction must be closed when done; the original code
// leaked it, which pins a Spanner session until the client is closed.
func (p *Pool) List(ctx context.Context) ([]*model.SpoolDatabase, error) {
	txn := p.client.ReadOnlyTransaction()
	defer txn.Close()
	return model.FindSpoolDatabasesByChecksum(ctx, txn, p.checksum)
}
// Put adds a database to the pool.
// It marks the named database idle again inside a read-write transaction
// so a concurrent Get can immediately claim it.
func (p *Pool) Put(ctx context.Context, dbName string) error {
	if _, err := p.client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
		sdb, err := model.FindSpoolDatabase(ctx, txn, dbName)
		if err != nil {
			return err
		}
		sdb.ChangeState(StateIdle.Int64())
		if err := txn.BufferWrite([]*spanner.Mutation{sdb.Update(ctx)}); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}
	return nil
}
// Clean removes all idle databases.
// Optional filters narrow the set: only databases for which every filter
// returns true are removed (see filter). Busy databases are never touched.
func (p *Pool) Clean(ctx context.Context, filters ...func(sdb *model.SpoolDatabase) bool) error {
	return clean(ctx, p.client, p.conf, func(ctx context.Context, txn *spanner.ReadWriteTransaction) ([]*model.SpoolDatabase, error) {
		// Select only idle databases matching this pool's DDL checksum.
		sdbs, err := model.FindSpoolDatabasesByChecksumState(ctx, txn, p.checksum, StateIdle.Int64())
		if err != nil {
			return nil, err
		}
		return filter(sdbs, filters...), nil
	})
}
|
package config
// Config is the application's top-level configuration, loaded from TOML.
type Config struct {
	// DB holds the settings from the [database] table.
	DB database `toml:"database"`
}

// database describes how to reach the backing database server.
// Fields carry no toml tags, so they are presumably matched by field
// name — verify against the TOML decoder in use.
type database struct {
	Server   string // host name or IP of the database server
	Port     int    // TCP port to connect to
	User     string // login user name
	Password string // login password
	DbName   string // name of the database/schema to use
}
|
package futures
import (
"testing"
"github.com/stretchr/testify/suite"
)
// accountServiceTestSuite groups the account-endpoint tests and inherits
// the mock HTTP client plumbing from baseTestSuite.
type accountServiceTestSuite struct {
	baseTestSuite
}

// TestAccountService is the go-test entry point that runs the suite.
func TestAccountService(t *testing.T) {
	suite.Run(t, new(accountServiceTestSuite))
}
// TestGetBalance feeds a canned balance JSON reply through the mocked
// client and checks that the request is signed and every field of the
// single returned Balance is parsed correctly.
func (s *accountServiceTestSuite) TestGetBalance() {
	data := []byte(`[
		{
			"accountAlias": "SgsR",
			"asset": "USDT",
			"balance": "122607.35137903",
			"crossWalletBalance": "23.72469206",
			"crossUnPnl": "0.00000000",
			"availableBalance": "23.72469206",
			"maxWithdrawAmount": "23.72469206"
		}
	]`)
	s.mockDo(data, nil)
	defer s.assertDo()
	// The balance endpoint takes no parameters beyond the signature.
	s.assertReq(func(r *request) {
		e := newSignedRequest()
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewGetBalanceService().Do(newContext())
	s.r().NoError(err)
	s.r().Len(res, 1)
	e := &Balance{
		AccountAlias:       "SgsR",
		Asset:              "USDT",
		Balance:            "122607.35137903",
		CrossWalletBalance: "23.72469206",
		CrossUnPnl:         "0.00000000",
		AvailableBalance:   "23.72469206",
		MaxWithdrawAmount:  "23.72469206",
	}
	s.assertBalanceEqual(e, res[0])
}
// assertBalanceEqual compares each field of two Balance values, labeling
// every mismatch with the field name for readable failures.
func (s *accountServiceTestSuite) assertBalanceEqual(e, a *Balance) {
	r := s.r()
	checks := []struct {
		name             string
		expected, actual interface{}
	}{
		{"AccountAlias", e.AccountAlias, a.AccountAlias},
		{"Asset", e.Asset, a.Asset},
		{"Balance", e.Balance, a.Balance},
		{"CrossWalletBalance", e.CrossWalletBalance, a.CrossWalletBalance},
		{"CrossUnPnl", e.CrossUnPnl, a.CrossUnPnl},
		{"AvailableBalance", e.AvailableBalance, a.AvailableBalance},
		{"MaxWithdrawAmount", e.MaxWithdrawAmount, a.MaxWithdrawAmount},
	}
	for _, c := range checks {
		r.Equal(c.expected, c.actual, c.name)
	}
}
// TestGetAccount feeds a full account JSON fixture (assets, positions and
// account-level totals) through the mocked client and verifies the parsed
// Account matches field-for-field.
func (s *accountServiceTestSuite) TestGetAccount() {
	data := []byte(`{
		"assets": [
			{
				"asset": "USDT",
				"initialMargin": "0.33683000",
				"maintMargin": "0.02695000",
				"marginBalance": "8.74947592",
				"maxWithdrawAmount": "8.41264592",
				"openOrderInitialMargin": "0.00000000",
				"positionInitialMargin": "0.33683000",
				"unrealizedProfit": "-0.44537584",
				"walletBalance": "9.19485176",
				"crossWalletBalance": "23.72469206",
				"crossUnPnl": "0.00000000",
				"availableBalance": "126.72469206",
				"marginAvailable": true,
				"updateTime": 1625474304765
			}
		],
		"canDeposit": true,
		"canTrade": true,
		"canWithdraw": true,
		"feeTier": 2,
		"maxWithdrawAmount": "8.41264592",
		"multiAssetsMargin": false,
		"positions": [
			{
				"isolated": false,
				"leverage": "20",
				"initialMargin": "0.33683",
				"maintMargin": "0.02695",
				"openOrderInitialMargin": "0.00000",
				"positionInitialMargin": "0.33683",
				"symbol": "BTCUSDT",
				"unrealizedProfit": "-0.44537584",
				"entryPrice": "8950.5",
				"maxNotional": "250000",
				"positionSide": "BOTH",
				"positionAmt": "0.436",
				"bidNotional": "0",
				"askNotional": "0",
				"updateTime":1618646402359
			}
		],
		"totalInitialMargin": "0.33683000",
		"totalMaintMargin": "0.02695000",
		"totalMarginBalance": "8.74947592",
		"totalOpenOrderInitialMargin": "0.00000000",
		"totalPositionInitialMargin": "0.33683000",
		"totalUnrealizedProfit": "-0.44537584",
		"totalWalletBalance": "9.19485176",
		"updateTime": 0
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	// The account endpoint takes no parameters beyond the signature.
	s.assertReq(func(r *request) {
		e := newSignedRequest()
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewGetAccountService().Do(newContext())
	s.r().NoError(err)
	// Expected value mirrors the JSON fixture above exactly.
	e := &Account{
		Assets: []*AccountAsset{
			{
				Asset:                  "USDT",
				InitialMargin:          "0.33683000",
				MaintMargin:            "0.02695000",
				MarginBalance:          "8.74947592",
				MaxWithdrawAmount:      "8.41264592",
				OpenOrderInitialMargin: "0.00000000",
				PositionInitialMargin:  "0.33683000",
				UnrealizedProfit:       "-0.44537584",
				WalletBalance:          "9.19485176",
				CrossWalletBalance:     "23.72469206",
				CrossUnPnl:             "0.00000000",
				AvailableBalance:       "126.72469206",
				MarginAvailable:        true,
				UpdateTime:             1625474304765,
			},
		},
		CanTrade:          true,
		CanWithdraw:       true,
		CanDeposit:        true,
		FeeTier:           2,
		MaxWithdrawAmount: "8.41264592",
		MultiAssetsMargin: false,
		Positions: []*AccountPosition{
			{
				Isolated:               false,
				Leverage:               "20",
				InitialMargin:          "0.33683",
				MaintMargin:            "0.02695",
				OpenOrderInitialMargin: "0.00000",
				PositionInitialMargin:  "0.33683",
				Symbol:                 "BTCUSDT",
				UnrealizedProfit:       "-0.44537584",
				EntryPrice:             "8950.5",
				MaxNotional:            "250000",
				PositionSide:           "BOTH",
				PositionAmt:            "0.436",
				BidNotional:            "0",
				AskNotional:            "0",
				UpdateTime:             1618646402359,
			},
		},
		TotalInitialMargin:          "0.33683000",
		TotalMaintMargin:            "0.02695000",
		TotalMarginBalance:          "8.74947592",
		TotalOpenOrderInitialMargin: "0.00000000",
		TotalPositionInitialMargin:  "0.33683000",
		TotalUnrealizedProfit:       "-0.44537584",
		TotalWalletBalance:          "9.19485176",
		UpdateTime:                  0,
	}
	s.assertAccountEqual(e, res)
}
// assertAccountEqual compares two Accounts field-by-field — account-level
// flags and totals first, then every asset and every position in order —
// labeling each assertion so a failure names the offending field.
func (s *accountServiceTestSuite) assertAccountEqual(e, a *Account) {
	r := s.r()
	r.Equal(e.CanDeposit, a.CanDeposit, "CanDeposit")
	r.Equal(e.CanTrade, a.CanTrade, "CanTrade")
	r.Equal(e.CanWithdraw, a.CanWithdraw, "CanWithdraw")
	r.Equal(e.FeeTier, a.FeeTier, "FeeTier")
	r.Equal(e.MaxWithdrawAmount, a.MaxWithdrawAmount, "MaxWithdrawAmount")
	r.Equal(e.TotalInitialMargin, a.TotalInitialMargin, "TotalInitialMargin")
	r.Equal(e.TotalMaintMargin, a.TotalMaintMargin, "TotalMaintMargin")
	r.Equal(e.TotalMarginBalance, a.TotalMarginBalance, "TotalMarginBalance")
	r.Equal(e.TotalOpenOrderInitialMargin, a.TotalOpenOrderInitialMargin, "TotalOpenOrderInitialMargin")
	r.Equal(e.TotalPositionInitialMargin, a.TotalPositionInitialMargin, "TotalPositionInitialMargin")
	r.Equal(e.TotalUnrealizedProfit, a.TotalUnrealizedProfit, "TotalUnrealizedProfit")
	r.Equal(e.TotalWalletBalance, a.TotalWalletBalance, "TotalWalletBalance")
	r.Equal(e.UpdateTime, a.UpdateTime, "UpdateTime")
	r.Equal(e.MultiAssetsMargin, a.MultiAssetsMargin, "MultiAssetsMargin")
	// Length checks come first; the index loops below assume equal lengths.
	r.Len(a.Assets, len(e.Assets))
	for i := 0; i < len(a.Assets); i++ {
		r.Equal(e.Assets[i].Asset, a.Assets[i].Asset, "Asset")
		r.Equal(e.Assets[i].InitialMargin, a.Assets[i].InitialMargin, "InitialMargin")
		r.Equal(e.Assets[i].MaintMargin, a.Assets[i].MaintMargin, "MaintMargin")
		r.Equal(e.Assets[i].MarginBalance, a.Assets[i].MarginBalance, "MarginBalance")
		r.Equal(e.Assets[i].MaxWithdrawAmount, a.Assets[i].MaxWithdrawAmount, "MaxWithdrawAmount")
		r.Equal(e.Assets[i].OpenOrderInitialMargin, a.Assets[i].OpenOrderInitialMargin, "OpenOrderInitialMargin")
		r.Equal(e.Assets[i].PositionInitialMargin, a.Assets[i].PositionInitialMargin, "PositionInitialMargin")
		r.Equal(e.Assets[i].UnrealizedProfit, a.Assets[i].UnrealizedProfit, "UnrealizedProfit")
		r.Equal(e.Assets[i].WalletBalance, a.Assets[i].WalletBalance, "WalletBalance")
		r.Equal(e.Assets[i].CrossWalletBalance, a.Assets[i].CrossWalletBalance, "CrossWalletBalance")
		r.Equal(e.Assets[i].CrossUnPnl, a.Assets[i].CrossUnPnl, "CrossUnPnl")
		r.Equal(e.Assets[i].AvailableBalance, a.Assets[i].AvailableBalance, "AvailableBalance")
		r.Equal(e.Assets[i].MarginAvailable, a.Assets[i].MarginAvailable, "MarginAvailable")
		r.Equal(e.Assets[i].UpdateTime, a.Assets[i].UpdateTime, "UpdateTime")
	}
	r.Len(a.Positions, len(e.Positions))
	for i := 0; i < len(a.Positions); i++ {
		r.Equal(e.Positions[i].Isolated, a.Positions[i].Isolated, "Isolated")
		r.Equal(e.Positions[i].Leverage, a.Positions[i].Leverage, "Leverage")
		r.Equal(e.Positions[i].InitialMargin, a.Positions[i].InitialMargin, "InitialMargin")
		r.Equal(e.Positions[i].MaintMargin, a.Positions[i].MaintMargin, "MaintMargin")
		r.Equal(e.Positions[i].OpenOrderInitialMargin, a.Positions[i].OpenOrderInitialMargin, "OpenOrderInitialMargin")
		r.Equal(e.Positions[i].PositionInitialMargin, a.Positions[i].PositionInitialMargin, "PositionInitialMargin")
		r.Equal(e.Positions[i].Symbol, a.Positions[i].Symbol, "Symbol")
		r.Equal(e.Positions[i].UnrealizedProfit, a.Positions[i].UnrealizedProfit, "UnrealizedProfit")
		r.Equal(e.Positions[i].EntryPrice, a.Positions[i].EntryPrice, "EntryPrice")
		r.Equal(e.Positions[i].MaxNotional, a.Positions[i].MaxNotional, "MaxNotional")
		r.Equal(e.Positions[i].PositionSide, a.Positions[i].PositionSide, "PositionSide")
		r.Equal(e.Positions[i].PositionAmt, a.Positions[i].PositionAmt, "PositionAmt")
		r.Equal(e.Positions[i].BidNotional, a.Positions[i].BidNotional, "BidNotional")
		r.Equal(e.Positions[i].AskNotional, a.Positions[i].AskNotional, "AskNotional")
		r.Equal(e.Positions[i].UpdateTime, a.Positions[i].UpdateTime, "UpdateTime")
	}
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package staleread_test
import (
"context"
"fmt"
"testing"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/sessiontxn/staleread"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/oracle"
)
// TestStaleReadTxnScope checks that a stale-read transaction provider
// always reports the global txn scope, both with and without an injected
// local ("bj") scope and with tidb_enable_local_txn switched on.
func TestStaleReadTxnScope(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	// Checks both the non-explicit and explicit-txn provider variants.
	checkProviderTxnScope := func() {
		provider := createStaleReadProvider(t, tk, false)
		require.Equal(t, kv.GlobalTxnScope, provider.GetTxnScope())
		provider = createStaleReadProvider(t, tk, true)
		require.Equal(t, kv.GlobalTxnScope, provider.GetTxnScope())
		tk.MustExec("rollback")
	}
	checkProviderTxnScope()
	// Inject a local txn scope via failpoint; stale reads must still be global.
	require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", fmt.Sprintf(`return("%v")`, "bj")))
	defer func() {
		require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope"))
	}()
	checkProviderTxnScope()
	tk.MustExec("set @@global.tidb_enable_local_txn=1")
	tk.MustExec("rollback")
	// Fresh session so the new global variable takes effect.
	tk = testkit.NewTestKit(t, store)
	checkProviderTxnScope()
}
// TestStaleReadReplicaReadScope checks that the provider's replica-read
// scope is global by default but follows an injected local scope ("bj"),
// unlike the txn scope tested above.
func TestStaleReadReplicaReadScope(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	// Checks both the non-explicit and explicit-txn provider variants.
	checkProviderReplicaReadScope := func(scope string) {
		provider := createStaleReadProvider(t, tk, false)
		require.Equal(t, scope, provider.GetReadReplicaScope())
		provider = createStaleReadProvider(t, tk, true)
		require.Equal(t, scope, provider.GetReadReplicaScope())
		tk.MustExec("rollback")
	}
	checkProviderReplicaReadScope(kv.GlobalReplicaScope)
	require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", fmt.Sprintf(`return("%v")`, "bj")))
	defer func() {
		require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope"))
	}()
	checkProviderReplicaReadScope("bj")
}
// createStaleReadProvider installs a staleness txn context provider on the
// test session at the current oracle timestamp and returns it. With
// explicitTxn it goes through a BEGIN-statement transaction request;
// otherwise it replaces the provider directly using a snapshot infoschema.
func createStaleReadProvider(t *testing.T, tk *testkit.TestKit, explicitTxn bool) *staleread.StalenessTxnContextProvider {
	// Reset any transaction state left over from a previous call.
	tk.MustExec("rollback")
	require.NoError(t, tk.Session().PrepareTxnCtx(context.TODO()))
	se := tk.Session()
	ts := getOracleTS(t, se)
	if explicitTxn {
		err := sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{
			Type:        sessiontxn.EnterNewTxnWithBeginStmt,
			StaleReadTS: ts,
		})
		require.NoError(t, err)
	} else {
		is, err := domain.GetDomain(se).GetSnapshotInfoSchema(ts)
		require.NoError(t, err)
		err = sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{
			Type:     sessiontxn.EnterNewTxnWithReplaceProvider,
			Provider: staleread.NewStalenessTxnContextProvider(se, ts, is),
		})
		require.NoError(t, err)
	}
	return sessiontxn.GetTxnManager(se).GetContextProvider().(*staleread.StalenessTxnContextProvider)
}
// getOracleTS fetches a fresh timestamp from the store's oracle in the
// global txn scope, failing the test on error.
func getOracleTS(t testing.TB, sctx sessionctx.Context) uint64 {
	ts, err := sctx.GetStore().GetOracle().GetTimestamp(context.TODO(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
	require.NoError(t, err)
	return ts
}
|
package blocker
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// UnknownBlockerTestSuite is the test suite for UnknownBlocker unit tests.
type UnknownBlockerTestSuite struct {
	suite.Suite
	// blockerPool is shared by all tests; built once in SetupSuite.
	blockerPool *BlockerPool
}
// SetupSuite prepares the test environment: it loads the blocker
// configuration and builds the shared blocker pool. The original code
// discarded both errors, so a broken config surfaced later as a nil
// pool and confusing panics; fail fast instead.
func (suite *UnknownBlockerTestSuite) SetupSuite() {
	configDoc, err := LoadConfig(blockerConfigPath)
	suite.Require().NoError(err, "load blocker config")
	suite.blockerPool, err = NewBlockerPool(configDoc, blockerDBFile)
	suite.Require().NoError(err, "create blocker pool")
}
// TestIsMacBlocked checks whether the zone/mac pair is on the block list.
func (suite *UnknownBlockerTestSuite) TestIsMacBlocked() {
	const (
		clientID = "A123456789"
		mac      = "mac2"
		zone     = "CN"
	)
	b := suite.blockerPool.GetBlocker(clientID)
	assert.True(suite.T(), b.IsMacBlocked(mac, zone))
}
// TestIsIPBlocked checks whether the IP address is on the block list.
func (suite *UnknownBlockerTestSuite) TestIsIPBlocked() {
	const (
		clientID = "A123456789"
		ip       = "121.0.0.10"
	)
	b := suite.blockerPool.GetBlocker(clientID)
	assert.True(suite.T(), b.IsIPBlocked(ip))
}
// TestIgnoreIPCheck checks whether the mac is exempt from IP filtering.
func (suite *UnknownBlockerTestSuite) TestIgnoreIPCheck() {
	const (
		clientID = "A123456789"
		mac      = "mac3"
	)
	b := suite.blockerPool.GetBlocker(clientID)
	assert.False(suite.T(), b.IgnoreIPCheck(mac))
}
// TestUnknownBlockerTestSuite is the go-test entry point for the suite.
func TestUnknownBlockerTestSuite(t *testing.T) {
	suite.Run(t, new(UnknownBlockerTestSuite))
}
|
package shardkv
import (
"sync"
"umich.edu/eecs491/proj4/shardmaster"
)
// Clerk is a client for the sharded key/value service.
type Clerk struct {
	// mu guards clerk state; used by ClerkImpl methods (not in this view).
	mu sync.Mutex
	// sm talks to the shardmaster to learn the current shard configuration.
	sm *shardmaster.Clerk
	// impl holds the student-provided clerk implementation state.
	impl ClerkImpl
}
// MakeClerk builds a Clerk wired to the given shardmaster replicas and
// initializes its implementation-specific state.
func MakeClerk(shardmasters []string) *Clerk {
	ck := &Clerk{
		sm: shardmaster.MakeClerk(shardmasters),
	}
	ck.InitImpl()
	return ck
}
// Put stores value under key, delegating to PutAppend with op "Put".
func (ck *Clerk) Put(key string, value string) {
	ck.PutAppend(key, value, "Put")
}
// Append appends value to key's existing value, delegating to PutAppend
// with op "Append".
func (ck *Clerk) Append(key string, value string) {
	ck.PutAppend(key, value, "Append")
}
|
package aggregatedrange
import (
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"testing"
	"time"

	"incognito-chain/common"
	"incognito-chain/privacy/operation"

	"github.com/stretchr/testify/assert"
)
// TestMain silences log output for the whole package and propagates the
// test run's exit status. The original discarded m.Run()'s result, which
// on pre-1.15 Go toolchains makes the binary exit 0 even when tests fail.
func TestMain(m *testing.M) {
	log.SetOutput(ioutil.Discard)
	os.Exit(m.Run())
}
// Package-level initializer trick: this anonymous function runs during
// variable initialization, i.e. before init() and before any test, so the
// package logger is ready for all tests below.
var _ = func() (_ struct{}) {
	fmt.Println("This runs before init()!")
	Logger.Init(common.NewBackend(nil).Logger("test", true))
	return
}()
func TestPad(t *testing.T) {
data := []struct {
number int
paddedNumber int
}{
{1000, 1024},
{3, 4},
{5, 8},
}
for _, item := range data {
num := pad(item.number)
assert.Equal(t, item.paddedNumber, num)
}
}
// TestPowerVector checks that powerVector(base, n) returns exactly n scalars.
func TestPowerVector(t *testing.T) {
	twoVector := powerVector(new(operation.Scalar).FromUint64(2), 5)
	assert.Equal(t, 5, len(twoVector))
}
// TestInnerProduct cross-checks the scalar innerProduct implementation
// against a plain uint64 dot product over random inputs. Operands are
// bounded (< 1e8 each, maxExp terms) so the uint64 accumulator cannot
// overflow.
func TestInnerProduct(t *testing.T) {
	for j := 0; j < 100; j++ {
		n := maxExp
		a := make([]*operation.Scalar, n)
		b := make([]*operation.Scalar, n)
		uinta := make([]uint64, n)
		uintb := make([]uint64, n)
		uintc := uint64(0)
		for i := 0; i < n; i++ {
			uinta[i] = uint64(rand.Intn(100000000))
			uintb[i] = uint64(rand.Intn(100000000))
			a[i] = new(operation.Scalar).FromUint64(uinta[i])
			b[i] = new(operation.Scalar).FromUint64(uintb[i])
			uintc += uinta[i] * uintb[i]
		}
		c, _ := innerProduct(a, b)
		assert.Equal(t, new(operation.Scalar).FromUint64(uintc), c)
	}
}
// TestEncodeVectors verifies that encodeVectors(a, b, G, H) equals the
// naive sum of a[i]*G[i] + b[i]*H[i] computed point-by-point.
func TestEncodeVectors(t *testing.T) {
	for i := 0; i < 100; i++ {
		var AggParam = newBulletproofParams(1)
		n := maxExp
		a := make([]*operation.Scalar, n)
		b := make([]*operation.Scalar, n)
		G := make([]*operation.Point, n)
		H := make([]*operation.Point, n)
		for i := range a {
			a[i] = operation.RandomScalar()
			b[i] = operation.RandomScalar()
			G[i] = new(operation.Point).Set(AggParam.g[i])
			H[i] = new(operation.Point).Set(AggParam.h[i])
		}
		actualRes, err := encodeVectors(a, b, G, H)
		if err != nil {
			// NOTE(review): Info is given a printf-style format here; if the
			// logger is not printf-style this prints the verb literally — confirm.
			Logger.Log.Info("Err: %v\n", err)
		}
		expectedRes := new(operation.Point).Identity()
		for i := 0; i < n; i++ {
			expectedRes.Add(expectedRes, new(operation.Point).ScalarMult(G[i], a[i]))
			expectedRes.Add(expectedRes, new(operation.Point).ScalarMult(H[i], b[i]))
		}
		assert.Equal(t, expectedRes, actualRes)
	}
}
// TestAggregatedRangeProveVerify runs the full aggregated range-proof
// lifecycle: prove over random values, verify, sanity-check, serialize,
// check the size estimate, deserialize, and verify again.
func TestAggregatedRangeProveVerify(t *testing.T) {
	for i := 0; i < 10; i++ {
		//prepare witness for Aggregated range protocol
		wit := new(AggregatedRangeWitness)
		numValue := rand.Intn(maxOutputNumber)
		values := make([]uint64, numValue)
		rands := make([]*operation.Scalar, numValue)
		for i := range values {
			values[i] = uint64(rand.Uint64())
			rands[i] = operation.RandomScalar()
		}
		wit.Set(values, rands)
		// proving
		proof, err := wit.Prove()
		assert.Equal(t, nil, err)
		// verify the proof
		res, err := proof.Verify()
		assert.Equal(t, true, res)
		assert.Equal(t, nil, err)
		// validate sanity for proof
		isValidSanity := proof.ValidateSanity()
		assert.Equal(t, true, isValidSanity)
		// convert proof to bytes array and check it matches the size estimate
		bytes := proof.Bytes()
		expectProofSize := EstimateMultiRangeProofSize(numValue)
		assert.Equal(t, int(expectProofSize), len(bytes))
		// new aggregatedRangeProof from bytes array
		// (SetBytes error is ignored here; a failure would surface as a
		// failed Verify below)
		proof2 := new(AggregatedRangeProof)
		proof2.SetBytes(bytes)
		// verify the proof
		res, err = proof2.Verify()
		assert.Equal(t, true, res)
		assert.Equal(t, nil, err)
	}
}
// TestAggregatedRangeProveVerifyUltraFast builds a batch of valid proofs
// and checks that the batched verifier accepts them all at once.
func TestAggregatedRangeProveVerifyUltraFast(t *testing.T) {
	count := 10
	proofs := make([]*AggregatedRangeProof, 0)
	for i := 0; i < count; i++ {
		//prepare witness for Aggregated range protocol
		wit := new(AggregatedRangeWitness)
		numValue := rand.Intn(maxOutputNumber)
		values := make([]uint64, numValue)
		rands := make([]*operation.Scalar, numValue)
		for i := range values {
			values[i] = uint64(rand.Uint64())
			rands[i] = operation.RandomScalar()
		}
		wit.Set(values, rands)
		// proving
		proof, err := wit.Prove()
		assert.Equal(t, nil, err)
		proofs = append(proofs, proof)
	}
	// verify the whole batch in one pass (faster than one-by-one)
	res, err, _ := VerifyBatchingAggregatedRangeProofs(proofs)
	assert.Equal(t, true, res)
	assert.Equal(t, nil, err)
}
// TestBenchmarkAggregatedRangeProveVerifyUltraFast compares the cumulative
// cost of verifying k proofs one-by-one (t1) against a single batched
// verification, printing both timings per batch size. Uses time.Since
// instead of time.Now().Sub (staticcheck S1012); timings are identical.
func TestBenchmarkAggregatedRangeProveVerifyUltraFast(t *testing.T) {
	for k := 1; k < 100; k += 5 {
		count := k
		proofs := make([]*AggregatedRangeProof, 0)
		start := time.Now()
		t1 := time.Since(start) // zero-duration accumulator for per-proof verify time
		for i := 0; i < count; i++ {
			//prepare witness for Aggregated range protocol
			wit := new(AggregatedRangeWitness)
			//numValue := rand.Intn(maxOutputNumber)
			numValue := 2
			values := make([]uint64, numValue)
			rands := make([]*operation.Scalar, numValue)
			for i := range values {
				values[i] = uint64(rand.Uint64())
				rands[i] = operation.RandomScalar()
			}
			wit.Set(values, rands)
			// proving
			proof, err := wit.Prove()
			assert.Equal(t, nil, err)
			// time an individual verification and accumulate it
			start := time.Now()
			proof.Verify()
			t1 += time.Since(start)
			proofs = append(proofs, proof)
		}
		// verify the proof faster: one batched pass over all k proofs
		start = time.Now()
		res, err, _ := VerifyBatchingAggregatedRangeProofs(proofs)
		fmt.Println(t1, time.Since(start), k)
		assert.Equal(t, true, res)
		assert.Equal(t, nil, err)
	}
}
// TestInnerProductProveVerify builds a well-formed inner-product witness
// (commitment p = <a,b>*u + sum a[i]*g[i] + sum b[i]*h[i]), proves it,
// verifies, then round-trips the proof through Bytes/SetBytes and
// verifies again (twice, checking verification is repeatable).
func TestInnerProductProveVerify(t *testing.T) {
	for k := 0; k < 1; k++ {
		numValue := rand.Intn(maxOutputNumber)
		numValuePad := pad(numValue)
		// Slice the global parameters down to the padded witness size.
		aggParam := new(bulletproofParams)
		aggParam.g = AggParam.g[0 : numValuePad*maxExp]
		aggParam.h = AggParam.h[0 : numValuePad*maxExp]
		aggParam.u = AggParam.u
		aggParam.cs = AggParam.cs
		wit := new(InnerProductWitness)
		n := maxExp * numValuePad
		wit.a = make([]*operation.Scalar, n)
		wit.b = make([]*operation.Scalar, n)
		for i := range wit.a {
			wit.a[i] = new(operation.Scalar).FromUint64(uint64(rand.Intn(1000000)))
			wit.b[i] = new(operation.Scalar).FromUint64(uint64(rand.Intn(1000000)))
		}
		c, err := innerProduct(wit.a, wit.b)
		if err != nil {
			Logger.Log.Info("Err: %v\n", err)
		}
		// Build the commitment the proof must open: p = c*u + Σ a[i]g[i] + Σ b[i]h[i].
		wit.p = new(operation.Point).ScalarMult(aggParam.u, c)
		for i := range wit.a {
			wit.p.Add(wit.p, new(operation.Point).ScalarMult(aggParam.g[i], wit.a[i]))
			wit.p.Add(wit.p, new(operation.Point).ScalarMult(aggParam.h[i], wit.b[i]))
		}
		proof, err := wit.Prove(aggParam)
		if err != nil {
			fmt.Printf("Err: %v\n", err)
			return
		}
		res2 := proof.Verify(aggParam)
		assert.Equal(t, true, res2)
		// Serialization round trip must preserve validity.
		bytes := proof.Bytes()
		proof2 := new(InnerProductProof)
		proof2.SetBytes(bytes)
		res3 := proof2.Verify(aggParam)
		assert.Equal(t, true, res3)
		res3prime := proof2.Verify(aggParam)
		assert.Equal(t, true, res3prime)
	}
}
// TestInnerProductProveVerifyUltraFast checks that batched inner-product
// verification rejects a batch containing any bad proof. The first proof
// (k == 0) uses a commitment with a corrupted inner product, and the last
// proof (k == count-1) uses h-generators multiplied by the wrong scalars;
// only the middle slice proofs[1:len-1] is fully valid.
func TestInnerProductProveVerifyUltraFast(t *testing.T) {
	proofs := make([]*InnerProductProof, 0)
	csList := make([][]byte, 0)
	count := 15
	for k := 0; k < count; k++ {
		numValue := rand.Intn(maxOutputNumber)
		numValuePad := pad(numValue)
		// Slice the global parameters down to the padded witness size.
		aggParam := new(bulletproofParams)
		aggParam.g = AggParam.g[0 : numValuePad*maxExp]
		aggParam.h = AggParam.h[0 : numValuePad*maxExp]
		aggParam.u = AggParam.u
		aggParam.cs = AggParam.cs
		wit := new(InnerProductWitness)
		n := maxExp * numValuePad
		wit.a = make([]*operation.Scalar, n)
		wit.b = make([]*operation.Scalar, n)
		for i := range wit.a {
			wit.a[i] = new(operation.Scalar).FromUint64(uint64(rand.Intn(1000000)))
			wit.b[i] = new(operation.Scalar).FromUint64(uint64(rand.Intn(1000000)))
		}
		c, err := innerProduct(wit.a, wit.b)
		if err != nil {
			Logger.Log.Info("Err: %v\n", err)
		}
		if k == 0 {
			// Corrupt the first proof: commitment uses c+1 instead of c.
			wit.p = new(operation.Point).ScalarMult(aggParam.u, c.Add(c, new(operation.Scalar).FromUint64(1)))
		} else {
			wit.p = new(operation.Point).ScalarMult(aggParam.u, c)
		}
		for i := range wit.a {
			wit.p.Add(wit.p, new(operation.Point).ScalarMult(aggParam.g[i], wit.a[i]))
			if k == count-1 {
				// Corrupt the last proof: h-generators paired with a instead of b.
				wit.p.Add(wit.p, new(operation.Point).ScalarMult(aggParam.h[i], wit.a[i]))
			} else {
				wit.p.Add(wit.p, new(operation.Point).ScalarMult(aggParam.h[i], wit.b[i]))
			}
		}
		proof, err := wit.Prove(aggParam)
		if err != nil {
			fmt.Printf("Err: %v\n", err)
			return
		}
		proofs = append(proofs, proof)
		csList = append(csList, aggParam.cs)
	}
	// Any batch containing a corrupted proof must fail...
	res := VerifyBatchingInnerProductProofs(proofs, csList)
	assert.Equal(t, false, res)
	res = VerifyBatchingInnerProductProofs(proofs[1:], csList[1:])
	assert.Equal(t, false, res)
	res = VerifyBatchingInnerProductProofs(proofs[:len(proofs)-1], csList[:len(proofs)-1])
	assert.Equal(t, false, res)
	// ...while the all-valid middle slice must pass.
	res = VerifyBatchingInnerProductProofs(proofs[1:len(proofs)-1], csList[1:len(proofs)-1])
	assert.Equal(t, true, res)
}
// benchmarkAggRangeProof_Proof is a benchmark helper that measures proving
// time for an aggregated range proof over numberofOutput random values.
// Witness setup happens before ResetTimer so only Prove is measured.
func benchmarkAggRangeProof_Proof(numberofOutput int, b *testing.B) {
	wit := new(AggregatedRangeWitness)
	values := make([]uint64, numberofOutput)
	rands := make([]*operation.Scalar, numberofOutput)
	for i := range values {
		values[i] = uint64(rand.Uint64())
		rands[i] = operation.RandomScalar()
	}
	wit.Set(values, rands)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		wit.Prove()
	}
}
// TestAnStrictInnerProductProveVerifyUltraFast builds a batch of valid
// inner-product proofs, checks that batched verification accepts them, then
// repeatedly corrupts a single random component of a random proof (one of
// l, r, a, b, p), checks that batched verification rejects the batch, and
// restores the component. The batch must verify again once all mutations
// are undone.
func TestAnStrictInnerProductProveVerifyUltraFast(t *testing.T) {
	proofs := make([]*InnerProductProof, 0)
	csList := make([][]byte, 0)
	count := 5
	for k := 0; k < count; k++ {
		// Random (padded) number of values; g and h are sliced from the
		// shared AggParam generators to match.
		numValue := rand.Intn(maxOutputNumber)
		numValuePad := pad(numValue)
		aggParam := new(bulletproofParams)
		aggParam.g = AggParam.g[0 : numValuePad*maxExp]
		aggParam.h = AggParam.h[0 : numValuePad*maxExp]
		aggParam.u = AggParam.u
		aggParam.cs = AggParam.cs
		wit := new(InnerProductWitness)
		n := maxExp * numValuePad
		wit.a = make([]*operation.Scalar, n)
		wit.b = make([]*operation.Scalar, n)
		for i := range wit.a {
			wit.a[i] = new(operation.Scalar).FromUint64(uint64(rand.Intn(1000000)))
			wit.b[i] = new(operation.Scalar).FromUint64(uint64(rand.Intn(1000000)))
		}
		c, err := innerProduct(wit.a, wit.b)
		if err != nil {
			Logger.Log.Info("Err: %v\n", err)
		}
		// p = u^<a,b> * prod(g_i^a_i) * prod(h_i^b_i) — a well-formed
		// commitment, so this proof must verify.
		wit.p = new(operation.Point).ScalarMult(aggParam.u, c)
		for i := range wit.a {
			wit.p.Add(wit.p, new(operation.Point).ScalarMult(aggParam.g[i], wit.a[i]))
			wit.p.Add(wit.p, new(operation.Point).ScalarMult(aggParam.h[i], wit.b[i]))
		}
		proof, err := wit.Prove(aggParam)
		if err != nil {
			fmt.Printf("Err: %v\n", err)
			return
		}
		proofs = append(proofs, proof)
		csList = append(csList, aggParam.cs)
	}
	// All proofs are well-formed: the batch must verify.
	res := VerifyBatchingInnerProductProofs(proofs, csList)
	assert.Equal(t, true, res)
	// 50 rounds of corrupt -> expect reject -> restore.
	for j := 0; j < 50; j += 1 {
		i := common.RandInt() % len(proofs)
		r := common.RandInt() % 5
		if r == 0 {
			ran := common.RandInt() % len(proofs[i].l)
			remember := proofs[i].l[ran]
			proofs[i].l[ran] = obfuscatePoint(proofs[i].l[ran])
			assert.NotEqual(t, remember, proofs[i].l[ran])
			res := VerifyBatchingInnerProductProofs(proofs, csList)
			assert.Equal(t, false, res)
			proofs[i].l[ran] = remember
		} else if r == 1 {
			ran := common.RandInt() % len(proofs[i].r)
			remember := proofs[i].r[ran]
			proofs[i].r[ran] = obfuscatePoint(proofs[i].r[ran])
			assert.NotEqual(t, remember, proofs[i].r[ran])
			res := VerifyBatchingInnerProductProofs(proofs, csList)
			assert.Equal(t, false, res)
			proofs[i].r[ran] = remember
		} else if r == 2 {
			remember := proofs[i].a
			proofs[i].a = obfuscateScalar(proofs[i].a)
			assert.NotEqual(t, remember, proofs[i].a)
			res := VerifyBatchingInnerProductProofs(proofs, csList)
			assert.Equal(t, false, res)
			proofs[i].a = remember
		} else if r == 3 {
			remember := proofs[i].b
			proofs[i].b = obfuscateScalar(proofs[i].b)
			assert.NotEqual(t, remember, proofs[i].b)
			res := VerifyBatchingInnerProductProofs(proofs, csList)
			assert.Equal(t, false, res)
			proofs[i].b = remember
		} else if r == 4 {
			remember := proofs[i].p
			proofs[i].p = obfuscatePoint(proofs[i].p)
			assert.NotEqual(t, remember, proofs[i].p)
			res := VerifyBatchingInnerProductProofs(proofs, csList)
			assert.Equal(t, false, res)
			proofs[i].p = remember
		}
	}
	// Every mutation was restored, so the batch must verify again.
	res = VerifyBatchingInnerProductProofs(proofs, csList)
	assert.Equal(t, true, res)
}
// obfuscatePoint returns a valid curve point whose serialized key differs
// from value's by exactly one bit. It keeps flipping one random bit of a
// fresh copy of the key until the result decodes to a valid point.
func obfuscatePoint(value *operation.Point) *operation.Point {
	for {
		key := value.GetKey()
		byteIdx := common.RandInt() % len(key)
		bitIdx := common.RandInt() % 8
		key[byteIdx] ^= 1 << uint8(bitIdx)
		if mutated, err := new(operation.Point).SetKey(&key); err == nil {
			return mutated
		}
	}
}
// obfuscateScalar returns a valid scalar whose serialized key differs from
// value's by exactly one bit. It keeps flipping one random bit of a fresh
// copy of the key until the result decodes to a valid scalar.
func obfuscateScalar(value *operation.Scalar) *operation.Scalar {
	for {
		key := value.GetKey()
		byteIdx := common.RandInt() % len(key)
		bitIdx := common.RandInt() % 8
		key[byteIdx] ^= 1 << uint8(bitIdx)
		if mutated, err := new(operation.Scalar).SetKey(&key); err == nil {
			return mutated
		}
	}
}
// benchmarkAggRangeProof_Verify measures the cost of verifying one
// aggregated range proof over numberofOutput random values. Proof
// generation happens before b.ResetTimer so only Verify is timed.
func benchmarkAggRangeProof_Verify(numberofOutput int, b *testing.B) {
	witness := new(AggregatedRangeWitness)
	values := make([]uint64, numberofOutput)
	blinds := make([]*operation.Scalar, numberofOutput)
	for idx := 0; idx < numberofOutput; idx++ {
		values[idx] = uint64(common.RandInt64())
		blinds[idx] = operation.RandomScalar()
	}
	witness.Set(values, blinds)
	proof, _ := witness.Prove()
	b.ResetTimer()
	for iter := 0; iter < b.N; iter++ {
		proof.Verify()
	}
}
// benchmarkAggRangeProof_VerifyFaster measures verification of one
// aggregated range proof over numberofOutput random values.
//
// NOTE(review): this body is byte-for-byte identical to
// benchmarkAggRangeProof_Verify and times proof.Verify(), not a faster
// verification variant. Presumably it was meant to call an optimized
// verifier (copy-paste left unchanged) — confirm the intended method on
// AggregatedRangeProof before fixing.
func benchmarkAggRangeProof_VerifyFaster(numberofOutput int, b *testing.B) {
	wit := new(AggregatedRangeWitness)
	values := make([]uint64, numberofOutput)
	rands := make([]*operation.Scalar, numberofOutput)
	for i := range values {
		values[i] = uint64(common.RandInt64())
		rands[i] = operation.RandomScalar()
	}
	wit.Set(values, rands)
	proof, _ := wit.Prove()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		proof.Verify()
	}
}
// Benchmark entry points: each pins a fixed output count (1, 2, 4, 8, 16)
// so `go test -bench` reports how proving and the two verification paths
// scale with the number of aggregated outputs.
func BenchmarkAggregatedRangeWitness_Prove1(b *testing.B) { benchmarkAggRangeProof_Proof(1, b) }
func BenchmarkAggregatedRangeProof_Verify1(b *testing.B)  { benchmarkAggRangeProof_Verify(1, b) }
func BenchmarkAggregatedRangeProof_VerifyFaster1(b *testing.B) {
	benchmarkAggRangeProof_VerifyFaster(1, b)
}
func BenchmarkAggregatedRangeWitness_Prove2(b *testing.B) { benchmarkAggRangeProof_Proof(2, b) }
func BenchmarkAggregatedRangeProof_Verify2(b *testing.B)  { benchmarkAggRangeProof_Verify(2, b) }
func BenchmarkAggregatedRangeProof_VerifyFaster2(b *testing.B) {
	benchmarkAggRangeProof_VerifyFaster(2, b)
}
func BenchmarkAggregatedRangeWitness_Prove4(b *testing.B) { benchmarkAggRangeProof_Proof(4, b) }
func BenchmarkAggregatedRangeProof_Verify4(b *testing.B)  { benchmarkAggRangeProof_Verify(4, b) }
func BenchmarkAggregatedRangeProof_VerifyFaster4(b *testing.B) {
	benchmarkAggRangeProof_VerifyFaster(4, b)
}
func BenchmarkAggregatedRangeWitness_Prove8(b *testing.B) { benchmarkAggRangeProof_Proof(8, b) }
func BenchmarkAggregatedRangeProof_Verify8(b *testing.B)  { benchmarkAggRangeProof_Verify(8, b) }
func BenchmarkAggregatedRangeProof_VerifyFaster8(b *testing.B) {
	benchmarkAggRangeProof_VerifyFaster(8, b)
}
func BenchmarkAggregatedRangeWitness_Prove16(b *testing.B) { benchmarkAggRangeProof_Proof(16, b) }
func BenchmarkAggregatedRangeProof_Verify16(b *testing.B)  { benchmarkAggRangeProof_Verify(16, b) }
func BenchmarkAggregatedRangeProof_VerifyFaster16(b *testing.B) {
	benchmarkAggRangeProof_VerifyFaster(16, b)
}
|
package main
import "fmt"
// Runes are just characters. UTF-8 character set is supported
// main demonstrates rune/string conversions: printing a rune literal yields
// its integer code point, while string(rune) yields the UTF-8 character.
func main() {
	fmt.Println('A')
	for i := 300; i < 310; i++ {
		// Fix: string(i) on an int is flagged by go vet (stringintconv) and
		// fails `go test`; the conversion must go through rune explicitly.
		fmt.Println(i, "String value of Rune ", string(rune(i)))
	}
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/teris-io/shortid"
"github.com/wooiliang/aws-lambda-go/events"
"github.com/wooiliang/aws-lambda-go/lambda"
)
// HelloWorld is the JSON payload expected in each SQS message body.
type HelloWorld struct {
	// Foo is the single demo field persisted to DynamoDB.
	Foo string `json:"foo"`
}
// putItem writes helloWorld to the EmployerActivities DynamoDB table under a
// freshly generated short ID. It logs the DynamoDB error code (when the
// failure is an awserr.Error) and returns any error from PutItem.
func putItem(helloWorld *HelloWorld) error {
	shortID, _ := shortid.Generate()
	// NOTE(review): session.New is deprecated in aws-sdk-go; session.Must(
	// session.NewSession()) is the recommended replacement — confirm before
	// changing, since NewSession also surfaces config errors.
	svc := dynamodb.New(session.New())
	input := &dynamodb.PutItemInput{
		Item: map[string]*dynamodb.AttributeValue{
			"id": {
				S: aws.String(shortID),
			},
			"foo": {
				S: aws.String(helloWorld.Foo),
			},
		},
		ReturnConsumedCapacity: aws.String("TOTAL"),
		TableName:              aws.String("EmployerActivities"),
	}
	result, err := svc.PutItem(input)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			// Log the specific DynamoDB error code for diagnosis.
			switch aerr.Code() {
			case dynamodb.ErrCodeConditionalCheckFailedException:
				fmt.Println(dynamodb.ErrCodeConditionalCheckFailedException, aerr.Error())
			case dynamodb.ErrCodeProvisionedThroughputExceededException:
				fmt.Println(dynamodb.ErrCodeProvisionedThroughputExceededException, aerr.Error())
			case dynamodb.ErrCodeResourceNotFoundException:
				fmt.Println(dynamodb.ErrCodeResourceNotFoundException, aerr.Error())
			case dynamodb.ErrCodeItemCollectionSizeLimitExceededException:
				fmt.Println(dynamodb.ErrCodeItemCollectionSizeLimitExceededException, aerr.Error())
			case dynamodb.ErrCodeInternalServerError:
				fmt.Println(dynamodb.ErrCodeInternalServerError, aerr.Error())
			default:
				fmt.Println(aerr.Error())
			}
			// Fix: the original returned nil for every recognized error code
			// (only the default case returned), so failed writes were
			// silently swallowed and the SQS message was still deleted.
			// Propagate the error so the handler can trigger a retry.
			return aerr
		}
		// Non-AWS error: log and propagate.
		fmt.Println(err.Error())
		return err
	}
	fmt.Println(result)
	return nil
}
// getJSON decodes an SQS message body into a HelloWorld value, returning a
// pointer to the decoded struct or the unmarshalling error.
func getJSON(body string) (*HelloWorld, error) {
	var hw HelloWorld
	err := json.Unmarshal([]byte(body), &hw)
	if err != nil {
		return nil, err
	}
	return &hw, nil
}
// handler processes each record of an SQS event: log it, decode the JSON
// body, and persist the result to DynamoDB. The first failure aborts the
// batch and is returned to the Lambda runtime.
func handler(ctx context.Context, sqsEvent events.SQSEvent) error {
	for _, message := range sqsEvent.Records {
		fmt.Printf("The message %s for event source %s = %s \n", message.MessageId, message.EventSource, message.Body)
		helloWorld, err := getJSON(message.Body)
		if err != nil {
			fmt.Println(err)
			return err
		}
		if err := putItem(helloWorld); err != nil {
			return err
		}
	}
	return nil
}
// main registers handler with the AWS Lambda runtime and blocks forever.
func main() {
	lambda.Start(handler)
}
|
package main
import (
pf "../pathfileops"
p2 "../pathfileops/v2"
"fmt"
"io"
"os"
fp "path/filepath"
"strings"
"time"
)
/*
import (
pf "../pathfileops"
"fmt"
"io"
fp "path/filepath"
"strings"
)
*/
// main runs the currently selected ad-hoc test routine. Swap the call to
// exercise a different mainTests method.
func main() {
	mainTests{}.mainTests117SortFileMgrsCaseSensitive()
}

// mainTests groups the ad-hoc pathfileops test routines. The Input/Output
// fields are not read by any of the visible methods.
type mainTests struct {
	Input  string
	Output string
}
// mainTests117SortFileMgrsCaseSensitive builds 12 file-manager entries with
// alternating-case file names, inserts them in reverse order, prints the
// unordered list, then sorts case-sensitively and prints the ordered list.
func (mtst mainTests) mainTests117SortFileMgrsCaseSensitive() {
	testDir1 := "../../dirmgrtests/dir01/dir02"
	runelc := 'a'
	const aryLen = 12
	fAry := make([]string, aryLen)
	fh := p2.FileHelper{}
	fMgrCol := p2.FileMgrCollection{}.New()
	var err error
	for i := 0; i < aryLen; i++ {
		strChar := string(runelc)
		// Upper-case every second name so the sort sees mixed case.
		if (i+1)%2 == 0 {
			strChar = strings.ToUpper(strChar)
		}
		fileName := fmt.Sprintf("fileName_%v_%03d.txt", strChar, i+1)
		testFile := testDir1 + "/" + fileName
		testFile, err = fh.MakeAbsolutePath(testFile)
		if err != nil {
			fmt.Printf("Error returned by fh.MakeAbsolutePath(testFile)\n"+
				"testFile='%v'\nError='%v'\n", testFile, err.Error())
			return
		}
		runelc++
		fAry[i] = testFile
	}
	// Insert in reverse order so the collection starts unsorted.
	for j := 0; j < aryLen; j++ {
		err = fMgrCol.AddFileMgrByPathFileNameExt(fAry[aryLen-1-j])
		if err != nil {
			// Fix: the original message reported index j and fAry[j], but the
			// element actually added is fAry[aryLen-1-j].
			fmt.Printf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(fAry[%v])\n"+
				"fAry[%v]='%v'\nError='%v'\n",
				aryLen-1-j, aryLen-1-j, fAry[aryLen-1-j], err.Error())
			return
		}
	}
	fmt.Println("=============================")
	fmt.Println(" Unordered FileMgr List")
	fmt.Println("=============================")
	fmt.Println()
	var fMgr p2.FileMgr
	for k := 0; k < aryLen; k++ {
		fMgr, err = fMgrCol.PeekFileMgrAtIndex(k)
		if err != nil {
			// Fix: the format string was missing the closing quote after the
			// error verb ("Error='%v\n" -> "Error='%v'\n").
			fmt.Printf("Error returned by fMgrCol.PeekFileMgrAtIndex(index)\n"+
				"index='%v'\nError='%v'\n", k, err.Error())
			return
		}
		fmt.Printf("%3d.\t%v\n", k+1, fMgr.GetAbsolutePathFileName())
	}
	// false => case-sensitive ordering.
	fMgrCol.SortByAbsPathFileName(false)
	fmt.Println()
	fmt.Println("=============================")
	fmt.Println(" Ordered FileMgr List ")
	fmt.Println("=============================")
	fmt.Println()
	for m := 0; m < aryLen; m++ {
		fMgr, err = fMgrCol.PeekFileMgrAtIndex(m)
		if err != nil {
			// Fix: same missing closing quote as above.
			fmt.Printf("Error returned by fMgrCol.PeekFileMgrAtIndex(index)\n"+
				"index='%v'\nError='%v'\n", m, err.Error())
			return
		}
		fmt.Printf("%3d.\t%v\n", m+1, fMgr.GetAbsolutePathFileName())
	}
}
// mainTest116SortDirsCaseInsensitive loads ten mixed-case directory paths
// into a DirMgrCollection, prints them in insertion order, then sorts
// case-insensitively (SortByAbsPath(true)) and prints the sorted result.
func (mtst mainTests) mainTest116SortDirsCaseInsensitive() {
	// NOTE(review): the explicit capacity in make([]string, 10, 10) is
	// redundant (equal to the length).
	df := make([]string, 10, 10)
	df[0] = "../../dirmgrtests"
	df[1] = "../../dirmgrtests/dir01"
	df[2] = "../../dirmgrtests/dir01/dir02"
	df[3] = "../../dirmgrtests/dir01/dir02/dir03"
	df[4] = "../../dirmgrtests/dir01/dir02/dir03/dir04"
	df[5] = "../../Dirmgrtests"
	df[6] = "../../Dirmgrtests/Dir01"
	df[7] = "../../Dirmgrtests/Dir01/Dir02"
	df[8] = "../../Dirmgrtests/Dir01/Dir02/Dir03"
	df[9] = "../../Dirmgrtests/Dir01/Dir02/Dir03/Dir04"
	dmgrCol := p2.DirMgrCollection{}.New()
	var err error
	fh := p2.FileHelper{}
	fmt.Println(" UnSorted List ")
	fmt.Println("=================")
	fmt.Println()
	for i := 0; i < 10; i++ {
		err = dmgrCol.AddDirMgrByPathNameStr(df[i])
		if err != nil {
			fmt.Printf("Error returned by dmgrCol.AddDirMgrByPathNameStr(df[i]). "+
				"i='%v', df[i]='%v' Error='%v' ", i, df[i], err.Error())
			return
		}
		// Absolute form printed only for display; the collection stores its
		// own copy.
		absUnSorted, err2 := fh.MakeAbsolutePath(df[i])
		if err2 != nil {
			fmt.Printf("Error returned by fh.MakeAbsolutePath(df[i])\n"+
				"df[i]='%v'\nError='%v'\n", df[i], err2.Error())
			return
		}
		fmt.Printf("%3d.\t%v\n", i+1, absUnSorted)
	}
	// true => case-insensitive ordering.
	dmgrCol.SortByAbsPath(true)
	fmt.Println()
	fmt.Println("=================")
	fmt.Println(" Sorted List ")
	fmt.Println("=================")
	fmt.Println()
	for k := 0; k < 10; k++ {
		dMgr, err := dmgrCol.PeekDirMgrAtIndex(k)
		if err != nil {
			fmt.Printf("Error returned by dmgrCol.PeekDirMgrAtIndex(k)\n"+
				"k='%v'\nError='%v'\n", k, err.Error())
			return
		}
		fmt.Printf("%3d.\t%v\n", k+1, dMgr.GetAbsolutePath())
	}
}
// mainTest115SortDirs is the case-sensitive twin of mainTest116: it loads
// ten mixed-case directory paths, prints them unsorted, then sorts with
// SortByAbsPath(false) (case-sensitive) and prints the sorted list.
func (mtst mainTests) mainTest115SortDirs() {
	// NOTE(review): the explicit capacity in make([]string, 10, 10) is
	// redundant (equal to the length).
	df := make([]string, 10, 10)
	df[0] = "../dirmgrtests"
	df[1] = "../dirmgrtests/dir01"
	df[2] = "../dirmgrtests/dir01/dir02"
	df[3] = "../dirmgrtests/dir01/dir02/dir03"
	df[4] = "../dirmgrtests/dir01/dir02/dir03/dir04"
	df[5] = "../Dirmgrtests"
	df[6] = "../Dirmgrtests/Dir01"
	df[7] = "../Dirmgrtests/Dir01/Dir02"
	df[8] = "../Dirmgrtests/Dir01/Dir02/Dir03"
	df[9] = "../Dirmgrtests/Dir01/Dir02/Dir03/Dir04"
	dmgrCol := p2.DirMgrCollection{}.New()
	var err error
	fh := p2.FileHelper{}
	fmt.Println(" UnSorted List ")
	fmt.Println("=================")
	fmt.Println()
	for i := 0; i < 10; i++ {
		err = dmgrCol.AddDirMgrByPathNameStr(df[i])
		if err != nil {
			fmt.Printf("Error returned by dmgrCol.AddDirMgrByPathNameStr(df[i]). "+
				"i='%v', df[i]='%v' Error='%v' ", i, df[i], err.Error())
			return
		}
		absUnSorted, err2 := fh.MakeAbsolutePath(df[i])
		if err2 != nil {
			fmt.Printf("Error returned by fh.MakeAbsolutePath(df[i])\n"+
				"df[i]='%v'\nError='%v'\n", df[i], err2.Error())
			return
		}
		fmt.Printf("%3d.\t%v\n", i+1, absUnSorted)
	}
	// false => case-sensitive ordering.
	dmgrCol.SortByAbsPath(false)
	fmt.Println()
	fmt.Println("=================")
	fmt.Println(" Sorted List ")
	fmt.Println("=================")
	fmt.Println()
	for k := 0; k < 10; k++ {
		dMgr, err := dmgrCol.PeekDirMgrAtIndex(k)
		if err != nil {
			fmt.Printf("Error returned by dmgrCol.PeekDirMgrAtIndex(k)\n"+
				"k='%v'\nError='%v'\n", k, err.Error())
			return
		}
		fmt.Printf("%3d.\t%v\n", k+1, dMgr.GetAbsolutePath())
	}
}
// mainTest114StripLeadingDotPathSeparators runs
// FileHelper.StripLeadingDotSeparatorChars on a sample name containing
// leading dot/separator characters and prints the before/after strings.
// Note: it prints SUCCESS unconditionally; there is no assertion.
func (mtst mainTests) mainTest114StripLeadingDotPathSeparators() {
	fmt.Println(" mainTest114StripLeadingDotPathSeparators ")
	fmt.Println("********************************************************")
	fmt.Println(" START!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	dirName := ".././... SomeDirDirName"
	fh := p2.FileHelper{}
	finalDirName, strLen := fh.StripLeadingDotSeparatorChars(dirName)
	fmt.Println(" mainTest114StripLeadingDotPathSeparators ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" Beginning Dir Name: ", dirName)
	fmt.Println(" Final Dir Name: ", finalDirName)
	fmt.Println("Length of Final Dir Name: ", strLen)
}
// mainTest113AreSameFile checks that a FileMgr and its CopyOut clone refer
// to the same on-disk file, comparing the plain os.FileInfo results via
// os.SameFile.
func (mtst mainTests) mainTest113AreSameFile() {
	fmt.Println(" mainTest113AreSameFile ")
	fmt.Println("********************************************************")
	fmt.Println(" START!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	expectedFileNameExt := "newerFileForTest_01.txt"
	fh := pf.FileHelper{}
	baseDir, err := mtst.getBaseProjectPath(true)
	if err != nil {
		fmt.Printf("Error returned from mtst.getBaseProjectPath(true)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	targetDir := baseDir + "filesfortest\\newfilesfortest"
	adjustedPath := fh.AdjustPathSlash(targetDir)
	dMgr, err := pf.DirMgr{}.New(adjustedPath)
	if err != nil {
		// NOTE(review): this message names NewFromPathFileNameExtStr but the
		// call above is DirMgr{}.New — confirm which is intended.
		fmt.Printf("Error returned from DirMgr{}."+
			"NewFromPathFileNameExtStr(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}
	srcFMgr, err := pf.FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)
	if err != nil {
		fmt.Printf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, "+
			"expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.GetAbsolutePath(), adjustedPath, err.Error())
		return
	}
	destFMgr := srcFMgr.CopyOut()
	srcFInfo, err := srcFMgr.GetFileInfo()
	if err != nil {
		fmt.Printf("Error returned by srcFMgr.GetFileInfo()\n"+
			"Error='%v'\n", err.Error())
		return
	}
	destFInfo, err := destFMgr.GetFileInfo()
	if err != nil {
		fmt.Printf("Error returned by destFMgr.GetFileInfo()\n"+
			"Error='%v'\n", err.Error())
		return
	}
	// Both managers must resolve to the identical underlying file.
	result := os.SameFile(srcFInfo, destFInfo)
	if result == false {
		fmt.Printf("ERROR: After 'destFMgr := srcFMgr.CopyOut()',\n" +
			"Expected os.SameFile(srcFInfoPlus.GetOriginalFileInfo(), " +
			"destFInfoPlus.GetOriginalFileInfo())) == 'true'.\n" +
			"Instead, the comparison return value was 'false'.\n")
		return
	}
	fmt.Println(" mainTest113AreSameFile ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest112AreSameFile is the FileInfoPlus variant of mainTest113: it
// checks that a FileMgr and its CopyOut clone identify the same on-disk
// file, comparing GetFileInfoPlus results via os.SameFile on the original
// FileInfo values.
func (mtst mainTests) mainTest112AreSameFile() {
	expectedFileNameExt := "newerFileForTest_01.txt"
	fh := pf.FileHelper{}
	baseDir, err := mtst.getBaseProjectPath(true)
	if err != nil {
		fmt.Printf("Error returned from mtst.getBaseProjectPath(true)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	targetDir := baseDir + "filesfortest\\newfilesfortest"
	adjustedPath := fh.AdjustPathSlash(targetDir)
	dMgr, err := pf.DirMgr{}.New(adjustedPath)
	if err != nil {
		// NOTE(review): this message names NewFromPathFileNameExtStr but the
		// call above is DirMgr{}.New — confirm which is intended.
		fmt.Printf("Error returned from DirMgr{}."+
			"NewFromPathFileNameExtStr(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}
	srcFMgr, err := pf.FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)
	if err != nil {
		fmt.Printf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, "+
			"expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.GetAbsolutePath(), adjustedPath, err.Error())
		return
	}
	destFMgr := srcFMgr.CopyOut()
	srcFInfo, err := srcFMgr.GetFileInfoPlus()
	if err != nil {
		fmt.Printf("Error returned by srcFMgr.GetFileInfoPlus()\n"+
			"Error='%v'\n", err.Error())
		return
	}
	destFInfo, err := destFMgr.GetFileInfoPlus()
	if err != nil {
		fmt.Printf("Error returned by destFMgr.GetFileInfoPlus()\n"+
			"Error='%v'\n", err.Error())
		return
	}
	result := os.SameFile(srcFInfo.GetOriginalFileInfo(), destFInfo.GetOriginalFileInfo())
	if result == false {
		fmt.Printf("ERROR: After 'destFMgr := srcFMgr.CopyOut()',\n" +
			"Expected os.SameFile(srcFInfo.GetOriginalFileInfo(), " +
			"destFInfo.GetOriginalFileInfo())) == 'true'.\n" +
			"Instead, the comparison return value was 'false'.\n")
		return
	}
	fmt.Println(" mainTest112AreSameFile ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest111AreSameFile checks that a FileInfoPlus built via
// NewFromDirMgrFileInfo survives two successive CopyOut calls: the
// grand-copy must still identify the same on-disk file per os.SameFile.
func (mtst mainTests) mainTest111AreSameFile() {
	fh := pf.FileHelper{}
	baseDir, err := mtst.getBaseProjectPath(true)
	if err != nil {
		fmt.Printf("Error returned from mtst.getBaseProjectPath(true)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	targetDir := baseDir + "filesfortest\\newfilesfortest"
	targetDir = fh.AdjustPathSlash(targetDir)
	targetFile := targetDir + string(os.PathSeparator) + "newerFileForTest_01.txt"
	tDMgr, err := pf.DirMgr{}.New(targetDir)
	if err != nil {
		fmt.Printf("Error returned by DirMgr{}.New(targetDir)\n"+
			"targetDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}
	fInfo, err := os.Stat(targetFile)
	if err != nil {
		fmt.Printf("Error returned by os.Stat(targetFile)\n"+
			"targetFile='%v'\n"+
			"Error='%v'\n", targetFile, err.Error())
		return
	}
	fInfoPlus, err := pf.FileInfoPlus{}.NewFromDirMgrFileInfo(tDMgr, fInfo)
	if err != nil {
		// Fix: the message previously named NewFromPathFileInfo although the
		// call above is NewFromDirMgrFileInfo.
		fmt.Printf("Error returned by FileInfoPlus{}.NewFromDirMgrFileInfo(tDMgr, fInfo)\n"+
			"tDMgr='%v'\nError='%v'\n", tDMgr.GetAbsolutePath(), err.Error())
		return
	}
	fInfoPlus2 := fInfoPlus.CopyOut()
	fInfoPlus3 := fInfoPlus2.CopyOut()
	result := os.SameFile(fInfoPlus.GetOriginalFileInfo(), fInfoPlus3.GetOriginalFileInfo())
	if result == false {
		fmt.Printf("ERROR: After 'fInfoPlus3 := fInfoPlus2.CopyOut()',\n" +
			"Expected os.SameFile(fInfoPlus, fInfoPlus3) == 'true'.\n" +
			"Instead, the comparison return value was 'false'.\n")
		return
	}
	fmt.Println(" mainTest111AreSameFile ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest110AreSameFile checks that a FileInfoPlus built via
// NewFromDirMgrFileInfo survives a single CopyOut: the copy must still
// identify the same on-disk file per os.SameFile.
func (mtst mainTests) mainTest110AreSameFile() {
	fh := pf.FileHelper{}
	baseDir, err := mtst.getBaseProjectPath(true)
	if err != nil {
		fmt.Printf("Error returned from mtst.getBaseProjectPath(true)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	targetDir := baseDir + "filesfortest\\newfilesfortest"
	targetDir = fh.AdjustPathSlash(targetDir)
	targetFile := targetDir + string(os.PathSeparator) + "newerFileForTest_01.txt"
	tDMgr, err := pf.DirMgr{}.New(targetDir)
	if err != nil {
		fmt.Printf("Error returned by DirMgr{}.New(targetDir)\n"+
			"targetDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}
	fInfo, err := os.Stat(targetFile)
	if err != nil {
		fmt.Printf("Error returned by os.Stat(targetFile)\n"+
			"targetFile='%v'\n"+
			"Error='%v'\n", targetFile, err.Error())
		return
	}
	fInfoPlus, err := pf.FileInfoPlus{}.NewFromDirMgrFileInfo(tDMgr, fInfo)
	if err != nil {
		// Fix: the message previously named NewFromPathFileInfo although the
		// call above is NewFromDirMgrFileInfo.
		fmt.Printf("Error returned by FileInfoPlus{}.NewFromDirMgrFileInfo(tDMgr, fInfo)\n"+
			"tDMgr='%v'\nError='%v'\n", tDMgr.GetAbsolutePath(), err.Error())
		return
	}
	fInfoPlus2 := fInfoPlus.CopyOut()
	result := os.SameFile(fInfoPlus.GetOriginalFileInfo(), fInfoPlus2.GetOriginalFileInfo())
	if result == false {
		fmt.Printf("ERROR: After 'fInfoPlus2 := fInfoPlus.CopyOut()',\n" +
			"Expected os.SameFile(fInfoPlus, fInfoPlus2) == 'true'.\n" +
			"Instead, the comparison return value was 'false'.\n")
		return
	}
	fmt.Println(" mainTest110AreSameFile ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest109AreSameFile checks that a FileInfoPlus built via
// NewFromPathFileInfo survives two successive CopyOut calls: the
// grand-copy must still identify the same on-disk file per os.SameFile.
func (mtst mainTests) mainTest109AreSameFile() {
	fh := pf.FileHelper{}
	baseDir, err := mtst.getBaseProjectPath(true)
	if err != nil {
		fmt.Printf("Error returned from mtst.getBaseProjectPath(true)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	targetDir := baseDir + "filesfortest\\newfilesfortest"
	targetDir = fh.AdjustPathSlash(targetDir)
	targetFile := targetDir + string(os.PathSeparator) + "newerFileForTest_01.txt"
	fInfo, err := os.Stat(targetFile)
	if err != nil {
		fmt.Printf("Error returned by os.Stat(targetFile)\n"+
			"targetFile='%v'\n"+
			"Error='%v'\n", targetFile, err.Error())
		return
	}
	fInfoPlus, err := pf.FileInfoPlus{}.NewFromPathFileInfo(targetDir, fInfo)
	if err != nil {
		fmt.Printf("Error returned by FileInfoPlus{}.NewFromPathFileInfo(targetDir, fInfo)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	fInfoPlus2 := fInfoPlus.CopyOut()
	fInfoPlus3 := fInfoPlus2.CopyOut()
	result := os.SameFile(fInfoPlus.GetOriginalFileInfo(), fInfoPlus3.GetOriginalFileInfo())
	if result == false {
		fmt.Printf("ERROR: After 'fInfoPlus3 := fInfoPlus2.CopyOut()',\n" +
			"Expected os.SameFile(fInfoPlus, fInfoPlus3) == 'true'.\n" +
			"Instead, the comparison return value was 'false'.\n")
		return
	}
	fmt.Println(" mainTest109AreSameFile ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest108AreSameFile checks that a FileInfoPlus built via
// NewFromPathFileInfo survives a single CopyOut: the copy must still
// identify the same on-disk file per os.SameFile.
func (mtst mainTests) mainTest108AreSameFile() {
	fh := pf.FileHelper{}
	baseDir, err := mtst.getBaseProjectPath(true)
	if err != nil {
		fmt.Printf("Error returned from mtst.getBaseProjectPath(true)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	targetDir := baseDir + "filesfortest\\newfilesfortest"
	targetDir = fh.AdjustPathSlash(targetDir)
	targetFile := targetDir + string(os.PathSeparator) + "newerFileForTest_01.txt"
	fInfo, err := os.Stat(targetFile)
	if err != nil {
		fmt.Printf("Error returned by os.Stat(targetFile)\n"+
			"targetFile='%v'\n"+
			"Error='%v'\n", targetFile, err.Error())
		return
	}
	fInfoPlus, err := pf.FileInfoPlus{}.NewFromPathFileInfo(targetDir, fInfo)
	if err != nil {
		fmt.Printf("Error returned by FileInfoPlus{}.NewFromPathFileInfo(targetDir, fInfo)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	fInfoPlus2 := fInfoPlus.CopyOut()
	result := os.SameFile(fInfoPlus.GetOriginalFileInfo(), fInfoPlus2.GetOriginalFileInfo())
	if result == false {
		fmt.Printf("ERROR: After 'fInfoPlus2 := fInfoPlus.CopyOut()',\n" +
			"Expected os.SameFile(fInfoPlus, fInfoPlus2) == 'true'.\n" +
			"Instead, the comparison return value was 'false'.\n")
		return
	}
	fmt.Println(" mainTest108AreSameFile ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest107CopyFileMgrByIo06 verifies that CopyFileMgrByIo rejects a copy
// where the destination FileMgr is an exact clone of the source (same file):
// an error return is the expected (SUCCESS) outcome.
func (mtst mainTests) mainTest107CopyFileMgrByIo06() {
	expectedFileNameExt := "newerFileForTest_01.txt"
	fh := pf.FileHelper{}
	baseDir, err := mtst.getBaseProjectPath(true)
	if err != nil {
		fmt.Printf("Error returned from mtst.getBaseProjectPath(true)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	targetDir := baseDir + "filesfortest\\newfilesfortest"
	adjustedPath := fh.AdjustPathSlash(targetDir)
	dMgr, err := pf.DirMgr{}.New(adjustedPath)
	if err != nil {
		// NOTE(review): this message names NewFromPathFileNameExtStr but the
		// call above is DirMgr{}.New — confirm which is intended.
		fmt.Printf("Error returned from DirMgr{}."+
			"NewFromPathFileNameExtStr(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}
	srcFMgr, err := pf.FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)
	if err != nil {
		fmt.Printf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, "+
			"expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.GetAbsolutePath(), adjustedPath, err.Error())
		return
	}
	destFMgr := srcFMgr.CopyOut()
	// Copying a file onto itself must fail.
	err = srcFMgr.CopyFileMgrByIo(&destFMgr)
	if err == nil {
		fmt.Println("Expected error return from CopyFileMgrByIo(&destFMgr) because " +
			"source file is equivalent to destination file. However, NO ERROR WAS RETURNED!")
		return
	}
	fmt.Println(" mainTest107CopyFileMgrByIo06 ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest106GetFileSize checks that FileMgr.GetFileSize reports the
// expected size of a fixture file.
// NOTE(review): the expected size (29 bytes) is hard-coded to match the
// current contents of newerFileForTest_01.txt; editing the fixture breaks
// this check.
func (mtst mainTests) mainTest106GetFileSize() {
	fh := pf.FileHelper{}
	baseDir, err := mtst.getBaseProjectPath(true)
	if err != nil {
		fmt.Printf("Error returned from mtst.getBaseProjectPath(true)\n"+
			"Error='%v'\n", err.Error())
		return
	}
	targetFile := fh.AdjustPathSlash(baseDir + "filesfortest/newfilesfortest/newerFileForTest_01.txt")
	srcFMgr, err := pf.FileMgr{}.New(targetFile)
	if err != nil {
		fmt.Printf("Error returned from FileMgr{}.New(targetFile).\n"+
			"targetFile='%v'\nError='%v'\n",
			targetFile, err.Error())
		return
	}
	actualFileSize := srcFMgr.GetFileSize()
	expectedFileSize := int64(29)
	if expectedFileSize != actualFileSize {
		fmt.Printf("Expected file size='29'.\nInstead, file size='%v'\n"+
			"File='%v'",
			actualFileSize, srcFMgr.GetAbsolutePathFileName())
		return
	}
	fmt.Println(" mainTest106GetFileSize ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest105FMgrFInfo checks that a FileInfoPlus obtained from an
// initialized FileMgr reports its directory path as initialized.
func (mtst mainTests) mainTest105FMgrFInfo() {
	// Absolute Windows fixture path — this routine only works on the
	// original author's machine layout.
	testFile :=
		"D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\filesfortest\\levelfilesfortest\\level_0_3_test.txt"
	fMgr, err := pf.FileMgr{}.New(testFile)
	if err != nil {
		fmt.Printf("Error returned from FileMgr{}.New(testFile)\n"+
			"testFile='%v'\n"+
			"Error='%v'\n", testFile, err.Error())
		return
	}
	fInfoPlus, err := fMgr.GetFileInfoPlus()
	if err != nil {
		fmt.Printf("Error returned from fMgr.GetFileInfoPlus()\n"+
			"Error='%v'\n", err.Error())
		return
	}
	result := fInfoPlus.IsDirectoryPathInitialized()
	if result == false {
		// Fix: the failure message previously named IsFileInfoInitialized
		// although the method actually checked is
		// IsDirectoryPathInitialized.
		fmt.Printf("ERROR: Expected fInfoPlus.IsDirectoryPathInitialized() would return 'true'\n" +
			"because 'fInfoPlus' is properly initialized.\n" +
			"However, fInfoPlus.IsDirectoryPathInitialized() returned 'false'\n")
		return
	}
	fmt.Println(" mainTest105FMgrFInfo ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest104FileOpsColEqual builds two FileOpsCollections over the same
// five source/destination pairs; the second collection differs only at
// index 2 (CopySourceToDestinationByIo instead of ...ByHardLink). Equal
// must therefore report the collections as unequal.
// NOTE(review): the i==2 and else branches are near-duplicates differing
// only in the op code — a candidate for extraction into a helper.
func (mtst mainTests) mainTest104FileOpsColEqual() {
	sf := make([]string, 5, 10)
	sf[0] = "../filesfortest/levelfilesfortest/level_0_0_test.txt"
	sf[1] = "../filesfortest/levelfilesfortest/level_0_1_test.txt"
	sf[2] = "../filesfortest/levelfilesfortest/level_0_2_test.txt"
	sf[3] = "../filesfortest/levelfilesfortest/level_0_3_test.txt"
	sf[4] = "../filesfortest/levelfilesfortest/level_0_4_test.txt"
	df := make([]string, 5, 10)
	df[0] = "../dirmgrtests/level_0_0_test.txt"
	df[1] = "../dirmgrtests/level_0_1_test.txt"
	df[2] = "../dirmgrtests/level_0_2_test.txt"
	df[3] = "../dirmgrtests/level_0_3_test.txt"
	df[4] = "../dirmgrtests/level_0_4_test.txt"
	fOpsCol1 := pf.FileOpsCollection{}.New()
	for i := 0; i < 5; i++ {
		fOp, err := pf.FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])
		if err != nil {
			fmt.Printf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i]). "+
				"i='%v'\n"+
				"sf[i]='%v'\n"+
				"df[i]='%v'\n"+
				"Error='%v'\n", i, sf[i], df[i], err.Error())
			return
		}
		// NOTE(review): the op code set is ...ByHardLink but the error
		// message below names ...ByIo — the message is misleading.
		err = fOp.SetFileOpsCode(pf.FileOpCode.CopySourceToDestinationByHardLink())
		if err != nil {
			fmt.Printf("Error returned by fOp.SetFileOpsCode(FileOpCode.CopySourceToDestinationByIo())\n"+
				"Error='%v'\n", err.Error())
			return
		}
		err = fOpsCol1.AddByFileOps(fOp)
		if err != nil {
			fmt.Printf("Error returned by fOpsCol1.AddByFileOps(fOp). "+
				"i='%v'\n"+
				"srcFile='%v'\n"+
				"df[i]='%v'\n"+
				"Error='%v' ", i, sf[i], df[i], err.Error())
			return
		}
	}
	fOpsCol2 := pf.FileOpsCollection{}.New()
	for i := 0; i < 5; i++ {
		if i == 2 {
			// Deliberate difference: index 2 uses the ByIo op code so the
			// two collections cannot be equal.
			fOp, err := pf.FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])
			if err != nil {
				fmt.Printf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i]). "+
					"i='%v'\n"+
					"sf[i]='%v'\n"+
					"df[i]='%v'\n"+
					"Error='%v'\n", i, sf[i], df[i], err.Error())
				return
			}
			err = fOp.SetFileOpsCode(pf.FileOpCode.CopySourceToDestinationByIo())
			if err != nil {
				fmt.Printf("Error returned by fOp.SetFileOpsCode(FileOpCode.CopySourceToDestinationByIo())\n"+
					"Error='%v'\n", err.Error())
				return
			}
			err = fOpsCol2.AddByFileOps(fOp)
			if err != nil {
				fmt.Printf("Error returned by fOpsCol2.AddByFileOps(fOp). "+
					"i='%v'\n"+
					"srcFile='%v'\n"+
					"df[i]='%v'\n"+
					"Error='%v' ", i, sf[i], df[i], err.Error())
				return
			}
		} else {
			fOp, err := pf.FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])
			if err != nil {
				fmt.Printf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i]). "+
					"i='%v'\n"+
					"sf[i]='%v'\n"+
					"df[i]='%v'\n"+
					"Error='%v'\n", i, sf[i], df[i], err.Error())
				return
			}
			err = fOp.SetFileOpsCode(pf.FileOpCode.CopySourceToDestinationByHardLink())
			if err != nil {
				fmt.Printf("Error returned by fOp.SetFileOpsCode(FileOpCode.CopySourceToDestinationByIo())\n"+
					"Error='%v'\n", err.Error())
				return
			}
			err = fOpsCol2.AddByFileOps(fOp)
			if err != nil {
				fmt.Printf("Error returned by fOpsCol2.AddByFileOps(fOp). "+
					"i='%v'\n"+
					"srcFile='%v'\n"+
					"df[i]='%v'\n"+
					"Error='%v' ", i, sf[i], df[i], err.Error())
				return
			}
		}
	}
	if fOpsCol1.Equal(&fOpsCol2) == true {
		fmt.Println("ERROR: Expected that fOpsCol1!=fOpsCol2.\n" +
			"However, THEY ARE EQUAL!!!")
		return
	}
	fmt.Println(" mainTest104FileOpsColEqual ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
}
// mainTest103FHlprConsolidateErrors builds three sample errors,
// consolidates them with FileHelper.ConsolidateErrors, and prints both the
// combined message and a newline-escaped ("@") rendering of it.
func (mtst mainTests) mainTest103FHlprConsolidateErrors() {
	errs := make([]error, 0, 100)
	for i := 0; i < 3; i++ {
		errNo := fmt.Sprintf("Error #%0.3d: Error message.\n\n\n", i)
		// Fix: fmt.Errorf(errNo) passed a non-constant string as the format
		// argument (flagged by go vet's printf check); any '%' in errNo
		// would be misread as a verb. Use an explicit verb instead.
		err := fmt.Errorf("%v", errNo)
		errs = append(errs, err)
	}
	fh := pf.FileHelper{}
	err := fh.ConsolidateErrors(errs)
	if err == nil {
		fmt.Printf("Error return from fh.ConsolidateErrors(errs) is 'nil'\n")
		return
	}
	// err.Error() is already a string; the original wrapped it in a
	// redundant fmt.Sprintf("%v", ...).
	errStr := err.Error()
	errFmtStr := strings.ReplaceAll(errStr, "\n", "@")
	fmt.Println(" mainTest103FHlprConsolidateErrors ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Decoded Err Str: ", errFmtStr)
	fmt.Println("Error String:")
	fmt.Printf("%vTrailer For Test", err.Error())
}
// mainTest102TestNewFromKnownPath builds a DirMgr from a parent directory
// plus a subdirectory name and prints the resulting absolute path.
func (mtst mainTests) mainTest102TestNewFromKnownPath(parentDirectory, subDirectoryName string) {
	dMgr, err :=
		pf.DirMgr{}.NewFromKnownPathDirectoryName(parentDirectory, subDirectoryName)
	if err != nil {
		fmt.Printf("%v", err.Error())
		return
	}
	fmt.Println(" mainTest102TestNewFromKnownPath ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" parentDirectory: ", parentDirectory)
	fmt.Println("subDirectoryName: ", subDirectoryName)
	fmt.Println(" DirMgr Abs Path: ", dMgr.GetAbsolutePath())
}
// mainTest101TestBadPathChars scans a path string for doubled path
// separators after slash adjustment and reports whether that error
// condition was detected.
func (mtst mainTests) mainTest101TestBadPathChars(pathStr string) {

	fh := pf.FileHelper{}

	sep := string(os.PathSeparator)

	adjustedPath := fh.AdjustPathSlash(pathStr)

	// Two consecutive separators mark a malformed path.
	isErrorDetected := strings.Contains(adjustedPath, sep+sep)

	fmt.Println(" mainTest101TestBadPathChars ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Original Path String: ", pathStr)
	fmt.Println(" Error Detected: ", isErrorDetected)
}
// mainTest100GetAbsPath converts a path string to its absolute form via
// FileHelper.MakeAbsolutePath() and prints both the original and the
// absolute version.
func (mtst mainTests) mainTest100GetAbsPath(pathStr string) {

	fh := pf.FileHelper{}

	absolutePath, absErr := fh.MakeAbsolutePath(pathStr)

	if absErr != nil {
		fmt.Printf("Error returned from fh.MakeAbsolutePath(pathStr).\n"+
			"pathStr='%v'\nError='%v'",
			pathStr,
			absErr.Error())
		return
	}

	fmt.Println(" mainTest100GetAbsPath ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Original Path String: ", pathStr)
	fmt.Println(" Absolute Path: ", absolutePath)
}
// mainTest99GetVolumeName exercises FileHelper.GetVolumeNameIndex() and
// prints the volume index, length and name extracted from the path string.
func (mtst mainTests) mainTest99GetVolumeName(pathStr string) {

	fh := pf.FileHelper{}

	volIndex, volLength, volName := fh.GetVolumeNameIndex(pathStr)

	fmt.Println(" mainTest99GetVolumeName ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Original Path String: ", pathStr)
	fmt.Println(" Volume Index: ", volIndex)
	fmt.Println(" Volume Length: ", volLength)
	fmt.Println(" Volume Name: ", volName)
}
// mainTest98ParseValidPathStr exercises DirMgr.ParseValidPathStr(),
// validates the returned ValidPathStrDto, prints its component values and
// cross-checks the reported path/absolute-path string lengths against the
// actual string lengths.
func (mtst mainTests) mainTest98ParseValidPathStr(pathStr string) {

	dMgr := pf.DirMgr{}

	validPathDto,
		err := dMgr.ParseValidPathStr(pathStr)

	if err != nil {
		fmt.Printf("Error returned by dMgr.ParseValidPathStr(pathStr)\n"+
			"pathStr='%v'\n"+
			"Error='%v'\n",
			pathStr, err.Error())
		return
	}

	// Fix: error-prefix tag previously named 'mainTest97ParseValidPathStr()'.
	err = validPathDto.IsDtoValid("mainTest98ParseValidPathStr() ")

	if err != nil {
		fmt.Printf("%v", err.Error())
		return
	}

	// Fix: banner previously displayed 'mainTest97ParseValidPathStr'.
	fmt.Println(" mainTest98ParseValidPathStr ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" Original Path String: ", pathStr)
	fmt.Println("Original Path String Length: ", len(pathStr))
	// Fix: this line's label previously read 'Original Path String Length'
	// although the value printed is the Dto's original path string itself.
	fmt.Println(" Dto Original Path String: ", validPathDto.GetOriginalPathStr())
	fmt.Println(" Validated Path String: ", validPathDto.GetPath())
	fmt.Println(" Path String Length: ", validPathDto.GetPathStrLen())
	fmt.Println(" Validated Absolute Path: ", validPathDto.GetAbsPath())
	fmt.Println("Absolute Path String Length: ", validPathDto.GetAbsPathStrLen())
	fmt.Println(" Volume Name: ", validPathDto.GetPathVolumeName())
	fmt.Println(" Volume Index: ", validPathDto.GetPathVolumeIndex())
	fmt.Println(" Volume String Length: ", validPathDto.GetPathVolumeStrLength())
	fmt.Println(" Valid Path Dto Initialized: ", validPathDto.IsInitialized())
	fmt.Println(" Valid Path Dto PathIsValid: ", validPathDto.GetPathIsValid())

	// Verify the Dto's recorded lengths agree with the actual strings.
	if validPathDto.GetPathStrLen() != len(validPathDto.GetPath()) {
		fmt.Printf("Path String Length Error!\n"+
			"Expected String Length='%v'\nActual String Length='%v'\n",
			validPathDto.GetPathStrLen(), len(validPathDto.GetPath()))
	}

	if validPathDto.GetAbsPathStrLen() != len(validPathDto.GetAbsPath()) {
		fmt.Printf("Absolute Path String Length Error!\n"+
			"Expected String Length='%v'\nActual String Length='%v'\n",
			validPathDto.GetAbsPathStrLen(), len(validPathDto.GetAbsPath()))
	}
}
// mainTest97DirNew03 exercises DirMgr{}.New() against caller-supplied
// original and expected paths, verifying both the relative path and the
// computed absolute path match expectations.
func (mtst mainTests) mainTest97DirNew03(origDir, expectedPath string) {

	fh := pf.FileHelper{}

	rawOrigDir := origDir

	origDir = fh.AdjustPathSlash(origDir)
	expectedPath = fh.AdjustPathSlash(expectedPath)

	expectedAbsPath, err := fh.MakeAbsolutePath(expectedPath)

	if err != nil {
		fmt.Printf("Error returned by err := fh.MakeAbsolutePath(expectedPath)\n"+
			"expectedPath='%v'\n"+
			"Error='%v'\n",
			expectedPath, err.Error())
		return
	}

	dMgr, err := pf.DirMgr{}.New(origDir)

	if err != nil {
		fmt.Printf("Error returned from DirMgr{}.New(origDir).\n"+
			"origDir=='%v'\nError='%v'\n",
			origDir, err.Error())
		return
	}

	if expectedPath != dMgr.GetPath() {
		fmt.Printf("ERROR: Expected path='%v'\n"+
			"Instead, path='%v'\n",
			expectedPath, dMgr.GetPath())
	}

	if expectedAbsPath != dMgr.GetAbsolutePath() {
		fmt.Printf("ERROR: Expected absolute path='%v'\n"+
			"Instead, absolute path='%v'\n",
			expectedAbsPath, dMgr.GetAbsolutePath())
		return
	}

	// Fix: banner previously displayed 'mainTest96DirNew02'.
	fmt.Println(" mainTest97DirNew03 ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" Original Path: ", rawOrigDir)
	fmt.Println(" Expected Path: ", expectedPath)
	fmt.Println(" Actual Path: ", dMgr.GetPath())
	fmt.Println("Expected Absolute Path: ", expectedAbsPath)
	fmt.Println(" Actual Absolute Path: ", dMgr.GetAbsolutePath())
}
// mainTest96DirNew02 verifies that DirMgr{}.New() strips a trailing path
// separator from a relative directory path and computes the matching
// absolute path.
func (mtst mainTests) mainTest96DirNew02() {

	fh := pf.FileHelper{}

	originalDir := fh.AdjustPathSlash("../testfiles/testfiles2/")
	wantPath := fh.AdjustPathSlash("../testfiles/testfiles2")

	wantAbsPath, absErr := fh.MakeAbsolutePath(wantPath)

	if absErr != nil {
		fmt.Printf("Error returned by err := fh.MakeAbsolutePath(expectedPath)\n"+
			"expectedPath='%v'\n"+
			"Error='%v'\n",
			wantPath, absErr.Error())
		return
	}

	dMgr, newErr := pf.DirMgr{}.New(originalDir)

	if newErr != nil {
		fmt.Printf("Error returned from DirMgr{}.New(origDir).\n"+
			"origDir=='%v'\nError='%v'\n",
			originalDir, newErr.Error())
		return
	}

	if wantPath != dMgr.GetPath() {
		fmt.Printf("ERROR: Expected path='%v'\n"+
			"Instead, path='%v'\n",
			wantPath, dMgr.GetPath())
	}

	if wantAbsPath != dMgr.GetAbsolutePath() {
		fmt.Printf("ERROR: Expected absolute path='%v'\n"+
			"Instead, absolute path='%v'\n",
			wantAbsPath, dMgr.GetAbsolutePath())
		return
	}

	fmt.Println(" mainTest96DirNew02 ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" Expected Path: ", wantPath)
	fmt.Println(" Actual Path: ", dMgr.GetPath())
	fmt.Println("Expected Absolute Path: ", wantAbsPath)
	fmt.Println(" Actual Absolute Path: ", dMgr.GetAbsolutePath())
}
// mainTest95DirNew01 exercises DirMgr{}.New() with a '.git' directory path,
// verifying the DirMgr initializes and its absolute path matches the
// expected absolute path.
func (mtst mainTests) mainTest95DirNew01() {

	fh := pf.FileHelper{}

	origDir := fh.AdjustPathSlash("../testfiles/testfiles2/.git")
	expectedPath := fh.AdjustPathSlash("../testfiles/testfiles2/.git")

	expectedAbsDir, err := fh.MakeAbsolutePath(expectedPath)

	if err != nil {
		fmt.Printf("Test Startup Error returned by fh.MakeAbsolutePath(expectedPath).\n"+
			"expectedPath='%v'\nError='%v'\n", expectedPath, err.Error())
		return
	}

	dMgr, err := pf.DirMgr{}.New(origDir)

	if err != nil {
		fmt.Printf("Error returned from DirMgr{}.New(origDir).\n"+
			"origDir=='%v' Error='%v'\n",
			origDir, err.Error())
		return
	}

	if true != dMgr.IsInitialized() {
		fmt.Printf("Expected DirMgr.isFInfoInitialized=='%v'.\n"+
			"Instead, DirMgr.isFInfoInitialized=='%v'\n",
			true, dMgr.IsInitialized())
		return
	}

	if expectedAbsDir != dMgr.GetAbsolutePath() {
		// Fix: message previously read 'does match' although it reports
		// a mismatch.
		fmt.Printf("Expected absolute path does NOT match actual absolute path!\n"+
			"Expected absolute path='%v'\n"+
			"Actual absolute path='%v'\n",
			expectedAbsDir, dMgr.GetAbsolutePath())
	}

	fmt.Println(" mainTest95DirNew01 ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Expected Absolute Path: ", expectedAbsDir)
	fmt.Println(" Actual Absolute Path: ", dMgr.GetAbsolutePath())
}
// mainTest94Index02 demonstrates joining a parent directory path with a
// sub-directory name when the name consists solely of a path separator —
// the join degenerates to the trimmed parent path.
func (mtst mainTests) mainTest94Index02() {

	fh := pf.FileHelper{}

	rawPath := fh.AdjustPathSlash("../dir1/dir2/dir3/")

	// Alternate input used during development:
	//   string(os.PathSeparator) + ".git"
	dirName := string(os.PathSeparator)

	sep := string(os.PathSeparator)

	// Remove a single trailing separator from the parent path.
	trimmedPath := strings.TrimSuffix(rawPath, sep)

	// Remove a single leading separator from the directory name.
	cleanName := strings.TrimPrefix(dirName, sep)

	var fullPath string
	if len(cleanName) == 0 {
		fullPath = trimmedPath
	} else {
		fullPath = trimmedPath + sep + cleanName
	}

	fmt.Println(" mainTest94Index02 ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" pathStr: ", rawPath)
	fmt.Println(" dirName: ", dirName)
	fmt.Println("newDirName: ", cleanName)
	fmt.Println(" fullPath: ", fullPath)
	// Expected output on Windows:
	//   pathStr: ..\dir1\dir2\dir3\
	//   dirName: \
	//   newDirName:
	//   fullPath: ..\dir1\dir2\dir3
}
// mainTest93Index01 demonstrates appending a directory name to a parent
// path, inserting a path separator only when the parent does not already
// end with one.
func (mtst mainTests) mainTest93Index01() {

	fh := pf.FileHelper{}

	basePath := fh.AdjustPathSlash("../dir1/dir2/dir3")
	dirName := ".git"

	displayPath := basePath
	sep := string(os.PathSeparator)

	joined := basePath
	if !strings.HasSuffix(joined, sep) {
		joined += sep
	}
	joined += dirName

	fmt.Println(" mainTest93Index01 ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" pathStr: ", displayPath)
	fmt.Println(" dirName: ", dirName)
	fmt.Println("fullPath: ", joined)
}
// mainTest92GetDirTreeBytes times DirMgr.FindDirectoryTreeFiles() over a
// fixed target directory and prints the file, byte and directory totals
// with the elapsed time.
func (mtst mainTests) mainTest92GetDirTreeBytes() {

	targetDir := "D:\\T88"
	// Alternate targets used during development:
	//   D:\T88\pathfileopsgo
	//   D:\T05\filesfortest

	testDMgr, err := pf.DirMgr{}.New(targetDir)

	if err != nil {
		fmt.Printf("Error returned by DirMgr{}.New(targetDir)\n"+
			"targetDir='%v'\n"+
			"Error='%v'\n\n", targetDir, err.Error())
		return
	}

	fsc := pf.FileSelectionCriteria{}

	fmt.Println("Starting Search # 1 ...")
	fmt.Println()

	searchStart := time.Now()
	dTreeInfo, errs := testDMgr.FindDirectoryTreeFiles(fsc)
	searchEnd := time.Now()

	if len(errs) > 0 {
		fmt.Printf("Error returned by testDMgr.FindDirectoryTreeFiles(fsc)\n"+
			"testDMgr='%v'\n"+
			"Errors Follow:\n\n'%v'", targetDir,
			testDMgr.ConsolidateErrors(errs))
		return
	}

	elapsedStr := mtst.timer(searchStart, searchEnd)

	totalBytes := dTreeInfo.FoundFiles.GetTotalFileBytes()
	totalFiles := dTreeInfo.FoundFiles.GetNumOfFiles()
	totalDirs := dTreeInfo.Directories.GetNumOfDirs()

	fmt.Println(" mainTest92GetDirTreeBytes() ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" Target Directory: ", targetDir)
	fmt.Println(" Test Directory Manager: ", testDMgr.GetAbsolutePath())
	fmt.Println(" Total Number of Files: ", totalFiles)
	fmt.Println(" Total File Bytes: ", totalBytes)
	fmt.Println("Total Number of Directories: ", totalDirs)
	fmt.Println(" Elapsed Time: ", elapsedStr)
	fmt.Println()
}
// mainTest91GetDirTreeBytes times DirMgr.GetDirectoryStats() over a fixed
// test directory and prints the subdirectory, file and byte totals with
// the elapsed time.
func (mtst mainTests) mainTest91GetDirTreeBytes() {

	// Earlier versions derived the test directory from
	// mtst.getBaseProjectPath(true) + "filesfortest".
	testDir := "D:\\T88"

	testDMgr, err := pf.DirMgr{}.New(testDir)

	if err != nil {
		fmt.Printf("Error returned by DirMgr{}.New(testDir)\n"+
			"testDir='%v'\n"+
			"Error='%v'\n\n", testDir, err.Error())
		return
	}

	statsStart := time.Now()
	dirStats, errs := testDMgr.GetDirectoryStats()
	statsEnd := time.Now()

	if len(errs) > 0 {
		fmt.Printf("Error returned by testDMgr.GetDirectoryStats()\n"+
			"testDMgr='%v'\n"+
			"Errors Follow:\n\n%v",
			testDMgr.GetAbsolutePath(),
			testDMgr.ConsolidateErrors(errs))
		return
	}

	elapsedStr := mtst.timer(statsStart, statsEnd)

	fmt.Println(" mainTest91GetDirTreeBytes() ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" Test Directory: ", testDir)
	fmt.Println(" Test Directory Manager: ", testDMgr.GetAbsolutePath())
	fmt.Println("Number of Sub-Directories: ", dirStats.NumOfSubDirs())
	fmt.Println(" Number of Files: ", dirStats.NumOfFiles())
	fmt.Println(" Number Of File Bytes: ", dirStats.NumOfBytes())
	fmt.Println(" Elapsed Time: ", elapsedStr)
	fmt.Println()
}
// maintTest90GetBaseProject displays the project base path with and
// without a trailing separator, plus a local path constructed by joining
// the base path with "archive".
func (mtst mainTests) maintTest90GetBaseProject() {

	noSepBase, err := mtst.getBaseProjectPath(false)

	if err != nil {
		fmt.Printf("Error returned by mtst.getBaseProjectPath(false)\n"+
			"Error='%v'\n\n", err.Error())
		return
	}

	sepBase, err := mtst.getBaseProjectPath(true)

	if err != nil {
		fmt.Printf("Error returned by mtst.getBaseProjectPath(true)\n"+
			"Error='%v'\n\n", err.Error())
		return
	}

	fh := pf.FileHelper{}

	joinedLocal := fh.JoinPathsAdjustSeparators(noSepBase, "archive")

	fmt.Println(" maintTest90GetBaseProject() ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Project Base Path Without Separator: ", noSepBase)
	fmt.Println(" Project Base Path With Separator: ", sepBase)
	fmt.Println(" Local Constructed Path: ", joinedLocal)
}
// mainTest89MoveSubDirectoryTree populates a scratch source tree by copying
// from an origin directory, then exercises DirMgr.MoveSubDirectoryTree()
// moving that tree to a destination directory and prints the move stats.
func (mtst mainTests) mainTest89MoveSubDirectoryTree() {

	originDir := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\logTest"

	originDMgr, err := pf.DirMgr{}.New(originDir)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(originDir).\n"+
			"originDir='%v'\nError='%v'", originDir, err.Error())
		return
	}

	srcDir := "D:\\T06\\TestDirMgr_MoveSubDirectoryTree_02"

	srcDMgr, err := pf.DirMgr{}.New(srcDir)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'", srcDir, err.Error())
		return
	}

	destDir := "D:\\T07\\TestDirMgr_MoveSubDirectoryTree_02"

	destDMgr, err := pf.DirMgr{}.New(destDir)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(destDir).\n"+
			"destDir='%v'\nError='%v'", destDir, err.Error())
		return
	}

	fsc := pf.FileSelectionCriteria{}

	// Test setup: populate the source tree from the origin directory.
	_,
		errs := originDMgr.CopyDirectoryTree(srcDMgr, true, fsc)

	if len(errs) > 0 {
		fmt.Printf("Test Setup Errors returned by originDMgr.CopyDirectoryTree(srcDMgr, true, fsc).\n"+
			"srcDMgr='%v'\nErrors Follow:\n\n%v", srcDMgr.GetAbsolutePath(),
			originDMgr.ConsolidateErrors(errs).Error())
		return
	}

	// Operation under test.
	dirMoveStats,
		errs :=
		srcDMgr.MoveSubDirectoryTree(destDMgr)

	if len(errs) > 0 {
		// Fix: message previously said 'Test Setup Errors' although this
		// is the operation under test.
		fmt.Printf("Errors returned by srcDMgr.MoveSubDirectoryTree(destDMgr).\n"+
			"destDMgr='%v'\nErrors Follow:\n\n%v", destDMgr.GetAbsolutePath(),
			originDMgr.ConsolidateErrors(errs).Error())
		return
	}

	fmt.Println(" mainTest89MoveSubDirectoryTree() ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println(" dirMoveStats.NumOfSubDirectories: ", dirMoveStats.NumOfSubDirectories)
	fmt.Println(" dirMoveStats.SourceFilesRemaining: ", dirMoveStats.SourceFilesRemaining)
	fmt.Println("dirMoveStats.SourceFileBytesRemaining: ", dirMoveStats.SourceFileBytesRemaining)
	fmt.Println(" dirMoveStats.SourceFilesMoved: ", dirMoveStats.SourceFilesMoved)
	fmt.Println(" dirMoveStats.SourceFileBytesMoved: ", dirMoveStats.SourceFileBytesMoved)
	fmt.Println(" dirMoveStats.TotalSrcFilesProcessed: ", dirMoveStats.TotalSrcFilesProcessed)
	// Fix: NumOfSubDirectories was printed a second time here; the
	// duplicate line is removed.
	fmt.Println(" dirMoveStats.DirsCreated: ", dirMoveStats.DirsCreated)
	fmt.Println(" dirMoveStats.TotalDirsProcessed: ", dirMoveStats.TotalDirsProcessed)
}
// mainTest88CopySubDirectoryTree exercises DirMgr.CopySubDirectoryTree(),
// copying the subdirectories of a source tree (but not the source's
// top-level files) to a target directory, then verifies directory counts,
// file counts and the returned copy statistics.
func (mtst mainTests) mainTest88CopySubDirectoryTree() {

	srcDir := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\logTest"

	srcDMgr, err := pf.DirMgr{}.New(srcDir)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'", srcDir, err.Error())
		return
	}

	targetDir := "D:\\T06\\TestDirMgr_CopySubDirectoryTree_06"

	fh := pf.FileHelper{}

	// Best-effort removal of residue from a prior run.
	_ = fh.DeleteDirPathAll(targetDir)

	targetDMgr, err := pf.DirMgr{}.New(targetDir)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(targetDir).\n"+
			"targetDir='%v'\nError='%v'", targetDir, err.Error())
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	fsc := pf.FileSelectionCriteria{}

	// Idiomatic short declaration replaces var-then-assign.
	copyEmptyDirectories := false

	// Operation under test.
	dTreeStats,
		errs := srcDMgr.CopySubDirectoryTree(targetDMgr, copyEmptyDirectories, fsc)

	if len(errs) > 0 {
		// Fix: message previously claimed literal 'true' was passed for
		// the copy-empty-directories flag.
		fmt.Printf("Errors returned by srcDMgr.CopySubDirectoryTree(targetDMgr, copyEmptyDirectories, fsc)\n"+
			"targetDMgr='%v'\nErrors Follow:\n\n%v",
			targetDMgr.GetAbsolutePath(),
			targetDMgr.ConsolidateErrors(errs))
		_ = targetDMgr.DeleteAll()
		return
	}

	if !targetDMgr.DoesAbsolutePathExist() {
		fmt.Println("ERROR: The target directory path DOES NOT EXIST!!")
		return
	}

	// Independent scan of the source subdirectory tree for verification.
	fsc = pf.FileSelectionCriteria{}

	srcDTreeInfo, err := srcDMgr.FindWalkSubDirFiles(fsc)

	if err != nil {
		fmt.Printf("Test Verification Error returned by srcDMgr.FindWalkSubDirFiles(fsc).\n"+
			"source directory='%v'\nError='%v'", srcDMgr.GetAbsolutePath(), err.Error())
		_ = targetDMgr.DeleteAll()
		return
	}

	// Independent scan of the target subdirectory tree for verification.
	fsc = pf.FileSelectionCriteria{}

	targetDTreeInfo, err := targetDMgr.FindWalkSubDirFiles(fsc)

	if err != nil {
		fmt.Printf("Test Verification Error returned by targetDMgr.FindWalkDirFiles(fsc).\n"+
			"target directory='%v'\nError='%v'", targetDMgr.GetAbsolutePath(), err.Error())
		_ = targetDMgr.DeleteAll()
		return
	}

	srcDirs := srcDTreeInfo.Directories.GetNumOfDirs()

	srcDirs-- // Discount the one empty subdirectory

	targetDirs := targetDTreeInfo.Directories.GetNumOfDirs()

	if srcDirs != targetDirs {
		fmt.Printf("ERROR: Expected %v-directories would be created.\n"+
			"Instead, %v-directories were created!\n",
			srcDirs, targetDirs)
		_ = targetDMgr.DeleteAll()
		return
	}

	tFileInfo, err := targetDMgr.FindFilesBySelectCriteria(fsc)

	if err != nil {
		fmt.Printf("Error returned by targetDMgr.FindFilesBySelectCriteria(fsc).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr.GetAbsolutePath(), err.Error())
		_ = targetDMgr.DeleteAll()
		return
	}

	// Top-level source files are NOT copied by CopySubDirectoryTree.
	if tFileInfo.GetNumOfFileMgrs() > 0 {
		fmt.Printf("ERROR: Expected ZERO files in top level target directory.\n"+
			"Instead, the top level target directory had %v-files.\nTarget Directory='%v'\n",
			tFileInfo.GetNumOfFileMgrs(), targetDMgr.GetAbsolutePath())
	}

	// Subtract 2 from the source directory count: the code discounts the
	// top-level source directory plus the empty subdirectory. (The prior
	// comment said 'Subtract 1', contradicting the code; the -2 values
	// are retained — confirm against CopySubDirectoryTree semantics.)
	expectedDirsCopied := uint64(srcDTreeInfo.Directories.GetNumOfDirs() - 2)
	expectedDirsCreated := uint64(srcDTreeInfo.Directories.GetNumOfDirs() - 2)
	expectedTotalDirsProcessed := uint64(srcDTreeInfo.Directories.GetNumOfDirs())

	if expectedTotalDirsProcessed != dTreeStats.TotalDirsScanned {
		fmt.Printf("Error: Expected dTreeCopyStats.TotalDirsScanned='%v'.\n"+
			"Instead, dTreeCopyStats.TotalDirsScanned='%v'\n",
			expectedTotalDirsProcessed, dTreeStats.TotalDirsScanned)
	}

	if expectedDirsCopied != dTreeStats.DirsCopied {
		fmt.Printf("Error: Expected dTreeCopyStats.DirsCopied='%v'.\n"+
			"Instead, dTreeCopyStats.DirsCopied='%v'\n",
			expectedDirsCopied, dTreeStats.DirsCopied)
	}

	if expectedDirsCreated != dTreeStats.DirsCreated {
		// Fix: this message previously printed expectedDirsCopied as the
		// expected value instead of expectedDirsCreated.
		fmt.Printf("Error: Expected dTreeCopyStats.DirsCreated='%v'.\n"+
			"Instead, dTreeCopyStats.DirsCreated='%v'\n",
			expectedDirsCreated, dTreeStats.DirsCreated)
	}

	expectedFilesCopied := uint64(srcDTreeInfo.FoundFiles.GetNumOfFileMgrs())
	expectedFileBytesCopied := srcDTreeInfo.FoundFiles.GetTotalFileBytes()
	expectedFilesNotCopied := uint64(0)
	expectedFileBytesNotCopied := uint64(0)
	expectedTotalFilesProcessed := expectedFilesCopied

	if expectedFilesCopied != dTreeStats.FilesCopied {
		fmt.Printf("Error: Expected dTreeCopyStats.FilesCopied='%v'.\n"+
			"Instead, dTreeCopyStats.FilesCopied='%v'\n",
			expectedFilesCopied, dTreeStats.FilesCopied)
	}

	if expectedFileBytesCopied != dTreeStats.FileBytesCopied {
		fmt.Printf("Error: Expected dTreeCopyStats.FileBytesCopied='%v'.\n"+
			"Instead, dTreeCopyStats.FileBytesCopied='%v'\n",
			expectedFileBytesCopied, dTreeStats.FileBytesCopied)
	}

	if expectedFilesNotCopied != dTreeStats.FilesNotCopied {
		fmt.Printf("Error: Expected dTreeCopyStats.FilesNotCopied='%v'.\n"+
			"Instead, dTreeCopyStats.FilesNotCopied='%v'\n",
			expectedFilesNotCopied, dTreeStats.FilesNotCopied)
	}

	if expectedFileBytesNotCopied != dTreeStats.FileBytesNotCopied {
		fmt.Printf("Error: Expected dTreeCopyStats.FileBytesNotCopied='%v'.\n"+
			"Instead, dTreeCopyStats.FileBytesNotCopied='%v'\n",
			expectedFileBytesNotCopied, dTreeStats.FileBytesNotCopied)
	}

	if expectedTotalFilesProcessed != dTreeStats.TotalFilesProcessed {
		fmt.Printf("Error: Expected dTreeCopyStats.TotalFilesProcessed='%v'.\n"+
			"Instead, dTreeCopyStats.TotalFilesProcessed='%v'\n",
			expectedTotalFilesProcessed, dTreeStats.TotalFilesProcessed)
	}

	// Fix: banner previously displayed 'mainTest84CopyDirTree()'.
	fmt.Println(" mainTest88CopySubDirectoryTree() ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Total Directories Processed: ", dTreeStats.TotalDirsScanned)
	fmt.Println(" Directories Copied: ", dTreeStats.DirsCopied)
	fmt.Println(" Directories Created: ", dTreeStats.DirsCreated)
	fmt.Println(" Total Files Processed: ", dTreeStats.TotalFilesProcessed)
	fmt.Println(" Files Copied: ", dTreeStats.FilesCopied)
	fmt.Println(" Files Not Copied: ", dTreeStats.FilesNotCopied)
	fmt.Println("Copy Empty Directories Flag: ", copyEmptyDirectories)
}
// mainTest87CopyDirectoryTree exercises DirMgr.CopyDirectoryTree() with a
// "*.txt" file filter and verifies the returned copy statistics.
//
// Test flow:
//  1. Setup: copy a 'levelfilesfortest' tree into a scratch source tree.
//  2. Setup: copy an html file set into a nested subdirectory of the source.
//  3. Copy only '*.txt' files from the source tree to a fresh target tree.
//  4. Verify directory/file counts, then delete both scratch trees.
//
// NOTE(review): unlike sibling tests, this function prints no SUCCESS
// banner — confirm whether that omission is intentional.
func (mtst mainTests) mainTest87CopyDirectoryTree() {

	setUpDir1 := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\filesfortest\\levelfilesfortest"

	setUpDMgr1, err := pf.DirMgr{}.New(setUpDir1)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(setUpDir1)\n"+
			"setUpDir1='%v'\nError='%v'\n",
			setUpDir1, err.Error())
		return
	}

	setupDir2 := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\filesfortest\\htmlFilesForTest"

	setUpDMgr2, err := pf.DirMgr{}.New(setupDir2)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(setupDir2)\n"+
			"setupDir2='%v'\nError='%v'\n",
			setupDir2, err.Error())
		return
	}

	srcDir := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\createFilesTest\\levelfilesfortest"

	srcDMgr, err := pf.DirMgr{}.New(srcDir)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'", srcDir, err.Error())
		return
	}

	fsc := pf.FileSelectionCriteria{}

	// Setup step 1: populate the scratch source tree.
	_,
		errs := setUpDMgr1.CopyDirectoryTree(srcDMgr, false, fsc)

	if len(errs) > 0 {
		fmt.Printf("Test Setup Errors returned by setUpDMgr1.CopyDirectoryTree(srcDMgr, false, fsc).\n"+
			"srcDMgr='%v'\nErrors Follow:\n%v", srcDMgr.GetAbsolutePath(),
			pf.DirMgr{}.ConsolidateErrors(errs).Error())
		return
	}

	srcHtmlDir := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\createFilesTest\\" +
		"levelfilesfortest\\level_01_dir\\level_02_dir\\htmlFilesForTest"

	srcHtmlDMgr, err := pf.DirMgr{}.New(srcHtmlDir)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(srcHtmlDir).\n"+
			"srcHtmlDir='%v'\nError='%v'", srcHtmlDir, err.Error())
		return
	}

	// Setup step 2: seed html files into the nested source subdirectory.
	fsc = pf.FileSelectionCriteria{}

	_,
		errs = setUpDMgr2.CopyDirectory(srcHtmlDMgr, fsc, false)

	if len(errs) > 0 {
		fmt.Printf("Test Setup Errors returned by setUpDMgr2.CopyDirectory(srcHtmlDMgr, fsc).\n"+
			"srcHtmlDMgr='%v'\nErrors Follow:\n%v\n",
			srcHtmlDMgr.GetAbsolutePath(),
			pf.DirMgr{}.ConsolidateErrors(errs).Error())
		return
	}

	targetDir := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\dirmgrtests\\levelfilesfortest"

	fh := pf.FileHelper{}

	// Clear any residue from a prior run before creating the target DirMgr.
	err = fh.DeleteDirPathAll(targetDir)

	if err != nil {
		fmt.Printf("Error returned from fh.DeleteDirPathAll(targetDir)\n"+
			"targetDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}

	targetDMgr, err := pf.DirMgr{}.New(targetDir)

	if err != nil {
		fmt.Printf("Test Setup Error returned by DirMgr{}.New(targetDir).\n"+
			"targetDir='%v'\nError='%v'", targetDir, err.Error())
		return
	}

	fsc = pf.FileSelectionCriteria{}
	fsc.FileNamePatterns = []string{"*.txt"}

	// Copy '.txt' files only to targetDMgr
	dtreeCopyStats,
		errs := srcDMgr.CopyDirectoryTree(
		targetDMgr,
		false,
		fsc)

	if len(errs) > 0 {
		fmt.Printf("Errors returned by srcDMgr.CopyDirectoryTree(targetDMgr, false, fsc)\n"+
			"targetDMgr='%v'\nErrors Follow:\n%v",
			targetDMgr.GetAbsolutePath(),
			pf.DirMgr{}.ConsolidateErrors(errs).Error())
		_ = fh.DeleteDirPathAll(targetDir)
		_ = fh.DeleteDirPathAll(srcDir)
		return
	}

	if !targetDMgr.DoesAbsolutePathExist() {
		fmt.Printf("ERROR: The target directory path DOES NOT EXIST!!\n"+
			"Number Of FilesCopied='%v'\n", dtreeCopyStats.FilesCopied)
		_ = fh.DeleteDirPathAll(targetDir)
		_ = fh.DeleteDirPathAll(srcDir)
		return
	}

	// Verification scan: count the '.txt' files actually in the source tree.
	fsc = pf.FileSelectionCriteria{}
	fsc.FileNamePatterns = []string{"*.txt"}

	srcTextDTreeInfo, err := srcDMgr.FindWalkDirFiles(fsc)

	if err != nil {
		fmt.Printf("Test Verification Error returned by\n"+
			"srcTextDTreeInfo, err := srcDMgr.FindWalkDirFiles(fsc).\n"+
			"source directory='%v'\nError='%v'",
			srcDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(targetDir)
		_ = fh.DeleteDirPathAll(srcDir)
		return
	}

	expectedNumOfCopiedFiles := srcTextDTreeInfo.FoundFiles.GetNumOfFileMgrs()

	// Verification scan: count the '.htm' files, which must NOT be copied.
	fsc = pf.FileSelectionCriteria{}
	fsc.FileNamePatterns = []string{"*.htm"}

	srcHtmlDTreeInfo, err := srcDMgr.FindWalkDirFiles(fsc)

	if err != nil {
		fmt.Printf("Test Verification Error returned by\n"+
			"srcHtmlDTreeInfo, err := srcDMgr.FindWalkDirFiles(fsc).\n"+
			"source directory='%v'\nError='%v'",
			srcDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(targetDir)
		_ = fh.DeleteDirPathAll(srcDir)
		return
	}

	expectedNumOfFilesNotCopied := srcHtmlDTreeInfo.FoundFiles.GetNumOfFileMgrs()

	// NOTE(review): this target scan reuses the '*.htm' criteria from the
	// scan above — confirm whether '*.txt' was intended here.
	targetDTreeInfo, err := targetDMgr.FindWalkDirFiles(fsc)

	if err != nil {
		fmt.Printf("Test Verification Error returned by targetDMgr.FindWalkDirFiles(fsc).\n"+
			"target directory='%v'\nError='%v'", targetDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(targetDir)
		_ = fh.DeleteDirPathAll(srcDir)
		return
	}

	// Subtract 1: presumably discounts the empty html subdirectory — confirm.
	expectedNumOfDirectoriesCopied := srcTextDTreeInfo.Directories.GetNumOfDirs() - 1

	if expectedNumOfDirectoriesCopied != targetDTreeInfo.Directories.GetNumOfDirs() {
		fmt.Printf("Expected %v-directories would be created. Instead, %v-directories were created!\n"+
			"targetDTreeInfo.Directories.GetNumOfDirs()='%v'\n",
			expectedNumOfDirectoriesCopied,
			targetDTreeInfo.Directories.GetNumOfDirs(),
			targetDTreeInfo.Directories.GetNumOfDirs())
	}

	if uint64(expectedNumOfCopiedFiles) != dtreeCopyStats.FilesCopied {
		fmt.Printf("Expected %v-files would be copied.\n"+
			"Instead, numberOfFilesCopied-'%v'\n",
			expectedNumOfCopiedFiles, dtreeCopyStats.FilesCopied)
	}

	if uint64(expectedNumOfFilesNotCopied) != dtreeCopyStats.FilesNotCopied {
		fmt.Printf("Expected %v-files would NOT be copied.\n"+
			"Instead, numberOfFilesNotCopied='%v'!",
			expectedNumOfFilesNotCopied, dtreeCopyStats.FilesNotCopied)
	}

	if uint64(expectedNumOfDirectoriesCopied) != dtreeCopyStats.DirsCopied {
		fmt.Printf("Expected that %v-directories would be copied.\n"+
			"Instead, %v-directories were copied.",
			expectedNumOfDirectoriesCopied, dtreeCopyStats.DirsCopied)
	}

	// Clean-up: remove both scratch trees.
	err = fh.DeleteDirPathAll(targetDir)

	if err != nil {
		fmt.Printf("Test Clean-Up Error returned by fh.DeleteDirPathAll(fh.DeleteDirPathAll(targetDir)\n"+
			"Target Directory Path='%v'\nError='%v'\n", targetDir, err.Error())
	}

	err = fh.DeleteDirPathAll(srcDir)

	if err != nil {
		fmt.Printf("Test Clean-Up Error returned by fh.DeleteDirPathAll(fh.DeleteDirPathAll(srcDir)\n"+
			"Source Directory Path='%v'\nError='%v'\n", srcDir, err.Error())
	}

	return
}
// mainTest86CopySubDirTree exercises DirMgr.CopySubDirectoryTree(),
// copying the subdirectories of a fixed source tree to a fixed target
// directory, and prints the resulting copy statistics.
func (mtst mainTests) mainTest86CopySubDirTree() {

	src := "D:\\T05\\levelfilesfortest"
	dst := "D:\\T06\\levelfilesfortest"

	srcDMgr, err := pf.DirMgr{}.New(src)

	if err != nil {
		fmt.Printf("Error returned by pf.DirMgr{}.New(src)\n"+
			"src='%v'\nError='%v'\n", src, err.Error())
		return
	}

	targetDMgr, err := pf.DirMgr{}.New(dst)

	if err != nil {
		fmt.Printf("Error returned by pf.DirMgr{}.New(dst)\n"+
			"dst='%v'\nError='%v'\n", dst, err.Error())
		return
	}

	fsc := pf.FileSelectionCriteria{}
	// Example filter: fsc.FileNamePatterns = []string{"*.htm"}

	// Idiomatic short declaration replaces var-then-assign.
	copyEmptyDirectories := false

	dtreeStats,
		errs := srcDMgr.CopySubDirectoryTree(targetDMgr, copyEmptyDirectories, fsc)

	if len(errs) > 0 {
		// Fix: message previously named CopyDirectoryTree and claimed
		// literal 'true' was passed for the copy-empty-directories flag.
		fmt.Printf("Errors returned by srcDMgr.CopySubDirectoryTree("+
			"targetDMgr, copyEmptyDirectories, fsc)\n"+
			"targetDMgr='%v'\n"+
			"Errors Follow:\n%v",
			targetDMgr.GetAbsolutePath(),
			srcDMgr.ConsolidateErrors(errs))
		return
	}

	if dtreeStats.ComputeError != nil {
		fmt.Printf("Error returned by dtreeStats.ComputeError\n"+
			"Error='%v'\n", dtreeStats.ComputeError.Error())
	}

	// Fix: banner previously displayed 'mainTest84CopyDirTree()'.
	fmt.Println(" mainTest86CopySubDirTree() ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Total Directories Processed: ", dtreeStats.TotalDirsScanned)
	fmt.Println(" Directories Copied: ", dtreeStats.DirsCopied)
	fmt.Println(" Directories Created: ", dtreeStats.DirsCreated)
	fmt.Println(" Total Files Processed: ", dtreeStats.TotalFilesProcessed)
	fmt.Println(" Files Copied: ", dtreeStats.FilesCopied)
	fmt.Println(" Files Not Copied: ", dtreeStats.FilesNotCopied)
	fmt.Println("Copy Empty Directories Flag: ", copyEmptyDirectories)
}
// mainTest85FindWalkSubDirFiles runs DirMgr.FindWalkSubDirFiles() against a
// fixed test directory and checks the returned file and directory counts
// against expected values.
func (mtst mainTests) mainTest85FindWalkSubDirFiles() {

	testDir := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\logTest"

	testDMgr, err := pf.DirMgr{}.New(testDir)

	if err != nil {
		fmt.Printf("Error returned by DirMgr{}.New(testDir).\n"+
			"testDir='%v'\nError='%v'\n",
			testDir, err.Error())
		return
	}

	dTreeInfo, err := testDMgr.FindWalkSubDirFiles(pf.FileSelectionCriteria{})

	if err != nil {
		fmt.Printf("Error returned by testDMgr.FindWalkSubDirFiles(fsc)\n"+
			"testDMgr='%v'\nError='%v'\n",
			testDMgr.GetAbsolutePath(),
			err.Error())
		return
	}

	const (
		wantDirs  = 7
		wantFiles = 5
	)

	if len(dTreeInfo.ErrReturns) > 0 {
		fmt.Printf("dTreeInfo Returned Errors:\n\n%v",
			testDMgr.ConsolidateErrors(dTreeInfo.ErrReturns))
		return
	}

	gotFiles := dTreeInfo.FoundFiles.GetNumOfFileMgrs()

	if wantFiles != gotFiles {
		fmt.Printf("Error: Expected dTreeInfo.FoundFiles.GetNumOfFileMgrs()='%v'.\n"+
			"Instead, dTreeInfo.FoundFiles.GetNumOfFileMgrs()='%v'\n",
			wantFiles, gotFiles)
		return
	}

	gotDirs := dTreeInfo.Directories.GetNumOfDirs()

	if wantDirs != gotDirs {
		fmt.Printf("Error: Expected dTreeInfo.Directories.GetNumOfDirs()='%v'\n"+
			"Instead, dTreeInfo.Directories.GetNumOfDirs()='%v'\n",
			wantDirs, gotDirs)
		return
	}

	fmt.Println(" mainTest85FindWalkSubDirFiles ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Number Of Directories Found: ", gotDirs)
	fmt.Println(" Number Of Files Found: ", gotFiles)
}
// mainTest84CopyDirTree exercises DirMgr.CopyDirectoryTree(), copying an
// entire directory tree from a fixed source to a fixed target, and prints
// the resulting copy statistics.
func (mtst mainTests) mainTest84CopyDirTree() {

	src := "D:\\T05\\levelfilesfortest"
	dst := "D:\\T06\\levelfilesfortest"

	srcDMgr, err := pf.DirMgr{}.New(src)

	if err != nil {
		fmt.Printf("Error returned by pf.DirMgr{}.New(src)\n"+
			"src='%v'\nError='%v'\n", src, err.Error())
		return
	}

	targetDMgr, err := pf.DirMgr{}.New(dst)

	if err != nil {
		fmt.Printf("Error returned by pf.DirMgr{}.New(dst)\n"+
			"dst='%v'\nError='%v'\n", dst, err.Error())
		return
	}

	fsc := pf.FileSelectionCriteria{}
	// Example filter: fsc.FileNamePatterns = []string{"*.txt"}

	// Idiomatic short declaration replaces var-then-assign.
	copyEmptyDirectories := false

	dtreeStats,
		errs := srcDMgr.CopyDirectoryTree(targetDMgr, copyEmptyDirectories, fsc)

	if len(errs) > 0 {
		// Fix: message previously claimed literal 'true' was passed for
		// the copy-empty-directories flag.
		fmt.Printf("Errors returned by srcDMgr.CopyDirectoryTree("+
			"targetDMgr, copyEmptyDirectories, fsc)\n"+
			"targetDMgr='%v'\n"+
			"Errors Follow:\n%v",
			targetDMgr.GetAbsolutePath(),
			srcDMgr.ConsolidateErrors(errs))
		return
	}

	if dtreeStats.ComputeError != nil {
		fmt.Printf("Error returned by dtreeStats.ComputeError\n"+
			"Error='%v'\n", dtreeStats.ComputeError.Error())
	}

	fmt.Println(" mainTest84CopyDirTree() ")
	fmt.Println("********************************************************")
	fmt.Println(" SUCCESS!!! ")
	fmt.Println("********************************************************")
	fmt.Println()
	fmt.Println("Total Directories Processed: ", dtreeStats.TotalDirsScanned)
	fmt.Println(" Directories Copied: ", dtreeStats.DirsCopied)
	fmt.Println(" Total Files Processed: ", dtreeStats.TotalFilesProcessed)
	fmt.Println(" Files Copied: ", dtreeStats.FilesCopied)
	fmt.Println(" Files Not Copied: ", dtreeStats.FilesNotCopied)
	fmt.Println("Copy Empty Directories Flag: ", copyEmptyDirectories)
}
// mainTest83DmgrDeleteDirAll - Manual test for DirMgr.DeleteAll().
// Deletes the directory "D:\T04\checkfiles\checkfiles03\dir01" together
// with all of its child files and sub-directories.
func (mtst mainTests) mainTest83DmgrDeleteDirAll() {
//srcDir := "D:\\T04\\checkfiles\\checkfiles03\\dir01\\dir02\\dir03"
srcDir := "D:\\T04\\checkfiles\\checkfiles03\\dir01"
dMgr, err := pf.DirMgr{}.New(srcDir)
if err != nil {
fmt.Printf("Error returned by pf.DirMgr{}.New(srcDir)\n"+
"srcDir='%v'\nError='%v'\n", srcDir, err.Error())
return
}
// DeleteAll removes the directory and everything beneath it.
err = dMgr.DeleteAll()
if err != nil {
fmt.Printf("Error returned by dMgr.DeleteAll()\n"+
"dMgr='%v'\nError='%v'\n",
dMgr.GetAbsolutePath(), err.Error())
return
}
fmt.Println("            mainTest83DmgrDeleteDirAll()                ")
fmt.Println("********************************************************")
fmt.Println("                      SUCCESS!!!                        ")
fmt.Println("********************************************************")
fmt.Println()
}
// mainTest82CopyByIO - Manual test exercising
// FileMgr.CopyFileMgrByIoWithBuffer(). Copies a single file from
// "D:\T03" to "D:\T04\checkfiles\checkfiles03" and verifies that the
// destination file exists after the copy completes.
func (mtst mainTests) mainTest82CopyByIO() {

	fh := pf.FileHelper{}

	// setupFileName := "testRead918256.txt"
	//sourceFile := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\filesfortest\\levelfilesfortest\\level_0_3_test.txt"
	sourceFile := "D:\\T03\\ppc_6800_gsg.pdf"
	// sourceFile := fh.AdjustPathSlash(
	//	"D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\filesfortest\\checkfiles\\" + setupFileName)

	sourceFile = fh.AdjustPathSlash(sourceFile)

	destFile := fh.AdjustPathSlash(
		"D:\\T04\\checkfiles\\checkfiles03\\ppc_6800_gsg.pdf")

	// Test setup check: the source file must already exist.
	fileDoesExist, err := fh.DoesThisFileExist(sourceFile)

	if err != nil {
		fmt.Printf("Error returned by fh.DoesThisFileExist(sourceFile)\n"+
			"sourceFile='%v'\nError='%v'\n", sourceFile, err.Error())
		return
	}

	if !fileDoesExist {
		fmt.Printf("Test Setup Error: Source File DOES NOT EXIST!\n"+
			"sourceFile='%v'\n", sourceFile)
		return
	}

	sourceFMgr, err := pf.FileMgr{}.New(sourceFile)

	if err != nil {
		fmt.Printf("Error returned by pf.FileMgr{}.New(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n",
			sourceFile, err.Error())
		return
	}

	destFMgr, err := pf.FileMgr{}.New(destFile)

	if err != nil {
		fmt.Printf("Error returned by pf.FileMgr{}.New(destFile).\n"+
			"destFile='%v'\nError='%v'\n",
			destFile, err.Error())
		return
	}

	// Buffer size zero selects the method's default buffer size.
	err = sourceFMgr.CopyFileMgrByIoWithBuffer(&destFMgr, 0)

	if err != nil {
		// FIX: this message previously referenced fh.CopyFileByIo(),
		// which is not the method actually called here.
		fmt.Printf("Error returned by sourceFMgr.CopyFileMgrByIoWithBuffer(&destFMgr, 0)\n"+
			"sourceFile='%v'\ndestFile='%v'\nError='%v'\n",
			sourceFile, destFile, err.Error())
		return
	}

	// Verify the copy: the destination file must now exist.
	fileDoesExist, err = fh.DoesThisFileExist(destFile)

	if err != nil {
		fmt.Printf("Error returned by fh.DoesThisFileExist(destFile)\n"+
			"destFile='%v'\nError='%v'\n", destFile, err.Error())
		return
	}

	if !fileDoesExist {
		fmt.Printf("Error: After the copy operation, the Destination File\n"+
			"DOES NOT EXIST!\n"+
			"destFile='%v'\n", destFile)
		return
	}

	fmt.Println("                 mainTest82CopyByIO()                   ")
	fmt.Println("********************************************************")
	fmt.Println("                      SUCCESS!!!                        ")
	fmt.Println("********************************************************")
	fmt.Println(" Copied Source File: ", sourceFile)
	fmt.Println()
	fmt.Println("To Destination File: ", destFile)
}
// mainTest81ReadFileLine - Manual test for FileMgr.ReadFileLine().
// Mirrors TestFileMgr_ReadFileLine_03 in xt_filemanager_07_test.go.
// Copies a fixture file into a scratch directory, reads the first four
// '\n'-delimited lines, then verifies that line #4 matches the expected
// text and that the final read returned io.EOF.
func (mtst mainTests) mainTest81ReadFileLine() {
// TestFileMgr_ReadFileLine_03
// xt_filemanager_07_test.go
fh := pf.FileHelper{}
setupFileName := "testRead918256.txt"
setupFile := fh.AdjustPathSlash(
"D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\filesfortest\\checkfiles\\" + setupFileName)
filePath := fh.AdjustPathSlash(
"D:\\T04\\checkfiles\\checkfiles03\\" + setupFileName)
absBaseFilePath, err := fh.MakeAbsolutePath(
"D:\\T04\\checkfiles\\checkfiles03")
if err != nil {
fmt.Printf("Test Setup Error: Error returned by fh.MakeAbsolutePath"+
"(\"../checkfiles/checkfiles03/checkfiles03_02\").\n"+
"Error='%v'\n", err.Error())
return
}
// Test setup: create the scratch directory and copy in a fresh fixture.
err = fh.MakeDirAll(absBaseFilePath)
if err != nil {
fmt.Printf("Test Setup Error: Error returned by fh.MakeDirAll(absBaseFilePath).\n"+
"absBaseFilePath='%v'\nError='%v'\n", absBaseFilePath, err.Error())
return
}
err = fh.DeleteDirFile(filePath)
if err != nil {
fmt.Printf("Test Setup Error returned by fh.DeleteDirFile(filePath)\n"+
"filePath='%v'\nError='%v'\n",
filePath, err.Error())
return
}
err = fh.CopyFileByIo(setupFile, filePath)
if err != nil {
fmt.Printf("Test Setup Error returned by fh.CopyFileByIo(setupFile, filePath)\n"+
"setupFile='%v'\nfilePath='%v'\nError='%v'\n",
setupFile, filePath, err.Error())
return
}
fMgr, err := pf.FileMgr{}.NewFromPathFileNameExtStr(filePath)
if err != nil {
fmt.Printf("Error returned from common.FileMgr{}."+
"NewFromPathFileNameExtStr(filePath).\n"+
"filePath='%v'\nError='%v'\n",
filePath, err.Error())
return
}
// Read the first four newline-delimited lines. io.EOF from the last
// read is tolerated here and asserted below.
delim := byte('\n')
bytes := make([]byte, 0, 50)
for i := 0; i < 4; i++ {
bytes, err = fMgr.ReadFileLine(delim)
if err != nil &&
err != io.EOF {
fmt.Printf("Error returned by fMgr.ReadFileLine(delim) on "+
"Line#1.\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
_ = fMgr.CloseThisFile()
return
}
fmt.Printf("Line-%v: %v\n", i, string(bytes))
}
fmt.Println()
// Remember whether the final read hit end-of-file.
isErrEOF := false
if err == io.EOF {
isErrEOF = true
}
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned by fMgr.CloseThisFile().\n"+
"fMgr='%v'\n Error='%v'",
fMgr.GetAbsolutePathFileName(), err.Error())
return
}
// After CloseThisFile() the internal file pointer must be nil.
if fMgr.GetFilePtr() != nil {
fmt.Println("ERROR: After fMgr.CloseThisFile(), expected " +
"fMgr.filePtr==nil.\n" +
"However, fMgr.filePtr IS NOT EQUAL TO NIL!")
_ = fMgr.CloseThisFile()
return
}
// 'bytes' still holds line #4; strip any Windows line ending before
// comparing against the expected text.
actualStr := string(bytes)
actualStr = strings.Replace(actualStr, "\r\n", "", -1)
isErr := false
if "Thank you, for your support." != actualStr {
fmt.Printf("Expected line #4 = 'Thank you, for your support.'\n"+
"Instead, line #4 = '%v'\n", actualStr)
isErr = true
}
if !isErrEOF {
fmt.Println("ERROR: Expected the last error return from fMgr.ReadFileLine(delim)\n" +
"to be io.EOF.\n" +
"Instead, error WAS NOT equal to io.EOF!")
isErr = true
}
// Cleanup: close (harmlessly, already closed) and delete scratch file.
_ = fMgr.CloseThisFile()
_ = fMgr.DeleteThisFile()
if isErr {
return
}
fmt.Println("               mainTest81ReadFileLine()                 ")
fmt.Println("********************************************************")
fmt.Println("                      SUCCESS!!!                        ")
fmt.Println("********************************************************")
}
// mainTest80FileAccessCtrlDetection - Manual test which creates a
// write-only FileAccessControl and verifies that the detected file open
// type is either read-write or write-only.
func (mtst mainTests) mainTest80FileAccessCtrlDetection() {

	fileAccessCtrl, err2 := pf.FileAccessControl{}.NewWriteOnlyAccess()

	if err2 != nil {
		// FIX: this message previously referenced NewReadWriteAccess(),
		// which is not the constructor actually called here.
		fmt.Printf(
			"Error returned by FileAccessControl{}.NewWriteOnlyAccess().\n"+
				"Error='%v'\n", err2.Error())
		return
	}

	fNewOpenType, err2 := fileAccessCtrl.GetFileOpenType()

	if err2 != nil {
		fmt.Printf("Error returned by fileAccessCtrl.GetFileOpenType()!\n"+
			"Error='%v'\n", err2.Error())
		return
	}

	if fNewOpenType != pf.FOpenType.TypeReadWrite() &&
		fNewOpenType != pf.FOpenType.TypeWriteOnly() {
		fmt.Printf("fNewOpenType error!\n"+
			"fNewOpenType=='%v'\n", fNewOpenType.String())
		return
	}

	// FIX: the banner previously misspelled the function name as
	// "maintTest80FileAccessCtrlDetection()".
	fmt.Println("           mainTest80FileAccessCtrlDetection()          ")
	fmt.Println("********************************************************")
	fmt.Println("                      SUCCESS!!!                        ")
	fmt.Println("********************************************************")
	fmt.Println("fNewOpenType: ", fNewOpenType.String())
}
// maintTest79WriteBytes - Manual test for FileMgr.WriteBytesToFile() and
// FileMgr.ReadFileBytes(). Creates a scratch file, writes a test string,
// verifies the byte count reported by GetFileBytesWritten(), reads the
// file back, and finally deletes the scratch file.
func (mtst mainTests) maintTest79WriteBytes() {
fh := pf.FileHelper{}
testText := "Now is the time for all good men to come to the aid of their country."
lenTestText := len(testText)
filePath := "D:\\T04\\checkfiles\\checkfiles03\\testWriteXX241289.txt"
absFilePath, err := fh.MakeAbsolutePath(filePath)
if err != nil {
fmt.Printf("Error returned by fh.MakeAbsolutePath(filePath)\n"+
"filePath='%v'\nError='%v'\n", filePath, err.Error())
return
}
fMgr, err := pf.FileMgr{}.NewFromPathFileNameExtStr(absFilePath)
if err != nil {
fmt.Printf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(filePath). "+
"filePathName='%v' Error='%v'",
filePath, err.Error())
return
}
err = fMgr.CreateThisFile()
if err != nil {
fmt.Printf("Error returned by fMgr.CreateThisFile(). Error='%v' ",
err.Error())
return
}
err = fMgr.OpenThisFileWriteOnly()
if err != nil {
fmt.Printf("Error returned by fMgr.OpenThisFileWriteOnly(). Error='%v' ",
err.Error())
return
}
bytesToWrite := []byte(testText)
numBytesWritten, err := fMgr.WriteBytesToFile(bytesToWrite)
if err != nil {
fmt.Printf("Error returned by fMgr.WriteBytesToFile(bytesToWrite). Error='%v' ",
err.Error())
return
}
err = fMgr.FlushBytesToDisk()
if err != nil {
fmt.Printf("Error returned by fMgr.FlushBytesToDisk(). Error='%v' ",
err.Error())
return
}
// Capture the write counter before Close can reset internal state.
verifyBytesWritten := fMgr.GetFileBytesWritten()
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned by #1 fMgr.CloseThisFile().")
return
}
if verifyBytesWritten != uint64(numBytesWritten) {
fmt.Printf("verifyBytesWritten != numBytesWritten\n"+
"verifyBytesWritten='%v'\nnumBytesWritten='%v'\n",
verifyBytesWritten, uint64(numBytesWritten))
return
}
// Buffer is intentionally 5 bytes larger than the expected content.
// NOTE(review): fMgr was closed above — ReadFileBytes presumably
// re-opens the file; confirm against the FileMgr documentation.
bytesRead := make([]byte, lenTestText+5)
numBytesRead, err := fMgr.ReadFileBytes(bytesRead)
if err != nil {
fmt.Printf("Error returned by fMgr.ReadFileBytes(bytesRead). Error='%v'",
err.Error())
return
}
// Non-fatal check: a zero-byte read is reported but cleanup continues.
if numBytesRead == 0 {
fmt.Printf("Number of bytes read returned by fMgr.ReadFileBytes() is ZERO!\n"+
"fMgr='%v'\n",
fMgr.GetAbsolutePath())
}
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned by #2 fMgr.CloseThisFile().")
return
}
err = fMgr.DeleteThisFile()
if err != nil {
fmt.Printf("fMgr.DeleteThisFile() FAILED! Error='%v'", err.Error())
return
}
fmt.Println("                  maintTest79WriteBytes                 ")
fmt.Println("********************************************************")
fmt.Println("                      SUCCESS!!!                        ")
fmt.Println("********************************************************")
}
// maintTest78WriteBytes - Manual test for FileMgr.WriteBytesToFile() and
// FileMgr.ReadFileBytes(). Writes a test string to a scratch file, reads
// it back, deletes the file, and then cross-checks the write counter,
// the read count and the recovered text.
func (mtst mainTests) maintTest78WriteBytes() {
fh := pf.FileHelper{}
testText := "Now is the time for all good men to come to the aid of their country."
lenTestText := len(testText)
filePath := "D:\\T04\\checkfiles\\checkfiles03\\testWriteXX241289.txt"
absFilePath, err := fh.MakeAbsolutePath(filePath)
if err != nil {
fmt.Printf("Error returned by fh.MakeAbsolutePath(filePath)\n"+
"filePath='%v'\nError='%v'\n", filePath, err.Error())
return
}
fMgr, err := pf.FileMgr{}.NewFromPathFileNameExtStr(absFilePath)
if err != nil {
fmt.Printf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(filePath). "+
"filePathName='%v' Error='%v'",
filePath, err.Error())
return
}
err = fMgr.CreateThisFile()
if err != nil {
fmt.Printf("Error returned by fMgr.CreateThisFile(). Error='%v' ",
err.Error())
return
}
err = fMgr.OpenThisFileWriteOnly()
if err != nil {
fmt.Printf("Error returned by fMgr.OpenThisFileWriteOnly(). Error='%v' ",
err.Error())
return
}
bytesToWrite := []byte(testText)
numBytesWritten, err := fMgr.WriteBytesToFile(bytesToWrite)
if err != nil {
fmt.Printf("Error returned by fMgr.WriteBytesToFile(bytesToWrite). Error='%v' ",
err.Error())
return
}
err = fMgr.FlushBytesToDisk()
if err != nil {
fmt.Printf("Error returned by fMgr.FlushBytesToDisk(). Error='%v' ",
err.Error())
return
}
// Capture the write counter before Close can reset internal state.
verifyBytesWritten := fMgr.GetFileBytesWritten()
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned by #1 fMgr.CloseThisFile().")
return
}
// Buffer is intentionally 5 bytes larger than the expected content.
// NOTE(review): fMgr was closed above — ReadFileBytes presumably
// re-opens the file; confirm against the FileMgr documentation.
bytesRead := make([]byte, lenTestText+5)
numBytesRead, err := fMgr.ReadFileBytes(bytesRead)
if err != nil {
fmt.Printf("Error returned by fMgr.ReadFileBytes(bytesRead). Error='%v'",
err.Error())
return
}
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned by #2 fMgr.CloseThisFile().")
return
}
err = fMgr.DeleteThisFile()
if err != nil {
fmt.Printf("fMgr.DeleteThisFile() FAILED! Error='%v'", err.Error())
return
}
// Trim the 5 unused buffer bytes before comparing the text.
stringRead := string(bytesRead)
stringRead = stringRead[:len(bytesRead)-5]
if testText != stringRead {
fmt.Printf("Error: Expected stringRead='%v'. Instead, stringRead='%v' ",
testText, stringRead)
return
}
// The remaining checks are non-fatal: each mismatch is reported and
// the banner below still prints.
if verifyBytesWritten != uint64(lenTestText) {
fmt.Printf("Error: verifyBytesWritten != lenTestText. verifyBytesWritten='%v' "+
"lenTestText='%v' ", verifyBytesWritten, lenTestText)
}
if numBytesRead != lenTestText {
fmt.Printf("Error: numBytesRead != lenTestText. numBytesRead='%v' "+
"lenTestText='%v' ", numBytesRead, lenTestText)
}
if numBytesRead != numBytesWritten {
fmt.Printf("Error: numBytesRead != numBytesWritten. numBytesRead='%v' "+
"numBytesWritten='%v' ", numBytesRead, numBytesWritten)
}
fmt.Println("                  maintTest78WriteBytes                 ")
fmt.Println("********************************************************")
fmt.Println("                      SUCCESS!!!                        ")
fmt.Println("********************************************************")
}
// maintTest77OpenThisFileWriteOnlyAppend - Manual test which creates a
// file, opens it write-only, writes one line of text and then deletes
// the file. (Despite the name, this variant does not reach the append
// mode; see mainTest76OpenThisFileWriteOnlyAppend for that.)
func (mtst mainTests) maintTest77OpenThisFileWriteOnlyAppend() {

	fh := pf.FileHelper{}

	testText1 := "Now is the time for all good men to come to the aid of their country.\n"
	// testText2 := "Damn the torpedoes, full speed ahead!\n"

	filePath := fh.AdjustPathSlash(
		"D:\\T04\\checkfiles\\checkfiles03\\TestFileMgr_OpenThisFileWriteOnlyAppend_01.txt")

	basePath := fh.AdjustPathSlash("D:\\T04\\checkfiles")

	// Test setup: remove any artifacts left by prior runs.
	err := fh.DeleteDirPathAll(basePath)

	if err != nil {
		// FIX: message previously referenced fh.DeleteDirFile(filePath),
		// which is not the call actually made here.
		fmt.Printf("Error returned by fh.DeleteDirPathAll(basePath)\n"+
			"basePath='%v'\nError='%v'\n",
			basePath, err.Error())
		return
	}

	fMgr, err := pf.FileMgr{}.NewFromPathFileNameExtStr(filePath)

	if err != nil {
		fmt.Printf("Error returned from FileMgr{}.NewFromPathFileNameExtStr"+
			"(filePath).\nfilePathName='%v'\nError='%v'\n",
			filePath, err.Error())
		return
	}

	err = fMgr.CreateDirAndFile()

	if err != nil {
		// FIX: message previously referenced fMgr.CreateThisFile(),
		// which is not the method actually called here.
		fmt.Printf("Error returned by fMgr.CreateDirAndFile().\n"+
			"fMgr='%v'\nError='%v'\n",
			fMgr.GetAbsolutePathFileName(), err.Error())
		_ = fMgr.DeleteThisFile()
		return
	}

	err = fMgr.CloseThisFile()

	if err != nil {
		fmt.Printf("Error returned by fMgr.CloseThisFile().\n"+
			"fMgr='%v'\nError='%v'\n",
			fMgr.GetAbsolutePathFileName(), err.Error())
		return
	}

	err = fMgr.OpenThisFileWriteOnly()

	if err != nil {
		fmt.Printf("Error returned by fMgr.OpenThisFileWriteOnly().\n"+
			"fMgr='%v'\nError='%v'\n",
			fMgr.GetAbsolutePathFileName(), err.Error())
		_ = fMgr.DeleteThisFile()
		return
	}

	bytesToWrite := []byte(testText1)

	// fMgr.isFilePtrOpen = false

	bytesWritten, err := fMgr.WriteBytesToFile(bytesToWrite)

	if err != nil {
		fmt.Printf("Error returned by fMgr.WriteBytesToFile(bytesToWrite).\n"+
			"fMgr='%v'\nError='%v'\n",
			fMgr.GetAbsolutePathFileName(), err.Error())
		_ = fMgr.DeleteThisFile()
		return
	}

	fmt.Println("bytesWritten: ", bytesWritten)

	err = fMgr.DeleteThisFile()

	if err != nil {
		// FIX: removed the pointless DeleteThisFile() retry that
		// followed a failed DeleteThisFile() call.
		fmt.Printf("fMgr.DeleteThisFile() FAILED!\n"+
			"fMgr='%v'\nError='%v'\n",
			fMgr.GetAbsolutePathFileName(), err.Error())
		return
	}

	fmt.Println("         maintTest77OpenThisFileWriteOnlyAppend         ")
	fmt.Println("********************************************************")
	fmt.Println("                      SUCCESS!!!                        ")
	fmt.Println("********************************************************")
}
// mainTest76OpenThisFileWriteOnlyAppend - Manual test for
// FileMgr.OpenThisFileWriteOnlyAppend(). Writes one line to a new file,
// closes it, re-opens it write-only in append mode, writes a second
// line, then reads both lines back and verifies their contents.
func (mtst mainTests) mainTest76OpenThisFileWriteOnlyAppend() {
fh := pf.FileHelper{}
testText1 := "Now is the time for all good men to come to the aid of their country.\n"
testText2 := "Damn the torpedoes, full speed ahead!\n"
filePath := fh.AdjustPathSlash(
"D:\\T04\\checkfiles\\checkfiles03\\TestFileMgr_OpenThisFileWriteOnlyAppend_01.txt")
basePath := fh.AdjustPathSlash("D:\\T04\\checkfiles")
// Test setup: clear any artifacts from prior runs.
// NOTE(review): the error message below cites fh.DeleteDirFile(filePath)
// but the call is fh.DeleteDirPathAll(basePath) — message is misleading.
err := fh.DeleteDirPathAll(basePath)
if err != nil {
fmt.Printf("Error returned by fh.DeleteDirFile(filePath)\n"+
"filePath='%v'\nError='%v'\n",
filePath, err.Error())
return
}
fMgr, err := pf.FileMgr{}.NewFromPathFileNameExtStr(filePath)
if err != nil {
fmt.Printf("Error returned from FileMgr{}.NewFromPathFileNameExtStr"+
"(filePath).\nfilePathName='%v'\nError='%v'\n",
filePath, err.Error())
return
}
// NOTE(review): message cites CreateThisFile() but the call is
// CreateDirAndFile().
err = fMgr.CreateDirAndFile()
if err != nil {
fmt.Printf("Error returned by fMgr.CreateThisFile().\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned by fMgr.CloseThisFile().\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
return
}
// Write line #1 in plain write-only mode.
err = fMgr.OpenThisFileWriteOnly()
if err != nil {
fmt.Printf("Error returned by fMgr.OpenThisFileWriteOnly().\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
bytesToWrite := []byte(testText1)
_, err = fMgr.WriteBytesToFile(bytesToWrite)
if err != nil {
fmt.Printf("Error returned by fMgr.WriteBytesToFile(bytesToWrite).\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned by #1 fMgr.CloseThisFile().\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
// Write line #2 in write-only append mode — the behavior under test.
err = fMgr.OpenThisFileWriteOnlyAppend()
if err != nil {
fmt.Printf("Error returned by fMgr.OpenThisFileWriteOnlyAppend().\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
bytesToWrite = []byte(testText2)
_, err = fMgr.WriteBytesToFile(bytesToWrite)
if err != nil {
fmt.Printf("Error returned by #2 fMgr.WriteBytesToFile(bytesToWrite).\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
// NOTE(review): this is the second Close but the message says "#1".
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned by #1 fMgr.CloseThisFile().\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
// Read both lines back to verify the append preserved line #1.
bytesRead1, err := fMgr.ReadFileLine('\n')
if err != nil {
fmt.Printf("Error returned by #1 fMgr.ReadFileLine(newline).\n"+
"Error='%v'\n\n", err.Error())
_ = fMgr.CloseThisFile()
return
}
bytesRead2, err := fMgr.ReadFileLine('\n')
if err != nil {
fmt.Printf("Error returned by #2 fMgr.ReadFileLine(newline).\n"+
"Error='%v'\n", err.Error())
_ = fMgr.CloseThisFile()
return
}
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned by #2 fMgr.CloseThisFile().\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
err = fMgr.DeleteThisFile()
if err != nil {
fmt.Printf("fMgr.DeleteThisFile() FAILED!\n"+
"fMgr='%v'\nError='%v'\n",
fMgr.GetAbsolutePathFileName(), err.Error())
return
}
// Compare line #1 after stripping the trailing newline on both sides.
stringRead := string(bytesRead1)
stringRead = stringRead[:len(stringRead)-1]
stringRead1 := stringRead
testText1 = testText1[:len(testText1)-1]
setSuccess := true
if testText1 != stringRead {
fmt.Printf("Error: Expected #1 stringRead='%v'.\n"+
"Instead, #1 stringRead='%v'\n",
testText1, stringRead)
setSuccess = false
}
// Compare line #2 after removing any Windows line endings.
stringRead = string(bytesRead2)
stringRead = strings.Replace(stringRead, "\r\n", "", -1)
testText2 = strings.Replace(testText2, "\r\n", "", -1)
if testText2 != stringRead {
fmt.Printf("Error: Expected #2 stringRead='%v'.\n"+
"Instead, #2 stringRead='%v'\n",
testText2, stringRead)
setSuccess = false
}
if !setSuccess {
return
}
fmt.Println("          mainTest76OpenThisFileWriteOnlyAppend         ")
fmt.Println("********************************************************")
fmt.Println("                      SUCCESS!!!                        ")
fmt.Println("********************************************************")
fmt.Println("stringRead1: ", stringRead1)
fmt.Println("  testText1: ", testText1)
return
}
// maintTest75FileMgrGetTimeVal - Manual test for FileMgr.GetFileModTime().
// Prints the last-modification time of a fixture file using an explicit
// reference-time format.
func (mtst mainTests) maintTest75FileMgrGetTimeVal() {
filePath :=
"D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\filesfortest\\levelfilesfortest\\level_0_2_test.txt"
fMgr, err := pf.FileMgr{}.New(filePath)
if err != nil {
fmt.Printf("Error returned by pf.FileMgr{}.New(filePath)\n"+
"filePath='%v'\n"+
"Error='%v'\n", filePath, err.Error())
return
}
fileModTime, err := fMgr.GetFileModTime()
if err != nil {
fmt.Printf("Error returned by fMgr.GetFileModTime()\n"+
"Error='%v'\n", err.Error())
return
}
// Go reference-time layout: yyyy-mm-dd hh:mm:ss offset zone.
timeFormatSpec := "2006-01-02 15:04:05 -0700 MST"
fmt.Println("            maintTest75FileMgrGetTimeVal                ")
fmt.Println("********************************************************")
fmt.Println(" fileModTime: ", fileModTime.Format(timeFormatSpec))
}
// mainTest73FileHelperFileExist - Manual test which checks whether the
// project 'checkfiles' directory exists using
// DirMgr.DoesThisDirectoryExist().
func (mtst mainTests) mainTest73FileHelperFileExist() {

	filePath := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\checkfiles"

	dMgr, err := pf.DirMgr{}.New(filePath)

	if err != nil {
		fmt.Printf("Error returned by pf.DirMgr{}.New(filePath)\n"+
			"Error='%v'", err.Error())
		return
	}

	dirDoesExist, err := dMgr.DoesThisDirectoryExist()

	// FIX: this error return was previously ignored.
	if err != nil {
		fmt.Printf("Error returned by dMgr.DoesThisDirectoryExist()\n"+
			"Error='%v'", err.Error())
		return
	}

	fmt.Println("            mainTest73FileHelperFileExist               ")
	fmt.Println("********************************************************")
	fmt.Println("    filePath: ", filePath)
	fmt.Println("dirDoesExist: ", dirDoesExist)
}
// mainTest72OpenReadOnlyFile - Manual test for FileMgr.OpenThisFileReadOnly().
// Creates a brand-new empty file, opens it read-only, then closes and
// deletes it. Verifies the open/close sequence succeeds on a freshly
// created file.
func (mtst mainTests) mainTest72OpenReadOnlyFile() {
fh := pf.FileHelper{}
rawPath := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\checkfiles\\TestFileMgr_OpenThisFileReadOnly_03.txt"
filePath, err := fh.MakeAbsolutePath(rawPath)
if err != nil {
fmt.Printf("Error returned by fh.MakeAbsolutePath("+
"rawPath)\n"+
"rawPath='%v'\n"+
"Error='%v'\n", rawPath, err.Error())
return
}
// Test setup: remove any leftover file from a previous run.
err = fh.DeleteDirFile(filePath)
if err != nil {
fmt.Printf("Error returned from fh.DeleteDirFile(filePath)\n"+
"filePath='%v'\nError='%v'\n", filePath, err.Error())
return
}
fMgr, err := pf.FileMgr{}.NewFromPathFileNameExtStr(filePath)
if err != nil {
fmt.Printf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(filePath).\n"+
"filePath='%v'\nError='%v'\n", filePath, err.Error())
_ = fh.DeleteDirFile(filePath)
return
}
// Precondition: the test file must not exist yet.
fileDoesExist, err := fMgr.DoesThisFileExist()
if err != nil {
fmt.Printf("Non-Path Error returned from #1 fMgr.DoesThisFileExist().\n"+
"filePath='%v'\nError='%v'\n",
filePath, err.Error())
_ = fh.DeleteDirFile(filePath)
return
}
if fileDoesExist {
fmt.Printf("ERROR: Test file should NOT exist!.\n"+
"However, test file DOES EXIST!\n"+
"test file='%v'", filePath)
_ = fh.DeleteDirFile(filePath)
return
}
err = fMgr.CreateThisFile()
if err != nil {
fmt.Printf("Error returned from fMgr.CreateThisFile().\n"+
"filePath='%v'\nError='%v'\n",
fMgr.GetAbsolutePath(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
// The behavior under test: open the new file read-only.
err = fMgr.OpenThisFileReadOnly()
if err != nil {
fmt.Printf("Error returned from fMgr.OpenThisFileReadOnly().\n"+
"filePath='%v'\nError='%v'\n",
fMgr.GetAbsolutePath(), err.Error())
_ = fMgr.DeleteThisFile()
return
}
// Cleanup: close and delete; failures here are reported, not fatal.
err = fMgr.CloseThisFile()
if err != nil {
fmt.Printf("Error returned from fMgr.CloseThisFile().\n"+
"Error='%v'\n",
err.Error())
}
err = fMgr.DeleteThisFile()
if err != nil {
fmt.Printf("Error returned from fMgr.DeleteThisFile().\n"+
"Error='%v'\n",
err.Error())
}
}
// mainTest71IsPathFileString - Manual test for FileHelper.IsPathFileString().
// Classifies a relative path string which does not exist on disk and
// prints the detected path/file type plus the computed absolute path.
func (mtst mainTests) mainTest71IsPathFileString() {

	fh := pf.FileHelper{}

	testPath := fh.AdjustPathSlash("../filesfortest/levelfilesfortest/level_01_dir/" +
		"level_02_dir/iDoNotExist")

	pathFileType, absolutePath, err := fh.IsPathFileString(testPath)

	if err != nil {
		fmt.Printf("Error returned from fh.IsPathFileString(testPath). "+
			"pathFile='%v' Error='%v' ", testPath, err.Error())
		return
	}

	// FIX: the banner previously printed "mainTest70AdjustPathStr",
	// the name of a different test function.
	fmt.Println("              mainTest71IsPathFileString                ")
	fmt.Println("********************************************************")
	fmt.Println("    testPath: ", testPath)
	fmt.Println("absolutePath: ", absolutePath)
	fmt.Println("pathFileType: ", pathFileType.String())
}
// mainTest70AdjustPathStr - Manual test for FileHelper.AdjustPathSlash().
// Prints how a relative path's separators are adjusted for the host OS.
func (mtst mainTests) mainTest70AdjustPathStr() {
fh := pf.FileHelper{}
testPath := "../../../"
adjustedPath := fh.AdjustPathSlash(testPath)
fmt.Println("              mainTest70AdjustPathStr                   ")
fmt.Println("********************************************************")
fmt.Println("    testPath: ", testPath)
fmt.Println("adjustedPath: ", adjustedPath)
}
// mainTest69CleanDirStr - Manual test for FileHelper.CleanDirStr().
// Extracts the directory component of a test path/file string and
// prints the cleaned directory, volume name and absolute path.
func (mtst mainTests) mainTest69CleanDirStr() {

	fh := pf.FileHelper{}

	// testPathFile := "/d/gowork/src/MikeAustin71/pathfileopsgo/pathfileops/" +
	//	"levelfilesfortest/level_0_0_test.txt"
	// testPathFile := "d:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\pathfileops" +
	//	"\\levelfilesfortest\\level_0_0_test.txt"
	testPathFile := "../filesfortest//levelfilesfortest/level_01_dir/level_1_1_test.txt"

	absFilePath, err := fh.MakeAbsolutePath(testPathFile)

	if err != nil {
		// FIX: this error was previously swallowed by an empty if-block.
		fmt.Printf("Error returned by fh.MakeAbsolutePath(testPathFile)\n"+
			"testPathFile='%v'\nError='%v'\n",
			testPathFile, err.Error())
		return
	}

	// FIX: the banner previously printed "mainTest67AreFilesSame",
	// the name of a different test function.
	fmt.Println("                 mainTest69CleanDirStr                  ")
	fmt.Println("********************************************************")

	volName := fp.VolumeName(testPathFile)

	cleanFilePath, isEmpty, err := fh.CleanDirStr(testPathFile)

	if err != nil {
		fmt.Printf("Error returned by fh.CleanDirStr(testPathFile)\n"+
			"testPathFile='%v'\nError='%v'\n",
			testPathFile, err.Error())
		return
	}

	fmt.Println("                      SUCCESS!!!                        ")
	fmt.Println("********************************************************")
	fmt.Println("testPathFile: ", testPathFile)
	fmt.Println("--------------------------------------------------------")
	fmt.Println("fh.CleanDirStr() Results:")
	fmt.Println("--------------------------------------------------------")
	fmt.Println("      isEmpty: ", isEmpty)
	fmt.Println("    cleanPath: ", cleanFilePath)
	fmt.Println("  Volume Name: ", volName)
	fmt.Println("Absolute Path: ", absFilePath)
	fmt.Println()
}
// getBaseProjectPath - Returns the base path of the 'pathfileopsgo'
// project on the current machine, derived from the current working
// directory. When 'addTrailingPathSeparator' is 'true', the returned
// path is terminated with an os.PathSeparator character.
//
// An error is returned when the current directory cannot be determined
// or does not contain the "pathfileopsgo" project directory.
func (mtst mainTests) getBaseProjectPath(
	addTrailingPathSeparator bool) (basePath string, err error) {

	ePrefix := "getBaseProjectPath() "
	fh := pf.FileHelper{}
	basePath = ""
	err = nil

	currDir, err2 := fh.GetAbsCurrDir()

	if err2 != nil {
		err = fmt.Errorf(ePrefix+
			"Error returned by fh.GetAbsCurrDir().\nError='%v'\n", err2.Error())
		return basePath, err
	}

	target := "pathfileopsgo"

	loc := strings.Index(currDir, target)

	if loc < 0 {
		err = fmt.Errorf(ePrefix +
			"Error: Unable to locate \"pathfileopsgo\" in current directory string!\n")
		return basePath, err
	}

	// Keep everything through the end of the project directory name.
	basePath = currDir[:loc+len(target)]

	if addTrailingPathSeparator {
		basePath += string(os.PathSeparator)
	}

	return basePath, err
}
// timer - Formats the elapsed time between 'starTime' and 'endTime' as a
// human-readable string, e.g.
// "2-Minutes 3-Seconds 17-Milliseconds 421-Microseconds 815-Nanoseconds".
// Leading zero-valued units are omitted; once any unit has been printed,
// all smaller units are printed as well. Nanoseconds always appear.
func (mtst mainTests) timer(starTime, endTime time.Time) string {

	// Nanoseconds per unit, taken from the time package duration constants.
	MicroSecondNanoseconds := int64(time.Microsecond)
	MilliSecondNanoseconds := int64(time.Millisecond)
	SecondNanoseconds := int64(time.Second)
	MinuteNanoseconds := int64(time.Minute)
	HourNanoseconds := int64(time.Hour)

	t2Dur := endTime.Sub(starTime)

	str := ""

	totalNanoseconds := t2Dur.Nanoseconds()

	numOfHours := int64(0)
	numOfMinutes := int64(0)
	numOfSeconds := int64(0)
	numOfMillisecionds := int64(0)
	numOfMicroseconds := int64(0)
	numOfNanoseconds := int64(0)

	// Successively peel off each unit from the remaining nanoseconds.
	if totalNanoseconds >= HourNanoseconds {
		numOfHours = totalNanoseconds / HourNanoseconds
		totalNanoseconds = totalNanoseconds - (numOfHours * HourNanoseconds)
	}

	if totalNanoseconds >= MinuteNanoseconds {
		numOfMinutes = totalNanoseconds / MinuteNanoseconds
		totalNanoseconds = totalNanoseconds - (numOfMinutes * MinuteNanoseconds)
	}

	// FIX: this seconds-extraction block was previously duplicated;
	// the second copy could never execute and has been removed.
	if totalNanoseconds >= SecondNanoseconds {
		numOfSeconds = totalNanoseconds / SecondNanoseconds
		totalNanoseconds = totalNanoseconds - (numOfSeconds * SecondNanoseconds)
	}

	if totalNanoseconds >= MilliSecondNanoseconds {
		numOfMillisecionds = totalNanoseconds / MilliSecondNanoseconds
		totalNanoseconds = totalNanoseconds - (numOfMillisecionds * MilliSecondNanoseconds)
	}

	if totalNanoseconds >= MicroSecondNanoseconds {
		numOfMicroseconds = totalNanoseconds / MicroSecondNanoseconds
		totalNanoseconds = totalNanoseconds - (numOfMicroseconds * MicroSecondNanoseconds)
	}

	numOfNanoseconds = totalNanoseconds

	// Emit units; 'str != ""' forces smaller units to print once any
	// larger unit has been printed.
	if numOfHours > 0 {
		str += fmt.Sprintf("%v-Hours ", numOfHours)
	}

	if numOfMinutes > 0 {
		str += fmt.Sprintf("%v-Minutes ", numOfMinutes)
	}

	if numOfSeconds > 0 || str != "" {
		str += fmt.Sprintf("%v-Seconds ", numOfSeconds)
	}

	if numOfMillisecionds > 0 || str != "" {
		str += fmt.Sprintf("%v-Milliseconds ", numOfMillisecionds)
	}

	if numOfMicroseconds > 0 || str != "" {
		str += fmt.Sprintf("%v-Microseconds ", numOfMicroseconds)
	}

	str += fmt.Sprintf("%v-Nanoseconds", numOfNanoseconds)

	return str
}
|
package http
import (
"net/http"
"bytes"
"fmt"
"encoding/json"
"github.com/lygo/health"
)
// Registrator is the minimal router interface required to mount the
// health-check endpoints. *http.ServeMux satisfies it.
type Registrator interface {
Handle(pattern string, handler http.Handler)
}
// WrapHandler registers the liveness, readiness and health endpoints on
// the supplied router. Empty path arguments fall back to the defaults
// "/liveness", "/readiness" and "/health".
//
// See the Kubernetes probe documentation for how these endpoints are used:
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
func WrapHandler(livenessPath string, readinessPath string, healthPath string, healther health.Healther, router Registrator) {
	if livenessPath == `` {
		livenessPath = `/liveness`
	}

	if readinessPath == `` {
		readinessPath = `/readiness`
	}

	if healthPath == `` {
		healthPath = `/health`
	}

	// Kubernetes liveness probe: transport-level check only.
	router.Handle(livenessPath, LivenessHandler())

	// Kubernetes readiness probe: verifies the ability to provide services.
	router.Handle(readinessPath, ReadinessHandler(healther))

	// Full component-by-component health report.
	router.Handle(healthPath, HealthHandler(healther))
}
func LivenessHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("OK"))
})
}
// ReadinessHandler returns an http.Handler for Kubernetes readiness
// probes. While the Healther reports a serving status it answers "OK";
// otherwise it responds 503 Service Unavailable with a summary of the
// failed and timed-out components.
func ReadinessHandler(healther health.Healther) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		state := healther.Check(r.Context())

		if state.Status == health.HealthServing {
			w.Write([]byte(`OK`))
			return
		}

		// Not serving: summarize the problematic components.
		var buff bytes.Buffer
		for _, component := range state.Components {
			switch component.Status {
			case health.HealthComponentStatusFail:
				fmt.Fprintf(&buff,
					"- %s %s: %s",
					component.ComponentName,
					component.Status.String(),
					component.Description,
				)
			case health.HealthComponentStatusTimeout:
				// Timeouts additionally report how long the check ran.
				fmt.Fprintf(&buff,
					"- %s %s: %s %s",
					component.ComponentName,
					component.Status.String(),
					component.Description,
					component.Duration.String(),
				)
			}
		}

		http.Error(w, buff.String(), http.StatusServiceUnavailable)
	})
}
// HealthHandler returns an http.Handler that serves the full health
// report, JSON-encoded, for human or monitoring consumption.
func HealthHandler(healther health.Healther) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		state := healther.Check(r.Context())

		if err := json.NewEncoder(w).Encode(state); err != nil {
			// NOTE: if Encode already wrote part of the body, the 500
			// status cannot be applied; the error text is still sent.
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// FIX: removed the trailing w.WriteHeader(http.StatusOK). The
		// first body write already committed a 200 status, so the call
		// was a no-op that triggered a "superfluous response.WriteHeader
		// call" warning in the server log.
	})
}
|
// SPDX-License-Identifier: Unlicense OR MIT
package gpu
import (
"time"
"github.com/gop9/olt/gio/app/internal/gl"
)
// timers collects the GPU timer queries created for one context so they
// can be polled and released together.
type timers struct {
ctx *context
timers []*timer
}

// timer measures GPU elapsed time through a TIME_ELAPSED_EXT query object.
type timer struct {
// Elapsed is the last measured GPU duration; it is refreshed by
// timers.ready().
Elapsed time.Duration
ctx *context
obj gl.Query
state timerState
}

// timerState tracks a timer's begin/end/poll lifecycle.
type timerState uint8

const (
// timerIdle - not measuring; no query in flight.
timerIdle timerState = iota
// timerRunning - BeginQuery issued, EndQuery not yet issued.
timerRunning
// timerWaiting - EndQuery issued; result may not be available yet.
timerWaiting
)
// newTimers returns an empty timer collection bound to the given context.
func newTimers(ctx *context) *timers {
	ts := &timers{ctx: ctx}
	return ts
}
// newTimer creates a timer backed by a freshly created GL query object
// and registers it with the collection. A nil receiver yields nil.
func (t *timers) newTimer() *timer {
	if t == nil {
		return nil
	}

	nt := &timer{
		ctx: t.ctx,
		obj: t.ctx.CreateQuery(),
	}

	t.timers = append(t.timers, nt)

	return nt
}
// begin starts the GPU elapsed-time query. It is a no-op on a nil
// receiver or when the timer is not idle.
func (t *timer) begin() {
	if t == nil {
		return
	}
	if t.state != timerIdle {
		return
	}

	t.ctx.BeginQuery(gl.TIME_ELAPSED_EXT, t.obj)
	t.state = timerRunning
}
// end stops the GPU elapsed-time query started by begin(). It is a
// no-op on a nil receiver or when the timer is not running.
func (t *timer) end() {
	if t == nil {
		return
	}
	if t.state != timerRunning {
		return
	}

	t.ctx.EndQuery(gl.TIME_ELAPSED_EXT)
	t.state = timerWaiting
}
// ready reports whether every timer has a completed measurement. When it
// returns true, each timer's Elapsed field has been refreshed and all
// timers have been reset to idle. The measurements are rejected (false)
// when the GPU reports a disjoint interval (GPU_DISJOINT_EXT), which
// invalidates the timing results.
func (t *timers) ready() bool {
if t == nil {
return false
}
for _, tt := range t.timers {
if tt.state != timerWaiting {
return false
}
// QUERY_RESULT_AVAILABLE == 0 means the GPU has not finished this query.
if t.ctx.GetQueryObjectuiv(tt.obj, gl.QUERY_RESULT_AVAILABLE) == 0 {
return false
}
}
// All results are available: collect them and reset every timer.
for _, tt := range t.timers {
tt.state = timerIdle
// QUERY_RESULT for TIME_ELAPSED_EXT is in nanoseconds, which
// matches time.Duration's unit directly.
nanos := t.ctx.GetQueryObjectuiv(tt.obj, gl.QUERY_RESULT)
tt.Elapsed = time.Duration(nanos)
}
return t.ctx.GetInteger(gl.GPU_DISJOINT_EXT) == 0
}
// release deletes every GL query object and empties the collection.
// Safe to call on a nil receiver.
func (t *timers) release() {
	if t == nil {
		return
	}

	for i := range t.timers {
		t.ctx.DeleteQuery(t.timers[i].obj)
	}

	t.timers = nil
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlers
import (
"math/rand"
"net/url"
"testing"
fuzz "github.com/google/gofuzz"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/endpoints/request"
)
// TestGenerateLink verifies ContextBasedNaming.GenerateLink for namespaced,
// cluster-scoped, and error cases. The error path is checked separately from
// the URI path so a nil error can never be dereferenced (the original
// combined check called err.Error() even when err was nil, which panicked on
// a URI mismatch instead of reporting a test failure).
func TestGenerateLink(t *testing.T) {
	testCases := []struct {
		name          string
		requestInfo   *request.RequestInfo
		obj           runtime.Object
		expect        string
		expectErr     bool
		clusterScoped bool
	}{
		{
			name: "obj has more priority than requestInfo",
			requestInfo: &request.RequestInfo{
				Name:      "should-not-use",
				Namespace: "should-not-use",
				Resource:  "pod",
			},
			obj:           &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "should-use", Namespace: "should-use"}},
			expect:        "/api/v1/should-use/pod/should-use",
			expectErr:     false,
			clusterScoped: false,
		},
		{
			name: "hit errEmptyName",
			requestInfo: &request.RequestInfo{
				Name:      "should-use",
				Namespace: "should-use",
				Resource:  "pod",
			},
			obj:           &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "should-not-use"}},
			expect:        "/api/v1/should-use/pod/should-use",
			expectErr:     false,
			clusterScoped: false,
		},
		{
			name: "use namespace of requestInfo if obj namespace is empty",
			requestInfo: &request.RequestInfo{
				Name:      "should-not-use",
				Namespace: "should-use",
				Resource:  "pod",
			},
			obj:           &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "should-use"}},
			expect:        "/api/v1/should-use/pod/should-use",
			expectErr:     false,
			clusterScoped: false,
		},
		{
			name: "hit error",
			requestInfo: &request.RequestInfo{
				Name:      "",
				Namespace: "",
				Resource:  "pod",
			},
			obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{}},
			// For error cases, expect holds the expected error message.
			expect:        "name must be provided",
			expectErr:     true,
			clusterScoped: false,
		},
		{
			name: "cluster scoped",
			requestInfo: &request.RequestInfo{
				Name:      "only-name",
				Namespace: "should-not-use",
				Resource:  "pod",
			},
			obj:           &v1.Pod{ObjectMeta: metav1.ObjectMeta{}},
			expect:        "/api/v1/only-name",
			expectErr:     false,
			clusterScoped: true,
		},
	}
	for _, test := range testCases {
		n := ContextBasedNaming{
			SelfLinker:         meta.NewAccessor(),
			SelfLinkPathPrefix: "/api/v1/",
			ClusterScoped:      test.clusterScoped,
		}
		uri, err := n.GenerateLink(test.requestInfo, test.obj)
		if test.expectErr {
			if err == nil {
				t.Fatalf("%s: expected error %q, got uri: %v", test.name, test.expect, uri)
			}
			if err.Error() != test.expect {
				t.Fatalf("%s: expected error %q, but got: %q", test.name, test.expect, err.Error())
			}
			continue
		}
		if err != nil {
			t.Fatalf("%s: unexpected error: %v", test.name, err)
		}
		if uri != test.expect {
			t.Fatalf("%s: expected: %v, but got: %v", test.name, test.expect, uri)
		}
	}
}
// Test_fastURLPathEncode_fuzz cross-checks fastURLPathEncode against
// net/url's EscapedPath on a handful of tricky fixed inputs and then on 2000
// fuzz-generated strings.
func Test_fastURLPathEncode_fuzz(t *testing.T) {
	// check asserts that fastURLPathEncode agrees with the stdlib escaping.
	check := func(in string) {
		want := (&url.URL{Path: in}).EscapedPath()
		if got := fastURLPathEncode(in); got != want {
			t.Errorf("%q did not match %q", got, want)
		}
	}
	for _, tc := range []string{"/", "//", ".", "*", "/abc%"} {
		check(tc)
	}
	fuzzer := fuzz.New().Funcs(
		func(s *string, c fuzz.Continue) {
			*s = randString(c.Rand)
		},
	)
	for i := 0; i < 2000; i++ {
		var input string
		fuzzer.Fuzz(&input)
		check(input)
	}
}
// Unicode range fuzzer from github.com/google/gofuzz/fuzz.go
// charRange is an inclusive-start range of runes [first, last) used to draw
// random characters from a particular Unicode band.
type charRange struct {
	first, last rune
}
// unicodeRanges lists the rune bands randString samples from, mixing 1-, 2-,
// and 3-byte UTF-8 encodings.
// NOTE(review): the first range's upper bound 0x255 looks like it was meant
// to be 0xFF (hex vs decimal mixup) — confirm against the gofuzz original.
var unicodeRanges = []charRange{
	{0x00, 0x255},
	{' ', '~'},           // ASCII characters
	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
}
// randString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings.
func randString(r *rand.Rand) string {
	length := r.Intn(20)
	out := make([]rune, length)
	for i := 0; i < length; i++ {
		band := unicodeRanges[r.Intn(len(unicodeRanges))]
		out[i] = band.choose(r)
	}
	return string(out)
}
// choose returns a random unicode character from the given range, using the
// given randomness source. The upper bound is exclusive, matching the gofuzz
// original.
func (r *charRange) choose(rand *rand.Rand) rune {
	span := int64(r.last - r.first)
	offset := rune(rand.Int63n(span))
	return r.first + offset
}
|
package main
import (
"fmt"
"os"
)
// main prints a greeting using the NAME environment variable, exiting with
// status 1 when the variable is not set.
func main() {
	name, found := os.LookupEnv("NAME")
	if !found {
		fmt.Println(`ENV Variable not found`)
		os.Exit(1)
	}
	fmt.Println(name, "hello, this is working..")
}
|
package compactor
import "github.com/golang/snappy"
// defaultSnappyCompactor is the package-level Compactor backed by snappy
// compression.
var (
	defaultSnappyCompactor Compactor = NewSnappy()
)
// Snappy is a stateless Compactor implementation using the snappy codec.
type Snappy struct{}
// NewSnappy returns a ready-to-use snappy Compactor.
func NewSnappy() *Snappy {
	return &Snappy{}
}
// Name identifies this compactor as "snappy".
func (s *Snappy) Name() string {
	return "snappy"
}
// Encode compresses src with snappy into a freshly allocated buffer.
// snappy.Encode cannot fail, so the returned error is always nil.
func (s *Snappy) Encode(src []byte) ([]byte, error) {
	return snappy.Encode(nil, src), nil
}
// Decode decompresses snappy-encoded src into a freshly allocated buffer,
// returning an error if src is not valid snappy data.
func (s *Snappy) Decode(src []byte) (dst []byte, err error) {
	return snappy.Decode(nil, src)
}
|
package ibc
import (
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/gookit/gcli/v3"
"github.com/ovrclk/akcmd/cmd/cosmos-sdk/x/ibc/channel"
"github.com/ovrclk/akcmd/cmd/cosmos-sdk/x/ibc/client/cli"
"github.com/ovrclk/akcmd/cmd/cosmos-sdk/x/ibc/connection"
)
// GetQueryCmd returns the cli query commands for this module, grouping the
// client, connection, and channel query subcommands under the IBC module
// name. Invoking the group itself just shows its help.
func GetQueryCmd() *gcli.Command {
	subCommands := []*gcli.Command{
		cli.GetQueryCmd(),
		connection.GetQueryCmd(),
		channel.GetQueryCmd(),
	}
	return &gcli.Command{
		Name: host.ModuleName,
		Desc: "Querying commands for the IBC module",
		Func: func(cmd *gcli.Command, args []string) error {
			cmd.ShowHelp()
			return nil
		},
		Subs: subCommands,
	}
}
// GetTxCmd returns the transaction commands for this module. Only the client
// package contributes tx subcommands; invoking the group itself shows help.
func GetTxCmd() *gcli.Command {
	subCommands := []*gcli.Command{
		cli.NewTxCmd(),
		// channel: pkg had no tx commands, so not added.
	}
	return &gcli.Command{
		Name: host.ModuleName,
		Desc: "IBC transaction subcommands",
		Func: func(cmd *gcli.Command, args []string) error {
			cmd.ShowHelp()
			return nil
		},
		Subs: subCommands,
	}
}
|
// Copyright 2020 Frederik Zipp. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package canvas
import (
"image"
"image/color"
"testing"
)
// BenchmarkContext measures the cost of issuing the full set of context
// state-setter and drawing commands once per iteration. A goroutine drains
// the draw channel so command flushes never block on a consumer.
func BenchmarkContext(b *testing.B) {
	draws := make(chan []byte)
	// Drain outgoing draw buffers for the lifetime of the benchmark.
	go func() {
		for range draws {
		}
	}()
	ctx := newContext(draws, nil, config{})
	for n := 0; n < b.N; n++ {
		// State setters.
		ctx.SetFillStyle(color.White)
		ctx.SetFillStyleString("green")
		ctx.SetFillStyleGradient(&Gradient{})
		ctx.SetFillStylePattern(&Pattern{})
		ctx.SetFont("bold 48px serif")
		ctx.SetGlobalAlpha(1)
		ctx.SetGlobalCompositeOperation(OpDestinationOut)
		ctx.SetImageSmoothingEnabled(true)
		ctx.SetLineCap(CapRound)
		ctx.SetLineDashOffset(1)
		ctx.SetLineJoin(JoinBevel)
		ctx.SetLineWidth(1)
		ctx.SetMiterLimit(1)
		ctx.SetShadowBlur(1)
		ctx.SetShadowColor(color.White)
		ctx.SetShadowColorString("#ffa0b3")
		ctx.SetShadowOffsetX(1)
		ctx.SetShadowOffsetY(1)
		ctx.SetStrokeStyle(color.White)
		ctx.SetStrokeStyleString("yellow")
		ctx.SetStrokeStyleGradient(&Gradient{})
		ctx.SetStrokeStylePattern(&Pattern{})
		ctx.SetTextAlign(AlignLeft)
		ctx.SetTextBaseline(BaselineBottom)
		// Path construction and drawing commands.
		ctx.Arc(1, 1, 1, 1, 1, true)
		ctx.ArcTo(1, 1, 1, 1, 1)
		ctx.BeginPath()
		ctx.BezierCurveTo(1, 1, 1, 1, 1, 1)
		ctx.ClearRect(1, 1, 1, 1)
		ctx.Clip()
		ctx.ClosePath()
		ctx.Ellipse(1, 1, 1, 1, 1, 1, 1, true)
		ctx.Fill()
		ctx.FillRect(1, 1, 1, 1)
		ctx.FillText("hello, world", 1, 1)
		ctx.FillTextMaxWidth("hello, world", 1, 1, 1)
		ctx.LineTo(1, 1)
		ctx.MoveTo(1, 1)
		ctx.QuadraticCurveTo(1, 1, 1, 1)
		ctx.Rect(1, 1, 1, 1)
		ctx.Restore()
		ctx.Rotate(1)
		ctx.Save()
		ctx.Scale(1, 1)
		ctx.Stroke()
		ctx.StrokeText("hello, world", 1, 1)
		ctx.StrokeTextMaxWidth("hello, world", 1, 1, 1)
		ctx.StrokeRect(1, 1, 1, 1)
		ctx.Translate(1, 1)
		ctx.Transform(1, 1, 1, 1, 1, 1)
		ctx.SetTransform(1, 1, 1, 1, 1, 1)
		ctx.SetLineDash([]float64{1, 1, 1})
		// Image and gradient/pattern operations.
		ctx.CreateImageData(image.NewRGBA(image.Rect(0, 0, 0, 0)))
		ctx.PutImageData(&ImageData{}, 1, 1)
		ctx.PutImageDataDirty(&ImageData{}, 1, 1, 1, 1, 1, 1)
		ctx.DrawImage(&ImageData{}, 1, 1)
		ctx.DrawImageScaled(&ImageData{}, 1, 1, 1, 1)
		ctx.DrawImageSubRectangle(&ImageData{}, 1, 1, 1, 1, 1, 1, 1, 1)
		ctx.CreateLinearGradient(1, 1, 1, 1)
		ctx.CreateRadialGradient(1, 1, 1, 1, 1, 1)
		ctx.CreatePattern(&ImageData{}, PatternRepeat)
		ctx.GetImageData(1, 1, 1, 1)
		ctx.Flush()
	}
	close(draws)
}
|
package testdata
import (
"github.com/frk/gosql/internal/testdata/common"
)
// FilterBasicRecords is a test-data query type: User selects from the
// "test_user" relation (via the rel tag), and Filter supplies the
// common.FilterMaker used to build the filter clause.
type FilterBasicRecords struct {
	User *common.User2 `rel:"test_user"`
	Filter common.FilterMaker
}
|
package permuter
import (
"fmt"
"testing"
)
// ExamplePermute demonstrates Permute by printing each permutation of a
// three-element list in the order the algorithm produces them.
//
// Fixes two bugs in the original: the sink printed the captured input list
// (myList) instead of the permutation it was handed, and the comma condition
// `index != len(myList)` was never false, so every element — including the
// last — was followed by a comma, making the declared Output unreachable.
func ExamplePermute() {
	myList := []interface{}{1, 2, 3}
	Permute(myList, func(l []interface{}) {
		fmt.Print("[")
		for index, e := range l {
			fmt.Print(e)
			// No comma after the final element.
			if index != len(l)-1 {
				fmt.Print(",")
			}
		}
		fmt.Print("]")
	})
	//Output:[1,2,3][2,1,3][2,3,1][1,3,2][3,1,2][3,2,1]
}
// TestEmptyPermute checks that permuting an empty list invokes the sink
// exactly once, with an empty permutation. (Error messages fixed: "emty" and
// "more as once" were typos/grammar errors.)
func TestEmptyPermute(t *testing.T) {
	original := []interface{}{}
	called := false
	Permute(original, func(permutation []interface{}) {
		if called {
			t.Error("Permute with empty list calls sink more than once")
		}
		called = true
		if len(permutation) != 0 {
			t.Error("Permute with empty list has wrong size expected 0, got ", len(permutation))
		}
	})
	if !called {
		t.Error("sink did not get called for empty list")
	}
}
// testLength returns the maximum list length exercised by the permutation
// tests: 5 in short mode (5! = 120 sink calls) and 11 in full mode.
func testLength() int {
	length := 11
	if testing.Short() {
		length = 5
	}
	return length
}
// TestPermutationCount checks that Permute invokes the sink exactly n! times
// for a list of length n. (Format verb fixed: %s on a []interface{} of ints
// renders as %!s(int=…) noise; %v prints the list properly.)
func TestPermutationCount(t *testing.T) {
	maxLength := testLength()
	for length := 0; length <= maxLength; length++ {
		theList := ListOfFirstNPositiveNumbers(length)
		expectedCount := factorial(length)
		actual := 0
		Permute(theList, func([]interface{}) {
			actual++
		})
		if actual != expectedCount {
			t.Errorf("Expected %d permutations for a list %v with length %d, got %d instead",
				expectedCount, theList, length, actual)
		}
	}
}
// TestPermuteSum sums, per position, the elements seen across all
// permutations of [1..n]; by symmetry every position must total
// n(n+1)/2 * (n-1)!. (Format verb fixed from %s to %v — see
// TestPermutationCount.)
func TestPermuteSum(t *testing.T) {
	maxLength := testLength()
	for length := 0; length <= maxLength; length++ {
		theList := ListOfFirstNPositiveNumbers(length)
		actual := make([]int, length)
		Permute(theList, func(permutation []interface{}) {
			counter := 0
			for _, e := range permutation {
				v := (e).(int)
				actual[counter] += v
				counter++
			}
		})
		expectedSum := expectedSum(length)
		for _, value := range actual {
			if value != expectedSum {
				t.Errorf("Expected %d as the sum for a list %v with length %d, got %d instead",
					expectedSum, theList, length, value)
			}
		}
	}
}
// TestPermuteMult multiplies, per position, the elements seen across all
// permutations; every position must equal (n!)^((n-1)!). Lengths stay <= 4
// because larger values overflow uint64. (Format verb fixed from %s to %v —
// see TestPermutationCount.)
func TestPermuteMult(t *testing.T) {
	maxLength := 4 //higher number will overflow int64 !!
	for length := 0; length <= maxLength; length++ {
		theList := ListOfFirstNPositiveNumbers(length)
		actual := make([]uint64, length)
		for index := range actual {
			actual[index] = 1
		}
		Permute(theList, func(permutation []interface{}) {
			counter := 0
			for _, e := range permutation {
				v := (e).(int)
				actual[counter] *= uint64(v)
				counter++
			}
		})
		expectedProduct := intPow(factorial(length), factorial(length-1))
		for _, value := range actual {
			if value != expectedProduct {
				t.Errorf("Expected %d as the product for a list %v with length %d, got %d instead",
					expectedProduct, theList, length, value)
			}
		}
	}
}
// intPow returns base**exp computed in uint64 arithmetic (no overflow
// detection; callers keep the operands small).
func intPow(base, exp int) uint64 {
	b := uint64(base)
	product := uint64(1)
	for n := exp; n > 0; n-- {
		product *= b
	}
	return product
}
// testSumOfPermutationElement accumulates every element over all permutations
// of [1..length] and compares the total to (length+1)*length/2.
//
// NOTE(review): the lowercase name means `go test` never executes this
// function. Its expectation also appears to hold only for length <= 1: the
// accumulator sums across all factorial(length) permutations, so the total
// should be factorial(length) * (length+1)*length/2. Confirm the intended
// invariant before renaming it to TestSumOfPermutationElement.
func testSumOfPermutationElement(t *testing.T) {
	maxLength := testLength()
	for length := 0; length <= maxLength; length++ {
		theList := ListOfFirstNPositiveNumbers(length)
		actual := 0
		Permute(theList, func(permutation []interface{}) {
			for _, e := range permutation {
				v := (e).(int)
				actual += v
			}
		})
		expectedSum := ((length + 1) * length) / 2
		if actual != expectedSum {
			t.Errorf("Expected %d as the sum for a list %s with length %d, got %d instead",
				expectedSum, theList, length, actual)
		}
	}
}
// ListOfFirstNPositiveNumbers returns []interface{}{1, 2, ..., n}.
func ListOfFirstNPositiveNumbers(n int) []interface{} {
	result := make([]interface{}, 0, n)
	for value := 1; value <= n; value++ {
		result = append(result, value)
	}
	return result
}
// listToString renders theList as "[e1,e2,...,en]" with comma separators and
// no trailing comma.
func listToString(theList []interface{}) string {
	s := "["
	for i, e := range theList {
		if i > 0 {
			s += ","
		}
		s += fmt.Sprint(e)
	}
	return s + "]"
}
// expectedSum returns the per-position total over all permutations of
// [1..length]: the triangular number length(length+1)/2 times (length-1)!.
func expectedSum(length int) int {
	triangular := (length + 1) * length / 2
	return triangular * factorial(length-1)
}
// factorial returns length!, with factorial(n) == 1 for n <= 1 (including
// negative inputs, which some callers rely on for length-1 at length 0).
func factorial(length int) int {
	product := 1
	for n := 2; n <= length; n++ {
		product *= n
	}
	return product
}
/*
func expectedNumber(length int) int {
return factorial(length)
}
*/
|
package ionic
import (
"bytes"
"encoding/json"
"fmt"
)
const (
getEntityOverviewEndpoint = "/v1/score/getEntityOverviewForEntity"
)
// GetEntityOverview posts the given entity to the score endpoint and decodes
// the response into an EntityOverview. The zero value is returned with a
// wrapped error when marshalling, the request, or decoding fails.
func (ic *IonClient) GetEntityOverview(entity EntityInput, token string) (EntityOverview, error) {
	payload, err := json.Marshal(entity)
	if err != nil {
		return EntityOverview{}, fmt.Errorf("marshal input to JSON: %w", err)
	}
	body, err := ic.Post(getEntityOverviewEndpoint, token, nil, *bytes.NewBuffer(payload), nil)
	if err != nil {
		return EntityOverview{}, fmt.Errorf("api request: %w", err)
	}
	var overview EntityOverview
	if err := json.Unmarshal(body, &overview); err != nil {
		return EntityOverview{}, fmt.Errorf("unmarshal API response: %w", err)
	}
	return overview, nil
}
|
package cwb
import (
"context"
"net/http"
"github.com/google/go-querystring/query"
)
const (
	// Tide1MonthId is the CWB open-data dataset ID for the one-month tide
	// forecast (F-A0021-001).
	Tide1MonthId = "F-A0021-001"
)
// TideForecastsService handles tide-forecast requests against the CWB API.
type TideForecastsService service
// TideForecast1MonthOptions are the query parameters for the one-month tide
// forecast endpoint; zero values are omitted from the URL.
type TideForecast1MonthOptions struct {
	Limit int `url:"limit,omitempty"`
	Offset int `url:"offset,omitempty"`
	LocationName string `url:"locationName,omitempty"` // see https://opendata.cwb.gov.tw/opendatadoc/MMC/A0021-001.pdf
	ElementName string `url:"elementName,omitempty"`
	Sort string `url:"sort,omitempty"`
	StartTime string `url:"startTime,omitempty"`
	DataTime string `url:"dataTime,omitempty"`
	// NOTE(review): paired with TimeTo this looks like a range start, i.e.
	// "TimeFrom"/"timeFrom" — confirm the parameter name against the CWB API
	// docs before relying on it.
	TimeForm string `url:"timeForm,omitempty"`
	TimeTo string `url:"timeTo,omitempty"`
}
// TideForecast1MonthResponse is the top-level payload returned by the
// one-month tide forecast endpoint.
type TideForecast1MonthResponse struct {
	Success string `json:"success"`
	Result Result `json:"result"`
	// Records carries the dataset ID, a free-form note, and the per-location
	// forecasts.
	Records struct {
		Dataid string `json:"dataid"`
		Note string `json:"note"`
		Location []TideForecastLocation `json:"location"`
	} `json:"records"`
}
// TideForecastLocation is one station's forecast: a series of validity
// windows, each carrying its weather elements.
type TideForecastLocation struct {
	LocationName string `json:"locationName"`
	StationId string `json:"stationId"`
	ValidTime []struct {
		StartTime string `json:"startTime"`
		EndTime string `json:"endTime"`
		WeatherElement []TideForecastWeatherElement `json:"weatherElement"`
	} `json:"validTime"`
}
// TideForecastWeatherElement is a named forecast element; it carries either a
// single value or a time series, depending on the element.
type TideForecastWeatherElement struct {
	ElementName string `json:"elementName"`
	ElementValue *string `json:"elementValue,omitempty"`
	Time []TideForecastTime `json:"time,omitempty"`
}
// TideForecastTime is one point of a weather-element time series.
type TideForecastTime struct {
	DataTime string `json:"dataTime"`
	Parameter []TideForecastParameter `json:"parameter"`
}
// TideForecastParameter is a single named value (with optional unit of
// measure) attached to a forecast time point.
type TideForecastParameter struct {
	ParameterName string `json:"parameterName"`
	ParameterValue *string `json:"parameterValue,omitempty"`
	ParameterMeasure *string `json:"parameterMeasure,omitempty"`
}
// Get1MonthTide gets 1 month tide forecasts.
// see https://opendata.cwb.gov.tw/dist/opendata-swagger.html#/%E9%A0%90%E5%A0%B1/get_v1_rest_datastore_F_A0021_001
//
// The error from query.Values is now propagated instead of silently
// discarded, so malformed options surface to the caller.
func (s *TideForecastsService) Get1MonthTide(ctx context.Context, options TideForecast1MonthOptions) (*TideForecast1MonthResponse, *http.Response, error) {
	values, err := query.Values(options)
	if err != nil {
		return nil, nil, err
	}
	forecast := new(TideForecast1MonthResponse)
	resp, err := s.client.Get(ctx, s.client.generateURL(Tide1MonthId, values), forecast)
	if err != nil {
		return nil, nil, err
	}
	return forecast, resp, nil
}
|
package options
import (
"flag"
"github.com/spf13/pflag"
)
// WebHookOptions holds the configurable settings for the admission webhook
// server; defaults come from NewDefaultWebHookOptions and may be overridden
// via AddFlags.
type WebHookOptions struct {
	// Port is the HTTPS serving port.
	Port int
	// KubeConfig is the path to a kubeconfig file (out-of-cluster only).
	KubeConfig string
	// MasterURL overrides the API server address from the kubeconfig.
	MasterURL string
	// CertDir is the directory holding the TLS certificates.
	CertDir string
	// SidecarConfig is the file containing the mutation configuration.
	SidecarConfig string
	// VerFlag requests printing version info.
	VerFlag bool
}
// NewDefaultWebHookOptions returns a WebHookOptions populated with the
// stock deployment defaults (HTTPS on 443, in-container cert/config paths).
func NewDefaultWebHookOptions() WebHookOptions {
	opts := WebHookOptions{}
	opts.Port = 443
	opts.KubeConfig = "/root/.kube/config"
	opts.MasterURL = ""
	opts.CertDir = "/etc/edge-service-autonomy-webhook/certs"
	opts.SidecarConfig = "/etc/edge-service-autonomy-webhook/config/sidecarconfig.yaml"
	opts.VerFlag = false
	return opts
}
// AddFlags registers the webhook's command-line flags on fs, using the
// receiver's current field values as the flag defaults.
//
// Bug fix: the "master" and "cert_dir" flags previously passed "" as their
// default, silently discarding the CertDir default set by
// NewDefaultWebHookOptions; they now use the struct's values like every
// other flag.
func (o *WebHookOptions) AddFlags(fs *pflag.FlagSet) {
	if o == nil {
		return
	}
	// Add the command line flags from other dependencies(klog, kubebuilder, etc.)
	fs.AddGoFlagSet(flag.CommandLine)
	fs.IntVar(&o.Port, "secure_port", o.Port, "The port on which to serve HTTPS.")
	fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "Path to a kubeconfig. Only required if out-of-cluster.")
	fs.StringVar(&o.MasterURL, "master", o.MasterURL, "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
	fs.StringVar(&o.CertDir, "cert_dir", o.CertDir, "The directory where the TLS certs are located.")
	fs.StringVar(&o.SidecarConfig, "sidecar_config", o.SidecarConfig, "File containing the mutation configuration.")
	fs.BoolVar(&o.VerFlag, "version", o.VerFlag, "Prints the Version info of webhook.")
}
|
package csvreader
import (
"reflect"
"strconv"
"time"
)
// setField parses valStr and stores the result into field, picking the parse
// routine from the field's reflect.Kind. A value that fails to parse (or
// overflows the kind-specific bit width passed to strconv) is silently
// skipped, leaving the field at its previous value. The error return is
// currently always nil; it exists for interface compatibility.
func setField(field reflect.Value, valStr string) error {
	// Unexported struct fields cannot be set via reflection.
	if !field.CanSet() {
		return nil
	}
	switch field.Kind() {
	case reflect.Bool:
		if val, err := strconv.ParseBool(valStr); err == nil {
			field.Set(reflect.ValueOf(val).Convert(field.Type()))
		}
	case reflect.Int:
		if val, err := strconv.ParseInt(valStr, 10, 64); err == nil {
			field.Set(reflect.ValueOf(int(val)).Convert(field.Type()))
		}
	case reflect.Int8:
		if val, err := strconv.ParseInt(valStr, 10, 8); err == nil {
			field.Set(reflect.ValueOf(int8(val)).Convert(field.Type()))
		}
	case reflect.Int16:
		if val, err := strconv.ParseInt(valStr, 10, 16); err == nil {
			field.Set(reflect.ValueOf(int16(val)).Convert(field.Type()))
		}
	case reflect.Int32:
		if val, err := strconv.ParseInt(valStr, 10, 32); err == nil {
			field.Set(reflect.ValueOf(int32(val)).Convert(field.Type()))
		}
	case reflect.Int64:
		// Try a duration first ("5s", "2h45m", ...); fall back to a plain
		// integer when that fails. This also covers time.Duration fields.
		if val, err := time.ParseDuration(valStr); err == nil {
			field.Set(reflect.ValueOf(val).Convert(field.Type()))
		} else if val, err := strconv.ParseInt(valStr, 10, 64); err == nil {
			field.Set(reflect.ValueOf(val).Convert(field.Type()))
		}
	case reflect.Uint:
		if val, err := strconv.ParseUint(valStr, 10, 64); err == nil {
			field.Set(reflect.ValueOf(uint(val)).Convert(field.Type()))
		}
	case reflect.Uint8:
		if val, err := strconv.ParseUint(valStr, 10, 8); err == nil {
			field.Set(reflect.ValueOf(uint8(val)).Convert(field.Type()))
		}
	case reflect.Uint16:
		if val, err := strconv.ParseUint(valStr, 10, 16); err == nil {
			field.Set(reflect.ValueOf(uint16(val)).Convert(field.Type()))
		}
	case reflect.Uint32:
		if val, err := strconv.ParseUint(valStr, 10, 32); err == nil {
			field.Set(reflect.ValueOf(uint32(val)).Convert(field.Type()))
		}
	case reflect.Uint64:
		if val, err := strconv.ParseUint(valStr, 10, 64); err == nil {
			field.Set(reflect.ValueOf(val).Convert(field.Type()))
		}
	case reflect.Uintptr:
		if val, err := strconv.ParseUint(valStr, 10, 64); err == nil {
			field.Set(reflect.ValueOf(uintptr(val)).Convert(field.Type()))
		}
	case reflect.Float32:
		if val, err := strconv.ParseFloat(valStr, 32); err == nil {
			field.Set(reflect.ValueOf(float32(val)).Convert(field.Type()))
		}
	case reflect.Float64:
		if val, err := strconv.ParseFloat(valStr, 64); err == nil {
			field.Set(reflect.ValueOf(val).Convert(field.Type()))
		}
	case reflect.String:
		field.Set(reflect.ValueOf(valStr).Convert(field.Type()))
	case reflect.Ptr:
		// NOTE(review): pointer fields are set to a fresh zero value and
		// valStr is discarded — confirm this is intended rather than parsing
		// into the pointee.
		field.Set(reflect.New(field.Type().Elem()))
	}
	return nil
}
|
package diffsquares
// SquareOfSum returns (1 + 2 + ... + n)^2, computing the inner sum with the
// closed form n(n+1)/2.
func SquareOfSum(n int) int {
	triangular := n * (n + 1) / 2
	return triangular * triangular
}
// SumOfSquares returns 1^2 + 2^2 + ... + n^2 via the closed form
// n(n+1)(2n+1)/6.
func SumOfSquares(n int) int {
	product := n * (n + 1)
	product *= 2*n + 1
	return product / 6
}
// Difference returns the square of the sum of [1..n] minus the sum of the
// squares of [1..n], using the same closed forms as SquareOfSum and
// SumOfSquares inlined.
func Difference(n int) int {
	sum := n * (n + 1) / 2
	return sum*sum - n*(n+1)*(2*n+1)/6
}
|
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"fmt"
"regexp"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)
// TestAccDataInstall_basic is an acceptance test for the flux_install data
// source: it asserts validation errors for missing/invalid arguments, checks
// the documented default attribute values, and verifies that changed
// arguments propagate into state.
func TestAccDataInstall_basic(t *testing.T) {
	resourceName := "data.flux_install.main"
	resource.ParallelTest(t, resource.TestCase{
		PreCheck: func() { testAccPreCheck(t) },
		ProviderFactories: testAccProviderFactories,
		Steps: []resource.TestStep{
			{
				// Without required target_path set
				Config: testAccDataInstallEmpty,
				ExpectError: regexp.MustCompile(`The argument "target_path" is required, but no definition was found\.`),
			},
			{
				// With invalid log level
				Config: testAccDataInstallLogLevel,
				ExpectError: regexp.MustCompile(`Error: expected log_level to be one of \[info debug error\], got warn`),
			},
			{
				// With invalid arch set
				Config: testAccDataInstallArch,
				ExpectError: regexp.MustCompile(`Error: expected arch to be one of \[amd64 arm64 arm\], got powerpc`),
			},
			{
				// Check default values
				Config: testAccDataInstallBasic,
				Check: resource.ComposeAggregateTestCheckFunc(
					resource.TestCheckResourceAttrSet(resourceName, "content"),
					resource.TestCheckResourceAttr(resourceName, "arch", "amd64"),
					resource.TestCheckResourceAttr(resourceName, "log_level", "info"),
					resource.TestCheckResourceAttr(resourceName, "namespace", "flux-system"),
					resource.TestCheckResourceAttr(resourceName, "network_policy", "true"),
					resource.TestCheckResourceAttr(resourceName, "path", "staging-cluster/flux-system/gotk-components.yaml"),
					resource.TestCheckResourceAttr(resourceName, "registry", "ghcr.io/fluxcd"),
					resource.TestCheckResourceAttr(resourceName, "target_path", "staging-cluster"),
					resource.TestCheckResourceAttr(resourceName, "version", "latest"),
					resource.TestCheckResourceAttr(resourceName, "watch_all_namespaces", "true"),
				),
			},
			// Ensure attribute value changes are propagated correctly into the state
			{
				Config: testAccDataInstallWithArg("arch", "arm64"),
				Check: resource.TestCheckResourceAttr(resourceName, "arch", "arm64"),
			},
			{
				Config: testAccDataInstallWithArg("log_level", "debug"),
				Check: resource.TestCheckResourceAttr(resourceName, "log_level", "debug"),
			},
			{
				Config: testAccDataInstallWithArg("namespace", "test-system"),
				Check: resource.TestCheckResourceAttr(resourceName, "namespace", "test-system"),
			},
			{
				Config: testAccDataInstallWithArg("network_policy", "false"),
				Check: resource.TestCheckResourceAttr(resourceName, "network_policy", "false"),
			},
			{
				Config: testAccDataInstallWithArg("version", "0.2.1"),
				Check: resource.TestCheckResourceAttr(resourceName, "version", "0.2.1"),
			},
			{
				Config: testAccDataInstallWithArg("watch_all_namespaces", "false"),
				Check: resource.TestCheckResourceAttr(resourceName, "watch_all_namespaces", "false"),
			},
		},
	})
}
// HCL fixtures for the flux_install acceptance tests: an empty config (missing
// the required target_path), a minimal valid config, and two configs carrying
// deliberately invalid log_level / arch values.
const (
	testAccDataInstallEmpty = `data "flux_install" "main" {}`
	testAccDataInstallBasic = `
data "flux_install" "main" {
target_path = "staging-cluster"
}
`
	testAccDataInstallLogLevel = `
data "flux_install" "main" {
target_path = "staging-cluster"
log_level = "warn"
}
`
	testAccDataInstallArch = `
data "flux_install" "main" {
target_path = "staging-cluster"
arch = "powerpc"
}
`
)
// testAccDataInstallWithArg renders a minimal flux_install config that sets
// target_path plus one extra attribute; value is quoted with %q so special
// characters stay valid HCL.
func testAccDataInstallWithArg(attr string, value string) string {
	const configTemplate = `
data "flux_install" "main" {
target_path = "staging-cluster"
%s = %q
}
`
	return fmt.Sprintf(configTemplate, attr, value)
}
|
package main
import (
"fmt"
"os"
"path"
"strings"
"github.com/rightscale/rsc/ca"
"github.com/rightscale/rsc/cm15"
"github.com/rightscale/rsc/cm16"
"github.com/rightscale/rsc/cmd"
"github.com/rightscale/rsc/policy"
"github.com/rightscale/rsc/rl10"
"github.com/rightscale/rsc/rsapi"
"github.com/rightscale/rsc/ss"
"gopkg.in/alecthomas/kingpin.v2"
)
// ParseCommandLine retrieves the command and top level flag values.
// It registers all commands and global flags on app, parses os.Args (with a
// special case so a trailing --help is handled by rsc rather than kingpin),
// backfills unset values from the config file, and validates the result.
//
// Changes from the original: `cmdLine.FlexeraOne == false` replaced with the
// idiomatic `!cmdLine.FlexeraOne`, and the step-comment numbering no longer
// skips a step.
func ParseCommandLine(app *kingpin.Application) (*cmd.CommandLine, error) {
	// 1. Register all commands
	app.Command("setup", "create config file, defaults to $HOME/.rsc, use '--config' to override")
	app.Command("json", "apply jsonselect expression to STDIN")
	RegisterClientCommands(app)
	// 2. Parse flags
	cmdLine := cmd.CommandLine{}
	app.Flag("config", "path to rsc config file").Short('c').Default(path.Join(os.Getenv("HOME"), ".rsc")).StringVar(&cmdLine.ConfigPath)
	app.Flag("retry", "Number of retry attempts for non-successful API responses (500, 503, and timeouts only)").Short('R').Default("0").IntVar(&cmdLine.Retry)
	app.Flag("account", "RightScale account ID").Short('a').IntVar(&cmdLine.Account)
	app.Flag("host", "RightScale login endpoint (e.g. 'us-3.rightscale.com')").Short('h').StringVar(&cmdLine.Host)
	app.Flag("email", "Login email, use --email and --pwd or use --refreshToken, --accessToken, --apiToken or --rl10").StringVar(&cmdLine.Username)
	app.Flag("pwd", "Login password, use --email and --pwd or use --refreshToken, --accessToken, --apiToken or --rl10").StringVar(&cmdLine.Password)
	app.Flag("refreshToken", "OAuth refresh token, use --email and --pwd or use --refreshToken, --accessToken, --apiToken or --rl10").Short('r').StringVar(&cmdLine.OAuthToken)
	app.Flag("accessToken", "OAuth access token, use --email and --pwd or use --refreshToken, --accessToken, --apiToken or --rl10").Short('s').StringVar(&cmdLine.OAuthAccessToken)
	app.Flag("apiToken", "Instance API token, use --email and --pwd or use --refreshToken, --accessToken, --apiToken or --rl10").Short('p').StringVar(&cmdLine.APIToken)
	app.Flag("rl10", "Proxy requests through RightLink 10 agent, use --email and --pwd or use --refreshToken, --accessToken, --apiToken or --rl10").BoolVar(&cmdLine.RL10)
	app.Flag("flexeraOne", "Use Flexera One for authentication, with --refreshToken").Short('F').BoolVar(&cmdLine.FlexeraOne)
	app.Flag("noAuth", "Make unauthenticated requests, used for testing").BoolVar(&cmdLine.NoAuth)
	app.Flag("timeout", "Set the request timeout, defaults to 300s").Short('t').Default("300").IntVar(&cmdLine.Timeout)
	app.Flag("x1", "Extract single value using JSON:select").StringVar(&cmdLine.ExtractOneSelect)
	app.Flag("xm", "Extract zero, one or more values using JSON:select and return newline separated list").StringVar(&cmdLine.ExtractSelector)
	app.Flag("xj", "Extract zero, one or more values using JSON:select and return JSON").StringVar(&cmdLine.ExtractSelectorJSON)
	app.Flag("xh", "Extract header with given name").StringVar(&cmdLine.ExtractHeader)
	app.Flag("fetch", "Fetch resource with href present in 'Location' header").BoolVar(&cmdLine.FetchResource)
	app.Flag("dump", "Dump HTTP request and response. Possible values are 'debug' or 'json'.").EnumVar(&cmdLine.Dump, "debug", "json", "record")
	app.Flag("verbose", "Dump HTTP request and response including auth requests and headers, enables --dump=debug by default, use --dump=json to switch format").Short('v').BoolVar(&cmdLine.Verbose)
	app.Flag("pp", "Pretty print response body").BoolVar(&cmdLine.Pretty)
	// Keep around for a few releases for backwards compatibility
	app.Flag("key", "OAuth refresh token, use --email and --pwd or use --refreshToken, --accessToken, --apiToken or --rl10").Short('k').Hidden().StringVar(&cmdLine.OAuthToken)
	args := os.Args[1:]
	if len(args) == 0 {
		args = []string{"--help"}
	}
	// This is a bit hacky: basically doing `rsc api15 index clouds --help` results
	// in a command line that kingpin hijacks. So capture the `--help` try parsing
	// without it so we can print our own help.
	lastArgIndex := len(args)
	help := args[lastArgIndex-1]
	var cmd string
	var err error
	if help == "--help" || help == "-h" || help == "-help" || help == "-?" {
		cmdLine.ShowHelp = true
		lastArgIndex--
		cmd, err = app.Parse(args[:lastArgIndex])
	} else {
		cmd, err = app.Parse(args)
	}
	if err != nil {
		return nil, err
	}
	// 3. Complement with defaults from config at given path
	if !cmdLine.NoAuth {
		if config, err := LoadConfig(cmdLine.ConfigPath); err == nil {
			// Account/email/password defaults only apply when no OAuth
			// token was given on the command line.
			if cmdLine.OAuthAccessToken == "" && cmdLine.OAuthToken == "" {
				if cmdLine.Account == 0 {
					cmdLine.Account = config.Account
				}
				if cmdLine.Username == "" {
					cmdLine.Username = config.Email
				}
				if cmdLine.Password == "" {
					cmdLine.Password = config.Password
				}
			}
			if cmdLine.Host == "" {
				cmdLine.Host = config.LoginHost
			}
			if cmdLine.OAuthToken == "" {
				cmdLine.OAuthToken = config.RefreshToken
			}
			if !cmdLine.FlexeraOne {
				cmdLine.FlexeraOne = config.FlexeraOne
			}
		}
	}
	cmdLine.Command = cmd
	// 4. Special RL10 case (auth is handled differently)
	if strings.Split(cmdLine.Command, " ")[0] == "rl10" {
		cmdLine.RL10 = true
	}
	// 5. Validate we have everything we need
	validateCommandLine(&cmdLine)
	// 6. We're done
	return &cmdLine, nil
}
// validateCommandLine aborts the process (via kingpin.Fatalf) unless the
// parsed command line carries enough information to authenticate. Commands
// that need no credentials — setup, actions, json, help, and rl10 — are
// exempt.
func validateCommandLine(cmdLine *cmd.CommandLine) {
	if cmdLine.ShowHelp || cmdLine.RL10 {
		return
	}
	switch cmdLine.Command {
	case "setup", "actions", "json":
		return
	}
	// True when no token-based credential (or noAuth escape hatch) was given.
	noToken := cmdLine.OAuthToken == "" && cmdLine.OAuthAccessToken == "" && cmdLine.APIToken == "" && !cmdLine.NoAuth
	if cmdLine.Account == 0 && noToken {
		kingpin.Fatalf("missing --account option")
	}
	if cmdLine.Host == "" {
		kingpin.Fatalf("missing --host option")
	}
	if cmdLine.Password == "" && noToken {
		kingpin.Fatalf("missing login info, use --email and --pwd or use --key, --apiToken or --rl10")
	}
}
// Update the code below when adding new clients. This is the only place that needs to be changed.
// List all client commands below
const (
	// Cm15Command is the command for API 1.5 client.
	Cm15Command = "cm15"
	// Cm16Command is the command for API 1.6 client.
	Cm16Command = "cm16"
	// SsCommand is the command for SS client.
	SsCommand = "ss"
	// Rl10Command is the command for RL10 client.
	Rl10Command = "rl10"
	// CaCommand is the command for CA client.
	CaCommand = "ca"
	// PolicyCommand is the command for Policy client.
	PolicyCommand = "policy"
)
// APIClient instantiates a client with the given name from command line arguments.
// name must be one of the *Command constants above; an unrecognized name
// yields an error rather than a nil client.
func APIClient(name string, cmdLine *cmd.CommandLine) (cmd.CommandClient, error) {
	switch name {
	case Cm15Command:
		return cm15.FromCommandLine(cmdLine)
	case Cm16Command:
		return cm16.FromCommandLine(cmdLine)
	case SsCommand:
		return ss.FromCommandLine(cmdLine)
	case Rl10Command:
		return rl10.FromCommandLine(cmdLine)
	case CaCommand:
		return ca.FromCommandLine(cmdLine)
	case PolicyCommand:
		return policy.FromCommandLine(cmdLine)
	default:
		return nil, fmt.Errorf("No client for '%s'", name)
	}
}
// RegisterClientCommands registers all API client commands.
func RegisterClientCommands(app *kingpin.Application) {
cm15Cmd := app.Command(Cm15Command, cm15.APIName)
registrar := rsapi.Registrar{APICmd: cm15Cmd}
cm15.RegisterCommands(®istrar)
cm16Cmd := app.Command(Cm16Command, cm16.APIName)
registrar = rsapi.Registrar{APICmd: cm16Cmd}
cm16.RegisterCommands(®istrar)
ssCmd := app.Command(SsCommand, ss.APIName)
registrar = rsapi.Registrar{APICmd: ssCmd}
ss.RegisterCommands(®istrar)
rl10Cmd := app.Command(Rl10Command, rl10.APIName)
registrar = rsapi.Registrar{APICmd: rl10Cmd}
rl10.RegisterCommands(®istrar)
caCmd := app.Command(CaCommand, ca.APIName)
registrar = rsapi.Registrar{APICmd: caCmd}
ca.RegisterCommands(®istrar)
policyCmd := app.Command(PolicyCommand, policy.APIName)
registrar = rsapi.Registrar{APICmd: policyCmd}
policy.RegisterCommands(®istrar)
}
|
package sql
import (
"context"
"database/sql"
"fmt"
"reflect"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/zdao-pro/sky_blue/pkg/util"
)
var (
	//ErrNoPtr is returned by Select when dest is not a pointer.
	ErrNoPtr = errors.New("noptr")
	//ErrNoResult is returned by Select when the query yields no rows.
	ErrNoResult = errors.New("noResult")
)
//Model routes queries either through an active transaction (Tx, when
//non-nil) or directly through the underlying connection pool (DB).
type Model struct {
	DB *DB
	Tx *Tx
}
//NewModel returns a Model backed by db, with no transaction attached.
func NewModel(db *DB) (md *Model) {
	return &Model{DB: db}
}
// Exec executes a query without returning any rows.
// The args are for any placeholder parameters in the query.
// When a tracing span exists in the context, a child span tagged with the
// SQL text and arguments is recorded around the call. The statement runs on
// the active transaction when one is set, otherwise on the DB pool.
func (m *Model) Exec(c context.Context, query string, args ...interface{}) (res sql.Result, err error) {
	s := opentracing.SpanFromContext(c)
	if nil != s {
		// util.FuncName(3) presumably names the caller three frames up the
		// stack for the span name — confirm the depth if refactoring.
		span2 := opentracing.StartSpan(util.FuncName(3), opentracing.ChildOf(s.Context()))
		span2.SetTag("sql", query)
		span2.SetTag("args", args)
		defer span2.Finish()
	}
	if m.Tx != nil {
		return m.Tx.Exec(c, query, args...)
	}
	return m.DB.Exec(c, query, args...)
}
// Query executes a query that returns rows, typically a SELECT. The args are
// for any placeholder parameters in the query.
// Tracing and Tx-vs-DB routing behave exactly as in Exec.
func (m *Model) Query(c context.Context, query string, args ...interface{}) (*Rows, error) {
	s := opentracing.SpanFromContext(c)
	if nil != s {
		// util.FuncName(3) presumably names the caller three frames up —
		// confirm the depth if refactoring.
		span2 := opentracing.StartSpan(util.FuncName(3), opentracing.ChildOf(s.Context()))
		span2.SetTag("sql", query)
		span2.SetTag("args", args)
		defer span2.Finish()
	}
	if m.Tx != nil {
		return m.Tx.Query(c, query, args...)
	}
	return m.DB.Query(c, query, args...)
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until Row's
// Scan method is called.
// Tracing and Tx-vs-DB routing behave exactly as in Exec.
func (m *Model) QueryRow(c context.Context, query string, args ...interface{}) *Row {
	s := opentracing.SpanFromContext(c)
	if nil != s {
		// util.FuncName(3) presumably names the caller three frames up —
		// confirm the depth if refactoring.
		span2 := opentracing.StartSpan(util.FuncName(3), opentracing.ChildOf(s.Context()))
		span2.SetTag("sql", query)
		span2.SetTag("args", args)
		defer span2.Finish()
	}
	if m.Tx != nil {
		return m.Tx.QueryRow(c, query, args...)
	}
	return m.DB.QueryRow(c, query, args...)
}
// Select runs the query and scans the result set into dest.
// dest must be a pointer to a struct (only the first row is used) or a
// pointer to a slice of structs (all rows). It returns ErrNoPtr when
// dest is not a pointer and ErrNoResult when the query yields no rows.
func (m *Model) Select(c context.Context, dest interface{}, query string, args ...interface{}) (err error) {
	s := opentracing.SpanFromContext(c)
	if nil != s {
		span2 := opentracing.StartSpan(util.FuncName(3), opentracing.ChildOf(s.Context()))
		span2.SetTag("sql", query)
		span2.SetTag("args", args)
		defer span2.Finish()
	}
	var rs *Rows
	if m.Tx != nil {
		rs, err = m.Tx.Query(c, query, args...)
	} else {
		rs, err = m.DB.Query(c, query, args...)
	}
	if nil != err {
		return
	}
	rt := reflect.ValueOf(dest)
	// dest must be a pointer so the result can be written back.
	if rt.Kind() != reflect.Ptr {
		err = ErrNoPtr
		return
	}
	a := rt.Elem()
	// Convert the result set into a list of column-name -> value maps.
	columns, _ := rs.Columns()
	cache := make([]interface{}, len(columns))
	for index := range cache {
		var v interface{}
		cache[index] = &v
	}
	var list []map[string]interface{}
	for rs.Next() {
		// The scan error was previously discarded, which could fill rows
		// with stale values from the previous iteration.
		if err = rs.Scan(cache...); err != nil {
			err = fmt.Errorf("scanning row: %w", err)
			return
		}
		item := make(map[string]interface{})
		for i, data := range cache {
			item[columns[i]] = *data.(*interface{}) // unwrap to the concrete driver value
		}
		list = append(list, item)
	}
	if len(list) == 0 {
		err = ErrNoResult
		return
	}
	if reflect.Struct == a.Kind() {
		// Single struct: populate from the first row only.
		vType := a.Type()
		fieldMap := parseField(vType)
		convertStruct(a, vType, list[0], fieldMap)
	} else if reflect.Slice == a.Kind() {
		// Slice of structs: one element per row.
		vType := a.Type().Elem()
		fieldMap := parseField(vType)
		for _, data := range list {
			v := reflect.New(vType).Elem()
			convertStruct(v, vType, data, fieldMap)
			a = reflect.Append(a, v)
		}
		rt.Elem().Set(a)
	}
	return
}
// Begin starts a transaction. The isolation level is dependent on the driver.
// The transaction is stored on the Model, so subsequent calls are routed
// through it until Commit or Rollback.
func (m *Model) Begin(c context.Context) (err error) {
	m.Tx, err = m.DB.Begin(c)
	return err
}
// Rollback aborts the transaction. It is a no-op when no transaction
// has been started, mirroring the nil guard in Commit (previously a
// Rollback without Begin dereferenced a nil Tx and panicked).
func (m *Model) Rollback() (err error) {
	if m.Tx != nil {
		err = m.Tx.Rollback()
	}
	return
}
// Commit commits the transaction. It is a no-op when no transaction
// has been started.
func (m *Model) Commit() (err error) {
	if m.Tx != nil {
		err = m.Tx.Commit()
	}
	return
}
|
package stateful
import (
"context"
"fmt"
aliceapi "github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/alice/api"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/errors"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/todolist"
)
// createAction carries the parameters for creating a todo list; an
// empty name means the user still needs to be asked for one.
type createAction struct {
	name string
}
// createFromScratch handles a list-creation request arriving either as
// a button press (with the chosen name in the payload) or as a
// recognized CreateList intent. It returns (nil, nil) when the request
// is not a creation request at all.
func (h *Handler) createFromScratch(ctx context.Context, req *aliceapi.Request) (*aliceapi.Response, errors.Err) {
	if req.Request.Type == aliceapi.RequestTypeButton {
		if req.Request.Payload == nil || !req.Request.Payload.CreateList {
			return nil, nil
		}
		return h.doCreate(ctx, &createAction{name: req.Request.Payload.ChooseListName})
	}
	intnt := req.Request.NLU.Intents.CreateList
	if intnt == nil {
		return nil, nil
	}
	// The original built a local createAction from the (name, ok) pair
	// but then ignored it and passed name directly; the dead variable is
	// removed here. When the slot is absent, name is the zero value and
	// doCreate will prompt the user for a list name.
	name, _ := intnt.Slots.ListName.AsString()
	return h.doCreate(ctx, &createAction{name: name})
}
// createRequireName handles the follow-up utterance after the skill
// has asked the user to name the new list: any plain-text reply is
// taken verbatim as the list name.
func (h *Handler) createRequireName(ctx context.Context, req *aliceapi.Request) (*aliceapi.Response, errors.Err) {
	if req.Request.Type != aliceapi.RequestTypeSimple {
		return nil, nil
	}
	return h.doCreate(ctx, &createAction{name: req.Request.OriginalUtterance})
}
// doCreate creates a todo list named action.name. An empty name makes
// the skill ask the user for one; duplicate-name and list-limit errors
// are turned into conversational replies, and any other error is
// propagated unchanged.
func (h *Handler) doCreate(ctx context.Context, action *createAction) (*aliceapi.Response, errors.Err) {
	if action.name == "" {
		// No name yet: prompt and remember we are waiting for one.
		return &aliceapi.Response{
			Response: &aliceapi.Resp{Text: "Как назвать новый список?"},
			State:    &aliceapi.StateData{State: aliceapi.StateCreateReqName},
		}, nil
	}
	_, err := h.todoListService.Create(ctx, &todolist.ListCreateRequest{Name: action.name})
	if err == nil {
		return &aliceapi.Response{
			Response: &aliceapi.Resp{
				Text: fmt.Sprintf("Готово, создала список \"%s\"", action.name),
			},
		}, nil
	}
	switch err.GetCode() {
	case errors.CodeDuplicateName:
		// Stay in the "asking for a name" state so the user can retry.
		dupErr := err.(*errors.DuplicateName)
		return &aliceapi.Response{
			Response: &aliceapi.Resp{Text: fmt.Sprintf(
				"У вас уже есть список с похожим названием - \"%s\". Попробуйте придумать другое название",
				dupErr.Name,
			)},
			State: &aliceapi.StateData{State: aliceapi.StateCreateReqName},
		}, nil
	case errors.CodeLimitExceeded:
		return &aliceapi.Response{Response: &aliceapi.Resp{
			Text: "У вас слишком много списков",
		}}, nil
	}
	return nil, err
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"os"
"strconv"
"strings"
)
// main reads n and one line of space-separated integers, then performs
// an insertion sort while counting how many single-element shifts it
// makes, and prints that count.
func main() {
	var n int
	fmt.Scanf("%d", &n)
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan()
	var arr []int
	for _, field := range strings.Split(scanner.Text(), " ") {
		value, _ := strconv.Atoi(field)
		arr = append(arr, value)
	}
	shifts := 0
	for j := 1; j < n; j++ {
		current := arr[j]
		placed := false
		for i := j - 1; i >= 0; i-- {
			if arr[i] <= current {
				// Found the insertion point: drop current just after it.
				arr[i+1] = current
				placed = true
				break
			}
			// Element is larger than current: shift it one cell right.
			arr[i+1] = arr[i]
			shifts++
		}
		if !placed {
			// current is the smallest seen so far.
			arr[0] = current
		}
	}
	fmt.Println(shifts)
}
// print writes the slice as space-separated integers on one line;
// kept for ad-hoc debugging of the sort loop.
func print(arr []int) {
	var buf bytes.Buffer
	for idx, val := range arr {
		if idx > 0 {
			buf.WriteByte(' ')
		}
		buf.WriteString(strconv.Itoa(val))
	}
	fmt.Println(buf.String())
}
|
// ˅
package main
import (
"fmt"
"os"
)
// ˄
// CommandList is the parse-tree node for a sequence of commands that
// ends at the "end" token. (The ˅/˄ markers delimit tool-managed
// regions; leave them in place.)
type CommandList struct {
	// ˅
	// ˄
	// nodes holds the parsed commands in source order.
	nodes []INode
	// ˅
	// ˄
}

// NewCommandList returns an empty command list ready for Parse.
func NewCommandList() *CommandList {
	// ˅
	return &CommandList{}
	// ˄
}

// Parse consumes tokens from context until the matching "end" token,
// parsing each run as a Command node. A missing "end" is treated as a
// fatal syntax error and terminates the process.
func (self *CommandList) Parse(context *Context) {
	// ˅
	for {
		if context.GetToken() == "" {
			// Ran out of input before "end" was seen.
			fmt.Println("Missing 'end'")
			os.Exit(1)
		} else if context.GetToken() == "end" {
			context.SlideToken("end")
			break
		} else {
			commandNode := NewCommand()
			commandNode.Parse(context)
			self.nodes = append(self.nodes, commandNode)
		}
	}
	// ˄
}
// ToString renders the parsed commands as a single space-separated
// string.
func (self *CommandList) ToString() string {
	// ˅
	var result string
	for index, node := range self.nodes {
		if index > 0 {
			result += " "
		}
		result += node.ToString()
	}
	return result
	// ˄
}
// ˅
// ˄
|
/*
Go Language Raspberry Pi Interface
(c) Copyright David Thorpe 2016-2018
All Rights Reserved
Documentation http://djthorpe.github.io/gopi/
For Licensing and Usage information, please see LICENSE.md
*/
// Low Noise Amplifier Settings
package rfm69
import "github.com/djthorpe/sensors"
// LNAImpedance returns the cached LNA input impedance setting.
func (this *rfm69) LNAImpedance() sensors.RFMLNAImpedance {
	return this.lna_impedance
}

// LNAGain returns the cached LNA gain setting.
func (this *rfm69) LNAGain() sensors.RFMLNAGain {
	return this.lna_gain
}
// LNACurrentGain reads the current LNA gain back from the device
// register; on failure it returns RFM_LNA_GAIN_AUTO alongside the error.
func (this *rfm69) LNACurrentGain() (sensors.RFMLNAGain, error) {
	_, _, lna_gain, err := this.getRegLNA()
	if err != nil {
		return sensors.RFM_LNA_GAIN_AUTO, err
	}
	return lna_gain, nil
}
// SetLNA writes the LNA impedance and gain to the device register,
// reads the register back to verify the write took effect, and only
// then updates the cached values. A mismatch between written and
// read-back values yields ErrUnexpectedResponse.
func (this *rfm69) SetLNA(impedance sensors.RFMLNAImpedance, gain sensors.RFMLNAGain) error {
	this.log.Debug("<sensors.RFM69.SetLNA{ impedance=%v gain=%v }", impedance, gain)

	// Mutex lock: register write + verify must be atomic.
	this.lock.Lock()
	defer this.lock.Unlock()

	// Write
	if err := this.setRegLNA(impedance, gain); err != nil {
		return err
	}

	// Read back and verify before trusting the cached state.
	if impedance_read, gain_read, _, err := this.getRegLNA(); err != nil {
		return err
	} else if impedance_read != impedance {
		this.log.Debug2("SetLNA expecting impedance=%v, got=%v", impedance, impedance_read)
		return sensors.ErrUnexpectedResponse
	} else if gain_read != gain {
		this.log.Debug2("SetLNA expecting gain=%v, got=%v", gain, gain_read)
		return sensors.ErrUnexpectedResponse
	} else {
		this.lna_impedance = impedance
		this.lna_gain = gain
	}
	return nil
}
|
package main
import (
"fmt"
"os"
"github.com/containerd/containerd/pkg/seed"
"github.com/docker/buildx/commands"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli-plugins/manager"
"github.com/docker/cli/cli-plugins/plugin"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/stack"
"github.com/moby/buildkit/util/tracing/detect"
"go.opentelemetry.io/otel"
_ "github.com/moby/buildkit/util/tracing/detect/delegated"
_ "github.com/moby/buildkit/util/tracing/env"
// FIXME: "k8s.io/client-go/plugin/pkg/client/auth/azure" is excluded because of compilation error
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
_ "github.com/docker/buildx/driver/docker"
_ "github.com/docker/buildx/driver/docker-container"
_ "github.com/docker/buildx/driver/kubernetes"
)
var experimental string
// init wires up process-wide infrastructure before main runs: RNG
// seeding, version info for stack traces, the tracing service name,
// and an OpenTelemetry error handler that keeps tracing errors off
// stdio.
func init() {
	seed.WithTimeAndRand()
	stack.SetVersionInfo(version.Version, version.Revision)
	detect.ServiceName = "buildx"
	// do not log tracing errors to stdio
	otel.SetErrorHandler(skipErrors{})
}
// main runs buildx either standalone (invoked directly rather than as
// a docker CLI plugin) or as a docker CLI plugin via plugin.RunPlugin.
func main() {
	if os.Getenv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND") == "" {
		if len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName {
			// Standalone mode: build a docker CLI instance and run the
			// root command directly.
			dockerCli, err := command.NewDockerCli()
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			opts := cliflags.NewClientOptions()
			// Initialize can fail (e.g. unreadable config); its error was
			// previously ignored, leaving a half-initialized CLI.
			if err := dockerCli.Initialize(opts); err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			rootCmd := commands.NewRootCmd(os.Args[0], false, dockerCli)
			if err := rootCmd.Execute(); err != nil {
				os.Exit(1)
			}
			os.Exit(0)
		}
	}

	// Plugin mode.
	dockerCli, err := command.NewDockerCli()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	p := commands.NewRootCmd("buildx", true, dockerCli)
	meta := manager.Metadata{
		SchemaVersion: "0.1.0",
		Vendor:        "Docker Inc.",
		Version:       version.Version,
		Experimental:  experimental != "",
	}
	if err := plugin.RunPlugin(dockerCli, p, meta); err != nil {
		if sterr, ok := err.(cli.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(dockerCli.Err(), sterr.Status)
			}
			// StatusError should only be used for errors, and all errors should
			// have a non-zero exit status, so never exit with 0
			if sterr.StatusCode == 0 {
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		// Print any buildkit source locations attached to the error.
		for _, s := range errdefs.Sources(err) {
			s.Print(dockerCli.Err())
		}
		if debug.IsEnabled() {
			fmt.Fprintf(dockerCli.Err(), "error: %+v", stack.Formatter(err))
		} else {
			fmt.Fprintf(dockerCli.Err(), "error: %v\n", err)
		}
		os.Exit(1)
	}
}
// skipErrors is an otel.ErrorHandler that deliberately discards every
// error so tracing problems never reach stdio.
type skipErrors struct{}

// Handle implements otel.ErrorHandler by dropping err.
func (skipErrors) Handle(err error) {}
|
package schema
import (
"encoding/json"
"fmt"
"net/http"
"github.com/AlecAivazis/survey/v2"
"github.com/MakeNowJust/heredoc"
"github.com/loginradius/lr-cli/api"
"github.com/loginradius/lr-cli/prompt"
"github.com/loginradius/lr-cli/request"
"github.com/loginradius/lr-cli/config"
"github.com/spf13/cobra"
)
// NewschemaCmd returns the "schema" subcommand, which interactively
// deletes a schema field. (Typos in the user-facing help text —
// "commmand", "feild", "sucessfully" — are fixed.)
func NewschemaCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "schema",
		Short: "delete schema",
		Long:  `This command deletes schema fields`,
		Example: heredoc.Doc(`$ lr delete schema
? Select the field you want to delete from the list:
...
...
The field has been successfully deleted
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			return delete()
		},
	}
	return cmd
}
// delete prompts the user to pick an active schema field and disables
// it via the admin-console API. Email and password fields are
// protected, and the command is gated on a paid plan.
func delete() error {
	res, err := api.GetSites()
	if err != nil {
		return err
	}
	if res.Productplan.Name == "free" {
		fmt.Println("Kindly Upgrade the plan to enable this command for your app")
		return nil
	}
	conf := config.GetInstance()
	activeFieldResp, err := api.GetStandardFields("active")
	// This error was previously ignored; a failed fetch would have
	// panicked below when indexing activeFieldResp.Data.
	if err != nil {
		return err
	}
	var options []string
	for i := 0; i < len(activeFieldResp.Data); i++ {
		options = append(options, activeFieldResp.Data[i].Display)
	}
	var ind int
	err = prompt.SurveyAskOne(&survey.Select{
		Message: "Select the field you want to delete from the list:",
		Options: options,
	}, &ind, survey.WithPageSize(15))
	if err != nil {
		// Prompt cancellation is treated as a no-op, not a failure.
		return nil
	}
	if activeFieldResp.Data[ind].Name == "emailid" || activeFieldResp.Data[ind].Name == "password" {
		fmt.Println("EmailId and Password fields cannot be deleted")
		return nil
	}
	// "Deleting" a field means disabling it and posting the whole
	// field set back.
	activeFieldResp.Data[ind].Enabled = false
	body, _ := json.Marshal(activeFieldResp)
	url := conf.AdminConsoleAPIDomain + "/platform-configuration/default-fields?"
	resp, err := request.Rest(http.MethodPost, url, nil, string(body))
	// This error was previously ignored; resp could be nil/garbage.
	if err != nil {
		return err
	}
	var resultResp api.StandardFields
	if err := json.Unmarshal(resp, &resultResp); err != nil {
		return err
	}
	fmt.Println("The field has been successfully deleted")
	return nil
}
|
package greek
// Symbols maps case ("upper"/"lower") to Greek letter names and their
// characters. The lowercase table additionally contains "sigma_alt",
// the final-form sigma used at the end of words. (The previous TODO
// claimed only lowercase was present; both cases are here, and the
// redundant inner composite-literal types are removed per gofmt -s.)
var Symbols = map[string]map[string]string{
	"upper": {
		"alpha":   "Α",
		"beta":    "Β",
		"gamma":   "Γ",
		"delta":   "Δ",
		"epsilon": "Ε",
		"zeta":    "Ζ",
		"eta":     "Η",
		"theta":   "Θ",
		"iota":    "Ι",
		"kappa":   "Κ",
		"lambda":  "Λ",
		"mu":      "Μ",
		"nu":      "Ν",
		"xi":      "Ξ",
		"omicron": "Ο",
		"pi":      "Π",
		"ro":      "Ρ",
		"sigma":   "Σ",
		"tau":     "Τ",
		"upsilon": "Υ",
		"phi":     "Φ",
		"chi":     "Χ",
		"psi":     "Ψ",
		"omega":   "Ω",
	},
	"lower": {
		"alpha":     "α",
		"beta":      "β",
		"gamma":     "γ",
		"delta":     "δ",
		"epsilon":   "ε",
		"zeta":      "ζ",
		"eta":       "η",
		"theta":     "θ",
		"iota":      "ι",
		"kappa":     "κ",
		"lambda":    "λ",
		"mu":        "μ",
		"nu":        "ν",
		"xi":        "ξ",
		"omicron":   "ο",
		"pi":        "π",
		"ro":        "ρ",
		"sigma":     "σ",
		"sigma_alt": "ς", // final-form sigma, used at the end of words
		"tau":       "τ",
		"upsilon":   "υ",
		"phi":       "φ",
		"chi":       "χ",
		"psi":       "ψ",
		"omega":     "ω",
	},
}
|
package router
import (
"github.com/labstack/echo"
"teachEcho/control"
)
// AdmRouter registers the admin routes on the given group.
// All routes in this group require a token.
func AdmRouter(adm *echo.Group) {
	adm.POST("/class/add", control.ClassAdd)
	adm.GET("/class/drop/:id", control.ClassDrop)
	adm.POST("/class/edit", control.ClassEdit)
	adm.GET("/user/page", control.UserPage)
}
|
package main
import (
"bufio"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"time"
)
// endpoints are the paths served by the mock HTTP server.
var endpoints = []string{"cats", "dogs", "birds", "fish"}

// letterBytes is the alphabet used for random test strings.
var letterBytes = "abcdefghijklmnopqrstuvwxyz"

// RandStringBytesTesting returns a random lowercase string of exactly
// n characters. (The original allocated n+1 bytes and therefore
// returned one more character than requested.)
func RandStringBytesTesting(n int) string {
	b := make([]byte, n)
	// Reseeding on every call is wasteful but harmless for test data.
	rand.Seed(time.Now().UnixNano())
	for i := range b {
		b[i] = letterBytes[rand.Intn(len(letterBytes))]
	}
	return string(b)
}
// create our own test data
func CreateTestData(lineCount int) string {
f, err := ioutil.TempFile("", "sample")
if err != nil {
panic(err)
}
w := bufio.NewWriter(f)
for i := 0; i < lineCount; i++ {
tempString := RandStringBytesTesting(rand.Intn(10))
_, err = w.WriteString(tempString + "\n")
}
w.Flush()
f.Sync()
f.Close()
return f.Name()
}
// create mock http server
func CreateMockHTTPServer() *httptest.Server {
helloHandler := func(w http.ResponseWriter, req *http.Request) {
io.WriteString(w, "Hello, world!\n")
}
mux := http.NewServeMux()
for _, ep := range endpoints {
mux.HandleFunc(ep, helloHandler)
}
return httptest.NewUnstartedServer(mux)
}
/****************/
/** TESTS **/
/****************/
// TestScanLines checks that scanLines counts every line of a generated
// file into the jobs channel and that a missing file yields an error.
func TestScanLines(t *testing.T) {
	recordCount := 1000
	testFile := CreateTestData(recordCount)
	testJobs := make(chan string, recordCount)
	returnedCount, err := scanLines(testFile, testJobs)
	if err != nil {
		t.Errorf("scanLines() failed: %s", err)
	}
	if returnedCount != recordCount {
		t.Errorf("returnedCount length is %d, should have been %d", returnedCount, recordCount)
	}
	// Error path: nonexistent input file.
	_, err = scanLines("thisFileDoesntExist", testJobs)
	if err == nil {
		t.Errorf("scanLines() should have failed but didn't")
	}
	os.Remove(testFile)
}
// TestGetDir exercises getDir against a running mock server (success)
// and against a server that has already been shut down (failure).
func TestGetDir(t *testing.T) {
	mockServer := CreateMockHTTPServer()
	mockServer.Start()
	defer mockServer.Close()
	client := &http.Client{
		Timeout: 2 * time.Second,
	}
	// Use a server that is already closed so the failure case is
	// deterministic (connection refused) instead of depending on
	// nothing happening to listen on localhost:80.
	deadServer := httptest.NewServer(http.NotFoundHandler())
	deadServer.Close()
	err := getDir(deadServer.URL, "does-not-exist", client)
	if err == nil {
		t.Errorf("getDir() should have returned error but did not")
	}
	err = getDir(mockServer.URL, "cats", client)
	if err != nil {
		t.Errorf("getDir() returned an error: %s", err)
	}
}
// TestParseHost verifies that parseHost normalizes bare hosts, hosts
// with a scheme, and hosts with a trailing slash to the same URL form.
func TestParseHost(t *testing.T) {
	testGood := "http://dotcom.com/"
	test1 := parseHost("dotcom.com")
	if test1 != testGood {
		t.Errorf("got %s, should have been %s", test1, testGood)
	}
	test2 := parseHost("http://dotcom.com")
	if test2 != testGood {
		t.Errorf("got %s, should have been %s", test2, testGood)
	}
	// sanity check: already-normalized input is unchanged
	test3 := parseHost("http://dotcom.com/")
	if test3 != testGood {
		t.Errorf("got %s, should have been %s", test3, testGood)
	}
}

// TestParseTitle verifies that parseTitle extracts the <title> text
// from an HTML document.
func TestParseTitle(t *testing.T) {
	testPage := `<html><head><title>testing</title></head><body>just some content</body></html>`
	returnTitle := parseTitle(strings.NewReader(testPage))
	if returnTitle != "testing" {
		t.Errorf("got %s, should have been testing", returnTitle)
	}
}
|
package sort
import "testing"
// TestMaxK currently only logs MaxK's output and asserts nothing, so
// it can never fail. NOTE(review): add an assertion once MaxK's
// expected result for this input is confirmed.
func TestMaxK(t *testing.T) {
	nums := []int{2, 2, 1}
	t.Log(MaxK(nums, 2))
}
|
package main
import (
"gitlab.nordstrom.com/huggin/stacktracker"
"github.com/vmware/govmomi"
)
// main is an empty placeholder entry point. The bare trailing
// "return" was redundant (staticcheck S1023) and is removed.
// NOTE(review): the file's imports are currently unused — confirm
// whether they are placeholders for upcoming code.
func main() {
}
package areamgr
import (
_ "pb"
"server"
"server/libs/log"
"server/share"
)
var (
	// App is the process-wide AreaMgr instance, created in init.
	App *AreaMgr
)

// AreaMgr is the area-manager application: an embedded server plus
// the area bookkeeping it coordinates.
type AreaMgr struct {
	*server.Server
	// quit signals shutdown. NOTE(review): not used in this chunk.
	quit chan int
	Area *Areas
}
// OnPrepare is the server preparation hook; it only logs readiness
// and always accepts.
func (app *AreaMgr) OnPrepare() bool {
	log.LogMessage(app.AppId, " prepared")
	return true
}

// OnEvent dispatches server events. For master messages carrying
// M_CREATEAPP_BAK it decodes the CreateAppBak payload and forwards it
// to the area bookkeeping.
func (app *AreaMgr) OnEvent(e string, args map[string]interface{}) {
	switch e {
	case server.MASERTINMSG: // (sic) constant name comes from the server package
		msg := args["msg"].(server.MasterMsg)
		if msg.Id == share.M_CREATEAPP_BAK {
			var cab share.CreateAppBak
			if err := share.DecodeMsg(msg.Body, &cab); err != nil {
				log.LogError(err)
				return
			}
			App.Area.createAppBak(cab)
		}
	}
}

// GetAllHandler exposes the server package's handler registry.
func GetAllHandler() map[string]interface{} {
	return server.GetAllHandler()
}

// init constructs the global App and registers its Areas as the
// "AreaMgr" remote.
func init() {
	App = &AreaMgr{
		Area: NewAreas(),
	}
	server.RegisterRemote("AreaMgr", App.Area)
}
|
package libseccomp
// Action is seccomp trap action
type Action uint32

// Action defines seccomp action to the syscall
// default value 0 is invalid
const (
	ActionAllow Action = iota + 1
	ActionErrno
	ActionTrace
	ActionKill
)

// MsgDisallow, Msghandle defines the action needed when trapped by
// seccomp filter (iota starts at 1 so the zero value is invalid here
// as well)
const (
	MsgDisallow int16 = iota + 1
	MsgHandle
)
// Action returns the basic action value, i.e. the low 16 bits of a.
func (a Action) Action() Action {
	const basicMask = 0xffff
	return a & basicMask
}
|
package main
import "fmt"
/* func leftChild(i int) int {
return 2*i + 1
}
func percDown(nums []int, i, N int) {
var (
child, tmp int
)
for tmp = nums[i]; leftChild(i) < N; i = child {
child = leftChild(i)
if child != N-1 && nums[child+1] > nums[child] {
child++
}
if tmp < nums[child] {
nums[i] = nums[child]
} else {
break
}
}
nums[i] = tmp
}
func findKthLargest(nums []int, k int) int {
k = len(nums) - k + 1
for i := len(nums) / 2; i >= 0; i-- {
percDown(nums, i, len(nums))
}
for i := len(nums) - 1; i >= k; i-- {
nums[0], nums[i] = nums[i], nums[0]
percDown(nums, 0, i)
}
return nums[0]
} */
// quickSelect returns the element that would occupy index k if nums
// were sorted ascending. It partitions nums[left..right] in place
// around the leftmost element and recurses into the side containing k.
func quickSelect(nums []int, k, left, right int) int {
	if left == right {
		return nums[left]
	}
	pivot := nums[left]
	lo, hi := left+1, right
	for {
		for lo < right && nums[lo] <= pivot {
			lo++
		}
		for hi > left && nums[hi] >= pivot {
			hi--
		}
		if lo >= hi {
			break
		}
		nums[lo], nums[hi] = nums[hi], nums[lo]
	}
	// hi is the pivot's final sorted position.
	nums[left], nums[hi] = nums[hi], nums[left]
	switch {
	case k == hi:
		return nums[hi]
	case k > hi:
		return quickSelect(nums, k, hi+1, right)
	default:
		return quickSelect(nums, k, left, hi-1)
	}
}
// findKthLargest returns the kth largest element of nums using
// quickselect: the kth largest sits at sorted index len(nums)-k.
// nums is partially reordered in place.
func findKthLargest(nums []int, k int) int {
	return quickSelect(nums, len(nums)-k, 0, len(nums)-1)
}

// main demos findKthLargest: the 3rd largest of {1,2,6,8,4} is 4.
func main() {
	fmt.Println(findKthLargest([]int{1, 2, 6, 8, 4}, 3))
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package client
import (
"context"
"fmt"
"sync"
"time"
pb "github.com/CS-SI/SafeScale/broker"
clitools "github.com/CS-SI/SafeScale/utils"
)
// network is the part of broker client handling Network
type network struct {
// session is not used currently
session *Session
}
// List returns all networks (or only those managed by SafeScale when
// all is false — NOTE(review): confirm the semantics of the All flag
// against the service). The timeout parameter, previously accepted but
// ignored, now bounds the RPC; a non-positive timeout means no limit.
func (n *network) List(all bool, timeout time.Duration) (*pb.NetworkList, error) {
	n.session.Connect()
	defer n.session.Disconnect()
	service := pb.NewNetworkServiceClient(n.session.connection)
	ctx := context.Background()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	return service.List(ctx, &pb.NWListRequest{
		All: all,
	})
}
// Delete deletes several networks concurrently, one goroutine per
// name. The error counter is now mutex-protected (it was previously
// incremented from multiple goroutines — a data race), and the
// timeout parameter, previously ignored, bounds the RPCs.
func (n *network) Delete(names []string, timeout time.Duration) error {
	n.session.Connect()
	defer n.session.Disconnect()
	service := pb.NewNetworkServiceClient(n.session.connection)
	ctx := context.Background()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs int
	)
	networkDeleter := func(aname string) {
		defer wg.Done()
		_, err := service.Delete(ctx, &pb.Reference{Name: aname})
		if err != nil {
			fmt.Println(DecorateError(err, "deletion of network", true).Error())
			mu.Lock()
			errs++
			mu.Unlock()
		} else {
			fmt.Printf("Network '%s' deleted\n", aname)
		}
	}
	wg.Add(len(names))
	for _, target := range names {
		go networkDeleter(target)
	}
	wg.Wait()
	if errs > 0 {
		return clitools.ExitOnRPC("")
	}
	return nil
}
// Inspect returns details of the named network. The timeout parameter,
// previously accepted but ignored, now bounds the RPC; a non-positive
// timeout means no limit.
func (n *network) Inspect(name string, timeout time.Duration) (*pb.Network, error) {
	n.session.Connect()
	defer n.session.Disconnect()
	service := pb.NewNetworkServiceClient(n.session.connection)
	ctx := context.Background()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	return service.Inspect(ctx, &pb.Reference{Name: name})
}
// Create creates a network from the given definition. The timeout
// parameter, previously accepted but ignored, now bounds the RPC; a
// non-positive timeout means no limit.
func (n *network) Create(def pb.NetworkDefinition, timeout time.Duration) (*pb.Network, error) {
	n.session.Connect()
	defer n.session.Disconnect()
	service := pb.NewNetworkServiceClient(n.session.connection)
	ctx := context.Background()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	return service.Create(ctx, &def)
}
|
package main
import (
cryptoRand "crypto/rand"
"encoding/json"
"fmt"
"io"
"io/fs"
"log"
"math"
"math/big"
"os"
"path/filepath"
"time"
"golang.org/x/exp/rand"
"gopkg.in/yaml.v3"
)
// Config is the YAML configuration for a run: the tree to mutate, an
// optional RNG seed (0 means "generate one"), and the changers to apply.
type Config struct {
	RootDir  string        `yaml:"root_dir"`
	Seed     uint64        `yaml:"seed,omitempty"`
	Changers []ChangerSpec `yaml:"changers"`
}

// ChangerSpec selects exactly one change method (Append, Replace or
// FlipByte), the per-file probability of applying it, an optional id
// for the change log, and whether the change counts as corruption.
type ChangerSpec struct {
	Append     *AppendSpec   `yaml:"append,omitempty"`
	Replace    *ReplaceSpec  `yaml:"replace,omitempty"`
	FlipByte   *FlipByteSpec `yaml:"flipbyte,omitempty"`
	Likelihood float64       `yaml:"likelihood"`
	ID         string        `yaml:"id,omitempty"`
	Corrupt    bool          `yaml:"corrupt,omitempty"`
}

// AppendSpec appends Size random bytes to the end of the file.
type AppendSpec struct {
	Size int64 `yaml:"size"`
}

// ReplaceSpec overwrites up to Size random bytes in place.
type ReplaceSpec struct {
	Size int64 `yaml:"size"`
}

// FlipByteSpec inverts one byte at a random offset.
type FlipByteSpec struct{}

// ChangeContext is the per-file state handed to a Changer: the file's
// path, the decision RNG, the content RNG, and its stat info.
type ChangeContext struct {
	Path        string
	Rand        *rand.Rand
	ContentRand *rand.Rand
	Info        os.FileInfo
}

// Changer mutates a single file in a named way.
type Changer interface {
	MethodName() string
	ChangeFile(ctx *ChangeContext) error
}
var _ Changer = &FlipByteChanger{}

// FlipByteChanger inverts a single byte of the file at a random offset.
type FlipByteChanger struct {
	spec *FlipByteSpec
}

// NewFlipByteChanger returns a Changer implementing the flipbyte method.
func NewFlipByteChanger(spec *FlipByteSpec) *FlipByteChanger {
	return &FlipByteChanger{
		spec: spec,
	}
}

// MethodName identifies this changer in the change log.
func (c *FlipByteChanger) MethodName() string {
	return "flipbyte"
}

// ChangeFile bitwise-inverts one byte at an offset drawn from the
// content RNG. Empty files are left untouched.
func (c *FlipByteChanger) ChangeFile(ctx *ChangeContext) error {
	fileSize := ctx.Info.Size()
	if fileSize == 0 {
		return nil
	}
	f, err := os.OpenFile(ctx.Path, os.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer f.Close()
	off := ctx.ContentRand.Int63n(fileSize)
	var buf [1]byte
	_, err = f.ReadAt(buf[:], off)
	if err != nil {
		return err
	}
	// Complementing the byte guarantees the file content changes.
	buf[0] = ^buf[0]
	_, err = f.WriteAt(buf[:], off)
	if err != nil {
		return err
	}
	return nil
}
var _ Changer = &AppendChanger{}

// AppendChanger appends spec.Size random bytes to the end of the file.
type AppendChanger struct {
	spec *AppendSpec
}

// NewAppendChanger returns a Changer implementing the append method.
func NewAppendChanger(spec *AppendSpec) *AppendChanger {
	return &AppendChanger{
		spec: spec,
	}
}

// MethodName identifies this changer in the change log.
func (c *AppendChanger) MethodName() string {
	return "append"
}
// ChangeFile appends spec.Size bytes drawn from the content RNG to
// the end of the file.
func (c *AppendChanger) ChangeFile(ctx *ChangeContext) error {
	f, err := os.OpenFile(ctx.Path, os.O_WRONLY|os.O_APPEND, 0)
	if err != nil {
		return err
	}
	defer f.Close()
	_, copyErr := io.CopyN(f, ctx.ContentRand, c.spec.Size)
	return copyErr
}
var _ Changer = &ReplaceChanger{}

// ReplaceChanger overwrites up to spec.Size bytes of the file in place.
type ReplaceChanger struct {
	spec *ReplaceSpec
}

// NewReplaceChanger returns a Changer implementing the replace method.
func NewReplaceChanger(spec *ReplaceSpec) *ReplaceChanger {
	return &ReplaceChanger{
		spec: spec,
	}
}

// MethodName identifies this changer in the change log.
func (c *ReplaceChanger) MethodName() string {
	return "replace"
}
// ChangeFile overwrites up to spec.Size bytes starting at a random
// offset, clamped to the file's end but always replacing at least one
// byte. Empty files are left untouched. Seek errors, previously
// ignored, are now returned.
func (c *ReplaceChanger) ChangeFile(ctx *ChangeContext) error {
	fileSize := ctx.Info.Size()
	if fileSize == 0 {
		return nil
	}
	f, err := os.OpenFile(ctx.Path, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	defer f.Close()
	off := ctx.ContentRand.Int63n(fileSize)
	bytesToWrite := c.spec.Size
	if bytesToWrite > fileSize-off {
		bytesToWrite = fileSize - off
		// Replace at least one byte
		if bytesToWrite == 0 {
			// fileSize > 0, so off >= 1
			off--
			bytesToWrite = 1
		}
	}
	if _, err := f.Seek(off, io.SeekStart); err != nil {
		return err
	}
	if _, err := io.CopyN(f, ctx.ContentRand, bytesToWrite); err != nil {
		return err
	}
	return nil
}
// Reporter accumulates per-run statistics and, when encoder is set,
// emits one JSON change-log event per changed file.
type Reporter struct {
	encoder            *json.Encoder
	filesUnchanged     int64
	filesChanged       int64
	filesCorrupted     int64
	filesCorruptedOnly int64
}

// Summary is the final tally of a run.
type Summary struct {
	FilesUnchanged     int64
	FilesChanged       int64
	FilesCorrupted     int64
	FilesCorruptedOnly int64
}

// Summarise snapshots the current counters.
func (r *Reporter) Summarise() Summary {
	return Summary{
		FilesUnchanged:     r.filesUnchanged,
		FilesChanged:       r.filesChanged,
		FilesCorrupted:     r.filesCorrupted,
		FilesCorruptedOnly: r.filesCorruptedOnly,
	}
}

// StartFile begins tracking one file; the returned context collects
// the changers applied to it until Finish is called.
func (r *Reporter) StartFile(path string) *reporterFileCtx {
	return &reporterFileCtx{
		reporter: r,
		path:     path,
	}
}

// reporterFileCtx tracks the changers applied to a single file.
type reporterFileCtx struct {
	reporter *Reporter
	path     string
	changers []changeLogChanger
	// corrupted: at least one applied changer was marked Corrupt.
	corrupted bool
	// corruptedOnly: every applied changer so far was marked Corrupt.
	corruptedOnly bool
}
// AddChanger records that changer (registered under id) was applied to
// this file, and updates the corrupted/corruptedOnly flags.
// Invariant: corruptedOnly is true iff every changer applied so far
// (including the first) was marked as corrupting.
func (r *reporterFileCtx) AddChanger(id string, changer Changer, corrupted bool) {
	r.changers = append(r.changers, changeLogChanger{
		ID:     id,
		Method: changer.MethodName(),
	})
	if r.corruptedOnly {
		// One non-corrupting changer breaks the "corrupted only" run.
		if !corrupted {
			r.corruptedOnly = false
		}
	} else {
		if corrupted {
			if len(r.changers) == 1 {
				// This changer is the first
				r.corruptedOnly = true
			}
			r.corrupted = true
		}
	}
}

// changeLogEvent is the JSON shape of one change-log record.
type changeLogEvent struct {
	Event         string            `json:"event"`
	Path          string            `json:"path"`
	Corrupted     bool              `json:"corrupted"`
	CorruptedOnly bool              `json:"corrupted_only"`
	Changers      []changeLogChanger `json:"changers"`
}

// changeLogChanger names one applied changer within an event.
type changeLogChanger struct {
	Method string `json:"method"`
	ID     string `json:"id"`
}

// Finish folds this file into the reporter's counters and, when a
// change-log encoder is configured and the file was changed, emits a
// "changed" event.
func (r *reporterFileCtx) Finish() {
	if len(r.changers) == 0 {
		r.reporter.filesUnchanged += 1
		return
	}
	if r.reporter.encoder != nil {
		r.reporter.encoder.Encode(changeLogEvent{
			Event:         "changed",
			Path:          r.path,
			Corrupted:     r.corrupted,
			CorruptedOnly: r.corruptedOnly,
			Changers:      r.changers,
		})
	}
	r.reporter.filesChanged += 1
	if r.corrupted {
		r.reporter.filesCorrupted += 1
	}
	if r.corruptedOnly {
		r.reporter.filesCorruptedOnly += 1
	}
}
// specFromConfig validates the config and turns it into a runnable
// ChangeTreeSpec: each ChangerSpec must name exactly one method, and a
// seed is generated from crypto/rand when none is configured.
// On error it now returns nil (the partially-built value was
// meaningless), and error strings follow Go convention (lowercase).
func specFromConfig(config *Config) (*ChangeTreeSpec, error) {
	steps := make([]ChangeStepSpec, len(config.Changers))
	for i, c := range config.Changers {
		var methodsFoundCount int
		var changer Changer
		id := c.ID
		if id == "" {
			// Fall back to the changer's position as its log id.
			id = fmt.Sprintf("#%d", i)
		}
		if c.Append != nil {
			methodsFoundCount += 1
			changer = NewAppendChanger(c.Append)
		}
		if c.Replace != nil {
			methodsFoundCount += 1
			changer = NewReplaceChanger(c.Replace)
		}
		if c.FlipByte != nil {
			methodsFoundCount += 1
			changer = NewFlipByteChanger(c.FlipByte)
		}
		if methodsFoundCount == 0 {
			return nil, fmt.Errorf("method for changer %s could not be identified", id)
		} else if methodsFoundCount > 1 {
			return nil, fmt.Errorf("two change methods for changer %s", id)
		}
		steps[i] = ChangeStepSpec{
			Changer:    changer,
			ID:         id,
			Corrupt:    c.Corrupt,
			Likelihood: c.Likelihood,
		}
	}
	var seed uint64
	if config.Seed != 0 {
		seed = config.Seed
		log.Println("Using specified seed:", seed)
	} else {
		// max is (1 << 64) - 1, resulting in the range [0, 1 << 64) for cryptoRand.Int
		max := big.NewInt(0).SetUint64(math.MaxUint64)
		bigInt, err := cryptoRand.Int(cryptoRand.Reader, max)
		if err != nil {
			return nil, err
		}
		log.Println("Generated new seed:", bigInt.Uint64())
		seed = bigInt.Uint64()
	}
	return &ChangeTreeSpec{
		RootDir: config.RootDir,
		Seed:    seed,
		Steps:   steps,
	}, nil
}
// ChangeTreeSpec is the fully-resolved plan for one run: the root of
// the tree to walk, the RNG seed, the ordered change steps, and an
// optional writer for the JSON change log.
type ChangeTreeSpec struct {
	RootDir         string
	Seed            uint64
	Steps           []ChangeStepSpec
	ChangeLogOutput io.Writer
}

// ChangeStepSpec is one resolved changer plus its application
// probability and corruption flag.
type ChangeStepSpec struct {
	Changer    Changer
	ID         string
	Corrupt    bool
	Likelihood float64
}
// ChangeTree walks spec.RootDir and applies each step to each regular
// file with the step's likelihood, recording outcomes via a Reporter.
// Corrupting steps restore the file's mtime afterwards so the change
// is not detectable by timestamp.
// NOTE(review): the decision RNG and the content RNG are seeded with
// the same value — confirm this is intentional.
func ChangeTree(spec *ChangeTreeSpec) (Summary, error) {
	src := rand.NewSource(spec.Seed)
	rnd := rand.New(src)
	contentSrc := rand.NewSource(spec.Seed)
	contentRand := rand.New(contentSrc)
	reporter := Reporter{}
	if spec.ChangeLogOutput != nil {
		reporter.encoder = json.NewEncoder(spec.ChangeLogOutput)
	}
	err := filepath.WalkDir(spec.RootDir, func(path string, entry fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if entry.IsDir() {
			return nil
		}
		repCtx := reporter.StartFile(path)
		defer repCtx.Finish()
		for _, c := range spec.Steps {
			// Each step is applied independently with its own probability.
			if rnd.Float64() <= c.Likelihood {
				info, err := entry.Info()
				if err != nil {
					return err
				}
				ctx := &ChangeContext{
					Path:        path,
					Rand:        rnd,
					ContentRand: contentRand,
					Info:        info,
				}
				err = c.Changer.ChangeFile(ctx)
				if err != nil {
					return err
				}
				if c.Corrupt {
					// Put the pre-change mtime back so the corruption is
					// invisible to timestamp-based checks.
					err = os.Chtimes(path, time.Now(), info.ModTime())
					if err != nil {
						return err
					}
				}
				repCtx.AddChanger(c.ID, c.Changer, c.Corrupt)
			}
		}
		return nil
	})
	return reporter.Summarise(), err
}
// readConfig loads and decodes the YAML config at path. Both failure
// modes now return an error (the Open failure previously panicked
// while the Decode failure was returned — inconsistent for a function
// with an error result).
func readConfig(path string) (*Config, error) {
	configFile, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer configFile.Close()
	config := &Config{}
	dec := yaml.NewDecoder(configFile)
	if err := dec.Decode(config); err != nil {
		return nil, err
	}
	return config, nil
}
// main reads the config named on the command line, builds the change
// spec, runs it with the change log on stdout, and logs a summary to
// stderr. Fatal errors panic, which is acceptable for a one-shot tool.
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Usage:", os.Args[0], "<config.yaml>")
		os.Exit(1)
	}
	// Keep diagnostics on stderr so stdout carries only the JSON log.
	log.SetOutput(os.Stderr)
	config, err := readConfig(os.Args[1])
	if err != nil {
		panic(err)
	}
	changeTreeSpec, err := specFromConfig(config)
	if err != nil {
		panic(err)
	}
	changeTreeSpec.ChangeLogOutput = os.Stdout
	summary, err := ChangeTree(changeTreeSpec)
	if err != nil {
		panic(err)
	}
	log.Printf("Summary: %+v", summary)
}
|
package lla
import (
"fmt"
"runtime"
"time"
)
// Log levels, ordered from least to most verbose. LogLevelMax is a
// sentinel bounding the valid range.
const (
	LogLevelEmpty int = iota
	LogLevelPanic
	LogLevelError
	LogLevelWarn
	LogLevelInfo
	LogLevelDebug
	LogLevelDump
	LogLevelTrace
	LogLevelMax
)

// logLevelNames holds the fixed-width display name for each level.
var logLevelNames = [LogLevelMax]string{
	"EMPTY", "PANIC", "ERROR", "WARN ", "INFO ", "DEBUG", "DUMP ", "TRACE",
}

// GetLogLevelText returns the display name for level, or "UNDEF" when
// level is out of range.
func GetLogLevelText(level int) string {
	if level < 0 || level >= LogLevelMax {
		return "UNDEF"
	}
	return logLevelNames[level]
}
// LogAgent is a leveled logger labeled with a module title.
type LogAgent struct {
	logLevel int
	modTitle string
}

// Init sets the threshold level and the module title shown in each line.
func (log *LogAgent) Init(level int, title string) {
	log.logLevel = level
	log.modTitle = title
}

// IsTrace reports whether trace-level output is enabled.
func (log LogAgent) IsTrace() bool {
	return log.logLevel >= LogLevelTrace
}

// IsDump reports whether dump-level output is enabled.
func (log LogAgent) IsDump() bool {
	return log.logLevel >= LogLevelDump
}

// IsDebug reports whether debug-level output is enabled.
func (log LogAgent) IsDebug() bool {
	return log.logLevel >= LogLevelDebug
}

// IsInfo reports whether info-level output is enabled.
func (log LogAgent) IsInfo() bool {
	return log.logLevel >= LogLevelInfo
}

// IsWarn reports whether warn-level output is enabled.
func (log LogAgent) IsWarn() bool {
	return log.logLevel >= LogLevelWarn
}

// IsError reports whether error-level output is enabled.
func (log LogAgent) IsError() bool {
	return log.logLevel >= LogLevelError
}

// IsPanic reports whether panic-level output is enabled.
func (log LogAgent) IsPanic() bool {
	return log.logLevel >= LogLevelPanic
}

// IsEmpty reports whether logging is disabled entirely.
func (log LogAgent) IsEmpty() bool {
	return log.logLevel == LogLevelEmpty
}
func (log *LogAgent) Trace(format string, args ...interface{}) {
if log != nil && log.IsTrace() {
log.formatLine(LogLevelTrace, fmt.Sprintf(format, args...))
}
}
func (log *LogAgent) Dump(format string, args ...interface{}) {
if log != nil && log.IsDump() {
log.formatLine(LogLevelDump, fmt.Sprintf(format, args...))
}
}
func (log *LogAgent) Debug(format string, args ...interface{}) {
if log != nil && log.IsDebug() {
log.formatLine(LogLevelDebug, fmt.Sprintf(format, args...))
}
}
func (log *LogAgent) Info(format string, args ...interface{}) {
if log != nil && log.IsInfo() {
log.formatLine(LogLevelInfo, fmt.Sprintf(format, args...))
}
}
func (log *LogAgent) Warn(format string, args ...interface{}) {
if log != nil && log.IsWarn() {
log.formatLine(LogLevelWarn, fmt.Sprintf(format, args...))
}
}
func (log *LogAgent) Error(format string, args ...interface{}) {
if log != nil && log.IsError() {
log.formatLine(LogLevelError, fmt.Sprintf(format, args...))
}
}
func (log *LogAgent) Panic(format string, args ...interface{}) {
if log != nil && log.IsPanic() {
text := fmt.Sprintf(format, args...)
log.formatLine(LogLevelPanic, TraceCallStack(text, 2))
}
}
// formatLine builds one timestamped log line for the given level and
// hands it to LogToFile. For debug/error/panic levels it also embeds
// the caller's file, line, and function name.
func (log *LogAgent) formatLine(level int, text string) {
	// Format with ".000000": the zero verbs keep trailing zeros, so the
	// timestamp is always a fixed 26 characters with the decimal point
	// present. The previous ".999999" format dropped trailing zeros and
	// was padded with '0' afterwards, which produced a malformed stamp
	// with no decimal point at all whenever the fractional second was
	// exactly zero (e.g. "15:04:050000000").
	moment := time.Now().Format("2006-01-02 15:04:05.000000")
	gid := GetGID()
	var mesg string
	switch level {
	case LogLevelDebug, LogLevelError, LogLevelPanic:
		// Caller(2) skips formatLine and its exported wrapper
		// (Debug/Error/Panic) so the reported location is the user call
		// site; keep this depth in sync with those wrappers.
		pc, file, line, ok := runtime.Caller(2)
		if ok {
			mesg = fmt.Sprintf("%s [%s %s %s] %s:%d %s() %s\n",
				moment, logLevelNames[level], gid, log.modTitle, file, line, runtime.FuncForPC(pc).Name(), text)
		} else {
			mesg = fmt.Sprintf("%s [%s %s %s] %s\n", moment, logLevelNames[level], gid, log.modTitle, text)
		}
	case LogLevelTrace, LogLevelDump, LogLevelInfo, LogLevelWarn:
		mesg = fmt.Sprintf("%s [%s %s %s] %s\n", moment, logLevelNames[level], gid, log.modTitle, text)
	}
	LogToFile(level, mesg)
}
// PanicRecover swallows an in-flight panic and, when the agent is
// non-nil, records it at warn level. It must be invoked via defer:
// recover only takes effect when called directly from a deferred
// function in the panicking goroutine.
func (log *LogAgent) PanicRecover() {
	r := recover()
	if r == nil {
		return
	}
	if log != nil {
		log.Warn("Panic Recovered: %+v", r)
	}
}
|
package main
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/nsf/termbox-go"
"net"
"time"
)
// point is the wire format of one UDP datagram: three little-endian
// int32 values decoded by encoding/binary.Read in handleConnection.
// Field order and fixed-size types must match the sender exactly —
// do not reorder or resize fields.
type point struct {
	// X is used as the terminal column of the marker center.
	X int32
	// Y is used as the terminal row of the marker center.
	Y int32
	// N is received but never read by this program; presumably a
	// sequence number — TODO confirm against the sender.
	N int32
}
func main() {
adr, err := net.ResolveUDPAddr("udp", "127.0.0.1:5000")
if err != nil {
fmt.Println(err)
return
}
listener, err := net.ListenUDP("udp", adr)
if err != nil {
fmt.Println(err)
return
}
ch := make(chan point)
var p point
err = termbox.Init()
if err != nil {
panic(err)
}
defer termbox.Close()
for {
go handleConnection(listener, ch)
p = <-ch
termbox.Clear(termbox.ColorWhite, termbox.ColorBlack)
termbox.SetCell(int(p.X), int(p.Y), '*', termbox.ColorRed, termbox.ColorBlack)
termbox.SetCell(int(p.X-1), int(p.Y), '*', termbox.ColorGreen, termbox.ColorBlack)
termbox.SetCell(int(p.X+1), int(p.Y), '*', termbox.ColorGreen, termbox.ColorBlack)
termbox.SetCell(int(p.X), int(p.Y+1), '*', termbox.ColorGreen, termbox.ColorBlack)
termbox.SetCell(int(p.X), int(p.Y-1), '*', termbox.ColorGreen, termbox.ColorBlack)
termbox.SetCell(int(p.X+1), int(p.Y+1), '*', termbox.ColorWhite, termbox.ColorBlack)
termbox.SetCell(int(p.X-1), int(p.Y+1), '*', termbox.ColorWhite, termbox.ColorBlack)
termbox.SetCell(int(p.X+1), int(p.Y-1), '*', termbox.ColorWhite, termbox.ColorBlack)
termbox.SetCell(int(p.X-1), int(p.Y-1), '*', termbox.ColorWhite, termbox.ColorBlack)
termbox.Flush()
time.Sleep(1 * time.Second)
}
}
// handleConnection reads a single UDP datagram from con, decodes it as
// a little-endian point, and sends the result on ch. On a read or
// decode error it prints the error and returns without sending.
func handleConnection(con *net.UDPConn, ch chan point) {
	packet := make([]byte, 2000)
	size, err := con.Read(packet)
	if err != nil {
		fmt.Println(err)
		return
	}
	var p point
	if err = binary.Read(bytes.NewReader(packet[:size]), binary.LittleEndian, &p); err != nil {
		fmt.Println(err)
		return
	}
	ch <- p
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.