text
stringlengths 11
4.05M
|
|---|
package main
import (
"flag"
"fmt"
"log"
"os"
"github.com/slavrd/go-tfev4-backup/helpers"
)
// Command-line flags. Each may alternatively be supplied through the
// environment variable named in its usage text; the flag takes precedence
// (see validateInput).
var fpass = flag.String("pass", "", "Encryption password for the backup data. Can also be set via TFE_BACKUP_PASSWORD environment variable.")
var fhost = flag.String("host", "", "Hostname of the tfe instance. E.g. tfe.mydomain.com. Can also be set via TFE_HOSTNAME environment variable.")
var ftoken = flag.String("token", "", "TFE backup authorization token. Can also be set via TFE_BACKUP_TOKEN environment variable.")
var ffile = flag.String("file", "", "File to read/write TFE backup. Can also be set via TFE_BACKUP_FILE environment variable.")
// main dispatches the "backup" or "restore" operation named by the first
// positional argument, using settings resolved by validateInput.
func main() {
	flag.Parse()
	host, token, pass, file, err := validateInput()
	if err != nil {
		// Input errors go to stderr, not stdout.
		fmt.Fprintf(os.Stderr, "%v\n", err)
		printUsage()
		os.Exit(1)
	}
	switch flag.Arg(0) {
	case "backup":
		log.Printf("saving backup to %q", file)
		f, err := os.Create(file)
		if err != nil {
			log.Fatalf("error saving backup: %v", err)
		}
		if err := helpers.TfeBackup(host, token, pass, f); err != nil {
			f.Close() // best effort; the process exits immediately after
			log.Fatalf("error making backup: %v", err)
		}
		// BUG FIX: close explicitly and check the error. The previous
		// deferred Close ignored failures, so a failed flush could leave a
		// truncated file that was still reported as a successful backup.
		if err := f.Close(); err != nil {
			log.Fatalf("error saving backup: %v", err)
		}
		log.Println("backup successful!")
	case "restore":
		log.Printf("restoring backup from %q", file)
		f, err := os.Open(file)
		if err != nil {
			log.Fatalf("error opening backup: %v", err)
		}
		defer f.Close()
		if err := helpers.TfeRestore(host, token, pass, f); err != nil {
			log.Fatalf("error restoring backup: %v", err)
		}
		// Log success for consistency with the backup branch.
		log.Println("restore successful!")
	}
}
// resolve returns flagVal when non-empty, otherwise the value of the
// environment variable envKey. The boolean reports whether any value was
// found.
func resolve(flagVal, envKey string) (string, bool) {
	if flagVal != "" {
		return flagVal, true
	}
	if v := os.Getenv(envKey); v != "" {
		return v, true
	}
	return "", false
}

// validateInput checks that exactly one operation ("backup" or "restore")
// was given and resolves the four required settings from flags or their
// fallback environment variables. Every missing setting is logged
// individually, then a single "missing required input" error is returned.
func validateInput() (host, token, pass, file string, err error) {
	if !flag.Parsed() {
		flag.Parse()
	}
	if len(flag.Args()) != 1 {
		err = fmt.Errorf("missing operation - backup or restore")
		return
	}
	if flag.Args()[0] != "backup" && flag.Args()[0] != "restore" {
		err = fmt.Errorf("accepted operations are 'backup' or 'restore'")
		return
	}
	// The four settings follow an identical flag-then-env resolution rule,
	// previously written out four times; resolve() dedupes it.
	var isErr bool
	var ok bool
	if pass, ok = resolve(*fpass, "TFE_BACKUP_PASSWORD"); !ok {
		log.Printf("encryption password was not provided.")
		isErr = true
	}
	if host, ok = resolve(*fhost, "TFE_HOSTNAME"); !ok {
		log.Printf("TFE hostname was not provided.")
		isErr = true
	}
	if token, ok = resolve(*ftoken, "TFE_BACKUP_TOKEN"); !ok {
		log.Printf("TFE backup authorization token was not provided.")
		isErr = true
	}
	if file, ok = resolve(*ffile, "TFE_BACKUP_FILE"); !ok {
		log.Printf("file to read/write TFE backup not provided.")
		isErr = true
	}
	if isErr {
		err = fmt.Errorf("missing required input")
	}
	return
}
// printUsage prints the command synopsis followed by the flag help text.
func printUsage() {
	program := os.Args[0]
	fmt.Printf("usage %s [args] <backup|restore>\n", program)
	flag.Usage()
}
|
package ctx
// MustProvision is like Provision but panics on error. It returns the
// deprovision callback for the caller to run at teardown.
func MustProvision(ctx *TestContext) func() {
	teardown, err := Provision(ctx)
	if err != nil {
		panic(err)
	}
	return teardown
}
// MustInstall is like Install but panics on error.
func MustInstall(ctx *TestContext) {
	err := Install(ctx)
	if err != nil {
		panic(err)
	}
}
|
package leetcode
import (
"fmt"
"testing"
)
// TestLRUCache exercises eviction on a capacity-5 cache, printing the
// cache state after each mutation for manual inspection.
func TestLRUCache(t *testing.T) {
	cache := Constructor(5)
	for key := 1; key <= 5; key++ {
		cache.Put(key, key)
	}
	fmt.Println(cache)
	cache.Put(1, 51)
	fmt.Println(cache)
	cache.Put(6, 6)
	fmt.Println(cache)
	fmt.Println(cache.Get(4), cache)
}
// TestLRUCache2 exercises the classic capacity-2 LRU sequence, printing
// intermediate cache state for manual inspection.
func TestLRUCache2(t *testing.T) {
	cache := Constructor(2)
	cache.Put(1, 1)
	fmt.Println(cache)
	cache.Put(2, 2)
	fmt.Println(cache)
	fmt.Println(cache.Get(1))
	fmt.Println(cache)
	cache.Put(3, 3)
	fmt.Println(cache)
	fmt.Println(cache.Get(2))
	fmt.Println(cache)
}
|
package main
import (
"fmt"
"github.com/jack0liu/vastflow"
)
// AttachVolumeRiver is a vastflow stream step that attaches a volume to a
// VM. It embeds vastflow.River for the framework's default behavior.
type AttachVolumeRiver struct {
	vastflow.River
}
// Register this river with the vastflow framework at program start so the
// engine can dispatch flows to it.
func init() {
	vastflow.RegisterStream(new(AttachVolumeRiver))
}
// Update configures this river's cycle behavior: interval 1, up to 3
// cycles, with durable state.
// NOTE(review): the unit of CycleInterval (seconds?) is defined by the
// vastflow framework — confirm against its documentation.
func (r *AttachVolumeRiver) Update(attr *vastflow.RiverAttr) {
	attr.CycleInterval = 1
	attr.CycleTimes = 3
	attr.Durable = true
}
// Flow performs the attach-volume step. It reads the target VM and volume
// ids from the headwaters context and reports success; an empty errCause
// with a nil err signals success to the framework.
func (r *AttachVolumeRiver) Flow(headwaters *vastflow.Headwaters) (errCause string, err error) {
	// fmt.Printf replaces the redundant fmt.Println(fmt.Sprintf(...)) pattern.
	fmt.Printf("AttachVolumeRiver flow, vmId:%s, volumeId:%s\n", headwaters.GetString("vmId"), headwaters.GetString("volId"))
	fmt.Println("attach volume success")
	return "", nil
}
// Cycle is the periodic re-check hook; this river has nothing to poll, so
// it always reports success.
func (r *AttachVolumeRiver) Cycle(headwaters *vastflow.Headwaters) (errCause string, err error) {
	return "", nil
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docgen
import (
"context"
"os"
"path/filepath"
"strings"
"time"
"cuelang.org/go/cue"
"github.com/google/go-cmp/cmp"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
corev1beta1 "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
const (
	// TestDir is the directory holding test fixture files.
	TestDir = "testdata"
	// DeployName is the fixture ComponentDefinition name for deployments.
	DeployName = "deployments.testapps"
	// WebserviceName is the fixture ComponentDefinition name for webservice.
	WebserviceName = "webservice.testapps"
)
// DefinitionFiles verifies that component definitions installed in the
// cluster are discovered and converted into the expected Capability
// values, via both GetComponentsFromCluster and GetCapabilitiesFromCluster.
var _ = Describe("DefinitionFiles", func() {
	// Expected capability parsed from the "deployments.testapps" definition.
	deployment := types.Capability{
		Namespace:   "testdef",
		Name:        DeployName,
		Type:        types.TypeComponentDefinition,
		CrdName:     "deployments.apps",
		Description: "description not defined",
		Category:    types.CUECategory,
		Parameters: []types.Parameter{
			{
				Name:     "image",
				Type:     cue.StringKind,
				Default:  "",
				Short:    "i",
				Required: true,
				Usage:    "Which image would you like to use for your service",
			},
			{
				Name:    "port",
				Type:    cue.IntKind,
				Short:   "p",
				Default: int64(8080),
				Usage:   "Which port do you want customer traffic sent to",
			},
			{
				Type: cue.ListKind,
				Name: "env",
			},
		},
		Labels: map[string]string{"usecase": "forplugintest"},
	}
	// Expected capability parsed from the "webservice.testapps" definition.
	websvc := types.Capability{
		Namespace:   "testdef",
		Name:        WebserviceName,
		Type:        types.TypeComponentDefinition,
		Description: "description not defined",
		Category:    types.CUECategory,
		Parameters: []types.Parameter{
			{
				Name:     "image",
				Type:     cue.StringKind,
				Default:  "",
				Short:    "i",
				Required: true,
				Usage:    "Which image would you like to use for your service",
			}, {
				Name:    "port",
				Type:    cue.IntKind,
				Short:   "p",
				Default: int64(6379),
				Usage:   "Which port do you want customer traffic sent to",
			},
			{
				Name: "env", Type: cue.ListKind,
			},
		},
		CrdName: "deployments.apps",
		Labels:  map[string]string{"usecase": "forplugintest"},
	}
	// Select only the definitions labeled by this test suite.
	req, _ := labels.NewRequirement("usecase", selection.Equals, []string{"forplugintest"})
	selector := labels.NewSelector().Add(*req)
	// Notice!! DefinitionPath Object is Cluster Scope object
	// which means objects created in other DefinitionNamespace will also affect here.
	It("getcomponents", func() {
		arg := common.Args{}
		arg.SetClient(k8sClient)
		workloadDefs, _, err := GetComponentsFromCluster(context.Background(), DefinitionNamespace, arg, selector)
		Expect(err).Should(BeNil())
		for i := range workloadDefs {
			// CueTemplate should always be fulfilled, even those whose CueTemplateURI is assigned,
			By("check CueTemplate is fulfilled")
			Expect(workloadDefs[i].CueTemplate).ShouldNot(BeEmpty())
			// Blank the template so the cmp.Diff below compares metadata only.
			workloadDefs[i].CueTemplate = ""
		}
		Expect(cmp.Diff(workloadDefs, []types.Capability{deployment, websvc})).Should(BeEquivalentTo(""))
	})
	It("getall", func() {
		arg := common.Args{}
		arg.SetClient(k8sClient)
		alldef, err := GetCapabilitiesFromCluster(context.Background(), DefinitionNamespace, arg, selector)
		Expect(err).Should(BeNil())
		for i := range alldef {
			// Blank the template so the cmp.Diff below compares metadata only.
			alldef[i].CueTemplate = ""
		}
		Expect(cmp.Diff(alldef, []types.Capability{deployment, websvc})).Should(BeEquivalentTo(""))
	})
})
// GetCapabilityByName should find a ComponentDefinition in the requested
// namespace and fall back to the default KubeVela namespace.
var _ = Describe("test GetCapabilityByName", func() {
	var (
		ctx        context.Context
		c          common.Args
		ns         string
		defaultNS  string
		cd1        corev1beta1.ComponentDefinition
		cd2        corev1beta1.ComponentDefinition
		component1 string
		component2 string
	)
	BeforeEach(func() {
		c = common.Args{}
		c.SetClient(k8sClient)
		ctx = context.Background()
		ns = "cluster-test-ns-suffix"
		defaultNS = types.DefaultKubeVelaNS
		component1 = "cd1"
		component2 = "cd2"
		By("create namespace")
		Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}})).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: defaultNS}})).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		By("create ComponentDefinition")
		// Both definitions share one fixture; only name/namespace differ.
		data, _ := os.ReadFile("testdata/componentDef.yaml")
		yaml.Unmarshal(data, &cd1)
		yaml.Unmarshal(data, &cd2)
		cd1.Namespace = ns
		cd1.Name = component1
		Expect(k8sClient.Create(ctx, &cd1)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		cd2.Namespace = defaultNS
		cd2.Name = component2
		Expect(k8sClient.Create(ctx, &cd2)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
	})
	AfterEach(func() {
		// Delete both definitions and wait until the API server confirms
		// they are gone, so later specs start from a clean cluster.
		for _, obj := range []client.Object{&cd1, &cd2} {
			key := client.ObjectKeyFromObject(obj)
			Expect(k8sClient.Delete(ctx, obj)).Should(Succeed())
			Eventually(func(g Gomega) {
				g.Expect(k8sClient.Get(ctx, key, obj)).Should(Satisfy(errors.IsNotFound))
			}, 10*time.Second).Should(Succeed())
		}
	})
	It("get capability", func() {
		By("ComponentDefinition is in the current namespace")
		_, err := GetCapabilityByName(ctx, c, component1, ns, nil)
		Expect(err).Should(BeNil())
		By("ComponentDefinition is in the default namespace")
		_, err = GetCapabilityByName(ctx, c, component2, ns, nil)
		Expect(err).Should(BeNil())
		By("capability cloud not be found")
		_, err = GetCapabilityByName(ctx, c, "a-component-definition-not-existed", ns, nil)
		Expect(err).Should(HaveOccurred())
	})
})
// GetNamespacedCapabilitiesFromCluster should aggregate definitions from
// the requested namespace and the default KubeVela namespace.
var _ = Describe("test GetNamespacedCapabilitiesFromCluster", func() {
	var (
		ctx        context.Context
		c          common.Args
		ns         string
		defaultNS  string
		cd1        corev1beta1.ComponentDefinition
		cd2        corev1beta1.ComponentDefinition
		component1 string
		component2 string
	)
	BeforeEach(func() {
		c = common.Args{}
		c.SetClient(k8sClient)
		ctx = context.Background()
		ns = "cluster-test-ns"
		defaultNS = types.DefaultKubeVelaNS
		component1 = "cd1"
		component2 = "cd2"
		By("create namespace")
		Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}})).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: defaultNS}})).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		By("create ComponentDefinition")
		// Both definitions share one fixture; only name/namespace differ.
		data, _ := os.ReadFile("testdata/componentDef.yaml")
		yaml.Unmarshal(data, &cd1)
		yaml.Unmarshal(data, &cd2)
		cd1.Namespace = ns
		cd1.Name = component1
		Expect(k8sClient.Create(ctx, &cd1)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		cd2.Namespace = defaultNS
		cd2.Name = component2
		Expect(k8sClient.Create(ctx, &cd2)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
	})
	AfterEach(func() {
		// Delete both definitions and wait for confirmed removal so later
		// specs start from a clean cluster.
		for _, obj := range []client.Object{&cd1, &cd2} {
			key := client.ObjectKeyFromObject(obj)
			Expect(k8sClient.Delete(ctx, obj)).Should(Succeed())
			Eventually(func(g Gomega) {
				g.Expect(k8sClient.Get(ctx, key, obj)).Should(Satisfy(errors.IsNotFound))
			}, 10*time.Second).Should(Succeed())
		}
	})
	It("get namespaced capabilities", func() {
		By("found all capabilities")
		capabilities, err := GetNamespacedCapabilitiesFromCluster(ctx, ns, c, nil)
		Expect(len(capabilities)).Should(Equal(2))
		Expect(err).Should(BeNil())
		By("found two capabilities with a bad namespace")
		// With a nonexistent namespace only the default-namespace definition
		// remains discoverable — hence the expected count of 1.
		// NOTE(review): the By text above says "two"; looks stale — confirm.
		capabilities, err = GetNamespacedCapabilitiesFromCluster(ctx, "a-bad-ns", c, nil)
		Expect(len(capabilities)).Should(Equal(1))
		Expect(err).Should(BeNil())
	})
})
// GetCapabilityFromDefinitionRevision resolves capabilities from stored
// DefinitionRevision objects, loaded here from pkg/definition/testdata.
var _ = Describe("test GetCapabilityFromDefinitionRevision", func() {
	var (
		ctx context.Context
		c   common.Args
	)
	BeforeEach(func() {
		c = common.Args{}
		c.SetClient(k8sClient)
		ctx = context.Background()
		By("create namespace")
		Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "rev-test-custom-ns"}})).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "rev-test-ns"}})).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		// Load test DefinitionRevisions files into client
		dir := filepath.Join("..", "..", "pkg", "definition", "testdata")
		testFiles, err := os.ReadDir(dir)
		Expect(err).Should(Succeed())
		for _, file := range testFiles {
			if !strings.HasSuffix(file.Name(), ".yaml") {
				continue
			}
			content, err := os.ReadFile(filepath.Join(dir, file.Name()))
			Expect(err).Should(Succeed())
			def := &corev1beta1.DefinitionRevision{}
			err = yaml.Unmarshal(content, def)
			Expect(err).Should(Succeed())
			// NOTE(review): this local shadows the imported controller-runtime
			// "client" package within the loop body.
			client, err := c.GetClient()
			Expect(err).Should(Succeed())
			err = client.Create(context.TODO(), def)
			Expect(err).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		}
	})
	It("non-existent defrev", func() {
		_, err := GetCapabilityFromDefinitionRevision(ctx, c, nil, "rev-test-custom-ns", "not-a-name", 0)
		Expect(err).ShouldNot(Succeed())
	})
	It("component type", func() {
		_, err := GetCapabilityFromDefinitionRevision(ctx, c, nil, "rev-test-ns", "webservice", 0)
		Expect(err).Should(Succeed())
	})
	It("trait type", func() {
		_, err := GetCapabilityFromDefinitionRevision(ctx, c, nil, "rev-test-custom-ns", "affinity", 0)
		Expect(err).Should(Succeed())
	})
})
|
package timeseries
import (
"bytes"
"context"
"database/sql"
"encoding/gob"
"fmt"
"log"
"sync/atomic"
"time"
)
// DBQuery is the SQL client's query hook. It must be assigned before Setup
// is called.
var DBQuery func(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)

// DBExec is the SQL client's exec hook. It must be assigned before
// CommitBlock is called.
var DBExec func(query string, args ...interface{}) (sql.Result, error)

// OutboundTimeout is an upper boundary for the amount of time for a followup on outbound events.
const OutboundTimeout = time.Hour

// lastBlockTrack is an in-memory copy of the write state (holds *blockTrack).
var lastBlockTrack atomic.Value
// blockTrack is a snapshot of the write state: the most recently committed
// block plus the aggregated pool depths as of that block.
type blockTrack struct {
	Height    int64
	Timestamp time.Time
	Hash      []byte
	aggTrack
}

// aggTrack has a snapshot of runningTotals.
type aggTrack struct {
	AssetE8DepthPerPool map[string]int64
	RuneE8DepthPerPool  map[string]int64
}
// Setup initializes the package. The previous state is restored (if there
// was any): the latest block_log row is loaded into the in-memory tracker
// and its aggregation state is applied to the recorder.
func Setup() (lastBlockHeight int64, lastBlockTimestamp time.Time, lastBlockHash []byte, err error) {
	const q = "SELECT height, timestamp, hash, agg_state FROM block_log ORDER BY height DESC LIMIT 1"
	rows, err := DBQuery(context.Background(), q)
	if err != nil {
		return 0, time.Time{}, nil, fmt.Errorf("last block lookup: %w", err)
	}
	defer rows.Close()

	var track blockTrack
	if rows.Next() {
		var ns int64
		var aggSerial []byte
		// BUG FIX: the Scan error was previously discarded, which could
		// silently restore a zero-value state from a malformed row.
		if err := rows.Scan(&track.Height, &ns, &track.Hash, &aggSerial); err != nil {
			return 0, time.Time{}, nil, fmt.Errorf("last block read: %w", err)
		}
		track.Timestamp = time.Unix(0, ns)
		if err := gob.NewDecoder(bytes.NewReader(aggSerial)).Decode(&track.aggTrack); err != nil {
			return 0, time.Time{}, nil, fmt.Errorf("restore with malformed aggregation state denied on %w", err)
		}
	}

	// sync in-memory tracker
	lastBlockTrack.Store(&track)

	// apply aggregation state to recorder
	for pool, E8 := range track.AssetE8DepthPerPool {
		v := E8 // copy
		recorder.assetE8DepthPerPool[pool] = &v
	}
	for pool, E8 := range track.RuneE8DepthPerPool {
		v := E8 // copy
		recorder.runeE8DepthPerPool[pool] = &v
	}

	return track.Height, track.Timestamp, track.Hash, rows.Err()
}
// CommitBlock marks the given height as done: it snapshots the recorder's
// aggregated depths, persists them with the block, applies the queued
// outbound/fee events, and publishes the new in-memory state.
// Invocation of EventListener during CommitBlock causes race conditions!
func CommitBlock(height int64, timestamp time.Time, hash []byte) error {
	// in-memory snapshot; the hash is copied so the caller may reuse its slice
	track := blockTrack{
		Height:    height,
		Timestamp: timestamp,
		Hash:      make([]byte, len(hash)),
		aggTrack: aggTrack{
			AssetE8DepthPerPool: recorder.AssetE8DepthPerPool(),
			RuneE8DepthPerPool:  recorder.RuneE8DepthPerPool(),
		},
	}
	copy(track.Hash, hash)

	// persist to database
	var aggSerial bytes.Buffer
	if err := gob.NewEncoder(&aggSerial).Encode(&track.aggTrack); err != nil {
		// won't bring the service down, but prevents state recovery
		// (typo "ommited" fixed in the log message below)
		log.Print("aggregation state omitted from persistence:", err)
	}
	const q = "INSERT INTO block_log (height, timestamp, hash, agg_state) VALUES ($1, $2, $3, $4) ON CONFLICT DO NOTHING"
	result, err := DBExec(q, height, timestamp.UnixNano(), hash, aggSerial.Bytes())
	if err != nil {
		return fmt.Errorf("persist block height %d: %w", height, err)
	}
	n, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("persist block height %d result: %w", height, err)
	}
	if n == 0 {
		log.Printf("block height %d already committed", height)
	}

	// calculate & reset
	recorder.linkedEvents.ApplyOutboundQ(&recorder.runningTotals, height, timestamp)
	recorder.linkedEvents.ApplyFeeQ(&recorder.runningTotals, height, timestamp)

	// commit in-memory state
	lastBlockTrack.Store(&track)
	return nil
}
// LastBlock gets the most recent commit.
func LastBlock() (height int64, timestamp time.Time, hash []byte) {
	t := lastBlockTrack.Load().(*blockTrack)
	height, timestamp, hash = t.Height, t.Timestamp, t.Hash
	return
}
// AssetAndRuneDepths gets the current snapshot handle.
// The asset price is the asset depth divided by the RUNE depth.
func AssetAndRuneDepths() (assetE8PerPool, runeE8PerPool map[string]int64, timestamp time.Time) {
	t := lastBlockTrack.Load().(*blockTrack)
	assetE8PerPool = t.aggTrack.AssetE8DepthPerPool
	runeE8PerPool = t.aggTrack.RuneE8DepthPerPool
	timestamp = t.Timestamp
	return
}
|
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"path"
"strconv"
"strings"
"time"
"../cloudstorage"
"../db"
"../util"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/rs/zerolog/log"
)
// RestFile is the JSON shape returned to API clients for a stored file,
// including a signed URL for direct download from cloud storage.
type RestFile struct {
	ID             string          `json:"id,omitempty"`
	Name           string          `json:"name,omitempty"`
	Year           int             `json:"year,omitempty"`
	CreatedAt      *time.Time      `json:"created_at,omitempty"`
	Type           string          `json:"type,omitempty"`
	LastModifiedAt *time.Time      `json:"last_modified_at,omitempty"`
	GetSignedURL   string          `json:"get_signed_url,omitempty"`
	Metadata       json.RawMessage `json:"metadata,omitempty"`
}
// getFileslistByUser returns every file owned by the user in the "id"
// path variable, each decorated with a signed download URL. Files whose
// URL cannot be generated are logged and skipped rather than failing the
// whole listing.
func (s *Server) getFileslistByUser(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	vars := mux.Vars(r)
	userID, ok := vars["id"]
	if !ok {
		log.Info().Msg("missing user id")
		http.Error(w, "missing user id", http.StatusBadRequest)
		return
	}
	ll := log.With().Str("user_id", userID).Logger()
	files, err := s.DBHandle.GetAllFiles(userID)
	if err != nil {
		ll.Warn().Err(err).Msg("unable to get fileslist by user")
		// FIX: a database failure is a server-side problem; 500 keeps this
		// consistent with the other handlers in this file (was 400).
		http.Error(w, "unable to get fileslist by user", http.StatusInternalServerError)
		return
	}
	var restFiles []*RestFile
	for _, file := range files {
		// We may need to log how long this takes.
		fileGetSignedURL, err := s.getSignedURL(ctx, file.Path)
		if err != nil {
			// Log and continue.
			ll.Warn().Err(err).Msg("unable to get file signed URL")
			continue
		}
		// NOTE(review): unlike getFileByID/getFilesByProperty, the Type
		// field is not populated here — confirm whether that is intended.
		restFiles = append(restFiles, &RestFile{
			ID:             file.ID,
			Name:           file.Name,
			Year:           file.Year,
			CreatedAt:      file.CreatedAt,
			LastModifiedAt: file.LastModifiedAt,
			GetSignedURL:   fileGetSignedURL,
			Metadata:       file.Metadata,
		})
	}
	RespondToRequest(w, restFiles)
}
// getStoreFileSignedURL returns a signed URL the client can use to upload
// (PUT) the named file for the given user and property.
func (s *Server) getStoreFileSignedURL(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	vars := mux.Vars(r)
	userID, ok := vars["id"]
	if !ok {
		log.Info().Msg("missing user id")
		http.Error(w, "missing user id", http.StatusBadRequest)
		return
	}
	ll := log.With().Str("user_id", userID).Logger()
	propertyID, ok := vars["property_id"]
	if !ok {
		log.Info().Msg("missing property id")
		http.Error(w, "missing property id", http.StatusBadRequest)
		return
	}
	// FIX: chain off ll so earlier fields (user_id) are retained in the
	// logger context, matching the other handlers in this file.
	ll = ll.With().Str("property_id", propertyID).Logger()
	fileName, ok := vars["file_name"]
	if !ok {
		log.Info().Msg("missing file name")
		http.Error(w, "missing file name", http.StatusBadRequest)
		return
	}
	ll = ll.With().Str("file_name", fileName).Logger()
	url, err := s.generateStoreFileSignedURL(ctx, userID, propertyID, fileName)
	if err != nil {
		ll.Error().Err(err).Msg("unable to generate put signed url")
		// BUG FIX: this previously responded "missing user id" (copy-paste);
		// a URL-generation failure is also a server-side error, not a 400.
		http.Error(w, "unable to generate put signed url", http.StatusInternalServerError)
		return
	}
	RespondToRequest(w, url)
}
// getFilesSummary responds with the per-user files summary from the
// database. The request context is currently unused.
func (s *Server) getFilesSummary(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	userID, ok := vars["id"]
	if !ok {
		log.Info().Msg("missing user id")
		http.Error(w, "missing user id", http.StatusBadRequest)
		return
	}
	ll := log.With().Str("user_id", userID).Logger()
	summary, err := s.DBHandle.GetFilesSummaryByUserID(userID)
	if err != nil {
		ll.Error().Err(err).Msg("unable to fetch files summary")
		http.Error(w, "unable to fetch files summary", http.StatusInternalServerError)
		return
	}
	RespondToRequest(w, summary)
}
// uploadFileByUser accepts a multipart form upload for the user in the
// "id" path variable, stores the payload in cloud storage under
// <userID>/files/<fileID>/<name>, and records the file (plus any property
// associations from the "properties" form field) in a single AddFile call.
func (s *Server) uploadFileByUser(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	vars := mux.Vars(r)
	userID, ok := vars["id"]
	if !ok {
		log.Info().Msg("missing user id")
		http.Error(w, "missing user id", http.StatusBadRequest)
		return
	}
	ll := log.With().Str("user_id", userID).Logger()
	err := r.ParseMultipartForm(32 << 20) // maxMemory 32MB
	if err != nil {
		ll.Error().Err(err).Msg("failed to parse multipart message")
		http.Error(w, "failed to parse multipart message", http.StatusBadRequest)
		return
	}
	uploadedFile, handler, err := r.FormFile("file")
	if err != nil {
		ll.Error().Err(err).Msg("error retrieving the file")
		http.Error(w, "error retrieving the file", http.StatusBadRequest)
		return
	}
	defer uploadedFile.Close()
	// Optional form fields; each falls back to a default when absent.
	fileName := r.FormValue("file_name")
	if fileName == "" {
		fileName = handler.Filename // default to the uploaded file's own name
	}
	fileType := r.FormValue("file_type")
	if fileType == "" {
		fileType = "other"
	}
	metadataFileType := r.FormValue("metadata_file_type")
	if metadataFileType == "" {
		metadataFileType = "unknown"
	}
	fileSizeBytesVal := r.FormValue("metadata_file_size_bytes")
	fileSizeBytes, err := strconv.Atoi(fileSizeBytesVal)
	if err != nil {
		// Unparseable or absent size is recorded as 0 rather than rejected.
		fileSizeBytes = 0
	}
	year := r.FormValue("year")
	if year == "" {
		y := time.Now().Year()
		year = fmt.Sprint(y)
	}
	properties := r.FormValue("properties")
	associatedProperties := strings.Split(properties, ",")
	ll = ll.With().Str("file_name", fileName).Logger()
	fileID := uuid.New().String()
	filePath := path.Join(userID, "files", fileID, fileName)
	now := time.Now()
	metadata := map[string]interface{}{
		"type": metadataFileType,
	}
	marshalledMetadata, err := json.Marshal(metadata)
	if err != nil {
		// log and continue.
		ll.Warn().Err(err).Msg("unable to marshal file metadata")
	}
	// NOTE(review): SizeKB is assigned the raw byte count parsed from
	// metadata_file_size_bytes — the units look inconsistent; confirm intent.
	file := &db.File{
		ID:             fileID,
		UserID:         userID,
		CreatedAt:      &now,
		LastModifiedAt: &now,
		Name:           fileName,
		Type:           db.FileType(fileType),
		Year:           util.GetYear(year),
		Path:           filePath,
		SizeKB:         fileSizeBytes,
		Metadata:       json.RawMessage(marshalledMetadata),
	}
	// The storage write is handed to AddFile as a callback so the database
	// record and the cloud-storage upload are coordinated in one place.
	addFileToCloudStorage := func() func(ctx context.Context) error {
		return func(ctx context.Context) error {
			return cloudstorage.AddCloudstorageFile(ctx, s.StorageClient, uploadedFile, s.UsersBucket, filePath)
		}
	}
	err = s.DBHandle.AddFile(ctx, userID, file, associatedProperties, addFileToCloudStorage())
	if err != nil {
		ll.Error().Err(err).Msg("unable to add file")
		http.Error(w, "unable to add file", http.StatusInternalServerError)
		return
	}
	ll.Info().Msg("successfully created file")
	RespondToRequest(w, file)
	return
}
// getFile either downloads an individual file or returns a signed URl.
// NOTE(review): the entire implementation is commented out, so this
// handler currently responds 200 with an empty body for every request.
func (s *Server) getFile(w http.ResponseWriter, r *http.Request) {
	// ctx := r.Context()
	// vars := mux.Vars(r)
	// userID, ok := vars["id"]
	// if !ok {
	// log.Info().Msg("missing user id")
	// http.Error(w, "missing user id", http.StatusBadRequest)
	// return
	// }
	// ll := log.With().Str("user_id", userID).Logger()
	// fileName, ok := vars["file_name"]
	// if !ok {
	// ll.Warn().Msg("file name not set")
	// http.Error(w, "file name not set", http.StatusBadRequest)
	// return
	// }
	// ll = ll.With().Str("file_name", fileName).Logger()
	// request, ok := vars["request"]
	// if !ok {
	// ll.Warn().Msg("file type not set")
	// http.Error(w, "file type not set", http.StatusBadRequest)
	// return
	// }
	// ll = ll.With().Str("request", request).Logger()
	// switch request {
	// case "download":
	// s.getFileData(ctx, userID, propertyID, fileName, w, r)
	// return
	// case "signed_url":
	// key := path.Join(userID, propertyDelimiter, propertyID, fileName)
	// url, err := s.getSignedURL(ctx, key)
	// if err != nil {
	// ll.Warn().Err(err).Msg("error getting signed url")
	// http.Error(w, "error getting signed url", http.StatusBadRequest)
	// }
	// RespondToRequest(w, url)
	// return
	// }
	// return
}
// getFileByID returns a single file record (with a signed download URL)
// for the user and file ids in the request path.
func (s *Server) getFileByID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	vars := mux.Vars(r)
	userID, ok := vars["id"]
	if !ok {
		log.Info().Msg("missing user id")
		http.Error(w, "missing user id", http.StatusBadRequest)
		return
	}
	ll := log.With().Str("user_id", userID).Logger()
	fileID, ok := vars["file_id"]
	if !ok {
		ll.Warn().Msg("file id not set")
		http.Error(w, "file id not set", http.StatusBadRequest)
		return
	}
	ll = ll.With().Str("file_id", fileID).Logger()
	file, err := s.DBHandle.GetFileById(userID, fileID)
	if err != nil {
		ll.Warn().Err(err).Msg("unable to get file by id")
		http.Error(w, "unable to get file by id", http.StatusInternalServerError)
		return
	}
	if file.Path == "" {
		// BUG FIX: this branch previously logged Err(err) where err is
		// always nil at this point; the missing path itself is the problem.
		ll.Warn().Msg("missing cloudstorage path")
		http.Error(w, "missing cloudstorage path", http.StatusInternalServerError)
		return
	}
	fileGetSignedURL, err := s.getSignedURL(ctx, file.Path)
	if err != nil {
		ll.Warn().Err(err).Msg("unable to get file signed URL")
		http.Error(w, "unable to get file signed URL", http.StatusInternalServerError)
		return
	}
	restFile := &RestFile{
		ID:             file.ID,
		Name:           file.Name,
		Year:           file.Year,
		CreatedAt:      file.CreatedAt,
		Type:           string(file.Type),
		LastModifiedAt: file.LastModifiedAt,
		GetSignedURL:   fileGetSignedURL,
		Metadata:       file.Metadata,
	}
	ll.Info().Msg("returned file by id")
	RespondToRequest(w, restFile)
}
// getFilesByProperty lists the files linked to a property, each with a
// signed download URL; files whose URL cannot be generated are logged and
// skipped.
func (s *Server) getFilesByProperty(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	vars := mux.Vars(r)
	userID, ok := vars["id"]
	if !ok {
		log.Info().Msg("missing user id")
		http.Error(w, "missing user id", http.StatusBadRequest)
		return
	}
	ll := log.With().Str("user_id", userID).Logger()
	propertyID, ok := vars["property_id"]
	if !ok {
		ll.Warn().Msg("property id not set")
		http.Error(w, "property id not set", http.StatusBadRequest)
		return
	}
	ll = ll.With().Str("property_id", propertyID).Logger()
	files, err := s.DBHandle.GetFilesByProperty(userID, propertyID)
	if err != nil {
		ll.Warn().Err(err).Msg("unable to get files by property")
		http.Error(w, "unable to get files by property", http.StatusInternalServerError)
		return
	}
	var results []*RestFile
	for _, f := range files {
		signedURL, urlErr := s.getSignedURL(ctx, f.Path)
		if urlErr != nil {
			ll.Warn().Err(urlErr).Msg("unable to get file signed URL")
			continue
		}
		results = append(results, &RestFile{
			ID:             f.ID,
			Name:           f.Name,
			Year:           f.Year,
			CreatedAt:      f.CreatedAt,
			Type:           string(f.Type),
			LastModifiedAt: f.LastModifiedAt,
			GetSignedURL:   signedURL,
			Metadata:       f.Metadata,
		})
	}
	RespondToRequest(w, results)
}
// deleteFile removes the user's file record and, via the callback handed
// to DeleteFileByID, the backing cloud-storage object.
func (s *Server) deleteFile(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	vars := mux.Vars(r)
	userID, ok := vars["id"]
	if !ok {
		log.Info().Msg("missing user id")
		http.Error(w, "missing user id", http.StatusBadRequest)
		return
	}
	ll := log.With().Str("user_id", userID).Logger()
	fileID, ok := vars["file_id"]
	if !ok {
		ll.Warn().Msg("file id not set")
		http.Error(w, "file id not set", http.StatusBadRequest)
		return
	}
	ll = ll.With().Str("file_id", fileID).Logger()
	// Storage deletion is passed as a callback so DeleteFileByID can
	// coordinate it with the database deletion.
	deleteFileFromCloudstorage := func() func(ctx context.Context, filePath string) error {
		return func(ctx context.Context, filePath string) error {
			return s.deleteStorageFile(ctx, filePath)
		}
	}
	err := s.DBHandle.DeleteFileByID(ctx, userID, fileID, deleteFileFromCloudstorage())
	if err != nil {
		ll.Warn().Err(err).Msg("unable to delete file")
		http.Error(w, "unable to delete file", http.StatusInternalServerError)
		return
	}
	// Superseded by the callback above; kept for reference.
	// key := path.Join(userID, propertyDelimiter, propertyID, fileName)
	// err := s.deleteStorageFile(ctx, key)
	// if err != nil {
	// ll.Warn().Err(err).Msg("unable to delete file")
	// http.Error(w, "unable to delete file", http.StatusInternalServerError)
	// return
	// }
	ll.Info().Msg("file deleted successfully")
	w.Write([]byte("success"))
	return
}
|
package resource
// Drive holds information about a team drive. The drive's payload fields
// are embedded via DriveData, alongside its identifier and version.
type Drive struct {
	ID      ID      `json:"id"`
	Version Version `json:"version"`
	DriveData
}
|
package database
/*type postsRepository struct{}
func (pr postsRepository) GetPosts() ([]*model.Post, error) {
panic("implement me")
}
func NewPostsRepository() repository.PostsRepository {
return &postsRepository{}
}*/
|
// Galang - Golang common utilities
// Copyright (c) 2020-present, gakkiiyomi@gamil.com
//
// gakkiyomi is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
// See the Mulan PSL v2 for more details.
package file
import (
"encoding/json"
"io/ioutil"
"os"
"github.com/beevik/etree"
"github.com/songtianyi/rrframework/logs"
)
// GalangFile is the receiver type for the file utility helpers; it carries
// no state.
type GalangFile byte

// File is the shared entry point for the file helpers.
var File GalangFile
// GetFileSize returns the length in bytes of the file at the specified
// path, or -1 if the file cannot be stat'ed (the error is logged).
func (*GalangFile) GetFileSize(path string) int64 {
	info, err := os.Stat(path)
	if err != nil {
		logs.Error(err)
		return -1
	}
	return info.Size()
}
// IsJSONFile reports whether the file at path contains valid JSON.
// Read errors are logged and reported as false.
func (file *GalangFile) IsJSONFile(path string) bool {
	// os.ReadFile replaces ioutil.ReadFile, deprecated since Go 1.16;
	// the os package is already imported by this file.
	b, err := os.ReadFile(path)
	if err != nil {
		logs.Error(err)
		return false
	}
	return file.IsJSONByte(b)
}
// IsJSONByte reports whether b is syntactically valid JSON.
func (*GalangFile) IsJSONByte(b []byte) bool {
	valid := json.Valid(b)
	return valid
}
// IsXmlFile reports whether the file at path parses as XML.
// Read errors are logged (matching IsJSONFile) and reported as false.
func (file *GalangFile) IsXmlFile(path string) bool {
	// os.ReadFile replaces ioutil.ReadFile, deprecated since Go 1.16.
	b, err := os.ReadFile(path)
	if err != nil {
		// FIX: previously the read error was silently swallowed; log it
		// for consistency with IsJSONFile.
		logs.Error(err)
		return false
	}
	return file.IsXmlByte(b)
}
// IsXmlByte reports whether b parses as an XML document.
func (*GalangFile) IsXmlByte(b []byte) bool {
	err := etree.NewDocument().ReadFromBytes(b)
	return err == nil
}
|
package main
import . "leetcode"
// main is an empty placeholder entry point; the solutions in the imported
// leetcode package are exercised elsewhere.
func main() {
}
/**
 * Definition for a binary tree node.
 * type TreeNode struct {
 *     Val int
 *     Left *TreeNode
 *     Right *TreeNode
 * }
 */
// leafSimilar reports whether two binary trees have the same left-to-right
// sequence of leaf values.
func leafSimilar(root1 *TreeNode, root2 *TreeNode) bool {
	var leaves func(node *TreeNode) []int
	leaves = func(node *TreeNode) []int {
		if node == nil {
			return nil
		}
		if node.Left == nil && node.Right == nil {
			return []int{node.Val}
		}
		return append(leaves(node.Left), leaves(node.Right)...)
	}
	seq1, seq2 := leaves(root1), leaves(root2)
	if len(seq1) != len(seq2) {
		return false
	}
	for i, v := range seq1 {
		if v != seq2[i] {
			return false
		}
	}
	return true
}
|
package matrix
// Matrix represents a two-dimensional array of float64 values stored
// row-major as a slice of rows.
type Matrix [][]float64
// NewMatrix creates a rows×columns matrix, filling every cell with a
// value produced by generator.
func NewMatrix(rows int, columns int, generator func() float64) Matrix {
	m := make(Matrix, rows)
	for i := 0; i < rows; i++ {
		cells := make([]float64, columns)
		for j := 0; j < columns; j++ {
			cells[j] = generator()
		}
		m[i] = cells
	}
	return m
}
// Zeroes returns a float 0.0; useful as a NewMatrix generator.
func Zeroes() float64 {
	var zero float64
	return zero
}
// Ones returns a float 1.0; useful as a NewMatrix generator.
func Ones() float64 {
	const one = 1.0
	return one
}
// Rows returns the number of rows.
func (m Matrix) Rows() int {
	count := len(m)
	return count
}
// Columns returns the number of columns (the length of the first row).
// An empty matrix has zero columns; previously this indexed m[0]
// unconditionally and panicked on an empty matrix.
func (m Matrix) Columns() int {
	if len(m) == 0 {
		return 0
	}
	return len(m[0])
}
// Shape returns the dimensions as [rows, columns].
func (m Matrix) Shape() [2]int {
	shape := [2]int{m.Rows(), m.Columns()}
	return shape
}
// Multiply returns a new matrix with every element of m scaled by f.
// BUG FIX: the previous implementation did "r := m", which copies only the
// slice header — the scaling silently mutated the receiver in place while
// appearing to return an independent product. The result is now a fresh
// matrix and the receiver is left untouched.
func (m Matrix) Multiply(f float64) Matrix {
	r := NewMatrix(m.Rows(), m.Columns(), Zeroes)
	for row := range m {
		for column := range m[row] {
			r[row][column] = m[row][column] * f
		}
	}
	return r
}
// ArrMult computes the dot product of two equal-length vectors: the sum of
// the pairwise products.
func ArrMult(a []float64, b []float64) float64 {
	var sum float64
	for i, av := range a {
		sum += av * b[i]
	}
	return sum
}
// GetRow returns the row at idx. The slice is shared with the matrix, not
// copied.
func (m Matrix) GetRow(idx int) []float64 {
	row := m[idx]
	return row
}
// GetColumn returns a copy of the column at idx.
func (m Matrix) GetColumn(idx int) []float64 {
	column := make([]float64, len(m))
	for i, row := range m {
		column[i] = row[idx]
	}
	return column
}
// Dot returns the matrix product of a (n x m) and b (m x p); each result
// cell is the dot product of a row of a with a column of b.
func Dot(a Matrix, b Matrix) Matrix {
	rows, cols := a.Rows(), b.Columns()
	r := NewMatrix(rows, cols, Zeroes)
	for i := 0; i < rows; i++ {
		row := a.GetRow(i)
		for j := 0; j < cols; j++ {
			r[i][j] = ArrMult(row, b.GetColumn(j))
		}
	}
	return r
}
// MatMul multiplies two same-shaped matrices element-wise (Hadamard
// product).
func MatMul(a Matrix, b Matrix) Matrix {
	r := NewMatrix(a.Rows(), b.Columns(), Zeroes)
	for i, row := range r {
		for j := range row {
			row[j] = a[i][j] * b[i][j]
		}
	}
	return r
}
// Add returns the element-wise sum of two same-shaped matrices.
func Add(a Matrix, b Matrix) Matrix {
	r := NewMatrix(a.Rows(), b.Columns(), Zeroes)
	for i, row := range r {
		for j := range row {
			row[j] = a[i][j] + b[i][j]
		}
	}
	return r
}
// Sub returns the element-wise difference a - b of two same-shaped
// matrices.
func Sub(a Matrix, b Matrix) Matrix {
	r := NewMatrix(a.Rows(), b.Columns(), Zeroes)
	for i, row := range r {
		for j := range row {
			row[j] = a[i][j] - b[i][j]
		}
	}
	return r
}
// Apply returns a new matrix whose cells are f applied to each cell of m.
func Apply(m Matrix, f func(i float64) float64) Matrix {
	r := NewMatrix(m.Rows(), m.Columns(), Zeroes)
	for i, row := range m {
		for j, v := range row {
			r[i][j] = f(v)
		}
	}
	return r
}
|
package binutil
import (
"errors"
"fmt"
)
// minPrintable and maxPrintable bound the printable ASCII range:
// space (0x20) through tilde (0x7e), inclusive.
const minPrintable rune = 0x20
const maxPrintable rune = 0x7e
// StringBytesCheckingAscii returns s as a byte slice after verifying that
// every rune is a printable ASCII character; otherwise it reports the
// first offending character.
func StringBytesCheckingAscii(s string) ([]byte, error) {
	for _, r := range s {
		if r >= minPrintable && r <= maxPrintable {
			continue
		}
		// NOTE(review): errors.New(fmt.Sprintf(...)) is normally written
		// fmt.Errorf(...); kept as-is so both file imports stay in use.
		return nil, errors.New(
			fmt.Sprintf("character %c out of range of printable ASCII characters", r))
	}
	return []byte(s), nil
}
|
package core
import (
"github.com/gorilla/websocket"
)
// App bundles the application's shared dependencies: the websocket
// upgrader used by HTTP handlers, the unexported center and config, and
// the Twitter/Tumblr API clients.
type App struct {
WebSocketUpgrader *websocket.Upgrader
center *Center
config *Config
Twitter *Twitter
Tumblr *Tumblr
}
// NewApp wires up an App from config: it constructs the Twitter and
// Tumblr clients and installs a websocket upgrader with 1 KiB read/write
// buffers. It fails if either social client cannot be created.
func NewApp(config *Config) (*App, error) {
	twitter, err := NewTwitter(config.Twitter)
	if err != nil {
		return nil, err
	}
	tumblr, err := NewTumblr(config.Tumblr)
	if err != nil {
		return nil, err
	}
	upgrader := &websocket.Upgrader{
		ReadBufferSize:  1024,
		WriteBufferSize: 1024,
	}
	app := &App{
		WebSocketUpgrader: upgrader,
		center:            &Center{},
		config:            config,
		Twitter:           twitter,
		Tumblr:            tumblr,
	}
	return app, nil
}
|
package login
import (
"github.com/xeha-gmbh/homelab/shared"
)
// ErrAuth is the module's authentication-failure error, built from the
// shared error factory with code 10 and tag "authentication_error".
var (
ErrAuth = shared.ErrorFactory(10)("authentication_error")
)
|
package main
import (
"fmt"
"log"
)
// User is a simple strongly-typed record (Go struct; comparable to a
// node/JSON object in other languages).
type User struct {
Name string
Age int
}
// Person mirrors User; it is declared separately to demonstrate that
// structs with identical fields are still distinct types.
type Person struct {
Name string
Age int
}
// main demonstrates struct declaration/initialization and the log package
// (Printf formatting, Println, SetFlags).
func main() {
u := User{
Name: "陈方闻",
Age: 18,
} // declare and initialize in one step; the type is inferred
// Printf: the trailing f means formatted output
log.Printf("hello struct %s, age is %d", u.Name, u.Age)
// Println appends a newline
log.Println("hello world")
log.Println("你好")
var p1 Person // var declares the variable and reserves its memory
p1.Name = "Tom"
p1.Age = 30
var p2 = Person{Name: "刘", Age: 20}
// p3 := Person{ Name: "Aaron", Age: 32}
// format
fmt.Println("p2=", p2)
// when debugging an application, log extra detail
// NOTE(review): SetFlags only affects subsequent log calls, so the
// lines above were printed with the default flags.
log.SetFlags(log.Lshortfile | log.Ldate | log.Lmicroseconds)
log.Printf("%s login, age: %d", u.Name, u.Age)
}
|
package observe
import (
"context"
"fmt"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go"
"io"
"testing"
)
// testErr is the sentinel error returned by the test closures below.
var testErr = fmt.Errorf("test error")
// newTestTracer builds a Jaeger tracer with an in-memory reporter and a
// const sampler (always sample) for use in tests. The caller must close
// the returned Closer.
func newTestTracer() (opentracing.Tracer, io.Closer){
reporter := jaeger.NewInMemoryReporter()
sampler := jaeger.NewConstSampler(true)
return jaeger.NewTracer("test", sampler, reporter)
}
// TestError exercises the observer's End path with a non-nil error: the
// deferred obs.End(&retErr) sees testErr via the named return value.
func TestError(t *testing.T) {
var staticF1 = New("test")
f1 := func() (retErr error) {
_, obs := staticF1.FromContext(context.Background())
defer obs.End(&retErr)
return testErr
}
// NOTE(review): f1's returned error is deliberately ignored; the test
// only checks that End handles a non-nil *error without panicking.
f1()
}
|
package util
import "fmt"
// Log renders a bracketed log line from a message type (sign), an
// optional user address, and an action message. When addr is empty the
// user segment is omitted.
func Log(sign, addr, msg string) string {
	if addr != "" {
		return fmt.Sprintf("[type : %v user: %v act: %v] ", sign, addr, msg)
	}
	return fmt.Sprintf("[type : %v act: %v] ", sign, msg)
}
// Loger formats an action message: the "api" sign gets a dedicated arrow
// style, any other sign falls back to the bracketed form used by Log.
// (The misspelled name is kept for compatibility with existing callers.)
func Loger(sign, msg string) string {
	switch sign {
	case "api":
		return fmt.Sprintf("< --API-- act: %v > ", msg)
	default:
		return fmt.Sprintf("[type : %v act: %v] ", sign, msg)
	}
}
|
package test
import "fmt"
// Test prints a version marker; used to check which copy of the package a
// build picked up.
func Test() {
fmt.Println("version TWO")
}
|
package main
import (
"fmt"
"time"
"github.com/subchen/gstack/errors"
)
// createFile always fails, standing in for a file-creation step so the
// error-wrapping demo has a root cause to wrap.
func createFile() error {
	err := errors.New("file not permission")
	return err
}
// writeFile wraps the createFile failure with write-level context via
// errors.Wrap; it returns nil only if createFile succeeds.
func writeFile() error {
	if err := createFile(); err != nil {
		return errors.Wrap(err, "file write error")
	}
	return nil
}
// main prints a wrapped error in plain (%v) and verbose (%+v) forms, then
// the unwrapped root cause, and repeats the printing from a goroutine.
func main() {
err := writeFile()
fmt.Printf("error %%v: %v\n\n", err)
fmt.Printf("error %%+v: %+v\n\n", err)
cause := errors.Cause(err)
fmt.Printf("cause %%v: %v\n\n", cause)
fmt.Printf("cause %%+v: %+v\n\n", cause)
go func() {
err := writeFile()
fmt.Printf("go error %%v: %v\n\n", err)
fmt.Printf("go error %%+v: %+v\n\n", err)
}()
// crude synchronization: give the goroutine a second to finish printing
time.Sleep(1 * time.Second)
}
|
// Copyright (C) 2019 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package services
import (
"net"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/projectcalico/vpp-dataplane/v3/calico-vpp-agent/cni"
"github.com/projectcalico/vpp-dataplane/v3/calico-vpp-agent/common"
"github.com/projectcalico/vpp-dataplane/v3/config"
"github.com/projectcalico/vpp-dataplane/v3/vpplink"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/types"
)
// getCnatBackendDstPort resolves the backend destination port: a numeric
// targetPort wins (0 meaning "unset", falling back to the service port);
// a named targetPort resolves through the endpoint's port.
func getCnatBackendDstPort(servicePort *v1.ServicePort, endpointPort *v1.EndpointPort) uint16 {
	target := servicePort.TargetPort
	if target.Type != intstr.Int {
		// Named target port: use the port number the endpoint exposes.
		return uint16(endpointPort.Port)
	}
	if target.IntVal == 0 {
		// Unset targetport
		return uint16(servicePort.Port)
	}
	return uint16(target.IntVal)
}
// getServicePortProto maps a Kubernetes service protocol to the VPP IP
// protocol, defaulting to TCP for unrecognized values.
func getServicePortProto(proto v1.Protocol) types.IPProto {
	switch proto {
	case v1.ProtocolUDP:
		return types.UDP
	case v1.ProtocolSCTP:
		return types.SCTP
	default:
		// v1.ProtocolTCP and anything unknown both map to TCP.
		return types.TCP
	}
}
// isEndpointAddressLocal reports whether the endpoint address lives on
// this node. A nil address or one without a node name is treated as
// local.
func isEndpointAddressLocal(endpointAddress *v1.EndpointAddress) bool {
	isRemote := endpointAddress != nil &&
		endpointAddress.NodeName != nil &&
		*endpointAddress.NodeName != *config.NodeName
	return !isRemote
}
// getCnatLBType translates our lbType into VPP's: both maglev variants
// use the Maglev balancer, everything else the default one.
func getCnatLBType(lbType lbType) types.CnatLbType {
	switch lbType {
	case lbTypeMaglev, lbTypeMaglevDSR:
		return types.MaglevLB
	default:
		return types.DefaultLB
	}
}
// getCnatVipDstPort picks the VIP-side port: the NodePort when the entry
// is for a node port, the regular service port otherwise.
func getCnatVipDstPort(servicePort *v1.ServicePort, isNodePort bool) uint16 {
	port := servicePort.Port
	if isNodePort {
		port = servicePort.NodePort
	}
	return uint16(port)
}
// buildCnatEntryForServicePort translates one k8s service port into a VPP
// CNAT entry: the VIP is (serviceIP, service/node port) and the backends
// are the matching endpoint addresses.
func buildCnatEntryForServicePort(servicePort *v1.ServicePort, service *v1.Service, ep *v1.Endpoints, serviceIP net.IP, isNodePort bool, svcInfo serviceInfo) *types.CnatTranslateEntry {
backends := make([]types.CnatEndpointTuple, 0)
isLocalOnly := IsLocalOnly(service)
// Node ports always consider all endpoints, local or not.
if isNodePort {
isLocalOnly = false
}
// Find the endpoint subset port that exposes the port we're interested in
for _, endpointSubset := range ep.Subsets {
for _, endpointPort := range endpointSubset.Ports {
if servicePort.Name == endpointPort.Name {
for _, endpointAddress := range endpointSubset.Addresses {
var flags uint8 = 0
// Local-only services skip remote endpoints entirely.
if !isEndpointAddressLocal(&endpointAddress) && isLocalOnly {
continue
}
if !isEndpointAddressLocal(&endpointAddress) {
/* dont NAT to remote endpoints unless this is a nodeport */
if svcInfo.lbType == lbTypeMaglevDSR && !isNodePort {
flags = flags | types.CnatNoNat
}
}
// Skip endpoint addresses that do not parse as IPs.
ip := net.ParseIP(endpointAddress.IP)
if ip != nil {
backend := types.CnatEndpointTuple{
DstEndpoint: types.CnatEndpoint{
Port: getCnatBackendDstPort(servicePort, &endpointPort),
IP: ip,
},
Flags: flags,
}
/* In nodeports, we need to sNAT when endpoint is not local to have a symmetric traffic */
if isNodePort && !isEndpointAddressLocal(&endpointAddress) {
backend.SrcEndpoint.IP = serviceIP
}
backends = append(backends, backend)
}
}
// At most one subset port matches this service port's name.
break
}
}
}
return &types.CnatTranslateEntry{
Proto: getServicePortProto(servicePort.Protocol),
Endpoint: types.CnatEndpoint{
Port: getCnatVipDstPort(servicePort, isNodePort),
IP: serviceIP,
},
Backends: backends,
// Node-port entries target the real node IP rather than a VIP.
IsRealIP: isNodePort,
LbType: getCnatLBType(svcInfo.lbType),
}
}
// GetLocalService builds the dataplane view of a k8s service: one CNAT
// translation entry per service port for each VIP kind (ClusterIP,
// external IPs, load-balancer ingress IPs, and the node IP for
// NodePort/LoadBalancer services), plus the specific routes to advertise
// for externalTrafficPolicy=Local services.
//
// Fix: Spec.AllocateLoadBalancerNodePorts is an optional *bool in the k8s
// API and may be nil; the previous unconditional dereference could panic.
// The API default is true, so nil is treated as "allocate node ports".
func (s *Server) GetLocalService(service *v1.Service, ep *v1.Endpoints) (localService *LocalService) {
	localService = &LocalService{
		Entries:        make([]types.CnatTranslateEntry, 0),
		SpecificRoutes: make([]net.IP, 0),
		ServiceID:      serviceID(&service.ObjectMeta), /* ip.ObjectMeta should yield the same id */
	}
	serviceSpec := s.ParseServiceAnnotations(service.Annotations, service.Name)
	clusterIP := net.ParseIP(service.Spec.ClusterIP)
	nodeIP := s.getNodeIP(vpplink.IsIP6(clusterIP))
	for _, servicePort := range service.Spec.Ports {
		if !clusterIP.IsUnspecified() {
			entry := buildCnatEntryForServicePort(&servicePort, service, ep, clusterIP, false /* isNodePort */, *serviceSpec)
			localService.Entries = append(localService.Entries, *entry)
		}
		for _, eip := range service.Spec.ExternalIPs {
			extIP := net.ParseIP(eip)
			if !extIP.IsUnspecified() {
				entry := buildCnatEntryForServicePort(&servicePort, service, ep, extIP, false /* isNodePort */, *serviceSpec)
				localService.Entries = append(localService.Entries, *entry)
				// Local-only services with live backends get a /32 (/128)
				// route advertised for their external IPs.
				if IsLocalOnly(service) && len(entry.Backends) > 0 {
					localService.SpecificRoutes = append(localService.SpecificRoutes, extIP)
				}
			}
		}
		for _, ingress := range service.Status.LoadBalancer.Ingress {
			ingressIP := net.ParseIP(ingress.IP)
			if !ingressIP.IsUnspecified() {
				entry := buildCnatEntryForServicePort(&servicePort, service, ep, ingressIP, false /* isNodePort */, *serviceSpec)
				localService.Entries = append(localService.Entries, *entry)
				if IsLocalOnly(service) && len(entry.Backends) > 0 {
					localService.SpecificRoutes = append(localService.SpecificRoutes, ingressIP)
				}
			}
		}
		if service.Spec.Type == v1.ServiceTypeNodePort {
			if !nodeIP.IsUnspecified() {
				entry := buildCnatEntryForServicePort(&servicePort, service, ep, nodeIP, true /* isNodePort */, *serviceSpec)
				localService.Entries = append(localService.Entries, *entry)
			}
		}
		// Create NodePort for external LB
		// Note: type=LoadBalancer only makes sense on cloud providers which support external load balancers and the actual
		// creation of the load balancer happens asynchronously.
		allocateNodePorts := service.Spec.AllocateLoadBalancerNodePorts == nil ||
			*service.Spec.AllocateLoadBalancerNodePorts
		if service.Spec.Type == v1.ServiceTypeLoadBalancer && allocateNodePorts {
			if !nodeIP.IsUnspecified() {
				entry := buildCnatEntryForServicePort(&servicePort, service, ep, nodeIP, true /* isNodePort */, *serviceSpec)
				localService.Entries = append(localService.Entries, *entry)
			}
		}
	}
	return
}
// isAddressExternalServiceIP reports whether IPAddress falls inside any
// configured external-IP or load-balancer-IP service range.
func (s *Server) isAddressExternalServiceIP(IPAddress net.IP) bool {
	_, externalNets, lbNets := s.getServiceIPs()
	for _, ipNet := range append(externalNets, lbNets...) {
		if ipNet.Contains(IPAddress) {
			return true
		}
	}
	return false
}
// advertiseSpecificRoute emits pod-address events so BGP withdraws the
// deleted service-specific routes and announces the added ones. Only
// addresses inside the configured external/LB service ranges are
// advertised.
func (s *Server) advertiseSpecificRoute(added []net.IP, deleted []net.IP) {
for _, specificRoute := range deleted {
if s.isAddressExternalServiceIP(specificRoute) {
common.SendEvent(common.CalicoVppEvent{
Type: common.LocalPodAddressDeleted,
Old: cni.NetworkPod{ContainerIP: common.ToMaxLenCIDR(specificRoute), NetworkVni: 0},
})
s.log.Infof("Withdrawing advertisement for service specific route Addresses %+v", specificRoute)
}
}
for _, specificRoute := range added {
if s.isAddressExternalServiceIP(specificRoute) {
common.SendEvent(common.CalicoVppEvent{
Type: common.LocalPodAddressAdded,
New: cni.NetworkPod{ContainerIP: common.ToMaxLenCIDR(specificRoute), NetworkVni: 0},
})
s.log.Infof("Announcing service specific route Addresses %+v", specificRoute)
}
}
}
// deleteServiceEntries removes the given CNAT entries from VPP and from
// serviceStateMap. Entries that are missing, have changed owner since, or
// fail to delete are logged and skipped so the rest still get cleaned up.
func (s *Server) deleteServiceEntries(entries []types.CnatTranslateEntry, oldService *LocalService) {
for _, entry := range entries {
oldServiceState, found := s.serviceStateMap[entry.Key()]
if !found {
s.log.Infof("svc(del) key=%s Cnat entry not found", entry.Key())
continue
}
s.log.Infof("svc(del) key=%s %s vpp-id=%d", entry.Key(), entry.String(), oldServiceState.VppID)
// Another service now owns this translation; leave it alone.
if oldServiceState.OwnerServiceID != oldService.ServiceID {
s.log.Infof("Cnat entry found but changed owner since")
continue
}
err := s.vpp.CnatTranslateDel(oldServiceState.VppID)
if err != nil {
s.log.Errorf("Cnat entry delete errored %s", err)
continue
}
delete(s.serviceStateMap, entry.Key())
}
}
// deleteServiceByName removes every CNAT entry owned by serviceID from
// VPP and from serviceStateMap, holding the server lock for the whole
// sweep. Failed deletions are logged and left in the map.
func (s *Server) deleteServiceByName(serviceID string) {
s.lock.Lock()
defer s.lock.Unlock()
for key, oldServiceState := range s.serviceStateMap {
if oldServiceState.OwnerServiceID != serviceID {
continue
}
err := s.vpp.CnatTranslateDel(oldServiceState.VppID)
if err != nil {
s.log.Errorf("Cnat entry delete errored %s", err)
continue
}
delete(s.serviceStateMap, key)
}
}
// sameServiceEntries re-stamps unchanged entries with the (possibly new)
// owning service ID so later deletions attribute them correctly.
func (s *Server) sameServiceEntries(entries []types.CnatTranslateEntry, service *LocalService) {
	for _, entry := range entries {
		key := entry.Key()
		state, ok := s.serviceStateMap[key]
		if !ok {
			s.log.Warnf("Cnat entry not found key=%s", key)
			continue
		}
		state.OwnerServiceID = service.ServiceID
		s.serviceStateMap[key] = state
	}
}
// addServiceEntries programs each CNAT translation into VPP and records
// its VPP id and owning service in serviceStateMap. Failures are logged
// and skipped so one bad entry does not block the rest.
func (s *Server) addServiceEntries(entries []types.CnatTranslateEntry, service *LocalService) {
for _, entry := range entries {
// NOTE(review): &entry aliases the loop variable; safe only if
// CnatTranslateAdd does not retain the pointer — confirm.
entryID, err := s.vpp.CnatTranslateAdd(&entry)
if err != nil {
s.log.Errorf("svc(add) Error adding translation %s %s", entry.String(), err)
continue
}
s.log.Infof("svc(add) key=%s %s vpp-id=%d", entry.Key(), entry.String(), entryID)
s.serviceStateMap[entry.Key()] = ServiceState{
OwnerServiceID: service.ServiceID,
VppID: entryID,
}
}
}
|
package main
//Create a value and assign it to a variable.
//Print the address of that value.
import "fmt"
// main assigns a value to a variable, takes its address, and prints that
// pointer.
func main() {
	value := 789
	ptr := &value
	fmt.Println(ptr)
}
|
package task
import "context"
// impl holds the registered Task implementation; it is nil until
// RegisterImplementor is called.
var (
impl Task
)
// Implementor returns the task service implementor.
// It is nil if RegisterImplementor has not been called.
func Implementor() Task {
return impl
}
// RegisterImplementor registers the task service implementor.
// NOTE(review): not synchronized; presumably called once during startup
// before Implementor is used — confirm.
func RegisterImplementor(c Task) {
impl = c
}
// Executor runs a unit of scheduled work with the given context and
// argument, returning any execution error.
type Executor interface {
Execute(context.Context, interface{}) error
}
// ExecutorFunc adapts a plain function to the Executor interface.
type ExecutorFunc func(ctx context.Context, arg interface{}) error
// Execute implements Executor by invoking the function itself.
func (fn ExecutorFunc) Execute(ctx context.Context, arg interface{}) error {
return fn(ctx, arg)
}
// Task is the scheduling service: it registers executors to run on a cron
// spec.
type Task interface {
// Schedule a task.
// Supported spec, refer: https://github.com/robfig/cron/tree/v3.0.1
Schedule(spec string, exec Executor, opts ...Option) error
}
|
package cmd
import (
"fmt"
"github.com/bb-orz/gt/libs/libStarter"
"github.com/bb-orz/gt/utils"
"github.com/urfave/cli/v2"
"io"
)
// StarterCommand describes the "starter" CLI subcommand, which scaffolds
// a new goinfras starter (flags: --name/-n, --output_path/-o).
func StarterCommand() *cli.Command {
return &cli.Command{
Name: "starter",
Usage: "Add Goinfras Starter",
UsageText: "gt starter [--name|-n=][StarterName]",
Description: "The starter command create a new starter base on goinfras ,this command will generate some necessary files or dir in starter directory .",
Flags: []cli.Flag{
&cli.StringFlag{Name: "name", Aliases: []string{"n"}, Value: "example"},
&cli.StringFlag{Name: "output_path", Aliases: []string{"o"}, Value: "./starter"},
},
Action: StarterCommandAction,
}
}
// StarterCommandAction scaffolds a new goinfras starter: it creates
// starter.go, config.go, run_test.go, x.go and README.md under
// <output_path>/<name> and writes formatted boilerplate into each.
// Existing files abort with a warning; errors are logged and swallowed
// (the action always returns nil).
//
// Fix: the x.go and README.md branches previously fell through after a
// CreateFile failure instead of returning like the other three branches;
// they now bail out consistently.
func StarterCommandAction(ctx *cli.Context) error {
	var err error
	var cmdParams = libStarter.CmdParams{
		Name:       ctx.String("name"),
		OutputPath: ctx.String("output_path"),
	}
	// Create the starter/config/test/readme/x files.
	// create service,server,client file
	var starterFileWriter, configFileWriter, testingFileWriter, readmeFileWriter, xFileWriter io.Writer
	var starterFileName, configFileName, testingFileName, readmeFileName, xFileName string
	// Create the starter file.
	starterFileName = cmdParams.OutputPath + "/" + cmdParams.Name + "/starter.go"
	if !IsServiceFileExist(starterFileName) {
		if starterFileWriter, err = utils.CreateFile(starterFileName); err != nil {
			utils.CommandLogger.Error(utils.CommandNameStarter, err)
			return nil
		} else {
			utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Create %s Starter File Successful! >>> FilePath:%s", utils.CamelString(cmdParams.Name), starterFileName))
			// Write the formatted template.
			if err = libStarter.NewFormatterStarter().Format(&cmdParams).WriteOut(starterFileWriter); err != nil {
				utils.CommandLogger.Error(utils.CommandNameStarter, err)
				return nil
			} else {
				utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Write %s Starter File Successful!", utils.CamelString(cmdParams.Name)))
			}
		}
	} else {
		utils.CommandLogger.Warning(utils.CommandNameStarter, fmt.Sprintf("%s Starter File Is Exist!", utils.CamelString(cmdParams.Name)))
		return nil
	}
	// Create the starter config file.
	configFileName = cmdParams.OutputPath + "/" + cmdParams.Name + "/config.go"
	if !IsServiceFileExist(configFileName) {
		if configFileWriter, err = utils.CreateFile(configFileName); err != nil {
			utils.CommandLogger.Error(utils.CommandNameStarter, err)
			return nil
		} else {
			utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Create %s Starter Config File Successful! >>> FilePath:%s", utils.CamelString(cmdParams.Name), configFileName))
			// Write the formatted template.
			if err = libStarter.NewFormatterStarterConfig().Format(&cmdParams).WriteOut(configFileWriter); err != nil {
				utils.CommandLogger.Error(utils.CommandNameStarter, err)
				return nil
			} else {
				utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Write %s Starter Config File Successful!", utils.CamelString(cmdParams.Name)))
			}
		}
	} else {
		utils.CommandLogger.Warning(utils.CommandNameStarter, fmt.Sprintf("%s Starter Config File Is Exist!", utils.CamelString(cmdParams.Name)))
		return nil
	}
	// Create the starter testing file.
	testingFileName = cmdParams.OutputPath + "/" + cmdParams.Name + "/run_test.go"
	if !IsServiceFileExist(testingFileName) {
		if testingFileWriter, err = utils.CreateFile(testingFileName); err != nil {
			utils.CommandLogger.Error(utils.CommandNameStarter, err)
			return nil
		} else {
			utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Create %s Starter Testing File Successful! >>> FilePath:%s", utils.CamelString(cmdParams.Name), testingFileName))
			// Write the formatted template.
			if err = libStarter.NewFormatterStarterTesting().Format(&cmdParams).WriteOut(testingFileWriter); err != nil {
				utils.CommandLogger.Error(utils.CommandNameStarter, err)
				return nil
			} else {
				utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Write %s Starter Testing File Successful!", utils.CamelString(cmdParams.Name)))
			}
		}
	} else {
		utils.CommandLogger.Warning(utils.CommandNameStarter, fmt.Sprintf("%s Starter Testing File Is Exist!", utils.CamelString(cmdParams.Name)))
		return nil
	}
	// Create the starter x file.
	xFileName = cmdParams.OutputPath + "/" + cmdParams.Name + "/x.go"
	if !IsServiceFileExist(xFileName) {
		if xFileWriter, err = utils.CreateFile(xFileName); err != nil {
			utils.CommandLogger.Error(utils.CommandNameStarter, err)
			// Fix: previously fell through on failure.
			return nil
		} else {
			utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Create %s Starter X Instence File Successful! >>> FilePath:%s", utils.CamelString(cmdParams.Name), xFileName))
			// Write the formatted template.
			if err = libStarter.NewFormatterStarterX().Format(&cmdParams).WriteOut(xFileWriter); err != nil {
				utils.CommandLogger.Error(utils.CommandNameStarter, err)
				return nil
			} else {
				utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Write %s Starter X Instence File Successful!", utils.CamelString(cmdParams.Name)))
			}
		}
	} else {
		utils.CommandLogger.Warning(utils.CommandNameStarter, fmt.Sprintf("%s Starter X Instence File Is Exist!", utils.CamelString(cmdParams.Name)))
		return nil
	}
	// Create the starter README file.
	readmeFileName = cmdParams.OutputPath + "/" + cmdParams.Name + "/README.md"
	if !IsServiceFileExist(readmeFileName) {
		if readmeFileWriter, err = utils.CreateFile(readmeFileName); err != nil {
			utils.CommandLogger.Error(utils.CommandNameStarter, err)
			// Fix: previously fell through on failure.
			return nil
		} else {
			utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Create %s Starter README.md File Successful! >>> FilePath:%s", utils.CamelString(cmdParams.Name), readmeFileName))
			// Write the formatted template.
			if err = libStarter.NewFormatterStarterReadme().Format(&cmdParams).WriteOut(readmeFileWriter); err != nil {
				utils.CommandLogger.Error(utils.CommandNameStarter, err)
				return nil
			} else {
				utils.CommandLogger.OK(utils.CommandNameStarter, fmt.Sprintf("Write %s Starter README.md File Successful!", utils.CamelString(cmdParams.Name)))
			}
		}
	} else {
		utils.CommandLogger.Warning(utils.CommandNameStarter, fmt.Sprintf("%s Starter README.md File Is Exist!", utils.CamelString(cmdParams.Name)))
		return nil
	}
	utils.CommandLogger.Info(utils.CommandNameStarter, fmt.Sprintf("Please implement %s starter ...", cmdParams.Name))
	utils.CommandLogger.Info(utils.CommandNameStarter, "Please register starter in app/register.go")
	return nil
}
|
package chain
import (
"fmt"
"net"
"sync"
"sync/atomic"
"github.com/btcsuite/btcd/btcjson"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/ticker"
)
const (
// rawBlockZMQCommand is the command used to receive raw block
// notifications from bitcoind through ZMQ.
rawBlockZMQCommand = "rawblock"
// rawTxZMQCommand is the command used to receive raw transaction
// notifications from bitcoind through ZMQ.
rawTxZMQCommand = "rawtx"
// maxRawBlockSize is the maximum size in bytes for a raw block received
// from bitcoind through ZMQ (4e6 = 4 MB).
maxRawBlockSize = 4e6
// maxRawTxSize is the maximum size in bytes for a raw transaction
// received from bitcoind through ZMQ.
maxRawTxSize = maxRawBlockSize
// seqNumLen is the length of the sequence number of a message sent from
// bitcoind through ZMQ.
seqNumLen = 4
// errBlockPrunedStr is the error message returned by bitcoind upon
// calling GetBlock on a pruned block.
errBlockPrunedStr = "Block not available (pruned data)"
)
// BitcoindConfig contains all of the parameters required to establish a
// connection to a bitcoind's RPC.
type BitcoindConfig struct {
// ChainParams are the chain parameters the bitcoind server is running
// on.
ChainParams *chaincfg.Params
// Host is the IP address and port of the bitcoind's RPC server.
Host string
// User is the username to use to authenticate to bitcoind's RPC server.
User string
// Pass is the passphrase to use to authenticate to bitcoind's RPC
// server.
Pass string
// ZMQConfig holds the configuration settings required for setting up
// zmq connections to bitcoind.
ZMQConfig *ZMQConfig
// PollingConfig holds the configuration settings required for using
// RPC polling for block and transaction notifications instead of the
// ZMQ interface.
PollingConfig *PollingConfig
// Dialer is a closure we'll use to dial Bitcoin peers. If the chain
// backend is running over Tor, this must support dialing peers over Tor
// as well.
Dialer Dialer
// PrunedModeMaxPeers is the maximum number of peers we'll attempt to
// retrieve pruned blocks from.
//
// NOTE: This only applies for pruned bitcoind nodes.
PrunedModeMaxPeers int
}
// BitcoindConn represents a persistent client connection to a bitcoind node
// that listens for events read from a ZMQ connection.
type BitcoindConn struct {
started int32 // To be used atomically.
stopped int32 // To be used atomically.
// rescanClientCounter is an atomic counter that assigns a unique ID to
// each new bitcoind rescan client using the current bitcoind
// connection.
rescanClientCounter uint64
cfg BitcoindConfig
// client is the RPC client to the bitcoind node.
client *rpcclient.Client
// prunedBlockDispatcher handles all of the pruned block requests.
//
// NOTE: This is nil when the bitcoind node is not pruned.
prunedBlockDispatcher *PrunedBlockDispatcher
// events handles the block and transaction events that are received or
// retrieved from bitcoind.
events BitcoindEvents
// rescanClients is the set of active bitcoind rescan clients to which
// ZMQ event notifications will be sent to.
// rescanClientsMtx guards all access to the rescanClients map.
rescanClientsMtx sync.Mutex
rescanClients map[uint64]*BitcoindClient
quit chan struct{}
wg sync.WaitGroup
}
// Dialer represents a way to dial Bitcoin peers. If the chain backend is
// running over Tor, this must support dialing peers over Tor as well.
// The string argument is the address of the peer to connect to.
type Dialer = func(string) (net.Conn, error)
// NewBitcoindConn creates a client connection to the node described by the host
// string. The ZMQ connections are established immediately to ensure liveness.
// If the remote node does not operate on the same bitcoin network as described
// by the passed chain parameters, the connection will be disconnected.
//
// Fix: the genesis-derived network was previously stored in a local named
// `net`, shadowing the imported net package for the rest of the function;
// it is renamed to currentNet.
func NewBitcoindConn(cfg *BitcoindConfig) (*BitcoindConn, error) {
	clientCfg := &rpcclient.ConnConfig{
		Host:                 cfg.Host,
		User:                 cfg.User,
		Pass:                 cfg.Pass,
		DisableAutoReconnect: false,
		DisableConnectOnNew:  true,
		DisableTLS:           true,
		HTTPPostMode:         true,
	}
	client, err := rpcclient.New(clientCfg, nil)
	if err != nil {
		return nil, err
	}
	// Verify that the node is running on the expected network.
	currentNet, err := getCurrentNet(client)
	if err != nil {
		return nil, err
	}
	if currentNet != cfg.ChainParams.Net {
		return nil, fmt.Errorf("expected network %v, got %v",
			cfg.ChainParams.Net, currentNet)
	}
	// Check if the node is pruned, as we'll need to perform additional
	// operations if so.
	chainInfo, err := client.GetBlockChainInfo()
	if err != nil {
		return nil, fmt.Errorf("unable to determine if bitcoind is "+
			"pruned: %v", err)
	}
	// Only initialize the PrunedBlockDispatcher when the connected bitcoind
	// node is pruned.
	var prunedBlockDispatcher *PrunedBlockDispatcher
	if chainInfo.Pruned {
		prunedBlockDispatcher, err = NewPrunedBlockDispatcher(
			&PrunedBlockDispatcherConfig{
				ChainParams:        cfg.ChainParams,
				NumTargetPeers:     cfg.PrunedModeMaxPeers,
				Dial:               cfg.Dialer,
				GetPeers:           client.GetPeerInfo,
				GetNodeAddresses:   client.GetNodeAddresses,
				PeerReadyTimeout:   defaultPeerReadyTimeout,
				RefreshPeersTicker: ticker.New(defaultRefreshPeersInterval),
				MaxRequestInvs:     wire.MaxInvPerMsg,
			},
		)
		if err != nil {
			return nil, err
		}
	}
	bc := &BitcoindConn{
		cfg:                   *cfg,
		client:                client,
		prunedBlockDispatcher: prunedBlockDispatcher,
		rescanClients:         make(map[uint64]*BitcoindClient),
		quit:                  make(chan struct{}),
	}
	bc.events, err = NewBitcoindEventSubscriber(cfg, client)
	if err != nil {
		return nil, err
	}
	return bc, nil
}
// Start attempts to establish a RPC and ZMQ connection to a bitcoind node. If
// successful, a goroutine is spawned to read events from the ZMQ connection.
// It's possible for this function to fail due to a limited number of connection
// attempts. This is done to prevent waiting forever on the connection to be
// established in the case that the node is down.
func (c *BitcoindConn) Start() error {
// CAS guard makes Start idempotent: only the first call proceeds.
if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
return nil
}
// If we're connected to a pruned backend, we'll need to also start our
// pruned block dispatcher to handle pruned block requests.
if c.prunedBlockDispatcher != nil {
log.Debug("Detected pruned bitcoind backend")
if err := c.prunedBlockDispatcher.Start(); err != nil {
return err
}
}
// The Add(2) pairs with the two fan-out goroutines below; each calls
// wg.Done on exit.
c.wg.Add(2)
go c.sendBlockToClients()
go c.sendTxToClients()
return c.events.Start()
}
// Stop terminates the RPC and ZMQ connection to a bitcoind node and removes any
// active rescan clients.
//
// Fixes: (1) the rescanClients map was iterated without holding
// rescanClientsMtx, racing with AddClient/RemoveClient and the
// notification senders — a snapshot is now taken under the lock (and the
// lock is released before calling client.Stop to avoid lock-ordering
// issues); (2) log.Errorf used the %w verb, which is only meaningful to
// fmt.Errorf — replaced with %v.
func (c *BitcoindConn) Stop() {
	if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
		return
	}
	// Snapshot the active clients under the mutex, then stop them outside
	// of it.
	c.rescanClientsMtx.Lock()
	clients := make([]*BitcoindClient, 0, len(c.rescanClients))
	for _, client := range c.rescanClients {
		clients = append(clients, client)
	}
	c.rescanClientsMtx.Unlock()
	for _, client := range clients {
		client.Stop()
	}
	close(c.quit)
	c.client.Shutdown()
	if err := c.events.Stop(); err != nil {
		log.Errorf("error shutting down bitcoind events: %v", err)
	}
	if c.prunedBlockDispatcher != nil {
		c.prunedBlockDispatcher.Stop()
	}
	c.client.WaitForShutdown()
	c.wg.Wait()
}
// sendBlockToClients is used to notify all rescan clients of a new block. It
// MUST be run in a goroutine.
func (c *BitcoindConn) sendBlockToClients() {
defer c.wg.Done()
// sendBlock is a helper function that sends the given block to each
// of the rescan clients
sendBlock := func(block *wire.MsgBlock) {
c.rescanClientsMtx.Lock()
defer c.rescanClientsMtx.Unlock()
for _, client := range c.rescanClients {
// The client.quit case drops the block for a stopping client
// rather than blocking the whole fan-out; c.quit aborts
// delivery entirely.
select {
case client.blockNtfns <- block:
case <-client.quit:
case <-c.quit:
return
}
}
}
var block *wire.MsgBlock
for {
// Block until a new block notification arrives or we are shut down.
select {
case block = <-c.events.BlockNotifications():
case <-c.quit:
return
}
sendBlock(block)
}
}
// sendTxToClients is used to notify all rescan clients of a new transaction.
// It MUST be run as a goroutine.
func (c *BitcoindConn) sendTxToClients() {
defer c.wg.Done()
// sendTx delivers tx to every rescan client while holding the clients
// mutex; a stopping client or connection aborts its delivery.
sendTx := func(tx *wire.MsgTx) {
c.rescanClientsMtx.Lock()
defer c.rescanClientsMtx.Unlock()
for _, client := range c.rescanClients {
select {
case client.txNtfns <- tx:
case <-client.quit:
case <-c.quit:
return
}
}
}
var tx *wire.MsgTx
for {
// Block until a new transaction notification arrives or we shut down.
select {
case tx = <-c.events.TxNotifications():
case <-c.quit:
return
}
sendTx(tx)
}
}
// getCurrentNet returns the network on which the bitcoind node is
// running, identified by the hash of its genesis block.
func getCurrentNet(client *rpcclient.Client) (wire.BitcoinNet, error) {
	genesis, err := client.GetBlockHash(0)
	if err != nil {
		return 0, err
	}
	known := []*chaincfg.Params{
		&chaincfg.TestNet3Params,
		&chaincfg.RegressionNetParams,
		&chaincfg.SigNetParams,
		&chaincfg.MainNetParams,
	}
	for _, params := range known {
		if *genesis == *params.GenesisHash {
			return params.Net, nil
		}
	}
	return 0, fmt.Errorf("unknown network with genesis hash %v", genesis)
}
// NewBitcoindClient returns a bitcoind client using the current bitcoind
// connection. This allows us to share the same connection using multiple
// clients.
func (c *BitcoindConn) NewBitcoindClient() *BitcoindClient {
return &BitcoindClient{
quit: make(chan struct{}),
// Atomically allocate the next unique rescan-client id.
id: atomic.AddUint64(&c.rescanClientCounter, 1),
chainConn: c,
rescanUpdate: make(chan interface{}),
watchedAddresses: make(map[string]struct{}),
watchedOutPoints: make(map[wire.OutPoint]struct{}),
watchedTxs: make(map[chainhash.Hash]struct{}),
notificationQueue: NewConcurrentQueue(20),
// Buffered so the connection's fan-out goroutines rarely block.
txNtfns: make(chan *wire.MsgTx, 1000),
blockNtfns: make(chan *wire.MsgBlock, 100),
mempool: make(map[chainhash.Hash]struct{}),
expiredMempool: make(map[int32]map[chainhash.Hash]struct{}),
}
}
// AddClient adds a client to the set of active rescan clients of the current
// chain connection. This allows the connection to include the specified client
// in its notification delivery.
//
// NOTE: This function is safe for concurrent access.
func (c *BitcoindConn) AddClient(client *BitcoindClient) {
c.rescanClientsMtx.Lock()
defer c.rescanClientsMtx.Unlock()
c.rescanClients[client.id] = client
}
// RemoveClient removes the client with the given ID from the set of active
// rescan clients. Once removed, the client will no longer receive block and
// transaction notifications from the chain connection.
// Removing an unknown id is a no-op.
//
// NOTE: This function is safe for concurrent access.
func (c *BitcoindConn) RemoveClient(id uint64) {
c.rescanClientsMtx.Lock()
defer c.rescanClientsMtx.Unlock()
delete(c.rescanClients, id)
}
// isBlockPrunedErr reports whether err is bitcoind's "block pruned"
// response from the GetBlock RPC: an RPCError with the misc code and the
// known pruned-data message.
func isBlockPrunedErr(err error) bool {
	rpcErr, ok := err.(*btcjson.RPCError)
	if !ok {
		return false
	}
	return rpcErr.Code == btcjson.ErrRPCMisc &&
		rpcErr.Message == errBlockPrunedStr
}
// GetBlock returns a raw block from the server given its hash. If the server
// has already pruned the block, it will be retrieved from one of its peers.
func (c *BitcoindConn) GetBlock(hash *chainhash.Hash) (*wire.MsgBlock, error) {
block, err := c.client.GetBlock(hash)
// Got the block from the backend successfully, return it.
if err == nil {
return block, nil
}
// We failed getting the block from the backend for whatever reason. If
// it wasn't due to the block being pruned, return the error
// immediately.
if !isBlockPrunedErr(err) || c.prunedBlockDispatcher == nil {
return nil, err
}
// Now that we know the block has been pruned for sure, request it from
// our backend peers.
blockChan, errChan := c.prunedBlockDispatcher.Query(
[]*chainhash.Hash{hash},
)
// Wait for either the block, a query error, or shutdown.
for {
select {
case block := <-blockChan:
return block, nil
case err := <-errChan:
if err != nil {
return nil, err
}
// errChan fired before blockChan with a nil error, wait
// for the block now.
case <-c.quit:
return nil, ErrBitcoindClientShuttingDown
}
}
}
// isASCII reports whether every character of s lies in the printable
// ASCII range (space 0x20 through tilde 0x7e).
func isASCII(s string) bool {
	for _, r := range s {
		if r < ' ' || r > '~' {
			return false
		}
	}
	return true
}
|
package testhelpers
import (
"testing"
"github.com/gobuffalo/httptest"
"github.com/ory/viper"
"github.com/ory/kratos/driver"
"github.com/ory/kratos/driver/configuration"
"github.com/ory/kratos/x"
)
// NewKratosServer spins up an in-memory public/admin HTTP server pair,
// points the viper self-service URLs at them, registers the registry's
// routes, and schedules both servers to close when the test finishes.
func NewKratosServer(t *testing.T, reg driver.Registry) (public, admin *httptest.Server) {
	routerPublic := x.NewRouterPublic()
	routerAdmin := x.NewRouterAdmin()

	public = httptest.NewServer(routerPublic)
	admin = httptest.NewServer(routerAdmin)

	viper.Set(configuration.ViperKeyURLsSelfPublic, public.URL)
	viper.Set(configuration.ViperKeyURLsSelfAdmin, admin.URL)
	reg.RegisterRoutes(routerPublic, routerAdmin)

	t.Cleanup(public.Close)
	t.Cleanup(admin.Close)
	return public, admin
}
|
// An interface type is defined as a set of method signatures.
// A value of an interface type can hold any value that implements those methods.
package main
import (
intf "github.com/parit90/interfaces/first"
sendf "github.com/parit90/interfaces/second"
)
/*
Interfaces have two main uses:
 1. As a type that can hold any value implementing the interface.
 2. As a way to group related behavior into a collection of method signatures.
*/
// main exercises the two interface example packages: the first/second
// examples from the intf package, then the sendf package's example.
func main() {
	intf.Firsteg()
	intf.Secondeg()
	sendf.SecondIntf()
}
|
// Copyright 2020 Kuei-chun Chen. All rights reserved.
package mdb
import (
"context"
"os"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var UnitTestURL = "mongodb://localhost/?replicaSet=replset"
// getMongoClient connects to the MongoDB instance named by the
// DATABASE_URL environment variable (falling back to UnitTestURL,
// which it also updates as a side effect) and returns the client,
// panicking if the connection cannot be established.
func getMongoClient() *mongo.Client {
	if fromEnv := os.Getenv("DATABASE_URL"); fromEnv != "" {
		UnitTestURL = fromEnv
	}

	client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(UnitTestURL))
	if err != nil {
		panic(err)
	}
	return client
}
|
package stringutils
import (
"encoding/hex"
"regexp"
"strings"
)
// RemoveDuplicates returns a new slice with duplicate strings removed,
// keeping the first occurrence of each. Two strings are considered
// duplicates when they compare equal after trimming surrounding
// whitespace and lowercasing; the retained element keeps its original
// form. (Fixes doc typo and documents the trimming behavior.)
func RemoveDuplicates(strs []string) []string {
	seen := make(map[string]struct{}, len(strs))
	var res []string
	for _, s := range strs {
		key := strings.ToLower(strings.TrimSpace(s))
		if _, dup := seen[key]; dup {
			continue
		}
		seen[key] = struct{}{}
		res = append(res, s)
	}
	return res
}
// ContainsStringMatch reports whether the regular expression match has at
// least one match in str. The pattern must be valid; an invalid pattern
// panics (regexp.MustCompile).
func ContainsStringMatch(match string, str string) bool {
	return regexp.MustCompile(match).MatchString(str)
}
// ContainsStringMatchInSlice reports whether any pattern in slice
// matches str; it stops at the first matching pattern.
func ContainsStringMatchInSlice(slice []string, str string) bool {
	for _, pattern := range slice {
		if ContainsStringMatch(pattern, str) {
			return true
		}
	}
	return false
}
// ContainsBytesMatch hex-encodes val and reports whether the regex
// match (written against the hex form) matches it.
func ContainsBytesMatch(match string, val []byte) bool {
	hexForm := hex.EncodeToString(val)
	return ContainsStringMatch(match, hexForm)
}
// ContainsBytesMatchInSlice reports whether any hex-string regex in
// slice matches the hex encoding of val; it stops at the first match.
func ContainsBytesMatchInSlice(slice []string, val []byte) bool {
	for _, pattern := range slice {
		if ContainsBytesMatch(pattern, val) {
			return true
		}
	}
	return false
}
|
package amelia
import (
"fmt"
"io/ioutil"
"path/filepath"
"time"
)
// User represents a GitHub user.
type User struct {
Login *string `json:"login,omitempty"`
Name *string `json:"name,omitempty"`
}
// GistFilename represents filename on a gist.
type GistFilename string
// GistFile represents a file on a gist.
type GistFile struct {
Filename *string `json:"filename,omitempty"`
Content *string `json:"content,omitempty"`
}
// Gist represents a GitHub gist.
type Gist struct {
Description *string `json:"description,omitempty"`
Public *bool `json:"public,omitempty"`
Owner *User `json:"owner,omitempty"`
Files map[GistFilename]GistFile `json:"files,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
CreatedAt *time.Time `json:"created_at,omitempty"`
}
// NewGist reads the given files and creates a new Gist
func NewGist(description *string, public *bool, fileNames []string) (*Gist, error) {
files := make(map[GistFilename]GistFile)
for _, fileName := range fileNames {
raw, err := ioutil.ReadFile(fileName)
if err != nil {
return nil, fmt.Errorf("unable to read file: %v", err)
}
baseName := filepath.Base(fileName)
content := string(raw)
files[GistFilename(baseName)] = GistFile{
Filename: &baseName,
Content: &content,
}
}
g := &Gist{
Description: description,
Public: public,
Files: files,
}
return g, nil
}
|
package advent
import (
"bufio"
"fmt"
"os"
"strconv"
)
func day1() error {
i, err := os.Open("day1.input")
if err != nil {
return err
}
defer i.Close()
var frequency int
frequencies := []int{}
reach := map[int]int{0: 1}
scanner := bufio.NewScanner(i)
for scanner.Scan() {
i, err := strconv.Atoi(scanner.Text())
if err != nil {
return err
}
frequency += i
frequencies = append(frequencies, i)
}
fmt.Printf("Day 1 part1: %d\n", frequency)
if err := scanner.Err(); err != nil {
return err
}
frequency = 0
part2Done := false
for part2Done == false {
for _, f := range frequencies {
frequency += f
reach[frequency]++
if reach[frequency] == 2 {
fmt.Printf("Day 1 part2: %d\n", frequency)
part2Done = true
break
}
}
}
return nil
}
|
package main
import (
"fmt"
"os"
)
// main opens D:/a.txt read-write and writes "hello" at the start and at
// offset 25. Fixed: the original kept using the file handle after a
// failed open and silently discarded write errors.
func main() {
	// os.Open opens read-only; os.OpenFile(name, flags, perm) lets the
	// access mode and permissions be chosen explicitly.
	fp, err := os.OpenFile("D:/a.txt", os.O_RDWR, 6)
	if err != nil {
		fmt.Println("打开文件失败")
		return // no usable handle — stop here
	}
	defer fp.Close()

	if _, err := fp.WriteString("hello"); err != nil {
		fmt.Println(err)
	}
	if _, err := fp.WriteAt([]byte("hello"), 25); err != nil {
		fmt.Println(err)
	}
}
|
// Copyright 2016 The Gem Authors. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package gem
import (
"net"
"os"
"github.com/go-gem/log"
"github.com/go-gem/sessions"
"github.com/valyala/fasthttp"
)
const (
	// Gem name
	name = "Gem"
	// Gem version
	version = "0.0.1"
)

var (
	// defaultLogger writes all levels to stderr; Server.SetLogger can
	// override it per server.
	defaultLogger = log.New(os.Stderr, log.LstdFlags, log.LevelAll)
)

// Name returns server name.
func Name() string {
	return name
}

// Version returns current version of Gem.
func Version() string {
	return version
}
// Middleware interface.
//
// A Middleware wraps a Handler and returns a new Handler that can run
// extra logic around the wrapped one.
type Middleware interface {
	Handle(next Handler) Handler
}

// Server an extended edition of fasthttp.Server,
// see fasthttp.Server for details.
type Server struct {
	*fasthttp.Server
	// logger used for server-level logging; defaults to defaultLogger.
	logger log.Logger
	// sessions store
	sessionsStore sessions.Store
}
// New returns a new Server instance preconfigured with the Gem server
// name and the package-level default logger.
func New() *Server {
	srv := &Server{
		Server: &fasthttp.Server{Name: name},
		logger: defaultLogger,
	}
	return srv
}
// SetLogger set logger.
func (s *Server) SetLogger(logger log.Logger) {
	s.logger = logger
}

// SetSessionsStore set sessions store.
// (Comment fixed: it previously read "SetSessionStore", which does not
// match the exported method name.)
func (s *Server) SetSessionsStore(store sessions.Store) {
	s.sessionsStore = store
}
// Init for testing, should not invoke this method anyway.
func (s *Server) Init(handler HandlerFunc) {
	s.init(handler)
}

// init initialize server.
//
// It installs a fasthttp handler that acquires a Context per request,
// releases it via close when the handler returns, and invokes the
// user-supplied handler in between.
func (s *Server) init(handler HandlerFunc) {
	// Initialize fasthttp.Server's Handler.
	s.Server.Handler = func(ctx *fasthttp.RequestCtx) {
		c := acquireContext(s, ctx)
		defer c.close()
		handler(c)
	}
}
// The methods below mirror fasthttp.Server's serving entry points; each
// installs the Gem handler via init before delegating to the embedded
// fasthttp.Server.

// ListenAndServe serves HTTP requests from the given TCP addr.
func (s *Server) ListenAndServe(addr string, handler HandlerFunc) error {
	s.init(handler)
	return s.Server.ListenAndServe(addr)
}

// ListenAndServeUNIX serves HTTP requests from the given UNIX addr.
//
// The function deletes existing file at addr before starting serving.
//
// The server sets the given file mode for the UNIX addr.
func (s *Server) ListenAndServeUNIX(addr string, mode os.FileMode, handler HandlerFunc) error {
	s.init(handler)
	return s.Server.ListenAndServeUNIX(addr, mode)
}

// ListenAndServeTLS serves HTTPS requests from the given TCP4 addr.
//
// certFile and keyFile are paths to TLS certificate and key files.
//
// Pass custom listener to Serve if you need listening on non-TCP4 media
// such as IPv6.
func (s *Server) ListenAndServeTLS(addr, certFile, keyFile string, handler HandlerFunc) error {
	s.init(handler)
	return s.Server.ListenAndServeTLS(addr, certFile, keyFile)
}

// ListenAndServeTLSEmbed serves HTTPS requests from the given TCP4 addr.
//
// certData and keyData must contain valid TLS certificate and key data.
//
// Pass custom listener to Serve if you need listening on arbitrary media
// such as IPv6.
func (s *Server) ListenAndServeTLSEmbed(addr string, certData, keyData []byte, handler HandlerFunc) error {
	s.init(handler)
	return s.Server.ListenAndServeTLSEmbed(addr, certData, keyData)
}

// Serve serves incoming connections from the given listener.
//
// Serve blocks until the given listener returns permanent error.
func (s *Server) Serve(ln net.Listener, handler HandlerFunc) error {
	s.init(handler)
	return s.Server.Serve(ln)
}

// ServeConn serves HTTP requests from the given connection.
//
// ServeConn returns nil if all requests from the c are successfully served.
// It returns non-nil error otherwise.
//
// Connection c must immediately propagate all the data passed to Write()
// to the client. Otherwise requests' processing may hang.
//
// ServeConn closes c before returning.
func (s *Server) ServeConn(c net.Conn, handler HandlerFunc) error {
	s.init(handler)
	return s.Server.ServeConn(c)
}

// ServeTLS serves HTTPS requests from the given net.Listener.
//
// certFile and keyFile are paths to TLS certificate and key files.
func (s *Server) ServeTLS(ln net.Listener, certFile, keyFile string, handler HandlerFunc) error {
	s.init(handler)
	return s.Server.ServeTLS(ln, certFile, keyFile)
}

// ServeTLSEmbed serves HTTPS requests from the given net.Listener.
//
// certData and keyData must contain valid TLS certificate and key data.
func (s *Server) ServeTLSEmbed(ln net.Listener, certData, keyData []byte, handler HandlerFunc) error {
	s.init(handler)
	return s.Server.ServeTLSEmbed(ln, certData, keyData)
}
|
package dht
import (
"bytes"
"testing"
proto "gx/ipfs/QmdxUuburamoF6zF9qjeQC4WYcWGbWuRmdLacMEsW8ioD8/gogo-protobuf/proto"
recpb "gx/ipfs/QmexPd3srWxHC76gW2p5j5tQvwpPuCoW7b9vFhJ8BRPyh9/go-libp2p-record/pb"
)
// TestCleanRecordSigned verifies that cleanRecord strips the TimeReceived
// field while leaving Key and Value intact.
func TestCleanRecordSigned(t *testing.T) {
	got := new(recpb.Record)
	got.Key = []byte("key")
	got.Value = []byte("value")
	got.TimeReceived = "time"

	cleanRecord(got)
	gotBytes, err := proto.Marshal(got)
	if err != nil {
		t.Fatal(err)
	}

	want := new(recpb.Record)
	want.Key = []byte("key")
	want.Value = []byte("value")
	wantBytes, err := proto.Marshal(want)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(gotBytes, wantBytes) {
		t.Error("failed to clean record")
	}
}
// TestCleanRecord mirrors TestCleanRecordSigned: cleanRecord must remove
// TimeReceived and nothing else.
func TestCleanRecord(t *testing.T) {
	got := new(recpb.Record)
	got.Value = []byte("value")
	got.Key = []byte("key")
	got.TimeReceived = "time"

	cleanRecord(got)
	gotBytes, err := proto.Marshal(got)
	if err != nil {
		t.Fatal(err)
	}

	want := new(recpb.Record)
	want.Value = []byte("value")
	want.Key = []byte("key")
	wantBytes, err := proto.Marshal(want)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(gotBytes, wantBytes) {
		t.Error("failed to clean record")
	}
}
|
package main
import (
"fmt"
)
// Person models an individual with public identity fields and a private
// identifier.
type Person struct {
	// Public fields
	Name    string
	Surname string
	Age     int
	// Private field
	id string
}

// getFullName returns the person's name and surname joined by a space.
func (person *Person) getFullName() string {
	return person.Name + " " + person.Surname
}
// main builds a Person with a positional literal (Name, Surname, Age, id
// in declaration order) and prints the full name and the private id —
// accessible here because main shares the package.
func main() {
	var p = Person{"Arturo", "Tarin", 50, "0001"}
	println(p.getFullName())
	println(p.id)
}
|
package main
import (
"fmt"
)
// veículo (vehicle) holds the attributes shared by all vehicle kinds.
type veículo struct {
	portas int    // number of doors
	cor    string // color
}

// caminhonete (pickup truck) embeds veículo and adds a 4WD flag.
type caminhonete struct {
	veículo
	traçãoNasQuatro bool
}

// sedan embeds veículo and adds a luxury-model flag.
type sedan struct {
	veículo
	modeloLuxo bool
}
// main constructs one sedan (positional-style embedded literal in the
// original, named fields here) and one pickup truck, then prints both
// values and selected fields to demonstrate struct embedding.
func main() {
	carrãodotio := sedan{
		veículo:    veículo{portas: 4, cor: "abóbora"},
		modeloLuxo: true,
	}
	fubicadovô := caminhonete{
		veículo:         veículo{8, "ferrugem"},
		traçãoNasQuatro: false,
	}

	fmt.Println(carrãodotio)
	fmt.Println(fubicadovô)
	fmt.Println(carrãodotio.cor)
	fmt.Println(fubicadovô.traçãoNasQuatro)
}
|
/**
* @Author: yanKoo
* @Date: 2019/3/11 10:39
* @Description: main
*/
package main
import (
cfgWs "configs/web_server"
"flag"
"fmt"
"github.com/gin-gonic/gin"
"github.com/lestrrat/go-file-rotatelogs"
"github.com/rifflock/lfshook"
"github.com/sirupsen/logrus"
"github.com/unrolled/secure"
"log"
"net/http"
"os"
"controllers"
"strings"
"time"
)
// main builds the router via Prepare and serves plain HTTP on
// cfgWs.WebPort. The commented-out lines show the TLS variant.
func main() {
	engine := Prepare()
	//engine.Use(TlsHandler())
	//if err := engine.RunTLS(":"+cfgWs.WebPort, cfgWs.CertFile, cfgWs.KeyFile); err != nil {
	//	log.Printf("Read pem key file error: %+v", err)
	//}
	if err := engine.Run(":" + cfgWs.WebPort); err != nil {
		log.Println("listen is error", err)
	}
}
// Prepare builds the gin engine: disables console colors, attaches the
// logging and CORS middleware, and registers every HTTP route.
func Prepare() *gin.Engine {
	// Disable console color output.
	gin.DisableConsoleColor()
	// Create a file for request logs (currently disabled).
	//f, _ := os.Create("backend-web.log")
	//gin.DefaultWriter = io.MultiWriter(f)
	engine := gin.Default()
	// Logging plus CORS handling.
	engine.Use(Logger(), Cors())
	// Route registration.
	// account
	engine.POST("/account/login.do/:account_name", controllers.SignIn)
	engine.POST("/account/logout.do/:account_name", controllers.SignOut)
	engine.POST("/account", controllers.CreateAccountBySuperior)
	engine.GET("/account/:account_name", controllers.GetAccountInfo)
	engine.POST("/account/info/update", controllers.UpdateAccountInfo)
	engine.POST("/account/pwd/update", controllers.UpdateAccountPwd)
	engine.GET("/account_class/:accountId/:searchId", controllers.GetAccountClass)
	engine.GET("/account_device/:accountId/:getAdviceId", controllers.GetAccountDevice)
	engine.POST("/account_device/:accountId", controllers.TransAccountDevice)
	// group
	engine.POST("/group", controllers.CreateGroup)
	engine.POST("/group/update", controllers.UpdateGroup)
	engine.POST("/group/delete", controllers.DeleteGroup)
	engine.POST("/group/devices/update", controllers.UpdateGroupDevice)
	// device
	engine.POST("/device/import/:account_name", controllers.ImportDeviceByRoot)
	engine.POST("/device/update", controllers.UpdateDeviceInfo)
	// upload file
	engine.POST("/upload", controllers.UploadFile)
	// im server
	engine.GET("/im-server/:accountId", controllers.ImPush)
	return engine
}
// Cors returns middleware that adds permissive cross-origin headers to any
// request carrying an Origin header and fully answers OPTIONS preflight
// requests. Fixes: the chain is now aborted after answering a preflight
// (previously c.Next() still ran the real handlers for OPTIONS), and the
// duplicate Access-Control-Allow-Origin set (c.Writer.Header().Set plus
// c.Header, which target the same header map) is collapsed to one call.
func Cors() gin.HandlerFunc {
	return func(c *gin.Context) {
		method := c.Request.Method               // request method
		origin := c.Request.Header.Get("Origin") // request Origin header
		var headerKeys []string                  // incoming header names
		for k := range c.Request.Header {
			headerKeys = append(headerKeys, k)
		}
		// NOTE(review): headerStr is computed but never sent; kept for
		// behavioral parity — confirm whether it was meant to feed
		// Access-Control-Allow-Headers.
		headerStr := strings.Join(headerKeys, ",")
		if headerStr != "" {
			headerStr = fmt.Sprintf("access-control-allow-origin, access-control-allow-headers, %s", headerStr)
		} else {
			headerStr = "access-control-allow-origin, access-control-allow-headers"
		}
		if origin != "" {
			c.Header("Access-Control-Allow-Origin", "*")                                       // allow all origins
			c.Header("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE,UPDATE") // methods supported cross-origin
			// Request headers the browser may send.
			c.Header("Access-Control-Allow-Headers", "Authorization, Content-Length, X-CSRF-Token, Token,session,X_Requested_With,Accept, Origin, Host, Connection, Accept-Encoding, Accept-Language,DNT, X-CustomHeader, Keep-Alive, User-Agent, X-Requested-With, If-Modified-Since, Cache-Control, Content-Type, Pragma")
			// Response headers the browser is allowed to read.
			c.Header("Access-Control-Expose-Headers", "Content-Length, Access-Control-Allow-Origin, Access-Control-Allow-Headers,Cache-Control,Content-Language,Content-Type,Expires,Last-Modified,Pragma,FooBar")
			c.Header("Access-Control-Max-Age", "172800")           // preflight cache lifetime, seconds
			c.Header("Access-Control-Allow-Credentials", "false")  // whether cookies are sent cross-origin
			c.Set("content-type", "application/json")              // respond as JSON
		}
		// Preflight requests are fully answered here; do not run the
		// remaining handlers for them.
		if method == "OPTIONS" {
			c.JSON(http.StatusOK, "Options Request!")
			c.Abort()
			return
		}
		c.Next()
	}
}
// Logger returns gin middleware that writes one JSON log line per request
// — status, latency, client IP, method and path — to a rotating file via
// logrus + lfshook. Fix: the error from rotatelogs.New was silently
// ignored, leaving a nil writer (and broken logging) on failure.
func Logger() gin.HandlerFunc {
	logClient := logrus.New()
	// Suppress logrus's direct output; everything goes through the hook.
	src, err := os.OpenFile(os.DevNull, os.O_APPEND|os.O_WRONLY, os.ModeAppend)
	if err != nil {
		fmt.Println("err", err)
	}
	logClient.Out = src
	logClient.SetLevel(logrus.DebugLevel)

	apiLogPath := "web_server.log"
	logWriter, err := rotatelogs.New(
		apiLogPath+".%Y-%m-%d-%H-%M.log",
		rotatelogs.WithLinkName(apiLogPath),       // symlink pointing at the newest log file
		rotatelogs.WithMaxAge(7*24*time.Hour),     // how long rotated files are kept
		rotatelogs.WithRotationTime(24*time.Hour), // rotation interval
	)
	if err != nil {
		// Surface the failure instead of silently installing a nil writer.
		fmt.Println("err", err)
	}
	writeMap := lfshook.WriterMap{
		logrus.InfoLevel:  logWriter,
		logrus.FatalLevel: logWriter,
	}
	lfHook := lfshook.NewHook(writeMap, &logrus.JSONFormatter{})
	logClient.AddHook(lfHook)

	return func(c *gin.Context) {
		// Time the full handler chain.
		start := time.Now()
		c.Next()
		end := time.Now()
		latency := end.Sub(start)

		path := c.Request.URL.Path
		clientIP := c.ClientIP()
		method := c.Request.Method
		statusCode := c.Writer.Status()
		logClient.Infof("| %3d | %13v | %15s | %s %s |",
			statusCode,
			latency,
			clientIP,
			method, path,
		)
	}
}
// TlsHandler returns middleware that redirects plain-HTTP requests to the
// HTTPS host named by the -a flag (default "localhost") on cfgWs.WebPort.
//
// NOTE(review): flag.String/flag.Parse run inside this factory, so calling
// TlsHandler twice would panic on duplicate flag registration — confirm it
// is invoked at most once per process.
func TlsHandler() gin.HandlerFunc {
	addr := flag.String("a", "localhost", "ssl 默认主机" )
	flag.Parse()
	return func(c *gin.Context) {
		secureMiddleware := secure.New(secure.Options{
			SSLRedirect: true,
			SSLHost: *addr + ":"+ cfgWs.WebPort,
		})
		err := secureMiddleware.Process(c.Writer, c.Request)
		// If there was an error, do not continue.
		// NOTE(review): the request is neither aborted nor answered here;
		// secure.Process has typically already written the redirect —
		// confirm.
		if err != nil {
			return
		}
		c.Next()
	}
}
|
package base
import (
"logicdata/entity"
"server"
"server/data/datatype"
"server/share"
)
// Player implements the server callback hooks for player entities by
// embedding the generic server.Callee.
type Player struct {
	server.Callee
}
// OnLoad is invoked after the entity's data has been loaded; typ indicates
// the source (e.g. share.LOAD_DB). The receiver is renamed p -> c for
// consistency with every other Player method.
func (c *Player) OnLoad(self datatype.Entity, typ int) int {
	//player := self.(*entity.Player)
	if typ == share.LOAD_DB {
	}
	return 1
}
// OnPropertyChange is called when a tracked property changes; prop names
// the property and old carries its previous value.
func (c *Player) OnPropertyChange(self datatype.Entity, prop string, old interface{}) int {
	//player := self.(*entity.Player)
	switch prop {
	}
	return 1
}

// OnStore is called when the entity is persisted to storage of kind typ.
func (c *Player) OnStore(self datatype.Entity, typ int) int {
	return 1
}

// OnDisconnect is called when the player's connection drops.
func (c *Player) OnDisconnect(self datatype.Entity) int {
	return 1
}

// OnCommand dispatches inbound messages by msgid.
func (c *Player) OnCommand(self datatype.Entity, sender datatype.Entity, msgid int, msg interface{}) int {
	player := self.(*entity.Player)
	switch msgid {
	case share.PLAYER_FIRST_LAND:
		c.FirstLand(player)
	}
	return 1
}

// FirstLand handles a player's first landing; currently a stub.
func (c *Player) FirstLand(player *entity.Player) {
}

// OnReady is called once the entity is ready; first reports whether this
// is the first ready event.
func (c *Player) OnReady(self datatype.Entity, first bool) int {
	return 1
}
|
package sandbox_tests
import (
"github.com/iotaledger/wasp/packages/solo"
"github.com/iotaledger/wasp/packages/vm/core/testcore/sandbox_tests/test_sandbox_sc"
"github.com/stretchr/testify/require"
"strings"
"testing"
)
// Each pair below drives a smart-contract entry point that panics and
// asserts the panic message surfaces exactly once in the returned error;
// w selects the wasm variant via setupTestSandboxSC.

// TestPanicFull: a panicking full entry point fails the request.
func TestPanicFull(t *testing.T) { run2(t, testPanicFull) }
func testPanicFull(t *testing.T, w bool) {
	_, chain := setupChain(t, nil)
	setupTestSandboxSC(t, chain, nil, w)
	req := solo.NewCallParams(test_sandbox_sc.Interface.Name, test_sandbox_sc.FuncPanicFullEP)
	_, err := chain.PostRequest(req, nil)
	require.Error(t, err)
	require.EqualValues(t, 1, strings.Count(err.Error(), test_sandbox_sc.MsgFullPanic))
}

// TestPanicViewCall: a panicking view entry point fails the view call.
func TestPanicViewCall(t *testing.T) { run2(t, testPanicViewCall) }
func testPanicViewCall(t *testing.T, w bool) {
	_, chain := setupChain(t, nil)
	setupTestSandboxSC(t, chain, nil, w)
	_, err := chain.CallView(test_sandbox_sc.Interface.Name, test_sandbox_sc.FuncPanicViewEP)
	require.Error(t, err)
	require.EqualValues(t, 1, strings.Count(err.Error(), test_sandbox_sc.MsgViewPanic))
}

// TestCallPanicFull: a full EP that calls a panicking full EP propagates
// the panic message.
func TestCallPanicFull(t *testing.T) { run2(t, testCallPanicFull) }
func testCallPanicFull(t *testing.T, w bool) {
	_, chain := setupChain(t, nil)
	setupTestSandboxSC(t, chain, nil, w)
	req := solo.NewCallParams(test_sandbox_sc.Interface.Name, test_sandbox_sc.FuncCallPanicFullEP)
	_, err := chain.PostRequest(req, nil)
	require.Error(t, err)
	require.EqualValues(t, 1, strings.Count(err.Error(), test_sandbox_sc.MsgFullPanic))
}

// TestCallPanicViewFromFull: a full EP calling a panicking view EP
// propagates the view panic message.
func TestCallPanicViewFromFull(t *testing.T) { run2(t, testCallPanicViewFromFull) }
func testCallPanicViewFromFull(t *testing.T, w bool) {
	_, chain := setupChain(t, nil)
	setupTestSandboxSC(t, chain, nil, w)
	req := solo.NewCallParams(test_sandbox_sc.Interface.Name, test_sandbox_sc.FuncCallPanicViewEPFromFull)
	_, err := chain.PostRequest(req, nil)
	require.Error(t, err)
	require.EqualValues(t, 1, strings.Count(err.Error(), test_sandbox_sc.MsgViewPanic))
}

// TestCallPanicViewFromView: a view EP calling a panicking view EP
// propagates the view panic message.
func TestCallPanicViewFromView(t *testing.T) { run2(t, testCallPanicViewFromView) }
func testCallPanicViewFromView(t *testing.T, w bool) {
	_, chain := setupChain(t, nil)
	setupTestSandboxSC(t, chain, nil, w)
	_, err := chain.CallView(test_sandbox_sc.Interface.Name, test_sandbox_sc.FuncCallPanicViewEPFromView)
	require.Error(t, err)
	require.EqualValues(t, 1, strings.Count(err.Error(), test_sandbox_sc.MsgViewPanic))
}
|
package main
import (
"bufio"
"fmt"
"os"
)
// main reads two lines from stdin and prints "no" (no newline) when the
// first is shorter than the second, otherwise "go" with a newline —
// exactly mirroring the original's asymmetric output.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Split(bufio.ScanLines)

	readLine := func() string {
		scanner.Scan()
		return scanner.Text()
	}

	first := readLine()
	second := readLine()

	if len(first) < len(second) {
		fmt.Print("no")
	} else {
		fmt.Println("go")
	}
}
|
package stateful
import (
"context"
aliceapi "github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/alice/api"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/errors"
)
// scenario is a dialog step handler: it receives the incoming Alice
// request and produces a response or an error.
type scenario = func(context.Context, *aliceapi.Request) (*aliceapi.Response, errors.Err)

// setupScenarios wires the dialog state machine: stateScenarios maps each
// persisted dialog state to the handler that continues it, while
// scratchScenarios handle requests arriving without a stored state —
// presumably tried in the listed order; confirm in the dispatch code.
func (h *Handler) setupScenarios() {
	h.stateScenarios = map[aliceapi.State]scenario{
		aliceapi.StateAddItemReqItem: h.addItemReqItem,
		aliceapi.StateAddItemReqList: h.addItemReqList,
		aliceapi.StateCreateReqName:  h.createRequireName,
		aliceapi.StateDelItemReqList: h.deleteItemReqList,
		aliceapi.StateDelItemReqItem: h.deleteItemReqItem,
		aliceapi.StateDelReqName:     h.deleteListReqList,
		aliceapi.StateDelReqConfirm:  h.deleteListReqConfirm,
		aliceapi.StateViewReqName:    h.viewListReqName,
	}
	h.scratchScenarios = []scenario{
		h.viewListFromScratch,
		h.listAllListsFromScratch,
		h.createFromScratch,
		h.addItemFromScratch,
		h.deleteListFromScratch,
		h.deleteItemFromScratch,
	}
}
|
//go:build !tinygo
// +build !tinygo
package vugu
import "reflect"
// rvIsZero reports whether rv holds its type's zero value. On non-tinygo
// builds (see the !tinygo constraint on this file) it delegates directly
// to reflect.Value.IsZero.
func rvIsZero(rv reflect.Value) bool {
	return rv.IsZero()
}
|
package sortfunc
import "testing"
// TestQuickSortArrays sorts a small fixture in place, logging the slice
// before and after. Fixed: gofmt-clean formatting, and a sort error now
// fails the test instead of being silently ignored.
func TestQuickSortArrays(t *testing.T) {
	// nums := []int{-1, 0, 1, 2, -1, -4}
	nums := []int{6, 0, 1, 2, -1, -4}
	t.Log(nums)

	if err := QuickSortArrays(nums); err != nil {
		t.Fatal(err)
	}
	t.Log(nums)
}
|
// Copyright (c) 2017, 0qdk4o. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package domain
import (
"bytes"
"encoding/json"
"strings"
)
// Checkavailable represents operation type
type Checkavailable Cmd

// SplicingURL splice pieces of variable to long url
//
// It starts from the generic Cmd URL and appends one "&domain-name="
// parameter per distinct second-level label and one "&tlds=" parameter per
// distinct top-level label found in domains. It returns "" when domains is
// empty, p is nil, or any entry has fewer than two dot-separated labels.
func (a *Checkavailable) SplicingURL(p *Registrar, domains []string) string {
	if len(domains) < 1 || p == nil {
		return ""
	}
	var t = Cmd(*a)
	url := bytes.NewBufferString((&t).SplicingURL(p, domains))
	// Track labels already emitted so repeats are appended only once.
	uniqToplv := make(map[string]bool)
	uniqSecondlv := make(map[string]bool)
	for _, v := range domains {
		namesSlice := strings.Split(v, ".")
		namesLen := len(namesSlice)
		if namesLen < 2 {
			return ""
		}
		if !uniqSecondlv[namesSlice[namesLen-2]] {
			url.WriteString("&domain-name=")
			url.WriteString(namesSlice[namesLen-2])
			uniqSecondlv[namesSlice[namesLen-2]] = true
		}
		if !uniqToplv[namesSlice[namesLen-1]] {
			url.WriteString("&tlds=")
			url.WriteString(namesSlice[namesLen-1])
			uniqToplv[namesSlice[namesLen-1]] = true
		}
	}
	return url.String()
}
// ProcessHTTPData decodes the registrar's JSON reply. If the first key of
// the object is "status" the whole payload unmarshals into Result;
// otherwise it unmarshals into the nested NameRes. Fix: the unchecked
// st.(string) assertion could panic when the second token was not a
// string; a comma-ok assertion now falls through to the NameRes branch.
func (a *Checkavailable) ProcessHTTPData(b []byte) (*Result, error) {
	var res Result
	decoder := json.NewDecoder(bytes.NewBuffer(b))
	if _, err := decoder.Token(); err != nil { // consume opening '{'
		return nil, err
	}
	st, err := decoder.Token() // first key reveals the response shape
	if err != nil {
		return nil, err
	}
	if key, ok := st.(string); ok && key == "status" {
		err = json.Unmarshal(b, &res)
	} else {
		err = json.Unmarshal(b, &res.NameRes)
	}
	return &res, err
}
// String returns the underlying command string of the Checkavailable value.
func (a *Checkavailable) String() string {
	return string(*a)
}
|
package clock
import "fmt"
const testVersion = 4
// Clock represents a time of day as hours and minutes, independent of date.
type Clock struct {
	hour   int
	minute int
}

// New creates a Clock normalized into [00:00, 23:59]. Negative or
// out-of-range hours and minutes wrap around the 24-hour day.
//
// Fixes: the original returned wrong results for negative minutes (it
// ignored them entirely) and for hours that were negative multiples of 24
// (its `24 - (24 % -hour)` formula produced hour 24); leftover debug
// prints are removed.
func New(hour, minute int) Clock {
	return Clock{}.Add(hour*60 + minute)
}

// String renders the clock in zero-padded "HH:MM" form.
func (c Clock) String() string {
	return fmt.Sprintf("%02d:%02d", c.hour, c.minute)
}

// Add returns a new Clock advanced by the given number of minutes, which
// may be negative; the result wraps around the 24-hour day.
func (c Clock) Add(minutes int) Clock {
	const day = 24 * 60
	total := (c.hour*60 + c.minute + minutes) % day
	if total < 0 {
		total += day
	}
	return Clock{total / 60, total % 60}
}
|
package main
import (
"codeci/src/util/k8s"
"os"
// "log"
)
// main is an empty entry point.
// NOTE(review): the k8s and os imports above are currently unused, which
// Go rejects at compile time — confirm they are used elsewhere or remove.
func main() {
}
|
package common
import (
"math/big"
)
const (
	// NOTE(review): presumably chain/contract identifiers — confirm usage.
	HGS = "hgs"
	HNB = "hnb"
)

// Address is a fixed-size 20-byte account address.
type Address [20]byte

// Hash is a fixed-size 32-byte hash value.
type Hash [32]byte

// Transactions is a slice of transaction pointers.
type Transactions []*Transaction
// GetBytes returns a fresh copy of the hash contents so callers cannot
// mutate the underlying array.
func (h *Hash) GetBytes() []byte {
	out := make([]byte, len(h))
	copy(out, h[:])
	return out
}
// SetBytes copies b into the address. If b is longer than 20 bytes only
// its trailing 20 bytes are used; if shorter, it is right-aligned and the
// leading bytes keep their current values.
func (a *Address) SetBytes(b []byte) {
	if len(b) > len(a) {
		b = b[len(b)-20:]
	}
	copy(a[20-len(b):], b)
}
// GetBytes returns a fresh copy of the address contents so callers cannot
// mutate the underlying array.
func (a *Address) GetBytes() []byte {
	out := make([]byte, len(a))
	copy(out, a[:])
	return out
}
// Transaction is a signed chain transaction addressed to a named contract.
type Transaction struct {
	ContractName string `json:"contractName"` // transaction type / target contract
	Payload []byte `json:"payload"` // serialized data of the concrete transaction
	Txid Hash `json:"txid"` // transaction id
	From Address `json:"from"` // sending account
	NonceValue uint64 `json:"nonceValue"`
	V *big.Int `json:"v"` //`json:"v" gencodec:"required"`
	R *big.Int `json:"r"` //`json:"r" gencodec:"required"`
	S *big.Int `json:"s"` //`json:"s" gencodec:"required"`
}
// NewTransaction returns an empty Transaction ready to be populated.
func NewTransaction() *Transaction {
	return &Transaction{}
}
// Hash returns the transaction id.
func (tx *Transaction) Hash() Hash {
	return tx.Txid
}

// Nonce returns the sender's nonce for this transaction.
func (tx *Transaction) Nonce() uint64 {
	return tx.NonceValue
}

// FromAddress returns the sending account's address.
func (tx *Transaction) FromAddress() Address {
	return tx.From
}
// TxByNonce implements sort.Interface, ordering transactions by ascending
// nonce.
type TxByNonce Transactions

func (s TxByNonce) Len() int           { return len(s) }
func (s TxByNonce) Less(i, j int) bool { return s[i].NonceValue < s[j].NonceValue }
func (s TxByNonce) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
|
package leetcode
import (
"reflect"
"testing"
"github.com/ironzhang/leetcode/util"
)
// TestReverseKNodes drives reverseKNodes through table cases and checks
// the reversed list plus the returned prev and next nodes. Fix: one log
// format string read "prve" instead of "prev".
func TestReverseKNodes(t *testing.T) {
	tests := []struct {
		input  []int
		k      int
		output []int
		prev   []int
		next   []int
	}{
		{
			input:  []int{1, 2, 3},
			k:      2,
			output: []int{2, 1, 3},
			prev:   []int{1, 3},
			next:   []int{3},
		},
		{
			input:  []int{1, 2, 3, 4, 5},
			k:      2,
			output: []int{2, 1, 3, 4, 5},
			prev:   []int{1, 3, 4, 5},
			next:   []int{3, 4, 5},
		},
		{
			input:  []int{1, 2, 3, 4, 5},
			k:      3,
			output: []int{3, 2, 1, 4, 5},
			prev:   []int{1, 4, 5},
			next:   []int{4, 5},
		},
	}
	for i, tt := range tests {
		var n ListNode
		h := util.Slice2List(tt.input)
		prev, next := reverseKNodes(&n, h, tt.k)
		if got, want := util.List2Slice(n.Next), tt.output; !reflect.DeepEqual(got, want) {
			t.Errorf("%d: reverseKNodes: list: got %v, want %v", i, got, want)
		} else {
			t.Logf("%d: reverseKNodes: list: got %v", i, got)
		}
		if got, want := util.List2Slice(prev), tt.prev; !reflect.DeepEqual(got, want) {
			t.Errorf("%d: reverseKNodes: prev: got %v, want %v", i, got, want)
		} else {
			t.Logf("%d: reverseKNodes: prev: got %v", i, got)
		}
		if got, want := util.List2Slice(next), tt.next; !reflect.DeepEqual(got, want) {
			t.Errorf("%d: reverseKNodes: next: got %v, want %v", i, got, want)
		} else {
			t.Logf("%d: reverseKNodes: next: got %v", i, got)
		}
	}
}
// TestReverseKGroup checks reverseKGroup against table-driven cases,
// including k == 1 (no-op) and k == len(input) (full reversal).
func TestReverseKGroup(t *testing.T) {
	cases := []struct {
		input  []int
		k      int
		output []int
	}{
		{input: []int{1, 2, 3}, k: 2, output: []int{2, 1, 3}},
		{input: []int{1, 2, 3, 4, 5}, k: 2, output: []int{2, 1, 4, 3, 5}},
		{input: []int{1, 2, 3, 4, 5}, k: 3, output: []int{3, 2, 1, 4, 5}},
		{input: []int{1, 2, 3, 4, 5}, k: 1, output: []int{1, 2, 3, 4, 5}},
		{input: []int{1, 2, 3, 4, 5}, k: 5, output: []int{5, 4, 3, 2, 1}},
		{input: []int{1, 2, 3, 4, 5}, k: 4, output: []int{4, 3, 2, 1, 5}},
	}
	for i, tc := range cases {
		head := util.Slice2List(tc.input)
		got := util.List2Slice(reverseKGroup(head, tc.k))
		if want := tc.output; !reflect.DeepEqual(got, want) {
			t.Errorf("%d: reverseKGroup: got %v, want %v", i, got, want)
		} else {
			t.Logf("%d: reverseKGroup: got %v", i, got)
		}
	}
}
|
package Controllers
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/james-vaughn/PersonalWebsite/Models"
"github.com/james-vaughn/PersonalWebsite/Services"
)
// GenerativeArtController serves the generative-art section: an index of
// project pages plus one route per page.
type GenerativeArtController struct {
	PagesService *Services.PagesService
	// pages caches the page list for this controller's section.
	pages []Models.Page
}

// GenerativeArtControllerName is the URL segment and section key for this
// controller.
const GenerativeArtControllerName = "art"

// NewGenerativeArtController builds the controller and pre-loads its pages
// from the pages service.
func NewGenerativeArtController(pagesService *Services.PagesService) *GenerativeArtController {
	return &GenerativeArtController{
		PagesService: pagesService,
		pages:        pagesService.GetPagesFor(GenerativeArtControllerName),
	}
}
// RegisterRoutes mounts the section index at /art/ and one GET route per
// cached project page at the URL the pages service computes for it.
func (c *GenerativeArtController) RegisterRoutes(r *gin.Engine) {
	r.GET("/"+GenerativeArtControllerName+"/", c.Index)
	for _, page := range c.pages {
		url := c.PagesService.GetUrlFor(page)
		r.GET(url, c.ProjectPage(page))
	}
}
// Index renders the section's project table with every cached page.
func (c *GenerativeArtController) Index(context *gin.Context) {
	context.HTML(http.StatusOK, "projectstable.tmpl", gin.H{
		"title":    "Generative Art",
		"projects": c.pages,
	})
}
// ProjectPage returns a handler rendering the given page's template, with
// previous/next navigation resolved from the page's PrevId/NextId links.
func (c *GenerativeArtController) ProjectPage(page Models.Page) func(*gin.Context) {
	return func(context *gin.Context) {
		prevPage := c.PagesService.GetPageFromList(c.pages, page.PrevId)
		nextPage := c.PagesService.GetPageFromList(c.pages, page.NextId)
		context.HTML(http.StatusOK, page.Url+".tmpl", gin.H{
			"title":    page.Title,
			"prev_url": c.PagesService.GetUrlFor(prevPage),
			"prev":     prevPage.Title,
			"next_url": c.PagesService.GetUrlFor(nextPage),
			"next":     nextPage.Title,
		})
	}
}
|
package env
// Convenience functions for working with environment variables
import (
"os"
"strconv"
"strings"
)
// Default gets the value of an environment variable or a default if not set
func Default(key string, def string) string {
env := os.Getenv(key)
if env == "" {
return def
}
return env
}
// Bool returns the boolean value of a environment variable
// "true", "1", "yes", "on" in any casing are considered true
func Bool(key string) bool {
env := strings.ToLower(os.Getenv(key))
if env == "1" || env == "true" || env == "yes" || env == "on" {
return true
}
return false
}
// DefaultInt gets the parsed int value of an environment variable or a default if not set or parsing failed
func DefaultInt(key string, def int) int {
env := os.Getenv(key)
if env == "" {
return def
}
i, err := strconv.ParseInt(env, 10, 32)
if err != nil {
return def
}
return int(i)
}
|
package arch
import (
"fmt"
"math"
)
/**
* This file contains the implementations of Chip8 instructions.
*/
// Graphics controls

// ClearScreen blanks the display (opcode 00E0) and marks the frame dirty
// so the frontend redraws it.
func (c8 *Chip8) ClearScreen() {
	if c8.Debug {
		fmt.Println("Executing ClearScreen()")
	}
	c8.Screen.ClearScreen()
	c8.DrawFlag = true
}
// DrawSprite draws an 8-pixel-wide sprite of N rows (low nibble of the
// opcode) from memory at the index register, positioned at the
// coordinates held in registers Vx/Vy. Pixels are XORed onto the screen;
// VF is set to 1 when any already-lit pixel is toggled off (collision),
// otherwise 0.
func (c8 *Chip8) DrawSprite() {
	if c8.Debug {
		fmt.Println("Executing DrawSprite()")
	}

	// All variables are promoted to uint16 for easier manipulation.
	xCoord := uint16(c8.Registers[c8.Opcode.Xreg])
	yCoord := uint16(c8.Registers[c8.Opcode.Yreg])
	height := c8.Opcode.Value & 0xF
	width := uint16(8)         // Width is hardcoded.
	shiftConst := uint16(0x80) // Shifting 128 right allows us to check indiv bits.

	c8.Registers[0xF] = 0 // Assume we don't unset any pixels.

	var yLine, xLine uint16
	for yLine = 0; yLine < height; yLine++ {
		// One byte of sprite data per row.
		pixel := uint16(c8.Memory[c8.IndexReg+yLine])
		for xLine = 0; xLine < width; xLine++ {
			x, y := xCoord+xLine, yCoord+yLine
			inBounds := c8.Screen.InBounds(x, y)

			// If we need to draw this pixel...
			if pixel&(shiftConst>>xLine) != 0 && inBounds {
				// XOR the pixel, saving whether we set it.
				if c8.Screen.GetPixel(x, y) {
					c8.Registers[0xF] = 1
				}
				c8.Screen.XorPixel(x, y)
			}
		}
	}

	c8.DrawFlag = true
}
// SetIndexToSprite points the index register at the fontset sprite for
// the character held in register Vx (opcode FX29).
func (c8 *Chip8) SetIndexToSprite() {
	if c8.Debug {
		fmt.Println("Executing SetIndexToSprite()")
	}
	char := c8.Registers[c8.Opcode.Xreg]
	offset := uint8(len(c8.Fontset) / 16) // Bytes per character glyph (the fontset holds 16 characters).
	// Set index register to location of the
	// first fontset sprite of the matching character.
	// NOTE(review): offset*char is computed in uint8 and could wrap for
	// register values above 0xF — confirm callers mask the register.
	c8.IndexReg = uint16(offset * char)
}
// Control flow

// CallRCA1802 (opcode 0NNN) is intentionally unimplemented; it panics with
// the target address.
func (c8 *Chip8) CallRCA1802() {
	address := c8.Opcode.Value & 0xFFF
	panic(fmt.Sprintf("Unimplemented opcode CallRCA1802 called with address: %v\n",
		address))
}

// Return pops the saved program counter off the stack (opcode 00EE).
func (c8 *Chip8) Return() {
	if c8.Debug {
		fmt.Println("Executing Return()")
	}
	// No return values, just stack movement.
	c8.SP--
	c8.PC = c8.Stack[c8.SP]
}

// Jump sets the program counter to the opcode's literal address (1NNN).
func (c8 *Chip8) Jump() {
	if c8.Debug {
		fmt.Println("Executing Jump()")
	}
	c8.PC = c8.Opcode.Literal
	c8.UpdatePC = 0 // Don't increment PC
}

// JumpIndexLiteralOffset jumps to the literal address plus register V0
// (opcode BNNN).
func (c8 *Chip8) JumpIndexLiteralOffset() {
	if c8.Debug {
		fmt.Println("Executing JumpIndexLiteralOffset()")
	}
	newAddr := c8.Opcode.Literal + uint16(c8.Registers[0])
	c8.PC = newAddr
	c8.UpdatePC = 0 // Don't increment PC
}

// Call pushes the current program counter onto the stack and jumps to the
// opcode's literal address (2NNN).
func (c8 *Chip8) Call() {
	if c8.Debug {
		fmt.Println("Executing Call()")
	}
	// Store the PC in the stack pointer.
	c8.Stack[c8.SP] = c8.PC
	c8.SP++ // TODO: Overflow?
	c8.PC = c8.Opcode.Literal
	c8.UpdatePC = 0 // Don't increment PC
}
func (c8 *Chip8) SkipInstrEqualLiteral() {
if c8.Debug {
fmt.Println("Executing SkipInstrEqualLiteral()")
}
literal := c8.Opcode.Value & 0xFF
// If the register contents equal the literal...
if uint16(c8.Registers[c8.Opcode.Xreg]) == literal {
c8.UpdatePC = 4 // skip an instruction.
}
}
func (c8 *Chip8) SkipInstrNotEqualLiteral() {
if c8.Debug {
fmt.Println("Executing SkipInstrNotEqualLiteral()")
}
literal := c8.Opcode.Value & 0xFF
// If the register contents don't equal the literal...
if uint16(c8.Registers[c8.Opcode.Xreg]) != literal {
c8.UpdatePC = 4 // skip an instruction.
}
}
func (c8 *Chip8) SkipInstrEqualReg() {
if c8.Debug {
fmt.Println("Executing SkipInstrEqualReg()")
}
// If the register contents are equal...
if c8.Registers[c8.Opcode.Xreg] == c8.Registers[c8.Opcode.Yreg] {
c8.UpdatePC = 4 // skip an instruction.
}
}
func (c8 *Chip8) SkipInstrNotEqualReg() {
if c8.Debug {
fmt.Println("Executing SkipInstrNotEqualReg()")
}
// If the register contents are not equal...
if c8.Registers[c8.Opcode.Xreg] != c8.Registers[c8.Opcode.Yreg] {
c8.UpdatePC = 4 // skip an instruction.
}
}
func (c8 *Chip8) SkipInstrKeyPressed() {
if c8.Debug {
fmt.Println("Executing SkipInstrKeyPressed()")
}
if c8.Controller.KeyPressed(c8.Registers[c8.Opcode.Xreg]) {
c8.UpdatePC = 4
}
}
func (c8 *Chip8) SkipInstrKeyNotPressed() {
if c8.Debug {
fmt.Println("Executing SkipInstrKeyNotPressed()")
}
if !c8.Controller.KeyPressed(c8.Registers[c8.Opcode.Xreg]) {
c8.UpdatePC = 4
}
}
// Manipulating data registers

// SetRegToLiteral stores the opcode's low byte into VX.
func (c8 *Chip8) SetRegToLiteral() {
    if c8.Debug {
        fmt.Println("Executing SetRegToLiteral()")
    }
    literal := c8.Opcode.Value & 0xFF
    // WARNING, MAY NOT FIT! (truncated to 8 bits by the cast)
    c8.Registers[c8.Opcode.Xreg] = uint8(literal)
}

// SetRegToReg copies VY into VX.
func (c8 *Chip8) SetRegToReg() {
    if c8.Debug {
        fmt.Println("Executing SetRegToReg()")
    }
    c8.Registers[c8.Opcode.Xreg] = c8.Registers[c8.Opcode.Yreg]
}

// Add adds the opcode's low byte to VX without touching the carry flag.
func (c8 *Chip8) Add() {
    if c8.Debug {
        fmt.Println("Executing Add()")
    }
    literal := c8.Opcode.Value & 0xFF
    // WARNING, MIGHT NOT FIT (wraps modulo 256; VF deliberately unchanged)
    c8.Registers[c8.Opcode.Xreg] += uint8(literal)
}
// AddWithCarry adds VY into VX, setting VF to 1 on unsigned overflow and
// to 0 otherwise.
func (c8 *Chip8) AddWithCarry() {
    if c8.Debug {
        fmt.Println("Executing AddWithCarry()")
    }
    total := int(c8.Registers[c8.Opcode.Xreg]) + int(c8.Registers[c8.Opcode.Yreg])
    c8.Registers[c8.Opcode.Xreg] = uint8(total)
    carry := uint8(0)
    if total > math.MaxUint8 {
        carry = 1 // The sum did not fit in 8 bits.
    }
    c8.Registers[0xF] = carry
}
// Or stores VX | VY into VX.
func (c8 *Chip8) Or() {
    if c8.Debug {
        fmt.Println("Executing Or()")
    }
    c8.Registers[c8.Opcode.Xreg] |= c8.Registers[c8.Opcode.Yreg]
}

// And stores VX & VY into VX.
func (c8 *Chip8) And() {
    if c8.Debug {
        fmt.Println("Executing And()")
    }
    c8.Registers[c8.Opcode.Xreg] &= c8.Registers[c8.Opcode.Yreg]
}

// Xor stores VX ^ VY into VX.
func (c8 *Chip8) Xor() {
    if c8.Debug {
        fmt.Println("Executing Xor()")
    }
    c8.Registers[c8.Opcode.Xreg] ^= c8.Registers[c8.Opcode.Yreg]
}
// SubXFromY stores VY - VX into VX; VF is cleared when the subtraction
// borrows (underflows) and set to 1 otherwise.
func (c8 *Chip8) SubXFromY() {
    if c8.Debug {
        fmt.Println("Executing SubXFromY()")
    }
    result := int(c8.Registers[c8.Opcode.Yreg]) - int(c8.Registers[c8.Opcode.Xreg])
    c8.Registers[c8.Opcode.Xreg] = uint8(result)
    noBorrow := uint8(1)
    if result < 0 {
        noBorrow = 0
    }
    c8.Registers[0xF] = noBorrow
}

// SubYFromX stores VX - VY into VX; VF is cleared when the subtraction
// borrows (underflows) and set to 1 otherwise.
func (c8 *Chip8) SubYFromX() {
    if c8.Debug {
        fmt.Println("Executing SubYFromX()")
    }
    result := int(c8.Registers[c8.Opcode.Xreg]) - int(c8.Registers[c8.Opcode.Yreg])
    c8.Registers[c8.Opcode.Xreg] = uint8(result)
    noBorrow := uint8(1)
    if result < 0 {
        noBorrow = 0
    }
    c8.Registers[0xF] = noBorrow
}
// ShiftRight shifts VX right by one; VF receives the bit shifted out
// (VX's least significant bit before the shift).
func (c8 *Chip8) ShiftRight() {
    if c8.Debug {
        fmt.Println("Executing ShiftRight()")
    }
    before := c8.Registers[c8.Opcode.Xreg]
    c8.Registers[0xF] = before & 0x1
    c8.Registers[c8.Opcode.Xreg] = before >> 1
}
// ShiftLeft shifts VX left by one; VF receives the bit shifted out
// (VX's most significant bit before the shift).
func (c8 *Chip8) ShiftLeft() {
    if c8.Debug {
        fmt.Println("Executing ShiftLeft()")
    }
    // BUG FIX: Registers hold uint8 values, so the most significant bit is
    // bit 7, not bit 15. The previous ">> 15" always yielded 0, meaning VF
    // never recorded the carried-out bit.
    c8.Registers[0xF] = (c8.Registers[c8.Opcode.Xreg] >> 7) & 0x1
    c8.Registers[c8.Opcode.Xreg] = c8.Registers[c8.Opcode.Xreg] << 1
}
// SetRegisterRandomMask stores (random byte & opcode low byte) into VX.
func (c8 *Chip8) SetRegisterRandomMask() {
    if c8.Debug {
        fmt.Println("Executing SetRegisterRandomMask()")
    }
    mask := uint8(c8.Opcode.Value & 0xFF)
    randNum := uint8(c8.Rando.Uint32() % 256) // Needs to fit in a uint8.
    c8.Registers[c8.Opcode.Xreg] = mask & randNum
}

// SaveBinaryCodedDecimal writes the decimal digits of VX into memory at
// the index register: hundreds, tens, then ones.
func (c8 *Chip8) SaveBinaryCodedDecimal() {
    if c8.Debug {
        fmt.Println("Executing SaveBinaryCodedDecimal()")
    }
    valueToConvert := c8.Registers[c8.Opcode.Xreg]
    // Store the decimal representation of value in memory so that
    // the hundreths digit of the value is in Mem[Index],
    // the tenths digit is in Mem[Index+1], and
    // the ones digit is in Mem[Index+2].
    c8.Memory[c8.IndexReg] = valueToConvert / 100
    c8.Memory[c8.IndexReg+1] = (valueToConvert / 10) % 10
    c8.Memory[c8.IndexReg+2] = (valueToConvert % 100) % 10
}

// GetKeyPress stores the lowest-numbered currently-pressed key into VX.
// If no key is pressed, the PC is held so the instruction re-executes next
// cycle, effectively blocking until a key arrives.
func (c8 *Chip8) GetKeyPress() {
    if c8.Debug {
        fmt.Println("Executing GetKeyPress()")
    }
    var key uint8
    for key = 0; key < 16; key++ { // TODO: REMOVE HARDCODE
        if c8.Controller.KeyPressed(key) {
            c8.Registers[c8.Opcode.Xreg] = uint8(key)
            return
        }
    }
    // Else, don't increment PC, wait another cycle for the key.
    c8.UpdatePC = 0
}

// GetDelayTimer copies the delay timer into VX.
func (c8 *Chip8) GetDelayTimer() {
    if c8.Debug {
        fmt.Println("Executing GetDelayTimer()")
    }
    c8.Registers[c8.Opcode.Xreg] = c8.DelayTimer // Save delay timer in reg.
}

// Manipulating special registers

// AddRegisterToIndex adds VX to the index register.
func (c8 *Chip8) AddRegisterToIndex() {
    if c8.Debug {
        fmt.Println("Executing AddRegisterToIndex()")
    }
    c8.IndexReg += uint16(c8.Registers[c8.Opcode.Xreg])
}

// SetIndexLiteral loads the opcode's literal address into the index register.
func (c8 *Chip8) SetIndexLiteral() {
    if c8.Debug {
        fmt.Println("Executing SetIndexLiteral()")
    }
    c8.IndexReg = c8.Opcode.Literal
}

// SetDelayTimer loads VX into the delay timer.
func (c8 *Chip8) SetDelayTimer() {
    if c8.Debug {
        fmt.Println("Executing SetDelayTimer()")
    }
    c8.DelayTimer = c8.Registers[c8.Opcode.Xreg]
}

// SetSoundTimer loads VX into the sound timer.
func (c8 *Chip8) SetSoundTimer() {
    if c8.Debug {
        fmt.Println("Executing SetSoundTimer()")
    }
    c8.SoundTimer = c8.Registers[c8.Opcode.Xreg]
}
// Context Switching

// SaveRegisters dumps registers V0..VX (inclusive) into memory starting at
// the index register.
func (c8 *Chip8) SaveRegisters() {
    if c8.Debug {
        fmt.Println("Executing SaveRegisters()")
    }
    // Store all registers up to last register in memory,
    // starting in memory at the location in the index register.
    for loc, reg := c8.IndexReg, uint16(0); reg <= uint16(c8.Opcode.Xreg); loc, reg = loc+1, reg+1 {
        c8.Memory[loc] = c8.Registers[reg] // TODO: check overflow
    }
}

// RestoreRegisters loads registers V0..VX (inclusive) from memory starting
// at the index register.
func (c8 *Chip8) RestoreRegisters() {
    if c8.Debug {
        fmt.Println("Executing RestoreRegisters()")
    }
    // Load all registers up to last register from memory,
    // starting in memory at the location in the index register.
    for loc, reg := c8.IndexReg, uint16(0); reg <= uint16(c8.Opcode.Xreg); loc, reg = loc+1, reg+1 {
        c8.Registers[reg] = c8.Memory[loc] // TODO: check overflow
    }
}

// Special

// UnknownInstruction halts emulation on an undecodable opcode.
func (c8 *Chip8) UnknownInstruction() {
    panic(fmt.Sprintf("Unknown instruction: %v\n", c8.Opcode))
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/12/11 8:56 上午
# @File : lt_12_整数转罗马数字.go
# @Description :
# @Attention :
*/
package hot100
// func intToRoman(num int) string {
// r := ""
// romans := []string{"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}
// ints := []int{1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}
//
// for i := 0; i < len(ints); i++ {
// for num >= ints[i] {
// r += romans[i]
// num -= ints[i]
// }
// }
//
// return r
// }
// 关键: 先列举出所有 数字对应的罗马数字,然后遍历 ,如1000 , num有几个1000,就可以加几个1000对应的罗马数字即可
// intToRoman converts a non-negative integer (conventionally 1..3999) to
// its Roman numeral representation with the standard greedy algorithm:
// repeatedly emit the largest symbol whose value still fits, then subtract
// it. Returns "" for 0.
//
// Improvement: the previous version kept the value→symbol mapping twice
// (a map plus a parallel slice) and used an inner counting loop; the two
// aligned slices below remove the duplication.
func intToRoman(num int) string {
    values := []int{1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}
    symbols := []string{"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}
    ret := ""
    for i, v := range values {
        if num == 0 {
            break
        }
        // Greedily take as many of this denomination as fit.
        for num >= v {
            ret += symbols[i]
            num -= v
        }
    }
    return ret
}
|
package transport
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"errors"
"io"
"net"
"sync"
"sync/atomic"
"time"
"github.com/tochka/tcached/cache"
)
// NewServer returns a Server that serves cache c over TCP on address.
func NewServer(c cache.Cache, address string) *Server {
    return &Server{
        Cache:   c,
        Address: address,
    }
}

// Server is a TCP front end for a cache.Cache.
type Server struct {
    Cache   cache.Cache
    Address string
    // inShutdown is set to 1 atomically on shutdown so Listen can tell a
    // deliberate listener close from a genuine Accept failure.
    inShutdown int32
    listener   net.Listener
}
// Listen binds the TCP listener and accepts connections until Accept fails
// or the server is shut down. Each connection is handled by its own worker
// goroutine; Listen cancels the workers' context and waits for all of them
// before returning.
func (s *Server) Listen() (err error) {
    var wg sync.WaitGroup
    s.listener, err = net.Listen("tcp", s.Address)
    if err != nil {
        return err
    }
    ctx, cancel := context.WithCancel(context.Background())
    for {
        var conn net.Conn
        conn, err = s.listener.Accept()
        if err != nil {
            // Shutdown closes the listener; that Accept error is expected
            // and reported as a clean exit.
            if atomic.LoadInt32(&s.inShutdown) == 1 {
                err = nil
            }
            break
        }
        wg.Add(1)
        go worker(conn, s.Cache, &wg, ctx)
    }
    // Signal all workers to stop and wait for them to drain.
    cancel()
    wg.Wait()
    return err
}
// Shutdown stops the server: it flags the shutdown (so Listen treats the
// resulting Accept error as a clean exit) and closes the listener to
// unblock Accept.
func (s *Server) Shutdown() {
    atomic.StoreInt32(&s.inShutdown, 1)
    s.listener.Close()
}

// Shotdown is the original, misspelled name of Shutdown, kept so existing
// callers continue to work.
//
// Deprecated: use Shutdown instead.
func (s *Server) Shotdown() {
    s.Shutdown()
}
const (
    // Fixed wire-header sizes; keep in sync with ItemHeader and
    // OperationHeader.
    ItemHeaderSize      = 10 //binary.Size(ItemHeader{})
    OperationHeaderSize = 5  // binary.Size(OperationHeader{})
)

// worker serves one client connection. The wire protocol frames every
// request and response the same way: a 1-byte opcode/status followed by a
// 4-byte big-endian payload length and the payload. The loop reads a
// request, dispatches it against the cache, writes the response, and exits
// on any I/O error or when ctx is cancelled between requests.
func worker(conn net.Conn, c cache.Cache, wg *sync.WaitGroup, ctx context.Context) {
    var (
        opHederBuf [OperationHeaderSize]byte
        oh         OperationHeader
        op         Operation
        reader     = bufio.NewReader(conn)
        writer     = bufio.NewWriter(conn)
    )
    defer func() {
        conn.Close()
        wg.Done()
    }()
    for {
        select {
        case <-ctx.Done():
            return
        default:
            // Read the fixed-size operation header.
            if _, err := io.ReadFull(reader, opHederBuf[:]); err != nil {
                return
            }
            oh.Code = OpCode(opHederBuf[0])
            oh.PayloadLen = binary.BigEndian.Uint32(opHederBuf[1:])
            // NOTE(review): PayloadLen is client-controlled and allocated
            // without an upper bound — consider enforcing a limit.
            payload := make([]byte, int(oh.PayloadLen))
            if _, err := io.ReadFull(reader, payload); err != nil {
                return
            }
            switch oh.Code {
            case OpCodeGet:
                op = get(payload, c)
            case OpCodeSet:
                op = set(payload, c)
            case OpCodeDel:
                op = del(payload, c)
            case OpCodeGetKeys:
                op = getkeys(payload, c)
            default:
                op = Operation{Code: StatusError, Payload: UnsupportedOperationMsg}
            }
            // The header buffer is reused to frame the response.
            opHederBuf[0] = byte(op.Code)
            binary.BigEndian.PutUint32(opHederBuf[1:], uint32(len(op.Payload)))
            if _, err := writer.Write(opHederBuf[:]); err != nil {
                return
            }
            if _, err := writer.Write(op.Payload); err != nil {
                return
            }
            writer.Flush()
        }
    }
}
// getkeys serializes every key currently in the cache: a big-endian uint64
// key count, then for each key a big-endian uint16 length prefix followed
// by the key bytes. The request payload is ignored.
func getkeys(_ []byte, c cache.Cache) Operation {
    var scratch [8]byte // int64
    keys := c.Keys()
    out := bytes.NewBuffer(nil)
    binary.BigEndian.PutUint64(scratch[:], uint64(len(keys)))
    out.Write(scratch[:])
    for _, key := range keys {
        binary.BigEndian.PutUint16(scratch[:], uint16(len(key)))
        out.Write(scratch[:2])
        out.WriteString(key)
    }
    return Operation{Code: StatusSuccess, Payload: out.Bytes()}
}
// del removes the key named by the payload. Deletion always reports
// success, even for keys that were not present.
func del(payload []byte, c cache.Cache) Operation {
    c.Remove(string(payload))
    return Operation{Code: StatusSuccess}
}
// get looks up the key named by the payload. Entries whose Expired value
// (compared as Unix seconds, UTC) is not strictly in the future are treated
// as missing. NOTE(review): an Expired of 0 therefore always reads as
// expired — confirm callers never intend "no expiry".
func get(payload []byte, c cache.Cache) Operation {
    key := string(payload)
    val, ok := c.Get(key)
    if !ok {
        return Operation{Code: StatusNotFound}
    }
    if time.Now().UTC().Unix() >= int64(val.Expired) {
        return Operation{Code: StatusNotFound}
    }
    return Operation{Code: StatusSuccess, Payload: EncodeItem(key, val)}
}

// set decodes an item from the payload and stores it. Items that are
// already expired are silently dropped, yet still reported as success.
func set(payload []byte, c cache.Cache) Operation {
    key, v, err := ParseItem(payload)
    if err != nil {
        return Operation{Code: StatusError, Payload: []byte(err.Error())}
    }
    if time.Now().UTC().Unix() >= int64(v.Expired) {
        return Operation{Code: StatusSuccess}
    }
    c.Add(key, v)
    return Operation{Code: StatusSuccess}
}
// EncodeItem serializes key/value into the wire layout consumed by
// ParseItem: a 10-byte big-endian header (uint32 expiry, uint16 key
// length, uint32 value length) followed by the key bytes and value bytes.
func EncodeItem(key string, value cache.Value) []byte {
    out := make([]byte, ItemHeaderSize+len(key)+len(value.Value))
    binary.BigEndian.PutUint32(out, value.Expired)
    binary.BigEndian.PutUint16(out[4:], uint16(len(key)))
    binary.BigEndian.PutUint32(out[6:], uint32(len(value.Value)))
    n := copy(out[ItemHeaderSize:], key)
    copy(out[ItemHeaderSize+n:], value.Value)
    return out
}
// ParseItem decodes an item produced by EncodeItem: a fixed-size header
// (expiry, key length, value length) followed by the key and value bytes.
//
// It returns ErrSetItemSizeTooSmall when data cannot even hold a header,
// and ErrSetItemSizeIncorrect when the payload length does not match the
// lengths declared in the header.
func ParseItem(data []byte) (key string, v cache.Value, err error) {
    if len(data) < ItemHeaderSize {
        return key, v, ErrSetItemSizeTooSmall
    }
    var h ItemHeader = ItemHeader{
        Expired:  binary.BigEndian.Uint32(data),
        KeyLen:   binary.BigEndian.Uint16(data[4:]),
        ValueLen: binary.BigEndian.Uint32(data[6:]),
    }
    data = data[ItemHeaderSize:]
    if len(data) != int(h.KeyLen)+int(h.ValueLen) {
        // FIX: a length mismatch is an "incorrect" size, not "too small";
        // use the previously-declared-but-unused sentinel for it.
        return key, v, ErrSetItemSizeIncorrect
    }
    key = string(data[:h.KeyLen])
    v.Expired = h.Expired
    v.Value = make([]byte, int(h.ValueLen))
    copy(v.Value, data[int(h.KeyLen):])
    return key, v, nil
}
// ItemHeader is the fixed 10-byte item header; see EncodeItem/ParseItem
// for the on-wire field order.
type ItemHeader struct {
    Expired  uint32
    ValueLen uint32
    KeyLen   uint16
}

// OperationHeader frames every request and response on the wire.
type OperationHeader struct {
    Code       OpCode
    PayloadLen uint32
}

// Operation is a decoded request, or a response to be written back.
type Operation struct {
    Code    OpCode
    Payload []byte
}

// OpCode identifies a request operation or a response status.
type OpCode byte

// Request opcodes.
const (
    OpCodeUnknown OpCode = iota
    OpCodeSet
    OpCodeGet
    OpCodeDel
    OpCodeGetKeys
)

// Response status codes, counting down from 0xFF to stay well clear of the
// request opcode range.
const (
    StatusSuccess OpCode = 0xFF - iota
    StatusError
    StatusNotFound
)

var (
    UnsupportedOperationMsg = []byte("unsupported operation")
    ErrSetItemSizeTooSmall  = errors.New("set item size too small")
    ErrSetItemSizeIncorrect = errors.New("set item size incorrect")
)
|
package main
import (
"context"
"errors"
"os"
"time"
"github.com/aws/aws-lambda-go/lambda"
"github.com/dghubble/go-twitter/twitter"
"github.com/po3rin/qiitter/oauth"
"github.com/po3rin/qiitter/qiita"
"golang.org/x/sync/errgroup"
)
// hash is the hashtag appended to every tweet, read from the environment.
var hash = os.Getenv("HASH_TAG")

// post fetches today's Qiita items for the configured tag and tweets every
// item created within the last hour. It is the Lambda handler body.
func post() error {
    var c = qiita.Client{
        Endpoint: "https://qiita.com/api/v2/items",
        Time:     time.Now().Format("2006-01-02"),
        Tag:      os.Getenv("TARGET_TAG"),
    }
    ch1 := make(chan *twitter.Client)
    ch2 := make(chan *[]qiita.Item)
    go oauth.Client(ch1)
    go c.GetQiitaItems(ch2)
    client := <-ch1
    items := <-ch2
    boundary := time.Now().Add(time.Duration(-1) * time.Hour).Unix()
    eg, ctx := errgroup.WithContext(context.Background())
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()
    for _, i := range *items {
        i := i // BUG FIX: capture the loop variable; previously every goroutine shared one `i`.
        eg.Go(func() error {
            select {
            case <-ctx.Done():
                return errors.New("Error Post")
            default:
                t, err := time.Parse("2006-01-02T15:04:05+09:00", i.CreatedAt)
                if err != nil {
                    // BUG FIX: previously this cancelled but fell through
                    // with the zero time and returned nil, so eg.Wait could
                    // report success despite the failure.
                    cancel()
                    return err
                }
                created := t.Add(time.Duration(-9) * time.Hour)
                createdString := t.Format("01/02 15:04")
                if created.Unix() > boundary {
                    post := createdString + "に投稿されました\n" + i.Title + "\n" + hash + "\n" + i.URL
                    if _, _, err := client.Statuses.Update(post, nil); err != nil {
                        // BUG FIX: propagate the tweet failure as well.
                        cancel()
                        return err
                    }
                }
                return nil
            }
        })
    }
    return eg.Wait()
}
// main hands the post handler to the AWS Lambda runtime.
func main() {
    lambda.Start(post)
}
|
package lyrics
// backend is the contract a lyrics provider implements: it is primed with
// an artist/title query via init, then queried for track info and lyric
// text.
type backend interface {
    init(qartist, qtitle string)
    getTrackInfo() (TrackInfo, error)
    getLyrics() (string, error)
}
|
// Copyright 2017 Jeff Foley. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package dnssrv
import (
"math/rand"
"net"
"strings"
"time"
"github.com/OWASP/Amass/amass/core"
"github.com/OWASP/Amass/amass/utils"
evbus "github.com/asaskevich/EventBus"
"github.com/miekg/dns"
)
const (
    // numOfWildcardTests is how many random probes decide whether a
    // subdomain has a DNS wildcard.
    numOfWildcardTests = 5
    maxDNSNameLen      = 253
    maxDNSLabelLen     = 63
    maxLabelLen        = 24
    // The hyphen has been removed
    ldhChars = "abcdefghijklmnopqrstuvwxyz0123456789"
)

// Names for the different types of wildcards that can be detected.
const (
    WildcardTypeNone = iota
    WildcardTypeStatic
    WildcardTypeDynamic
)

// wildcard caches the wildcard verdict (and, for static wildcards, the
// canonical answers) for one subdomain.
type wildcard struct {
    WildcardType int
    Answers      []core.DNSAnswer
}

// wildcardRequest asks the wildcard goroutine to classify a request; the
// verdict is delivered on WildcardType.
type wildcardRequest struct {
    Request      *core.AmassRequest
    WildcardType chan int
}

var (
    // InitialQueryTypes include the DNS record types that are
    // initially requested for a discovered name
    InitialQueryTypes = []string{
        "TXT",
        "CNAME",
        "A",
        "AAAA",
    }
    // badSubnets are CIDR blocks whose A records are treated as bogus.
    badSubnets = []string{
        "198.105.244.0/24",
        "198.105.254.0/24",
        "88.204.137.0/24",
    }
)

// DNSService is the AmassService that handles all DNS name resolution requests within
// the architecture. This is achieved by receiving all the DNSQUERY and DNSSWEEP events.
type DNSService struct {
    core.BaseAmassService
    bus evbus.Bus
    // Ensures we do not resolve names more than once
    filter           *utils.StringFilter
    wildcards        map[string]*wildcard
    wildcardRequests chan wildcardRequest
    cidrBlacklist    []*net.IPNet
}
// NewDNSService requires the enumeration configuration and event bus as parameters.
// The object returned is initialized, but has not yet been started.
func NewDNSService(config *core.AmassConfig, bus evbus.Bus) *DNSService {
    ds := &DNSService{
        bus:              bus,
        filter:           utils.NewStringFilter(),
        wildcards:        make(map[string]*wildcard),
        wildcardRequests: make(chan wildcardRequest),
    }
    // Pre-parse the known-bad subnets; unparseable entries are skipped.
    for _, n := range badSubnets {
        if _, ipnet, err := net.ParseCIDR(n); err == nil {
            ds.cidrBlacklist = append(ds.cidrBlacklist, ipnet)
        }
    }
    ds.BaseAmassService = *core.NewBaseAmassService("DNS Service", config, ds)
    return ds
}

// OnStart implements the AmassService interface
func (ds *DNSService) OnStart() error {
    ds.BaseAmassService.OnStart()
    ds.bus.SubscribeAsync(core.NEWSUB, ds.newSubdomain, false)
    ds.bus.SubscribeAsync(core.DNSQUERY, ds.addRequest, false)
    ds.bus.SubscribeAsync(core.DNSSWEEP, ds.reverseDNSSweep, false)
    go ds.processRequests()
    go ds.processWildcardRequests()
    return nil
}

// OnStop implements the AmassService interface
func (ds *DNSService) OnStop() error {
    ds.BaseAmassService.OnStop()
    ds.bus.Unsubscribe(core.NEWSUB, ds.newSubdomain)
    ds.bus.Unsubscribe(core.DNSQUERY, ds.addRequest)
    ds.bus.Unsubscribe(core.DNSSWEEP, ds.reverseDNSSweep)
    return nil
}
// addRequest queues a resolution request unless it is malformed, already
// seen, blacklisted, or (for untrusted sources) covered by a dynamic DNS
// wildcard. Rejected requests release their flow-control slot.
func (ds *DNSService) addRequest(req *core.AmassRequest) {
    reject := req == nil || req.Name == "" || req.Domain == ""
    if !reject {
        // Duplicate() also records the name, so it must run in this order.
        reject = ds.filter.Duplicate(req.Name) || ds.Config().Blacklisted(req.Name)
    }
    if !reject && !core.TrustedTag(req.Tag) {
        reject = ds.GetWildcardType(req) == WildcardTypeDynamic
    }
    if reject {
        ds.Config().MaxFlow.Release(1)
        return
    }
    ds.SendRequest(req)
}
// sendResolved publishes a resolved request on the bus, discarding
// untrusted names that match a DNS wildcard.
func (ds *DNSService) sendResolved(req *core.AmassRequest) {
    if !core.TrustedTag(req.Tag) && ds.MatchesWildcard(req) {
        return
    }
    ds.bus.Publish(core.RESOLVED, req)
}

// processRequests is the service's main loop: it honors pause/resume/quit
// signals and otherwise drains queued requests, performing each in its own
// goroutine under the connection and flow limits.
func (ds *DNSService) processRequests() {
    var paused bool
    for {
        select {
        case <-ds.PauseChan():
            paused = true
        case <-ds.ResumeChan():
            paused = false
        case <-ds.Quit():
            return
        default:
            if paused {
                time.Sleep(time.Second)
                continue
            }
            if req := ds.NextRequest(); req != nil {
                // Reserve one connection per initial query type; the flow
                // slot taken in addRequest is returned here.
                core.MaxConnections.Acquire(len(InitialQueryTypes))
                ds.Config().MaxFlow.Release(1)
                go ds.performRequest(req)
            } else {
                // Idle backoff while the queue is empty.
                time.Sleep(100 * time.Millisecond)
            }
        }
    }
}
// performRequest resolves req.Name for each initial query type, keeps the
// answers that pass the blacklist check, and publishes the request when
// anything resolved.
func (ds *DNSService) performRequest(req *core.AmassRequest) {
    defer core.MaxConnections.Release(len(InitialQueryTypes))
    ds.SetActive()
    var answers []core.DNSAnswer
    for _, t := range InitialQueryTypes {
        if a, err := Resolve(req.Name, t); err == nil {
            if ds.goodDNSRecords(a) {
                answers = append(answers, a...)
            }
            // Do not continue if a CNAME was discovered
            if t == "CNAME" {
                break
            }
        } else {
            ds.Config().Log.Print(err)
        }
    }
    ds.SetActive()
    req.Records = answers
    if len(req.Records) == 0 {
        return
    }
    go ds.sendResolved(req)
}

// goodDNSRecords reports false when any A record falls inside one of the
// blacklisted subnets; all other record types are ignored.
func (ds *DNSService) goodDNSRecords(records []core.DNSAnswer) bool {
    for _, r := range records {
        if r.Type != int(dns.TypeA) {
            continue
        }
        for _, cidr := range ds.cidrBlacklist {
            if cidr.Contains(net.ParseIP(r.Data)) {
                return false
            }
        }
    }
    return true
}

// newSubdomain reacts to the first sighting of a subdomain by running the
// basic record queries and the SRV service-name sweep.
func (ds *DNSService) newSubdomain(req *core.AmassRequest, times int) {
    if times != 1 {
        return
    }
    ds.basicQueries(req.Name, req.Domain)
    go ds.queryServiceNames(req.Name, req.Domain)
}
// basicQueries gathers NS, MX, SOA and SPF records for subdomain and, when
// anything was found, publishes them as a resolved request. In active mode
// each discovered NS also triggers a zone-transfer attempt.
func (ds *DNSService) basicQueries(subdomain, domain string) {
    var answers []core.DNSAnswer
    core.MaxConnections.Acquire(4)
    defer core.MaxConnections.Release(4)
    // Obtain the DNS answers for the NS records related to the domain
    if ans, err := Resolve(subdomain, "NS"); err == nil {
        for _, a := range ans {
            // The NS answer data is comma-separated; the server name is the
            // last field.
            pieces := strings.Split(a.Data, ",")
            a.Data = pieces[len(pieces)-1]
            if ds.Config().Active {
                go ds.attemptZoneXFR(subdomain, domain, a.Data)
            }
            answers = append(answers, a)
        }
    } else {
        ds.Config().Log.Printf("DNS NS record query error: %s: %v", subdomain, err)
    }
    // Obtain the DNS answers for the MX records related to the domain.
    // CONSISTENCY FIX: use the append-spread form like the SOA/SPF cases
    // below instead of an element-by-element loop.
    if ans, err := Resolve(subdomain, "MX"); err == nil {
        answers = append(answers, ans...)
    } else {
        ds.Config().Log.Printf("DNS MX record query error: %s: %v", subdomain, err)
    }
    // Obtain the DNS answers for the SOA records related to the domain
    if ans, err := Resolve(subdomain, "SOA"); err == nil {
        answers = append(answers, ans...)
    } else {
        ds.Config().Log.Printf("DNS SOA record query error: %s: %v", subdomain, err)
    }
    // Obtain the DNS answers for the SPF records related to the domain
    if ans, err := Resolve(subdomain, "SPF"); err == nil {
        answers = append(answers, ans...)
    } else {
        ds.Config().Log.Printf("DNS SPF record query error: %s: %v", subdomain, err)
    }
    if len(answers) > 0 {
        ds.sendResolved(&core.AmassRequest{
            Name:    subdomain,
            Domain:  domain,
            Records: answers,
            Tag:     core.DNS,
            Source:  "Forward DNS",
        })
    }
}
// attemptZoneXFR tries a DNS zone transfer against server and queues every
// name it yields; failures are only logged.
func (ds *DNSService) attemptZoneXFR(sub, domain, server string) {
    core.MaxConnections.Acquire(1)
    defer core.MaxConnections.Release(1)
    if names, err := ZoneTransfer(sub, domain, server); err == nil {
        for _, name := range names {
            ds.SendRequest(&core.AmassRequest{
                Name:   name,
                Domain: domain,
                Tag:    core.AXFR,
                Source: "DNS Zone XFR",
            })
        }
    } else {
        ds.Config().Log.Printf("DNS zone xfr failed: %s: %v", sub, err)
    }
}

// queryServiceNames probes the well-known SRV record names beneath
// subdomain and publishes any that resolve.
func (ds *DNSService) queryServiceNames(subdomain, domain string) {
    // Check all the popular SRV records
    for _, name := range popularSRVRecords {
        srvName := name + "." + subdomain
        if ds.filter.Duplicate(srvName) {
            continue
        }
        core.MaxConnections.Acquire(1)
        if a, err := Resolve(srvName, "SRV"); err == nil {
            ds.sendResolved(&core.AmassRequest{
                Name:    srvName,
                Domain:  domain,
                Records: a,
                Tag:     core.DNS,
                Source:  "Forward DNS",
            })
        }
        core.MaxConnections.Release(1)
    }
}
// reverseDNSSweep reverse-resolves IP addresses near addr within cidr —
// a larger neighborhood when active scanning is enabled.
func (ds *DNSService) reverseDNSSweep(addr string, cidr *net.IPNet) {
    var ips []net.IP
    // Get a subset of nearby IP addresses
    if ds.Config().Active {
        ips = utils.CIDRSubset(cidr, addr, 500)
    } else {
        ips = utils.CIDRSubset(cidr, addr, 100)
    }
    for _, ip := range ips {
        a := ip.String()
        if ds.filter.Duplicate(a) {
            continue
        }
        // The connection slot is released by reverseDNSRoutine.
        core.MaxConnections.Acquire(1)
        go ds.reverseDNSRoutine(a)
    }
}

// reverseDNSRoutine performs one reverse lookup and publishes the PTR
// result when the answer belongs to a domain in scope.
func (ds *DNSService) reverseDNSRoutine(ip string) {
    defer core.MaxConnections.Release(1)
    ds.SetActive()
    ptr, answer, err := Reverse(ip)
    if err != nil {
        return
    }
    // Drop answers outside the enumeration's configured domains.
    domain := ds.Config().WhichDomain(answer)
    if domain == "" {
        return
    }
    ds.sendResolved(&core.AmassRequest{
        Name:   ptr,
        Domain: domain,
        Records: []core.DNSAnswer{{
            Name: ptr,
            Type: 12, // PTR record type code
            TTL:  0,
            Data: answer,
        }},
        Tag:    core.DNS,
        Source: "Reverse DNS",
    })
}
// MatchesWildcard reports whether the provided request resolved to a DNS
// wildcard of any type.
func (ds *DNSService) MatchesWildcard(req *core.AmassRequest) bool {
    verdict := make(chan int)
    ds.wildcardRequests <- wildcardRequest{
        Request:      req,
        WildcardType: verdict,
    }
    return <-verdict != WildcardTypeNone
}
// GetWildcardType returns the DNS wildcard type for the provided subdomain name.
func (ds *DNSService) GetWildcardType(req *core.AmassRequest) int {
    res := make(chan int)
    ds.wildcardRequests <- wildcardRequest{
        Request:      req,
        WildcardType: res,
    }
    return <-res
}

// processWildcardRequests serializes all wildcard classification on one
// goroutine, which is what makes the unsynchronized wildcards map safe.
func (ds *DNSService) processWildcardRequests() {
    for {
        select {
        case <-ds.Quit():
            return
        case r := <-ds.wildcardRequests:
            r.WildcardType <- ds.performWildcardRequest(r.Request)
        }
    }
}
// performWildcardRequest classifies req against every parent label between
// its name and the registered domain. Dynamic wildcards match outright;
// static wildcards match when the request has no records or shares answers
// with the cached wildcard responses.
func (ds *DNSService) performWildcardRequest(req *core.AmassRequest) int {
    base := len(strings.Split(req.Domain, "."))
    labels := strings.Split(req.Name, ".")
    for i := len(labels) - base; i > 0; i-- {
        sub := strings.Join(labels[i:], ".")
        w := ds.getWildcard(sub)
        if w.WildcardType == WildcardTypeDynamic {
            return WildcardTypeDynamic
        } else if w.WildcardType == WildcardTypeStatic {
            if len(req.Records) == 0 {
                return WildcardTypeStatic
            } else if ds.compareAnswers(req.Records, w.Answers) {
                return WildcardTypeStatic
            }
        }
    }
    return WildcardTypeNone
}

// getWildcard returns the cached wildcard verdict for sub, probing it
// first when unseen: numOfWildcardTests random names are resolved; if any
// probe gets no answer there is no wildcard, if all probes agree it is
// static, otherwise dynamic. Only called from the wildcard goroutine.
func (ds *DNSService) getWildcard(sub string) *wildcard {
    entry, found := ds.wildcards[sub]
    if !found {
        entry = &wildcard{
            WildcardType: WildcardTypeNone,
            Answers:      nil,
        }
        ds.wildcards[sub] = entry
        // Query multiple times with unlikely names against this subdomain
        set := make([][]core.DNSAnswer, numOfWildcardTests)
        for i := 0; i < numOfWildcardTests; i++ {
            a := ds.wildcardTestResults(sub)
            if a == nil {
                ds.Config().Log.Printf("%s has no DNS wildcard", sub)
                return entry
            }
            set[i] = a
            time.Sleep(time.Second)
        }
        // Check if we have a static DNS wildcard
        match := true
        for i := 0; i < numOfWildcardTests-1; i++ {
            if !ds.compareAnswers(set[i], set[i+1]) {
                match = false
                break
            }
        }
        if match {
            entry.WildcardType = WildcardTypeStatic
            entry.Answers = set[0]
            ds.Config().Log.Printf("%s has a static DNS wildcard: %v", sub, set[0])
        } else {
            entry.WildcardType = WildcardTypeDynamic
            ds.Config().Log.Printf("%s has a dynamic DNS wildcard", sub)
        }
        ds.wildcards[sub] = entry
    }
    return entry
}
// compareAnswers reports whether any record in ans1 shares its data
// (case-insensitively) with any record in ans2.
func (ds *DNSService) compareAnswers(ans1, ans2 []core.DNSAnswer) bool {
    for _, first := range ans1 {
        for _, second := range ans2 {
            if strings.EqualFold(first.Data, second.Data) {
                return true
            }
        }
    }
    return false
}
// wildcardTestResults resolves one random, unlikely name under sub and
// returns the combined CNAME/A/AAAA answers, or nil when nothing resolves
// (i.e. no wildcard behavior was observed for this probe).
func (ds *DNSService) wildcardTestResults(sub string) []core.DNSAnswer {
    var answers []core.DNSAnswer
    name := UnlikelyName(sub)
    if name == "" {
        return nil
    }
    // Check if the name resolves
    core.MaxConnections.Acquire(3)
    if a, err := Resolve(name, "CNAME"); err == nil {
        answers = append(answers, a...)
    }
    if a, err := Resolve(name, "A"); err == nil {
        answers = append(answers, a...)
    }
    if a, err := Resolve(name, "AAAA"); err == nil {
        answers = append(answers, a...)
    }
    core.MaxConnections.Release(3)
    if len(answers) == 0 {
        return nil
    }
    return answers
}
// UnlikelyName returns a random DNS name directly beneath sub that is
// unlikely to exist, for probing wildcard behavior. It returns "" when sub
// is already too long to allow even a one-character label.
func UnlikelyName(sub string) string {
    alphabet := []rune(ldhChars)
    alphaLen := len(alphabet)
    // Longest label that still keeps the whole name within the DNS limit,
    // capped at maxLabelLen.
    limit := maxDNSNameLen - (len(sub) + 1)
    if limit > maxLabelLen {
        limit = maxLabelLen
    } else if limit < 1 {
        return ""
    }
    // Shuffle our LDH characters.
    rand.Shuffle(alphaLen, func(i, j int) {
        alphabet[i], alphabet[j] = alphabet[j], alphabet[i]
    })
    length := (rand.Int() % limit) + 1
    var label []rune
    for i := 0; i < length; i++ {
        pick := rand.Int() % alphaLen
        // Neither the first nor the last character may be a hyphen.
        if (i == 0 || i == length-1) && alphabet[pick] == '-' {
            continue
        }
        label = append(label, alphabet[pick])
    }
    if len(label) == 0 {
        return ""
    }
    return string(label) + "." + sub
}
|
package tanggal
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
)
// Format names one output component that Tanggal.Format can render.
type Format string

// Timezone names a supported Indonesian timezone.
type Timezone string

const (
    Hari               Format = "hari"
    NamaHari           Format = "namaHari"
    NamaHariDenganKoma Format = "namaHariDenganKoma"
    // NOTE(review): Minggu and NamaMinggu are declared but Tanggal has no
    // matching fields and Format has no case for them — confirm intent.
    Minggu           Format = "minggu"
    NamaMinggu       Format = "namaMinggu"
    Bulan            Format = "bulan"
    NamaBulan        Format = "namaBulan"
    Tahun            Format = "tahun"
    Pukul            Format = "pukul"
    PukulDenganDetik Format = "pukulDenganDetik"
    Lokasi           Format = "lokasi"
    LokasiDenganKoma Format = "lokasiDenganKoma"
    ZonaWaktu        Format = "zonaWaktu"
)

// The supported timezone values; NONE means no offset is applied.
const WIB, WITA, WIT, NONE Timezone = "WIB", "WITA", "WIT", "NONE"

// Tanggal holds the pre-rendered Indonesian components of one instant.
type Tanggal struct {
    Hari               uint8
    NamaHari           string
    NamaHariDenganKoma string
    Bulan              uint8
    NamaBulan          string
    Tahun              uint64
    Pukul              string
    PukulDenganDetik   string
    Lokasi             string
    LokasiDenganKoma   string
    Timezone           string
}
// Format renders the requested components in order, joined by separator.
// Location components are skipped when Lokasi is empty, the timezone when
// it is "NONE", and unrecognized formats (including Minggu/NamaMinggu,
// which have no case here) are silently dropped.
func (t Tanggal) Format(separator string, formats []Format) string {
    var f []string
    for _, v := range formats {
        switch v {
        case Hari:
            f = append(f, strconv.FormatUint(uint64(t.Hari), 10))
        case NamaHari:
            f = append(f, t.NamaHari)
        case NamaHariDenganKoma:
            f = append(f, t.NamaHariDenganKoma)
        case Bulan:
            f = append(f, strconv.FormatUint(uint64(t.Bulan), 10))
        case NamaBulan:
            f = append(f, t.NamaBulan)
        case Tahun:
            f = append(f, strconv.FormatUint(t.Tahun, 10))
        case Pukul:
            f = append(f, t.Pukul)
        case PukulDenganDetik:
            f = append(f, t.PukulDenganDetik)
        case Lokasi:
            if t.Lokasi != "" {
                f = append(f, t.Lokasi)
            }
        case LokasiDenganKoma:
            if t.Lokasi != "" {
                f = append(f, t.LokasiDenganKoma)
            }
        case ZonaWaktu:
            if t.Timezone != "NONE" {
                f = append(f, t.Timezone)
            }
        }
    }
    return strings.Join(f, separator)
}
// Papar expands the given Time into a populated Tanggal struct, shifted
// into the requested Indonesian timezone.
//
// When lokasi is the empty string, string formatting skips the Lokasi and
// LokasiDenganKoma components.
func Papar(t time.Time, lokasi string, tz Timezone) (Tanggal, error) {
    add, err := parseTimeZone(tz)
    if err != nil {
        return Tanggal{}, err
    }
    // Shift the instant into the target timezone once and derive every
    // component from the shifted time.
    adjusted := t.UTC().Add(time.Hour * add)
    now := adjusted.String()
    year, _ := strconv.ParseUint(now[0:4], 10, 0)
    month, _ := strconv.ParseUint(now[5:7], 10, 0)
    day, _ := strconv.ParseUint(now[8:10], 10, 0)
    clock := now[11:19]
    shortClock := now[11:16]
    // BUG FIX: the weekday name must come from the timezone-adjusted time.
    // It previously used the original t, so instants whose date rolls over
    // after the offset (e.g. 23:00 UTC with WIB's +7h) reported the next
    // day's date with the previous day's name.
    namaHari := cariNamaHari(adjusted)
    return Tanggal{
        Hari:               uint8(day),
        NamaHari:           namaHari,
        Bulan:              uint8(month),
        NamaBulan:          cariNamaBulan(uint8(month)),
        Tahun:              year,
        PukulDenganDetik:   clock,
        Pukul:              shortClock,
        Lokasi:             lokasi,
        LokasiDenganKoma:   lokasi + ",",
        NamaHariDenganKoma: namaHari + ",",
        Timezone:           string(tz),
    }, nil
}
func cariNamaHari(t time.Time) string {
switch int(t.Weekday()) {
case 0:
return "Minggu"
case 1:
return "Senin"
case 2:
return "Selasa"
case 3:
return "Rabu"
case 4:
return "Kamis"
case 5:
return "Jumat"
case 6:
return "Sabtu"
}
return ""
}
// cariNamaBulan returns the Indonesian month name for m (1-12), or "" for
// any other value.
func cariNamaBulan(m uint8) string {
    names := [...]string{
        "Januari", "Februari", "Maret", "April", "Mei", "Juni",
        "Juli", "Agustus", "September", "Oktober", "November", "Desember",
    }
    if m >= 1 && m <= 12 {
        return names[m-1]
    }
    return ""
}
// parseTimeZone maps a supported Indonesian timezone to its UTC offset in
// hours, expressed as a Duration multiplier for time.Hour (WIB=+7,
// WITA=+8, WIT=+9, NONE=0). Unsupported values yield an error.
func parseTimeZone(tz Timezone) (time.Duration, error) {
    switch tz {
    case WIB:
        return time.Duration(7), nil
    case WITA:
        return time.Duration(8), nil
    case WIT:
        return time.Duration(9), nil
    case NONE:
        return 0, nil
    default:
        // FIX: fmt.Errorf replaces the errors.New(fmt.Sprintf(...))
        // anti-pattern flagged by go vet / staticcheck (S1028).
        return 0, fmt.Errorf("Failed to parse timezone. %s is not supported", tz)
    }
}
|
package json
import (
"io"
"testing"
"github.com/polydawn/refmt/tok/fixtures"
)
// testArray exercises JSON array handling: canonical encode/decode for the
// shared token-sequence fixtures, tolerance of surrounding whitespace when
// decoding, and rejection of an unterminated array as io.EOF.
func testArray(t *testing.T) {
    t.Run("empty array", func(t *testing.T) {
        seq := fixtures.SequenceMap["empty array"]
        checkCanonical(t, seq, `[]`)
        t.Run("decode with extra whitespace", func(t *testing.T) {
            checkDecoding(t, seq, ` [ ] `, nil)
        })
    })
    t.Run("single entry array", func(t *testing.T) {
        seq := fixtures.SequenceMap["single entry array"]
        checkCanonical(t, seq, `["value"]`)
        t.Run("decode with extra whitespace", func(t *testing.T) {
            checkDecoding(t, seq, ` [ "value" ] `, nil)
        })
    })
    t.Run("duo entry array", func(t *testing.T) {
        seq := fixtures.SequenceMap["duo entry array"]
        checkCanonical(t, seq, `["value","v2"]`)
    })
    t.Run("reject dangling arr open", func(t *testing.T) {
        seq := fixtures.SequenceMap["dangling arr open"]
        checkDecoding(t, seq, `[`, io.EOF)
    })
}
|
package hooks
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"sync"
"time"
"github.com/samkreter/go-core/log"
"github.com/sirupsen/logrus"
)
const (
    timeFormat               = "2006-01-02T15:04:05.000Z07:00"
    channelBufferSize        = 1024
    defaultHTTPClientTimeout = time.Second * 30
    defaultFlushInterval     = time.Second * 30
)

// defaultLevels are the logrus levels the hook fires for when the config
// does not specify any.
var defaultLevels = []logrus.Level{
    logrus.PanicLevel,
    logrus.FatalLevel,
    logrus.ErrorLevel,
    logrus.WarnLevel,
    logrus.InfoLevel,
}

// LoggingHubEntriesReq request for the log entries
type LoggingHubEntriesReq struct {
    Senders []string           `json:"senders"`
    Entries []*LoggingHubEntry `json:"entries"`
}

// LoggingHubEntry logging hub log entry
type LoggingHubEntry struct {
    Log    string            `json:"log"`
    Time   string            `json:"time"`
    Level  string            `json:"level"`
    Fields map[string]string `json:"fields"`
}

// LoggingHubHook logrus hook for the logging agent
type LoggingHubHook struct {
    config       Config
    levels       []logrus.Level
    pendingLogs  *pendingLogs
    channel      chan *LoggingHubEntry
    ignoreFields map[string]struct{}
    filters      map[string]func(interface{}) interface{}
}

// Config configuration for the logging agent hook
type Config struct {
    LoggingHubURL       string
    Senders             []string
    LogLevels           []logrus.Level
    DefaultIgnoreFields map[string]struct{}
    DefaultFilters      map[string]func(interface{}) interface{}
    BatchSizeInLines    int
    RequestSizeLimit    int
    FlushInterval       time.Duration
}

// pendingLogs allows for thread safe access to the slice of logs
type pendingLogs struct {
    sync.Mutex
    items []*LoggingHubEntry
    // totalSize accumulates the byte size of the pending items.
    totalSize int
}
// appendLog adds entry to the buffer under the lock and returns the new
// entry count together with the accumulated byte-size estimate.
func (pl *pendingLogs) appendLog(entry *LoggingHubEntry) (int, int) {
	pl.Lock()
	defer pl.Unlock()
	pl.items = append(pl.items, entry)
	pl.totalSize += getSize(entry)
	return len(pl.items), pl.totalSize
}
// flush atomically drains the buffer: it takes ownership of all pending
// entries, resets the buffer and its size counter, and returns the entries.
func (pl *pendingLogs) flush() []*LoggingHubEntry {
	pl.Lock()
	defer pl.Unlock()
	entries := pl.items
	pl.items = []*LoggingHubEntry{}
	// Bug fix: totalSize was never reset before, so the size estimate grew
	// monotonically across flushes and could never be used for batching.
	pl.totalSize = 0
	return entries
}
// NewLoggingHubHook creates a new logging agent hook targeting loggingHubURL
// with the default flush interval.
func NewLoggingHubHook(loggingHubURL string, senders []string) (*LoggingHubHook, error) {
	conf := Config{
		LoggingHubURL: loggingHubURL,
		Senders:       senders,
		FlushInterval: defaultFlushInterval,
	}
	return NewWithConfig(conf)
}
// NewWithConfig returns initialized logrus hook by config setting. It
// validates the config, applies defaults (levels, flush interval) and starts
// the background Run loop that batches and ships entries.
func NewWithConfig(conf Config) (*LoggingHubHook, error) {
	if conf.LoggingHubURL == "" {
		// Fixed error-message typo ("can no be").
		return nil, fmt.Errorf("loggingHubURL can not be empty")
	}
	if len(conf.Senders) == 0 {
		return nil, fmt.Errorf("configuration must have at least one sender")
	}
	if conf.FlushInterval == 0 {
		conf.FlushInterval = defaultFlushInterval
	}
	hook := &LoggingHubHook{
		config:       conf,
		levels:       conf.LogLevels,
		ignoreFields: make(map[string]struct{}),
		filters:      make(map[string]func(interface{}) interface{}),
		channel:      make(chan *LoggingHubEntry, channelBufferSize),
		pendingLogs:  &pendingLogs{},
	}
	// set default values
	if len(hook.levels) == 0 {
		hook.levels = defaultLevels
	}
	for k, v := range conf.DefaultIgnoreFields {
		hook.ignoreFields[k] = v
	}
	for k, v := range conf.DefaultFilters {
		hook.filters[k] = v
	}
	// NOTE(review): this goroutine has no shutdown mechanism; the hook and
	// its ticker live for the remainder of the process.
	go hook.Run()
	return hook, nil
}
// Levels returns logging level to fire this hook.
func (hook *LoggingHubHook) Levels() []logrus.Level {
	return hook.levels
}

// SetLevels sets logging level to fire this hook.
// NOTE(review): not synchronized; calling this while logrus is firing the
// hook concurrently is a data race — confirm callers set levels up front.
func (hook *LoggingHubHook) SetLevels(levels []logrus.Level) {
	hook.levels = levels
}

// AddIgnore adds field name to ignore. Entries with this key in their logrus
// data will have the key dropped by Fire.
func (hook *LoggingHubHook) AddIgnore(name string) {
	hook.ignoreFields[name] = struct{}{}
}

// AddFilter adds a custom filter function applied by Fire to the value of
// the named field (replacing the default formatData conversion).
func (hook *LoggingHubHook) AddFilter(name string, fn func(interface{}) interface{}) {
	hook.filters[name] = fn
}
// Fire is invoked by logrus for each matching entry; it converts the entry
// into a LoggingHubEntry and queues it for asynchronous delivery.
func (hook *LoggingHubHook) Fire(entry *logrus.Entry) error {
	hubEntry := &LoggingHubEntry{
		Log:    entry.Message,
		Level:  entry.Level.String(),
		Time:   entry.Time.UTC().Format(timeFormat),
		Fields: make(map[string]string),
	}
	for key, value := range entry.Data {
		if _, skip := hook.ignoreFields[key]; skip {
			continue
		}
		if filter, ok := hook.filters[key]; ok {
			value = filter(value)
		} else {
			value = formatData(value)
		}
		hubEntry.Fields[key] = fmt.Sprintf("%v", value)
	}
	hook.channel <- hubEntry
	return nil
}
// Run handles time based operations: it drains the hook's channel into the
// pending buffer and flushes either when the configured batch size is
// reached or when the flush interval elapses.
// NOTE(review): runs forever — there is no shutdown signal and the ticker is
// never stopped.
func (hook *LoggingHubHook) Run() {
	ticker := time.NewTicker(hook.config.FlushInterval)
	for {
		select {
		case logEntry := <-hook.channel:
			numRecords, _ := hook.pendingLogs.appendLog(logEntry)
			//TODO(sakreter): Add totalSize check
			// NOTE(review): if BatchSizeInLines is 0 (unset), numRecords >= 0
			// always holds, so every single entry triggers an immediate flush.
			if numRecords >= hook.config.BatchSizeInLines {
				hook.Flush()
			}
		case <-ticker.C:
			hook.Flush()
		}
	}
}
// Flush flushes the logs: drains the pending buffer, marshals the entries
// into a LoggingHubEntriesReq and POSTs them to the logging hub. Errors are
// logged; the drained entries are dropped on failure.
func (hook *LoggingHubHook) Flush() {
	entries := hook.pendingLogs.flush()
	if len(entries) == 0 {
		return
	}
	b, err := json.Marshal(LoggingHubEntriesReq{
		Senders: hook.config.Senders,
		Entries: entries,
	})
	if err != nil {
		// Bug fix: this error was previously ignored, so a marshal failure
		// would POST an empty/garbage body.
		log.G(context.TODO()).WithError(err).Error("Error marshaling logs")
		return
	}
	client := &http.Client{
		Timeout: defaultHTTPClientTimeout,
	}
	resp, err := client.Post(hook.config.LoggingHubURL, "application/json", bytes.NewBuffer(b))
	if err != nil {
		// Bug fix: execution previously fell through to resp.StatusCode,
		// panicking on the nil response returned with a transport error.
		log.G(context.TODO()).WithError(err).Error("Error flushing logs")
		return
	}
	// Bug fix: the body was never closed on the success path, leaking the
	// connection. getHTTPErrorMsg also closes the body on the error path;
	// double-closing an http response body is safe.
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		log.G(context.TODO()).WithFields(
			logrus.Fields{
				"statusCode": resp.StatusCode,
				"error":      getHTTPErrorMsg(resp),
			},
		).Errorf("Error posting logs")
	}
}
// getHTTPErrorMsg reads the response body (closing it) and returns it as a
// string; it returns "" when the body cannot be read.
func getHTTPErrorMsg(resp *http.Response) string {
	defer resp.Body.Close()
	body, readErr := ioutil.ReadAll(resp.Body)
	if readErr != nil {
		log.G(context.TODO()).WithError(readErr).Error("Failed to parse response body")
		return ""
	}
	return string(body)
}
// getSize approximates an entry's wire size as the sum of its string field
// lengths (the Fields map is not yet counted).
//TODO(sakreter): Add fields to size requirement
func getSize(entry *LoggingHubEntry) int {
	return len(entry.Log) + len(entry.Time) + len(entry.Level)
}
// formatData normalizes a logrus field value for stringification:
// json.Marshaler values pass through untouched, errors become their Error()
// text, Stringers become their String() text, everything else is returned
// as-is.
func formatData(value interface{}) (formatted interface{}) {
	if _, ok := value.(json.Marshaler); ok {
		return value
	}
	if err, ok := value.(error); ok {
		return err.Error()
	}
	if str, ok := value.(fmt.Stringer); ok {
		return str.String()
	}
	return value
}
|
//go:generate mockgen -destination=./mock/storage_mock.go github.com/nomkhonwaan/myblog/pkg/storage Storage
package storage
import (
"context"
"io"
)
// Storage uses to storing or retrieving file from cloud or remote server.
// Implementations delete, download (caller closes the returned ReadCloser)
// or upload a file identified by its path.
type Storage interface {
	Delete(ctx context.Context, path string) error
	Download(ctx context.Context, path string) (io.ReadCloser, error)
	Upload(ctx context.Context, body io.Reader, path string) error
}
|
package main
import "fmt"
// T1 is an empty struct used to demonstrate method promotion with aliases.
type T1 struct {
}

// T3 is a type alias of T1 (not a new defined type), so methods declared on
// T3 are really declared on T1.
type T3 = T1

// say is declared on T1 and therefore also reachable through the T3 alias.
func (t1 T1) say() {
}

// S embeds the same underlying type twice (T1 and its alias T3), which makes
// promoted selectors such as s.say ambiguous per the Go spec.
type S struct {
	T1
	T3
}
// greeting prints a fixed marker string and always returns 0. Because T3 is
// an alias of T1, this method actually belongs to T1.
func (t3 T3) greeting() (i int) {
	fmt.Println("xxx.txt")
	return 0
}
// main demonstrates calling a method promoted through an embedded field.
func main() {
	var s S
	// Bug fix: s.say() does not compile — because T3 is an alias of T1, say
	// is promoted into S through both embedded fields at the same depth, and
	// the Go spec rejects such an ambiguous selector. Select the embedded
	// field explicitly instead.
	s.T1.say()
	//var t1 T1
	//var t3 T3
	//
	//t1.say()
	//t1.greeting()
	//
	//
	//t3.say()
	//t3.greeting()
}
|
package solutions
/*
* @lc app=leetcode id=8 lang=golang
*
* [8] String to Integer (atoi)
*/
/**
Note: Really disappointed with test cases come with this question.
Wasting many hours to handle all the weird edge case but you can't just send a format error.
Worst LeetCode experience ever.
I should have checked thumbs up/down first.
*/
// @lc code=start
// myAtoi implements the classic atoi contract:
//  1. skip leading spaces,
//  2. read one optional '+' or '-',
//  3. read consecutive digits until the first non-digit,
//  4. clamp the result into the signed 32-bit range [-2147483648, 2147483647].
// Any other leading character, or no digits at all, yields 0.
//
// This replaces the original right-to-left scan, which mishandled inputs
// where whitespace separated the sign from the digits (e.g. "+  413"
// returned 413 instead of 0) and was far harder to reason about.
func myAtoi(s string) int {
	const (
		intMax = 2147483647
		intMin = -2147483648
	)
	i, n := 0, len(s)
	// 1. leading whitespace
	for i < n && s[i] == ' ' {
		i++
	}
	// 2. optional single sign
	sign := 1
	if i < n && (s[i] == '+' || s[i] == '-') {
		if s[i] == '-' {
			sign = -1
		}
		i++
	}
	// 3. digits, clamping before any overflow can occur (the guard keeps
	// result*10+digit <= intMax, so this is safe even on 32-bit ints).
	result := 0
	for ; i < n && s[i] >= '0' && s[i] <= '9'; i++ {
		digit := int(s[i] - '0')
		if result > (intMax-digit)/10 {
			if sign < 0 {
				return intMin
			}
			return intMax
		}
		result = result*10 + digit
	}
	return sign * result
}
// @lc code=end
|
package messenger
type (
	// Field represents a field in facebook graph API
	Field string
	// Fields is a []Field
	Fields []Field
)

// Stringify converts Fields to []string.
// It preallocates the result for non-empty input (the original grew the
// slice with repeated appends) while preserving the previous behavior of
// returning nil for an empty receiver.
func (f Fields) Stringify() []string {
	if len(f) == 0 {
		return nil
	}
	ret := make([]string, len(f))
	for i, field := range f {
		ret[i] = string(field)
	}
	return ret
}
// Available fields
// https://developers.facebook.com/docs/messenger-platform/identity/user-profile
const (
	Name Field = "name"
	FirstName Field = "first_name"
	LastName Field = "last_name"
	ProfilePicture Field = "profile_pic"
	Locale Field = "locale"
	Timezone Field = "timezone"
	Gender Field = "gender"
)

// Profile struct holds data associated with Facebook profile. Field names
// mirror the graph API field constants above; everything except the name
// parts is omitted from JSON when empty.
type Profile struct {
	Name string `json:"name"`
	FirstName string `json:"first_name"`
	LastName string `json:"last_name"`
	ProfilePicture string `json:"profile_pic,omitempty"`
	Locale string `json:"locale,omitempty"`
	// Timezone is an hour offset — presumably UTC offset per the Facebook
	// user-profile API; confirm against the platform docs.
	Timezone float64 `json:"timezone,omitempty"`
	Gender string `json:"gender,omitempty"`
}

// accountLinking is the payload of an account-linking callback.
type accountLinking struct {
	//Recipient is Page Scoped ID
	Recipient string `json:"recipient"`
}
|
package Problem0026
// removeDuplicates compacts a sorted slice in place so each value appears
// once, and returns the number of unique elements. Elements past the
// returned length are left in an unspecified state.
func removeDuplicates(nums []int) int {
	if len(nums) <= 1 {
		return len(nums)
	}
	write := 1
	for read := 1; read < len(nums); read++ {
		if nums[read] == nums[read-1] {
			continue
		}
		if write != read {
			nums[write] = nums[read]
		}
		write++
	}
	return write
}
|
package resolvers
import (
"context"
"github.com/syncromatics/kafmesh/internal/graph/generated"
"github.com/syncromatics/kafmesh/internal/graph/model"
"github.com/pkg/errors"
)
//go:generate mockgen -source=./pod.go -destination=./pod_mock_test.go -package=resolvers_test
// PodLoader is the dataloaders for a pod: each method loads the resources
// associated with the pod identified by the given id.
type PodLoader interface {
	ProcessorsByPod(int) ([]*model.Processor, error)
	SinksByPod(int) ([]*model.Sink, error)
	SourcesByPod(int) ([]*model.Source, error)
	ViewSinksByPod(int) ([]*model.ViewSink, error)
	ViewSourcesByPod(int) ([]*model.ViewSource, error)
	ViewsByPod(int) ([]*model.View, error)
}

// Compile-time assertion that PodResolver satisfies the generated resolver
// interface.
var _ generated.PodResolver = &PodResolver{}

// PodResolver resolves a pod's relationships.
type PodResolver struct {
	*Resolver
}
// Processors returns the pod's processors.
func (r *PodResolver) Processors(ctx context.Context, pod *model.Pod) ([]*model.Processor, error) {
	loader := r.DataLoaders.PodLoader(ctx)
	processors, err := loader.ProcessorsByPod(pod.ID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get processors from loader")
	}
	return processors, nil
}
// Sinks returns the pod's sinks.
func (r *PodResolver) Sinks(ctx context.Context, pod *model.Pod) ([]*model.Sink, error) {
	loader := r.DataLoaders.PodLoader(ctx)
	sinks, err := loader.SinksByPod(pod.ID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get sinks from loader")
	}
	return sinks, nil
}
// Sources returns the pod's sources.
func (r *PodResolver) Sources(ctx context.Context, pod *model.Pod) ([]*model.Source, error) {
	loader := r.DataLoaders.PodLoader(ctx)
	sources, err := loader.SourcesByPod(pod.ID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get sources from loader")
	}
	return sources, nil
}
// ViewSinks returns the pod's view sinks.
func (r *PodResolver) ViewSinks(ctx context.Context, pod *model.Pod) ([]*model.ViewSink, error) {
	loader := r.DataLoaders.PodLoader(ctx)
	viewSinks, err := loader.ViewSinksByPod(pod.ID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get view sinks from loader")
	}
	return viewSinks, nil
}
// ViewSources returns the pod's view sources.
func (r *PodResolver) ViewSources(ctx context.Context, pod *model.Pod) ([]*model.ViewSource, error) {
	loader := r.DataLoaders.PodLoader(ctx)
	viewSources, err := loader.ViewSourcesByPod(pod.ID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get view sources from loader")
	}
	return viewSources, nil
}
// Views returns the pod's views.
func (r *PodResolver) Views(ctx context.Context, pod *model.Pod) ([]*model.View, error) {
	loader := r.DataLoaders.PodLoader(ctx)
	views, err := loader.ViewsByPod(pod.ID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get views from loader")
	}
	return views, nil
}
|
package order
import (
"github.com/jinzhu/gorm"
"mall_server/internal/models/wx"
"time"
)
// Order is one purchase row: a single SKU with its price, quantity, buyer
// and payment state.
type Order struct {
	OrderCode string `json:"order_code"`
	// ThirdOrderCode holds the external (wxpay) transaction id; it is filled
	// in by UpdateByOrderCode after payment.
	ThirdOrderCode string `json:"third_order_code"`
	GoodsId int64 `json:"goods_id"`
	GoodsName string `json:"goods_name"`
	SkuId int64 `json:"sku_id"`
	SkuName string `json:"sku_name"`
	Count int64 `json:"count"`
	Price int64 `json:"price"`
	// DealPrice is set from the wxpay TotalFee on payment.
	DealPrice int `json:"deal_price"`
	Uid int64 `json:"uid"`
	// Status is set to 1 by UpdateByOrderCode when payment succeeds; the
	// meaning of other values is defined by callers — TODO confirm.
	Status int `json:"status"`
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
}

// Model wraps a gorm handle bound to a fixed table name for order queries.
type Model struct {
	Db *gorm.DB
	Name string
}
// NewModel builds an order Model bound to the "orders" table.
func NewModel(db *gorm.DB) *Model {
	m := &Model{
		Db:   db,
		Name: "orders",
	}
	return m
}
// Create inserts the order row into the model's table.
// Simplified: the original checked the error only to immediately return it
// unchanged, which is equivalent to returning the call's error directly.
func (s *Model) Create(in Order) (err error) {
	return s.Db.Table(s.Name).Create(&in).Error
}
// GetByStatus returns all orders with the given status.
// Simplified: the redundant check-then-return around the query is collapsed
// into a direct return of the query error.
func (s *Model) GetByStatus(status int) (ret []Order, err error) {
	err = s.Db.Table(s.Name).Where("status = ?", status).Find(&ret).Error
	return
}
// UpdateStatusByOrderCode sets the status column of the order matched by
// order_code. Simplified: returns the query error directly instead of the
// original check-then-return of the same value.
func (s *Model) UpdateStatusByOrderCode(orderCode string, status int) (err error) {
	return s.Db.Table(s.Name).Where("order_code = ?", orderCode).UpdateColumn("status", status).Error
}
// UpdateByOrderCode records a completed wxpay payment on the order matched
// by OutTradeNo: it stores the wxpay transaction id and paid amount and sets
// status to 1. Simplified to return the query error directly.
func (s *Model) UpdateByOrderCode(in *wx.WxpayReq) (err error) {
	return s.Db.Table(s.Name).Where("order_code = ?", in.OutTradeNo).Updates(map[string]interface{}{
		"third_order_code": in.TransactionId,
		"deal_price":       in.TotalFee,
		"status":           1,
	}).Error
}
|
package entity
// UmsAdminPermissionRelation links an admin account to a permission
// (xorm-mapped row; the table name follows xorm's default mapping of the
// struct name — confirm against the schema).
type UmsAdminPermissionRelation struct {
	Id int64 `json:"id" xorm:"pk autoincr BIGINT(20) 'id'"`
	AdminId int64 `json:"admin_id" xorm:"default NULL BIGINT(20) 'admin_id'"`
	PermissionId int64 `json:"permission_id" xorm:"default NULL BIGINT(20) 'permission_id'"`
	// Type distinguishes the relation kind (INT(1) column); the value
	// semantics are defined by callers — TODO confirm.
	Type int `json:"type" xorm:"default NULL INT(1) 'type'"`
}
|
package vo
// json2go
// https://www.sojson.com/json/json2go.html
// ErrorMsg is a generic error payload returned to clients.
type ErrorMsg struct {
	Msg string `json:"msg"`
	Code int `json:"code"`
}

// JSReqGetList binds json-server style list query parameters
// (_start/_limit/_order/_sort/q) from the request form.
type JSReqGetList struct {
	Start int `form:"_start"`
	Limit int `form:"_limit"`
	Order string `form:"_order"`
	Sort string `form:"_sort"`
	Q string `form:"q"`
}
|
package main
import "fmt"
// runeToByteBoard converts a rectangular rune grid into an equally shaped
// byte grid (each rune is truncated to its low byte). Like the original, it
// assumes at least one row and uses the first row's width for every row.
func runeToByteBoard(rBoard [][]rune) [][]byte {
	rows, cols := len(rBoard), len(rBoard[0])
	bBoard := make([][]byte, rows)
	for r := range bBoard {
		bBoard[r] = make([]byte, cols)
		for c := 0; c < cols; c++ {
			bBoard[r][c] = byte(rBoard[r][c])
		}
	}
	return bBoard
}
// clearTraversed resets every cell of the visited grid to false in place.
func clearTraversed(traversed [][]bool) {
	for r := range traversed {
		for c := 0; c < len(traversed[0]); c++ {
			traversed[r][c] = false
		}
	}
}
// checkTraversed is a debugging helper: it prints the coordinates of the
// first cell still marked true (if any) and returns.
func checkTraversed(traversed [][]bool) {
	for r := range traversed {
		for c := 0; c < len(traversed[0]); c++ {
			if !traversed[r][c] {
				continue
			}
			fmt.Printf("traversed[%d][%d] is true", r, c)
			return
		}
	}
}
// checkForWord reports whether word[start:] can be spelled starting at
// (fromRow, fromCol), moving one step up/down/left/right per character and
// never revisiting a cell marked in traversed. The traversed grid is
// restored before returning.
func checkForWord(board [][]byte, traversed [][]bool, word []rune, fromRow, fromCol, start int) bool {
	if start >= len(word) {
		return true
	}
	if board[fromRow][fromCol] != byte(word[start]) {
		return false
	}
	if start == len(word)-1 {
		return true
	}
	// Neighbor offsets: left, right, up, down (same order as the original).
	deltas := [4][2]int{{0, -1}, {0, 1}, {-1, 0}, {1, 0}}
	numRows, numCols := len(board), len(board[0])
	for _, d := range deltas {
		nextR, nextC := fromRow+d[0], fromCol+d[1]
		if nextR < 0 || nextR >= numRows || nextC < 0 || nextC >= numCols || traversed[nextR][nextC] {
			continue
		}
		traversed[nextR][nextC] = true
		found := checkForWord(board, traversed, word, nextR, nextC, start+1)
		traversed[nextR][nextC] = false
		if found {
			return true
		}
	}
	return false
}
// findWords returns, in input order, every word that can be traced on the
// board via adjacent (non-diagonal) cells without reusing a cell. The
// traversed scratch grid is allocated once and reused across words.
func findWords(board [][]byte, words []string) []string {
	result := []string{}
	if len(board) == 0 {
		return result
	}
	rows, cols := len(board), len(board[0])
	traversed := make([][]bool, rows)
	for r := range traversed {
		traversed[r] = make([]bool, cols)
	}
	for _, word := range words {
		runes := []rune(word)
		found := false
	search:
		for r := 0; r < rows; r++ {
			for c := 0; c < cols; c++ {
				// Only start a search from cells matching the first letter.
				if board[r][c] != byte(runes[0]) {
					continue
				}
				traversed[r][c] = true
				ok := checkForWord(board, traversed, runes, r, c, 0)
				traversed[r][c] = false
				if ok {
					found = true
					break search
				}
			}
		}
		if found {
			result = append(result, word)
		}
	}
	return result
}
// test1 runs findWords on the classic LeetCode 212 example board and prints
// the matches (expected: oath and eat).
func test1() {
	rBoard := [][]rune{
		[]rune{'o', 'a', 'a', 'n'},
		[]rune{'e', 't', 'a', 'e'},
		[]rune{'i', 'h', 'k', 'r'},
		[]rune{'i', 'f', 'l', 'v'},
	}
	bBoard := runeToByteBoard(rBoard)
	words := []string{"oath", "pea", "eat", "rain"}
	result := findWords(bBoard, words)
	fmt.Printf("result: %v\n", result)
}
// main just runs the single manual test case.
func main() {
	test1()
}
|
package filehandler
import (
"path/filepath"
"strings"
)
// FileSet stores the file permissions in the hierarchical set.
// Set maps a path pattern (exact path, "dir/" prefix, or "dir/*") to true;
// SystemRoot marks that "/" itself was granted.
type FileSet struct {
	Set        map[string]bool
	SystemRoot bool
}

// FilePerm stores the permission apply to the file.
type FilePerm int

// FilePermWrite / Read / Stat are permissions.
const (
	// Typed as FilePerm (they were previously untyped ints) so the values
	// cannot be confused with arbitrary integers when passed to
	// AddFilePermission.
	FilePermWrite FilePerm = iota + 1
	FilePermRead
	FilePermStat
)

// NewFileSet creates the new file set with an empty, non-nil pattern map.
func NewFileSet() FileSet {
	return FileSet{Set: make(map[string]bool)}
}
// IsInSetSmart same from uoj-judger: reports whether name is covered by the
// set. Three entry forms are honored:
//   - an exact entry equal to name,
//   - "dir/" entries covering everything under dir, and
//   - "dir/*" entries, matched only when name is exactly one component
//     below dir (checked at level == 1 of the walk).
// "/" coverage is represented by SystemRoot rather than a map entry.
func (s *FileSet) IsInSetSmart(name string) bool {
	if s.Set[name] {
		return true
	}
	if name == "/" && s.SystemRoot {
		return true
	}
	// check ...
	// Walk up the directory chain; level counts how many trailing components
	// have been stripped from the original name so far.
	level := 0
	for level = 0; name != ""; level++ {
		if level == 1 && s.Set[name+"/*"] {
			return true
		}
		if s.Set[name+"/"] {
			return true
		}
		name = dirname(name)
	}
	// Root-level patterns are checked after the walk exhausts the path.
	if level == 1 && s.Set["/*"] {
		return true
	}
	if s.Set["/"] {
		return true
	}
	return false
}
// Add adds a single file path into the FileSet. "/" is recorded via the
// SystemRoot flag instead of the map.
func (s *FileSet) Add(name string) {
	if name == "/" {
		s.SystemRoot = true
		return
	}
	s.Set[name] = true
}
// AddRange adds multiple files into the FileSet.
// Absolute paths are stored as-is ("/" sets SystemRoot); relative paths are
// resolved against workPath and stored as directory prefixes ("…/").
func (s *FileSet) AddRange(names []string, workPath string) {
	for _, name := range names {
		if !filepath.IsAbs(name) {
			s.Set[filepath.Join(workPath, name)+"/"] = true
			continue
		}
		if name == "/" {
			s.SystemRoot = true
			continue
		}
		s.Set[name] = true
	}
}
// FileSets aggregates multiple permissions including write / read / stat /
// soft ban.
type FileSets struct {
	Writable, Readable, Statable, SoftBan FileSet
}

// NewFileSets creates new FileSets struct with four empty permission sets.
func NewFileSets() *FileSets {
	return &FileSets{
		Writable: NewFileSet(),
		Readable: NewFileSet(),
		Statable: NewFileSet(),
		SoftBan:  NewFileSet(),
	}
}
// IsWritableFile determines whether the file path inside the write set,
// checking both the literal path and its symlink-resolved form.
func (s *FileSets) IsWritableFile(name string) bool {
	if s.Writable.IsInSetSmart(name) {
		return true
	}
	return s.Writable.IsInSetSmart(realPath(name))
}
// IsReadableFile determines whether the file path inside the read / write
// set (write permission implies read).
func (s *FileSets) IsReadableFile(name string) bool {
	if s.IsWritableFile(name) {
		return true
	}
	return s.Readable.IsInSetSmart(name) || s.Readable.IsInSetSmart(realPath(name))
}
// IsStatableFile determines whether the file path inside the stat / read /
// write set (read permission implies stat).
func (s *FileSets) IsStatableFile(name string) bool {
	if s.IsReadableFile(name) {
		return true
	}
	return s.Statable.IsInSetSmart(name) || s.Statable.IsInSetSmart(realPath(name))
}
// IsSoftBanFile determines whether the file path inside the softban set,
// checking both the literal path and its symlink-resolved form.
func (s *FileSets) IsSoftBanFile(name string) bool {
	if s.SoftBan.IsInSetSmart(name) {
		return true
	}
	return s.SoftBan.IsInSetSmart(realPath(name))
}
// AddFilePermission adds the file into fileSets according to the given
// permission, then grants stat permission on every ancestor directory so the
// path can be traversed. Rewritten as a switch (idiomatic for a value
// dispatch); unknown modes still add only the ancestor stat entries.
func (s *FileSets) AddFilePermission(name string, mode FilePerm) {
	switch mode {
	case FilePermWrite:
		s.Writable.Add(name)
	case FilePermRead:
		s.Readable.Add(name)
	case FilePermStat:
		s.Statable.Add(name)
	}
	for name = dirname(name); name != ""; name = dirname(name) {
		s.Statable.Add(name)
	}
}
// GetExtraSet evaluates the concatenated file set according to real path or
// raw path: raw entries are kept verbatim, extra entries are replaced by
// their symlink-resolved paths ("" when resolution fails).
func GetExtraSet(extra, raw []string) []string {
	combined := make([]string, 0, len(extra)+len(raw))
	combined = append(combined, raw...)
	for _, p := range extra {
		combined = append(combined, realPath(p))
	}
	return combined
}
// basename return path with last "/": everything up to and including the
// final slash. A path without "/" is returned unchanged.
// (Note: despite the name, this returns the directory prefix, not the leaf.)
func basename(path string) string {
	p := strings.LastIndex(path, "/")
	if p < 0 {
		return path
	}
	return path[:p+1]
}
// dirname return path without last "/": everything before the final slash.
// A path without "/" yields "".
func dirname(path string) string {
	p := strings.LastIndex(path, "/")
	if p < 0 {
		return ""
	}
	return path[:p]
}
// realPath resolves every symlink in p, returning "" when the path cannot
// be resolved (e.g. it does not exist).
func realPath(p string) string {
	resolved, err := filepath.EvalSymlinks(p)
	if err != nil {
		return ""
	}
	return resolved
}
|
package main
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
)
// TestEqualFieldsSorted compares two recorded API responses after removing
// the excluded fields, and expects the first differing field to be
// "results.#.id".
// NOTE(review): the ReadFile and unmarshal errors are discarded — if the
// fixture files are missing, the failure surfaces as a confusing diff
// instead of a clear error; consider asserting on them.
func TestEqualFieldsSorted(t *testing.T) {
	jsonFileArrayBytes1, _ := ioutil.ReadFile("response-host-1.json")
	jsonFileArrayBytes2, _ := ioutil.ReadFile("response-host-2.json")
	leftJSON, _ := unmarshal(jsonFileArrayBytes1)
	rightJSON, _ := unmarshal(jsonFileArrayBytes2)
	// "#" in a field path appears to address every element of an array —
	// TODO confirm against the Remove implementation.
	excludeFields := []string{"paging", "results.#.payer_costs.#.payment_method_option_id"}
	if len(excludeFields) > 0 {
		for _, excludeField := range excludeFields {
			Remove(leftJSON, excludeField)
			Remove(rightJSON, excludeField)
		}
	}
	isEqual, fieldError := Equal(leftJSON, rightJSON)
	assert.Equal(t, isEqual, false)
	assert.Equal(t, fieldError, "results.#.id")
}
|
package main
import "github.com/QisFj/godry/gen/graph"
// Entry holds one object's field values.
type Entry []string // one object's different field
// Group collects objects of the same type.
type Group []Entry // objects with same type
// Data collects groups of different object types; it satisfies the
// graph package's layered-data interface via Len/Get below.
type Data []Group // objects with different type
// Len reports the number of groups (graph layers).
func (data Data) Len() int { return len(data) }
// Get returns group i as a graph layer.
func (data Data) Get(i int) graph.LayerI { return data[i] }
// Len reports the number of entries (graph nodes) in the group.
func (group Group) Len() int { return len(group) }
// Get returns entry i as a graph node.
func (group Group) Get(i int) graph.NodeI { return group[i] }
// Arg bundles the inputs for graph generation: the main data, extra data,
// and the template used to render the output.
type Arg struct {
	Data Data
	ExData Data
	Template string
}
|
package c34_mitm_diffie_hellman
import (
"crypto/sha1"
"math/big"
"github.com/vodafon/cryptopals/set2/c10_implement_cbc_mode"
)
// MITM sits between two Point participants of a Diffie-Hellman exchange,
// relaying their messages while tampering with the key negotiation so it
// can decrypt everything that passes through.
type MITM struct {
	name string
	P *big.Int
	receiverA Point
	receiverB Point
	// receiverN tracks which receiver SetReceiver assigns next (0 -> A, then B).
	receiverN int
	// side tracks whose turn the next relayed message is (0 -> forward to B).
	side int
	decryptedMessage []byte
}

// NewMITM creates an attacker with the given display name; receivers are
// attached later via SetReceiver.
func NewMITM(name string) *MITM {
	return &MITM{
		name: name,
	}
}
// SetReceiver attaches a participant: the first call sets receiverA, the
// second sets receiverB (and resets the toggle for reuse).
func (obj *MITM) SetReceiver(p Point) {
	if obj.receiverN != 0 {
		obj.receiverB = p
		obj.receiverN = 0
		return
	}
	obj.receiverA = p
	obj.receiverN = 1
}
// ReceivePGK intercepts A's (p, g, publicKey) handshake and forwards it to B
// with the public key replaced by p itself — the parameter-injection MITM
// from the challenge, which forces the negotiated secret to p^x mod p = 0.
// The genuine pK is deliberately discarded.
func (obj *MITM) ReceivePGK(p, g, pK *big.Int) {
	obj.P = p
	obj.receiverB.ReceivePGK(p, g, p)
}
// ReceiveK intercepts B's public key and forwards the stored p to A for the
// same reason; the real pK is intentionally ignored.
func (obj *MITM) ReceiveK(pK *big.Int) {
	obj.receiverA.ReceiveK(obj.P)
}
// ReceiveMessage decrypts the intercepted ciphertext for the attacker's own
// record, then relays it unchanged to the other side, alternating direction
// on each call (side 0 forwards to B, side 1 forwards to A).
func (obj *MITM) ReceiveMessage(ciphertext []byte) {
	obj.decrypt(ciphertext)
	if obj.side != 0 {
		obj.receiverA.ReceiveMessage(ciphertext)
		obj.side = 0
		return
	}
	obj.receiverB.ReceiveMessage(ciphertext)
	obj.side = 1
}
// decrypt recovers the plaintext of an intercepted message. Because
// ReceivePGK/ReceiveK injected p as both public keys, both parties derive a
// shared secret of 0, so the AES key is always SHA-1 of the empty byte
// string (big.NewInt(0).Bytes() is empty), truncated to 16 bytes.
func (obj *MITM) decrypt(ciphertext []byte) {
	key := sha1.Sum(big.NewInt(0).Bytes())
	// The trailing 16 bytes of the ciphertext carry the CBC IV.
	iv := ciphertext[len(ciphertext)-16:]
	enc := ciphertext[:len(ciphertext)-16]
	msg := c10_implement_cbc_mode.Decrypt(enc, key[:16], iv)
	obj.decryptedMessage = msg
}
// SendPGK is a no-op: the MITM only relays traffic it intercepts.
func (obj *MITM) SendPGK() {}
// SendK is a no-op.
func (obj *MITM) SendK() {}
// SendMessage is a no-op.
func (obj *MITM) SendMessage(msg []byte) {}
// ReturnMessage is a no-op.
func (obj *MITM) ReturnMessage() {}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strings"
)
// checkDependencyPr fetches the dependency pull request at depUrl and
// updates the dependent PR's status accordingly: "success" when the
// dependency is closed, "failure" (linking to the open dependency) when it
// is still open.
func checkDependencyPr(depUrl string, prUrl string, statusUrl string) {
	client := &http.Client{}
	req, err := http.NewRequest("GET", depUrl, nil)
	if err != nil {
		panic(err)
	}
	// NOTE(review): hard-coded Basic credentials checked into source — move
	// them to an environment variable or secret store.
	req.Header.Add("Authorization", `Basic dGNyYW5kczpiYWlsZXkxMjM=`)
	resp, err := client.Do(req)
	if err != nil {
		// Bug fix: the error was previously unchecked and resp.Body was
		// dereferenced first, panicking with a nil response on transport
		// errors.
		panic(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		githubDataDependency := &GithubDataDependency{}
		decoder := json.NewDecoder(resp.Body)
		err := decoder.Decode(&githubDataDependency)
		if err != nil {
			panic(err)
		}
		state := githubDataDependency.State
		prRepoName := githubDataDependency.Head.Repo.Name
		if state == "closed" {
			status := "success"
			changePrStatus(depUrl, status, prRepoName, statusUrl)
		} else {
			status := "failure"
			// Convert the API URL into the human-facing github.com PR URL.
			newUrl := strings.Replace(depUrl, "api.github.com/repos", "github.com", 1)
			newDepUrl := strings.Replace(newUrl, "pulls", "pull", 1)
			storedPRData := &StoredPRData{
				PrUrl:     prUrl,
				DepUrl:    newDepUrl,
				StatusUrl: statusUrl,
			}
			go updateDatabase(storedPRData, newDepUrl)
			go changePrStatus(newDepUrl, status, prRepoName, statusUrl)
		}
	}
}
// changePrStatus POSTs a commit status (state plus a human-readable
// description linking to depUrl) for the dependent PR to statusUrl.
func changePrStatus(depUrl string, status string, prRepoName string, statusUrl string) {
	client := &http.Client{}
	message := prepareMessage(prRepoName, status)
	// Bug fix: the payload was previously built by string concatenation, so
	// a quote or backslash in any field produced invalid JSON. Marshal
	// escapes them correctly.
	jsonStr, err := json.Marshal(map[string]string{
		"state":       status,
		"target_url":  depUrl,
		"description": message,
		"context":     "Dependency Manager",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.NewBuffer(jsonStr))
	req, err := http.NewRequest("POST", statusUrl, bytes.NewBuffer(jsonStr))
	if err != nil {
		panic(err)
	}
	// NOTE(review): hard-coded Basic credentials checked into source.
	req.Header.Add("Authorization", `Basic dGNyYW5kczpiYWlsZXkxMjM=`)
	resp, err := client.Do(req)
	if err != nil {
		// Bug fix: previously unchecked; the deferred Close panicked on a
		// nil response when the request failed.
		panic(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == 201 {
		fmt.Println("Pull Request Status Updated")
	} else {
		fmt.Println(resp.StatusCode)
	}
}
// prepareMessage builds the status description shown on the pull request:
// a pointer to the blocking repository on failure, an all-clear on success,
// and a generic error message for any other status.
func prepareMessage(prRepoName string, status string) string {
	switch status {
	case "failure":
		return "Requires: " + strings.ToUpper(prRepoName) + " (Click details ->)"
	case "success":
		return "No Dependencies Found"
	default:
		// Fixed typo in the user-facing message ("occured").
		return "An Error has occurred"
	}
}
|
// ⚡️ Fiber is an Express inspired web framework written in Go with ☕️
// 📝 Github Repository: https://github.com/gofiber/fiber
// 📌 API Documentation: https://docs.gofiber.io
package fiber
import (
"fmt"
"testing"
"github.com/gofiber/fiber/v2/utils"
)
// go test -race -run Test_Path_parseRoute
// Test_Path_parseRoute verifies that parseRoute splits route patterns into
// the expected constant/parameter segments (with lengths, compare parts and
// greedy/optional flags) and records parameter names in declaration order.
func Test_Path_parseRoute(t *testing.T) {
	var rp routeParser
	// "::" escapes a literal ":" immediately before each named parameter.
	rp = parseRoute("/shop/product/::filter/color::color/size::size")
	utils.AssertEqual(t, routeParser{
		segs: []*routeSegment{
			{Const: "/shop/product/:", Length: 15},
			{IsParam: true, ParamName: "filter", ComparePart: "/color:", PartCount: 1},
			{Const: "/color:", Length: 7},
			{IsParam: true, ParamName: "color", ComparePart: "/size:", PartCount: 1},
			{Const: "/size:", Length: 6},
			{IsParam: true, ParamName: "size", IsLast: true},
		},
		params: []string{"filter", "color", "size"},
	}, rp)
	// Trailing "*" wildcard: greedy and optional, and the "/" before it
	// becomes optional too.
	rp = parseRoute("/api/v1/:param/abc/*")
	utils.AssertEqual(t, routeParser{
		segs: []*routeSegment{
			{Const: "/api/v1/", Length: 8},
			{IsParam: true, ParamName: "param", ComparePart: "/abc", PartCount: 1},
			{Const: "/abc/", Length: 5, HasOptionalSlash: true},
			{IsParam: true, ParamName: "*1", IsGreedy: true, IsOptional: true, IsLast: true},
		},
		params: []string{"param", "*1"},
		wildCardCount: 1,
	}, rp)
	// Wildcard in the middle of the route, followed by two named parameters.
	rp = parseRoute("/api/*/:param/:param2")
	utils.AssertEqual(t, routeParser{
		segs: []*routeSegment{
			{Const: "/api/", Length: 5, HasOptionalSlash: true},
			{IsParam: true, ParamName: "*1", IsGreedy: true, IsOptional: true, ComparePart: "/", PartCount: 2},
			{Const: "/", Length: 1},
			{IsParam: true, ParamName: "param", ComparePart: "/", PartCount: 1},
			{Const: "/", Length: 1},
			{IsParam: true, ParamName: "param2", IsLast: true},
		},
		params: []string{"*1", "param", "param2"},
		wildCardCount: 1,
	}, rp)
	// Two consecutive optional parameters.
	rp = parseRoute("/test:optional?:optional2?")
	utils.AssertEqual(t, routeParser{
		segs: []*routeSegment{
			{Const: "/test", Length: 5},
			{IsParam: true, ParamName: "optional", IsOptional: true, Length: 1},
			{IsParam: true, ParamName: "optional2", IsOptional: true, IsLast: true},
		},
		params: []string{"optional", "optional2"},
	}, rp)
	// "+" wildcard: greedy like "*" but mandatory (IsOptional: false).
	rp = parseRoute("/config/+.json")
	utils.AssertEqual(t, routeParser{
		segs: []*routeSegment{
			{Const: "/config/", Length: 8},
			{IsParam: true, ParamName: "+1", IsGreedy: true, IsOptional: false, ComparePart: ".json", PartCount: 1},
			{Const: ".json", Length: 5, IsLast: true},
		},
		params: []string{"+1"},
		plusCount: 1,
	}, rp)
	// Dot-separated parameters with optional trailing parts.
	rp = parseRoute("/api/:day.:month?.:year?")
	utils.AssertEqual(t, routeParser{
		segs: []*routeSegment{
			{Const: "/api/", Length: 5},
			{IsParam: true, ParamName: "day", IsOptional: false, ComparePart: ".", PartCount: 2},
			{Const: ".", Length: 1},
			{IsParam: true, ParamName: "month", IsOptional: true, ComparePart: ".", PartCount: 1},
			{Const: ".", Length: 1},
			{IsParam: true, ParamName: "year", IsOptional: true, IsLast: true},
		},
		params: []string{"day", "month", "year"},
	}, rp)
	// Multiple wildcards are numbered *1, *2, ...
	rp = parseRoute("/*v1*/proxy")
	utils.AssertEqual(t, routeParser{
		segs: []*routeSegment{
			{Const: "/", Length: 1, HasOptionalSlash: true},
			{IsParam: true, ParamName: "*1", IsGreedy: true, IsOptional: true, ComparePart: "v1", PartCount: 1},
			{Const: "v1", Length: 2},
			{IsParam: true, ParamName: "*2", IsGreedy: true, IsOptional: true, ComparePart: "/proxy", PartCount: 1},
			{Const: "/proxy", Length: 6, IsLast: true},
		},
		params: []string{"*1", "*2"},
		wildCardCount: 2,
	}, rp)
}
// go test -race -run Test_Path_matchParams
func Test_Path_matchParams(t *testing.T) {
t.Parallel()
type testparams struct {
url string
params []string
match bool
partialCheck bool
}
var ctxParams [maxParams]string
testCase := func(r string, cases []testparams) {
parser := parseRoute(r)
for _, c := range cases {
match := parser.getMatch(c.url, c.url, &ctxParams, c.partialCheck)
utils.AssertEqual(t, c.match, match, fmt.Sprintf("route: '%s', url: '%s'", r, c.url))
if match && len(c.params) > 0 {
utils.AssertEqual(t, c.params[0:len(c.params)-1], ctxParams[0:len(c.params)-1], fmt.Sprintf("route: '%s', url: '%s'", r, c.url))
}
}
}
testCase("/api/v1/:param/*", []testparams{
{url: "/api/v1/entity", params: []string{"entity", ""}, match: true},
{url: "/api/v1/entity/", params: []string{"entity", ""}, match: true},
{url: "/api/v1/entity/1", params: []string{"entity", "1"}, match: true},
{url: "/api/v", params: nil, match: false},
{url: "/api/v2", params: nil, match: false},
{url: "/api/v1/", params: nil, match: false},
})
testCase("/api/v1/:param/+", []testparams{
{url: "/api/v1/entity", params: nil, match: false},
{url: "/api/v1/entity/", params: nil, match: false},
{url: "/api/v1/entity/1", params: []string{"entity", "1"}, match: true},
{url: "/api/v", params: nil, match: false},
{url: "/api/v2", params: nil, match: false},
{url: "/api/v1/", params: nil, match: false},
})
testCase("/api/v1/:param?", []testparams{
{url: "/api/v1", params: []string{""}, match: true},
{url: "/api/v1/", params: []string{""}, match: true},
{url: "/api/v1/optional", params: []string{"optional"}, match: true},
{url: "/api/v", params: nil, match: false},
{url: "/api/v2", params: nil, match: false},
{url: "/api/xyz", params: nil, match: false},
})
testCase("/api/v1/*", []testparams{
{url: "/api/v1", params: []string{""}, match: true},
{url: "/api/v1/", params: []string{""}, match: true},
{url: "/api/v1/entity", params: []string{"entity"}, match: true},
{url: "/api/v1/entity/1/2", params: []string{"entity/1/2"}, match: true},
{url: "/api/v1/Entity/1/2", params: []string{"Entity/1/2"}, match: true},
{url: "/api/v", params: nil, match: false},
{url: "/api/v2", params: nil, match: false},
{url: "/api/abc", params: nil, match: false},
})
testCase("/api/v1/:param", []testparams{
{url: "/api/v1/entity", params: []string{"entity"}, match: true},
{url: "/api/v1/entity/8728382", params: nil, match: false},
{url: "/api/v1", params: nil, match: false},
{url: "/api/v1/", params: nil, match: false},
})
testCase("/api/v1/:param-:param2", []testparams{
{url: "/api/v1/entity-entity2", params: []string{"entity", "entity2"}, match: true},
{url: "/api/v1/entity/8728382", params: nil, match: false},
{url: "/api/v1/entity-8728382", params: []string{"entity", "8728382"}, match: true},
{url: "/api/v1", params: nil, match: false},
{url: "/api/v1/", params: nil, match: false},
})
testCase("/api/v1/:filename.:extension", []testparams{
{url: "/api/v1/test.pdf", params: []string{"test", "pdf"}, match: true},
{url: "/api/v1/test/pdf", params: nil, match: false},
{url: "/api/v1/test-pdf", params: nil, match: false},
{url: "/api/v1/test_pdf", params: nil, match: false},
{url: "/api/v1", params: nil, match: false},
{url: "/api/v1/", params: nil, match: false},
})
testCase("/api/v1/const", []testparams{
{url: "/api/v1/const", params: []string{}, match: true},
{url: "/api/v1", params: nil, match: false},
{url: "/api/v1/", params: nil, match: false},
{url: "/api/v1/something", params: nil, match: false},
})
testCase("/api/:param/fixedEnd", []testparams{
{url: "/api/abc/fixedEnd", params: []string{"abc"}, match: true},
{url: "/api/abc/def/fixedEnd", params: nil, match: false},
})
testCase("/shop/product/::filter/color::color/size::size", []testparams{
{url: "/shop/product/:test/color:blue/size:xs", params: []string{"test", "blue", "xs"}, match: true},
{url: "/shop/product/test/color:blue/size:xs", params: nil, match: false},
})
testCase("/::param?", []testparams{
{url: "/:hello", params: []string{"hello"}, match: true},
{url: "/:", params: []string{""}, match: true},
{url: "/", params: nil, match: false},
})
// successive parameters, each take one character and the last parameter gets everything
testCase("/test:sign:param", []testparams{
{url: "/test-abc", params: []string{"-", "abc"}, match: true},
{url: "/test", params: nil, match: false},
})
// optional parameters are not greedy
testCase("/:param1:param2?:param3", []testparams{
{url: "/abbbc", params: []string{"a", "b", "bbc"}, match: true},
//{url: "/ac", params: []string{"a", "", "c"}, match: true}, // TODO: fix it
{url: "/test", params: []string{"t", "e", "st"}, match: true},
})
testCase("/test:optional?:mandatory", []testparams{
//{url: "/testo", params: []string{"", "o"}, match: true}, // TODO: fix it
{url: "/testoaaa", params: []string{"o", "aaa"}, match: true},
{url: "/test", params: nil, match: false},
})
testCase("/test:optional?:optional2?", []testparams{
{url: "/testo", params: []string{"o", ""}, match: true},
{url: "/testoaaa", params: []string{"o", "aaa"}, match: true},
{url: "/test", params: []string{"", ""}, match: true},
{url: "/tes", params: nil, match: false},
})
testCase("/foo:param?bar", []testparams{
{url: "/foofaselbar", params: []string{"fasel"}, match: true},
{url: "/foobar", params: []string{""}, match: true},
{url: "/fooba", params: nil, match: false},
{url: "/fobar", params: nil, match: false},
})
testCase("/foo*bar", []testparams{
{url: "/foofaselbar", params: []string{"fasel"}, match: true},
{url: "/foobar", params: []string{""}, match: true},
{url: "/", params: nil, match: false},
})
testCase("/foo+bar", []testparams{
{url: "/foofaselbar", params: []string{"fasel"}, match: true},
{url: "/foobar", params: nil, match: false},
{url: "/", params: nil, match: false},
})
testCase("/a*cde*g/", []testparams{
{url: "/abbbcdefffg", params: []string{"bbb", "fff"}, match: true},
{url: "/acdeg", params: []string{"", ""}, match: true},
{url: "/", params: nil, match: false},
})
testCase("/*v1*/proxy", []testparams{
{url: "/customer/v1/cart/proxy", params: []string{"customer/", "/cart"}, match: true},
{url: "/v1/proxy", params: []string{"", ""}, match: true},
{url: "/v1/", params: nil, match: false},
})
// successive wildcard -> first wildcard is greedy
testCase("/foo***bar", []testparams{
{url: "/foo*abar", params: []string{"*a", "", ""}, match: true},
{url: "/foo*bar", params: []string{"*", "", ""}, match: true},
{url: "/foobar", params: []string{"", "", ""}, match: true},
{url: "/fooba", params: nil, match: false},
})
// chars in front of an parameter
testCase("/name::name", []testparams{
{url: "/name:john", params: []string{"john"}, match: true},
})
testCase("/@:name", []testparams{
{url: "/@john", params: []string{"john"}, match: true},
})
testCase("/-:name", []testparams{
{url: "/-john", params: []string{"john"}, match: true},
})
testCase("/.:name", []testparams{
{url: "/.john", params: []string{"john"}, match: true},
})
testCase("/api/v1/:param/abc/*", []testparams{
{url: "/api/v1/well/abc/wildcard", params: []string{"well", "wildcard"}, match: true},
{url: "/api/v1/well/abc/", params: []string{"well", ""}, match: true},
{url: "/api/v1/well/abc", params: []string{"well", ""}, match: true},
{url: "/api/v1/well/ttt", params: nil, match: false},
})
testCase("/api/:day/:month?/:year?", []testparams{
{url: "/api/1", params: []string{"1", "", ""}, match: true},
{url: "/api/1/", params: []string{"1", "", ""}, match: true},
{url: "/api/1//", params: []string{"1", "", ""}, match: true},
{url: "/api/1/-/", params: []string{"1", "-", ""}, match: true},
{url: "/api/1-", params: []string{"1-", "", ""}, match: true},
{url: "/api/1.", params: []string{"1.", "", ""}, match: true},
{url: "/api/1/2", params: []string{"1", "2", ""}, match: true},
{url: "/api/1/2/3", params: []string{"1", "2", "3"}, match: true},
{url: "/api/", params: nil, match: false},
})
testCase("/api/:day.:month?.:year?", []testparams{
{url: "/api/1", params: nil, match: false},
{url: "/api/1/", params: nil, match: false},
{url: "/api/1.", params: nil, match: false},
{url: "/api/1..", params: []string{"1", "", ""}, match: true},
{url: "/api/1.2", params: nil, match: false},
{url: "/api/1.2.", params: []string{"1", "2", ""}, match: true},
{url: "/api/1.2.3", params: []string{"1", "2", "3"}, match: true},
{url: "/api/", params: nil, match: false},
})
testCase("/api/:day-:month?-:year?", []testparams{
{url: "/api/1", params: nil, match: false},
{url: "/api/1/", params: nil, match: false},
{url: "/api/1-", params: nil, match: false},
{url: "/api/1--", params: []string{"1", "", ""}, match: true},
{url: "/api/1-/", params: nil, match: false},
//{url: "/api/1-/-", params: nil, match: false}, // TODO: fix this part
{url: "/api/1-2", params: nil, match: false},
{url: "/api/1-2-", params: []string{"1", "2", ""}, match: true},
{url: "/api/1-2-3", params: []string{"1", "2", "3"}, match: true},
{url: "/api/", params: nil, match: false},
})
testCase("/api/*", []testparams{
{url: "/api/", params: []string{""}, match: true},
{url: "/api/joker", params: []string{"joker"}, match: true},
{url: "/api", params: []string{""}, match: true},
{url: "/api/v1/entity", params: []string{"v1/entity"}, match: true},
{url: "/api2/v1/entity", params: nil, match: false},
{url: "/api_ignore/v1/entity", params: nil, match: false},
})
testCase("/partialCheck/foo/bar/:param", []testparams{
{url: "/partialCheck/foo/bar/test", params: []string{"test"}, match: true, partialCheck: true},
{url: "/partialCheck/foo/bar/test/test2", params: []string{"test"}, match: true, partialCheck: true},
{url: "/partialCheck/foo/bar", params: nil, match: false, partialCheck: true},
{url: "/partiaFoo", params: nil, match: false, partialCheck: true},
})
testCase("/", []testparams{
{url: "/api", params: nil, match: false},
{url: "", params: []string{}, match: true},
{url: "/", params: []string{}, match: true},
})
testCase("/config/abc.json", []testparams{
{url: "/config/abc.json", params: []string{}, match: true},
{url: "config/abc.json", params: nil, match: false},
{url: "/config/efg.json", params: nil, match: false},
{url: "/config", params: nil, match: false},
})
testCase("/config/*.json", []testparams{
{url: "/config/abc.json", params: []string{"abc"}, match: true},
{url: "/config/efg.json", params: []string{"efg"}, match: true},
{url: "/config/.json", params: []string{""}, match: true},
{url: "/config/efg.csv", params: nil, match: false},
{url: "config/abc.json", params: nil, match: false},
{url: "/config", params: nil, match: false},
})
testCase("/config/+.json", []testparams{
{url: "/config/abc.json", params: []string{"abc"}, match: true},
{url: "/config/.json", params: nil, match: false},
{url: "/config/efg.json", params: []string{"efg"}, match: true},
{url: "/config/efg.csv", params: nil, match: false},
{url: "config/abc.json", params: nil, match: false},
{url: "/config", params: nil, match: false},
})
testCase("/xyz", []testparams{
{url: "xyz", params: nil, match: false},
{url: "xyz/", params: nil, match: false},
})
testCase("/api/*/:param?", []testparams{
{url: "/api/", params: []string{"", ""}, match: true},
{url: "/api/joker", params: []string{"joker", ""}, match: true},
{url: "/api/joker/batman", params: []string{"joker", "batman"}, match: true},
{url: "/api/joker//batman", params: []string{"joker/", "batman"}, match: true},
{url: "/api/joker/batman/robin", params: []string{"joker/batman", "robin"}, match: true},
{url: "/api/joker/batman/robin/1", params: []string{"joker/batman/robin", "1"}, match: true},
{url: "/api/joker/batman/robin/1/", params: []string{"joker/batman/robin/1", ""}, match: true},
{url: "/api/joker-batman/robin/1", params: []string{"joker-batman/robin", "1"}, match: true},
{url: "/api/joker-batman-robin/1", params: []string{"joker-batman-robin", "1"}, match: true},
{url: "/api/joker-batman-robin-1", params: []string{"joker-batman-robin-1", ""}, match: true},
{url: "/api", params: []string{"", ""}, match: true},
})
testCase("/api/*/:param", []testparams{
{url: "/api/test/abc", params: []string{"test", "abc"}, match: true},
{url: "/api/joker/batman", params: []string{"joker", "batman"}, match: true},
{url: "/api/joker/batman/robin", params: []string{"joker/batman", "robin"}, match: true},
{url: "/api/joker/batman/robin/1", params: []string{"joker/batman/robin", "1"}, match: true},
{url: "/api/joker/batman-robin/1", params: []string{"joker/batman-robin", "1"}, match: true},
{url: "/api/joker-batman-robin-1", params: nil, match: false},
{url: "/api", params: nil, match: false},
})
testCase("/api/+/:param", []testparams{
{url: "/api/test/abc", params: []string{"test", "abc"}, match: true},
{url: "/api/joker/batman/robin/1", params: []string{"joker/batman/robin", "1"}, match: true},
{url: "/api/joker", params: nil, match: false},
{url: "/api", params: nil, match: false},
})
testCase("/api/*/:param/:param2", []testparams{
{url: "/api/test/abc/1", params: []string{"test", "abc", "1"}, match: true},
{url: "/api/joker/batman", params: nil, match: false},
{url: "/api/joker/batman-robin/1", params: []string{"joker", "batman-robin", "1"}, match: true},
{url: "/api/joker-batman-robin-1", params: nil, match: false},
{url: "/api/test/abc", params: nil, match: false},
{url: "/api/joker/batman/robin", params: []string{"joker", "batman", "robin"}, match: true},
{url: "/api/joker/batman/robin/1", params: []string{"joker/batman", "robin", "1"}, match: true},
{url: "/api/joker/batman/robin/1/2", params: []string{"joker/batman/robin", "1", "2"}, match: true},
{url: "/api", params: nil, match: false},
{url: "/api/:test", params: nil, match: false},
})
}
func Test_Utils_GetTrimmedParam(t *testing.T) {
t.Parallel()
res := GetTrimmedParam("*")
utils.AssertEqual(t, "*", res)
res = GetTrimmedParam(":param")
utils.AssertEqual(t, "param", res)
res = GetTrimmedParam(":param1?")
utils.AssertEqual(t, "param1", res)
res = GetTrimmedParam("noParam")
utils.AssertEqual(t, "noParam", res)
}
// go test -race -run=^$ -bench=Benchmark_Path_matchParams -benchmem
func Benchmark_Path_matchParams(t *testing.B) {
	// testparams describes one URL to match against a route, the expected
	// captured parameters, and whether the match should succeed.
	type testparams struct {
		url          string
		params       []string
		match        bool
		partialCheck bool
	}
	var ctxParams [maxParams]string
	// benchCase registers one sub-benchmark per test case for the given route
	// pattern r, and asserts the match outcome after timing.
	benchCase := func(r string, cases []testparams) {
		parser := parseRoute(r)
		for _, c := range cases {
			var matchRes bool
			state := "match"
			if !c.match {
				state = "not match"
			}
			t.Run(r+" | "+state+" | "+c.url, func(b *testing.B) {
				// Was `i <= b.N`, which ran one extra iteration and skewed
				// the per-op numbers; a benchmark body must run exactly b.N times.
				for i := 0; i < b.N; i++ {
					if match := parser.getMatch(c.url, c.url, &ctxParams, c.partialCheck); match {
						// Get params from the original path
						matchRes = true
					}
				}
				utils.AssertEqual(t, c.match, matchRes, fmt.Sprintf("route: '%s', url: '%s'", r, c.url))
				if matchRes && len(c.params) > 0 {
					// NOTE(review): only the first len-1 params are compared,
					// presumably to skip a trailing wildcard value — confirm intent.
					utils.AssertEqual(t, c.params[0:len(c.params)-1], ctxParams[0:len(c.params)-1], fmt.Sprintf("route: '%s', url: '%s'", r, c.url))
				}
			})
		}
	}
	benchCase("/api/:param/fixedEnd", []testparams{
		{url: "/api/abc/fixedEnd", params: []string{"abc"}, match: true},
		{url: "/api/abc/def/fixedEnd", params: nil, match: false},
	})
	benchCase("/api/v1/:param/*", []testparams{
		{url: "/api/v1/entity", params: []string{"entity", ""}, match: true},
		{url: "/api/v1/entity/", params: []string{"entity", ""}, match: true},
		{url: "/api/v1/entity/1", params: []string{"entity", "1"}, match: true},
		{url: "/api/v", params: nil, match: false},
		{url: "/api/v2", params: nil, match: false},
		{url: "/api/v1/", params: nil, match: false},
	})
	benchCase("/api/v1/:param", []testparams{
		{url: "/api/v1/entity", params: []string{"entity"}, match: true},
		{url: "/api/v1/entity/8728382", params: nil, match: false},
		{url: "/api/v1", params: nil, match: false},
		{url: "/api/v1/", params: nil, match: false},
	})
	benchCase("/api/v1", []testparams{
		{url: "/api/v1", params: []string{}, match: true},
		{url: "/api/v2", params: nil, match: false},
	})
	// NOTE(review): duplicate of the "/api/v1/:param/*" case above; the testing
	// package disambiguates the sub-benchmark names with a #01 suffix.
	benchCase("/api/v1/:param/*", []testparams{
		{url: "/api/v1/entity", params: []string{"entity", ""}, match: true},
		{url: "/api/v1/entity/", params: []string{"entity", ""}, match: true},
		{url: "/api/v1/entity/1", params: []string{"entity", "1"}, match: true},
		{url: "/api/v", params: nil, match: false},
		{url: "/api/v2", params: nil, match: false},
		{url: "/api/v1/", params: nil, match: false},
	})
}
|
package controllers
import (
"nepliteApi/models"
"github.com/astaxie/beego"
"fmt"
"github.com/astaxie/beego/orm"
"crypto/md5"
"github.com/astaxie/beego/logs"
"encoding/json"
"nepliteApi/comm"
)
// User-power records are kept separate from the ordinary user table:
// the user_power table holds administrators and super-users, while the
// user table holds regular consumer accounts.

// UserPowerController serves the CRUD and login endpoints for
// user-power records.
type UserPowerController struct {
	beego.Controller
}
// Add creates a new user-power record from the JSON request body.
//
// Response keys: "err" (empty string on success, negative code on error),
// "num" (unused here, kept for response-shape consistency) and
// "result" (the new record id, or the error value).
func (userPower *UserPowerController) Add() {
	result := make(map[string]interface{})
	result["num"] = 0
	result["err"] = ""
	result["result"] = ""
	var _l_userpower models.UserPower
	logs.Info("请求数据是 :%s ", userPower.Ctx.Input.RequestBody)
	err := json.Unmarshal(userPower.Ctx.Input.RequestBody, &_l_userpower)
	if err != nil {
		result["err"] = -1
		result["result"] = err
		userPower.Data["json"] = result
		userPower.ServeJSON()
		// Return so the handler does not fall through and insert a
		// zero-value record after the error response was already served.
		return
	}
	// TODO: the input should be validated/filtered here instead of relying
	// on database constraints to reject bad data.
	id, err := models.AddUserPower(_l_userpower)
	if err != nil {
		result["err"] = -2
		result["result"] = err
		userPower.Data["json"] = result
		userPower.ServeJSON()
		return
	}
	result["result"] = id
	userPower.Data["json"] = result
	userPower.ServeJSON()
}
// GetAll responds with every power record together with the record count.
// "err" carries the query error (nil on success), "num" the count and
// "result" the record list.
func (userPower *UserPowerController) GetAll() {
	response := map[string]interface{}{
		"err": 0,
		"num": 0,
	}
	powerList, count, queryErr := models.GetAllPower()
	response["result"] = powerList
	response["err"] = queryErr
	response["num"] = count
	userPower.Data["json"] = response
	userPower.ServeJSON()
}
// GetPower returns the power record for the user id supplied via the
// "user_id" query parameter.
func (userPower *UserPowerController) GetPower() {
	result := make(map[string]interface{})
	result["err"] = 0
	result["num"] = 0
	l_user_id := userPower.GetString("user_id")
	if l_user_id == "" {
		result["err"] = -1
		result["result"] = "用户id异常"
		userPower.Data["json"] = result
		userPower.ServeJSON()
		// Return so the handler does not continue and query with an empty
		// user id after the error response was already served.
		return
	}
	// NOTE(review): logged at Error level although this is only a trace;
	// kept as-is to preserve log output/filtering behavior.
	logs.Error("GetPower : user id == %s", l_user_id)
	if r_userpower, err := models.GetPower(l_user_id); err != nil {
		result["err"] = -2
		result["result"] = err
	} else {
		result["result"] = r_userpower
	}
	userPower.Data["json"] = result
	userPower.ServeJSON()
}
// GetNormalPower lists the power records of non-admin users, paginated by
// the "start" and "limit" query parameters.
func (userPower *UserPowerController) GetNormalPower() {
	result := comm.Result{Ret: map[string]interface{}{"err": "", "num": 0, "result": ""}}
	logs.Info("result : == ", result.Ret)
	query_start, err := userPower.GetInt("start")
	if err != nil {
		result.SetValue("-1", 0, "参数开始位置异常")
		userPower.Data["json"] = result.Get()
		userPower.ServeJSON()
		// Return after each error response: previously execution fell
		// through and ran the query with bogus pagination values.
		return
	}
	query_limit, err := userPower.GetInt("limit")
	if err != nil {
		result.SetValue("-2", 0, "参数极限位置异常")
		userPower.Data["json"] = result.Get()
		userPower.ServeJSON()
		return
	}
	_l_uplist, num, err := models.GetPowerNornamls(query_start, query_limit)
	if err != nil {
		result.SetValue("-3", num, _l_uplist)
		userPower.Data["json"] = result.Get()
		userPower.ServeJSON()
		return
	}
	result.SetValue("0", num, _l_uplist)
	userPower.Data["json"] = result.Get()
	userPower.ServeJSON()
}
// PutPower was a test endpoint and has been abandoned; it only logs its
// input and returns an empty JSON object.
func (userPower *UserPowerController) PutPower() {
	result := make(map[string]interface{})
	uid, err := userPower.GetInt64(":uid")
	// %v, not %d, for the error: %d on an error value prints a bad-verb
	// diagnostic instead of the message.
	logs.Warn("uid : %d err : %v", uid, err)
	userPower.Data["json"] = result
	userPower.ServeJSON()
}
// UpdatePower updates the power record identified by the "uid" query
// parameter with the JSON body of the request. On success "result" holds
// the new PowerInfo and "num" the number of affected rows.
func (userPower *UserPowerController) UpdatePower() {
	result := make(map[string]interface{})
	result["err"] = 0
	result["num"] = 0
	result["result"] = ""
	l_uid := userPower.GetString("uid")
	if l_uid == "" {
		result["err"] = -1
		result["result"] = "用户id异常"
		logs.Info("uid === %s", l_uid)
		userPower.Data["json"] = result
		userPower.ServeJSON()
		// Return so the handler does not continue with an empty uid after
		// the error response was already served.
		return
	}
	var _l_userpower models.UserPower
	logs.Info("请求数据是 :%s ", userPower.Ctx.Input.RequestBody)
	if err := json.Unmarshal(userPower.Ctx.Input.RequestBody, &_l_userpower); err != nil {
		result["err"] = -2
		result["result"] = err
		userPower.Data["json"] = result
		userPower.ServeJSON()
		return
	}
	r_num, l_err := models.UpdateUserPower(l_uid, _l_userpower)
	result["num"] = r_num
	if l_err != nil {
		result["err"] = -3
		result["result"] = l_err
		userPower.Data["json"] = result
		userPower.ServeJSON()
		return
	}
	result["result"] = _l_userpower.PowerInfo
	userPower.Data["json"] = result
	userPower.ServeJSON()
}
// DeletePower deletes the power record identified by the "user_id" query
// parameter.
func (userPower *UserPowerController) DeletePower() {
	result := make(map[string]interface{})
	result["err"] = 0
	result["num"] = 0
	result["result"] = ""
	_l_user_id := userPower.GetString("user_id")
	if _l_user_id == "" {
		result["err"] = -1
		result["result"] = "用户名一样"
		// %s, not %d: _l_user_id is a string and %d printed a bad-verb
		// diagnostic.
		logs.Info("user id 的结果是 %s", _l_user_id)
		userPower.Data["json"] = result
		userPower.ServeJSON()
		// Return so the handler does not attempt the delete with an empty
		// id after the error response was already served.
		return
	}
	_l_num, _l_err := models.DelPowerById(_l_user_id)
	if _l_err != nil {
		logs.Info("被操作的条目 %d", _l_num)
		result["err"] = -2
		result["result"] = "条目不正确"
		userPower.Data["json"] = result
		userPower.ServeJSON()
		return
	}
	userPower.Data["json"] = result
	userPower.ServeJSON()
}
// Login checks the md5-based login challenge for user-power accounts.
//
// The client sends md5(username + "neplite" + storedPassword + "iampassword"
// + utime) as "password" (upper-case hex); on success the server answers
// with md5(storedPassword + username + utime + "iampassword") in lower-case
// hex so the client can verify the server in return.
func (userPower *UserPowerController) Login() {
	result := make(map[string]interface{})
	result["err"] = 0
	result["num"] = 0
	username := userPower.GetString("username")
	password := userPower.GetString("password")
	utime := userPower.GetString("utime")
	o := orm.NewOrm()
	var user models.UserPower
	o.QueryTable(models.UserPower{}).Filter("UserID", username).One(&user)
	// Read once and reuse the error: the original called o.Read twice,
	// issuing a redundant database query.
	readErr := o.Read(&user)
	logs.Error("err: %s", readErr)
	if readErr != nil {
		result["result"] = "用户名或者密码错误"
		result["err"] = -1
		userPower.Data["json"] = result
		userPower.ServeJSON()
		// Return so the handler does not continue the password check with a
		// zero-value user after the error response was already served.
		return
	}
	logs.Info("user name : %s", user)
	// TODO: plain md5 is a weak password scheme — replace with a proper
	// password hash; kept for compatibility with existing clients.
	in_password := user.PassWord
	// Expected digest input: username + "neplite" + password + "iampassword" + unix timestamp.
	password_key := username + "neplite" + in_password + "iampassword" + utime
	logs.Info("加密保密串是 ==%s== ", password_key)
	beego.Warn("明文密码是: - %s -", in_password)
	has := md5.Sum([]byte(password_key))
	// Comparison uses the upper-case hex form ("%X") — clients must match it.
	md5string := fmt.Sprintf("%X", has)
	if md5string != password {
		result["err"] = -2
		// TODO: leaking the expected digest in the error response is unsafe;
		// kept for compatibility with existing clients.
		result["result"] = "密码不正确 正确的密码应该是" + md5string
		userPower.Data["json"] = result
		userPower.ServeJSON()
		return
	}
	fmt.Println(password)
	// Reply digest input: storedPassword + username + utime + "iampassword" (lower-case hex).
	r_password_key := in_password + username + utime + "iampassword"
	has2 := md5.Sum([]byte(r_password_key))
	r_passsowrd := fmt.Sprintf("%x", has2)
	// Was "% s" (stray space in the verb) — "%s" is the intended format.
	logs.Info("我的密码是%s", r_passsowrd)
	result["result"] = r_passsowrd
	result["err"] = 0
	userPower.Data["json"] = result
	userPower.ServeJSON()
}
|
package cmd
import (
"github.com/spf13/cobra"
"github.com/root-gg/plik/server/server"
)
// cleanCmd implements the "clean" sub-command, which deletes expired
// uploads and their files from the configured backends.
var cleanCmd = &cobra.Command{
	Use: "clean",
	Short: "Delete expired upload and files",
	Run: clean,
}
// init registers the clean sub-command on the root command.
func init() {
	rootCmd.AddCommand(cleanCmd)
}
// clean runs the "clean" command: it builds a plik server from the shared
// config, wires up the metadata and data backends, then removes expired
// uploads and their files.
func clean(cmd *cobra.Command, args []string) {
	// config, metadataBackend and dataBackend are package-level state
	// populated by the root command / the initialize* helpers below.
	plik := server.NewPlikServer(config)
	initializeMetadataBackend()
	plik.WithMetadataBackend(metadataBackend)
	initializeDataBackend()
	plik.WithDataBackend(dataBackend)
	// Delete expired upload and files
	plik.Clean()
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/11/18 1:12 下午
# @File : shell.go
# @Description : Shell sort implementation.
# @Attention :
*/
package sort
// shellSort sorts arr in place in ascending order using Shell sort and
// returns the same slice for convenience.
//
// Shell sort is insertion sort generalized to a shrinking gap ("stride"):
// for each gap, every gap-spaced subsequence is insertion-sorted; the final
// pass with gap 1 is a plain insertion sort over an almost-sorted slice.
func shellSort(arr []int) []int {
	// Guard: the original `for stride != 1` loop never terminated for an
	// empty slice (stride 0 >> 1 stays 0 forever).
	if len(arr) < 2 {
		return arr
	}
	for stride := len(arr) / 2; stride >= 1; stride /= 2 {
		// Gap-insertion pass: starting at the stride'th element, insert each
		// element into the sorted gap-spaced chain that precedes it. (The
		// original offset loop `i += stride` only ever covered offset 0; it
		// was correct solely because the final stride-1 pass is a full
		// insertion sort.)
		for j := stride; j < len(arr); j++ {
			tmp := arr[j]
			k := j - stride
			for ; k >= 0 && arr[k] > tmp; k -= stride {
				arr[k+stride] = arr[k]
			}
			arr[k+stride] = tmp
		}
	}
	return arr
}
|
package main
import (
"bytes"
"io/ioutil"
"testing"
)
// TestIngressCleanup verifies that cleaning the original ingress config
// yields exactly the expected cleaned YAML.
func TestIngressCleanup(t *testing.T) {
	testContents, err := ioutil.ReadFile("testdata/ingress-cleaned.yaml")
	if err != nil {
		// Fatalf, not Errorf: without the expected contents the comparison
		// below would run against nil and produce a confusing failure.
		t.Fatalf("Unexpected error reading test data file: %s", err)
	}
	cleanedContents := cleanOpenshiftConfigFile("testdata/ingress-original.yaml")
	if !bytes.Equal(cleanedContents, testContents) {
		t.Logf("Expected: \n%s", string(testContents))
		t.Logf("Received: \n%s", string(cleanedContents))
		t.Error("Cleaned list contents don't match")
	}
}
// TestSecretCleanup verifies that cleaning the original secret config
// yields exactly the expected cleaned YAML.
func TestSecretCleanup(t *testing.T) {
	testContents, err := ioutil.ReadFile("testdata/secret-cleaned.yaml")
	if err != nil {
		// Fatalf, not Errorf: the comparison is meaningless without the
		// expected contents.
		t.Fatalf("Unexpected error reading test data file: %s", err)
	}
	cleanedContents := cleanOpenshiftConfigFile("testdata/secret-original.yaml")
	if !bytes.Equal(cleanedContents, testContents) {
		t.Logf("Expected: \n%s", string(testContents))
		t.Logf("Received: \n%s", string(cleanedContents))
		t.Error("Cleaned list contents don't match")
	}
}
// TestListCleanup1 verifies cleanup of the first openshift List fixture.
func TestListCleanup1(t *testing.T) {
	testContents, err := ioutil.ReadFile("testdata/openshift-list-1-cleaned.yaml")
	if err != nil {
		// Fatalf, not Errorf: the comparison is meaningless without the
		// expected contents.
		t.Fatalf("Unexpected error reading test data file: %s", err)
	}
	cleanedContents := cleanOpenshiftConfigFile("testdata/openshift-list-1-original.yaml")
	if !bytes.Equal(cleanedContents, testContents) {
		t.Logf("Expected: \n%s", string(testContents))
		t.Logf("Received: \n%s", string(cleanedContents))
		t.Error("Cleaned list contents don't match")
	}
}
// TestListCleanup2 verifies cleanup of the second openshift List fixture.
func TestListCleanup2(t *testing.T) {
	testContents, err := ioutil.ReadFile("testdata/openshift-list-2-cleaned.yaml")
	if err != nil {
		// Fatalf, not Errorf: the comparison is meaningless without the
		// expected contents.
		t.Fatalf("Unexpected error reading test data file: %s", err)
	}
	cleanedContents := cleanOpenshiftConfigFile("testdata/openshift-list-2-original.yaml")
	if !bytes.Equal(cleanedContents, testContents) {
		t.Logf("Expected: \n%s", string(testContents))
		t.Logf("Received: \n%s", string(cleanedContents))
		t.Error("Cleaned list contents don't match")
	}
}
// TestListCleanupNodejsEx verifies cleanup of the nodejs-ex YAML fixture.
func TestListCleanupNodejsEx(t *testing.T) {
	testContents, err := ioutil.ReadFile("testdata/openshift-list-nodejs-ex-cleaned.yaml")
	if err != nil {
		// Fatalf, not Errorf: the comparison is meaningless without the
		// expected contents.
		t.Fatalf("Unexpected error reading test data file: %s", err)
	}
	cleanedContents := cleanOpenshiftConfigFile("testdata/openshift-list-nodejs-ex-original.yaml")
	if !bytes.Equal(cleanedContents, testContents) {
		t.Logf("Expected: \n%s", string(testContents))
		t.Logf("Received: \n%s", string(cleanedContents))
		t.Error("Cleaned list contents don't match")
	}
}
// TestListCleanupNodejsExJson verifies that cleaning the JSON variant of the
// nodejs-ex fixture produces the same cleaned YAML as the YAML variant.
func TestListCleanupNodejsExJson(t *testing.T) {
	testContents, err := ioutil.ReadFile("testdata/openshift-list-nodejs-ex-cleaned.yaml")
	if err != nil {
		// Fatalf, not Errorf: the comparison is meaningless without the
		// expected contents.
		t.Fatalf("Unexpected error reading test data file: %s", err)
	}
	cleanedContents := cleanOpenshiftConfigFile("testdata/openshift-list-nodejs-ex-original.json")
	if !bytes.Equal(cleanedContents, testContents) {
		t.Logf("Expected: \n%s", string(testContents))
		t.Logf("Received: \n%s", string(cleanedContents))
		t.Error("Cleaned list contents don't match")
	}
}
// TestTemplateCleanup1 verifies cleanup of the first openshift Template fixture.
func TestTemplateCleanup1(t *testing.T) {
	testContents, err := ioutil.ReadFile("testdata/openshift-template-1-cleaned.yaml")
	if err != nil {
		// Fatalf, not Errorf: the comparison is meaningless without the
		// expected contents.
		t.Fatalf("Unexpected error reading test data file: %s", err)
	}
	cleanedContents := cleanOpenshiftConfigFile("testdata/openshift-template-1-original.yaml")
	if !bytes.Equal(cleanedContents, testContents) {
		// Log both sides on failure, matching the sibling list tests.
		t.Logf("Expected: \n%s", string(testContents))
		t.Logf("Received: \n%s", string(cleanedContents))
		t.Error("Cleaned template contents don't match")
	}
}
// TestTemplateCleanup2 verifies cleanup of the second openshift Template fixture.
func TestTemplateCleanup2(t *testing.T) {
	testContents, err := ioutil.ReadFile("testdata/openshift-template-2-cleaned.yaml")
	if err != nil {
		// Fatalf, not Errorf: the comparison is meaningless without the
		// expected contents.
		t.Fatalf("Unexpected error reading test data file: %s", err)
	}
	cleanedContents := cleanOpenshiftConfigFile("testdata/openshift-template-2-original.yaml")
	if !bytes.Equal(cleanedContents, testContents) {
		// Log both sides on failure, matching the sibling list tests.
		t.Logf("Expected: \n%s", string(testContents))
		t.Logf("Received: \n%s", string(cleanedContents))
		t.Error("Cleaned template contents don't match")
	}
}
|
package pie
import "golang.org/x/exp/constraints"
// Sequence generates all numbers in a range, or returns nil when the
// params are invalid.
//
// Three variations exist, selected by how many params are given:
//
// if len(params) == 1, the result covers [0, n) where n is the first param.
// if len(params) == 2, the result covers [min, max) where min is the first
// param and max the second.
// if len(params) > 2, the result covers [min, max) stepping by the third
// param; any further params are ignored.
func Sequence[T constraints.Integer | constraints.Float](ss []T, params ...int) []T {
	// Delegate to SequenceUsing with the identity conversion int -> T.
	return SequenceUsing(ss, func(i int) T { return T(i) }, params...)
}
|
package main
import (
"bufio"
"errors"
"fmt"
"os"
"strconv"
"strings"
)
// Solution code goes here; the main function also lives in this file.
// Stack is a LIFO stack of ints.
type Stack struct {
	Data []int // stored elements, bottom first
	Last int   // most recently pushed element (Pop sets it to -1 when the stack empties)
	Size int   // number of elements currently stored
}
// NewStack returns an empty, ready-to-use Stack.
func NewStack() *Stack {
	return &Stack{Data: []int{}, Last: 0, Size: 0}
}
// Push places element on top of the stack.
func (s *Stack) Push(element int) {
	s.Size++
	s.Last = element
	s.Data = append(s.Data, element)
}
// Pop removes and returns the top element. It panics when the stack is
// empty. When the last element is removed, Last is set to the -1 sentinel.
func (s *Stack) Pop() int {
	if s.Size == 0 {
		panic("Stack is empty")
	}
	element := s.Last
	s.Size--
	s.Data = s.Data[:s.Size]
	if s.Size > 0 {
		s.Last = s.Data[s.Size-1]
	} else {
		s.Last = -1
	}
	return element
}
// main echoes the first line read from stdin; the Calculate call remains
// disabled.
func main() {
	reader := bufio.NewReader(os.Stdin)
	if line, err := reader.ReadString('\n'); err == nil {
		fmt.Println(line)
	}
	//fmt.Printf("Result = %d\n", Calculate(line))
}
// Calculate evaluates a space-separated postfix (RPN) expression and
// returns its value. The expression must end with "=", which yields the
// top of the stack; otherwise an error is returned. Operands are decimal
// integers and the operators are + - * /.
func Calculate(expression string) (int, error) {
	stack := NewStack()
	for _, token := range strings.Split(expression, " ") {
		switch token {
		// Split on " " yields "" (never " ") for consecutive spaces, so the
		// original `case " "` was dead and empty tokens fell through to Atoi
		// and errored; skip both forms.
		case "", " ":
			continue
		case "=":
			return stack.Pop(), nil
		case "+":
			stack.Push(stack.Pop() + stack.Pop())
		case "-":
			// First Pop is the subtrahend (top of stack): -b + a == a - b.
			stack.Push(-stack.Pop() + stack.Pop())
		case "*":
			stack.Push(stack.Pop() * stack.Pop())
		case "/":
			// Pop order matters: the divisor is on top. The original computed
			// top/second (b/a), the wrong operand order for postfix "a b /".
			// NOTE: division by zero still panics, as before.
			divisor := stack.Pop()
			dividend := stack.Pop()
			stack.Push(dividend / divisor)
		default:
			value, err := strconv.Atoi(token)
			if err != nil {
				return 0, err
			}
			stack.Push(value)
		}
	}
	return 0, errors.New("not enough operations in input string")
}
|
// This file was generated for SObject AccountCleanInfo, API Version v43.0 at 2018-07-30 03:47:56.659680394 -0400 EDT m=+43.003900451
package sobjects
import (
"fmt"
"strings"
)
// AccountCleanInfo is the generated representation of the Salesforce
// AccountCleanInfo SObject (API v43.0). The `force` struct tags control
// (de)serialization by the force.com client; field semantics follow the
// Salesforce object reference for AccountCleanInfo.
type AccountCleanInfo struct {
	BaseSObject
	AccountId string `force:",omitempty"`
	AccountSite string `force:",omitempty"`
	Address *Address `force:",omitempty"`
	AnnualRevenue string `force:",omitempty"`
	City string `force:",omitempty"`
	CleanedByJob bool `force:",omitempty"`
	CleanedByUser bool `force:",omitempty"`
	CompanyName string `force:",omitempty"`
	CompanyStatusDataDotCom string `force:",omitempty"`
	Country string `force:",omitempty"`
	CreatedById string `force:",omitempty"`
	CreatedDate string `force:",omitempty"`
	DandBCompanyDunsNumber string `force:",omitempty"`
	DataDotComId string `force:",omitempty"`
	Description string `force:",omitempty"`
	DunsNumber string `force:",omitempty"`
	DunsRightMatchConfidence int `force:",omitempty"`
	DunsRightMatchGrade string `force:",omitempty"`
	Fax string `force:",omitempty"`
	GeocodeAccuracy string `force:",omitempty"`
	Id string `force:",omitempty"`
	Industry string `force:",omitempty"`
	IsDeleted bool `force:",omitempty"`
	// IsDifferent* flags: the cleaned value differs from the account's value.
	IsDifferentAccountSite bool `force:",omitempty"`
	IsDifferentAnnualRevenue bool `force:",omitempty"`
	IsDifferentCity bool `force:",omitempty"`
	IsDifferentCompanyName bool `force:",omitempty"`
	IsDifferentCountry bool `force:",omitempty"`
	IsDifferentCountryCode bool `force:",omitempty"`
	IsDifferentDandBCompanyDunsNumber bool `force:",omitempty"`
	IsDifferentDescription bool `force:",omitempty"`
	IsDifferentDunsNumber bool `force:",omitempty"`
	IsDifferentFax bool `force:",omitempty"`
	IsDifferentIndustry bool `force:",omitempty"`
	IsDifferentNaicsCode bool `force:",omitempty"`
	IsDifferentNaicsDescription bool `force:",omitempty"`
	IsDifferentNumberOfEmployees bool `force:",omitempty"`
	IsDifferentOwnership bool `force:",omitempty"`
	IsDifferentPhone bool `force:",omitempty"`
	IsDifferentPostalCode bool `force:",omitempty"`
	IsDifferentSic bool `force:",omitempty"`
	IsDifferentSicDescription bool `force:",omitempty"`
	IsDifferentState bool `force:",omitempty"`
	IsDifferentStateCode bool `force:",omitempty"`
	IsDifferentStreet bool `force:",omitempty"`
	IsDifferentTickerSymbol bool `force:",omitempty"`
	IsDifferentTradestyle bool `force:",omitempty"`
	IsDifferentWebsite bool `force:",omitempty"`
	IsDifferentYearStarted bool `force:",omitempty"`
	// IsFlaggedWrong* flags: a user flagged the matched value as incorrect.
	IsFlaggedWrongAccountSite bool `force:",omitempty"`
	IsFlaggedWrongAddress bool `force:",omitempty"`
	IsFlaggedWrongAnnualRevenue bool `force:",omitempty"`
	IsFlaggedWrongCompanyName bool `force:",omitempty"`
	IsFlaggedWrongDescription bool `force:",omitempty"`
	IsFlaggedWrongDunsNumber bool `force:",omitempty"`
	IsFlaggedWrongFax bool `force:",omitempty"`
	IsFlaggedWrongIndustry bool `force:",omitempty"`
	IsFlaggedWrongNaicsCode bool `force:",omitempty"`
	IsFlaggedWrongNaicsDescription bool `force:",omitempty"`
	IsFlaggedWrongNumberOfEmployees bool `force:",omitempty"`
	IsFlaggedWrongOwnership bool `force:",omitempty"`
	IsFlaggedWrongPhone bool `force:",omitempty"`
	IsFlaggedWrongSic bool `force:",omitempty"`
	IsFlaggedWrongSicDescription bool `force:",omitempty"`
	IsFlaggedWrongTickerSymbol bool `force:",omitempty"`
	IsFlaggedWrongTradestyle bool `force:",omitempty"`
	IsFlaggedWrongWebsite bool `force:",omitempty"`
	IsFlaggedWrongYearStarted bool `force:",omitempty"`
	IsInactive bool `force:",omitempty"`
	// IsReviewed* flags: a user reviewed the matched value.
	IsReviewedAccountSite bool `force:",omitempty"`
	IsReviewedAddress bool `force:",omitempty"`
	IsReviewedAnnualRevenue bool `force:",omitempty"`
	IsReviewedCompanyName bool `force:",omitempty"`
	IsReviewedDandBCompanyDunsNumber bool `force:",omitempty"`
	IsReviewedDescription bool `force:",omitempty"`
	IsReviewedDunsNumber bool `force:",omitempty"`
	IsReviewedFax bool `force:",omitempty"`
	IsReviewedIndustry bool `force:",omitempty"`
	IsReviewedNaicsCode bool `force:",omitempty"`
	IsReviewedNaicsDescription bool `force:",omitempty"`
	IsReviewedNumberOfEmployees bool `force:",omitempty"`
	IsReviewedOwnership bool `force:",omitempty"`
	IsReviewedPhone bool `force:",omitempty"`
	IsReviewedSic bool `force:",omitempty"`
	IsReviewedSicDescription bool `force:",omitempty"`
	IsReviewedTickerSymbol bool `force:",omitempty"`
	IsReviewedTradestyle bool `force:",omitempty"`
	IsReviewedWebsite bool `force:",omitempty"`
	IsReviewedYearStarted bool `force:",omitempty"`
	LastMatchedDate string `force:",omitempty"`
	LastModifiedById string `force:",omitempty"`
	LastModifiedDate string `force:",omitempty"`
	LastStatusChangedById string `force:",omitempty"`
	LastStatusChangedDate string `force:",omitempty"`
	Latitude float64 `force:",omitempty"`
	Longitude float64 `force:",omitempty"`
	NaicsCode string `force:",omitempty"`
	NaicsDescription string `force:",omitempty"`
	Name string `force:",omitempty"`
	NumberOfEmployees int `force:",omitempty"`
	Ownership string `force:",omitempty"`
	Phone string `force:",omitempty"`
	PostalCode string `force:",omitempty"`
	Sic string `force:",omitempty"`
	SicDescription string `force:",omitempty"`
	State string `force:",omitempty"`
	Street string `force:",omitempty"`
	SystemModstamp string `force:",omitempty"`
	TickerSymbol string `force:",omitempty"`
	Tradestyle string `force:",omitempty"`
	Website string `force:",omitempty"`
	YearStarted string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type.
func (t *AccountCleanInfo) ApiName() string {
	return "AccountCleanInfo"
}
// String renders the record as a human-readable, multi-line dump:
// a header line containing the Id and Name, followed by one
// "\tField: value" line per field, in field order. Intended for
// debugging/logging, not machine parsing.
func (t *AccountCleanInfo) String() string {
	builder := strings.Builder{}
	builder.WriteString(fmt.Sprintf("AccountCleanInfo #%s - %s\n", t.Id, t.Name))
	builder.WriteString(fmt.Sprintf("\tAccountId: %v\n", t.AccountId))
	builder.WriteString(fmt.Sprintf("\tAccountSite: %v\n", t.AccountSite))
	builder.WriteString(fmt.Sprintf("\tAddress: %v\n", t.Address))
	builder.WriteString(fmt.Sprintf("\tAnnualRevenue: %v\n", t.AnnualRevenue))
	builder.WriteString(fmt.Sprintf("\tCity: %v\n", t.City))
	builder.WriteString(fmt.Sprintf("\tCleanedByJob: %v\n", t.CleanedByJob))
	builder.WriteString(fmt.Sprintf("\tCleanedByUser: %v\n", t.CleanedByUser))
	builder.WriteString(fmt.Sprintf("\tCompanyName: %v\n", t.CompanyName))
	builder.WriteString(fmt.Sprintf("\tCompanyStatusDataDotCom: %v\n", t.CompanyStatusDataDotCom))
	builder.WriteString(fmt.Sprintf("\tCountry: %v\n", t.Country))
	builder.WriteString(fmt.Sprintf("\tCreatedById: %v\n", t.CreatedById))
	builder.WriteString(fmt.Sprintf("\tCreatedDate: %v\n", t.CreatedDate))
	builder.WriteString(fmt.Sprintf("\tDandBCompanyDunsNumber: %v\n", t.DandBCompanyDunsNumber))
	builder.WriteString(fmt.Sprintf("\tDataDotComId: %v\n", t.DataDotComId))
	builder.WriteString(fmt.Sprintf("\tDescription: %v\n", t.Description))
	builder.WriteString(fmt.Sprintf("\tDunsNumber: %v\n", t.DunsNumber))
	builder.WriteString(fmt.Sprintf("\tDunsRightMatchConfidence: %v\n", t.DunsRightMatchConfidence))
	builder.WriteString(fmt.Sprintf("\tDunsRightMatchGrade: %v\n", t.DunsRightMatchGrade))
	builder.WriteString(fmt.Sprintf("\tFax: %v\n", t.Fax))
	builder.WriteString(fmt.Sprintf("\tGeocodeAccuracy: %v\n", t.GeocodeAccuracy))
	builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id))
	builder.WriteString(fmt.Sprintf("\tIndustry: %v\n", t.Industry))
	builder.WriteString(fmt.Sprintf("\tIsDeleted: %v\n", t.IsDeleted))
	builder.WriteString(fmt.Sprintf("\tIsDifferentAccountSite: %v\n", t.IsDifferentAccountSite))
	builder.WriteString(fmt.Sprintf("\tIsDifferentAnnualRevenue: %v\n", t.IsDifferentAnnualRevenue))
	builder.WriteString(fmt.Sprintf("\tIsDifferentCity: %v\n", t.IsDifferentCity))
	builder.WriteString(fmt.Sprintf("\tIsDifferentCompanyName: %v\n", t.IsDifferentCompanyName))
	builder.WriteString(fmt.Sprintf("\tIsDifferentCountry: %v\n", t.IsDifferentCountry))
	builder.WriteString(fmt.Sprintf("\tIsDifferentCountryCode: %v\n", t.IsDifferentCountryCode))
	builder.WriteString(fmt.Sprintf("\tIsDifferentDandBCompanyDunsNumber: %v\n", t.IsDifferentDandBCompanyDunsNumber))
	builder.WriteString(fmt.Sprintf("\tIsDifferentDescription: %v\n", t.IsDifferentDescription))
	builder.WriteString(fmt.Sprintf("\tIsDifferentDunsNumber: %v\n", t.IsDifferentDunsNumber))
	builder.WriteString(fmt.Sprintf("\tIsDifferentFax: %v\n", t.IsDifferentFax))
	builder.WriteString(fmt.Sprintf("\tIsDifferentIndustry: %v\n", t.IsDifferentIndustry))
	builder.WriteString(fmt.Sprintf("\tIsDifferentNaicsCode: %v\n", t.IsDifferentNaicsCode))
	builder.WriteString(fmt.Sprintf("\tIsDifferentNaicsDescription: %v\n", t.IsDifferentNaicsDescription))
	builder.WriteString(fmt.Sprintf("\tIsDifferentNumberOfEmployees: %v\n", t.IsDifferentNumberOfEmployees))
	builder.WriteString(fmt.Sprintf("\tIsDifferentOwnership: %v\n", t.IsDifferentOwnership))
	builder.WriteString(fmt.Sprintf("\tIsDifferentPhone: %v\n", t.IsDifferentPhone))
	builder.WriteString(fmt.Sprintf("\tIsDifferentPostalCode: %v\n", t.IsDifferentPostalCode))
	builder.WriteString(fmt.Sprintf("\tIsDifferentSic: %v\n", t.IsDifferentSic))
	builder.WriteString(fmt.Sprintf("\tIsDifferentSicDescription: %v\n", t.IsDifferentSicDescription))
	builder.WriteString(fmt.Sprintf("\tIsDifferentState: %v\n", t.IsDifferentState))
	builder.WriteString(fmt.Sprintf("\tIsDifferentStateCode: %v\n", t.IsDifferentStateCode))
	builder.WriteString(fmt.Sprintf("\tIsDifferentStreet: %v\n", t.IsDifferentStreet))
	builder.WriteString(fmt.Sprintf("\tIsDifferentTickerSymbol: %v\n", t.IsDifferentTickerSymbol))
	builder.WriteString(fmt.Sprintf("\tIsDifferentTradestyle: %v\n", t.IsDifferentTradestyle))
	builder.WriteString(fmt.Sprintf("\tIsDifferentWebsite: %v\n", t.IsDifferentWebsite))
	builder.WriteString(fmt.Sprintf("\tIsDifferentYearStarted: %v\n", t.IsDifferentYearStarted))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongAccountSite: %v\n", t.IsFlaggedWrongAccountSite))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongAddress: %v\n", t.IsFlaggedWrongAddress))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongAnnualRevenue: %v\n", t.IsFlaggedWrongAnnualRevenue))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongCompanyName: %v\n", t.IsFlaggedWrongCompanyName))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongDescription: %v\n", t.IsFlaggedWrongDescription))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongDunsNumber: %v\n", t.IsFlaggedWrongDunsNumber))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongFax: %v\n", t.IsFlaggedWrongFax))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongIndustry: %v\n", t.IsFlaggedWrongIndustry))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongNaicsCode: %v\n", t.IsFlaggedWrongNaicsCode))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongNaicsDescription: %v\n", t.IsFlaggedWrongNaicsDescription))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongNumberOfEmployees: %v\n", t.IsFlaggedWrongNumberOfEmployees))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongOwnership: %v\n", t.IsFlaggedWrongOwnership))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongPhone: %v\n", t.IsFlaggedWrongPhone))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongSic: %v\n", t.IsFlaggedWrongSic))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongSicDescription: %v\n", t.IsFlaggedWrongSicDescription))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongTickerSymbol: %v\n", t.IsFlaggedWrongTickerSymbol))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongTradestyle: %v\n", t.IsFlaggedWrongTradestyle))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongWebsite: %v\n", t.IsFlaggedWrongWebsite))
	builder.WriteString(fmt.Sprintf("\tIsFlaggedWrongYearStarted: %v\n", t.IsFlaggedWrongYearStarted))
	builder.WriteString(fmt.Sprintf("\tIsInactive: %v\n", t.IsInactive))
	builder.WriteString(fmt.Sprintf("\tIsReviewedAccountSite: %v\n", t.IsReviewedAccountSite))
	builder.WriteString(fmt.Sprintf("\tIsReviewedAddress: %v\n", t.IsReviewedAddress))
	builder.WriteString(fmt.Sprintf("\tIsReviewedAnnualRevenue: %v\n", t.IsReviewedAnnualRevenue))
	builder.WriteString(fmt.Sprintf("\tIsReviewedCompanyName: %v\n", t.IsReviewedCompanyName))
	builder.WriteString(fmt.Sprintf("\tIsReviewedDandBCompanyDunsNumber: %v\n", t.IsReviewedDandBCompanyDunsNumber))
	builder.WriteString(fmt.Sprintf("\tIsReviewedDescription: %v\n", t.IsReviewedDescription))
	builder.WriteString(fmt.Sprintf("\tIsReviewedDunsNumber: %v\n", t.IsReviewedDunsNumber))
	builder.WriteString(fmt.Sprintf("\tIsReviewedFax: %v\n", t.IsReviewedFax))
	builder.WriteString(fmt.Sprintf("\tIsReviewedIndustry: %v\n", t.IsReviewedIndustry))
	builder.WriteString(fmt.Sprintf("\tIsReviewedNaicsCode: %v\n", t.IsReviewedNaicsCode))
	builder.WriteString(fmt.Sprintf("\tIsReviewedNaicsDescription: %v\n", t.IsReviewedNaicsDescription))
	builder.WriteString(fmt.Sprintf("\tIsReviewedNumberOfEmployees: %v\n", t.IsReviewedNumberOfEmployees))
	builder.WriteString(fmt.Sprintf("\tIsReviewedOwnership: %v\n", t.IsReviewedOwnership))
	builder.WriteString(fmt.Sprintf("\tIsReviewedPhone: %v\n", t.IsReviewedPhone))
	builder.WriteString(fmt.Sprintf("\tIsReviewedSic: %v\n", t.IsReviewedSic))
	builder.WriteString(fmt.Sprintf("\tIsReviewedSicDescription: %v\n", t.IsReviewedSicDescription))
	builder.WriteString(fmt.Sprintf("\tIsReviewedTickerSymbol: %v\n", t.IsReviewedTickerSymbol))
	builder.WriteString(fmt.Sprintf("\tIsReviewedTradestyle: %v\n", t.IsReviewedTradestyle))
	builder.WriteString(fmt.Sprintf("\tIsReviewedWebsite: %v\n", t.IsReviewedWebsite))
	builder.WriteString(fmt.Sprintf("\tIsReviewedYearStarted: %v\n", t.IsReviewedYearStarted))
	builder.WriteString(fmt.Sprintf("\tLastMatchedDate: %v\n", t.LastMatchedDate))
	builder.WriteString(fmt.Sprintf("\tLastModifiedById: %v\n", t.LastModifiedById))
	builder.WriteString(fmt.Sprintf("\tLastModifiedDate: %v\n", t.LastModifiedDate))
	builder.WriteString(fmt.Sprintf("\tLastStatusChangedById: %v\n", t.LastStatusChangedById))
	builder.WriteString(fmt.Sprintf("\tLastStatusChangedDate: %v\n", t.LastStatusChangedDate))
	builder.WriteString(fmt.Sprintf("\tLatitude: %v\n", t.Latitude))
	builder.WriteString(fmt.Sprintf("\tLongitude: %v\n", t.Longitude))
	builder.WriteString(fmt.Sprintf("\tNaicsCode: %v\n", t.NaicsCode))
	builder.WriteString(fmt.Sprintf("\tNaicsDescription: %v\n", t.NaicsDescription))
	builder.WriteString(fmt.Sprintf("\tName: %v\n", t.Name))
	builder.WriteString(fmt.Sprintf("\tNumberOfEmployees: %v\n", t.NumberOfEmployees))
	builder.WriteString(fmt.Sprintf("\tOwnership: %v\n", t.Ownership))
	builder.WriteString(fmt.Sprintf("\tPhone: %v\n", t.Phone))
	builder.WriteString(fmt.Sprintf("\tPostalCode: %v\n", t.PostalCode))
	builder.WriteString(fmt.Sprintf("\tSic: %v\n", t.Sic))
	builder.WriteString(fmt.Sprintf("\tSicDescription: %v\n", t.SicDescription))
	builder.WriteString(fmt.Sprintf("\tState: %v\n", t.State))
	builder.WriteString(fmt.Sprintf("\tStreet: %v\n", t.Street))
	builder.WriteString(fmt.Sprintf("\tSystemModstamp: %v\n", t.SystemModstamp))
	builder.WriteString(fmt.Sprintf("\tTickerSymbol: %v\n", t.TickerSymbol))
	builder.WriteString(fmt.Sprintf("\tTradestyle: %v\n", t.Tradestyle))
	builder.WriteString(fmt.Sprintf("\tWebsite: %v\n", t.Website))
	builder.WriteString(fmt.Sprintf("\tYearStarted: %v\n", t.YearStarted))
	return builder.String()
}
// AccountCleanInfoQueryResponse is the shape of a SOQL query response
// whose record set contains AccountCleanInfo rows.
type AccountCleanInfoQueryResponse struct {
	BaseQuery
	Records []AccountCleanInfo `json:"Records" force:"records"`
}
|
/*
* Create a new intra-datacenter firewall policy.
*/
package main
import (
"flag"
"fmt"
"os"
"path"
"strings"
"github.com/grrtrr/clcv2"
"github.com/grrtrr/clcv2/clcv2cli"
"github.com/grrtrr/exit"
)
// main parses the command line, builds an intra-datacenter firewall
// policy request from the -src/-dst/-p options, and submits it to the
// data-center location given as the single positional argument.
func main() {
	var src, dst clcv2.CIDRs
	var ports clcv2.PortSpecs
	var acct = flag.String("da", "", "Destination account (defaults to source account)")
	flag.Var(&src, "src", "Source network(s) in CIDR notation (option may be repeated)")
	flag.Var(&dst, "dst", "Destination network(s) in CIDR notation (option may be repeated)")
	flag.Var(&ports, "p", "Port spec(s), number(s) or service name(s) (option may be repeated)\n"+
		" - ping: use ping or icmp\n"+
		" - full spec: tcp/20081-20083, udp/554, udp/6080-7000, ...\n"+
		" - tcp names: rdp, http, https, http-alt, ssh, ftp, ftps, ...\n"+
		" - tcp ports: 22, 443, 80, 20081-20083, ...\n"+
		" - DEFAULTS: ping, ssh, http")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s [options] -src <SrcCIDR> -dst <DstCIDR> <Location>\n", path.Base(os.Args[0]))
		flag.PrintDefaults()
	}
	flag.Parse()
	if flag.NArg() != 1 || len(src) == 0 || len(dst) == 0 {
		flag.Usage()
		// Missing required arguments is an error: exit non-zero so
		// scripts can detect misuse (was os.Exit(0)).
		os.Exit(1)
	}
	client, err := clcv2cli.NewCLIClient()
	if err != nil {
		exit.Fatal(err.Error())
	}
	// Destination account defaults to the source (client) account.
	if *acct == "" {
		*acct = client.AccountAlias
	}
	req := clcv2.IntraDataCenterFirewallPolicyReq{
		SourceCIDR:  src,
		DestCIDR:    dst,
		DestAccount: *acct,
		Ports:       clcv2.PortSpecString(ports),
	}
	id, err := client.CreateIntraDataCenterFirewallPolicy(flag.Arg(0), &req)
	if err != nil {
		exit.Fatalf("failed to create intra-datacenter firewall policy in %s: %s", flag.Arg(0), err)
	}
	fmt.Printf("Successfully created intra-datacenter firewall policy %s in %s.\n", id, strings.ToUpper(flag.Arg(0)))
}
|
package image
import (
"github.com/projecteru2/cli/cmd/utils"
"github.com/urfave/cli/v2"
)
const (
	// specFileURI is the ArgsUsage placeholder shown in help output
	// for the `image build` subcommand.
	specFileURI = "<spec file uri>"
)
// Command exports image subcommands: build, cache, remove and list.
func Command() *cli.Command {
	return &cli.Command{
		Name:  "image",
		Usage: "image commands",
		Subcommands: []*cli.Command{
			{
				Name:      "build",
				Usage:     "build image",
				ArgsUsage: specFileURI,
				Flags: []cli.Flag{
					&cli.StringFlag{
						Name:  "name",
						Usage: "name of image",
					},
					&cli.StringSliceFlag{
						Name:  "tag",
						Usage: "tag of image",
					},
					&cli.BoolFlag{
						Name:  "raw",
						Usage: "build image from dir",
					},
					&cli.BoolFlag{
						Name:  "exist",
						Usage: "build image from exist",
					},
					&cli.StringFlag{
						Name:  "user",
						Usage: "user of image",
						Value: "",
						// NOTE(review): DefaultText differs from Value; this is
						// display-only text — confirm the server-side default is root.
						DefaultText: "root",
					},
					&cli.StringFlag{
						Name:  "stop-signal",
						Usage: "customize stop signal",
					},
					&cli.IntFlag{
						Name:  "uid",
						Usage: "uid of image",
						Value: 0,
						// NOTE(review): display-only default; confirm server default is 1.
						DefaultText: "1",
					},
				},
				Action: utils.ExitCoder(cmdImageBuild),
			},
			{
				Name:      "cache",
				Usage:     "cache image",
				ArgsUsage: "name of images",
				Flags: []cli.Flag{
					&cli.StringSliceFlag{
						Name:  "nodename",
						Usage: "nodename if you just want to cache on one node",
					},
					&cli.StringFlag{
						Name:  "podname",
						Usage: "name of pod, if you want to cache on all nodes in one pod",
					},
					&cli.IntFlag{
						Name:  "concurrent",
						Usage: "how many workers to pull images",
						Value: 10,
					},
				},
				Action: utils.ExitCoder(cmdImageCache),
			},
			{
				Name:      "remove",
				Usage:     "remove image",
				ArgsUsage: "name of images",
				Flags: []cli.Flag{
					&cli.StringSliceFlag{
						Name:  "nodename",
						Usage: "nodename if you just want to clean on one node",
					},
					&cli.StringFlag{
						Name:  "podname",
						Usage: "name of pod, if you want to clean on all nodes in one pod",
					},
					&cli.IntFlag{
						Name: "concurrent",
						// Fixed copy-paste from "cache": these workers remove images.
						Usage: "how many workers to remove images",
						Value: 10,
					},
					&cli.BoolFlag{
						Name:  "prune",
						Usage: "prune node",
						Value: false,
					},
				},
				Action: utils.ExitCoder(cmdImageClean),
			},
			{
				Name:      "list",
				Aliases:   []string{"ls"},
				Usage:     "list image(s) by podname or nodename(s)",
				ArgsUsage: "[podname/nodenames]",
				Flags: []cli.Flag{
					&cli.StringSliceFlag{
						Name:  "nodename",
						Usage: "nodename if you just want to list on specific nodes",
					},
					&cli.StringFlag{
						Name:  "podname",
						Usage: "name of pod, if you want to list on all nodes in one pod",
					},
					&cli.StringFlag{
						Name:     "filter",
						Usage:    "filter the name of image",
						Required: false,
					},
				},
				Action: utils.ExitCoder(cmdImageList),
			},
		},
	}
}
|
// package rest gives rest APIs info for sdk/mesher providers
package rest
// Path parameter names used in the route templates below.
const (
	Id           = "id"
	Ms           = "ms"
	Service      = "service"
	InstanceName = "instanceName"
	StatusCode   = "StatusCode"
)

// API paths served by the provider; {...} segments refer to the path
// parameter names above.
const (
	Hello    = "/hello"
	SayHello = "/sayhello/{id}"
	Svc      = "/svc"              //return provider instance info
	Fail     = "/fail/{StatusCode}" //return specified status code
	//fail twice and succeed once;
	//return instance info if succeed, return specified status code if fail
	FailTwice = "/failTwice/{StatusCode}"
	//fail if instance name is equal to the {instanceName};
	//return instance info if succeed, return specified status code if fail
	FailInstance = "/failInstance/{instanceName}/{StatusCode}"
	//delay {ms} milliseconds and return instance info
	Delay = "/delay/{ms}"
	//delay only if instance name is equal to the {instanceName}
	// return instance info after delay
	DelayInstance = "/delayInstance/{instanceName}/{ms}"
	FailV3        = "/failV3"
	Provider      = "/provider"
	ProxyTo       = "/proxyTo/{service}"
	// config-center endpoint
	ConfigCenterAdd = "/config_center"
)
|
package osbuild2
// Stage to copy items from inputs to mount points or the tree. Multiple items
// can be copied. The source and destination is a URL.

// CopyStageOptions holds the list of copy operations the stage performs.
type CopyStageOptions struct {
	Paths []CopyStagePath `json:"paths"`
}

// CopyStagePath is a single copy operation: source URL -> destination URL.
type CopyStagePath struct {
	From string `json:"from"`
	To   string `json:"to"`
}

func (CopyStageOptions) isStageOptions() {}

// CopyStageInputs maps input names to their definitions.
type CopyStageInputs map[string]CopyStageInput

// CopyStageInput is one named input with its references.
type CopyStageInput struct {
	inputCommon
	References CopyStageReferences `json:"references"`
}

func (CopyStageInputs) isStageInputs() {}

// CopyStageReferences lists the reference IDs of an input.
type CopyStageReferences []string

func (CopyStageReferences) isReferences() {}

// CopyStageDevices maps device names to device definitions.
type CopyStageDevices map[string]Device

func (CopyStageDevices) isStageDevices() {}

// CopyStageMounts lists the mounts available to the stage.
type CopyStageMounts []Mount

func (CopyStageMounts) isStageMounts() {}
// NewCopyStage assembles an org.osbuild.copy stage from the given
// options, inputs, devices and mounts.
func NewCopyStage(options *CopyStageOptions, inputs *CopyStageInputs, devices *CopyStageDevices, mounts CopyStageMounts) *Stage {
	stage := &Stage{Type: "org.osbuild.copy"}
	stage.Options = options
	stage.Inputs = inputs
	stage.Devices = devices
	stage.Mounts = mounts
	return stage
}
|
package gateway
import (
"dao"
"input"
"github.com/fatih/structs"
)
// albumDao is the package-wide DAO instance shared by all AlbumGateway methods.
var albumDao *dao.AlbumDao

// init allocates the shared DAO before first use.
func init() {
	albumDao = new(dao.AlbumDao)
}

// AlbumGateway adapts input models to album DAO calls.
type AlbumGateway struct{}
// Store persists a new album built from the given input model and
// reports whether the DAO accepted it.
func (g AlbumGateway) Store(input input.NewAlbum) bool {
	return albumDao.Store(structs.Map(input))
}
// ListAll returns every album known to the DAO.
func (g AlbumGateway) ListAll() []dao.Album {
	albums := albumDao.ListAll()
	return albums
}
// GetById fetches the album with the given id from the DAO.
func (g AlbumGateway) GetById(id int) dao.Album {
	album := albumDao.GetById(id)
	return album
}
// DeleteById removes the album with the given id and reports success.
func (g AlbumGateway) DeleteById(id int) bool {
	ok := albumDao.DeleteById(id)
	return ok
}
// UpdateById overwrites the album with the given id using the input
// model and reports success.
func (g AlbumGateway) UpdateById(id int, input input.NewAlbum) bool {
	return albumDao.UpdateById(id, structs.Map(input))
}
|
package main
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/azzzak/fakecast/api"
"github.com/azzzak/fakecast/fs"
"github.com/azzzak/fakecast/store"
)
// version is intended to be injected at build time (e.g. via -ldflags).
var version string

// main wires configuration from flags/env, serves the fakecast API over
// HTTP, and shuts down gracefully on SIGINT/SIGTERM.
func main() {
	var (
		host       string = ""
		root       string = "/fakecast"
		credential string = ""
		listenPort int    = 80
	)
	flag.StringVar(&host, "host", lookupEnvOrString("HOST", host), "host url")
	flag.StringVar(&root, "root", lookupEnvOrString("ROOT", root), "root of content directory")
	flag.StringVar(&credential, "credential", lookupEnvOrString("CREDENTIAL", credential), "access credential")
	flag.IntVar(&listenPort, "port", lookupEnvOrInt("PORT", listenPort), "port")
	flag.Parse()
	if host == "" {
		fmt.Println("You must set HOST env variable to proper work of app")
		os.Exit(1)
	}
	// Default to https when no scheme was supplied; strip a trailing slash.
	if !strings.HasPrefix(host, "http://") && !strings.HasPrefix(host, "https://") {
		host = fmt.Sprintf("https://%s", host)
	}
	host = strings.TrimSuffix(host, "/")
	s, err := store.NewStore(root)
	if err != nil {
		fmt.Printf("Error while connecting to DB: %s\n", err)
		os.Exit(1)
	}
	defer s.Close()
	// Renamed local from "fs" to avoid shadowing the imported fs package.
	contentFS := fs.NewRoot(root)
	cfg := &api.Cfg{
		Store:      s,
		FS:         contentFS,
		Host:       host,
		Credential: credential,
	}
	srv := &http.Server{
		Addr:         fmt.Sprintf(":%d", listenPort),
		Handler:      api.InitHandlers(cfg),
		ReadTimeout:  3 * time.Second,
		WriteTimeout: 5 * time.Second,
		IdleTimeout:  10 * time.Second,
	}
	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			fmt.Printf("Could not listen on port %d: %v\n", listenPort, err)
		}
	}()
	fmt.Printf("fakecast %s is running\n", version)
	// Block until an interrupt/termination signal arrives, then give the
	// server up to 3 seconds to finish in-flight requests.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
	<-stop
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Println("fakecast is stopped")
}
func lookupEnvOrString(key string, defaultVal string) string {
if val, ok := os.LookupEnv(key); ok {
return val
}
return defaultVal
}
func lookupEnvOrInt(key string, defaultVal int) int {
if val, ok := os.LookupEnv(key); ok {
v, err := strconv.Atoi(val)
if err != nil {
fmt.Printf("Env[%s]: %v", key, err)
os.Exit(1)
}
return v
}
return defaultVal
}
|
package common
import (
"fmt"
"os"
)
// ExitWithError prints the given error's message to stderr and
// terminates the process with exit status 1.
func ExitWithError(err error) {
	const exitFailure = 1
	fmt.Fprintf(os.Stderr, "Error: %v\n", err)
	os.Exit(exitFailure)
}
// Message generates a message payload based off a path and expiry time.
// The fields are joined with '|' in the order remote, path, expiresAt.
func Message(remote, path string, expiresAt int64) []byte {
	payload := fmt.Sprintf("%s|%s|%d", remote, path, expiresAt)
	return []byte(payload)
}
|
package main
import (
"fmt"
"log"
"database/sql"
_ "github.com/lib/pq"
"github.com/jmoiron/sqlx"
"os"
)
// Connection settings for the local Postgres instance, used by both the
// database/sql and sqlx code paths.
const (
	host     = "localhost"
	port     = 5432
	user     = "postgres"
	password = "postgres"
	dbname   = "postgres"
	sslmode  = "disable"
)
// connect opens a database/sql connection to the local Postgres
// instance and verifies it with a ping, aborting on any failure.
func connect() *sql.DB {
	connectionString := fmt.Sprintf(
		"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
		host, port, user, password, dbname, sslmode)
	db, err := sql.Open("postgres", connectionString)
	if err != nil {
		log.Fatal(err)
	}
	// Open is lazy; Ping forces an actual connection attempt.
	if err = db.Ping(); err != nil {
		log.Fatal(err)
	}
	return db
}
// connectx opens a sqlx connection to the local Postgres instance,
// panicking (via MustConnect) on failure.
func connectx() *sqlx.DB {
	// Use the sslmode constant instead of a hardcoded "disable",
	// for consistency with connect().
	t := "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s"
	connectionString := fmt.Sprintf(t, host, port, user, password, dbname, sslmode)
	db := sqlx.MustConnect("postgres", connectionString)
	return db
}
// createSchema creates the genius table if it does not already exist.
func createSchema(db *sql.DB) {
	const schema = `
	CREATE TABLE IF NOT EXISTS genius (
		id SERIAL PRIMARY KEY,
		name TEXT UNIQUE,
		iq INTEGER,
		nationality TEXT
	);
	`
	if _, err := db.Exec(schema); err != nil {
		log.Fatal(err)
	}
}
// createSchemax creates the genius table if it does not already exist,
// panicking on failure via MustExec.
func createSchemax(db *sqlx.DB) {
	const schema = `
	CREATE TABLE IF NOT EXISTS genius (
		id SERIAL PRIMARY KEY,
		name TEXT UNIQUE,
		iq INTEGER,
		nationality TEXT
	);
	`
	db.MustExec(schema)
}
// Genius is one row of the genius table; the db tags map fields to
// columns for sqlx scanning.
type Genius struct {
	Name string `db:"name"`
	IQ int `db:"iq"`
	Nationality string `db:"nationality"`
}
// exec runs command against db and aborts the program on error.
// Optional args are passed through as query placeholders ($1, $2, ...),
// allowing parameterized statements; calls without args behave as before.
func exec(db *sql.DB, command string, args ...interface{}) {
	if _, err := db.Exec(command, args...); err != nil {
		log.Fatal(err)
	}
}
// cleanDB removes every row from the genius table.
func cleanDB(db *sql.DB) {
	const wipe = "DELETE FROM genius"
	exec(db, wipe)
}
// populateDB inserts the sample genius rows using a parameterized
// INSERT, aborting the program on any failure.
func populateDB(db *sql.DB) {
	data := []Genius{
		{"Charles Dickens", 165, "English"},
		{"Rafael", 170, "Italian"},
		{"Michael Faraday", 175, "English"},
		{"Baruch Spinoza", 175, "Dutch"},
		{"Michaelangelo", 177, "Italian"},
		{"Desiderius Erasmus", 177, "Dutch"},
		{"Rene Descartes", 177, "French"},
		{"Galileo Galilei", 182, "Italian"},
		{"John Stuart Mill", 182, "English"},
		{"Gottfried Wilhelm Leibnitz", 191, "German"},
		{"Isaac Newton", 192, "English"},
		{"Leonardo Da Vinci", 200, "Italian"},
		{"Johann Wolfgang von Goethe", 220, "German"},
	}
	// Placeholders ($1..$3) avoid SQL injection and quoting bugs that
	// the previous Sprintf-built statement had (e.g. names with ').
	const insert = "INSERT INTO genius (name, iq, nationality) VALUES ($1, $2, $3)"
	for _, g := range data {
		if _, err := db.Exec(insert, g.Name, g.IQ, g.Nationality); err != nil {
			log.Fatal(err)
		}
	}
}
// populateDBx inserts the sample genius rows via sqlx using a
// parameterized INSERT; MustExec panics on failure.
func populateDBx(db *sqlx.DB) {
	data := []Genius{
		{"Charles Dickens", 165, "English"},
		{"Rafael", 170, "Italian"},
		{"Michael Faraday", 175, "English"},
		{"Baruch Spinoza", 175, "Dutch"},
		{"Michaelangelo", 177, "Italian"},
		{"Desiderius Erasmus", 177, "Dutch"},
		{"Rene Descartes", 177, "French"},
		{"Galileo Galilei", 182, "Italian"},
		{"John Stuart Mill", 182, "English"},
		{"Gottfried Wilhelm Leibnitz", 191, "German"},
		{"Isaac Newton", 192, "English"},
		{"Leonardo Da Vinci", 200, "Italian"},
		{"Johann Wolfgang von Goethe", 220, "German"},
	}
	// Placeholders avoid SQL injection/quoting bugs of the previous
	// Sprintf-built statement.
	const insert = "INSERT INTO genius (name, iq, nationality) VALUES ($1, $2, $3)"
	for _, g := range data {
		db.MustExec(insert, g.Name, g.IQ, g.Nationality)
	}
}
// getEnglishGeniuses prints name and IQ of every English genius.
func getEnglishGeniuses(db *sql.DB) {
	rows, err := db.Query("SELECT name, iq FROM genius WHERE nationality='English'")
	if err != nil {
		log.Fatal(err)
	}
	// Close releases the connection back to the pool even on early exit.
	defer rows.Close()
	var name string
	var iq int
	for rows.Next() {
		if err = rows.Scan(&name, &iq); err != nil {
			log.Fatal(err)
		}
		fmt.Println("name:", name, "IQ:", iq)
	}
	// Surface any error that terminated the iteration.
	if err = rows.Err(); err != nil {
		log.Fatal(err)
	}
}
// getEnglishGeniusesx prints name and IQ of every English genius,
// using sqlx's Select to scan directly into a slice.
func getEnglishGeniusesx(db *sqlx.DB) {
	geniuses := []Genius{}
	// The Select error was previously ignored; a query failure would
	// silently print nothing.
	if err := db.Select(&geniuses, "SELECT name, iq FROM genius WHERE nationality='English'"); err != nil {
		log.Fatal(err)
	}
	for _, g := range geniuses {
		fmt.Println("name:", g.Name, "IQ:", g.IQ)
	}
}
// increaseIntelligenceOfDutchGeniusesx adds 10 IQ points to every Dutch
// genius inside a single transaction, rolling back if any update fails.
func increaseIntelligenceOfDutchGeniusesx(db *sqlx.DB) {
	geniuses := []Genius{}
	// Check the previously-ignored Select error.
	if err := db.Select(&geniuses, "SELECT name, iq FROM genius WHERE nationality='Dutch'"); err != nil {
		log.Fatal(err)
	}
	tx, err := db.Beginx()
	if err != nil {
		panic("Can't start transaction")
	}
	// Parameterized UPDATE replaces the Sprintf-built statement, which
	// broke (and was injectable) for names containing quotes.
	const update = "UPDATE genius SET iq = $1 WHERE name = $2"
	for _, g := range geniuses {
		if _, err = tx.Exec(update, g.IQ+10, g.Name); err != nil {
			fmt.Println("Rolling back transaction")
			tx.Rollback()
			return
		}
	}
	// A failed commit would previously go unnoticed.
	if err = tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
// main exercises either the sqlx or the database/sql code path,
// selected by the --use-sqlx command-line switch.
func main() {
	if len(os.Args) > 1 && os.Args[1] == "--use-sqlx" {
		fmt.Println("--- Using sqlx")
		db := connectx()
		defer db.Close()
		createSchemax(db)
		db.MustExec("DELETE FROM genius")
		populateDBx(db)
		getEnglishGeniusesx(db)
		increaseIntelligenceOfDutchGeniusesx(db)
		return
	}
	fmt.Println("--- Using database/sql")
	db := connect()
	defer db.Close()
	createSchema(db)
	cleanDB(db)
	populateDB(db)
	getEnglishGeniuses(db)
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import "context"
// MockGlobalAccessor implements the GlobalVarAccessor interface. It's used in tests.
type MockGlobalAccessor struct {
	SessionVars *SessionVars // can be overwritten if needed for correctness.
	// vals backs Get/Set when testSuite is true; only populated by
	// NewMockGlobalAccessor4Tests.
	vals map[string]string
	// testSuite selects the vals-backed code path.
	testSuite bool
}
// NewMockGlobalAccessor returns a bare MockGlobalAccessor.
// The result has no vals map and no SessionVars (testSuite stays false),
// so it serves the non-testsuite code path; use
// NewMockGlobalAccessor4Tests for a fully initialized accessor.
func NewMockGlobalAccessor() *MockGlobalAccessor {
	return new(MockGlobalAccessor)
}
// NewMockGlobalAccessor4Tests creates a new MockGlobalAccessor for use in the testsuite.
// It behaves like the real GlobalVarAccessor and has a list of sessionvars.
// Because we use the real GlobalVarAccessor outside of tests,
// this is unsafe to use by default (performance regression).
func NewMockGlobalAccessor4Tests() *MockGlobalAccessor {
	m := &MockGlobalAccessor{
		vals:      make(map[string]string),
		testSuite: true,
	}
	// There's technically a test bug here where the sessionVars won't match
	// the session vars in the test which this MockGlobalAccessor is assigned to.
	// But if the test requires accurate sessionVars, it can do the following:
	//
	//   vars := NewSessionVars()
	//   mock := NewMockGlobalAccessor()
	//   mock.SessionVars = vars
	//   vars.GlobalVarsAccessor = mock
	m.SessionVars = NewSessionVars(nil)
	// Seed every known sysvar with its default value.
	for name, sv := range GetSysVars() {
		m.vals[name] = sv.Value
	}
	return m
}
// GetGlobalSysVar implements GlobalVarAccessor.GetGlobalSysVar interface.
// In testsuite mode it reads from the seeded vals map (unknown names are
// an error); otherwise it falls back to the package-level sysvar defaults
// (unknown names yield an empty string with no error).
func (m *MockGlobalAccessor) GetGlobalSysVar(name string) (string, error) {
	if m.testSuite {
		if v, ok := m.vals[name]; ok {
			return v, nil
		}
		return "", ErrUnknownSystemVar.GenWithStackByArgs(name)
	}
	if v, ok := sysVars[name]; ok {
		return v.Value, nil
	}
	return "", nil
}
// SetGlobalSysVar implements GlobalVarAccessor.SetGlobalSysVar interface.
// The value is first checked (and possibly normalized) by the sysvar's
// Validate func, then the global set-hook runs; only afterwards is the
// resulting value recorded in m.vals.
func (m *MockGlobalAccessor) SetGlobalSysVar(ctx context.Context, name string, value string) (err error) {
	sv := GetSysVar(name)
	if sv == nil {
		return ErrUnknownSystemVar.GenWithStackByArgs(name)
	}
	// Validate may rewrite value; the rewritten value is what gets stored.
	if value, err = sv.Validate(m.SessionVars, value, ScopeGlobal); err != nil {
		return err
	}
	if err = sv.SetGlobalFromHook(ctx, m.SessionVars, value, false); err != nil {
		return err
	}
	m.vals[name] = value
	return nil
}
// SetGlobalSysVarOnly implements GlobalVarAccessor.SetGlobalSysVarOnly interface.
// Unlike SetGlobalSysVar, it records the value without validation or hooks.
func (m *MockGlobalAccessor) SetGlobalSysVarOnly(ctx context.Context, name string, value string, _ bool) error {
	if GetSysVar(name) == nil {
		return ErrUnknownSystemVar.GenWithStackByArgs(name)
	}
	m.vals[name] = value
	return nil
}
// GetTiDBTableValue implements GlobalVarAccessor.GetTiDBTableValue interface.
// Only "tikv_gc_life_time" is supported (added for the
// tidb_gc_max_wait_time validation test); anything else panics.
func (m *MockGlobalAccessor) GetTiDBTableValue(name string) (string, error) {
	if name != "tikv_gc_life_time" {
		panic("not supported")
	}
	sv := GetSysVar(TiDBGCLifetime)
	if sv == nil {
		panic("Get SysVar Failed")
	}
	return sv.Value, nil
}
// SetTiDBTableValue implements GlobalVarAccessor.SetTiDBTableValue interface.
// The mock does not support writing mysql.tidb values; any call panics.
func (m *MockGlobalAccessor) SetTiDBTableValue(name, value, comment string) error {
	panic("not supported")
}
|
package dao
import (
"fmt"
"github.com/xormplus/xorm"
"go.uber.org/zap"
"mix/test/codes"
entity "mix/test/entity/core/transaction"
mapper "mix/test/mapper/core/transaction"
"mix/test/utils/status"
)
// CreateMember inserts item through the mapper and returns the new
// row's auto-generated id.
func (p *Dao) CreateMember(logger *zap.Logger, session *xorm.Session, item *entity.Member) (id int64, err error) {
	res, err := mapper.CreateMember(session, item)
	if err != nil {
		logger.Error("Call mapper.CreateMember error", zap.Error(err))
		return id, err
	}
	if id, err = res.LastInsertId(); err != nil {
		logger.Error("Get id error", zap.Error(err))
	}
	return id, err
}
// GetMember fetches a member row by id via the mapper; a missing row is
// reported as a nil item with a nil error.
func (p *Dao) GetMember(logger *zap.Logger, session *xorm.Session, id int64) (item *entity.Member, err error) {
	if item, err = mapper.GetMember(session, id); err != nil {
		logger.Error("Call mapper.GetMember error", zap.Error(err))
	}
	return
}
// MustGetMember fetches a member by id and converts a missing row into
// a codes.MemberNotFound status error.
func (p *Dao) MustGetMember(logger *zap.Logger, session *xorm.Session, id int64) (item *entity.Member, err error) {
	if item, err = p.GetMember(logger, session, id); err != nil {
		return
	}
	if item != nil {
		return
	}
	err = status.Code(codes.MemberNotFound)
	logger.Error(
		"Get member error",
		zap.Error(err),
		zap.Int64("id", id),
	)
	return
}
// GetMemberList returns all member rows via the mapper.
func (p *Dao) GetMemberList(logger *zap.Logger, session *xorm.Session) (items []*entity.Member, err error) {
	if items, err = mapper.GetMemberList(session); err != nil {
		logger.Error("Call mapper.GetMemberList error", zap.Error(err))
	}
	return
}
// RemoveMember deletes a member row by id, ignoring the affected count
// (use MustRemoveMember to require exactly one affected row).
func (p *Dao) RemoveMember(logger *zap.Logger, session *xorm.Session, id int64) (err error) {
	if _, err = mapper.RemoveMember(session, id); err != nil {
		logger.Error("Call mapper.RemoveMember error", zap.Error(err))
	}
	return
}
// MustRemoveMember deletes a member row by id and fails unless exactly
// one row was affected.
func (p *Dao) MustRemoveMember(logger *zap.Logger, session *xorm.Session, id int64) (err error) {
	res, err := mapper.RemoveMember(session, id)
	if err != nil {
		logger.Error("Call mapper.RemoveMember error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		// Fixed copy-paste from the update path: this is a remove.
		err = fmt.Errorf("remove affected error")
		logger.Error("Call mapper.RemoveMember error",
			zap.Int64("affected", affected),
			zap.Int64("id", id),
			zap.Error(err))
		return
	}
	return
}
// UpdateMember writes item back through the mapper, ignoring the
// affected count (use MustUpdateMember to require exactly one row).
func (p *Dao) UpdateMember(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Member) (err error) {
	if _, err = mapper.UpdateMember(engine, session, item); err != nil {
		logger.Error("Call mapper.UpdateMember error", zap.Error(err))
	}
	return
}
// MustUpdateMember writes item back through the mapper and fails unless
// exactly one row was affected.
func (p *Dao) MustUpdateMember(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Member) (err error) {
	res, err := mapper.UpdateMember(engine, session, item)
	if err != nil {
		logger.Error("Call mapper.UpdateMember error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected == 1 {
		return
	}
	err = fmt.Errorf("update affected error")
	logger.Error("Call mapper.UpdateMember error",
		zap.Int64("affected", affected),
		zap.Int64("item.Id", item.Id),
		zap.Error(err))
	return
}
|
// Package microwebhook provides a MicroMDM-emulating webhook
package microwebhook
import (
"net/http"
"time"
"github.com/micromdm/nanomdm/mdm"
)
// MicroWebhook posts MicroMDM-compatible webhook events to a fixed URL.
type MicroWebhook struct {
	url    string
	client *http.Client
}

// New returns a MicroWebhook that delivers events to url using the
// shared http.DefaultClient.
func New(url string) *MicroWebhook {
	w := &MicroWebhook{url: url}
	w.client = http.DefaultClient
	return w
}
// Authenticate posts an mdm.Authenticate check-in as a webhook event.
func (w *MicroWebhook) Authenticate(r *mdm.Request, m *mdm.Authenticate) error {
	checkin := &CheckinEvent{
		UDID:         m.UDID,
		EnrollmentID: m.EnrollmentID,
		RawPayload:   m.Raw,
	}
	ev := &Event{
		Topic:        "mdm.Authenticate",
		CreatedAt:    time.Now(),
		CheckinEvent: checkin,
	}
	return postWebhookEvent(r.Context, w.client, w.url, ev)
}
// TokenUpdate posts an mdm.TokenUpdate check-in as a webhook event.
func (w *MicroWebhook) TokenUpdate(r *mdm.Request, m *mdm.TokenUpdate) error {
	checkin := &CheckinEvent{
		UDID:         m.UDID,
		EnrollmentID: m.EnrollmentID,
		RawPayload:   m.Raw,
	}
	ev := &Event{
		Topic:        "mdm.TokenUpdate",
		CreatedAt:    time.Now(),
		CheckinEvent: checkin,
	}
	return postWebhookEvent(r.Context, w.client, w.url, ev)
}
// CheckOut posts an mdm.CheckOut check-in as a webhook event.
func (w *MicroWebhook) CheckOut(r *mdm.Request, m *mdm.CheckOut) error {
	checkin := &CheckinEvent{
		UDID:         m.UDID,
		EnrollmentID: m.EnrollmentID,
		RawPayload:   m.Raw,
	}
	ev := &Event{
		Topic:        "mdm.CheckOut",
		CreatedAt:    time.Now(),
		CheckinEvent: checkin,
	}
	return postWebhookEvent(r.Context, w.client, w.url, ev)
}
// CommandAndReportResults posts command results as an "mdm.Connect"
// acknowledge event; it never supplies a follow-up command itself
// (the returned *mdm.Command is always nil).
func (w *MicroWebhook) CommandAndReportResults(r *mdm.Request, results *mdm.CommandResults) (*mdm.Command, error) {
	ack := &AcknowledgeEvent{
		UDID:         results.UDID,
		EnrollmentID: results.EnrollmentID,
		Status:       results.Status,
		CommandUUID:  results.CommandUUID,
		RawPayload:   results.Raw,
	}
	ev := &Event{
		Topic:            "mdm.Connect",
		CreatedAt:        time.Now(),
		AcknowledgeEvent: ack,
	}
	return nil, postWebhookEvent(r.Context, w.client, w.url, ev)
}
|
package machine
import (
"testing"
"github.com/google/go-cmp/cmp"
)
// TestTakeSteps tests rotors' movement using different step and cycle
// sizes.
//
// Each case reuses the same Rotors value across its steps entries, so each
// expected setting is cumulative: the state after all steps applied so far.
func TestTakeSteps(t *testing.T) {
	for i, test := range []struct {
		rotors   *Rotors
		steps    []int
		expected [][]int
	}{
		{
			rotors: newTestRotors(
				t,
				[]int{0, 0, 0},
				[]int{1, 1, 1},
				[]int{26, 26, 26},
			),
			steps:    []int{10, 26 * 26 * 26},
			expected: [][]int{{10, 0, 0}, {10, 0, 0}},
		},
		{
			rotors: newTestRotors(
				t,
				[]int{1, 0, 0},
				[]int{1, 1, 1},
				[]int{2, 2, 2},
			),
			steps:    []int{3, 20},
			expected: [][]int{{4, 2, 1}, {24, 12, 6}},
		},
		{
			rotors: newTestRotors(
				t,
				[]int{0, 13, 0},
				[]int{13, 13, 13},
				[]int{2, 2, 2},
			),
			steps:    []int{2, 8},
			expected: [][]int{{0, 0, 13}, {0, 0, 13}},
		},
		{
			rotors: newTestRotors(
				t,
				[]int{0, 0, 0},
				[]int{2, 2, 2},
				[]int{13, 13, 13},
			),
			steps:    []int{1, 14},
			expected: [][]int{{2, 0, 0}, {4, 2, 0}},
		},
		{
			rotors: newTestRotors(
				t,
				[]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				[]int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
				[]int{26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26},
			),
			steps: []int{5, 26 * 26 * 26 * 26},
			expected: [][]int{
				{5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				{5, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
			},
		},
		{
			rotors: newTestRotors(
				t,
				[]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				[]int{13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
				[]int{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
			),
			steps: []int{15, 30},
			expected: [][]int{
				{13, 13, 13, 13, 0, 0, 0, 0, 0, 0, 0, 0},
				{13, 0, 13, 13, 0, 13, 0, 0, 0, 0, 0, 0},
			},
		},
		{
			rotors: newTestRotors(
				t,
				[]int{0, 0, 0},
				[]int{1, 2, 1},
				[]int{1, 13, 26},
			),
			steps:    []int{8, 2},
			expected: [][]int{{8, 16, 0}, {10, 20, 0}},
		},
		{
			rotors: newTestRotors(
				t,
				[]int{
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
				},
				[]int{
					13, 2, 13, 2, 13, 2, 13, 2, 13, 2,
					13, 2, 13, 2, 13, 2, 13, 2, 13, 2,
					13, 2, 13, 2, 13, 2, 13, 2, 13, 2,
				},
				[]int{
					2, 13, 2, 13, 2, 13, 2, 13, 2, 13,
					2, 13, 2, 13, 2, 13, 2, 13, 2, 13,
					2, 13, 2, 13, 2, 13, 2, 13, 2, 13,
				},
			),
			steps: []int{30, 30},
			expected: [][]int{
				{
					0, 4, 13, 0, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
				},
				{
					0, 8, 0, 2, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
				},
			},
		},
	} {
		for j, step := range test.steps {
			// Advance one step at a time, then compare the full setting.
			for k := 0; k < step; k++ {
				test.rotors.takeStep()
			}
			if diff := cmp.Diff(test.expected[j], test.rotors.Setting()); diff != "" {
				t.Errorf("test %d: mismatch (-want +got):\n%s", i, diff)
			}
		}
	}
}
// BenchmarkTakeStep benchmarks the stepping of a 1000-rotor machine.
// Each benchmark iteration performs 1000 individual steps.
func BenchmarkTakeStep(b *testing.B) {
	r := GenerateRotors(1000)
	// Exclude rotor generation from the measured time.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := 0; j < 1000; j++ {
			r.takeStep()
		}
	}
}
// TestNewRotors tests rotor-collection config validation: a valid rotor
// array must be accepted and an out-of-range position must be rejected.
func TestNewRotors(t *testing.T) {
	for i, test := range []struct {
		arr       []*Rotor
		shouldErr bool
	}{
		{
			arr: newRotorArr(
				t,
				[]int{0, 0, 0},
				[]int{1, 1, 1},
				[]int{26, 26, 26},
			),
			shouldErr: false,
		},
		{
			arr: newRotorArr(
				t,
				[]int{-1, 11, 24},
				[]int{1, 1, 1},
				[]int{26, 26, 26},
			),
			shouldErr: true,
		},
	} {
		_, err := NewRotors(test.arr)
		if test.shouldErr && err == nil {
			t.Errorf("test %d: want error, got nil", i)
		} else if !test.shouldErr && err != nil {
			// %v, not %w: the %w verb is only supported by fmt.Errorf and
			// renders as %!w(...) in t.Errorf (flagged by go vet).
			t.Errorf("test %d: want nil, got %v", i, err)
		}
	}
}
// TestNewRotor tests single-rotor validation: position, step and cycle
// must all be in range and mutually compatible.
func TestNewRotor(t *testing.T) {
	// Identity pathways: each contact maps to itself.
	var pathways [alphabetSize]int
	for i := 0; i < alphabetSize; i++ {
		pathways[i] = i
	}
	for i, test := range []struct {
		position  int
		step      int
		cycle     int
		shouldErr bool
	}{
		{
			position:  0,
			step:      1,
			cycle:     26,
			shouldErr: false,
		},
		{
			position:  -121,
			step:      1,
			cycle:     26,
			shouldErr: true,
		},
		{
			position:  0,
			step:      2,
			cycle:     26,
			shouldErr: true,
		},
		{
			position:  0,
			step:      -21,
			cycle:     26,
			shouldErr: true,
		},
		{
			position:  0,
			step:      1,
			cycle:     0,
			shouldErr: true,
		},
	} {
		_, err := NewRotor(pathways, test.position, test.step, test.cycle)
		if test.shouldErr && err == nil {
			t.Errorf("test %d: want error, got nil", i)
		} else if !test.shouldErr && err != nil {
			// %v, not %w: t.Errorf does not support the %w wrapping verb.
			t.Errorf("test %d: want nil, got %v", i, err)
		}
	}
}
// TestStepCycle tests step-cycle compatibility validation.
func TestStepCycle(t *testing.T) {
	// Identity pathways: each contact maps to itself.
	var pathways [alphabetSize]int
	for i := 0; i < alphabetSize; i++ {
		pathways[i] = i
	}
	for i, test := range []struct {
		step      int
		cycle     int
		shouldErr bool
	}{
		{
			step:      1,
			cycle:     1,
			shouldErr: false,
		},
		{
			step:      1,
			cycle:     2,
			shouldErr: false,
		},
		{
			step:      2,
			cycle:     1,
			shouldErr: false,
		},
		{
			step:      1,
			cycle:     13,
			shouldErr: false,
		},
		{
			step:      13,
			cycle:     1,
			shouldErr: false,
		},
		{
			step:      1,
			cycle:     26,
			shouldErr: false,
		},
		{
			step:      26,
			cycle:     1,
			shouldErr: false,
		},
		{
			step:      2,
			cycle:     13,
			shouldErr: false,
		},
		{
			step:      13,
			cycle:     2,
			shouldErr: false,
		},
		{
			step:      23,
			cycle:     32,
			shouldErr: true,
		},
		{
			step:      1,
			cycle:     -3,
			shouldErr: true,
		},
		{
			step:      0,
			cycle:     1,
			shouldErr: true,
		},
	} {
		err := verifyRotor(pathways, 0, test.step, test.cycle)
		if test.shouldErr && err == nil {
			t.Errorf("test %d: want error, got nil", i)
		} else if !test.shouldErr && err != nil {
			// %v, not %w: t.Errorf does not support the %w wrapping verb.
			t.Errorf("test %d: want nil, got %v", i, err)
		}
	}
}
// newTestRotors builds a Rotors with the given settings, steps and cycles.
// It is test-only: construction errors are ignored, so callers must supply
// known-good data.
func newTestRotors(t *testing.T, setting []int, steps []int, cycles []int) *Rotors {
	t.Helper()
	arr := newRotorArr(t, setting, steps, cycles)
	r := &Rotors{rotors: arr}
	r.count = len(arr)
	return r
}
// newRotorArr builds a []*Rotor from parallel setting/step/cycle slices for
// testing. Identity pathways are used and NewRotor errors are ignored.
func newRotorArr(t *testing.T, setting []int, steps []int, cycles []int) []*Rotor {
	t.Helper()
	var pathways [alphabetSize]int
	for i := range pathways {
		pathways[i] = i
	}
	arr := make([]*Rotor, 0, len(setting))
	for i := range setting {
		rotor, _ := NewRotor(pathways, setting[i], steps[i], cycles[i])
		arr = append(arr, rotor)
	}
	return arr
}
|
// Copyright (c) 2020 Xiaozhe Yao & AICAMP.CO.,LTD
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
package entities
import (
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/autoai-org/aid/components/cmd/pkg/storage"
"github.com/autoai-org/aid/components/cmd/pkg/utilities"
)
// logger is the package-wide logger shared by the entity helpers.
var logger = utilities.NewDefaultLogger()

// Package defines basic package information as stored in the "package" table.
type Package struct {
	ID        string    `db:"id"`
	Name      string    `db:"name"`
	LocalPath string    `db:"localpath"`
	Vendor    string    `db:"vendor"`
	Status    string    `db:"status"`
	CreatedAt time.Time `db:"created_at"`
	UpdatedAt time.Time `db:"updated_at"`
	RemoteURL string    `db:"remote_url"`
}
// Pretrained defines the basic structure of a pretrained file entry.
// It does not need to be stored in the database and therefore has no
// `db` bindings, only `toml` ones.
type Pretrained struct {
	Name string `toml:"name"`
	URL  string `toml:"url"`
}

// Pretraineds is the collection/list of pretrained files; this definition
// mirrors the `[[models]]` array in the toml configuration.
type Pretraineds struct {
	Models []Pretrained `toml:"models"`
}
// TableName returns the database table name used for Package rows.
func (p *Package) TableName() string {
	return "package"
}

// PK returns the name of Package's primary-key column.
func (p *Package) PK() string {
	return "id"
}
// PackageConfig is the toml interface as in aid.toml: the package metadata
// plus the solvers it declares.
type PackageConfig struct {
	Solvers []Solver
	Package Package
}
// LoadPackageFromConfig decodes an aid.toml string into a PackageConfig.
// Decode failures are routed through utilities.CheckError, matching the
// error-handling convention of the other loaders in this package.
func LoadPackageFromConfig(tomlString string) PackageConfig {
	var cfg PackageConfig
	_, decodeErr := toml.Decode(tomlString, &cfg)
	utilities.CheckError(decodeErr, "Cannot Load Solvers from toml string, please check its syntax!")
	return cfg
}
// Save stores the package into the database. A fresh UUIDv4 is assigned to
// p.ID on every call, so saving the same value twice inserts two rows.
func (p *Package) Save() error {
	p.ID = utilities.GenerateUUIDv4()
	db := storage.GetDefaultDB()
	db.Connect()
	return db.Insert(p)
}
// FetchPackages returns all packages stored in the database, dereferenced
// into a value slice.
func FetchPackages() []Package {
	packagesPointers := make([]*Package, 0)
	db := storage.GetDefaultDB()
	// Surface fetch failures through the package's usual CheckError helper
	// instead of silently discarding them and returning an empty list.
	err := db.Fetch(&packagesPointers)
	utilities.CheckError(err, "Cannot fetch packages")
	packages := make([]Package, len(packagesPointers))
	for i := range packagesPointers {
		packages[i] = *packagesPointers[i]
	}
	return packages
}
// GetPackage returns the single package identified by vendorName and
// packageName. FetchOne fills in the remaining fields of the probe value.
func GetPackage(vendorName string, packageName string) Package {
	pkg := Package{Vendor: vendorName, Name: packageName}
	db := storage.GetDefaultDB()
	fetchErr := db.FetchOne(&pkg)
	utilities.CheckError(fetchErr, "Cannot fetch package object "+vendorName+":"+packageName)
	return pkg
}
// LoadPretrainedsFromConfig decodes a toml string into the Pretraineds
// model list. Decode failures go through utilities.CheckError.
func LoadPretrainedsFromConfig(tomlString string) Pretraineds {
	var models Pretraineds
	_, decodeErr := toml.Decode(tomlString, &models)
	utilities.CheckError(decodeErr, "Cannot Load Solvers")
	return models
}
// GetPackageByImageID returns the package object for the given image id.
// The image name is expected to contain at least four "-"-separated parts,
// with vendor and package name in positions 3 and 4. A zero Package is
// returned when the name is malformed or no package matches.
func GetPackageByImageID(ImageID string) Package {
	var reqPackage Package
	image := GetImage(ImageID)
	imageinfo := strings.Split(image.Name, "-")
	// Guard against malformed image names that would previously panic with
	// an index-out-of-range error.
	if len(imageinfo) < 4 {
		return reqPackage
	}
	vendor, packageName := imageinfo[2], imageinfo[3]
	packages := FetchPackages()
	for i := range packages {
		if packages[i].Vendor == vendor && packages[i].Name == packageName {
			reqPackage = packages[i]
		}
	}
	return reqPackage
}
|
package ipproxy
import (
"context"
"sync"
"github.com/google/netstack/tcpip"
"github.com/google/netstack/tcpip/buffer"
"github.com/google/netstack/tcpip/network/ipv4"
"github.com/google/netstack/tcpip/transport/tcp"
"github.com/google/netstack/waiter"
"github.com/getlantern/errors"
"github.com/getlantern/eventual"
)
// onTCP dispatches an inbound IP packet to the TCP origin responsible for
// its destination address, creating and registering the origin on first use,
// then injects the packet into that origin's netstack channel endpoint.
// NOTE(review): p.tcpOrigins is read and written here without a lock —
// presumably all packet handling runs on a single goroutine; confirm before
// adding concurrent callers.
func (p *proxy) onTCP(pkt ipPacket) {
	dstAddr := pkt.ft().dst
	o := p.tcpOrigins[dstAddr]
	if o == nil {
		var err error
		o, err = p.createTCPOrigin(dstAddr)
		if err != nil {
			log.Error(err)
			return
		}
		p.tcpOrigins[dstAddr] = o
		p.addTCPOrigin()
	}
	o.channelEndpoint.InjectInbound(ipv4.ProtocolNumber, tcpip.PacketBuffer{
		Data: buffer.View(pkt.raw).ToVectorisedView(),
	})
}

// createTCPOrigin builds a tcpOrigin for dstAddr: it initializes a netstack
// TCP endpoint bound to the destination port, puts the NIC in promiscuous
// mode so any destination IP is accepted, starts listening, and spawns the
// accept loop. On any setup failure the origin is closed before returning.
func (p *proxy) createTCPOrigin(dstAddr addr) (*tcpOrigin, error) {
	o := &tcpOrigin{
		conns: make(map[tcpip.FullAddress]*baseConn),
	}
	// closeAllConns is registered as the origin's finalizer so tearing down
	// the origin also tears down every accepted connection.
	o.origin = *newOrigin(p, tcp.NewProtocol(), dstAddr, nil, func(_o *origin) error {
		o.closeAllConns()
		return nil
	})
	if err := o.init(tcp.ProtocolNumber, tcpip.FullAddress{nicID, o.ipAddr, dstAddr.port}); err != nil {
		o.closeNow()
		return nil, errors.New("Unable to initialize TCP origin: %v", err)
	}
	if pErr := o.stack.SetPromiscuousMode(nicID, true); pErr != nil {
		o.closeNow()
		return nil, errors.New("Unable to set NIC to promiscuous mode: %v", pErr)
	}
	if err := o.ep.Listen(p.opts.TCPConnectBacklog); err != nil {
		o.closeNow()
		return nil, errors.New("Unable to listen for TCP connections: %v", err)
	}
	go acceptTCP(o)
	return o, nil
}
// acceptTCP is the accept loop for a TCP origin. ErrWouldBlock means no
// pending connection: wait until either the origin is closed or the waiter
// notification channel signals new activity. Any other accept error ends
// the loop and closes the origin.
func acceptTCP(o *tcpOrigin) {
	defer o.closeNow()
	for {
		acceptedEp, wq, err := o.ep.Accept()
		if err != nil {
			if err == tcpip.ErrWouldBlock {
				select {
				case <-o.closeCh:
					return
				case <-o.notifyCh:
					continue
				}
			}
			log.Errorf("Accept() failed: %v", err)
			return
		}
		go o.onAccept(acceptedEp, wq)
	}
}

// onAccept handles one accepted downstream connection: it dials the
// upstream for this origin's address, wires the accepted endpoint and the
// upstream together with two copy goroutines (one per direction), and
// registers the connection so it can be reaped or closed later. The conn's
// finalizer removes it from the origin's map.
func (o *tcpOrigin) onAccept(acceptedEp tcpip.Endpoint, wq *waiter.Queue) {
	upstream, dialErr := o.p.opts.DialTCP(context.Background(), "tcp", o.addr.String())
	if dialErr != nil {
		log.Errorf("Unexpected error dialing upstream to %v: %v", o.addr, dialErr)
		return
	}
	upstreamValue := eventual.NewValue()
	upstreamValue.Set(upstream)
	downstreamAddr, _ := acceptedEp.GetRemoteAddress()
	tcpConn := newBaseConn(o.p, upstreamValue, wq, func() error {
		o.removeConn(downstreamAddr)
		return nil
	})
	tcpConn.ep = acceptedEp
	go tcpConn.copyToUpstream(nil)
	go tcpConn.copyFromUpstream(tcpip.WriteOptions{})
	o.addConn(downstreamAddr, tcpConn)
}
// tcpOrigin is an origin plus the set of accepted downstream connections,
// keyed by their remote address and guarded by connsMx.
type tcpOrigin struct {
	origin
	conns   map[tcpip.FullAddress]*baseConn
	connsMx sync.Mutex
}

// addConn registers an accepted connection and bumps the proxy's TCP
// connection counter (outside the lock).
func (o *tcpOrigin) addConn(addr tcpip.FullAddress, conn *baseConn) {
	o.connsMx.Lock()
	o.conns[addr] = conn
	o.connsMx.Unlock()
	o.p.addTCPConn()
}

// removeConn deregisters the connection for addr, if present, and only
// then decrements the proxy counter — also outside the lock.
func (o *tcpOrigin) removeConn(addr tcpip.FullAddress) {
	o.connsMx.Lock()
	_, found := o.conns[addr]
	if found {
		delete(o.conns, addr)
	}
	o.connsMx.Unlock()
	if found {
		o.p.removeTCPConn()
	}
}

// closeAllConns closes every registered connection. The conns are copied
// out under the lock and closed after releasing it, because closeNow's
// finalizer calls removeConn, which re-acquires connsMx.
func (o *tcpOrigin) closeAllConns() {
	o.connsMx.Lock()
	conns := make([]*baseConn, 0, len(o.conns))
	for _, conn := range o.conns {
		conns = append(conns, conn)
	}
	o.connsMx.Unlock()
	for _, conn := range conns {
		conn.closeNow()
	}
}
// reapTCP closes idle TCP connections and origins. An origin is considered
// at least as recently active as its most recently active connection, so
// an origin with live traffic is never reaped even if the origin's own
// timestamp is stale. Closes run on separate goroutines to avoid blocking
// the reaper.
func (p *proxy) reapTCP() {
	for a, o := range p.tcpOrigins {
		// Snapshot conns under the lock; close/inspect them after release.
		o.connsMx.Lock()
		conns := make([]*baseConn, 0, len(o.conns))
		for _, conn := range o.conns {
			conns = append(conns, conn)
		}
		o.connsMx.Unlock()
		timeSinceOriginLastActive := o.timeSinceLastActive()
		if len(conns) > 0 {
			for _, conn := range conns {
				timeSinceConnLastActive := conn.timeSinceLastActive()
				if timeSinceConnLastActive > p.opts.IdleTimeout {
					log.Debug("Reaping TCP conn")
					go conn.closeNow()
				}
				if timeSinceConnLastActive < timeSinceOriginLastActive {
					timeSinceOriginLastActive = timeSinceConnLastActive
				}
			}
		}
		if timeSinceOriginLastActive > p.opts.IdleTimeout {
			go o.closeNow()
			delete(p.tcpOrigins, a)
			p.removeTCPOrigin()
		}
	}
}

// closeTCP synchronously closes every TCP origin and all of its
// connections, emptying p.tcpOrigins. Used at proxy shutdown.
func (p *proxy) closeTCP() {
	for a, o := range p.tcpOrigins {
		log.Debug("Closing all conns")
		o.closeAllConns()
		log.Debug("Closing origin")
		o.closeNow()
		delete(p.tcpOrigins, a)
		p.removeTCPOrigin()
		log.Debug("Removed origin")
	}
}
|
package entities
import "fmt"
// Product represents a sellable item with its id, payload data and price.
type Product struct {
	Id     int64  `json:"id"`
	Data   string `json:"data"`
	Prices int64  `json:"prices"`
}

// ToString renders the product as a human-readable multi-line string.
func (product Product) ToString() string {
	// The previous format string had only two verbs for three arguments,
	// which emitted a "%!(EXTRA int64=...)" artifact (go vet error);
	// the price now gets its own %d verb.
	return fmt.Sprintf("id: %d\n name: %s\n price: %d\n", product.Id, product.Data, product.Prices)
}
|
package sdp
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
)
// Sentinel errors returned by the SDP parser.
var (
	ErrSyntax  = errors.New("syntax error") // line is structurally malformed
	ErrInvalid = errors.New("invalid")      // value is well-formed but unsupported
)

// Network/address/mode tokens from RFC 4566 and RFC 4570.
const (
	NetTypeIN = "IN"
	AddrType4 = "IP4"
	AddrType6 = "IP6"
	ModeIncl  = "incl"
	ModeExcl  = "excl"
)

// Media types allowed in "m=" lines.
const (
	MediaAudio = "audio"
	MediaVideo = "video"
	MediaText  = "text"
	MediaApp   = "application"
	MediaMesg  = "message"
)

// epoch is the offset in seconds between NTP time (1900) and Unix time
// (1970); SDP "t=" fields use NTP timestamps.
const epoch = 2208988800

// Bandwidth is one "b=<type>:<value>" entry.
type Bandwidth struct {
	Type  string
	Value int64
}

// Attribute is one "a=<name>[:<value>]" entry.
type Attribute struct {
	Name  string
	Value string
}
// findAttributes returns the first attribute with the given name and
// whether one was found.
func findAttributes(name string, attrs []Attribute) (Attribute, bool) {
	for _, a := range attrs {
		if a.Name == name {
			return a, true
		}
	}
	var zero Attribute
	return zero, false
}
// ConnInfo holds "c=" connection data: network type, address type, the
// connection address and an optional multicast TTL (parsed from "addr/ttl").
type ConnInfo struct {
	NetType  string
	AddrType string
	Addr     string
	TTL      int64
}

// IsZero reports whether no connection data was parsed. TTL is not
// consulted: it can only be set together with Addr.
func (c ConnInfo) IsZero() bool {
	return c.NetType == "" && c.AddrType == "" && c.Addr == ""
}

// Session holds the "o=" origin fields plus the session-level "s=", "i="
// and "u=" values. The embedded ConnInfo is the origin's address info.
type Session struct {
	User string
	ID   int64
	Ver  int64
	ConnInfo
	Name string
	Info string
	URI  string
}

// Interval is one "t=" start/stop pair, converted to wall-clock time.
type Interval struct {
	Starts time.Time
	Ends   time.Time
}

// IsUnbound reports whether the interval has no fixed end.
func (i Interval) IsUnbound() bool {
	return i.Ends.IsZero()
}

// IsPermanent reports whether the session is permanent (both times zero).
func (i Interval) IsPermanent() bool {
	return i.Starts.IsZero() && i.Ends.IsZero()
}

// SourceInfo is a parsed RFC 4570 "source-filter" attribute value.
type SourceInfo struct {
	Mode     string
	NetType  string
	AddrType string
	Addr     string
	List     []string
}

// Include reports whether the filter is an inclusion ("incl") filter.
func (s SourceInfo) Include() bool {
	return s.Mode == ModeIncl
}
// parseSourceInfo parses an RFC 4570 source-filter value of the form
// "<mode> <nettype> <addrtype> <dest-address> <src-list>...".
func parseSourceInfo(line string) (SourceInfo, error) {
	var (
		parts = split(line)
		info  SourceInfo
	)
	// Validate the field count, not the raw character count: the previous
	// check used len(line), which let short lines index past parts.
	if len(parts) < 5 {
		return info, ErrSyntax
	}
	if err := validModeType(parts[0]); err != nil {
		return info, err
	}
	if err := validNetType(parts[1]); err != nil {
		return info, err
	}
	if err := validAddrType(parts[2], true); err != nil {
		return info, err
	}
	info.Mode = parts[0]
	info.NetType = parts[1]
	info.AddrType = parts[2]
	info.Addr = parts[3]
	info.List = append(info.List, parts[4:]...)
	return info, nil
}
// MediaInfo is one "m=" media section with its nested i/c/b/a fields.
type MediaInfo struct {
	Media      string   // media type (audio, video, ...)
	Port       uint16   // base transport port
	Count      uint16   // number of ports when "port/count" was given, else 0
	Proto      string   // transport protocol token
	Attrs      []string // remaining "m=" fields (typically payload formats)
	Info       string
	ConnInfo   ConnInfo
	Bandwidth  []Bandwidth
	Attributes []Attribute
}
// PortRange expands the media's port specification into the list of
// consecutive ports it covers. Count == 0 means a single port.
func (m MediaInfo) PortRange() []uint16 {
	if m.Count == 0 {
		return []uint16{m.Port}
	}
	ports := make([]uint16, 0, m.Count)
	for off := 0; off < int(m.Count); off++ {
		ports = append(ports, m.Port+uint16(off))
	}
	return ports
}
// SourceFilter returns the media-level RFC 4570 source-filter attribute,
// or an error when the attribute is absent or malformed.
func (m MediaInfo) SourceFilter() (SourceInfo, error) {
	attr, found := findAttributes("source-filter", m.Attributes)
	if !found {
		return SourceInfo{}, fmt.Errorf("source-filter not set")
	}
	return parseSourceInfo(attr.Value)
}
// File is a full parsed SDP description: version, session-level fields and
// the media sections. The embedded ConnInfo is the session-level "c=" line.
type File struct {
	Version int
	Session
	Email []string
	Phone []string
	ConnInfo
	Bandwidth  []Bandwidth
	Attributes []Attribute
	Intervals  []Interval
	Medias     []MediaInfo
}
// Dump renders the SDP description to a string via DumpTo.
func (f File) Dump() string {
	var out bytes.Buffer
	f.DumpTo(&out)
	return out.String()
}
// DumpTo writes the SDP description to w in the field order mandated by
// RFC 4566 (v, o/s/i/u, e, p, c, b, a, t, m...). Output is buffered and
// flushed on return; write errors are not reported.
func (f File) DumpTo(w io.Writer) {
	ws := bufio.NewWriter(w)
	defer ws.Flush()
	writePrefix(ws, 'v')
	ws.WriteString(strconv.Itoa(f.Version))
	writeEOL(ws)
	writeSession(ws, f.Session)
	for i := range f.Email {
		writePrefix(ws, 'e')
		writeLine(ws, f.Email[i])
	}
	for i := range f.Phone {
		writePrefix(ws, 'p')
		writeLine(ws, f.Phone[i])
	}
	writeConnInfo(ws, f.ConnInfo, true)
	writeBandwidths(ws, f.Bandwidth)
	writeAttributes(ws, f.Attributes)
	writeIntervals(ws, f.Intervals)
	for i := range f.Medias {
		writeMediaInfo(ws, f.Medias[i])
	}
}
// Types lists the media type of every "m=" section, in order.
func (f File) Types() []string {
	var kinds []string
	for _, m := range f.Medias {
		kinds = append(kinds, m.Media)
	}
	return kinds
}
// SourceFilter returns the session-level RFC 4570 source-filter attribute,
// or an error when the attribute is absent or malformed.
func (f File) SourceFilter() (SourceInfo, error) {
	attr, found := findAttributes("source-filter", f.Attributes)
	if !found {
		return SourceInfo{}, fmt.Errorf("source-filter not set")
	}
	return parseSourceInfo(attr.Value)
}
// Parse reads an SDP description from r. The field parsers run in the
// fixed order mandated by RFC 4566; EOF from any parser simply ends the
// document and returns what has been parsed so far.
func Parse(r io.Reader) (File, error) {
	var (
		rs   = bufio.NewReader(r)
		file File
	)
	for i := range parsers {
		p := parsers[i]
		if err := p.parse(&file, rs, p.prefix); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return file, err
		}
	}
	return file, nil
}

// parsers lists the session-level field handlers in the order those fields
// must appear. "r" (repeat) and "z" (zone) lines are consumed but ignored.
var parsers = []struct {
	prefix string
	parse  func(*File, *bufio.Reader, string) error
}{
	{prefix: "v", parse: parseVersion},
	{prefix: "o", parse: parseOrigin},
	{prefix: "s", parse: parseName},
	{prefix: "i", parse: parseInfo},
	{prefix: "u", parse: parseURI},
	{prefix: "e", parse: parseEmail},
	{prefix: "p", parse: parsePhone},
	{prefix: "c", parse: parseConnInfo},
	{prefix: "b", parse: parseBandwidth},
	{prefix: "t", parse: parseInterval},
	{prefix: "a", parse: parseAttributes},
	{prefix: "r", parse: skip},
	{prefix: "z", parse: skip},
	{prefix: "m", parse: parseMedia},
}

// mediaparsers lists the handlers for fields nested under an "m=" section,
// in their mandated order.
var mediaparsers = []struct {
	prefix string
	parse  func(*MediaInfo, *bufio.Reader, string) error
}{
	{prefix: "i", parse: parseMediaInfo},
	{prefix: "c", parse: parseMediaConnInfo},
	{prefix: "b", parse: parseMediaBandwidth},
	{prefix: "a", parse: parseMediaAttributes},
}

// parseMedia consumes every consecutive "m=" section (each with its nested
// fields) and appends them to file.Medias.
func parseMedia(file *File, rs *bufio.Reader, prefix string) error {
	for {
		if !hasPrefix(rs, prefix) {
			break
		}
		line, err := checkLine(rs, prefix)
		if err != nil {
			return err
		}
		mi, err := parseMediaDescription(line, rs)
		if err != nil {
			return err
		}
		file.Medias = append(file.Medias, mi)
	}
	return nil
}
// parseMediaDescription parses one "m=" line of the form
// "<media> <port>[/<count>] <proto> <fmt>..." and then the media section's
// nested i/c/b/a fields from rs.
func parseMediaDescription(line string, rs *bufio.Reader) (MediaInfo, error) {
	var (
		mi    MediaInfo
		err   error
		parts = split(line)
	)
	if len(parts) < 4 {
		return mi, ErrSyntax
	}
	mi.Media = parts[0]
	if x := strings.Index(parts[1], "/"); x > 0 {
		var n uint64
		// Port is the text BEFORE the slash; the previous code parsed
		// parts[1][x:], which still contained the "/" and always failed.
		if n, err = strconv.ParseUint(parts[1][:x], 10, 16); err != nil {
			return mi, err
		}
		mi.Port = uint16(n)
		if n, err = strconv.ParseUint(parts[1][x+1:], 10, 16); err != nil {
			return mi, err
		}
		mi.Count = uint16(n)
	} else {
		n, err := strconv.ParseUint(parts[1], 10, 16)
		if err != nil {
			return mi, err
		}
		mi.Port = uint16(n)
	}
	mi.Proto = parts[2]
	mi.Attrs = append(mi.Attrs, parts[3:]...)
	for i := range mediaparsers {
		p := mediaparsers[i]
		if err := p.parse(&mi, rs, p.prefix); err != nil {
			return mi, err
		}
	}
	return mi, nil
}
// parseInterval consumes every consecutive "t=<start> <stop>" line.
// Timestamps are NTP seconds; 0 means "unbounded" and maps to the zero
// time.Time. Non-zero values are shifted by the NTP/Unix epoch offset.
func parseInterval(file *File, rs *bufio.Reader, prefix string) error {
	parse := func(str string) (time.Time, error) {
		n, err := strconv.ParseInt(str, 10, 64)
		if err != nil || n == 0 {
			return time.Time{}, err
		}
		return time.Unix(n-epoch, 0).UTC(), nil
	}
	for {
		if !hasPrefix(rs, prefix) {
			break
		}
		line, err := checkLine(rs, prefix)
		if err != nil {
			return err
		}
		parts := split(line)
		if len(parts) != 2 {
			return ErrSyntax
		}
		var i Interval
		if i.Starts, err = parse(parts[0]); err != nil {
			return err
		}
		if i.Ends, err = parse(parts[1]); err != nil {
			return err
		}
		file.Intervals = append(file.Intervals, i)
	}
	return nil
}
// parseAttributes reads the session-level "a=" lines.
func parseAttributes(file *File, rs *bufio.Reader, prefix string) error {
	var err error
	file.Attributes, err = parseAttributeLines(rs, prefix)
	return err
}

// parseMediaAttributes reads the media-level "a=" lines.
func parseMediaAttributes(media *MediaInfo, rs *bufio.Reader, prefix string) error {
	var err error
	media.Attributes, err = parseAttributeLines(rs, prefix)
	return err
}

// parseBandwidth reads the session-level "b=" lines.
func parseBandwidth(file *File, rs *bufio.Reader, prefix string) error {
	var err error
	file.Bandwidth, err = parseBandwidthLines(rs, prefix)
	return err
}

// parseMediaBandwidth reads the media-level "b=" lines.
func parseMediaBandwidth(media *MediaInfo, rs *bufio.Reader, prefix string) error {
	var err error
	media.Bandwidth, err = parseBandwidthLines(rs, prefix)
	return err
}

// parseConnInfo reads the optional session-level "c=" line.
func parseConnInfo(file *File, rs *bufio.Reader, prefix string) error {
	line, err := setString(rs, prefix, false)
	if err != nil || line == "" {
		return err
	}
	file.ConnInfo, err = parseConnectionInfo(split(line))
	return err
}

// parseMediaConnInfo reads the optional media-level "c=" line.
func parseMediaConnInfo(media *MediaInfo, rs *bufio.Reader, prefix string) error {
	line, err := setString(rs, prefix, false)
	if err != nil || line == "" {
		return err
	}
	media.ConnInfo, err = parseConnectionInfo(split(line))
	return err
}

// parsePhone reads every consecutive "p=" line.
func parsePhone(file *File, rs *bufio.Reader, prefix string) error {
	var err error
	file.Phone, err = setArray(rs, prefix)
	return err
}

// parseEmail reads every consecutive "e=" line.
func parseEmail(file *File, rs *bufio.Reader, prefix string) error {
	var err error
	file.Email, err = setArray(rs, prefix)
	return err
}

// parseURI reads the optional "u=" line.
func parseURI(file *File, rs *bufio.Reader, prefix string) error {
	var err error
	file.Session.URI, err = setString(rs, prefix, false)
	return err
}

// parseInfo reads the optional session-level "i=" line.
func parseInfo(file *File, rs *bufio.Reader, prefix string) error {
	var err error
	file.Session.Info, err = setString(rs, prefix, false)
	return err
}

// parseMediaInfo reads the optional media-level "i=" line.
func parseMediaInfo(media *MediaInfo, rs *bufio.Reader, prefix string) error {
	var err error
	media.Info, err = setString(rs, prefix, false)
	return err
}
// parseName reads the mandatory "s=" session name line; an empty name is
// rejected.
func parseName(file *File, rs *bufio.Reader, prefix string) error {
	var err error
	if file.Session.Name, err = setString(rs, prefix, true); err != nil {
		return err
	}
	if file.Session.Name == "" {
		return fmt.Errorf("empty session name")
	}
	return nil
}
// parseOrigin parses the mandatory origin line:
// o=<username> <sess-id> <sess-version> <nettype> <addrtype> <unicast-address>
// A "-" username is treated as absent and left empty.
func parseOrigin(file *File, rs *bufio.Reader, prefix string) error {
	line, err := checkLine(rs, prefix)
	if err != nil {
		return err
	}
	parts := split(line)
	if len(parts) != 6 {
		return ErrSyntax
	}
	if parts[0] != "-" {
		file.Session.User = parts[0]
	}
	if file.Session.ID, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
		return fmt.Errorf("%w - session id: %s", ErrSyntax, err)
	}
	if file.Session.Ver, err = strconv.ParseInt(parts[2], 10, 64); err != nil {
		return fmt.Errorf("%w - session version: %s", ErrSyntax, err)
	}
	// The trailing three fields share the connection-info grammar.
	file.Session.ConnInfo, err = parseConnectionInfo(parts[3:])
	return err
}
// parseConnectionInfo parses "<nettype> <addrtype> <address>[/<ttl>]" as
// used by both the "c=" field and the tail of the "o=" field.
func parseConnectionInfo(parts []string) (ConnInfo, error) {
	var ci ConnInfo
	if len(parts) != 3 {
		// Fixed typo in the error message ("elemnt" -> "element").
		return ci, fmt.Errorf("%w: not enough element in line %s", ErrSyntax, parts)
	}
	if err := validNetType(parts[0]); err != nil {
		return ci, err
	}
	if err := validAddrType(parts[1], false); err != nil {
		return ci, err
	}
	ci.NetType = parts[0]
	ci.AddrType = parts[1]
	ci.Addr = parts[2]
	// A "/<ttl>" suffix on the address carries the multicast TTL.
	if x := strings.Index(ci.Addr, "/"); x > 0 {
		var err error
		if ci.TTL, err = strconv.ParseInt(ci.Addr[x+1:], 10, 16); err != nil {
			return ci, err
		}
		ci.Addr = ci.Addr[:x]
	}
	return ci, nil
}
// parseVersion reads the mandatory "v=" line; only protocol version 0 is
// supported.
func parseVersion(file *File, rs *bufio.Reader, prefix string) error {
	line, err := checkLine(rs, prefix)
	if err != nil {
		return err
	}
	file.Version, err = strconv.Atoi(line)
	if file.Version != 0 {
		return fmt.Errorf("%w: unsupported version", ErrInvalid)
	}
	// Atoi failures leave Version at 0, so the check above passes and the
	// parse error is returned here.
	return err
}
// skip consumes and discards every consecutive line with the given prefix
// (used for the unsupported "r=" and "z=" fields).
func skip(_ *File, rs *bufio.Reader, prefix string) error {
	for hasPrefix(rs, prefix) {
		if _, err := checkLine(rs, prefix); err != nil {
			return err
		}
	}
	return nil
}
// parseAttributeLines reads every consecutive "a=" line. Attributes come
// in two forms: property ("a=flag") and value ("a=name:value").
func parseAttributeLines(rs *bufio.Reader, prefix string) ([]Attribute, error) {
	var arr []Attribute
	for hasPrefix(rs, prefix) {
		line, err := checkLine(rs, prefix)
		if err != nil {
			return nil, err
		}
		// A fresh Attribute per line also avoids carrying a stale Value
		// over from a previous iteration.
		var atb Attribute
		if x := strings.Index(line, ":"); x >= 0 {
			atb.Name = line[:x]
			atb.Value = line[x+1:]
		} else {
			// Property attribute (no value). Previously these were
			// silently dropped via `continue` before the append.
			atb.Name = line
		}
		arr = append(arr, atb)
	}
	return arr, nil
}
// parseBandwidthLines reads every consecutive "b=<type>:<value>" line.
// Both a type and a non-empty numeric value are required.
func parseBandwidthLines(rs *bufio.Reader, prefix string) ([]Bandwidth, error) {
	var (
		arr []Bandwidth
		bwd Bandwidth
	)
	for hasPrefix(rs, prefix) {
		line, err := checkLine(rs, prefix)
		if err != nil {
			return nil, err
		}
		x := strings.Index(line, ":")
		// Reject a missing colon, an empty type, or an empty value.
		if x <= 0 || x >= len(line)-1 {
			return nil, fmt.Errorf("%w: parsing bandwidth (%s)", ErrSyntax, line)
		}
		bwd.Type = line[:x]
		if bwd.Value, err = strconv.ParseInt(line[x+1:], 10, 64); err != nil {
			return nil, err
		}
		arr = append(arr, bwd)
	}
	return arr, nil
}
// split tokenizes an SDP field value on single spaces.
func split(line string) []string {
	return strings.Split(line, " ")
}

// setString reads one line with the given prefix. When the field is
// optional (required == false) and the next line has a different prefix,
// it returns "" with no error.
func setString(rs *bufio.Reader, prefix string, required bool) (string, error) {
	if !required && !hasPrefix(rs, prefix) {
		return "", nil
	}
	return checkLine(rs, prefix)
}

// setArray reads every consecutive line carrying the given prefix.
func setArray(rs *bufio.Reader, prefix string) ([]string, error) {
	var arr []string
	for {
		if !hasPrefix(rs, prefix) {
			break
		}
		line, err := checkLine(rs, prefix)
		if err != nil {
			return nil, err
		}
		arr = append(arr, line)
	}
	return arr, nil
}

// hasPrefix peeks at the reader without consuming input and reports
// whether the next bytes match the prefix.
func hasPrefix(rs *bufio.Reader, prefix string) bool {
	peek, _ := rs.Peek(len(prefix))
	return string(peek) == prefix
}

// checkLine consumes one line, verifies it starts with "<prefix>=", and
// returns the remainder with any trailing CR/LF stripped. EOF from the
// underlying reader is tolerated so a final unterminated line still parses.
func checkLine(rs *bufio.Reader, prefix string) (string, error) {
	line, err := rs.ReadString('\n')
	if err != nil && !errors.Is(err, io.EOF) {
		return "", err
	}
	line = strings.TrimRight(line, "\r\n")
	prefix += "="
	if !strings.HasPrefix(line, prefix) {
		return "", fmt.Errorf("%w: missing prefix %s", ErrSyntax, prefix)
	}
	return line[len(prefix):], nil
}
// validAddrType checks an address-type token. IP4 and IP6 are always
// valid; the wildcard "*" is valid only when star is true (as allowed for
// RFC 4570 source-filter lines). The previous condition was inverted:
// with star == true every token was accepted, and with star == false the
// wildcard slipped through while real garbage was rejected.
func validAddrType(str string, star bool) error {
	switch str {
	case AddrType4, AddrType6:
		return nil
	}
	if star && str == "*" {
		return nil
	}
	return fmt.Errorf("%w: unknown addr type %s", ErrInvalid, str)
}
// validNetType checks a network-type token; only "IN" is defined.
func validNetType(str string) error {
	switch str {
	case NetTypeIN:
		return nil
	default:
		return fmt.Errorf("%w: unknown net type %s", ErrInvalid, str)
	}
}

// validModeType checks a source-filter mode token ("incl" or "excl").
func validModeType(str string) error {
	switch str {
	case ModeIncl, ModeExcl:
		return nil
	default:
		return fmt.Errorf("%w: unknown mode type %s", ErrInvalid, str)
	}
}
// writeIntervals emits one "t=" line per interval, converting wall-clock
// times back to NTP seconds (zero times become "0" = unbounded).
func writeIntervals(w *bufio.Writer, is []Interval) {
	convert := func(t time.Time) string {
		if t.IsZero() {
			return "0"
		}
		return strconv.FormatInt(t.Unix()+epoch, 10)
	}
	for i := range is {
		writePrefix(w, 't')
		w.WriteString(convert(is[i].Starts))
		w.WriteByte(' ')
		w.WriteString(convert(is[i].Ends))
		writeEOL(w)
	}
}

// writeSession emits the "o=", "s=" and optional "i="/"u=" lines. An empty
// username is rendered as "-" per RFC 4566.
func writeSession(w *bufio.Writer, sess Session) {
	writePrefix(w, 'o')
	if sess.User == "" {
		sess.User = "-"
	}
	w.WriteString(sess.User)
	w.WriteByte(' ')
	w.WriteString(strconv.FormatInt(sess.ID, 10))
	w.WriteByte(' ')
	w.WriteString(strconv.FormatInt(sess.Ver, 10))
	w.WriteByte(' ')
	// The origin's address triple is written inline (no "c=" prefix).
	writeConnInfo(w, sess.ConnInfo, false)
	writePrefix(w, 's')
	writeLine(w, sess.Name)
	if sess.Info != "" {
		writePrefix(w, 'i')
		writeLine(w, sess.Info)
	}
	if sess.URI != "" {
		writePrefix(w, 'u')
		writeLine(w, sess.URI)
	}
}

// writeMediaInfo emits one "m=" section with its nested i/c/b/a fields.
func writeMediaInfo(w *bufio.Writer, m MediaInfo) {
	writePrefix(w, 'm')
	w.WriteString(m.Media)
	w.WriteByte(' ')
	w.WriteString(strconv.FormatUint(uint64(m.Port), 10))
	if m.Count > 0 {
		w.WriteByte('/')
		w.WriteString(strconv.FormatUint(uint64(m.Count), 10))
	}
	w.WriteByte(' ')
	w.WriteString(m.Proto)
	for i := range m.Attrs {
		w.WriteByte(' ')
		w.WriteString(m.Attrs[i])
	}
	writeEOL(w)
	if m.Info != "" {
		writePrefix(w, 'i')
		writeLine(w, m.Info)
	}
	writeConnInfo(w, m.ConnInfo, true)
	writeBandwidths(w, m.Bandwidth)
	writeAttributes(w, m.Attributes)
}

// writeConnInfo emits connection data, optionally with the "c=" prefix
// (false for the inline form inside "o="). Zero values emit nothing; a
// positive TTL is appended to the address as "/<ttl>".
func writeConnInfo(w *bufio.Writer, conn ConnInfo, prefix bool) {
	if conn.IsZero() {
		return
	}
	if prefix {
		writePrefix(w, 'c')
	}
	w.WriteString(conn.NetType)
	w.WriteByte(' ')
	w.WriteString(conn.AddrType)
	w.WriteByte(' ')
	w.WriteString(conn.Addr)
	if conn.TTL > 0 {
		w.WriteByte('/')
		w.WriteString(strconv.FormatInt(conn.TTL, 10))
	}
	writeEOL(w)
}

// writeBandwidths emits one "b=<type>:<value>" line per entry.
func writeBandwidths(w *bufio.Writer, bws []Bandwidth) {
	for i := range bws {
		writePrefix(w, 'b')
		w.WriteString(bws[i].Type)
		w.WriteByte(':')
		w.WriteString(strconv.FormatInt(bws[i].Value, 10))
		writeEOL(w)
	}
}

// writeAttributes emits one "a=<name>:<value>" line per entry.
func writeAttributes(w *bufio.Writer, attrs []Attribute) {
	for i := range attrs {
		writePrefix(w, 'a')
		w.WriteString(attrs[i].Name)
		w.WriteByte(':')
		w.WriteString(attrs[i].Value)
		writeEOL(w)
	}
}
func writePrefix(w *bufio.Writer, prefix byte) {
w.WriteByte(prefix)
w.WriteByte('=')
}
func writeLine(w *bufio.Writer, line string) {
w.WriteString(line)
writeEOL(w)
}
func writeEOL(w *bufio.Writer) {
w.WriteByte('\r')
w.WriteByte('\n')
}
|
package main
import "fmt"
// Requirement:
// f1(f2)
// f1 prints its marker line and then invokes the supplied callback.
func f1(f func()) {
	fmt.Println("this is f1")
	f()
}
// f2 prints its marker line followed by the sum of x and y.
func f2(x, y int) {
	fmt.Println("this is f2")
	sum := x + y
	fmt.Println(sum)
}
// f3 adapts a two-int function into a no-argument closure that applies the
// captured x and y when invoked.
func f3(f func(int, int), x, y int) func() {
	return func() {
		f(x, y)
	}
}
// main demonstrates the higher-order functions: f2 is wrapped via f3 and
// passed to f1, then three ints are read interactively from stdin and
// echoed back.
func main() {
	ret := f3(f2, 100, 200)
	f1(ret)
	var i, j, k int
	fmt.Scanln(&i, &j, &k)
	fmt.Println(i, j, k)
}
|
package ovirt
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"github.com/AlecAivazis/survey/v2"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// errHTTPNotFound is the sentinel wrapped into download errors on HTTP 404.
var errHTTPNotFound = errors.New("http response 404")

// readFile reads the file at pathFile and returns its content, or a
// wrapped error on failure.
func readFile(pathFile string) ([]byte, error) {
	content, err := os.ReadFile(pathFile)
	if err != nil {
		return content, errors.Wrapf(err, "failed to read file: %s", pathFile)
	}
	return content, nil
}
// addTrustBundle loads the given PEM bundle into the client's certificate
// pool (seeded from the system pool when available) and records the
// trimmed bundle on engineConfig.CABundle. An empty pemContent only
// initializes the pool.
func (c *clientHTTP) addTrustBundle(pemContent string, engineConfig *Config) error {
	// SystemCertPool can fail (e.g. unsupported platform); fall back to an
	// empty pool in that case.
	c.certPool, _ = x509.SystemCertPool()
	if c.certPool == nil {
		logrus.Debug("failed to load cert pool.... Creating new cert pool")
		c.certPool = x509.NewCertPool()
	}
	if len(pemContent) != 0 {
		if !c.certPool.AppendCertsFromPEM([]byte(pemContent)) {
			return errors.New("unable to load certificate")
		}
		logrus.Debugf("loaded %s into the system pool: ", engineConfig.CAFile)
		engineConfig.CABundle = strings.TrimSpace(string(pemContent))
	}
	return nil
}
// downloadFile fetches c.urlAddr (honoring skipVerify and the configured
// CA pool) and stores the response body at c.saveFilePath. A 404 answer is
// reported as a wrapped errHTTPNotFound.
func (c *clientHTTP) downloadFile() error {
	if c.saveFilePath == "" {
		return errors.New("saveFilePath must be specified")
	}
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: c.skipVerify,
			RootCAs:            c.certPool,
		},
	}
	client := &http.Client{Transport: tr}
	resp, err := client.Get(c.urlAddr)
	// The error must be checked before touching resp: on a transport
	// failure resp is nil, and the previous code dereferenced
	// resp.StatusCode first, which panicked.
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return fmt.Errorf("%s: %w", c.urlAddr, errHTTPNotFound)
	}
	out, err := os.Create(c.saveFilePath)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, resp.Body)
	return err
}
// checkURLResponse performs a GET on the provided urlAddr to ensure that
// the url actually exists, honoring skipVerify and the configured CA pool.
// Only transport-level failures are reported; the HTTP status code is not
// inspected.
func (c *clientHTTP) checkURLResponse() error {
	logrus.Debugf("checking URL response... urlAddr: %s skipVerify: %s", c.urlAddr, strconv.FormatBool(c.skipVerify))
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: c.skipVerify,
			RootCAs:            c.certPool,
		},
	}
	client := &http.Client{Transport: tr}
	resp, err := client.Get(c.urlAddr)
	if err != nil {
		return errors.Wrapf(err, "error checking URL response")
	}
	defer resp.Body.Close()
	return nil
}
// askPassword prompts for the Engine API password and stores the answer in
// c.Password. The "authenticated" validator verifies the credentials;
// Ctrl+C lets the user go back and change the username.
func askPassword(c *Config) error {
	question := &survey.Question{
		Prompt: &survey.Password{
			Message: "Engine password",
			Help:    "Password for the chosen username, Press Ctrl+C to change username",
		},
		Validate: survey.ComposeValidators(survey.Required, authenticated(c)),
	}
	return survey.Ask([]*survey.Question{question}, &c.Password)
}
// askUsername prompts for the Engine API username (defaulting to
// "admin@internal") and stores the answer in c.Username.
func askUsername(c *Config) error {
	question := &survey.Question{
		Prompt: &survey.Input{
			Message: "Engine username",
			Help:    "The username to connect to the Engine API",
			Default: "admin@internal",
		},
		Validate: survey.ComposeValidators(survey.Required),
	}
	return survey.Ask([]*survey.Question{question}, &c.Username)
}
// askQuestionTrueOrFalse asks a yes/no confirmation question and returns
// the user's answer as a bool.
func askQuestionTrueOrFalse(question string, helpMessage string) (bool, error) {
	answer := false
	prompt := &survey.Confirm{
		Message: question,
		Help:    helpMessage,
	}
	err := survey.AskOne(prompt, &answer, survey.WithValidator(survey.Required))
	return answer, err
}
// askCredentials collects the username and password for connecting to the
// Engine, allowing up to three password attempts. A username-prompt error
// aborts immediately; the last password error is returned once attempts
// are exhausted.
func askCredentials(c Config) (Config, error) {
	const maxAttempts = 3
	logrus.Debugf("login attempts available: %d", maxAttempts)
	for remaining := maxAttempts; remaining > 0; {
		if err := askUsername(&c); err != nil {
			return c, err
		}
		err := askPassword(&c)
		if err == nil {
			// Credentials accepted.
			break
		}
		remaining--
		logrus.Debugf("login attempts now: %d", remaining)
		if remaining == 0 {
			return c, err
		}
	}
	return c, nil
}
// showPEM reads the PEM file at pemFilePath, validates that it contains a
// CERTIFICATE block, and logs the certificate's main fields (version,
// signature algorithm, serial number, issuer, validity window, subject).
// Returns an error if the file cannot be read or the certificate parsed.
func showPEM(pemFilePath string) error {
	certpem, err := os.ReadFile(pemFilePath)
	if err != nil {
		return errors.Wrapf(err, "failed to read the cert: %s", pemFilePath)
	}
	block, _ := pem.Decode(certpem)
	if block == nil {
		return errors.New("failed to parse certificate PEM")
	}
	if block.Type != "CERTIFICATE" {
		return errors.New("PEM-block should be CERTIFICATE type")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		// The file was read successfully; it is the DER payload that is
		// invalid, so report a parse failure rather than a read failure
		// (previously this duplicated the read-error message above).
		logrus.Debugf("Failed to parse the cert: %s", err)
		return errors.Wrapf(err, "failed to parse the cert: %s", pemFilePath)
	}
	logrus.Info("Loaded the following PEM file:")
	logrus.Info("\tVersion: ", cert.Version)
	logrus.Info("\tSignature Algorithm: ", cert.SignatureAlgorithm.String())
	logrus.Info("\tSerial Number: ", cert.SerialNumber)
	logrus.Info("\tIssuer: ", cert.Issuer.String())
	logrus.Info("\tValidity:")
	logrus.Info("\t\tNot Before: ", cert.NotBefore)
	logrus.Info("\t\tNot After: ", cert.NotAfter)
	logrus.Info("\tSubject: ", cert.Subject.ToRDNSequence())
	return nil
}
// askPEMFile prompts the user for a multi-line PEM certificate bundle and
// returns the text entered, or an error if the prompt fails.
func askPEMFile() (string, error) {
	var bundle string
	prompt := &survey.Multiline{
		Message: "Certificate bundle",
		Help:    "The certificate bundle to installer be able to communicate with oVirt API",
	}
	err := survey.AskOne(prompt, &bundle, survey.WithValidator(survey.Required))
	return bundle, err
}
// engineSetup interactively configures the connection to the oVirt Engine:
// it asks for the Engine FQDN, checks that the API endpoint responds,
// tries to download and confirm the Engine CA certificate (falling back
// to a user-supplied PEM bundle), requires a trusted CA (insecure mode is
// no longer supported), and finally asks for credentials.
// Returns the populated Config, or an error on any failure.
func engineSetup() (Config, error) {
	engineConfig := Config{}
	httpResource := clientHTTP{}
	err := survey.Ask([]*survey.Question{
		{
			Prompt: &survey.Input{
				Message: "Engine FQDN[:PORT]",
				Help:    "The Engine FQDN[:PORT] (engine.example.com:443)",
			},
			Validate: survey.ComposeValidators(survey.Required),
		},
	}, &engineConfig.FQDN)
	if err != nil {
		return engineConfig, err
	}
	logrus.Debug("engine FQDN: ", engineConfig.FQDN)
	// By default, we set Insecure true; it is flipped to false only once a
	// CA bundle has been successfully loaded below.
	engineConfig.Insecure = true
	// Set c.URL with the API endpoint
	engineConfig.URL = fmt.Sprintf("https://%s/ovirt-engine/api", engineConfig.FQDN)
	logrus.Debug("Engine URL: ", engineConfig.URL)
	// Start creating clientHTTP struct for checking if Engine FQDN is responding
	httpResource.skipVerify = true
	httpResource.urlAddr = engineConfig.URL
	err = httpResource.checkURLResponse()
	if err != nil {
		return engineConfig, err
	}
	// Set Engine PEM URL for Download
	engineConfig.PemURL = fmt.Sprintf(
		"https://%s/ovirt-engine/services/pki-resource?resource=ca-certificate&format=X509-PEM-CA",
		engineConfig.FQDN)
	logrus.Debug("PEM URL: ", engineConfig.PemURL)
	// Create tmpFile to store the Engine PEM file
	tmpFile, err := os.CreateTemp(os.TempDir(), "engine-")
	if err != nil {
		return engineConfig, err
	}
	defer os.Remove(tmpFile.Name())
	// Download PEM
	httpResource.saveFilePath = tmpFile.Name()
	httpResource.skipVerify = true
	httpResource.urlAddr = engineConfig.PemURL
	err = httpResource.downloadFile()
	// A 404 from the PKI resource is fatal: there is nothing to fall back to.
	if errors.Is(err, errHTTPNotFound) {
		return engineConfig, err
	}
	if err != nil {
		logrus.Warning("cannot download PEM file from Engine!", err)
		answer, err := askQuestionTrueOrFalse(
			"Would you like to continue?",
			"By not using a trusted CA, insecure connections can "+
				"cause man-in-the-middle attacks among many others.")
		if err != nil || !answer {
			return engineConfig, err
		}
	} else {
		err = showPEM(httpResource.saveFilePath)
		if err != nil {
			engineConfig.Insecure = true
		} else {
			answer, err := askQuestionTrueOrFalse(
				"Would you like to use the above certificate to connect to the Engine? ",
				"Certificate to connect to the Engine. Make sure this cert CA is trusted locally.")
			if err != nil {
				return engineConfig, err
			}
			if answer {
				pemFile, err := readFile(httpResource.saveFilePath)
				// BUGFIX: inspect the read error before consuming pemFile;
				// previously CABundle was assigned from pemFile first.
				if err != nil {
					return engineConfig, err
				}
				engineConfig.CABundle = string(pemFile)
				if len(engineConfig.CABundle) > 0 {
					engineConfig.Insecure = false
				}
			} else {
				answer, err = askQuestionTrueOrFalse(
					"Would you like to import another PEM bundle?",
					"You can use your own PEM bundle to connect to the Engine API")
				if err != nil {
					return engineConfig, err
				}
				if answer {
					// Best-effort: a prompt error leaves CABundle empty and
					// Insecure true, which is rejected below.
					engineConfig.CABundle, _ = askPEMFile()
					if len(engineConfig.CABundle) > 0 {
						engineConfig.Insecure = false
					}
				}
			}
		}
	}
	if !engineConfig.Insecure {
		err = httpResource.addTrustBundle(engineConfig.CABundle, &engineConfig)
		if err != nil {
			engineConfig.Insecure = true
		}
	}
	if engineConfig.Insecure {
		logrus.Error(
			"****************************************************************************\n",
			"* Could not configure secure communication to the oVirt engine. *\n",
			"* As of 4.7 insecure mode for oVirt is no longer supported in the *\n",
			"* installer. Please see the help article titled \"Installing OpenShift on *\n",
			"* RHV/oVirt in insecure mode\" for details how to configure insecure mode *\n",
			"* manually. *\n",
			"****************************************************************************",
		)
		return engineConfig,
			errors.New(
				"cannot detect engine ca cert imported in the system",
			)
	}
	return askCredentials(engineConfig)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.