text stringlengths 11 4.05M |
|---|
package cmd
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/fatih/color"
"github.com/porter-dev/porter/cli/cmd/api"
"github.com/porter-dev/porter/cli/cmd/github"
"github.com/spf13/cobra"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/types"
)
// dockerCmd is the parent command grouping Docker-related subcommands
// under "porter docker".
var dockerCmd = &cobra.Command{
	Use:   "docker",
	Short: "Commands to configure Docker for a project",
}
// configureCmd wires the host's Docker config to the Porter credential
// helper for the current project's registries.
var configureCmd = &cobra.Command{
	Use:   "configure",
	Short: "Configures the host's Docker instance",
	Run: func(cmd *cobra.Command, args []string) {
		err := checkLoginAndRun(args, dockerConfig)
		// checkLoginAndRun is expected to have reported the error already;
		// exit non-zero so shell scripts can detect the failure.
		if err != nil {
			os.Exit(1)
		}
	},
}
// init registers "docker" under the root command and "configure" under "docker".
func init() {
	rootCmd.AddCommand(dockerCmd)
	dockerCmd.AddCommand(configureCmd)
}
// dockerConfig configures the host's Docker CLI so it can pull from the
// registries linked to the current Porter project. It:
//  1. lists the project's registries and collects their hostnames,
//  2. ensures ~/.docker/config.json exists,
//  3. ensures the docker-credential-porter helper is installed and matches
//     the CLI version,
//  4. wires each registry to the "porter" credential helper (or, for Docker
//     Hub, stores a token-based auth config fetched from the Porter API).
func dockerConfig(user *api.AuthCheckResponse, client *api.Client, args []string) error {
	pID := config.Project

	// get all registry hosts that should be added
	regToAdd := make([]string, 0)

	registries, err := client.ListRegistries(
		context.Background(),
		pID,
	)
	if err != nil {
		return err
	}

	for _, registry := range registries {
		if registry.URL == "" {
			continue
		}

		rURL := registry.URL

		// url.Parse only populates Host when a scheme is present
		if !strings.Contains(rURL, "http") {
			rURL = "http://" + rURL
		}

		// strip the protocol, keeping only the host portion
		regURL, err := url.Parse(rURL)
		if err != nil {
			continue
		}

		regToAdd = append(regToAdd, regURL.Host)
	}

	dockerConfigFile := filepath.Join(home, ".docker", "config.json")

	// Create an empty config file if it does not exist (or the path is a
	// directory). BUGFIX: the Stat error must be checked before touching
	// info -- the original dereferenced info.IsDir() first, which panics
	// with a nil pointer when the file is missing.
	if info, err := os.Stat(dockerConfigFile); os.IsNotExist(err) || (err == nil && info.IsDir()) {
		if err := ioutil.WriteFile(dockerConfigFile, []byte("{}"), 0700); err != nil {
			return err
		}
	}

	// read the file bytes
	configBytes, err := ioutil.ReadFile(dockerConfigFile)
	if err != nil {
		return err
	}

	// install the credential helper if it is missing entirely
	if !commandExists("docker-credential-porter") {
		if err := downloadCredMatchingRelease(); err != nil {
			color.New(color.FgRed).Println("Failed to download credential helper binary:", err.Error())
			os.Exit(1)
		}
	}

	// re-download the helper if its reported version does not match the CLI
	cmdVersionCred := exec.Command("docker-credential-porter", "--version")
	writer := &versionWriter{}
	cmdVersionCred.Stdout = writer

	err = cmdVersionCred.Run()
	if err != nil || writer.Version != Version {
		if err := downloadCredMatchingRelease(); err != nil {
			color.New(color.FgRed).Println("Failed to download credential helper binary:", err.Error())
			os.Exit(1)
		}
	}

	configFile := &configfile.ConfigFile{
		Filename: dockerConfigFile,
	}

	// BUGFIX: unmarshal into configFile -- the original unmarshaled into the
	// package-level CLI `config`, so the existing Docker config on disk was
	// never loaded before being overwritten by Save below.
	if err := json.Unmarshal(configBytes, configFile); err != nil {
		return err
	}

	if configFile.CredentialHelpers == nil {
		configFile.CredentialHelpers = make(map[string]string)
	}
	if configFile.AuthConfigs == nil {
		configFile.AuthConfigs = make(map[string]types.AuthConfig)
	}

	for _, regURL := range regToAdd {
		// non-Dockerhub registries simply use the porter credential helper
		if !strings.Contains(regURL, "index.docker.io") {
			configFile.CredentialHelpers[regURL] = "porter"
			continue
		}

		// if this is a dockerhub registry, see if an auth config has already
		// been generated for index.docker.io
		isAuthenticated := false
		for key := range configFile.AuthConfigs {
			if key == "https://index.docker.io/v1/" {
				isAuthenticated = true
			}
		}
		if isAuthenticated {
			continue
		}

		// get a dockerhub token from the Porter API
		tokenResp, err := client.GetDockerhubAuthorizationToken(context.Background(), config.Project)
		if err != nil {
			return err
		}

		decodedToken, err := base64.StdEncoding.DecodeString(tokenResp.Token)
		if err != nil {
			return fmt.Errorf("Invalid token: %v", err)
		}

		// the decoded token has the form "username:password"
		parts := strings.SplitN(string(decodedToken), ":", 2)
		if len(parts) < 2 {
			return fmt.Errorf("Invalid token: expected two parts, got %d", len(parts))
		}

		configFile.AuthConfigs["https://index.docker.io/v1/"] = types.AuthConfig{
			Auth:     tokenResp.Token,
			Username: parts[0],
			Password: parts[1],
		}

		// since we're using token-based auth, unset the credstore
		configFile.CredentialsStore = ""
	}

	return configFile.Save()
}
// downloadCredMatchingRelease downloads the docker-credential-porter helper
// release matching this CLI's Version from the porter-dev/porter GitHub repo
// and installs the binary into /usr/local/bin, staging the platform-specific
// zip under ~/.porter.
func downloadCredMatchingRelease() error {
	// download the porter cred helper
	z := &github.ZIPReleaseGetter{
		AssetName:           "docker-credential-porter",
		AssetFolderDest:     "/usr/local/bin",
		ZipFolderDest:       filepath.Join(home, ".porter"),
		ZipName:             "docker-credential-porter_latest.zip",
		EntityID:            "porter-dev",
		RepoName:            "porter",
		IsPlatformDependent: true,
		Downloader: &github.ZIPDownloader{
			ZipFolderDest:   filepath.Join(home, ".porter"),
			AssetFolderDest: "/usr/local/bin",
			ZipName:         "docker-credential-porter_latest.zip",
		},
	}
	return z.GetRelease(Version)
}
// commandExists reports whether the named executable can be resolved via the
// system PATH (or, if cmd contains a path separator, whether it is runnable).
func commandExists(cmd string) bool {
	if _, err := exec.LookPath(cmd); err != nil {
		return false
	}
	return true
}
|
/*
* median-stats : Calculate Hub median statistics
*/
package main
import (
"encoding/csv"
"fmt"
"io"
"log"
"os"
"sort"
"strconv"
"strings"
)
// main reads per-image statistics from docker-hub-stats.csv and prints the
// median image size, package count, file count, and unmanaged-file count,
// split into "heavyweight" distros (debian/ubuntu/centos/fedora) and
// everything else ("lightweight").
func main() {
	// distros considered "heavyweight"
	distroheavy := map[string]int{
		"debian": 1,
		"ubuntu": 1,
		"centos": 1,
		"fedora": 1,
	}

	file, err := os.Open("docker-hub-stats.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	in := csv.NewReader(file)

	// column name -> index, filled from the header row; the "distro"
	// sentinel of -1 marks that the header has not been seen yet
	idx := map[string]int{"distro": -1}
	heavy := make(map[string][]int)
	light := make(map[string][]int)

	for {
		record, err := in.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if idx["distro"] < 0 {
			// header row: remember the position of each column we need
			for i, val := range record {
				switch val {
				case "distro", "image_size", "packages", "files", "package_files":
					idx[val] = i
				}
			}
			continue
		}
		// data row: parse the numeric columns (parse errors yield zero,
		// matching the original behavior of ignoring strconv errors)
		sz, _ := strconv.Atoi(record[idx["image_size"]])
		pkgs, _ := strconv.Atoi(record[idx["packages"]])
		files, _ := strconv.Atoi(record[idx["files"]])
		pfiles, _ := strconv.Atoi(record[idx["package_files"]])

		dest := light
		if _, ok := distroheavy[record[idx["distro"]]]; ok {
			dest = heavy
		}
		dest["image_size"] = append(dest["image_size"], sz)
		dest["packages"] = append(dest["packages"], pkgs)
		dest["files"] = append(dest["files"], files)
		dest["ufiles"] = append(dest["ufiles"], files-pfiles)
	}

	// upper median: middle element of the sorted data (sorts in place)
	median := func(data []int) int {
		sort.Ints(data)
		return data[len(data)/2]
	}

	// row renders one CSV output line for a metric group
	row := func(name string, m map[string][]int) string {
		return strings.Join([]string{
			name,
			strconv.Itoa(median(m["image_size"])),
			strconv.Itoa(median(m["packages"])),
			strconv.Itoa(median(m["files"])),
			strconv.Itoa(median(m["ufiles"])),
		}, ",")
	}

	fmt.Println("Type,Image Size,Packages,Files,Unmanaged Files")
	fmt.Println(row("Heavyweight", heavy))
	fmt.Println(row("Lightweight", light))
}
|
package stream
import (
"context"
"errors"
"net/http"
"net/http/httptest"
"net/url"
"sync"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/danfaizer/gowse"
agent "github.com/adevinta/vulcan-agent"
"github.com/adevinta/vulcan-agent/check"
)
// Fixture parameters shared by all stream tests.
const (
	streamPath    = "stream" // NOTE(review): appears unused in this chunk
	streamName    = "events"
	streamTimeout = 2 * time.Second        // stream connect/read timeout passed to New
	pingInterval  = 1 * time.Second        // how often test servers broadcast messages
	waitTime      = 100 * time.Millisecond // polling/settle interval
)
// Fixture IDs and check jobs used across the tests: the "known" values are
// registered with the test agent/storage, the "unknown" ones are not.
var (
	knownAgentID   = "00000000-0000-0000-0000-000000000000"
	unknownAgentID = "11111111-1111-1111-1111-111111111111"

	knownCheckJob = check.JobParams{
		ScanID:  "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaab",
		CheckID: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
		Target:  "test",
		Image:   "test",
		Timeout: 10,
	}
	unknownCheckJob = check.JobParams{
		CheckID: "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
		Target:  "test",
		Image:   "test",
		Timeout: 10,
	}
)
// testMeta is the per-check metadata stored by TestAgent; aborted records
// whether Abort was invoked for the check.
type testMeta struct {
	aborted bool
}
// TestAgent is a minimal agent implementation backed by a check.Storage,
// used to observe what the stream dispatches.
type TestAgent struct {
	id      string              // agent UUID
	status  string              // current agent status; guarded by the package-level mu (see Status/SetStatus)
	storage check.Storage       // backing store for check jobs and metadata
	jobs    map[string]check.Job
	ctx     context.Context // NOTE(review): never assigned in this chunk -- confirm it is needed
	log     *logrus.Entry
}
// ID returns the agent's UUID.
func (ta *TestAgent) ID() string {
	return ta.id
}
// Status returns the agent status.
// NOTE(review): mu is a package-level mutex not visible in this chunk; it
// presumably guards status across Status/SetStatus -- confirm it exists.
func (ta *TestAgent) Status() string {
	mu.Lock()
	defer mu.Unlock()
	return ta.status
}
// SetStatus updates the agent status under the same package-level mutex
// used by Status.
func (ta *TestAgent) SetStatus(status string) {
	mu.Lock()
	defer mu.Unlock()
	ta.status = status
}
// Job returns the stored job for checkID, or a zero-valued check.Job when
// the check is unknown to the storage.
func (ta *TestAgent) Job(checkID string) check.Job {
	if job, err := ta.storage.Get(checkID); err == nil {
		return job
	}
	return check.Job{}
}
// Jobs returns the agent's in-memory job map.
func (ta *TestAgent) Jobs() map[string]check.Job {
	return ta.jobs
}
// Run marks the check identified by checkID as started by attaching fresh
// metadata; it fails when the check is not present in the storage.
func (ta *TestAgent) Run(checkID string) error {
	if _, err := ta.storage.Get(checkID); err != nil {
		return err
	}
	return ta.storage.SetMeta(checkID, testMeta{})
}
// Kill is a no-op in this test agent; it exists only to satisfy the agent
// interface.
func (ta *TestAgent) Kill(checkID string) error {
	return nil
}
// AbortChecks aborts every stored check belonging to the given scan.
//
// BUGFIX: the error returned by Abort was previously discarded; it is now
// propagated to the caller. The redundant `var err error` declaration was
// also removed.
func (ta *TestAgent) AbortChecks(scanID string) error {
	jobs, err := ta.storage.GetAll()
	if err != nil {
		return err
	}
	for _, j := range jobs {
		if j.ScanID != scanID {
			continue
		}
		if err := ta.Abort(j.CheckID); err != nil {
			return err
		}
	}
	return nil
}
// Abort marks the check as aborted in the storage and logs the abort.
// It fails when the check is unknown or the metadata update fails.
func (ta *TestAgent) Abort(checkID string) error {
	if _, err := ta.storage.Get(checkID); err != nil {
		return err
	}
	if err := ta.storage.SetMeta(checkID, testMeta{aborted: true}); err != nil {
		return err
	}
	ta.log.WithFields(logrus.Fields{
		"check_id": checkID,
	}).Info("check aborted")
	return nil
}
// Raw returns an empty report payload; this test agent produces no raw output.
func (ta *TestAgent) Raw(checkID string) ([]byte, error) {
	return []byte{}, nil
}
// TestStreamActions starts a gowse websocket server and verifies that
// HandleMessages dispatches broadcast messages to the right action handlers:
// malformed/unknown messages are ignored, "abort" aborts the matching scan's
// checks, and "disconnect" only affects the matching agent ID.
func TestStreamActions(t *testing.T) {
	log := logrus.New()
	logrus.SetLevel(logrus.DebugLevel)
	l := logrus.NewEntry(log)

	// Test server.
	server := gowse.NewServer(l)
	topic := server.CreateTopic(streamName)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := topic.SubscriberHandler(w, r); err != nil {
			l.Printf("error handling subscriber request: %+v", err)
		}
	})
	ts := httptest.NewServer(mux)
	wsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	wsURL.Scheme = "ws"
	wss := http.Server{Addr: ts.Listener.Addr().String(), Handler: mux}
	go wss.ListenAndServe()

	// Test client: an agent with one known job already running.
	stor := check.NewMemoryStorage()
	a := TestAgent{id: knownAgentID, storage: &stor, jobs: make(map[string]check.Job), log: l}
	agentCtx, agentCancel := context.WithCancel(context.Background())
	s, err := New(agentCtx, agentCancel, &a, &stor, wsURL.String(), streamTimeout, l)
	if err != nil {
		t.Fatal(err)
	}
	job, err := stor.NewJob(agentCtx, knownCheckJob, l)
	if err != nil {
		t.Fatal(err)
	}
	err = a.Run(job.CheckID)
	if err != nil {
		t.Fatal(err)
	}
	actions := map[string]func(agent.Agent, string){
		"disconnect": func(a agent.Agent, checkID string) {
			a.SetStatus(agent.StatusDisconnected)
		},
		// NOTE(review): t.Fatal from a non-test goroutine (HandleMessages
		// invokes these handlers) is flagged by go vet; t.Error is safer.
		"abort": func(a agent.Agent, scanID string) {
			err := a.AbortChecks(scanID)
			if err != nil {
				t.Fatal(err)
			}
		},
		"ping": func(_ agent.Agent, _ string) {},
	}

	// Keep sending pings so the stream's read timeout never fires.
	go func() {
		ticker := time.NewTicker(pingInterval)
		agentDone := agentCtx.Done()
	LOOP:
		for {
			select {
			case <-ticker.C:
				topic.Broadcast(Message{Action: "ping"})
			case <-agentDone:
				break LOOP
			}
		}
	}()
	go func() {
		err = s.HandleMessages(actions)
		if err != nil {
			log.Error(err)
		}
	}()
	// Give the stream time to connect before broadcasting test messages.
	time.Sleep(waitTime)

	// Each case broadcasts one message; testFunc (when set) asserts the
	// observable effect on the agent after a settle period.
	// NOTE(review): the testFunc closures use the captured outer `a`, not
	// their ta parameter.
	tests := []struct {
		name     string
		message  Message
		testFunc func(ta *TestAgent) bool
	}{
		{
			name:    "malformed-message",
			message: Message{},
		},
		{
			name:    "empty-action",
			message: Message{AgentID: knownAgentID},
		},
		{
			name:    "unknown-action",
			message: Message{AgentID: knownAgentID, Action: "unknown"},
		},
		{
			name:    "unknown-scan",
			message: Message{ScanID: unknownCheckJob.ScanID, Action: "abort"},
		},
		{
			name:     "known-scan",
			message:  Message{ScanID: knownCheckJob.ScanID, Action: "abort"},
			testFunc: func(ta *TestAgent) bool { return a.Job(knownCheckJob.CheckID).Meta.(testMeta).aborted },
		},
		{
			name:    "unknown-agent",
			message: Message{AgentID: unknownAgentID, Action: "disconnect"},
		},
		{
			name:     "known-agent",
			message:  Message{AgentID: knownAgentID, Action: "disconnect"},
			testFunc: func(ta *TestAgent) bool { return true },
		},
	}
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			topic.Broadcast(tc.message)
			time.Sleep(waitTime)
			if tc.testFunc != nil {
				if !tc.testFunc(&a) {
					t.Fatalf("test %v failed", tc.name)
				}
			}
		})
	}
	agentCancel()
}
// TestStreamConnectTimeout verifies that HandleMessages returns an error when
// the server never broadcasts anything: no pings are sent here, so the
// stream's read timeout (streamTimeout) must fire.
func TestStreamConnectTimeout(t *testing.T) {
	log := logrus.New()
	logrus.SetLevel(logrus.DebugLevel)
	l := logrus.NewEntry(log)

	// Test server.
	server := gowse.NewServer(l)
	topic := server.CreateTopic(streamName)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := topic.SubscriberHandler(w, r); err != nil {
			l.Printf("error handling subscriber request: %+v", err)
		}
	})
	ts := httptest.NewServer(mux)
	wsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	wsURL.Scheme = "ws"
	wss := http.Server{Addr: ts.Listener.Addr().String(), Handler: mux}
	go wss.ListenAndServe()

	// Test client.
	stor := check.NewMemoryStorage()
	a := TestAgent{id: knownAgentID, storage: &stor, jobs: make(map[string]check.Job), log: l}
	agentCtx, agentCancel := context.WithCancel(context.Background())
	s, err := New(agentCtx, agentCancel, &a, &stor, wsURL.String(), streamTimeout, l)
	if err != nil {
		t.Fatal(err)
	}
	actions := map[string]func(agent.Agent, string){
		"disconnect": func(a agent.Agent, checkID string) {
			a.SetStatus(agent.StatusDisconnected)
		},
		"ping": func(_ agent.Agent, _ string) {},
	}
	// NOTE(review): err is nil inside this branch, so log.Error(err) logs
	// nothing useful.
	if err := s.HandleMessages(actions); err == nil {
		log.Error(err)
		t.Fatal(errors.New("stream connection didn't time out"))
	}
	agentCancel()
}
// TestStreamHandleRegister verifies that HandleRegister succeeds when the
// server keeps broadcasting "register" messages addressed to this agent.
func TestStreamHandleRegister(t *testing.T) {
	log := logrus.New()
	logrus.SetLevel(logrus.DebugLevel)
	l := logrus.NewEntry(log)

	// Test server.
	server := gowse.NewServer(l)
	topic := server.CreateTopic(streamName)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := topic.SubscriberHandler(w, r); err != nil {
			l.Printf("error handling subscriber request: %+v", err)
		}
	})
	ts := httptest.NewServer(mux)
	wsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	wsURL.Scheme = "ws"
	wss := http.Server{Addr: ts.Listener.Addr().String(), Handler: mux}
	go wss.ListenAndServe()

	// Test client.
	stor := check.NewMemoryStorage()
	a := TestAgent{id: knownAgentID, storage: &stor, jobs: make(map[string]check.Job), log: l}
	agentCtx, agentCancel := context.WithCancel(context.Background())
	s, err := New(agentCtx, agentCancel, &a, &stor, wsURL.String(), streamTimeout, l)
	if err != nil {
		t.Fatal(err)
	}

	// Keep sending register events addressed to the known agent.
	go func() {
		ticker := time.NewTicker(pingInterval)
		agentDone := agentCtx.Done()
	LOOP:
		for {
			select {
			case <-ticker.C:
				topic.Broadcast(Message{Action: "register", AgentID: knownAgentID})
			case <-agentDone:
				break LOOP
			}
		}
	}()
	if err := s.HandleRegister(); err != nil {
		log.Error(err)
		t.Fatal(errors.New("stream connection not registered"))
	}
	agentCancel()
}
// TestStreamHandleRegisterMalformedMessage verifies that HandleRegister fails
// when the server only broadcasts payloads that are not Message values.
func TestStreamHandleRegisterMalformedMessage(t *testing.T) {
	log := logrus.New()
	logrus.SetLevel(logrus.DebugLevel)
	l := logrus.NewEntry(log)

	// Test server.
	server := gowse.NewServer(l)
	topic := server.CreateTopic(streamName)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := topic.SubscriberHandler(w, r); err != nil {
			l.Printf("error handling subscriber request: %+v", err)
		}
	})
	ts := httptest.NewServer(mux)
	wsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	wsURL.Scheme = "ws"
	wss := http.Server{Addr: ts.Listener.Addr().String(), Handler: mux}
	go wss.ListenAndServe()

	// Test client.
	stor := check.NewMemoryStorage()
	a := TestAgent{id: knownAgentID, storage: &stor, jobs: make(map[string]check.Job), log: l}
	agentCtx, agentCancel := context.WithCancel(context.Background())
	s, err := New(agentCtx, agentCancel, &a, &stor, wsURL.String(), streamTimeout, l)
	if err != nil {
		t.Fatal(err)
	}

	// Keep sending malformed (non-Message) payloads.
	go func() {
		ticker := time.NewTicker(pingInterval)
		agentDone := agentCtx.Done()
	LOOP:
		for {
			select {
			case <-ticker.C:
				topic.Broadcast("malformed")
			case <-agentDone:
				break LOOP
			}
		}
	}()
	// NOTE(review): err is nil in this branch, so log.Error(err) is a no-op.
	if err := s.HandleRegister(); err == nil {
		log.Error(err)
		t.Fatal(errors.New("stream registered correctly"))
	}
	agentCancel()
}
// TestStreamHandleRegisterAgentDone verifies that HandleRegister fails fast
// when the agent context has already been cancelled.
func TestStreamHandleRegisterAgentDone(t *testing.T) {
	log := logrus.New()
	logrus.SetLevel(logrus.DebugLevel)
	l := logrus.NewEntry(log)

	// Test server.
	server := gowse.NewServer(l)
	topic := server.CreateTopic(streamName)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := topic.SubscriberHandler(w, r); err != nil {
			l.Printf("error handling subscriber request: %+v", err)
		}
	})
	ts := httptest.NewServer(mux)
	wsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	wsURL.Scheme = "ws"
	wss := http.Server{Addr: ts.Listener.Addr().String(), Handler: mux}
	go wss.ListenAndServe()

	// Test client.
	stor := check.NewMemoryStorage()
	a := TestAgent{id: knownAgentID, storage: &stor, jobs: make(map[string]check.Job), log: l}
	agentCtx, agentCancel := context.WithCancel(context.Background())
	s, err := New(agentCtx, agentCancel, &a, &stor, wsURL.String(), streamTimeout, l)
	if err != nil {
		t.Fatal(err)
	}
	// Cancel before registering: HandleRegister must observe the done context.
	agentCancel()
	if err := s.HandleRegister(); err == nil {
		log.Error(err)
		t.Fatal(errors.New("agent context not cancelled"))
	}
}
// TestStreamHandleRegisterTimeout verifies that HandleRegister fails when no
// register message ever arrives: nothing is broadcast, so the stream's
// timeout must fire.
func TestStreamHandleRegisterTimeout(t *testing.T) {
	log := logrus.New()
	logrus.SetLevel(logrus.DebugLevel)
	l := logrus.NewEntry(log)

	// Test server.
	server := gowse.NewServer(l)
	topic := server.CreateTopic(streamName)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := topic.SubscriberHandler(w, r); err != nil {
			l.Printf("error handling subscriber request: %+v", err)
		}
	})
	ts := httptest.NewServer(mux)
	wsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	wsURL.Scheme = "ws"
	wss := http.Server{Addr: ts.Listener.Addr().String(), Handler: mux}
	go wss.ListenAndServe()

	// Test client.
	stor := check.NewMemoryStorage()
	a := TestAgent{id: knownAgentID, storage: &stor, jobs: make(map[string]check.Job), log: l}
	agentCtx, agentCancel := context.WithCancel(context.Background())
	s, err := New(agentCtx, agentCancel, &a, &stor, wsURL.String(), streamTimeout, l)
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): agentCancel is never called on this path -- the context
	// leaks (go vet's lostcancel would flag this).
	if err := s.HandleRegister(); err == nil {
		log.Error(err)
		t.Fatal(errors.New("stream did not timeout"))
	}
}
// TestStreamReconnect verifies that the stream reconnects after an explicit
// disconnect and still delivers subsequent messages: after a forced
// disconnect and reconnection, a broadcast "disconnect" for the known agent
// must flip the agent status.
func TestStreamReconnect(t *testing.T) {
	log := logrus.New()
	logrus.SetLevel(logrus.DebugLevel)
	l := logrus.NewEntry(log)

	// Test server.
	server := gowse.NewServer(l)
	topic := server.CreateTopic(streamName)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := topic.SubscriberHandler(w, r); err != nil {
			l.Printf("error handling subscriber request: %+v", err)
		}
	})
	ts := httptest.NewServer(mux)
	wsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	wsURL.Scheme = "ws"
	wss := http.Server{Addr: ts.Listener.Addr().String(), Handler: mux}
	go wss.ListenAndServe()

	// Test client with one known running job.
	stor := check.NewMemoryStorage()
	a := TestAgent{id: knownAgentID, storage: &stor, jobs: make(map[string]check.Job), log: l}
	agentCtx, agentCancel := context.WithCancel(context.Background())
	s, err := New(agentCtx, agentCancel, &a, &stor, wsURL.String(), streamTimeout, l)
	if err != nil {
		t.Fatal(err)
	}
	job, err := stor.NewJob(agentCtx, knownCheckJob, l)
	if err != nil {
		t.Fatal(err)
	}
	err = a.Run(job.CheckID)
	if err != nil {
		t.Fatal(err)
	}
	actions := map[string]func(agent.Agent, string){
		"disconnect": func(a agent.Agent, checkID string) {
			a.SetStatus(agent.StatusDisconnected)
		},
		"ping": func(_ agent.Agent, _ string) {},
	}
	go func() { _ = s.HandleMessages(actions) }()

	// Keep sending pings so the read timeout never fires while connected.
	go func() {
		ticker := time.NewTicker(pingInterval)
		agentDone := agentCtx.Done()
	LOOP:
		for {
			select {
			case <-ticker.C:
				topic.Broadcast(Message{Action: "ping"})
			case <-agentDone:
				break LOOP
			}
		}
	}()

	// Wait until stream connection is established.
	// NOTE(review): these polling loops have no deadline; a regression here
	// hangs the test until the suite timeout.
	for s.Status() != StatusConnected {
		time.Sleep(waitTime)
	}
	// Disconnect stream.
	err = s.disconnect()
	if err != nil {
		t.Fatal(err)
	}
	// Wait for agent to reconnect.
	for s.Status() != StatusConnected {
		time.Sleep(waitTime)
	}
	time.Sleep(2 * streamTimeout)
	// Disconnect existing agent and verify the handler ran.
	topic.Broadcast(Message{Action: "disconnect", AgentID: knownAgentID})
	time.Sleep(2 * streamTimeout)
	if a.Status() != agent.StatusDisconnected {
		t.Fatalf("agent %v has not disconnected", knownAgentID)
	}
	agentCancel()
}
// TestStreamDisconnect verifies that HandleMessages terminates (rather than
// retrying forever) when the server is closed and the stream is disconnected.
func TestStreamDisconnect(t *testing.T) {
	log := logrus.New()
	logrus.SetLevel(logrus.DebugLevel)
	l := logrus.NewEntry(log)

	// Test server.
	server := gowse.NewServer(l)
	topic := server.CreateTopic(streamName)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := topic.SubscriberHandler(w, r); err != nil {
			l.Printf("error handling subscriber request: %+v", err)
		}
	})
	ts := httptest.NewServer(mux)
	wsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	wsURL.Scheme = "ws"
	wss := http.Server{Addr: ts.Listener.Addr().String(), Handler: mux}
	go wss.ListenAndServe()

	// Test client with one known running job.
	stor := check.NewMemoryStorage()
	a := TestAgent{id: knownAgentID, storage: &stor, jobs: make(map[string]check.Job), log: l}
	agentCtx, agentCancel := context.WithCancel(context.Background())
	s, err := New(agentCtx, agentCancel, &a, &stor, wsURL.String(), streamTimeout, l)
	if err != nil {
		t.Fatal(err)
	}
	job, err := stor.NewJob(agentCtx, knownCheckJob, l)
	if err != nil {
		t.Fatal(err)
	}
	err = a.Run(job.CheckID)
	if err != nil {
		t.Fatal(err)
	}
	actions := map[string]func(agent.Agent, string){
		"ping": func(_ agent.Agent, _ string) {},
	}

	// Wait for the scheduler to die.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		_ = s.HandleMessages(actions)
		defer wg.Done()
	}()

	// Keep sending pings.
	go func() {
		ticker := time.NewTicker(pingInterval)
		agentDone := agentCtx.Done()
	LOOP:
		for {
			select {
			case <-ticker.C:
				topic.Broadcast(Message{Action: "ping"})
			case <-agentDone:
				break LOOP
			}
		}
	}()

	// Close server so the stream cannot reconnect.
	ts.Close()
	// Disconnect stream; HandleMessages should now return and release wg.
	err = s.disconnect()
	if err != nil {
		t.Fatal(err)
	}
	wg.Wait()
	agentCancel()
}
// TestStreamCancel verifies that HandleMessages returns when the agent
// context is cancelled while the stream is still being served pings.
func TestStreamCancel(t *testing.T) {
	log := logrus.New()
	logrus.SetLevel(logrus.DebugLevel)
	l := logrus.NewEntry(log)

	// Test server.
	server := gowse.NewServer(l)
	topic := server.CreateTopic(streamName)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := topic.SubscriberHandler(w, r); err != nil {
			l.Printf("error handling subscriber request: %+v", err)
		}
	})
	ts := httptest.NewServer(mux)
	wsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	wsURL.Scheme = "ws"
	wss := http.Server{Addr: ts.Listener.Addr().String(), Handler: mux}
	go wss.ListenAndServe()

	// Test client with one known running job.
	stor := check.NewMemoryStorage()
	a := TestAgent{id: knownAgentID, storage: &stor, jobs: make(map[string]check.Job), log: l}
	agentCtx, agentCancel := context.WithCancel(context.Background())
	s, err := New(agentCtx, agentCancel, &a, &stor, wsURL.String(), streamTimeout, l)
	if err != nil {
		t.Fatal(err)
	}
	job, err := stor.NewJob(agentCtx, knownCheckJob, l)
	if err != nil {
		t.Fatal(err)
	}
	err = a.Run(job.CheckID)
	if err != nil {
		t.Fatal(err)
	}
	actions := map[string]func(agent.Agent, string){
		"ping": func(_ agent.Agent, _ string) {},
	}

	// Check that the stream stops handling messages
	// when the agent context is cancelled.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		_ = s.HandleMessages(actions)
		defer wg.Done()
	}()

	// Keep sending pings.
	go func() {
		ticker := time.NewTicker(pingInterval)
		agentDone := agentCtx.Done()
	LOOP:
		for {
			select {
			case <-ticker.C:
				topic.Broadcast(Message{Action: "ping"})
			case <-agentDone:
				break LOOP
			}
		}
	}()
	agentCancel()
	wg.Wait()
}
|
package getcontent
import (
"bufio"
"database/sql"
_ "github.com/go-sql-driver/mysql"
"fmt"
"github.com/yangyouwei/xiaoshuo_content/read_conf"
"io"
"strings"
"log"
"os"
"regexp"
"sync"
)
// booksinfo mirrors a row of the `books` table.
type booksinfo struct {
	Id              int    `db:"id"`
	Sourcesfilename string `db:"Sourcesfilename"` // path of the book's source text file
	Chapterdone     int    `db:"chapterdone"`
}

// chapter mirrors a row of the sharded chapter_N tables, plus the computed
// start/end line offsets of the chapter within the book's source file.
type chapter struct {
	Id          int64  `db:"id"`
	BookId      int    `db:"booksId"`
	ChapterId   int    `db:"chapterId"`
	Content     string `db:"content"`
	Size        string `db:"size"`
	Chapterline int64  `db:"chapterlines"` // line number where the chapter starts
	start       int64  // first line of the chapter (inclusive), set by dooffset
	end         int64  // last line of the chapter; 0 means "to end of file"
}

// LineOffsetstr describes a start/end line span.
// NOTE(review): nothing in this chunk references it -- confirm before removing.
type LineOffsetstr struct {
	start int
	end   int
}

// bookinfos is the work queue feeding book rows from getbookinfs to the
// GetContent workers. fullContent and bookId appear unused in this chunk.
var fullContent []string
var bookinfos = make(chan booksinfo, 100)

//var chapterContent []string
var bookId []int
// GetContent spins up a producer that streams book rows into the bookinfos
// channel and `Concurrent` worker goroutines that, for each book, read its
// source file, compute chapter line offsets, and write each chapter's content
// back to the database. It blocks until producer and all workers finish.
//
// BUGFIX: workers previously exited via a bare `return` when the channel
// closed, skipping their wg.Done() call, so wg.Wait() deadlocked forever.
// Each worker now defers wg.Done() and ranges over the channel, which exits
// cleanly when the producer closes it.
func GetContent(dbc *sql.DB) {
	// number of concurrent workers, from configuration
	c := read_conf.Main_str.Concurrent
	wg := sync.WaitGroup{}
	wg.Add(c + 1) // c workers + 1 producer

	go getbookinfs(dbc, bookinfos, &wg)

	for i := 0; i < c; i++ {
		go func(wg *sync.WaitGroup) {
			defer wg.Done()
			// range exits once getbookinfs closes the channel
			for b := range bookinfos {
				// full text of the book, one entry per line
				fc := readfullcontent(b.Sourcesfilename)
				// all chapter rows for this book, with start/end line
				// offsets filled in
				chapterInfo := dooffset(getchapterinfo(dbc, b))
				for _, k := range *chapterInfo {
					// extract the chapter's lines and persist them
					updatechapter(dbc, k, fc)
				}
			}
		}(&wg)
	}
	wg.Wait()
}
// getbookinfs streams every row of the `books` table into c, one query per
// id (ids are assumed contiguous from 1 to the highest id), then closes the
// channel and signals the WaitGroup.
//
// BUGFIX: each *sql.Rows result is now closed after use -- the original
// leaked one open result set (and its connection) per book.
func getbookinfs(dbc *sql.DB, c chan booksinfo, wg *sync.WaitGroup) {
	defer wg.Done()
	defer close(c) // runs before wg.Done (LIFO), matching the original order

	// highest book id doubles as the book count
	n := 0
	sqltext := "select id from books order by id DESC limit 1;"
	if err := dbc.QueryRow(sqltext).Scan(&n); err != nil {
		panic(err)
	}
	fmt.Println("booksnum: ", n)

	for i := 1; i <= n; i++ {
		booksql := fmt.Sprintf("SELECT id, Sourcesfilename FROM books WHERE id=%v", i)
		res, err := dbc.Query(booksql)
		if err != nil {
			fmt.Println(err)
			continue
		}
		for res.Next() {
			// Scan order must match the SELECT column order.
			a := booksinfo{}
			if err := res.Scan(&a.Id, &a.Sourcesfilename); err != nil {
				res.Close()
				log.Fatal(err)
			}
			c <- a
		}
		res.Close()
	}
}
// getchapterinfo loads every chapter row belonging to the given book from
// its shard table (chapter_<bookId%100+1>) and returns them as a slice.
func getchapterinfo(dbc *sql.DB, book booksinfo) *[]chapter {
	chaptersql := fmt.Sprintf(
		"SELECT id,booksId,content,chapterlines FROM chapter_%v WHERE booksId=%v",
		book.Id%100+1, book.Id,
	)
	rows, err := dbc.Query(chaptersql)
	if err != nil {
		panic(err)
	}

	var chinfo []chapter
	row := chapter{}
	for rows.Next() {
		if err := rows.Scan(&row.Id, &row.BookId, &row.Content, &row.Chapterline); err != nil {
			log.Fatal(err)
		}
		chinfo = append(chinfo, row)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
	rows.Close()
	return &chinfo
}
// dooffset fills in the start/end line offsets for each chapter in place:
// a chapter starts at its own Chapterline and ends one line before the next
// chapter's Chapterline; the final chapter gets end == 0, which callers
// interpret as "to end of file".
func dooffset(c *[]chapter) *[]chapter {
	chapters := *c
	last := len(chapters) - 1
	for i := range chapters {
		chapters[i].start = chapters[i].Chapterline
		if i == last {
			chapters[i].end = 0
		} else {
			chapters[i].end = chapters[i+1].Chapterline - 1
		}
	}
	return &chapters
}
func readfullcontent(fp string) *[]string {
fi, err := os.Open(fp)
if err != nil {
fmt.Printf("Error: %s\n", err)
}
defer fi.Close()
br := bufio.NewReader(fi)
var tmp []string
for {
a, _, c := br.ReadLine()
if c == io.EOF {
break
}
//fmt.Println(string(a))
tmp = append(tmp,string(a))
//fmt.Println(cap(tmp))
}
return &tmp
}
// updatechapter extracts the chapter's line span from the full book text,
// drops blank/whitespace-only lines and a known promotional watermark line,
// joins the remaining lines with </br></br>, and writes the result back to
// the chapter table via sqlupdate.
func updatechapter(dbc *sql.DB, c chapter, fc *[]string) {
	fmt.Println(c)
	cs := *fc
	var a []string
	// end == 0 means "until the end of the file" (set by dooffset for the
	// last chapter)
	if c.end == 0 {
		a = cs[c.start:]
	} else {
		a = cs[c.start:c.end]
	}
	// content starts with a four-space indent for the first paragraph
	var content string = "    "
	for _, v := range a {
		if len(v) == 0 {
			continue
		}
		// skip whitespace-only lines
		isok, err := regexp.Match(`^(\s+)$`, []byte(v))
		if err != nil {
			fmt.Println(err)
		}
		if isok {
			continue
		}
		// handle the promotional watermark line ("more great books at
		// xqishu.com")
		isok1 := strings.HasPrefix(v, "更多精彩,更多好书,尽在新奇书网—http://www.xqishu.com")
		if isok1 {
			// the whole line is just the watermark: drop it entirely
			isok, err := regexp.Match(`^(.*)(更多精彩,更多好书,尽在新奇书网—http://www.xqishu.com)$`, []byte(v))
			if err != nil {
				fmt.Println(err)
			}
			if isok {
				continue
			}
			// otherwise keep only the text that follows the watermark
			reg := regexp.MustCompile(`^(.*)(更多精彩,更多好书,尽在新奇书网—http://www.xqishu.com)(.+$)`)
			result := reg.FindAllStringSubmatch(v, -1)
			v = result[0][3]
		}
		content = content + v + "</br></br>"
	}
	// strip one interior whitespace run from the joined content
	//fmt.Printf("chapterID: %v start: %v end: %v \n",c.Id,c.start,c.end)
	replacecharacter(&content)
	//fmt.Println(content)
	// persist the assembled content
	//fmt.Println(c.BookId)
	sqlupdate(dbc, c, content)
}
// sqlupdate writes a chapter's assembled content back to its shard table
// (chapter_<bookId%100+1>), identified by the chapter's primary key.
//
// BUGFIX: a Prepare failure previously left stmt nil and the subsequent
// stmt.Exec call panicked; the function now logs and returns early, and
// Close is deferred before Exec so the statement is always released.
func sqlupdate(dbcon *sql.DB, c chapter, content string) {
	contentsql := fmt.Sprintf("UPDATE chapter_%v SET content=? WHERE id=?", c.BookId%100+1)
	stmt, err := dbcon.Prepare(contentsql)
	if err != nil {
		log.Println(err)
		return
	}
	defer stmt.Close()
	if _, err := stmt.Exec(content, c.Id); err != nil {
		log.Println(err)
	}
}
// replacecharacter removes the last interior run of whitespace in *s (the
// run between the greedily-matched head and the tail). Strings without an
// interior whitespace run are left unchanged. The string is modified in
// place and its pointer returned.
// NOTE(review): the original comment said "strip leading whitespace", which
// the regex does not actually do -- confirm the intended behavior.
func replacecharacter(s *string) *string {
	re := regexp.MustCompile(`^(.+)(\s+)(.+$)`)
	if m := re.FindStringSubmatch(*s); m != nil {
		*s = m[1] + m[3]
	}
	return s
}
|
package controllers
import (
"database/sql"
"net/http"
"../config"
"../helpers"
"../models"
)
// PageDataSingle carries the data rendered by the single-record template.
type PageDataSingle struct {
	Title       string
	Description string
	Singledata  models.GetifobyUID // the record fetched by models.ShowSingleData
}
// Show renders the detail page for the record with a specific ID.
// Only GET is allowed; a missing row or any lookup error renders a 404.
func Show(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		helpers.ShowError(w, "405")
		return
	}
	singledata, err := models.ShowSingleData(w, r)
	// NOTE(review): `err == sql.ErrNoRows || err != nil` is redundant -- the
	// second clause subsumes the first. It cannot simply be collapsed to
	// `err != nil` without orphaning the database/sql import; consider
	// distinguishing not-found (404) from genuine query failures instead.
	if err == sql.ErrNoRows || err != nil {
		helpers.ShowError(w, "404")
		return
	}
	pagedatasingle := PageDataSingle{Title: "Каталог - главная страница", Description: "Полезная инфо", Singledata: singledata}
	// NOTE(review): ExecuteTemplate's error is silently dropped -- confirm.
	config.TPL.ExecuteTemplate(w, "showsingledata.html", pagedatasingle)
}
|
// Copyright (c) KwanJunWen
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package estemplate
import "fmt"
// DatatypeDenseVector Specialised Datatype that stores dense vectors of float
// values. The maximum number of dimensions that can be in a vector should not exceed
// 1024.
// ! Experimental and may be changed or removed completely in a future release.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/dense-vector.html
// for details.
type DatatypeDenseVector struct {
	Datatype
	name   string   // field key used when includeName is set in Source
	copyTo []string // target fields for the copy_to mapping option

	// fields specific to dense vector datatype
	dims *int // number of vector dimensions; nil means "not set"
}
// NewDatatypeDenseVector initializes a new DatatypeDenseVector with the
// given field name; all other options start unset.
func NewDatatypeDenseVector(name string) *DatatypeDenseVector {
	v := &DatatypeDenseVector{}
	v.name = name
	return v
}
// Name returns field key for the Datatype.
func (v *DatatypeDenseVector) Name() string {
	return v.name
}
// CopyTo appends the field(s) to copy to, which allows the values of
// multiple fields to be queried as a single field.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/copy-to.html
// for details.
func (v *DatatypeDenseVector) CopyTo(fields ...string) *DatatypeDenseVector {
	v.copyTo = append(v.copyTo, fields...)
	return v
}
// Dims sets the number of dimensions in the vector. Internally, each
// document's dense vector is encoded as a binary doc value whose size in
// bytes is 4 * dims + 4.
func (v *DatatypeDenseVector) Dims(dims int) *DatatypeDenseVector {
	d := dims
	v.dims = &d
	return v
}
// Validate validates DatatypeDenseVector, reporting any missing required
// fields (currently only the name, and only when includeName is set).
func (v *DatatypeDenseVector) Validate(includeName bool) error {
	var invalid []string
	if includeName && v.name == "" {
		invalid = append(invalid, "Name")
	}
	// TODO: validate dims
	if len(invalid) == 0 {
		return nil
	}
	return fmt.Errorf("missing required fields: %v", invalid)
}
// Source returns the serializable JSON for the source builder.
//
// Example output (with includeName):
//
//	{
//		"test": {
//			"type": "dense_vector",
//			"copy_to": ["field_1", "field_2"],
//			"dims": 3
//		}
//	}
func (v *DatatypeDenseVector) Source(includeName bool) (interface{}, error) {
	options := make(map[string]interface{})
	options["type"] = "dense_vector"

	if len(v.copyTo) > 0 {
		// a single copy_to field serializes as a bare string, multiple
		// fields as an array.
		// (Redundant `break` statements removed: Go switch cases never
		// fall through, and staticcheck S1023 flags them.)
		var copyTo interface{}
		switch {
		case len(v.copyTo) > 1:
			copyTo = v.copyTo
		case len(v.copyTo) == 1:
			copyTo = v.copyTo[0]
		default:
			copyTo = ""
		}
		options["copy_to"] = copyTo
	}
	if v.dims != nil {
		options["dims"] = v.dims
	}

	if !includeName {
		return options, nil
	}
	source := make(map[string]interface{})
	source[v.name] = options
	return source, nil
}
|
// Fetcher for http://api.coindesk.com
package coindesk
import (
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/l-vitaly/btcticker"
"github.com/l-vitaly/btcticker/httputil"
)
// Sentinel errors returned by Fetch.
var (
	errParse            = errors.New("parse error")
	errCurrencyNotFound = errors.New("currency not found")
)
const fetcherName = "coindesk"

// url is the CoinDesk current-price endpoint; %s is filled with the
// uppercased target currency code.
// NOTE(review): plain http -- consider https. The name also shadows the
// net/url package for any future import of it in this file.
const url = "http://api.coindesk.com/v1/bpi/currentprice/%s.json"
// fetcher implements the btcticker fetcher interface backed by the
// CoinDesk API. It is stateless.
type fetcher struct {
}
// Name returns the fetcher's registry name ("coindesk").
func (p *fetcher) Name() string {
	return fetcherName
}
// Fetch queries the CoinDesk current-price API for the `to` currency and
// returns the rate together with the quote's update time. The `from`
// parameter is unused (CoinDesk only quotes BTC).
func (p *fetcher) Fetch(from, to string) (*btcticker.FetchData, error) {
	to = strings.ToUpper(to)
	r := httputil.NewRequest(60*time.Second, 60*time.Second, 60*time.Second)
	status, data, err := r.SendJSON(fmt.Sprintf(url, to), "GET", nil)
	if err != nil {
		return nil, err
	}
	// NOTE(review): errParse is a misleading error for a non-200 status.
	if status != http.StatusOK {
		return nil, errParse
	}
	// drill into bpi.<CURRENCY> of the response
	currencyData := map[string]interface{}{}
	if bpi, ok := data["bpi"]; ok {
		if bpi, ok := bpi.(map[string]interface{}); ok {
			if currencyVal, ok := bpi[to].(map[string]interface{}); ok {
				currencyData = currencyVal
			}
		}
	}
	// parse the quote's update time from time.updatedISO
	updated := time.Time{}
	if timeVal, ok := data["time"].(map[string]interface{}); ok {
		if updatedVal, ok := timeVal["updatedISO"].(string); ok {
			updated, err = time.Parse(time.RFC3339, updatedVal)
			if err != nil {
				return nil, err
			}
		}
	}
	// NOTE(review): updated.Second() is the seconds-of-minute component
	// (0-59), almost certainly not a usable timestamp -- updated.Unix()
	// looks intended. FetchData.Timestamp's type is declared outside this
	// chunk, so confirm and fix there.
	if amount, ok := currencyData["rate_float"].(float64); ok {
		return &btcticker.FetchData{Amount: amount, Timestamp: updated.Second()}, nil
	}
	return nil, errCurrencyNotFound
}
// init registers this fetcher with the btcticker registry at package load time.
func init() {
	btcticker.RegisterFetcher(&fetcher{})
}
|
package main
// Worth checking out is also fastwalk.go and its simblings in
// https://github.com/golang/tools/
// Unfortunately, it's not possible to import it :(
import (
"fmt"
"github.com/MichaelTJones/walk"
"os"
"path/filepath"
"sort"
"strings"
)
// kv is a sortable (directory name, entry count) pair.
type kv struct {
	key string
	value int
}
// countingWalker returns a walk.WalkFunc that, for every visited path,
// sends the name of the top-level child of basedir containing that path
// on the increment channel. Paths directly inside basedir (and basedir
// itself) are reported as ".".
func countingWalker(basedir string, increment chan string) walk.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		rel, relErr := filepath.Rel(basedir, path)
		if relErr != nil {
			return relErr
		}
		bucket := "."
		if sep := strings.IndexRune(rel, os.PathSeparator); sep >= 0 {
			// Keep only the first path component: the direct child of basedir.
			bucket = rel[:sep]
		}
		increment <- bucket
		return nil
	}
}
// main counts the number of filesystem entries under each direct child of
// the base directory (os.Args[1], defaulting to ".") and prints the
// counts in ascending order.
func main() {
	basedir := "."
	if len(os.Args) > 1 {
		basedir = os.Args[1]
	}
	counts := make(map[string]int)
	ch := make(chan string)
	done := make(chan bool)
	// A single collector goroutine owns the counts map, so the parallel
	// walker never touches it concurrently.
	go func() {
		for dir := range ch {
			counts[dir] += 1
		}
		done <- true
	}()
	err := walk.Walk(basedir, countingWalker(basedir, ch))
	if err != nil {
		panic(err)
	}
	// Close the channel so the collector drains and signals completion.
	close(ch)
	<-done
	// '.' counts itself as well and that's no good
	counts["."] -= 1
	sorted := make([]kv, 0, len(counts))
	for k, v := range counts {
		sorted = append(sorted, kv{k, v})
	}
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].value < sorted[j].value
	})
	// we can be sure that sorted contains at least '.'
	// maxLen is the print width of the largest count, used to right-align output.
	maxLen := len(fmt.Sprintf("%d", sorted[len(sorted)-1].value))
	for _, pair := range sorted {
		fmt.Printf(" %*d %s\n", maxLen, pair.value, pair.key)
	}
}
|
package log
import (
"github.com/gin-gonic/gin"
"github.com/lestrrat-go/file-rotatelogs"
"github.com/sirupsen/logrus"
"go.rock.com/rock-platform/rock/server/conf"
"io"
"log"
"os"
"path/filepath"
"strings"
"time"
)
// Logger wraps a logrus.Logger for platform-wide logging.
type Logger struct {
	*logrus.Logger
}
// SingleLogger is the process-wide logger singleton managed by GetLogger.
var SingleLogger *Logger
// GetLogger lazily creates and returns the singleton Logger, configured
// with a colored, full-timestamp text formatter.
// NOTE(review): the nil check is not goroutine-safe; concurrent first
// calls could race — confirm initialization happens from one goroutine.
func GetLogger() *Logger {
	if SingleLogger == nil {
		SingleLogger = &Logger{logrus.New()}
		formatter := &logrus.TextFormatter{
			ForceColors: true,
			FullTimestamp: true,
			TimestampFormat: "2006-01-02 15:04:05",
		}
		SingleLogger.Logger.SetFormatter(formatter)
	}
	return SingleLogger
}
// InitLogger configures the logger from the application config: it sets
// up daily-rotated log files, routes gin/std-log/logrus output to both
// the rotated file and stderr, and applies the configured log level.
func (l *Logger) InitLogger() {
	config := conf.GetConfig()
	// Set log directory location
	logDir := config.Viper.GetString("log.dir")
	l.Infoln("[Rock Platform] Set log dir to: ", logDir)
	// Set rotation options; the date pattern in the file name drives rotation.
	logFile := filepath.Join(logDir, "rock.%Y%m%d.log")
	logf, err := rotatelogs.New(logFile,
		rotatelogs.WithLinkName("rock.log"), // symlink pointing at the newest log file
		rotatelogs.WithMaxAge(7*time.Hour*24), // keep rotated files for at most 7 days
		rotatelogs.WithRotationTime(time.Hour*24), // rotate once per day
	)
	if err != nil {
		l.Fatalf("[Rock Platform] failed to create rotation logs: %s", err)
	}
	gin.DefaultWriter = io.MultiWriter(logf, os.Stderr) // gin.DefaultWriter controls where gin writes its log output
	log.SetOutput(gin.DefaultWriter) // standard library log goes to the same writer (default would be stderr)
	l.SetOutput(gin.DefaultWriter) // logrus output; default is stderr, may be any io.Writer such as an *os.File
	//l.SetReportCaller(true) // include caller file/line info, at a performance cost
	//Set log level
	logLevel := config.Viper.GetString("log.level")
	switch strings.ToLower(logLevel) {
	case "debug":
		l.SetLevel(logrus.DebugLevel) // debug-level output is expected on the terminal
	case "info":
		l.SetLevel(logrus.InfoLevel)
		gin.SetMode(gin.ReleaseMode) // gin.DebugMode is for development, gin.ReleaseMode for production
	case "warn":
		l.SetLevel(logrus.WarnLevel)
		gin.SetMode(gin.ReleaseMode)
	case "error":
		l.SetLevel(logrus.ErrorLevel)
		gin.SetMode(gin.ReleaseMode)
	default:
		l.SetLevel(logrus.DebugLevel)
		l.Warningf("[Rock Platform] Got unknown log level %s, and set log level to default: debug", logLevel)
	}
	l.Infoln("[Rock Platform] Set log level to:", logLevel)
}
|
package attr
import "testing"
// TestIsEmpty verifies IsEmpty across ints, strings, floats, slices and nil.
func TestIsEmpty(t *testing.T) {
	cases := []struct {
		in    interface{}
		empty bool
	}{
		{0, true},
		{1, false},
		{-1, false},
		{int64(0), true},
		{int(1), false},
		{"", true},
		{"hoge", false},
		{123456789123456789, false},
		{0.0, true},
		{0.1, false},
		{0.0000001, false},
		{[]string{}, true},
		{[]string{"a"}, false},
		{[]int{1}, false},
		{nil, true},
	}
	for _, c := range cases {
		if got := IsEmpty(c.in); got != c.empty {
			t.Errorf("(%v) expected empty (%t) but (%t)", c.in, c.empty, got)
		}
	}
}
// TestIntIsEmpty verifies IntIsEmpty: only zero is empty.
func TestIntIsEmpty(t *testing.T) {
	for v, want := range map[int64]bool{0: true, 1: false, -1: false} {
		if got := IntIsEmpty(v); got != want {
			t.Errorf("(%d) expected empty (%t) but (%t)", v, want, got)
		}
	}
}
// TestStringIsEmpty verifies StringIsEmpty: only "" is empty.
func TestStringIsEmpty(t *testing.T) {
	for v, want := range map[string]bool{"": true, "hoge": false} {
		if got := StringIsEmpty(v); got != want {
			t.Errorf("(%s) expected empty (%t) but (%t)", v, want, got)
		}
	}
}
// TestFloatIsEmpty verifies FloatIsEmpty: only 0.0 is empty.
func TestFloatIsEmpty(t *testing.T) {
	for v, want := range map[float64]bool{0.0: true, 0.1: false, 0.0000001: false} {
		if got := FloatIsEmpty(v); got != want {
			t.Errorf("(%f) expected empty (%t) but (%t)", v, want, got)
		}
	}
}
// TestSliceIsEmpty verifies SliceIsEmpty for empty, non-empty, nil and
// typed-nil slices.
func TestSliceIsEmpty(t *testing.T) {
	var nilInts []int
	cases := []struct {
		in    interface{}
		empty bool
	}{
		{[]string{}, true},
		{[]string{"a"}, false},
		{[]int{1}, false},
		{nil, true},
		{nilInts, true},
	}
	for _, c := range cases {
		if got := SliceIsEmpty(c.in); got != c.empty {
			t.Errorf("(%v) expected empty (%t) but (%t)", c.in, c.empty, got)
		}
	}
}
// TestInvalidIsEmpty verifies InvalidIsEmpty: nil is empty, values are not.
func TestInvalidIsEmpty(t *testing.T) {
	cases := []struct {
		in    interface{}
		empty bool
	}{
		{nil, true},
		{1, false},
	}
	for _, c := range cases {
		if got := InvalidIsEmpty(c.in); got != c.empty {
			t.Errorf("(%v) expected empty (%t) but (%t)", c.in, c.empty, got)
		}
	}
}
|
/**
* @Author: jinjiaji
* @Description:
* @File: sliding_window
* @Version: 1.0.0
* @Date: 2021/8/16 下午5:48
*/
package module5
import (
"sync"
"testing"
"time"
)
// TestDefaultCounter simulates 400 million increments plus 400 million
// reads across 1000 concurrent goroutines and verifies the final counts.
func TestDefaultCounter(t *testing.T) {
	qc := NewDefaultQueriesCounter()
	defer qc.Close()
	wg := sync.WaitGroup{}
	t.Log("start:", time.Now().Unix())
	// n1 goroutines, n2 iterations each.
	n1, n2 := 1000, 100000
	for i := 0; i < n1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < n2; j++ {
				qc.Incr(resultTypeSuccess)
				qc.Incr(resultTypeFailure)
				qc.Incr(resultTypeTimeout)
				qc.Incr(resultTypeRejection)
				qc.Count(resultTypeSuccess, 1)
				qc.Count(resultTypeFailure, 1)
				qc.Count(resultTypeTimeout, 1)
				qc.Count(resultTypeRejection, 1)
			}
		}()
	}
	wg.Wait()
	t.Log("end:", time.Now().Unix())
	// Counting covers the last 100 seconds, so pause 2 seconds to let the
	// sliding window settle before reading.
	time.Sleep(time.Second * 2)
	c1 := qc.Count(resultTypeSuccess, 100)
	c2 := qc.Count(resultTypeFailure, 100)
	c3 := qc.Count(resultTypeTimeout, 100)
	c4 := qc.Count(resultTypeRejection, 100)
	t.Log(c1, c2, c3, c4)
	if c1 != n1*n2 || c2 != n1*n2 || c3 != n1*n2 || c4 != n1*n2 {
		t.Fatal("check err")
	}
}
// BenchmarkDefaultCounter benchmarks the lock-free counter.
// BenchmarkDefaultCounter-8   	98804888	        11.6 ns/op
func BenchmarkDefaultCounter(b *testing.B) {
	qc := NewDefaultQueriesCounter()
	defer qc.Close()
	for i := 0; i < b.N; i++ {
		qc.Incr(resultTypeSuccess)
		qc.Count(resultTypeSuccess, 1)
	}
}
// BenchmarkMuxCounter benchmarks the mutex-based counter.
// BenchmarkMuxCounter-8   	22033450	        51.1 ns/op
func BenchmarkMuxCounter(b *testing.B) {
	qc := NewMuxQueriesCounter()
	defer qc.Close()
	for i := 0; i < b.N; i++ {
		qc.Incr(resultTypeSuccess)
		qc.Count(resultTypeSuccess, 1)
	}
}
// BenchmarkTime benchmarks time.Now().Unix() itself, as a baseline for
// the counter benchmarks above.
// BenchmarkTime-8   	13139469	        84.9 ns/op
func BenchmarkTime(b *testing.B) {
	for i := 0; i < b.N; i++ {
		time.Now().Unix()
	}
}
|
package shard
import (
"bytes"
"fmt"
"hash/crc32"
"testing"
"time"
"h12.io/sej"
)
// BenchmarkAppend measures parallel appends of 100-byte messages with
// distinct keys to a sharded writer using the FNV shard function.
func BenchmarkAppend(b *testing.B) {
	tt := sej.Test{b}
	path := tt.NewDir()
	w, err := NewWriter(Path{path, "blue", 8}, shardFNV)
	if err != nil {
		b.Fatal(err)
	}
	// Pre-generate one distinct key per iteration so key construction is
	// outside the timed region.
	keys := make([][]byte, b.N)
	for i := range keys {
		keys[i] = []byte("key-" + fmt.Sprintf("%09x", i))
	}
	value := bytes.Repeat([]byte{'a'}, 100)
	now := time.Now()
	msg := sej.Message{Value: value, Timestamp: now}
	// timeAppend(b, w, keys, &msg)
	timeAppendParallel(b, w, keys, &msg)
	w.Close()
}
// timeAppend appends b.N messages sequentially, one key per iteration,
// timing only the append and final flush.
func timeAppend(b *testing.B, w *Writer, keys [][]byte, msg *sej.Message) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		msg.Key = keys[i]
		if err := w.Append(msg); err != nil {
			b.Fatal(err)
		}
	}
	if err := w.Flush(); err != nil {
		b.Fatal(err)
	}
	b.StopTimer()
}
// timeAppendParallel appends messages from parallel goroutines
// (2x GOMAXPROCS), timing only the appends and the final flush.
// Goroutines index keys independently from 0, so keys may repeat across
// goroutines; this matches the original behavior.
func timeAppendParallel(b *testing.B, w *Writer, keys [][]byte, msg *sej.Message) {
	b.SetParallelism(2)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		// Fix: work on a per-goroutine copy of the message. The original
		// mutated the shared *msg (msg.Key) from every parallel goroutine,
		// which is a data race under -race.
		m := *msg
		i := 0
		for pb.Next() {
			m.Key = keys[i]
			if err := w.Append(&m); err != nil {
				b.Fatal(err)
			}
			i++
		}
	})
	if err := w.Flush(); err != nil {
		b.Fatal(err)
	}
	b.StopTimer()
}
// shardCRC folds the CRC-32 (IEEE) of the message key into 16 bits by
// XOR-ing the high and low halves of the checksum.
func shardCRC(msg *sej.Message) uint16 {
	sum := crc32.ChecksumIEEE(msg.Key)
	const mask16 = 1<<16 - 1
	return uint16((sum >> 16) ^ (sum & mask16))
}
// shardFNV folds a 32-bit FNV-1a hash of the message key into 16 bits by
// XOR-ing the high and low halves of the hash.
func shardFNV(msg *sej.Message) uint16 {
	const (
		offset32 = 2166136261
		prime32  = 16777619
		mask16   = 1<<16 - 1
	)
	hash := uint32(offset32)
	// FNV-1a: XOR the byte in, then multiply by the prime.
	for _, c := range msg.Key {
		hash ^= uint32(c)
		hash *= prime32
	}
	return uint16((hash >> 16) ^ (hash & mask16))
}
|
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
)
var (
	// protoPath is the root directory of the .proto sources (flag -f).
	protoPath string
	// protoOut is the output directory for generated Go files (flag -t).
	protoOut string
)
// init registers the command-line flags.
func init() {
	flag.StringVar(&protoPath, "f", "", "proto源文件路径")
	flag.StringVar(&protoOut, "t", "", "proto文件编译输出路径")
}
// main compiles every .proto file found under protoPath (recursively)
// into Go sources under protoOut using protoc.
func main() {
	flag.Parse()
	if protoPath == "" {
		log.Fatal("proto源文件地址不能为空")
	}
	// Collect all subdirectories, then include the root itself.
	dirs, err := getAllPkgs(protoPath)
	if err != nil {
		log.Fatalf("读取proto文件子目录发生错误, error:%v", err)
	}
	dirs = append(dirs, protoPath)
	err = compilerProto(dirs)
	if err != nil {
		log.Fatalf("编译proto失败, err:%v", err)
	}
	log.Println("生成完成")
}
// getAllPkgs recursively collects all subdirectories under pp.
// The returned slice does not include pp itself.
func getAllPkgs(pp string) (dirs []string, err error) {
	var (
		dirTemp string
	)
	dir, err := ioutil.ReadDir(pp)
	if err != nil {
		return nil, err
	}
	for _, fi := range dir {
		if fi.IsDir() {
			dirTemp = filepath.Join(pp, fi.Name())
			dirs = append(dirs, dirTemp)
			// NOTE(review): errors from deeper levels are silently
			// dropped, so unreadable subtrees are skipped — confirm this
			// best-effort behavior is intended.
			childDirs, _ := getAllPkgs(dirTemp)
			dirs = append(dirs, childDirs...)
		}
	}
	return dirs, nil
}
// compilerProto runs protoc for each directory in dirs, mirroring the
// directory layout of protoPath under protoOut. Output directories are
// created on demand; a protoc failure for one directory is logged and the
// loop continues with the next directory.
func compilerProto(dirs []string) error {
	var (
		goOut  string
		errStr string
	)
	for _, dirPath := range dirs {
		// Map the source directory onto its output directory: strip the
		// protoPath prefix and re-root the remainder under protoOut.
		dir := strings.Replace(dirPath, protoPath, "", 1)
		if dir != "" {
			goOut = filepath.Join(protoOut, dir[1:])
		} else {
			goOut = protoOut
		}
		_, err := os.Stat(goOut)
		if err != nil {
			if os.IsPermission(err) {
				log.Fatalf("mkdir path:%s, err:%v", goOut, err)
			}
			if os.IsNotExist(err) {
				if err := os.MkdirAll(goOut, os.ModePerm); err != nil {
					log.Fatalf("mkdir path:%s, err:%v", goOut, err)
				}
			}
		}
		if _, err := os.Stat(dirPath); err != nil {
			if os.IsNotExist(err) {
				log.Fatalf("源路径:%s, error:%v", dirPath, err)
			}
		}
		var (
			out    bytes.Buffer
			stderr bytes.Buffer
			files  []string
		)
		files = getProtoFile(dirPath)
		if len(files) == 0 {
			continue
		}
		protoPathInfo := fmt.Sprintf("-I=%s%s", dirPath, string(os.PathSeparator))
		goOutPath := fmt.Sprintf("--go_out=plugins=grpc:%s", goOut)
		var args = []string{protoPathInfo, goOutPath}
		args = append(args, files...)
		cmd := exec.Command("protoc", args...)
		cmd.Stdout = &out
		cmd.Stderr = &stderr
		if err := cmd.Run(); err != nil {
			errStr = strings.Trim(stderr.String(), " ")
			// Fix: report the directory that actually failed (dirPath);
			// the original printed the global root protoPath here, which
			// made per-directory failures impossible to locate.
			log.Println(fmt.Sprintf("错误目录:%s 错误描述:%s", dirPath, errStr))
		}
	}
	return nil
}
// getProtoFile lists the names (not paths) of all .proto files directly
// inside directory p. Subdirectories are not descended into; a directory
// read error yields an empty result.
func getProtoFile(p string) []string {
	entries, err := ioutil.ReadDir(p)
	if err != nil {
		return nil
	}
	var protos []string
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		name := entry.Name()
		if strings.ToLower(path.Ext(name)) == ".proto" {
			protos = append(protos, name)
		}
	}
	return protos
}
|
package db
import "github.com/gobjserver/gobjserver/core/entity"
// ObjectGatewayImpl is a thin gateway that delegates object persistence
// to the underlying CommonDatabase, mapping object names to tables.
type ObjectGatewayImpl struct {
	// NOTE(review): field name is misspelled ("Datatbase"); it is exported,
	// so renaming it would break callers.
	Datatbase CommonDatabase
}
// FindAll returns the names of all stored object types (table names).
func (gateway ObjectGatewayImpl) FindAll() ([]string, error) {
	return gateway.Datatbase.GetTableNames()
}
// Insert stores instance under objectName, creating the backing table on
// first use.
// NOTE(review): the CreateTable result is discarded — confirm failures
// surface via the subsequent Insert.
func (gateway ObjectGatewayImpl) Insert(objectName string, instance interface{}) (*entity.Object, error) {
	if !gateway.Datatbase.ContainTable(objectName) {
		gateway.Datatbase.CreateTable(objectName)
	}
	return gateway.Datatbase.Insert(objectName, instance)
}
// Find returns all objects stored under objectName.
func (gateway ObjectGatewayImpl) Find(objectName string) []*entity.Object {
	return gateway.Datatbase.Find(objectName)
}
// FindByID returns the object with the given ID under objectName.
func (gateway ObjectGatewayImpl) FindByID(objectName string, objectID string) (*entity.Object, error) {
	return gateway.Datatbase.FindByID(objectName, objectID)
}
// Update replaces the object with the given ID under objectName.
func (gateway ObjectGatewayImpl) Update(objectName string, objectID string, instance *entity.Object) (*entity.Object, error) {
	return gateway.Datatbase.Update(objectName, objectID, instance)
}
// Delete removes the object with the given ID under objectName.
func (gateway ObjectGatewayImpl) Delete(objectName string, objectID string) (bool, error) {
	return gateway.Datatbase.Delete(objectName, objectID)
}
|
package main
import "time"
// Routine is a practice routine composed of drills, with authorship and
// popularity metadata.
type Routine struct {
	RoutineId int `json:"routine_id"`
	Title string `json:"title"`
	TotalDuration int `json:"total_duration"`
	Character string `json:"character"`
	CreatorTag string `json:"creator_tag"`
	CreatorId int `json:"creator_id"`
	CreationDate time.Time `json:"creation_date"`
	Popularity int `json:"popularity"`
	Drills Drills `json:"drills"`
}
// Routines is a list of routines.
type Routines []Routine
// Drill is a single timed exercise within a routine.
type Drill struct {
	DrillTitle string `json:"drill_title"`
	Duration int `json:"duration"`
}
// Drills is a list of drills.
type Drills []Drill
// Library is a user's collection of routines.
type Library struct {
	LibraryId int `json:"library_id"`
	UserId int `json:"user_id"`
	Routines Routines `json:"routines"`
}
// User is an account profile.
type User struct {
	UserId int `json:"user_id"`
	Tag string `json:"tag"`
	Email string `json:"email"`
	Bio string `json:"bio"`
	Main string `json:"main"`
}
|
package converter
import (
"fmt"
"image"
"image/color"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
"math"
"os"
"github.com/MarioCdeS/imgtoascii/converter/config"
)
const (
	// ramp10 and ramp70 are ASCII brightness ramps ordered darkest to
	// lightest; a character is picked by the cell's average gray level.
	ramp10 = "@%#*+=-:. "
	ramp70 = "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\"^`'. "
)
// Error is a converter error: a message plus an optional underlying cause.
type Error struct {
	Msg   string
	Cause error
}

// Error returns the error message; a nil receiver reads "unknown error".
func (e *Error) Error() string {
	if e == nil {
		return "unknown error"
	}
	return e.Msg
}

// Unwrap exposes the underlying cause (may be nil) so that callers can
// inspect wrapped converter errors with errors.Is and errors.As.
// Previously Cause was stored but unreachable through the errors API.
func (e *Error) Unwrap() error {
	if e == nil {
		return nil
	}
	return e.Cause
}
// internalConfig augments the user config with derived layout values:
// the pixel size of one output character cell, the number of output
// rows, and the selected character ramp.
type internalConfig struct {
	*config.Config
	colWidth int
	rowHeight int
	outRows int
	outRamp []rune
}
// Run loads the configured image and converts it to ASCII art.
// It returns one string per output row, or a converter *Error on failure.
func Run(cfg *config.Config) ([]string, *Error) {
	img, errLoad := loadImage(cfg.ImagePath)
	if errLoad != nil {
		return nil, &Error{"unable to load image", errLoad}
	}
	intCfg, err := calculateInternalConfig(cfg, img.Bounds())
	if err != nil {
		return nil, err
	}
	return convertToASCII(img, intCfg), nil
}
// loadImage opens and decodes the image at path. Supported formats are
// those registered via the blank image decoder imports (GIF, JPEG, PNG).
func loadImage(path string) (image.Image, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	decoded, _, decodeErr := image.Decode(f)
	return decoded, decodeErr
}
// calculateInternalConfig derives the character-cell size, the output row
// count and the character ramp from the user config and image bounds.
// It returns an *Error when the image is too small for the requested
// output dimensions.
func calculateInternalConfig(cfg *config.Config, imgBounds image.Rectangle) (*internalConfig, *Error) {
	imgWidth := imgBounds.Max.X - imgBounds.Min.X
	// Fix: also reject OutCols <= 0, which previously caused a
	// division-by-zero panic below.
	if cfg.OutCols <= 0 || cfg.OutCols > imgWidth {
		return nil, &Error{
			fmt.Sprintf("image size is too small for the specified number of output columns (%d)", cfg.OutCols),
			nil,
		}
	}
	imgHeight := imgBounds.Max.Y - imgBounds.Min.Y
	colWidth := imgWidth / cfg.OutCols
	rowHeight := int(float64(colWidth) * cfg.ColRowRatio)
	// Fix: a small column width or ratio can round the row height down to
	// zero, which previously caused a division-by-zero panic below.
	if rowHeight < 1 {
		return nil, &Error{
			fmt.Sprintf("column/row ratio (%v) yields an empty row height for the computed column width (%d)", cfg.ColRowRatio, colWidth),
			nil,
		}
	}
	outRows := imgHeight / rowHeight
	if outRows > imgHeight {
		return nil, &Error{
			fmt.Sprintf("image size is too small for the calculated number of output rows (%d)", outRows),
			nil,
		}
	}
	// Select the 10- or 70-character brightness ramp.
	var outRampStr string
	if cfg.Ramp == config.Ramp10 {
		outRampStr = ramp10
	} else {
		outRampStr = ramp70
	}
	return &internalConfig{
		cfg,
		colWidth,
		rowHeight,
		outRows,
		[]rune(outRampStr),
	}, nil
}
// convertToASCII splits the image into one horizontal strip per CPU,
// converts the strips concurrently, and reassembles the rows in order.
func convertToASCII(img image.Image, cfg *internalConfig) []string {
	numRowsPerStrip := cfg.outRows / cfg.NumCPU
	chs := make([]chan string, cfg.NumCPU)
	for i := 0; i < cfg.NumCPU; i++ {
		var numRows int
		if i == cfg.NumCPU-1 {
			// The last strip absorbs the remainder rows.
			numRows = cfg.outRows - (cfg.NumCPU-1)*numRowsPerStrip
		} else {
			numRows = numRowsPerStrip
		}
		chs[i] = make(chan string, numRows)
		go convertImgStripToASCII(img, i*numRowsPerStrip*cfg.rowHeight, numRows, cfg, chs[i])
	}
	ascii := make([]string, cfg.outRows)
	counts := make([]int, cfg.NumCPU)
	// Round-robin over the strip channels until all are closed. Each
	// receive blocks until that strip's next row arrives (or yields
	// ok=false once the strip's channel is closed), and counts[i] tracks
	// how many rows strip i has delivered so each row lands at its final
	// index.
	busy := true
	for busy {
		busy = false
		for i := 0; i < cfg.NumCPU; i++ {
			if line, ok := <-chs[i]; ok {
				ascii[i*numRowsPerStrip+counts[i]] = line
				counts[i]++
				busy = true
			}
		}
	}
	return ascii
}
// convertImgStripToASCII renders numRows output rows starting at pixel
// row minY, sending each completed line on ch. The channel is closed when
// the strip is finished.
func convertImgStripToASCII(img image.Image, minY int, numRows int, cfg *internalConfig, ch chan<- string) {
	defer close(ch)
	line := make([]rune, cfg.OutCols)
	minX := img.Bounds().Min.X
	for j := 0; j < numRows; j++ {
		for i := 0; i < cfg.OutCols; i++ {
			// Pixel rectangle covered by one output character cell.
			charMinX := minX + i*cfg.colWidth
			charMinY := minY + j*cfg.rowHeight
			charRect := image.Rect(charMinX, charMinY, charMinX+cfg.colWidth, charMinY+cfg.rowHeight)
			// Map the cell's average gray (0..255) onto a ramp index.
			idx := int(math.Floor((rectGrayAverage(img, &charRect) / math.MaxUint8) * float64(len(cfg.outRamp)-1)))
			line[i] = cfg.outRamp[idx]
		}
		ch <- string(line)
	}
}
// rectGrayAverage returns the mean gray value (0..255) of the pixels of
// img inside rect. The rectangle is assumed to be non-empty; an empty
// rectangle would divide by zero.
func rectGrayAverage(img image.Image, rect *image.Rectangle) float64 {
	var sum float64
	for x := rect.Min.X; x < rect.Max.X; x++ {
		for y := rect.Min.Y; y < rect.Max.Y; y++ {
			sum += float64(color.GrayModel.Convert(img.At(x, y)).(color.Gray).Y)
		}
	}
	pixels := (rect.Max.X - rect.Min.X) * (rect.Max.Y - rect.Min.Y)
	return sum / float64(pixels)
}
|
package inter
import "snatch_ssc/sys"
// SscData is one lottery draw record parsed from a results page.
type SscData struct {
	No sys.NullString `json:"no"` // draw number
	Results sys.NullString `json:"results"` // draw results
	// DrawTime sys.NullTime `json:"drawTime"` // draw time (currently unused)
}
// Snatch scrapes a lottery results page and parses it into SscData records.
type Snatch interface {
	// Snatch fetches the web page and returns its raw content.
	Snatch() (string, error)
	// Resolve parses fetched page content into draw records.
	Resolve(string) []*SscData
}
|
package graph
import (
"fmt"
"math"
)
// hslahex converts an HSLA color to a quoted "#rrggbbaa" hex string.
func hslahex(h, s, l, a float64) string {
	red, green, blue, alpha := hsla(h, s, l, a)
	return fmt.Sprintf("\"#%02x%02x%02x%02x\"", sat8(red), sat8(green), sat8(blue), sat8(alpha))
}
// hue converts one hue channel to its RGB contribution given the two
// intermediate HSL values v1 and v2; h is first wrapped into [0, 1].
func hue(v1, v2, h float64) float64 {
	if h < 0 {
		h++
	}
	if h > 1 {
		h--
	}
	switch {
	case 6*h < 1:
		return v1 + (v2-v1)*6*h
	case 2*h < 1:
		return v2
	case 3*h < 2:
		return v1 + (v2-v1)*(2.0/3.0-h)*6
	}
	return v1
}
// hsla converts HSLA (all channels in [0,1]) to RGBA in [0,1].
// Zero saturation is the achromatic shortcut: every channel equals the
// lightness. Otherwise the standard HSL-to-RGB intermediates v1/v2 are
// computed and each RGB channel is derived via hue().
func hsla(h, s, l, a float64) (r, g, b, ra float64) {
	if s == 0 {
		return l, l, l, a
	}
	// Use only the fractional part of the hue.
	_, h = math.Modf(h)
	var v2 float64
	if l < 0.5 {
		v2 = l * (1 + s)
	} else {
		v2 = (l + s) - s*l
	}
	v1 := 2*l - v2
	return hue(v1, v2, h+1.0/3.0), hue(v1, v2, h), hue(v1, v2, h-1.0/3.0), a
}
// sat8 converts a float in [0,1] to a uint8 in [0,255] with saturation
// arithmetic ("saturate 8"):
//
//	sat8(x) = 0   if x < 0
//	sat8(x) = 255 if x > 1
func sat8(v float64) uint8 {
	scaled := v * 255.0
	switch {
	case scaled >= 255:
		return 255
	case scaled <= 0:
		return 0
	}
	return uint8(scaled)
}
|
package admin
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/phantacix/go-admin-panel/core"
"github.com/phantacix/go-admin-panel/core/db"
"github.com/phantacix/go-admin-panel/module/admin/model"
"testing"
)
// init loads the local environment configuration before any test runs.
func init() {
	core.Init("../../config/env/local/admin.json")
}
// TestAdminDBMigrate drops and re-creates the admin tables (dept, log,
// menu, role, account) via ORM auto-migration. It is destructive and
// requires a reachable admin database.
func TestAdminDBMigrate(t *testing.T) {
	orm := db.AdminDB()
	dept, log, menu, role, account := &model.Dept{}, &model.Log{}, &model.Menu{}, &model.Role{}, &model.Account{}
	orm.DropTableIfExists(dept, log, menu, role, account)
	// NOTE(review): a migration error is only printed, the test still
	// passes — confirm whether this should be t.Fatal instead.
	if err := orm.AutoMigrate(dept, log, menu, role, account).Error; err != nil {
		fmt.Print(err)
	}
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package databases
import (
"os"
"sync"
"io/ioutil"
"path/filepath"
"columns"
"datatypes"
"parsers"
"storages"
"base/errors"
"parsers/sqlparser"
)
// OnDiskDatabase is an IDatabase implementation whose metadata and table
// data are persisted on disk under the configured server path.
// tableCaches is guarded by mu.
type OnDiskDatabase struct {
	mu          sync.RWMutex
	ctx         *DatabaseContext
	node        *sqlparser.DBDDL
	metaFuns    *MetaFuns
	executeFuns *ExecuteFuns
	tableCaches map[string]*Table
}
// NewOnDiskDatabase builds an on-disk database for the given DDL node and
// wires up its execute (create/drop database/table) and meta handlers.
func NewOnDiskDatabase(ctx *DatabaseContext, node *sqlparser.DBDDL) IDatabase {
	database := &OnDiskDatabase{
		ctx:         ctx,
		node:        node,
		tableCaches: make(map[string]*Table),
	}
	database.executeFuns = &ExecuteFuns{
		CreateDatabase: database.createDatabase,
		DropDatabase:   database.dropDatabase,
		CreateTable:    database.createTable,
		DropTable:      database.dropTable,
	}
	database.metaFuns = &MetaFuns{
		GetDBName:       database.getDBName,
		GetDataPath:     database.getDataPath,
		GetEngineName:   database.getEngineName,
		GetMetaDataPath: database.getMetaDataPath,
	}
	return database
}
// Load re-attaches every table of this database by reading and parsing
// the metadata/<db>/*.sql files on disk.
func (database *OnDiskDatabase) Load() error {
	ctx := database.ctx
	log := ctx.log
	conf := ctx.conf
	dbName := database.node.DBName
	tablefiles := filepath.Join(conf.Server.Path, "metadata", dbName, "*.sql")
	// Glob only errors on a malformed pattern, so its error is ignored.
	files, _ := filepath.Glob(tablefiles)
	for _, file := range files {
		query, err := ioutil.ReadFile(file)
		if err != nil {
			return errors.Wrap(err)
		}
		node, err := parsers.Parse(string(query))
		if err != nil {
			return err
		}
		// NOTE(review): this type assertion panics if a metadata file
		// parses to a non-DDL statement — confirm the files always
		// contain table DDL.
		ddl := node.(*sqlparser.DDL)
		tableName := ddl.Table.Name.String()
		if err := database.attachTable(tableName, ddl); err != nil {
			return err
		}
		log.Info("Load table:%v, file:%s", ddl.Table, file)
	}
	return nil
}
// Name returns the database name.
// NOTE(review): always returns "" instead of database.node.DBName — looks
// unintentional; confirm whether callers rely on the empty value.
func (database *OnDiskDatabase) Name() string {
	return ""
}
// Executor returns the DDL execution handlers of this database.
func (database *OnDiskDatabase) Executor() *ExecuteFuns {
	return database.executeFuns
}
// Meta returns the metadata accessor handlers of this database.
func (database *OnDiskDatabase) Meta() *MetaFuns {
	return database.metaFuns
}
// GetTables returns a snapshot of all currently attached tables.
// Fix: take the read lock, matching GetStorage — tableCaches is mutated
// by attachTable/detachTable under the write lock, so an unguarded range
// here was a data race.
func (database *OnDiskDatabase) GetTables() []*Table {
	database.mu.RLock()
	defer database.mu.RUnlock()
	var tables []*Table
	for _, v := range database.tableCaches {
		tables = append(tables, v)
	}
	return tables
}
// GetStorage returns the storage engine of the named attached table, or
// an error when the table is unknown.
func (database *OnDiskDatabase) GetStorage(tableName string) (storages.IStorage, error) {
	database.mu.RLock()
	defer database.mu.RUnlock()
	table, ok := database.tableCaches[tableName]
	if !ok {
		return nil, errors.Errorf("couldn't find table:%v storage", tableName)
	}
	return table.storage, nil
}
// attachTable builds and caches an in-memory Table for a CREATE TABLE DDL
// node: it collects the column definitions, instantiates their datatypes,
// creates the storage engine and registers the table under tableName.
// Fails if a table with the same name is already attached.
func (database *OnDiskDatabase) attachTable(tableName string, node *sqlparser.DDL) error {
	ctx := database.ctx
	log := ctx.log
	conf := ctx.conf
	dbName := database.node.DBName
	engine := node.TableSpec.Options.Engine
	database.mu.Lock()
	defer database.mu.Unlock()
	if _, ok := database.tableCaches[tableName]; ok {
		return errors.Errorf("%s.%s exists", dbName, tableName)
	}
	// Collect all column definitions by walking the table-spec AST.
	var colDefinitions []*sqlparser.ColumnDefinition
	if err := sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) {
		switch node := node.(type) {
		case *sqlparser.ColumnDefinition:
			colDefinitions = append(colDefinitions, node)
		}
		return true, nil
	}, node.TableSpec); err != nil {
		return err
	}
	// Instantiate the datatype of every column.
	cols := make([]*columns.Column, len(colDefinitions))
	for i, coldef := range colDefinitions {
		dataType, err := datatypes.DataTypeFactory(coldef.Type.Type)
		if err != nil {
			return err
		}
		cols[i] = columns.NewColumn(coldef.Name.String(), dataType)
	}
	storageCtx := storages.NewStorageContext(ctx.log, ctx.conf)
	storage, err := storages.StorageFactory(storageCtx, engine, cols)
	if err != nil {
		return err
	}
	table := NewTable(conf, dbName, tableName, engine, node, storage)
	database.tableCaches[tableName] = table
	log.Info("Attach table:%s.%s, engine:%s", dbName, tableName, engine)
	return nil
}
// detachTable closes the table's storage and removes it from the cache.
// NOTE(review): any result of storage.Close() is discarded — confirm it
// cannot fail in a way callers should observe.
func (database *OnDiskDatabase) detachTable(tableName string) error {
	database.mu.Lock()
	defer database.mu.Unlock()
	log := database.ctx.log
	dbName := database.getDBName()
	tbl, ok := database.tableCaches[tableName]
	if !ok {
		return errors.Errorf("%s.%s doesn't exists", dbName, tableName)
	}
	tbl.storage.Close()
	delete(database.tableCaches, tableName)
	log.Info("Detach table:%s.%s", dbName, tableName)
	return nil
}
// Execute handlers.
// createDatabase creates the database's data and metadata directories,
// writes the CREATE DATABASE statement to "<metadata path>.sql", and
// attaches the database to the global registry.
func (database *OnDiskDatabase) createDatabase() error {
	log := database.ctx.log
	dbName := database.getDBName()
	dataPath := database.getDataPath()
	metaDataPath := database.getMetaDataPath()
	metaFile := metaDataPath + ".sql"
	// Check.
	if _, err := GetDatabase(dbName); err == nil {
		return errors.Errorf("database:%v exists", dbName)
	}
	if err := os.MkdirAll(dataPath, os.ModePerm); err != nil {
		return errors.Wrap(err)
	}
	if err := os.MkdirAll(metaDataPath, os.ModePerm); err != nil {
		return errors.Wrap(err)
	}
	// Serialize the DDL node back to SQL text for the metadata file.
	buf := sqlparser.NewTrackedBuffer(nil)
	database.node.Format(buf)
	query := buf.String()
	log.Debug("%s: %s", metaFile, query)
	if err := ioutil.WriteFile(metaFile, []byte(query), 0644); err != nil {
		return errors.Wrap(err)
	}
	log.Info("Create database:%s, meta:%s", dbName, metaDataPath)
	return databases.attachDatabase(dbName, database)
}
// dropDatabase drops every attached table, removes the database's data
// directory, metadata directory and ".sql" meta file, and detaches the
// database from the global registry.
func (database *OnDiskDatabase) dropDatabase() error {
	log := database.ctx.log
	dbName := database.getDBName()
	dataPath := database.getDataPath()
	metaDataPath := database.getMetaDataPath()
	metaFile := metaDataPath + ".sql"
	// Drop tables first so their storage is closed and files removed.
	// (Deleting map entries while ranging is safe in Go.)
	for tableName := range database.tableCaches {
		if err := database.dropTable(tableName); err != nil {
			return err
		}
	}
	if err := os.RemoveAll(dataPath); err != nil {
		return errors.Wrap(err)
	}
	if err := os.RemoveAll(metaDataPath); err != nil {
		return errors.Wrap(err)
	}
	if err := os.RemoveAll(metaFile); err != nil {
		return errors.Wrap(err)
	}
	log.Info("Drop database:%s, meta:%s", dbName, metaFile)
	return databases.detachDatabase(dbName)
}
// createTable persists the CREATE TABLE statement to the table's ".sql"
// metadata file, creates the table's data directory, and attaches the
// table in memory.
func (database *OnDiskDatabase) createTable(node *sqlparser.DDL) error {
	log := database.ctx.log
	tableName := node.Table.Name.String()
	tableDataPath := filepath.Join(database.getDataPath(), tableName)
	tableMetaFile := filepath.Join(database.getMetaDataPath(), tableName) + ".sql"
	// Serialize the DDL node back to SQL text for the metadata file.
	buf := sqlparser.NewTrackedBuffer(nil)
	node.Format(buf)
	query := buf.String()
	if err := ioutil.WriteFile(tableMetaFile, []byte(query), 0644); err != nil {
		return errors.Wrap(err)
	}
	if err := os.MkdirAll(tableDataPath, os.ModePerm); err != nil {
		return errors.Wrap(err)
	}
	log.Info("Create table:%s, query:%s", tableMetaFile, query)
	return database.attachTable(tableName, node)
}
// dropTable removes the table's metadata file and data directory from
// disk, then detaches the table from the in-memory cache.
func (database *OnDiskDatabase) dropTable(tableName string) error {
	log := database.ctx.log
	dbName := database.getDBName()
	tableDataPath := filepath.Join(database.getDataPath(), tableName)
	tableMetaFile := filepath.Join(database.getMetaDataPath(), tableName) + ".sql"
	if err := os.RemoveAll(tableMetaFile); err != nil {
		return errors.Wrap(err)
	}
	if err := os.RemoveAll(tableDataPath); err != nil {
		return errors.Wrap(err)
	}
	log.Info("Drop table:%s.%s", dbName, tableName)
	return database.detachTable(tableName)
}
// Meta handlers.
// getDBName returns the database name from the DDL node.
func (database *OnDiskDatabase) getDBName() string {
	return database.node.DBName
}
// getDataPath returns "<server path>/data/<db name>".
func (database *OnDiskDatabase) getDataPath() string {
	node := database.node
	conf := database.ctx.conf
	return filepath.Join(conf.Server.Path, "data", node.DBName)
}
// getEngineName returns the engine from the DDL options, or "" when no
// options were given.
func (database *OnDiskDatabase) getEngineName() string {
	if database.node.Options != nil {
		return database.node.Options.Engine
	}
	return ""
}
// getMetaDataPath returns "<server path>/metadata/<db name>".
func (database *OnDiskDatabase) getMetaDataPath() string {
	node := database.node
	conf := database.ctx.conf
	return filepath.Join(conf.Server.Path, "metadata", node.DBName)
}
|
package main
import (
"fmt"
"github.com/hyperledger/fabric-chaincode-go/shim"
"github.com/hyperledger/fabric-protos-go/peer"
)
// SimpleAsset implements a simple chaincode to manage an asset
type SimpleAsset struct{}
// Init is called during chaincode instantiation; this stub performs no
// initialization and always reports success.
func (t *SimpleAsset) Init(stub shim.ChaincodeStubInterface) peer.Response {
	return shim.Success(nil)
}
// Invoke handles chaincode invocations; this stub accepts every call and
// reports success without touching ledger state.
func (t *SimpleAsset) Invoke(stub shim.ChaincodeStubInterface) peer.Response {
	return shim.Success(nil)
}
// main starts the chaincode in the shim; startup errors are printed to stdout.
func main() {
	if err := shim.Start(new(SimpleAsset)); err != nil {
		fmt.Printf("Error starting chaincode: %s", err)
	}
}
|
package redis
import (
."asyncMessageSystem/app/config"
"github.com/gomodule/redigo/redis"
"log"
"strconv"
)
// Cache is the package-wide Redis connection wrapper, populated by Init.
var Cache = new(Instance)
//func init(){
// Init dials the Redis server described by the dot-imported package
// config (Conf) and stores the connection in Cache. It panics (via
// log.Panic) when the connection cannot be established.
// NOTE(review): a single shared redis.Conn is not safe for concurrent
// use — confirm callers serialize access or consider a redis.Pool.
func Init(){
	var err error
	password := redis.DialPassword(Conf.Redis.Password)
	database := redis.DialDatabase(Conf.Redis.Database)
	Cache.Conn,err = redis.Dial("tcp",Conf.Redis.Host+":"+ strconv.Itoa(Conf.Redis.Port) ,password,database)
	if err != nil {
		log.Panic(err.Error())
	}
}
type Instance struct {
Conn redis.Conn
}
func (i *Instance) Set(key string,value interface{})(reply interface{},err error){
reply,err = i.Conn.Do("set",key,value)
return
}
func (i *Instance) SetEx(key string,expire int,value interface{})(reply interface{},err error){
reply,err = i.Conn.Do("setEx",key,expire,value)
return
} |
package ravendb
var (
	// spatialCriteriaFactoryInstance is the shared default factory.
	spatialCriteriaFactoryInstance = NewSpatialCriteriaFactory()
)
// SpatialCriteriaFactory builds spatial query criteria, both WKT-shape
// based and circle based. Methods without an explicit error percentage
// use the default constant (whose upstream name is misspelled:
// IndexingSpatialDefaultDistnaceErrorPct).
type SpatialCriteriaFactory struct {
}
// NewSpatialCriteriaFactory returns a new SpatialCriteriaFactory.
func NewSpatialCriteriaFactory() *SpatialCriteriaFactory {
	return &SpatialCriteriaFactory{}
}
// RelatesToShape creates a WKT criteria for the given shape and relation
// with the default distance error percentage.
func (f *SpatialCriteriaFactory) RelatesToShape(shapeWkt string, relation SpatialRelation) *WktCriteria {
	return f.RelatesToShapeWithError(shapeWkt, relation, IndexingSpatialDefaultDistnaceErrorPct)
}
// RelatesToShapeWithError creates a WKT criteria with an explicit
// distance error percentage.
func (f *SpatialCriteriaFactory) RelatesToShapeWithError(shapeWkt string, relation SpatialRelation, distErrorPercent float64) *WktCriteria {
	return NewWktCriteria(shapeWkt, relation, distErrorPercent)
}
// Intersects creates an "intersects" criteria with the default error.
func (f *SpatialCriteriaFactory) Intersects(shapeWkt string) *WktCriteria {
	return f.IntersectsWithError(shapeWkt, IndexingSpatialDefaultDistnaceErrorPct)
}
// IntersectsWithError creates an "intersects" criteria with an explicit error.
func (f *SpatialCriteriaFactory) IntersectsWithError(shapeWkt string, distErrorPercent float64) *WktCriteria {
	return f.RelatesToShapeWithError(shapeWkt, SpatialRelationIntersects, distErrorPercent)
}
// Contains creates a "contains" criteria with the default error.
func (f *SpatialCriteriaFactory) Contains(shapeWkt string) *WktCriteria {
	return f.ContainsWithError(shapeWkt, IndexingSpatialDefaultDistnaceErrorPct)
}
// ContainsWithError creates a "contains" criteria with an explicit error.
func (f *SpatialCriteriaFactory) ContainsWithError(shapeWkt string, distErrorPercent float64) *WktCriteria {
	return f.RelatesToShapeWithError(shapeWkt, SpatialRelationContains, distErrorPercent)
}
// Disjoint creates a "disjoint" criteria with the default error.
func (f *SpatialCriteriaFactory) Disjoint(shapeWkt string) *WktCriteria {
	return f.DisjointWithError(shapeWkt, IndexingSpatialDefaultDistnaceErrorPct)
}
// DisjointWithError creates a "disjoint" criteria with an explicit error.
func (f *SpatialCriteriaFactory) DisjointWithError(shapeWkt string, distErrorPercent float64) *WktCriteria {
	return f.RelatesToShapeWithError(shapeWkt, SpatialRelationDisjoin, distErrorPercent)
}
// Within creates a "within" criteria with the default error.
func (f *SpatialCriteriaFactory) Within(shapeWkt string) *WktCriteria {
	return f.WithinWithError(shapeWkt, IndexingSpatialDefaultDistnaceErrorPct)
}
// WithinWithError creates a "within" criteria with an explicit error.
func (f *SpatialCriteriaFactory) WithinWithError(shapeWkt string, distErrorPercent float64) *WktCriteria {
	return f.RelatesToShapeWithError(shapeWkt, SpatialRelationWithin, distErrorPercent)
}
// WithinRadius creates a circle criteria with unspecified units ("") and
// the default error.
func (f *SpatialCriteriaFactory) WithinRadius(radius float64, latitude float64, longitude float64) *CircleCriteria {
	return f.WithinRadiusWithUnits(radius, latitude, longitude, "")
}
// WithinRadiusWithUnits creates a circle criteria with explicit units and
// the default error.
func (f *SpatialCriteriaFactory) WithinRadiusWithUnits(radius float64, latitude float64, longitude float64, radiusUnits SpatialUnits) *CircleCriteria {
	return f.WithinRadiusWithUnitsAndError(radius, latitude, longitude, radiusUnits, IndexingSpatialDefaultDistnaceErrorPct)
}
// WithinRadiusWithUnitsAndError creates a circle criteria with explicit
// units and error percentage.
func (f *SpatialCriteriaFactory) WithinRadiusWithUnitsAndError(radius float64, latitude float64, longitude float64, radiusUnits SpatialUnits, distErrorPercent float64) *CircleCriteria {
	return NewCircleCriteria(radius, latitude, longitude, radiusUnits, SpatialRelationWithin, distErrorPercent)
}
|
package main
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"unicode"
"golang.org/x/net/html"
)
// CSS class names used by Kindle's exported-notebook HTML to tag the
// different div elements.
const (
	SectionClass = "sectionHeading"
	HighlightClass = "noteHeading"
	NoteTextClass = "noteText"
	AuthorClass = "authors"
	TitleClass = "bookTitle"
)
// Highlight is a single Kindle highlight with its optional attached note.
type Highlight struct {
	Color string
	Page int
	Location int
	Text string
	Note string
}
// Section groups the highlights of one book section.
type Section struct {
	Title string
	Highlights []*Highlight
}
// Notebook is a parsed Kindle notebook: title, author and sections.
type Notebook struct {
	Title string
	Author string
	Sections []*Section
}
// Parse extracts sections, highlights and notes from a Kindle notebook
// exported as HTML.
func (nb *Notebook) Parse(htmlNotebook *html.Node) error {
	// All highlights and notes are in a div element with class="bodyContainer".
	// Find this element and then iterate its children.
	bc := findFirstNodeByClass(htmlNotebook, "bodyContainer")
	if bc == nil {
		return ErrNoBodyContainer
	}
	var (
		crtSection   *Section
		crtHighlight *Highlight
		// Indicates that the next div element with class NoteTextClass
		// contains the text of a note, not the text of a highlight.
		//
		// NB: A note always appears immediately after a highlight.
		insideNote bool
	)
	for c := bc.FirstChild; c != nil; c = c.NextSibling {
		if c.Type != html.ElementNode {
			continue
		}
		switch getClassAttr(c) {
		case SectionClass:
			// A new section starts: flush the previous section (if any)
			// together with its last highlight.
			if crtSection != nil {
				// Fix: only append a highlight that exists — the original
				// appended a nil *Highlight for sections that contained
				// no highlights, crashing Print later.
				if crtHighlight != nil {
					crtSection.Highlights = append(crtSection.Highlights, crtHighlight)
				}
				nb.Sections = append(nb.Sections, crtSection)
				crtHighlight = nil // Reset
			}
			crtSection = &Section{Title: strings.TrimSpace(c.FirstChild.Data)}
		case HighlightClass:
			// A new Highlight or an attached Note starts
			highlightText := strings.TrimLeftFunc(c.FirstChild.Data, unicode.IsSpace)
			if strings.HasPrefix(highlightText, "Highlight") {
				// Flush the previous highlight of the current section.
				if crtHighlight != nil && crtSection != nil {
					crtSection.Highlights = append(crtSection.Highlights, crtHighlight)
				}
				crtHighlight = parseHighlightNode(c)
			} else if strings.HasPrefix(highlightText, "Note") {
				insideNote = true
			}
		case NoteTextClass:
			// The text of a highlight or note.
			// Fix: guard against malformed input where note text appears
			// before any highlight, which previously dereferenced a nil
			// *Highlight.
			if crtHighlight == nil {
				insideNote = false
				continue
			}
			text := parseTextNode(c)
			if insideNote {
				crtHighlight.Note = text
				insideNote = false // Reset
			} else {
				crtHighlight.Text = text
			}
		case AuthorClass:
			nb.Author = strings.TrimSpace(c.FirstChild.Data)
		case TitleClass:
			nb.Title = strings.TrimSpace(c.FirstChild.Data)
		}
	}
	// Append the last highlight and section. Fix: both may be absent for
	// an empty or section-less notebook — the original dereferenced a nil
	// *Section here.
	if crtSection != nil {
		if crtHighlight != nil {
			crtSection.Highlights = append(crtSection.Highlights, crtHighlight)
		}
		nb.Sections = append(nb.Sections, crtSection)
	}
	return nil
}
// Print writes a human-readable dump of the notebook to standard output:
// the title/author line, then each section with its numbered highlights.
func (nb *Notebook) Print() {
	fmt.Printf("'%s' by %s\n", nb.Title, nb.Author)
	for si, sec := range nb.Sections {
		fmt.Printf("Section %02d: %s\n", si+1, sec.Title)
		for hi, hl := range sec.Highlights {
			fmt.Printf("Highlight %02d-%d\n", si+1, hi+1)
			fmt.Printf("\tText: %s\n", hl.Text)
			fmt.Printf("\tNote: %s\n", hl.Note)
		}
		fmt.Println()
	}
}
// ErrNoBodyContainer is returned by Parse when the exported HTML does not
// contain the expected <div class="bodyContainer"> element.
var ErrNoBodyContainer = errors.New(`expected element with class="bodyContainer"`)

// Patterns extracting the numeric page/location from a highlight heading
// such as "Highlight (yellow) - Page 115 · Location 954".
var (
	reExtractLocation = regexp.MustCompile(`Location (\d+)`)
	reExtractPage = regexp.MustCompile(`Page (\d+)`)
)
// parseHighlightNode parses a Highlight node to determine the page, location
// and color of the highlight.
//
// A Highlight node looks like this (contains only location):
// <div class="noteHeading">
// Highlight (<span class="highlight_yellow">yellow</span>) - Location 2364
// </div>
//
// Or like this (contains both page and location):
// <div class="noteHeading">
// Highlight(<span class="highlight_yellow">yellow</span>) - 10. Scientists as Explorers of the Universe > Page 115 · Location 954
// </div>
func parseHighlightNode(n *html.Node) *Highlight {
	// Children of a Highlight node:
	// 1. TextNode with the "Highlight(" prefix
	// 2. ElementNode such as <span class="highlight_yellow">yellow</span>
	// 3. TextNode with the suffix, e.g. ") - Location 2364"
	h := &Highlight{}
	for child := n.FirstChild; child != nil; child = child.NextSibling {
		switch child.Type {
		case html.TextNode:
			// The capture groups match only digits, so Atoi cannot fail
			// and the values fit in an int; the error is safely ignored.
			if m := reExtractLocation.FindStringSubmatch(child.Data); m != nil {
				h.Location, _ = strconv.Atoi(m[1])
			}
			if m := reExtractPage.FindStringSubmatch(child.Data); m != nil {
				h.Page, _ = strconv.Atoi(m[1])
			}
		case html.ElementNode:
			// The color is encoded in a class of the form
			// highlight_<color_name>; a missing underscore means the HTML
			// is malformed and Color is left empty.
			class := getClassAttr(child)
			if idx := strings.IndexRune(class, '_'); idx != -1 {
				h.Color = class[idx+1:]
			}
		}
	}
	return h
}
// parseTextNode parses Note nodes and returns their content.
//
// A Note node looks like this:
// <div class="noteText">
// Entrepreneurship is enhanced by performing lots of quick, easily performed experiments.
// </div>
//
// Returns "" when the node does not carry NoteTextClass or has no text
// child (previously an empty div caused a nil-pointer dereference).
func parseTextNode(n *html.Node) string {
	if getClassAttr(n) != NoteTextClass {
		return ""
	}
	if n.FirstChild == nil {
		// Empty <div class="noteText"></div>: nothing to extract.
		return ""
	}
	return strings.TrimSpace(n.FirstChild.Data)
}
// findFirstNodeByClass does a depth-first search and returns the first node
// whose "class" attribute equals className, or nil when no such node exists.
func findFirstNodeByClass(n *html.Node, className string) *html.Node {
	if n.Type == html.ElementNode {
		for _, attr := range n.Attr {
			if attr.Key == "class" && attr.Val == className {
				return n
			}
		}
	}
	for child := n.FirstChild; child != nil; child = child.NextSibling {
		if match := findFirstNodeByClass(child, className); match != nil {
			return match
		}
	}
	return nil
}
// getClassAttr returns the value of a node's "class" attribute, or the
// empty string when the node has no such attribute.
func getClassAttr(n *html.Node) string {
	for _, attr := range n.Attr {
		if attr.Key != "class" {
			continue
		}
		return attr.Val
	}
	return ""
}
|
package conver
import (
"encoding/json"
"log"
"testing"
)
// Anyom1 is a fixture exercising promotion of an align-tagged field through
// two levels of struct embedding.
type Anyom1 struct {
	Single string `align:"single"`
}

// Anyom embeds Anyom1 so TestConver covers nested embedded structs.
type Anyom struct {
	Sex string `align:"sex"`
	Anyom1
}

// People is the top-level fixture decoded by TestConver; it mixes plain
// fields, a nested struct and embedded structs, all keyed by `align` tags.
type People struct {
	Age string `align:"age"`
	Name string `align:"name"`
	Message Msg `align:"msg"`
	Anyom
}

// Msg is a nested fixture containing a further nested Addr.
type Msg struct {
	Time string `align:"time"`
	Address Addr `align:"address"`
}

// Addr is the innermost nested fixture.
type Addr struct {
	Home string `align:"home"`
}
var rsp = `{"age":1800,"name":"test","sex":"nan","single":"yes","msg":{"time":"20150331","address":{"home":"hello world"}},"sign":"adsuhdawkjdiahandawdh"}`
func TestConver(t *testing.T) {
params := make(map[string]interface{})
err := json.Unmarshal([]byte(rsp), ¶ms)
if err != nil {
log.Println(err)
}
o := &People{}
err = Do(o, params)
if err != nil {
t.Error(err)
t.FailNow()
}
log.Printf("%+v", o)
}
|
// Based on: https://github.com/btcsuite/btcwallet/blob/master/walletsetup.go
/*
* Copyright (c) 2014-2015 The btcsuite developers
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package lnwallet
import (
"path/filepath"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/wire"
_ "github.com/btcsuite/btcwallet/walletdb/bdb"
)
// networkDir returns the directory name of a network directory to hold
// wallet files.
func networkDir(dataDir string, chainParams *chaincfg.Params) string {
	name := chainParams.Name
	// Always store testnet data under "testnet" rather than "testnet3":
	// the chaincfg parameters may be renamed to "testnet3" in the future,
	// and pinning the directory name now future-proofs that change (an
	// upgrade plan for moving the data directory can be worked out later).
	if chainParams.Net == wire.TestNet3 {
		name = "testnet"
	}
	return filepath.Join(dataDir, name)
}
|
package main
import (
"math"
"sort"
"strconv"
"strings"
)
//726. 原子的数量
//给定一个化学式formula(作为字符串),返回每种原子的数量。
//
//原子总是以一个大写字母开始,接着跟随0个或任意个小写字母,表示原子的名字。
//
//如果数量大于 1,原子后会跟着数字表示原子的数量。如果数量等于 1 则不会跟数字。例如,H2O 和 H2O2 是可行的,但 H1O2 这个表达是不可行的。
//
//两个化学式连在一起是新的化学式。例如H2O2He3Mg4 也是化学式。
//
//一个括号中的化学式和数字(可选择性添加)也是化学式。例如 (H2O2) 和 (H2O2)3 是化学式。
//
//给定一个化学式,输出所有原子的数量。格式为:第一个(按字典序)原子的名子,跟着它的数量(如果数量大于 1),然后是第二个原子的名字(按字典序),跟着它的数量(如果数量大于 1),以此类推。
//
//示例 1:
//
//输入:
//formula = "H2O"
//输出: "H2O"
//解释:
//原子的数量是 {'H': 2, 'O': 1}。
//示例 2:
//
//输入:
//formula = "Mg(OH)2"
//输出: "H2MgO2"
//解释:
//原子的数量是 {'H': 2, 'Mg': 1, 'O': 2}。
//示例 3:
//
//输入:
//formula = "K4(ON(SO3)2)2"
//输出: "K4N2O14S4"
//解释:
//原子的数量是 {'K': 4, 'N': 2, 'O': 14, 'S': 4}。
//注意:
//
//所有原子的第一个字母为大写,剩余字母都是小写。
//formula的长度在[1, 1000]之间。
//formula只包含字母、数字和圆括号,并且题目中给定的是合法的化学式。
//思路 倒序遍历,栈,哈希表
// countOfAtoms counts the atoms in a chemical formula and renders them in
// lexicographic order, appending each count only when it exceeds 1
// (LeetCode 726). The formula is scanned right-to-left so that a number is
// always seen before the atom or group it multiplies; a stack of group
// multipliers tracks nested parentheses.
func countOfAtoms(formula string) string {
	counts := make(map[string]int)
	var order []string // first-seen atom names, sorted later
	var multipliers []int // stack of cumulative group multipliers
	num, digits := 1, 0 // pending count and number of digits read so far
	var nameBuf []byte // lowercase tail of an atom name, built backwards
	for i := len(formula) - 1; i >= 0; i-- {
		c := formula[i]
		switch {
		case c >= '0' && c <= '9':
			// Digits arrive least-significant first in this direction.
			if digits == 0 {
				num = 0
			}
			num += int(c-'0') * int(math.Pow10(digits))
			digits++
		case c == ')':
			// Open a group: its multiplier is the pending number times
			// any enclosing group's multiplier.
			if len(multipliers) > 0 {
				num *= multipliers[len(multipliers)-1]
			}
			multipliers = append(multipliers, num)
			num, digits = 1, 0
		case c == '(':
			// Close the group opened by the matching ')'.
			multipliers = multipliers[:len(multipliers)-1]
		case c >= 'a' && c <= 'z':
			// Lowercase letters extend the atom name leftwards.
			nameBuf = append([]byte{c}, nameBuf...)
		default:
			// An uppercase letter completes an atom name.
			nameBuf = append([]byte{c}, nameBuf...)
			name := string(nameBuf)
			if len(multipliers) > 0 {
				num *= multipliers[len(multipliers)-1]
			}
			if counts[name] == 0 {
				order = append(order, name)
			}
			counts[name] += num
			num, digits = 1, 0
			nameBuf = nil
		}
	}
	sort.Strings(order)
	var sb strings.Builder
	for _, name := range order {
		sb.WriteString(name)
		if counts[name] > 1 {
			sb.WriteString(strconv.Itoa(counts[name]))
		}
	}
	return sb.String()
}
// main runs the worked example from the problem statement:
// K4(ON(SO3)2)2 -> K4N2O14S4.
func main() {
	println(countOfAtoms("K4(ON(SO3)2)2"))
}
|
package Subdomain_Visit_Count
import (
"fmt"
"strconv"
"strings"
)
// subdomainVisits expands count-paired domains such as
// "9001 discuss.leetcode.com" into visit totals for the domain and every
// parent subdomain, formatted as "<count> <domain>". Output order follows
// Go's (randomized) map iteration order.
func subdomainVisits(cpdomains []string) []string {
	visits := make(map[string]int)
	for _, entry := range cpdomains {
		parts := strings.Split(entry, " ")
		count, _ := strconv.Atoi(parts[0])
		domain := parts[1]
		visits[domain] += count
		// Each '.' begins a parent domain that also receives the visits.
		for i := 0; i < len(domain); i++ {
			if domain[i] == '.' {
				visits[domain[i+1:]] += count
			}
		}
	}
	var rst []string
	for domain, total := range visits {
		rst = append(rst, fmt.Sprintf("%s %s", strconv.Itoa(total), domain))
	}
	return rst
}
|
/**
* @Author: lzw5399
* @Date: 2021/1/16 21:43
* @Desc: 流程实例
*/
package controller
import (
"net/http"
"strconv"
"workflow/global/response"
"workflow/model/request"
"workflow/service"
"github.com/labstack/echo/v4"
)
// instanceSvc is the shared service used by all process-instance handlers.
var (
	instanceSvc service.InstanceService = service.NewInstanceService()
)

// StartProcessInstance creates and starts a new process instance from the
// bound InstanceRequest.
func StartProcessInstance(c echo.Context) error {
	var r request.InstanceRequest
	if err := c.Bind(&r); err != nil {
		return response.Failed(c, http.StatusBadRequest)
	}
	result, err := instanceSvc.Start(&r)
	if err != nil {
		// NOTE(review): unlike the other handlers, result doubles as the
		// HTTP status code here — confirm Start returns a status-like
		// value on failure.
		return response.FailWithMsg(c, int(result), err)
	}
	return response.OkWithData(c, result)
}

// GetProcessInstance loads a single process instance by the "id" query
// parameter; 400 for a non-numeric id, 404 when the instance is missing.
func GetProcessInstance(c echo.Context) error {
	id, err := strconv.Atoi(c.QueryParam("id"))
	if err != nil {
		return response.Failed(c, http.StatusBadRequest)
	}
	instance, err := instanceSvc.Get(uint(id))
	if err != nil {
		return response.Failed(c, http.StatusNotFound)
	}
	return response.OkWithData(c, instance)
}
// ListProcessInstances returns a paginated list of process instances.
// Paging parameters are bound from the query string.
func ListProcessInstances(c echo.Context) error {
	var r request.PagingRequest
	if err := c.Bind(&r); err != nil {
		return response.Failed(c, http.StatusBadRequest)
	}
	instances, err := instanceSvc.List(&r)
	if err != nil {
		return response.Failed(c, http.StatusInternalServerError)
	}
	return response.OkWithData(c, instances)
}

// GetInstanceVariable returns a single variable of a process instance.
func GetInstanceVariable(c echo.Context) error {
	var r request.GetVariableRequest
	if err := c.Bind(&r); err != nil {
		return response.Failed(c, http.StatusBadRequest)
	}
	resp, err := instanceSvc.GetVariable(&r)
	if err != nil {
		return response.FailWithMsg(c, http.StatusInternalServerError, err)
	}
	return response.OkWithData(c, resp)
}

// GetInstanceVariableList returns all variables of a process instance.
func GetInstanceVariableList(c echo.Context) error {
	var r request.GetVariableListRequest
	if err := c.Bind(&r); err != nil {
		return response.Failed(c, http.StatusBadRequest)
	}
	variables, err := instanceSvc.ListVariables(&r)
	if err != nil {
		return response.FailWithMsg(c, http.StatusInternalServerError, err)
	}
	return response.OkWithData(c, variables)
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
)
// HashServer describes one online MD5 lookup service: a URL template the
// hash is substituted into, and an optional regexp with one capture group
// for extracting the plaintext from an HTML response. An empty
// ResponseRegexp means the raw response body is the answer.
type HashServer struct {
	Url string
	ResponseRegexp string
}
// main looks up the MD5 hash given as the first CLI argument against each
// known hash server in order and prints the first plaintext found.
func main() {
	fmt.Println(`
 #####################################
 # MD5 Cracker #
 #-----------------------------------#
 # github.com/dursunkatar #
 #####################################
 `)
	// The hash argument is required; previously a missing argument caused
	// an index-out-of-range panic on os.Args[1].
	if len(os.Args) < 2 {
		fmt.Println("Usage: md5cracker <md5-hash>")
		os.Exit(1)
	}
	servers := []HashServer{
		HashServer{
			Url: "http://www.nitrxgen.net/md5db/%s",
		},
		HashServer{
			Url: "https://md5decrypt.net/Api/api.php?hash=%s&hash_type=md5&email=deanna_abshire@proxymail.eu&code=1152464b80a61728",
		},
		HashServer{
			Url: "https://hashtoolkit.com/reverse-hash/?hash=%s",
			ResponseRegexp: "<span title=\"decrypted md5 hash\">(.*?)</span>",
		},
	}
	hash := os.Args[1]
	for _, cserver := range servers {
		if ok, result := cserver.crack(hash); ok {
			fmt.Println(result)
			os.Exit(0)
		}
	}
	fmt.Println("Not Found!")
}
// regex applies m.ResponseRegexp to result and returns the first capture
// group, or "" when the pattern does not match.
func (m HashServer) regex(result string) string {
	// The pattern is recompiled per call; acceptable at this call volume
	// (at most once per server per run). MustCompile panics on an invalid
	// pattern, which for the hard-coded server list is a programmer error.
	groups := regexp.MustCompile(m.ResponseRegexp).FindStringSubmatch(result)
	if len(groups) == 0 {
		return ""
	}
	return groups[1]
}
// crack queries the server for hash and reports (true, plaintext) on
// success. Network errors, empty bodies and non-matching responses all
// yield (false, "").
func (m HashServer) crack(hash string) (bool, string) {
	res, err := http.Get(fmt.Sprintf(m.Url, hash))
	if err != nil {
		return false, ""
	}
	defer res.Body.Close()
	bytes, err := ioutil.ReadAll(res.Body)
	body := strings.Trim(string(bytes), " ")
	if err != nil || body == "" {
		return false, ""
	}
	if m.ResponseRegexp != "" {
		// Run the extraction once and reuse it; the original called
		// m.regex(body) a second time to build the return value.
		result := m.regex(body)
		return result != "", result
	}
	return true, body
}
|
package main
import (
"fmt"
"reflect"
)
// main prints the reflected type names of several untyped integer
// constants; on a 64-bit platform they all report "int".
func main() {
	fmt.Println("Hello, world")
	fmt.Println("0", reflect.TypeOf(0).Name())
	fmt.Println("2 ** 8 ", reflect.TypeOf(256).Name())
	fmt.Println("2 ** 32 ", reflect.TypeOf(4294967296).Name())
	// NOTE(review): the label is misleading — 1844674407370955161 is not
	// 2**64 (that is 18446744073709551616, which would overflow int);
	// the literal appears to have lost a digit.
	fmt.Println("2 ** 64 ", reflect.TypeOf(1844674407370955161).Name())
}
|
package main
import (
"errors"
"fmt"
"log"
"net"
"net/http"
"os"
"github.com/gin-gonic/gin"
"github.com/sony/sonyflake"
)
// machineID derives a sonyflake machine ID from the MY_IP environment
// variable, using the lower 16 bits of the IPv4 address.
func machineID() (uint16, error) {
	ipStr := os.Getenv("MY_IP")
	if len(ipStr) == 0 {
		return 0, errors.New("'MY_IP' environment variable not set")
	}
	// net.ParseIP returns a 16-byte slice even for IPv4 addresses, so
	// normalize to the 4-byte form first. Indexing the 16-byte form at
	// [2] and [3] silently produced machine ID 0 for every IPv4 address.
	// To4 returns nil for unparsable or non-IPv4 input, caught below.
	ip := net.ParseIP(ipStr).To4()
	if len(ip) < 4 {
		return 0, errors.New("invalid IP")
	}
	return uint16(ip[2])<<8 + uint16(ip[3]), nil
}
// main starts a gin HTTP server on :3000 that returns a freshly generated
// sonyflake ID (as a string, since the value exceeds JavaScript's safe
// integer range) for every GET /.
func main() {
	st := sonyflake.Settings{}
	st.MachineID = machineID
	sf := sonyflake.NewSonyflake(st)
	if sf == nil {
		log.Fatal("failed to initialize sonyflake")
	}
	r := gin.Default()
	r.GET("/", func(c *gin.Context) {
		// Generate new ID
		id, err := sf.NextID()
		if err != nil {
			c.JSON(http.StatusInternalServerError, gin.H{
				"error": err.Error(),
			})
		} else {
			// Return ID as string
			c.JSON(http.StatusOK, gin.H{
				"id": fmt.Sprint(id),
			})
		}
	})
	if err := r.Run(":3000"); err != nil {
		log.Fatal("failed to run server: ", err)
	}
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/julienschmidt/httprouter"
"io/ioutil"
"log"
"net/http"
"regexp"
)
// Anniversary mirrors the shape of the MediaWiki extracts API response.
type Anniversary struct {
	Batchcomplete string `json:"batchcomplete"`
	Warnings struct {
		Extracts struct {
			// Exported so encoding/json can populate it: the original
			// unexported "message" field was silently ignored by
			// Unmarshal (json tags on unexported fields have no effect).
			Message string `json:"*"`
		} `json:"extracts"`
	} `json:"warnings"`
	Query struct {
		Pages struct {
			Id struct {
				Pageid int `json:"pageid"`
				Ns int `json:"ns"`
				Title string `json:"title"`
				Extract string `json:"extract"`
			} `json:"456328"`
		} `json:"pages"`
	} `json:"query"`
}
// ENDPOINT queries the Japanese-Wikipedia extracts API for the page
// listing Japanese anniversaries. NOTE(review): it requests format=xml
// even though Search tries to json.Unmarshal the body — confirm intent.
const (
	ENDPOINT = "https://ja.wikipedia.org/w/api.php?action=query&prop=extracts&titles=%e6%97%a5%e6%9c%ac%e3%81%ae%e8%a8%98%e5%bf%b5%e6%97%a5%e4%b8%80%e8%a6%a7&format=xml"
)
// Search fetches the anniversary-list page from Wikipedia and writes every
// "<day>日 - <name>" match to the response, one numbered line each.
func Search(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
	client := &http.Client{}
	request, err := http.NewRequest("GET", ENDPOINT, nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	response, err := client.Do(request)
	if err != nil {
		// Previously ignored: a transport failure left response nil and
		// panicked on response.Body below.
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	var anniversaries []Anniversary
	// NOTE(review): ENDPOINT asks for format=xml, so this JSON decode can
	// never succeed; the regexp scan below does the real extraction.
	json.Unmarshal(body, &anniversaries)
	responseString := string(body)
	punctuateMonthly := regexp.MustCompile(`(\d{1,2})日\s-\s(.+日|.+デー)`)
	punctuatedByMonth := punctuateMonthly.FindAllString(responseString, 400)
	for s1, s2 := range punctuatedByMonth {
		fmt.Fprintln(w, s1+1, s2)
	}
}
// main wires the Search handler to GET /search/:month/:date and serves on
// :8080. NOTE(review): Search does not read the :month/:date parameters.
func main() {
	router := httprouter.New()
	router.GET("/search/:month/:date", Search)
	log.Fatal(http.ListenAndServe(":8080", router))
}
|
package store
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/golang-migrate/migrate/v4/source/github"
"github.com/pkg/errors"
"nidavellir/libs"
)
// Migrate applies all pending database migrations from migrationSource()
// to the Postgres instance. migrate.ErrNoChange (already up to date) is
// not treated as an error.
func (p *Postgres) Migrate() error {
	driver, err := postgres.WithInstance(p.db.DB(), &postgres.Config{})
	if err != nil {
		return errors.Wrap(err, "could not create database driver")
	}
	m, err := migrate.NewWithDatabaseInstance(migrationSource(), "postgres", driver)
	if err != nil {
		return errors.Wrap(err, "could not create migration instance. "+
			"If error is due to rate limit by github, set your username and token in the environment with "+
			"'GITHUB_USERNAME' and 'GITHUB_TOKEN' respectively")
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		return errors.Wrap(err, "could not apply migrations")
	}
	return nil
}
// migrationSource resolves where migration files live: it first probes
// "migration"/"migrations" directories next to this source file and next
// to the executable, and falls back to fetching them from GitHub.
func migrationSource() string {
	// returns a non-empty sourceUrl and no errors if path exists
	sourceUrlFromPath := func(elem ...string) string {
		dir := filepath.Join(elem...)
		if libs.PathExists(dir) {
			sourceUrl := "file://" + dir
			// Replace windows full path with current directory as go-migrate uses net/url to parse path and that
			// package doesn't parse windows path
			if runtime.GOOS == "windows" {
				cwd, err := os.Getwd()
				if err != nil {
					return ""
				}
				sourceUrl = strings.Replace(strings.Replace(sourceUrl, cwd, ".", 1), `\`, "/", -1)
			}
			return sourceUrl
		}
		return ""
	}
	// NOTE(review): the os.Executable error is deliberately ignored; on
	// failure root is "" and the corresponding candidate paths simply
	// won't exist.
	_, file, _, _ := runtime.Caller(0)
	root, _ := os.Executable()
	for _, elems := range [][]string{
		{filepath.Dir(file), "migration"},
		{filepath.Dir(file), "migrations"},
		{filepath.Dir(root), "migration"},
		{filepath.Dir(root), "migrations"},
	} {
		if sourceUrl := sourceUrlFromPath(elems...); sourceUrl != "" {
			return sourceUrl
		}
	}
	// use the github url by default
	username := os.Getenv("GITHUB_USERNAME")
	publicRepoReadonlyToken := os.Getenv("GITHUB_TOKEN")
	repoPath := "kantopark/nidavellir/services/store/migration"
	return fmt.Sprintf("github://%s:%s@%s", username, publicRepoReadonlyToken, repoPath)
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"bytes"
"fmt"
"github.com/uber/kraken/utils/randutil"
)
// BlobFixture joins all information associated with a blob for testing convenience.
type BlobFixture struct {
	Content []byte
	Digest Digest
	MetaInfo *MetaInfo
}

// Length returns the length of the blob.
func (f *BlobFixture) Length() int64 {
	return int64(len(f.Content))
}

// Info returns a BlobInfo for f.
func (f *BlobFixture) Info() *BlobInfo {
	return NewBlobInfo(f.Length())
}

// CustomBlobFixture creates a BlobFixture with custom fields.
func CustomBlobFixture(content []byte, digest Digest, mi *MetaInfo) *BlobFixture {
	return &BlobFixture{content, digest, mi}
}
// SizedBlobFixture creates a randomly generated BlobFixture of given size with given piece lengths.
// Panics on digest or metainfo failure, which is acceptable in test fixtures.
func SizedBlobFixture(size uint64, pieceLength uint64) *BlobFixture {
	b := randutil.Text(size)
	d, err := NewDigester().FromBytes(b)
	if err != nil {
		panic(err)
	}
	mi, err := NewMetaInfo(d, bytes.NewReader(b), int64(pieceLength))
	if err != nil {
		panic(err)
	}
	return &BlobFixture{
		Content: b,
		Digest: d,
		MetaInfo: mi,
	}
}

// NewBlobFixture creates a randomly generated BlobFixture (256 bytes, 8-byte pieces).
func NewBlobFixture() *BlobFixture {
	return SizedBlobFixture(256, 8)
}
// PeerIDFixture returns a randomly generated PeerID.
func PeerIDFixture() PeerID {
	p, err := RandomPeerID()
	if err != nil {
		panic(err)
	}
	return p
}

// PeerInfoFixture returns a randomly generated PeerInfo (non-origin).
func PeerInfoFixture() *PeerInfo {
	return NewPeerInfo(PeerIDFixture(), randutil.IP(), randutil.Port(), false, false)
}

// OriginPeerInfoFixture returns a randomly generated PeerInfo for an origin.
func OriginPeerInfoFixture() *PeerInfo {
	return NewPeerInfo(PeerIDFixture(), randutil.IP(), randutil.Port(), true, true)
}

// MetaInfoFixture returns a randomly generated MetaInfo.
func MetaInfoFixture() *MetaInfo {
	return NewBlobFixture().MetaInfo
}

// InfoHashFixture returns a randomly generated InfoHash.
func InfoHashFixture() InfoHash {
	return MetaInfoFixture().InfoHash()
}

// DigestFixture returns a random Digest.
func DigestFixture() Digest {
	return NewBlobFixture().Digest
}

// DigestListFixture returns a list of n random Digests.
func DigestListFixture(n int) []Digest {
	var l DigestList
	for i := 0; i < n; i++ {
		l = append(l, DigestFixture())
	}
	return l
}
// PeerContextFixture returns a randomly generated PeerContext in zone1.
func PeerContextFixture() PeerContext {
	pctx, err := NewPeerContext(
		RandomPeerIDFactory,
		"zone1",
		"test01-zone1",
		randutil.IP(),
		randutil.Port(),
		false)
	if err != nil {
		panic(err)
	}
	return pctx
}

// OriginContextFixture returns a randomly generated origin PeerContext.
func OriginContextFixture() PeerContext {
	octx := PeerContextFixture()
	octx.Origin = true
	return octx
}

// TagFixture creates a random tag for service repo-bar.
func TagFixture() string {
	return fmt.Sprintf("namespace-foo/repo-bar:%s", randutil.Text(8))
}

// NamespaceFixture creates a random namespace.
func NamespaceFixture() string {
	return fmt.Sprintf("namespace-foo/repo-bar-%s", randutil.Text(8))
}
|
package template
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"text/template"
)
// Template bundles a parsed manifest with its argument definitions.
type Template struct {
	Manifest Manifest
	Args []Args
}
// NewTemplate loads manifest.json from templateURL, binds the provided
// option values, and renders each declared file name as a text/template
// against the option map. The number of options must match the manifest.
func NewTemplate(templateURL string, options []string) (*Template, error) {
	var tmpl Template
	// Open template manifest file
	templateFile := path.Join(templateURL, "manifest.json")
	if _, err := os.Stat(templateFile); os.IsNotExist(err) {
		return &tmpl, errors.New("template file does not exist")
	}
	file, err := os.OpenFile(templateFile, os.O_RDONLY, 0644)
	if err != nil {
		return &tmpl, fmt.Errorf("open template file failed, %s", err)
	}
	// The handle was previously never closed and leaked on every call.
	defer file.Close()
	manifestRaw, err := ioutil.ReadAll(file)
	if err != nil {
		return &tmpl, fmt.Errorf("read file failed: %s", err)
	}
	// Parse manifest content
	err = tmpl.parseManifest(manifestRaw)
	if err != nil {
		return &tmpl, fmt.Errorf("parse manifest failed, %w", err)
	}
	// Parse option's values
	if len(options) != len(tmpl.Manifest.Options) {
		return &tmpl, fmt.Errorf("invalid options, required %d options, but only %d",
			len(tmpl.Manifest.Options), len(options))
	}
	for i := range tmpl.Manifest.Options {
		tmpl.Manifest.Options[i].Value = options[i]
	}
	// Render each file name as a template over the option map, and resolve
	// each file's template path relative to templateURL.
	for i, file := range tmpl.Manifest.Files {
		t := template.Must(template.New(file.Name).Parse(file.Name))
		buf := bytes.NewBuffer([]byte{})
		err = t.Execute(buf, tmpl.Manifest.OptionMap())
		if err != nil {
			return &tmpl, fmt.Errorf("parse file name failed, %s", err)
		}
		tmpl.Manifest.Files[i].Name = buf.String()
		tmpl.Manifest.Files[i].Template = path.Join(templateURL, file.Template)
	}
	return &tmpl, nil
}
// Generate renders every file declared in the manifest, stopping at the
// first error.
func (tmpl *Template) Generate() error {
	for _, f := range tmpl.Manifest.Files {
		if err := f.Generate(tmpl.Manifest.OptionMap()); err != nil {
			return err
		}
	}
	return nil
}
// parseManifest decodes the raw manifest JSON into tmpl.Manifest.
func (tmpl *Template) parseManifest(raw []byte) error {
	var m Manifest
	if err := json.Unmarshal(raw, &m); err != nil {
		return fmt.Errorf("json unmarshal failed, %w", err)
	}
	tmpl.Manifest = m
	return nil
}
// interact is a placeholder for interactively prompting the user for
// manifest option values; it is currently a no-op.
func (tmpl *Template) interact() error {
	return nil
}
|
package BuddySimulator
import (
"math"
"fmt"
)
// Memory simulates a buddy allocator over a power-of-two sized arena.
// freeLists[h] holds the free blocks of height h; block size doubles with
// each height and the single block at the top height spans the whole arena.
type Memory struct {
	size uint
	height uint
	freeLists [][]*Block
}
// NewMemory creates a buddy-allocator simulation over size units of memory.
// size must be a non-zero power of two; the whole arena starts as a single
// free block at the top height.
func NewMemory(size uint) (*Memory, error) {
	// math.Log2(0) is -Inf, which passed the power-of-two check below and
	// made the uint conversion of floatDepth undefined; reject 0 up front.
	if size == 0 {
		return nil, fmt.Errorf("size must be a power of 2; got %d", size)
	}
	floatDepth := math.Log2(float64(size))
	if math.Floor(floatDepth) != floatDepth {
		return nil, fmt.Errorf("size must be a power of 2; got %d", size)
	}
	newMemory := &Memory{}
	newMemory.size = size
	newMemory.height = uint(floatDepth) + 1
	newMemory.freeLists = make([][]*Block, newMemory.height)
	top := newMemory.height - 1
	newMemory.freeLists[top] = append(newMemory.freeLists[top], NewBlock(0, size))
	return newMemory, nil
}
// String renders the memory state as
// {size:N height:H freeLists:[[...][...]]} with one inner list per height,
// each free block printed with %+v.
func (m Memory)String() string {
	res := ""
	res += "{"
	res += "size:" + fmt.Sprint(m.size)
	res += " height:" + fmt.Sprint(m.height)
	res += " freeLists:["
	for i := uint(0); i < m.height; i++ {
		res += "["
		for j, b := range m.freeLists[i] {
			res += fmt.Sprintf("%+v", *b)
			// No separator after the last block of a list.
			if j != len(m.freeLists[i]) - 1 {
				res += ", "
			}
		}
		res += "]"
	}
	res += "]}"
	return res
}
// FreeBlock returns block to the free lists. If the block's buddy is also
// free at the same height, the two are coalesced into one block of twice
// the size and freed recursively; otherwise the block is simply appended
// to its height's free list.
func (m *Memory)FreeBlock(block *Block) error {
	buddyAddress, left := block.getBuddyAddressAndOwnPos()
	// Height is derived from the block size (same rounding as allocation).
	_, height := calculateNextPowerOfTwoAndHeight(block.size)
	var (
		b *Block
		pos int
		found bool = false
	)
	// Linear scan for the buddy in this height's free list.
	for pos, b = range m.freeLists[height] {
		if b.GetAddress() == buddyAddress {
			found = true
			break
		}
	}
	if found {
		// Remove the buddy and free the merged, double-sized block; the
		// merged block starts at whichever of the two addresses is lower.
		m.freeLists[height] = append(m.freeLists[height][:pos], m.freeLists[height][pos + 1:]...)
		if left {
			return m.FreeBlock(NewBlock(block.GetAddress(), 2 * block.GetSize()))
		} else {
			return m.FreeBlock(NewBlock(buddyAddress, 2 * block.GetSize()))
		}
	}
	m.freeLists[height] = append(m.freeLists[height], block)
	return nil
}
// AllocateMemory hands out a free block large enough for size units,
// rounded up to the next power of two, splitting bigger blocks as needed.
func (m *Memory) AllocateMemory(size uint) (block *Block, err error) {
	nextPower, height := calculateNextPowerOfTwoAndHeight(size)
	if nextPower > m.size {
		return nil, fmt.Errorf("can't allocate block of %d > memory capacity %d", size, m.size)
	}
	return m.makeBlockOfHeight(height)
}
// makeBlockOfHeight pops a free block of the given height, first splitting
// a block one level up when none is currently free.
func (m *Memory) makeBlockOfHeight(height uint) (*Block, error) {
	if len(m.freeLists[height]) == 0 {
		if err := m.splitBlockOfHeight(height + 1); err != nil {
			return nil, err
		}
	}
	head := m.freeLists[height][0]
	m.freeLists[height] = m.freeLists[height][1:]
	return head, nil
}
// splitBlockOfHeight takes a free block at the given height and splits it
// into its two buddies one level down, recursing upward first when this
// height has no free block. Fails when the recursion would exceed the
// arena's top height (i.e. the memory is exhausted for this size).
func (m*Memory)splitBlockOfHeight(height uint) error {
	if height >= m.height {
		return fmt.Errorf("end of memory height reached got %d, max height %d", height, m.height - 1)
	}
	if len(m.freeLists[height]) == 0 {
		if err := m.splitBlockOfHeight(height + 1); err != nil {
			return err
		}
	}
	// Size of each half, i.e. a block one level below this height.
	blockSize := CalculateBlockSize(m, height - 1)
	splitBlock := m.freeLists[height][0]
	m.freeLists[height] = m.freeLists[height][1:]
	leftBuddy := NewBlock(splitBlock.GetAddress(), blockSize)
	rightBuddy := NewBlock(splitBlock.GetAddress() + blockSize, blockSize)
	m.freeLists[height - 1] = append(m.freeLists[height - 1], leftBuddy, rightBuddy)
	return nil
}
// calculateNextPowerOfTwoAndHeight rounds size up to the nearest power of
// two and reports that power together with its exponent (the buddy-tree
// height of a block of that size). size 0 and 1 both yield (1, 0).
func calculateNextPowerOfTwoAndHeight(size uint) (nextPower, height uint) {
	nextPower = 1
	for nextPower < size {
		nextPower <<= 1
		height++
	}
	return
}
// Block describes one allocation unit: its start address within the arena
// and its size (always a power of two in normal operation).
type Block struct {
	address uint
	size uint
}
// NewBlock allocates a Block descriptor with the given start address and size.
func NewBlock(address, size uint) *Block {
	return &Block{address: address, size: size}
}
// getBuddyAddressAndOwnPos returns the address of this block's buddy and
// whether this block is the left half of the pair. A block whose index
// (address / size) is even is the left buddy; its buddy sits one block to
// the right, and vice versa. Assumes size is non-zero and divides address.
func (b Block)getBuddyAddressAndOwnPos() (buddyAddress uint, left bool) {
	blockSize := b.GetSize()
	address := b.GetAddress()
	if (address / blockSize) % 2 == 0 {
		return address + blockSize, true
	} else {
		return address - blockSize, false
	}
}
// CalculateBlockSize returns the size of a block at blockHeight: the full
// arena size halved once for each level below the top height.
func CalculateBlockSize(memory *Memory, blockHeight uint) uint {
	return memory.size >> (memory.height - 1 - blockHeight)
}
// GetAddress returns the block's start address.
func (b Block)GetAddress() uint {
	return b.address
}

// GetSize returns the block's size.
func (b Block) GetSize() uint {
	return b.size
}
|
package router
import (
"editorApi/controller/editorapi"
"editorApi/middleware"
"github.com/gin-gonic/gin"
)
// InitClassWeixinRouter registers the class_weixin CRUD endpoints under the
// "editor" group, all wrapped in the CORS middleware.
func InitClassWeixinRouter(Router *gin.RouterGroup) {
	LangRouter := Router.Group("editor").Use(middleware.CORSMiddleware())
	{
		LangRouter.POST("class_weixin/list", editorapi.ClassWeixinList)
		LangRouter.POST("class_weixin/add", editorapi.ClassWeixinAdd)
		LangRouter.PUT("class_weixin/edit", editorapi.ClassWeixinEdit)
		LangRouter.DELETE("class_weixin/del", editorapi.ClassWeixinDel)
	}
}
package modules
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
)
var baseUrl = "https://api.github.com"
// UserPlan mirrors the "plan" object of the GitHub /user response.
type UserPlan struct {
	Name string `json:"name"`
	Space int64 `json:"space"`
	PrivateRepo int64 `json:"private_repo"`
	Collaborators int64 `json:"collaborator"`
}
// User mirrors the GitHub /user (authenticated user) response.
// NOTE(review): TwoFactorAuthentication is declared as string here —
// confirm against the API, which documents it as a boolean.
type User struct {
	Login string `json:"login"`
	Id int `json:"id"`
	NodeId string `json:"node_id"`
	AvatarUrl string `json:"avatar_url"`
	GravatarId string `json:"gravatar_id"`
	Url string `json:"url"`
	HtmlUrl string `json:"html_url"`
	FollowersUrl string `json:"followers_url"`
	FollowingUrl string `json:"following_url"`
	GistsUrl string `json:"gists_url"`
	StarredUrl string `json:"starred_url"`
	SubscriptionUrl string `json:"subscriptions_url"`
	OrganizationUrl string `json:"organizations_url"`
	ReposUrl string `json:"repos_url"`
	EventUrl string `json:"events_url"`
	RecievedEventsUrl string `json:"received_events_url"`
	UserType string `json:"type"`
	SiteAdmin bool `json:"site_admin"`
	Name string `json:"name"`
	Company string `json:"company"`
	Blog string `json:"blog"`
	Location string `json:"location"`
	Email string `json:"email"`
	Hireable bool `json:"hireable"`
	Bio string `json:"bio"`
	TwitterUsername string `json:"twitter_username"`
	PublicRepos int64 `json:"public_repos"`
	PublicGist int64 `json:"public_gists"`
	Followers int64 `json:"followers"`
	Following int64 `json:"following"`
	CreatedAt string `json:"created_at"`
	UpdatedAt string `json:"updated_at"`
	PrivateGist int64 `json:"private_gists"`
	TotalPrivateRepos int64 `json:"total_private_repos"`
	OwnedPrivateRepos int64 `json:"owned_private_repos"`
	DiskUsage int64 `json:"disk_usage"`
	Collaborators int64 `json:"collaborators"`
	TwoFactorAuthentication string `json:"two_factor_authentication"`
	Plan UserPlan `json:"plan"`
}
// Repo is the request payload for creating a repository.
type Repo struct {
	Name string `json:"name"`
	// Fixed struct tag: was `jsob:"private"`, a typo encoding/json
	// ignored, so the field marshaled as "Private".
	// NOTE(review): the GitHub API expects a boolean "private" — confirm
	// whether this field should be a bool.
	Private string `json:"private"`
	Description string `json:"description"`
}
// check aborts the program via log.Fatalln when err is non-nil; it is a
// no-op for nil errors.
func check(err error) {
	if err == nil {
		return
	}
	log.Fatalln(err)
}
// CreateRepo creates a repo connected to the user's PAT.
// args[0] is the repository name and args[1] the personal access token.
func CreateRepo(cmdCtx *cobra.Command, args []string) {
	// Both the repo name and the token are required; the original only
	// checked for zero args and panicked on args[1] when given one.
	if len(args) < 2 {
		fmt.Println("Please enter a repo name and token ex. nere create dopeRepo <token>")
		os.Exit(1)
	}
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: args[1]})
	client := oauth2.NewClient(ctx, ts)
	reqBody, err := json.Marshal(Repo{Name: args[0]})
	check(err)
	req, err := http.NewRequest("POST", fmt.Sprintf("%s/user/repos", baseUrl), bytes.NewBuffer(reqBody))
	check(err)
	resp, err := client.Do(req)
	check(err)
	// Close the body so the underlying connection can be reused.
	resp.Body.Close()
	fmt.Printf("Success: %s was created\n", args[0])
}
// DeleteRepo deletes a repo connected to the user's PAT.
// args[0] is the repository name and args[1] the personal access token.
// The login of the token's owner is fetched first to build the repo path.
func DeleteRepo(cmdCtx *cobra.Command, args []string) {
	// Both the repo name and the token are required; the original only
	// checked for zero args and panicked on args[1] when given one.
	if len(args) < 2 {
		fmt.Println("Please enter a repo name and token ex. nere delete dopeRepo <token>")
		os.Exit(1)
	}
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: args[1]})
	client := oauth2.NewClient(ctx, ts)
	userReq, err := http.NewRequest("GET", fmt.Sprintf("%s/user", baseUrl), nil)
	check(err)
	userResp, err := client.Do(userReq)
	check(err)
	body, err := ioutil.ReadAll(userResp.Body)
	check(err)
	userResp.Body.Close()
	user := User{}
	// Previously the Unmarshal error was silently discarded; a bad
	// response produced an empty login and a nonsense DELETE path.
	check(json.Unmarshal(body, &user))
	req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/repos/%s/%s", baseUrl, user.Login, args[0]), nil)
	check(err)
	resp, err := client.Do(req)
	check(err)
	resp.Body.Close()
	fmt.Printf("Success: %s was deleted\n", args[0])
}
|
package goudpscan_test
import (
"context"
"errors"
"net"
"sync"
"testing"
"time"
"github.com/KernelPryanic/goudpscan/goudpscan"
"github.com/KernelPryanic/goudpscan/unsafe"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewOptions checks the Options constructor returns a usable value.
func TestNewOptions(t *testing.T) {
	options := goudpscan.NewOptions(true, uint(10), uint8(3), 2)
	assert.NotNil(t, options, "NewOptions should not return nil")
}

// TestNewScanner checks the scanner constructor with a minimal target set.
func TestNewScanner(t *testing.T) {
	scanner := goudpscan.New(
		[]string{"127.0.0.1"},
		[]string{"80"},
		map[uint16][]string{},
		goudpscan.NewOptions(true, uint(10), uint8(3), 2),
	)
	assert.NotNil(t, scanner, "New should not return nil")
}

// TestSegmentation verifies that subnet specs split into their dotted
// segments, with any /prefix length appended as a trailing segment.
func TestSegmentation(t *testing.T) {
	tests := []struct {
		subnet []byte
		expected [][]byte
	}{
		{[]byte("192.168.0.1"), [][]byte{[]byte("192"), []byte("168"), []byte("0"), []byte("1")}},
		{[]byte("10.0.0.0/24"), [][]byte{[]byte("10"), []byte("0"), []byte("0"), []byte("0"), []byte("24")}},
		{[]byte("10.0.0.1-10"), [][]byte{[]byte("10"), []byte("0"), []byte("0"), []byte("1-10")}},
		{[]byte("10.0.0.1-10/24"), [][]byte{[]byte("10"), []byte("0"), []byte("0"), []byte("1-10"), []byte("24")}},
	}
	for _, test := range tests {
		segments := goudpscan.Segmentation(test.subnet)
		require.Equal(t, test.expected, segments)
	}
}
// TestBreakUpIP verifies expansion of segment lists into concrete IPs:
// plain octets pass through, "a-b" ranges expand combinatorially, and
// comma/range mixtures within one octet are rejected.
func TestBreakUpIP(t *testing.T) {
	tests := []struct {
		segments [][]byte
		expected [][]byte
		expectedErr bool
	}{
		{
			segments: [][]byte{[]byte("192"), []byte("168"), []byte("0"), []byte("1")},
			expected: [][]byte{[]byte("192.168.0.1")},
		},
		{
			segments: [][]byte{[]byte("10"), []byte("0"), []byte("0"), []byte("1-3")},
			expected: [][]byte{[]byte("10.0.0.1"), []byte("10.0.0.2"), []byte("10.0.0.3")},
		},
		{
			segments: [][]byte{[]byte("10"), []byte("0-1"), []byte("0"), []byte("1-3")},
			expected: [][]byte{[]byte("10.0.0.1"), []byte("10.0.0.2"), []byte("10.0.0.3"), []byte("10.1.0.1"), []byte("10.1.0.2"), []byte("10.1.0.3")},
		},
		{
			segments: [][]byte{[]byte("10"), []byte("0,1-2"), []byte("0"), []byte("24")},
			expectedErr: true,
		},
		{
			segments: [][]byte{[]byte("10"), []byte("0-1,1"), []byte("0"), []byte("24")},
			expectedErr: true,
		},
		{
			segments: [][]byte{[]byte("10"), []byte("0-1"), []byte("0,1-1"), []byte("24")},
			expectedErr: true,
		},
	}
	for _, test := range tests {
		ips, err := goudpscan.BreakUpIP(test.segments)
		if test.expectedErr {
			require.Error(t, err)
		} else {
			require.Equal(t, test.expected, ips)
		}
	}
}
// TestParseSubnet checks parsing of plain addresses, CIDR notation, and
// octet ranges (with and without a mask), plus rejection of comma+range mixes.
func TestParseSubnet(t *testing.T) {
	cases := []struct {
		in      string
		want    [][]byte
		wantErr bool
	}{
		{in: "192.168.0.1", want: [][]byte{[]byte("192.168.0.1")}},
		{in: "10.0.0.0/24", want: [][]byte{[]byte("10.0.0.0/24")}},
		{in: "10.0.0.1-4", want: [][]byte{[]byte("10.0.0.1"), []byte("10.0.0.2"), []byte("10.0.0.3"), []byte("10.0.0.4")}},
		{in: "10.0.0.1-2/24", want: [][]byte{[]byte("10.0.0.1/24"), []byte("10.0.0.2/24")}},
		{in: "10.0,1-2.1-2/24", wantErr: true},
		{in: "10.0,1-2.1-2", wantErr: true},
	}
	for _, tc := range cases {
		got, err := goudpscan.ParseSubnet(unsafe.S2B(tc.in))
		if tc.wantErr {
			require.Error(t, err)
			continue
		}
		require.Equal(t, tc.want, got)
	}
}
// TestBreakUPPort checks expansion of port specs (single ports, ranges,
// comma-separated lists) and rejection of malformed values.
func TestBreakUPPort(t *testing.T) {
	cases := []struct {
		in      []byte
		want    []uint16
		wantErr bool
	}{
		{in: []byte("80"), want: []uint16{80}},
		{in: []byte("22-25"), want: []uint16{22, 23, 24, 25}},
		{in: []byte("22-25,80"), want: []uint16{22, 23, 24, 25, 80}},
		{in: []byte("80.0"), wantErr: true},
		{in: []byte("22.2-25"), wantErr: true},
		{in: []byte("22-25.5"), wantErr: true},
	}
	for _, tc := range cases {
		got, err := goudpscan.BreakUPPort(tc.in)
		if tc.wantErr {
			require.Error(t, err)
			continue
		}
		require.Equal(t, tc.want, got)
	}
}
// TestHosts checks expansion of a CIDR block into its member addresses,
// pass-through of a single IP, and rejection of invalid CIDR input.
func TestHosts(t *testing.T) {
	cases := []struct {
		name    string
		in      []byte
		want    [][]byte
		wantErr bool
	}{
		{
			name: "Single IP",
			in:   []byte("192.168.1.1"),
			want: [][]byte{[]byte("192.168.1.1")},
		},
		{
			name: "CIDR /30",
			in:   []byte("192.168.1.0/30"),
			want: [][]byte{[]byte("192.168.1.0"), []byte("192.168.1.1"), []byte("192.168.1.2"), []byte("192.168.1.3")},
		},
		{
			name:    "Invalid CIDR",
			in:      []byte("192.168.1.256/24"),
			wantErr: true,
		},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got, err := goudpscan.Hosts(tc.in)
			if tc.wantErr {
				require.Error(t, err)
				return
			}
			require.Equal(t, tc.want, got)
		})
	}
}
// TestScan runs a real scan of 127.0.0.1:80 with the ICMP sniffer active and
// verifies that exactly one result, keyed "127.0.0.1:80", comes back.
// NOTE(review): this exercises the real network stack (and ICMP sniffing
// typically needs elevated privileges); the goroutine/sleep ordering is
// timing-sensitive, so the body is kept byte-for-byte.
func TestScan(t *testing.T) {
	hosts := []string{"127.0.0.1"}
	ports := []string{"80"}
	payloads := make(map[uint16][]string)
	opts := goudpscan.NewOptions(true, 1, 0, 1)
	sc := goudpscan.New(hosts, ports, payloads, opts)
	// Create a context to stop the SniffICMP function
	ctx, cancel := context.WithCancel(context.Background())
	// Run the SniffICMP function in a separate goroutine
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		// net.ErrClosed is expected when cancel() shuts the listener down.
		if err := sc.SniffICMP(ctx, &wg); err != nil && !errors.Is(err, net.ErrClosed) {
			t.Errorf("SniffICMP failed: %v", err)
		}
	}()
	// Give the sniffer a moment to start listening before scanning.
	time.Sleep(250 * time.Millisecond)
	// Run the Scan function
	scanResult, err := sc.Scan(&log.Logger)
	if err != nil {
		t.Errorf("Scan failed: %v", err)
	}
	// Check the result of the scan
	expectedKey := "127.0.0.1:80"
	if _, ok := scanResult[expectedKey]; !ok {
		t.Errorf("Scan result does not contain the expected key: %s", expectedKey)
	}
	require.Len(t, scanResult, 1)
	// Stop the SniffICMP function
	cancel()
	wg.Wait()
}
|
package server
import (
"github.com/Alireza-Ta/goask/model"
"github.com/Alireza-Ta/goask/validation"
"github.com/gin-gonic/gin"
"net/http"
"net/url"
"strconv"
)
// ReplyStore manages encapsulated database access for replies.
type ReplyStore interface {
	// ListReply returns the replies matching the given query-string filters.
	ListReply(query url.Values) (model.Replies, error)
	// CreateReply persists a new reply.
	CreateReply(r *model.Reply) error
	// UpdateReply saves changes to an existing reply.
	UpdateReply(r *model.Reply) error
}

// ReplyAPI provides HTTP handlers for managing replies.
type ReplyAPI struct {
	store ReplyStore // backing storage, injected by the server wiring
}
// GetReplyList returns the list of replies matching the request's query string.
func (rapi *ReplyAPI) GetReplyList(c *gin.Context) {
	replies, err := rapi.store.ListReply(c.Request.URL.Query())
	if err != nil {
		JSONNotFound("Error finding replies list. ", err, c)
		return
	}
	c.JSON(http.StatusOK, replies)
}
// PostReply creates a reply under the question identified by the
// "question_id" URL parameter, using the request body for the reply text.
func (rapi *ReplyAPI) PostReply(c *gin.Context) {
	// claims := jwt.ExtractClaims(c)
	in := new(model.Reply)
	if err := c.ShouldBindJSON(in); err != nil {
		JSONValidation(validation.Messages(err), c)
		return
	}

	// BUG fix: the Atoi error was discarded, silently creating replies with
	// QuestionId == 0 for malformed URLs. NOTE(review): a 400-style responder
	// would fit better than the 500 helper if one exists.
	qid, err := strconv.Atoi(c.Param("question_id"))
	if err != nil {
		JSONInternalServer(err, c)
		return
	}

	r := new(model.Reply)
	r.Body = in.Body
	r.QuestionId = qid
	// r.AuthorID = claims["id"]
	if err := rapi.store.CreateReply(r); err != nil {
		JSONInternalServer(err, c)
		return
	}
	c.JSON(http.StatusOK, r)
}
// PatchReply updates an existing reply from the JSON request body.
func (rapi *ReplyAPI) PatchReply(c *gin.Context) {
	reply := new(model.Reply)
	if err := c.ShouldBindJSON(reply); err != nil {
		JSONValidation(validation.Messages(err), c)
		return
	}
	// if reply.Id == 0 {
	// 	rid, _ := strconv.Atoi(c.Param("reply_id"))
	// 	reply.Id = rid
	// }
	if err := rapi.store.UpdateReply(reply); err != nil {
		JSONInternalServer(err, c)
		return
	}
	c.JSON(http.StatusOK, reply)
}
|
package main
import (
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"sort"
"strings"
"time"
"github.com/caarlos0/env"
)
// config holds the environment-driven settings for the leaderboard server.
type config struct {
	GithubToken     string `env:"GHTOKEN"`                          // optional GitHub API token (raises rate limits)
	Authors         string `env:"AUTHORS"`                          // colon-separated GitHub logins to track
	RequiredPRCount int    `env:"REQUIRED_PR_COUNT"`                // PRs needed to earn the "met" style
	RefreshInterval int    `env:"REFRESH_INTERVAL" envDefault:"1800"` // page auto-refresh, seconds
	Bozzes          string `env:"BOZZES"`                           // colon-separated logins styled as "bozz"
	Timezone        string `env:"TIMEZONE" envDefault:"UTC"`        // timezone for the "updated" stamp
}

// AuthorData is one row of the leaderboard.
type AuthorData struct {
	AuthorClass  string // CSS class ("bozz" or empty)
	Author       string // display name (GitHub name, or login as fallback)
	PrCount      int    // PRs in the Hacktoberfest window; -1 on fetch failure
	PrCountClass string // CSS class ("met-pr-count" or empty)
	AvatarURL    string // GitHub avatar image URL
}

// LeaderboardData is the template payload for the leaderboard page.
type LeaderboardData struct {
	AuthorData      []AuthorData
	RefreshInterval int
	Year            int
	UpdatedTime     string
}

// avatarResult holds the fields we read from the GitHub users API.
type avatarResult struct {
	AvatarURL string `json:"avatar_url"`
	Name      string `json:"name"`
}

// prCountResult holds the total from the GitHub search API.
type prCountResult struct {
	PrCount int `json:"total_count"`
}
// leaderboard renders the HTML leaderboard page.
func leaderboard(writer http.ResponseWriter, request *http.Request) {
	t := template.Must(template.ParseFiles("leaderboard.html"))
	authorData := getAuthorData()
	location, err := time.LoadLocation(cfg.Timezone)
	if err != nil {
		// Fall back to UTC when the configured timezone is invalid.
		location, _ = time.LoadLocation("UTC")
	}
	leaderboardData := LeaderboardData{
		AuthorData:      authorData,
		RefreshInterval: cfg.RefreshInterval,
		Year:            calcYear(),
		UpdatedTime:     time.Now().In(location).Format("2 Jan 2006 3:04 PM MST"),
	}
	// BUG fix: template execution errors were silently dropped.
	if err := t.Execute(writer, leaderboardData); err != nil {
		fmt.Printf("Failed to render leaderboard: %s\n", err)
	}
}
// leaderboardJSON serves the leaderboard rows as a JSON array.
func leaderboardJSON(writer http.ResponseWriter, request *http.Request) {
	// BUG fix: the Marshal error was discarded; now it yields a 500 instead of
	// writing an empty body.
	jsonString, err := json.Marshal(getAuthorData())
	if err != nil {
		http.Error(writer, "failed to build leaderboard", http.StatusInternalServerError)
		return
	}
	writer.Header().Set("Content-Type", "application/json")
	fmt.Fprintf(writer, "%s", jsonString)
}
// getAuthorData builds one AuthorData row per configured author, ordered by
// PR count descending (ties broken by name ascending).
func getAuthorData() []AuthorData {
	names := strings.Split(cfg.Authors, ":")
	data := make([]AuthorData, len(names))
	fmt.Printf("Authors: %v\n", names)

	// Authors listed in cfg.Bozzes get the "bozz" CSS class.
	bozzSet := make(map[string]bool)
	for _, b := range strings.Split(cfg.Bozzes, ":") {
		bozzSet[b] = true
	}

	for i, login := range names {
		avatar := getAvatar(login)

		class := ""
		if bozzSet[login] {
			class = "bozz"
		}

		// Use github login name if the `Name` field from the GitHub API is empty.
		displayName := avatar.Name
		if displayName == "" {
			displayName = login
		}

		count := getPrCount(login)
		countClass := ""
		if count >= cfg.RequiredPRCount {
			countClass = "met-pr-count"
		}

		data[i] = AuthorData{AuthorClass: class, Author: displayName, PrCount: count, PrCountClass: countClass, AvatarURL: avatar.AvatarURL}
		fmt.Printf("Author: %s, PR count: %d\n", data[i].Author, data[i].PrCount)
	}

	sort.Slice(data, func(i, j int) bool {
		// Equal counts fall back to alphabetical order.
		if data[i].PrCount == data[j].PrCount {
			return data[i].Author < data[j].Author
		}
		return data[i].PrCount > data[j].PrCount
	})
	return data
}
// getAvatar fetches the author's GitHub profile (avatar URL and display name).
// On any request failure it logs and returns the zero avatarResult.
func getAvatar(author string) avatarResult {
	response, err := makeAuthorizedRequest("https://api.github.com/users/%s", author)
	if response != nil {
		defer response.Body.Close()
	}
	if err != nil {
		// BUG fix: this used fmt.Println with a %s verb, printing the verb
		// literally instead of the error; Printf formats it correctly.
		fmt.Printf("Failed to fetch avatar. %s\n", err)
		return avatarResult{}
	}
	ghData, _ := ioutil.ReadAll(response.Body)
	result := avatarResult{}
	// A failed unmarshal leaves the zero result, which renders as "no avatar".
	json.Unmarshal(ghData, &result)
	return result
}
// getPrCount returns the number of public PRs the author created in the
// Hacktoberfest window (Sep 30 .. Oct 31, UTC-12), or -1 on request failure.
func getPrCount(author string) (prCount int) {
	year := calcYear()
	response, err := makeAuthorizedRequest("https://api.github.com/search/issues?q=created:%d-09-30T00:00:00-12:00..%d-10-31T23:59:59-12:00+type:pr+is:public+author:%s", year, year, author)
	if response != nil {
		defer response.Body.Close()
	}
	if err != nil {
		// BUG fix: this used fmt.Println with a %s verb, printing the verb
		// literally instead of the error; Printf formats it correctly.
		fmt.Printf("Failed to fetch PR count. %s\n", err)
		return -1
	}
	ghData, _ := ioutil.ReadAll(response.Body)
	result := prCountResult{}
	// A failed unmarshal leaves PrCount at 0, the safe default.
	json.Unmarshal(ghData, &result)
	return result.PrCount
}
// makeAuthorizedRequest GETs the URL built from urlFormat and arguments,
// attaching the GitHub token header when configured.
// The caller is responsible for closing the response body.
func makeAuthorizedRequest(urlFormat string, arguments ...interface{}) (*http.Response, error) {
	url := fmt.Sprintf(urlFormat, arguments...)
	// BUG fix: the NewRequest error was previously discarded, which would
	// panic on a nil request in client.Do for a malformed URL.
	request, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	if cfg.GithubToken != "" {
		request.Header.Set("Authorization", "token "+cfg.GithubToken)
	}
	client := &http.Client{}
	return client.Do(request)
}
// calcYear returns the Hacktoberfest year to query: the current year, or the
// previous year if this year's event (starting Sep 30, 00:00 at UTC-12, the
// earliest timezone) has not begun yet.
func calcYear() int {
	currentTime := time.Now()
	// BUG fix: this used to round-trip through a hand-built RFC822Z string and
	// silently ignored the parse error; constructing the time directly with
	// time.Date cannot fail.
	hacktoberfestStart := time.Date(currentTime.Year(), time.September, 30, 0, 0, 0, 0, time.FixedZone("UTC-12", -12*60*60))
	if currentTime.Before(hacktoberfestStart) {
		return currentTime.Year() - 1
	}
	return currentTime.Year()
}
// cfg is the global configuration, populated from the environment in main.
var cfg = config{}

// main parses the environment configuration and serves the static assets plus
// the leaderboard HTML and JSON endpoints on port 4000.
func main() {
	if err := env.Parse(&cfg); err != nil {
		fmt.Printf("%+v\n", err)
	}
	fs := http.FileServer(http.Dir("assets"))
	http.Handle("/", fs)
	http.HandleFunc("/leaderboard.json", leaderboardJSON)
	http.HandleFunc("/leaderboard", leaderboard)
	// BUG fix: ListenAndServe's error (e.g. port already in use) was ignored,
	// making startup failures silent.
	if err := http.ListenAndServe(":4000", nil); err != nil {
		fmt.Printf("server error: %+v\n", err)
	}
}
|
package views // import "github.com/jenkins-x/octant-jx/pkg/plugin/views"
import (
"html"
"strings"
"github.com/jenkins-x/jx-logging/v3/pkg/log"
v1 "github.com/jenkins-x/jx-api/v4/pkg/apis/jenkins.io/v1"
"github.com/jenkins-x/octant-jx/pkg/common/pluginctx"
"github.com/jenkins-x/octant-jx/pkg/common/viewhelpers"
"github.com/jenkins-x/octant-jx/pkg/plugin"
"github.com/vmware-tanzu/octant/pkg/plugin/service"
"github.com/vmware-tanzu/octant/pkg/store"
"github.com/vmware-tanzu/octant/pkg/view/component"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// RepositoriesViewConfig carries per-view state for the repositories table,
// currently just the filter on the Owner column.
type RepositoriesViewConfig struct {
	OwnerFilter component.TableFilter
}

// TableFilters returns the view's filters so they can be initialized and
// populated as table rows are built.
func (f *RepositoriesViewConfig) TableFilters() []*component.TableFilter {
	return []*component.TableFilter{&f.OwnerFilter}
}
// BuildRepositoriesView renders the Repositories page: a breadcrumb header
// plus a table of SourceRepository resources in the plugin's namespace,
// filterable by owner and sorted by name.
func BuildRepositoriesView(request service.Request, pluginContext pluginctx.Context) (component.Component, error) {
	ctx := request.Context()
	client := request.DashboardClient()
	// Fetch every SourceRepository custom resource in the current namespace.
	dl, err := client.List(ctx, store.Key{
		APIVersion: "jenkins.io/v1",
		Kind:       "SourceRepository",
		Namespace:  pluginContext.Namespace,
	})
	if err != nil {
		log.Logger().Infof("failed: %s", err.Error())
		return nil, err
	}
	log.Logger().Infof("got list of SourceRepository %d\n", len(dl.Items))
	header := viewhelpers.NewMarkdownText(viewhelpers.ToBreadcrumbMarkdown(plugin.RootBreadcrumb, "Repositories"))
	config := &RepositoriesViewConfig{}
	table := component.NewTableWithRows(
		"Repositories", "There are no Repositories!",
		component.NewTableCols("Owner", "Name", "Status"),
		[]component.TableRow{})
	// Convert each resource to a row; failures are logged and skipped so a
	// single bad resource does not break the whole view.
	for _, pa := range dl.Items {
		tr, err := toRepositoryTableRow(pa, config)
		if err != nil {
			log.Logger().Infof("failed to create Table Row: %s", err.Error())
			continue
		}
		if tr != nil {
			table.Add(*tr)
		}
	}
	// Sort by Name (reverse=false — presumably ascending) and wire up the
	// Owner filter whose values were collected during row conversion.
	table.Sort("Name", false)
	viewhelpers.InitTableFilters(config.TableFilters())
	table.AddFilter("Owner", config.OwnerFilter)
	flexLayout := component.NewFlexLayout("")
	flexLayout.AddSections(component.FlexLayoutSection{
		{Width: component.WidthFull, View: header},
		{Width: component.WidthFull, View: table},
	})
	return flexLayout, nil
}
// toRepositoryTableRow converts an unstructured SourceRepository into a table
// row, recording the repository's owner as an Owner-filter value on the way.
func toRepositoryTableRow(u unstructured.Unstructured, config *RepositoriesViewConfig) (*component.TableRow, error) {
	repo := &v1.SourceRepository{}
	if err := viewhelpers.ToStructured(&u, repo); err != nil {
		return nil, err
	}
	owner := repo.Spec.Org
	viewhelpers.AddFilterValue(&config.OwnerFilter, owner)
	row := component.TableRow{
		"Owner":  component.NewText(owner),
		"Name":   ToRepositoryName(repo),
		"Status": ToRepositoryStatus(repo),
	}
	return &row, nil
}
// ToRepositoryStatus renders the repository's webhook-registration state as a
// markdown/HTML icon: a check on success, a spinner while the webhook is being
// created, and a warning icon (error message in the tooltip) on failure.
// An absent or empty annotation produces empty status text.
func ToRepositoryStatus(r *v1.SourceRepository) component.Component {
	status := ""
	if r.Annotations != nil {
		value := strings.ToLower(r.Annotations["webhook.jenkins-x.io"])
		if value == "true" {
			status = `<clr-icon shape="check-circle" class="is-solid is-success" title="Webhook registered successfully"></clr-icon>`
		} else if value != "" {
			// Matches "creating"/"created" style in-progress markers.
			if strings.HasPrefix(value, "creat") {
				status = `<span class="spinner spinner-inline" title="Registering webhook..."></span>`
			} else {
				// BUG fix: the tooltip previously read "Webook".
				text := "Failed to register Webhook"
				message := r.Annotations["webhook.jenkins-x.io/error"]
				if message != "" {
					text += ": " + html.EscapeString(message)
				}
				status = `<clr-icon shape="warning-standard" class="is-solid is-danger" title="` + text + `"></clr-icon>`
			}
		}
	}
	return viewhelpers.NewMarkdownText(status)
}
// ToRepositoryName renders the repository name as a markdown link, choosing
// the first available target: URL, then HTTP clone URL, then a
// provider/org/repo path assembled from the spec.
func ToRepositoryName(r *v1.SourceRepository) component.Component {
	spec := &r.Spec
	link := spec.URL
	if link == "" {
		link = spec.HTTPCloneURL
	}
	if link == "" && spec.Org != "" && spec.Repo != "" {
		link = spec.Org + "/" + spec.Repo
		if spec.Provider != "" {
			link = spec.Provider + "/" + link
		}
	}
	return viewhelpers.NewMarkdownText(viewhelpers.ToMarkdownLink(spec.Repo, link))
}
|
package godb
import (
"bytes"
"unsafe"
)
// dbBTreeNode is the behavior shared by the database's btree node types.
type dbBTreeNode interface {
	hasKey(k []byte) int // returns index of matching key if it exists, otherwise -1
}
// node represents a btree's node of order M.
// if M is 128 a node will occupy 4096 bytes.
// to ensure that a node has only 4096 bytes,
// a fixed sized key of 24 bytes must be used
type node struct {
	numk int                // number of keys currently stored; only keys[:numk] are valid
	keys [M - 1][]byte      // key slots
	ptrs [M]unsafe.Pointer  // child/record pointers — NOTE(review): presumably children for internal nodes, records for leaves; confirm
	rent *node              // parent node ("rent" = parent); nil at the root
	leaf bool               // true for leaf nodes
}
// newNode allocates an empty internal (non-leaf) index node.
func newNode() *node {
	n := node{}
	return &n
}
// newLeaf allocates an empty leaf node.
func newLeaf() *node {
	n := node{}
	n.leaf = true
	return &n
}
// hasKey returns the index of k within the node's stored keys, or -1 when the
// key is not present.
func (n *node) hasKey(k []byte) int {
	for i, key := range n.keys[:n.numk] {
		if bytes.Equal(key, k) {
			return i
		}
	}
	return -1
}
|
package protocol
import (
)
// Message direction markers plus the RPC method and status identifiers shared
// by both ends of the agent protocol.
const (
	// Req/Res tag a message as a request or a response.
	Req = "req"
	Res = "res"
	// Method names understood by the agent.
	MethodNodeInfo   = "node.info"
	MethodAgentInfo  = "agent.info"
	MethodExecScript = "script.exec"
	MethodCreateFile = "file.create"
	// Result statuses.
	StatusOK    = "ok"
	StatusError = "error"
)

// Node describes a host and its network interfaces.
type Node struct {
	Hostname string `json:"hostname"`
	Nics     []*Nic `json:"nics"`
}

// Nic is a single network interface on a Node.
type Nic struct {
	Name      string `json:"name"`
	Ip4Addr   string `json:"ip4addr"`
	IsPrimary bool   `json:"is_primary"`
}

// Agent reports the agent's version.
type Agent struct {
	Version string `json:"version"`
}

// Command is one executable command with its arguments.
type Command struct {
	Command  string   `json:"command"`
	Args     []string `json:"args"`
	Restrict bool     `json:"restrict"` // NOTE(review): semantics not visible here — confirm with the executor
}

// CommandResult pairs a command with its captured output and exit code.
type CommandResult struct {
	Command  *Command `json:"command"`
	Output   string   `json:"output"`
	ExitCode int      `json:"exit_code"`
}

// Script is an ordered list of commands to execute.
type Script struct {
	Commands []*Command `json:"commands"`
}

// ScriptResult carries the per-command results and an overall success flag.
type ScriptResult struct {
	CommandResults []*CommandResult `json:"command_results"`
	Ok             bool             `json:"ok"`
}

// File describes a file to create: its path, mode, and content.
type File struct {
	Path    string `json:"path"`
	Mode    string `json:"mode"`
	Content string `json:"content"`
}
|
package export
import (
"fmt"
"io/ioutil"
"main/src/config"
"github.com/rs/zerolog/log"
"gopkg.in/yaml.v3"
)
// NodeExporterWinTelemetry configures the Windows exporter's metrics endpoint.
type NodeExporterWinTelemetry struct {
	Addr        string `yaml:"addr"`         // listen address, e.g. ":9200"
	Path        string `yaml:"path"`         // metrics path, e.g. "/metrics"
	MaxRequests int    `yaml:"max-requests"` // concurrent scrape request limit
}

// NodeExporterWinScrape holds scrape timing settings.
type NodeExporterWinScrape struct {
	TimeoutMargin float32 `yaml:"timeout-margin"`
}

// NodeExporterWinLog holds the exporter's log level.
type NodeExporterWinLog struct {
	Level string `yaml:"level"`
}

// NodeExporterWinService configures the service collector's WHERE filter.
type NodeExporterWinService struct {
	ServicesWhere string `yaml:"services-where"`
}

// NodeExporterWinCollector groups per-collector configuration.
type NodeExporterWinCollector struct {
	Service *NodeExporterWinService `yaml:"service"`
}

// NodeExporterWinCollectors lists the enabled collectors (comma-separated).
type NodeExporterWinCollectors struct {
	Enabled string `yaml:"enabled"`
}

// NodeExporterWin is the root of the generated Windows exporter config file.
type NodeExporterWin struct {
	Collectors *NodeExporterWinCollectors `yaml:"collectors"`
	Collector  *NodeExporterWinCollector  `yaml:"collector"`
	Log        *NodeExporterWinLog        `yaml:"log"`
	Scrape     *NodeExporterWinScrape     `yaml:"scrape"`
	Telemetry  *NodeExporterWinTelemetry  `yaml:"telemetry"`
}

// NodeExporterLinuxConfig is the docker-compose service entry for the Linux
// node-exporter container.
type NodeExporterLinuxConfig struct {
	Image         string   `yaml:"image"`
	ContainerName string   `yaml:"container_name"`
	Command       string   `yaml:"command"`
	Volumes       []string `yaml:"volumes"`
	Hostname      string   `yaml:"hostname"`
	Restart       string   `yaml:"restart"`
	Ports         []string `yaml:"ports"`
}

// NodeExporterLinuxServices is the compose "services" mapping.
type NodeExporterLinuxServices struct {
	NodeExporterLinuxConfig *NodeExporterLinuxConfig `yaml:"node-exporter"`
}

// NodeExporterLinux is the root of the generated docker-compose file.
type NodeExporterLinux struct {
	Version  string                     `yaml:"version"`
	Services *NodeExporterLinuxServices `yaml:"services"`
}

// NodeExporter bundles the Windows and Linux exporter configurations.
// NOTE(review): the field name "new" shadows the builtin; renaming would be a
// code change, so it is only flagged here ("new" = Windows, "nel" = Linux).
type NodeExporter struct {
	new *NodeExporterWin
	nel *NodeExporterLinux
}
// NewNodeExporter returns a NodeExporter pre-populated with default settings
// for both platforms; WriteToFile later overwrites the per-machine fields
// (hostnames, addresses, service filter).
func NewNodeExporter() *NodeExporter {
	return &NodeExporter{
		// Windows exporter defaults.
		new: &NodeExporterWin{
			Collectors: &NodeExporterWinCollectors{
				Enabled: "cpu,cs,logical_disk,net,os,service,system",
			},
			Collector: &NodeExporterWinCollector{
				Service: &NodeExporterWinService{
					ServicesWhere: "default windows hostname",
				},
			},
			Log: &NodeExporterWinLog{
				Level: "debug",
			},
			Scrape: &NodeExporterWinScrape{
				TimeoutMargin: 0.5,
			},
			Telemetry: &NodeExporterWinTelemetry{
				Addr:        ":9200",
				Path:        "/metrics",
				MaxRequests: 5,
			},
		},
		// Linux docker-compose defaults.
		nel: &NodeExporterLinux{
			Version: "3",
			Services: &NodeExporterLinuxServices{
				NodeExporterLinuxConfig: &NodeExporterLinuxConfig{
					Image:         "quay.io/prometheus/node-exporter",
					ContainerName: "node-exporter",
					Command:       "--web.listen-address=:9200",
					Volumes:       []string{"/:/host:ro"},
					Hostname:      "default linux hostname",
					Restart:       "always",
					Ports:         make([]string, 0),
				},
			},
		},
	}
}
// WriteToFile generates the Windows exporter config and the Linux
// docker-compose file for the given services and writes them under path
// (path is used as a filename prefix, so it should end with a separator).
// Both write failures are fatal.
func (ne *NodeExporter) WriteToFile(configs config.CombinedServices, path string) {
	pathWin := fmt.Sprintf("%swindows_config.yml", path)
	pathLinux := fmt.Sprintf("%sdocker-compose.yml", path)
	// Map node port -> machine id; duplicate ports collapse to the last entry.
	mapServiceNode := make(map[string]int32)
	for _, config := range configs {
		mapServiceNode[config.NodePort] = config.MachineId
	}
	// generate node_exporter
	// NOTE(review): map iteration order is random, and every field below except
	// the accumulated Ports slice is overwritten on each pass — with more than
	// one entry, the single written windows_config.yml (and the compose
	// hostname/command) reflects an arbitrary machine. Confirm whether one
	// file per machine was intended.
	for addr, machineId := range mapServiceNode {
		ne.new.Collector.Service.ServicesWhere = fmt.Sprintf("Name='Machine_%d'", machineId)
		ne.new.Telemetry.Addr = fmt.Sprintf(":%s", addr)
		ne.nel.Services.NodeExporterLinuxConfig.Hostname = fmt.Sprintf("Machine_%d", machineId)
		ne.nel.Services.NodeExporterLinuxConfig.Ports = append(ne.nel.Services.NodeExporterLinuxConfig.Ports, fmt.Sprintf("%s:%s", addr, addr))
		ne.nel.Services.NodeExporterLinuxConfig.Command = fmt.Sprintf("--web.listen-address=:%s", addr)
	}
	// write windows_config.yml
	dataWin, errWin := yaml.Marshal(ne.new)
	if errWin != nil {
		log.Fatal().Err(errWin).Msg("yaml marshal failed")
	}
	errWin = ioutil.WriteFile(pathWin, dataWin, 0644)
	if errWin != nil {
		log.Fatal().Err(errWin).Msg("write windows_config.yml failed")
	}
	log.Info().Str("path", pathWin).Msgf("write %s successful", pathWin)
	// write docker-compose.yml
	dataLinux, errLinux := yaml.Marshal(ne.nel)
	if errLinux != nil {
		log.Fatal().Err(errLinux).Msg("yaml marshal failed")
	}
	errLinux = ioutil.WriteFile(pathLinux, dataLinux, 0644)
	if errLinux != nil {
		log.Fatal().Err(errLinux).Msg("write docker-compose.yml failed")
	}
	log.Info().Str("path", pathLinux).Msgf("write %s successful", pathLinux)
}
// UnmarshalToStruct reads both generated config files back into the struct's
// fields, primarily as a round-trip sanity check. Read or parse failures are
// fatal.
func (ne *NodeExporter) UnmarshalToStruct() {
	data, err := ioutil.ReadFile("../config/node_exporter/windows_config.yml")
	if err != nil {
		log.Fatal().Err(err).Msg("read windows_config.yml failed")
	}
	err = yaml.Unmarshal(data, ne.new)
	if err != nil {
		log.Fatal().Err(err).Msg("unmarshal yaml failed")
	}
	log.Info().Interface("windows_config.yaml", ne.new).Msg("unmarshal success")
	data, err = ioutil.ReadFile("../config/node_exporter/docker-compose.yml")
	if err != nil {
		log.Fatal().Err(err).Msg("read docker-compose.yml failed")
	}
	err = yaml.Unmarshal(data, ne.nel)
	if err != nil {
		log.Fatal().Err(err).Msg("unmarshal yaml failed")
	}
	// BUG fix: this previously logged ne.new (the Windows config) instead of
	// the docker-compose struct that was just unmarshalled.
	log.Info().Interface("docker_compose.yaml", ne.nel).Msg("unmarshal success")
}
|
package main
import (
"billable/api"
"billable/config"
"flag"
"fmt"
"log"
"net/http"
// "os"
_ "github.com/denisenkom/go-mssqldb"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
var (
	// app is the shared API application (router, log path) wired up in main.
	app = &api.App{}
)
// main loads configuration, wires routing and CORS, and serves the billable
// API on the configured port.
func main() {
	var configDir string
	flag.StringVar(&configDir, "config", ".", "directory location of config file")
	flag.Parse()

	if configDir == "" {
		fmt.Printf("----------------------------------------------------------\n")
		flag.Usage()
		fmt.Printf("----------------------------------------------------------\n")
	}

	conf := config.Data{}
	_ = conf.Init(configDir)

	app.Router = mux.NewRouter()
	app.LogPath = conf.LogPath
	app.RegisterRoutes()

	serviceAddress := ":" + conf.Port
	// if port, ok := os.LookupEnv("BILLABLEAPI_PORT"); ok {
	// 	serviceAddress = ":" + port
	// }

	corsMiddleware := cors.New(cors.Options{
		AllowedOrigins: []string{"*"},
		AllowedMethods: []string{http.MethodGet, http.MethodPost, http.MethodOptions},
		AllowedHeaders: []string{"Accept", "Accept-Encoding", "Content-Type", "*"},
		AllowOriginFunc: func(origin string) bool {
			return true
		},
	})

	log.Println("Server started and listening on port ", serviceAddress)
	log.Fatal(http.ListenAndServe(serviceAddress, corsMiddleware.Handler(app.Router)))
}
|
package ravendb
// CompareExchangeValue represents a value stored via the compare-exchange
// mechanism: a key, its index, and the associated value.
type CompareExchangeValue struct {
	Key   string
	Index int64
	Value interface{}
}

// NewCompareExchangeValue returns a CompareExchangeValue populated with the
// given key, index, and value.
func NewCompareExchangeValue(key string, index int64, value interface{}) *CompareExchangeValue {
	cev := CompareExchangeValue{}
	cev.Key = key
	cev.Index = index
	cev.Value = value
	return &cev
}
|
/*
1 slice_reversal.go
// reverse reverses the contents of s in place
func reverse(s []int)
Output some slices before and after reversal.
*/
package main
import (
	"fmt"
	"math/rand"
	"sort"
)
// reverse reverses the contents of s in place.
func reverse(s []int) {
	// Resolves the original TODO: Go's for clause supports multiple variables
	// via tuple assignment in the init and post statements.
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}
// main generates 10 random slices and prints each one before and after
// reversing it in place.
func main() {
	for count := 0; count < 10; count++ {
		s := randomSlice()
		fmt.Printf("before: %v\n", s)
		reverse(s)
		fmt.Printf("\n")
		fmt.Printf("after: %v\n", s)
		fmt.Printf("---\n")
	}
}
// randomSlice returns a sorted slice of length 1-50 filled with random
// numbers in [0, 100).
func randomSlice() []int {
	temp := make([]int, rand.Intn(50)+1) // +1 because rand.Intn can return 0
	for i := range temp {
		temp[i] = rand.Intn(100)
	}
	// BUG fix: sort.SortInts does not exist in the sort package (this did not
	// compile); the function that sorts an []int in place is sort.Ints.
	sort.Ints(temp)
	return temp
}
|
package redis
import "time"
// Redis connection settings.
const (
	Host = "127.0.0.1"
	Port = 6379
)

// Cache behavior settings.
const (
	// SetTimeExp is the expiration (TTL) applied to cached keys.
	// NOTE(review): the original comment said "second" but the value is one
	// minute — confirm which was intended.
	SetTimeExp = 1 * time.Minute
)

// Cache key format strings; each takes a user id via fmt.Sprintf.
const (
	// key for detail data user profile
	UserProfile = "user_profile:id:%d"
	// key for detail data user family
	UserFamily = "user_family:id:%d"
	// key for detail data user transportation
	UserTransportation = "user_transportation:id:%d"
)
|
package tq
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestManifestDefaultsToFixedRetries verifies the manifest's default retry cap.
func TestManifestDefaultsToFixedRetries(t *testing.T) {
	m := NewManifest()
	assert.Equal(t, 8, m.MaxRetries())
}

// TestRetryCounterDefaultsToFixedRetries verifies a fresh counter's retry cap.
func TestRetryCounterDefaultsToFixedRetries(t *testing.T) {
	counter := newRetryCounter()
	assert.Equal(t, 8, counter.MaxRetries)
}

// TestRetryCounterIncrementsObjects verifies per-OID increment bookkeeping.
func TestRetryCounterIncrementsObjects(t *testing.T) {
	counter := newRetryCounter()
	counter.Increment("oid")
	assert.Equal(t, 1, counter.CountFor("oid"))
}

// TestRetryCounterCanNotRetryAfterExceedingRetryCount verifies that an object
// at its retry limit is reported as non-retryable.
func TestRetryCounterCanNotRetryAfterExceedingRetryCount(t *testing.T) {
	counter := newRetryCounter()
	counter.MaxRetries = 1
	counter.Increment("oid")
	count, canRetry := counter.CanRetry("oid")
	assert.Equal(t, 1, count)
	assert.False(t, canRetry)
}
|
package process
import (
"context"
"errors"
"net"
"strings"
"time"
"github.com/coredns/coredns/plugin"
"github.com/miekg/dns"
"github.com/ajruckman/ContraCore/internal/functions"
"github.com/ajruckman/ContraCore/internal/system"
)
// DNS is the core query handler. It builds a queryContext from the incoming
// question, annotates it with DHCP lease details, then walks the decision
// chain — whitelist, blacklist, local hostname/PTR responders, DomainNeeded —
// before handing the query to the next CoreDNS plugin. The goto-based flow is
// order-sensitive, so the code is kept byte-for-byte.
func DNS(name string, next plugin.Handler, ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	// https://stackoverflow.com/a/4083071/9911189
	// https://groups.google.com/forum/#!topic/comp.protocols.dns.bind/uOWxNkm7AVg
	// In practice a DNS message carries exactly one question (see links above).
	if len(r.Question) != 1 {
		return 0, errors.New("this should never happen")
	}
	q := queryContext{
		ResponseWriter: w,
		r:              r,
		_question:      r.Question[0],
		_domain:        functions.RT(r.Question[0].Name),
		_client:        net.ParseIP(strings.Split(w.RemoteAddr().String(), ":")[0]),
		received:       time.Now().UTC(),
	}
	// Record everything after the first label (e.g. "b.c" for "a.b.c").
	dcs := strings.Split(q._domain, ".")
	if len(dcs) > 1 {
		suffix := strings.Join(dcs[1:], ".")
		q._suffix = &suffix
	}
	// Probe escape hatch: "!runprobe" is answered with a sentinel RCODE.
	if q._domain == "!runprobe" {
		return dns.RcodeSuccess, w.WriteMsg(responseWithCode(r, 15)) // 15 = max valid unassigned RCODE
	}
	// Enrich the query with lease details (MAC/hostname/vendor) when known.
	lease, found := getLeaseByIP(q._client)
	if found {
		m := lease.MAC.String()
		q.mac = &m
		q.hostname = lease.Hostname
		q.vendor = lease.Vendor
	}
	system.Console.Info("incoming: ", q.String())
	// Whitelisted queries skip the blacklist but still reach the local responders.
	if whitelisted := whitelist(&q); whitelisted {
		q.action = ActionWhitelisted // Might be overwritten by other returners
		goto skip
	}
	if ret, rcode, err := blacklist(&q); ret {
		return rcode, err
	}
skip:
	// Always check this; queries with search domains will contain periods
	if ret, rcode, err := respondByHostname(&q); ret {
		return rcode, err
	}
	if ret, rcode, err := respondByPTR(&q); ret {
		return rcode, err
	}
	// TODO: strip search domain to check DomainNeeded safely
	if system.DomainNeeded && (strings.Count(q._domain, ".") == 0 || q._matchedSearchDomain != nil) {
		if q._question.Qtype == dns.TypeNS && q._domain == "" {
			// Permit looking up root servers
			goto next
		}
		if q._matchedSearchDomain == nil {
			system.Console.Infof("DomainNeeded is true and question '%s' does not contain any periods; returning NXDomain", q._domain)
		} else {
			system.Console.Infof("DomainNeeded is true and question '%s' has a search domain that was not found; returning NXDomain", q._domain)
		}
		q.action = ActionDomainNeeded
		m := responseWithCode(r, dns.RcodeNameError)
		err := q.respond(m)
		// NOTE(review): the response message carries NXDomain but the returned
		// code is RcodeRefused — confirm the mismatch is intentional.
		return dns.RcodeRefused, err
	}
next:
	q.action = ActionNotBlacklisted
	return plugin.NextOrFailure(name, next, ctx, q, r)
}
|
package args
// Initiative describes who triggers an update: never, automatically, or
// manually by the user.
type Initiative string

const (
	// BUG fix: only the first constant carried the Initiative type; AUTO and
	// MANUAL were untyped string constants, not Initiative values.
	INITIATIVE_NEVER  Initiative = "never"
	INITIATIVE_AUTO   Initiative = "auto"
	INITIATIVE_MANUAL Initiative = "manual"
)
// PatchType represents the type of a binary patch, if any. Only bsdiff is supported.
type PatchType string

const (
	// BUG fix: PATCHTYPE_NONE was an untyped string constant, not a PatchType.
	PATCHTYPE_BSDIFF PatchType = "bsdiff"
	PATCHTYPE_NONE   PatchType = ""
)
// Params represent the parameters sent by the go-update client when it asks
// whether an update is available.
type Params struct {
	// protocol version
	Version int `json:"version"`
	// identifier of the application to update
	//AppId string `json:"app_id"`
	// version of the application updating itself
	AppVersion string `json:"app_version"`
	// operating system of target platform
	OS string `json:"os"`
	// hardware architecture of target platform
	Arch string `json:"arch"`
	// application-level user identifier
	//UserId string `json:"user_id"`
	// checksum of the binary to replace (used for returning diff patches)
	Checksum string `json:"checksum"`
	// release channel (empty string means 'stable')
	//Channel string `json:"-"`
	// tags for custom update channels
	Tags map[string]string `json:"tags"`
}
// Result represents the answer sent back to the go-update client, describing
// where to fetch the update (or a patch) and how to verify it.
type Result struct {
	// should the update be applied automatically/manually
	Initiative Initiative `json:"initiative"`
	// url where to download the updated application
	URL string `json:"url"`
	// a URL to a patch to apply
	PatchURL string `json:"patch_url"`
	// the patch format (only bsdiff supported at the moment)
	PatchType PatchType `json:"patch_type"`
	// version of the new application
	Version string `json:"version"`
	// expected checksum of the new application
	Checksum string `json:"checksum"`
	// signature for verifying update authenticity
	Signature string `json:"signature"`
}
|
package main // version 0.0.3
import (
"bufio"
"fmt"
"os"
"strings"
"time"
"github.com/gempir/go-twitch-irc"
"github.com/sirpinwheel/overseer/handlers"
"github.com/sirpinwheel/overseer/settings"
"github.com/sirpinwheel/overseer/utils"
)
// BotClient is the exported Twitch IRC client connection.
var BotClient *twitch.Client = twitch.NewClient(settings.BOT, settings.OAUTH)

// ticker drives the periodic point-granting task in start().
var ticker *time.Ticker = time.NewTicker(settings.PERIOD)

// consoleHandlerMap maps commands typed at the bot's stdin prompt to their
// handlers; the argument is the remainder of the input line.
var consoleHandlerMap = map[string]func(string){
	"stop": func(arguments string) {
		stop()
	},
	"say": func(arguments string) {
		BotClient.Say(settings.CHANNEL, arguments)
	},
}

// adminHandlerMap maps chat commands that only the channel owner may invoke.
var adminHandlerMap = map[string]func(*twitch.PrivateMessage){
	"stop": func(msg *twitch.PrivateMessage) {
		stop()
	},
}
// start launches the bot's background goroutines once connected: one granting
// points to current viewers on each tick, one reading console commands.
func start() {
	// Greeting
	fmt.Println("Connected to #" + settings.CHANNEL + " as " + settings.BOT)
	fmt.Println("- - - - - - - - - - - - - - - - - - - - - - -")
	// Goroutine for periodic task of giving current viewers a point
	go func() {
		for range ticker.C {
			users, err := BotClient.Userlist(settings.CHANNEL)
			if err != nil {
				panic(err)
			}
			utils.GrantPoint(&users)
		}
	}()
	// Goroutine for handling console input
	go func() {
		reader := bufio.NewReader(os.Stdin)
		for {
			fmt.Print(">> ")
			text, err := reader.ReadString('\n')
			if err != nil {
				// BUG fix: the read error was ignored, so on EOF (stdin closed
				// or piped input exhausted) this loop spun forever reprinting
				// the prompt. Stop reading instead.
				return
			}
			text = strings.TrimRight(text, "\r\n")
			split := strings.SplitN(text, " ", 2)
			command := split[0]
			arguments := ""
			if len(split) > 1 {
				arguments = split[1]
			}
			// Direct lookup instead of scanning every map entry.
			if handler, ok := consoleHandlerMap[command]; ok {
				handler(arguments)
			}
		}
	}()
}
// stop halts the point ticker and disconnects the bot, panicking if the
// disconnect fails.
func stop() {
	err := BotClient.Disconnect()
	ticker.Stop()
	if err != nil {
		panic(err)
	}
}
// main registers the chat-message hook, joins the configured channel, and
// blocks on the IRC connection until the bot disconnects.
func main() {
	// Hook / callback for general message type sent in chat
	BotClient.OnPrivateMessage(func(message twitch.PrivateMessage) {
		// Check if message is not empty
		if len(message.Message) != 0 {
			command := strings.TrimPrefix(message.Message, settings.PREFIX)
			// Owner-only commands. Direct map lookups replace the previous
			// full-map scans comparing every key (same behavior, O(1)).
			if message.User.Name == settings.CHANNEL {
				if handler, ok := adminHandlerMap[command]; ok {
					handler(&message)
				}
			}
			// Check if message begins with prefix (a.k.a. is a command)
			if strings.HasPrefix(message.Message, settings.PREFIX) {
				if handler, ok := handlers.Handlers[command]; ok {
					handler(BotClient, &message)
				}
			}
		}
	})
	// Joining channel
	BotClient.Join(settings.CHANNEL)
	BotClient.OnConnect(start)
	fmt.Println("Connecting...")
	err := BotClient.Connect()
	if err != nil {
		// A deliberate stop() surfaces as this sentinel error; anything else is fatal.
		if !strings.Contains(err.Error(), "client called Disconnect()") {
			panic(err)
		}
	}
}
|
package database
import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"github.com/tahmooress/motor-shop/internal/entities/models"
)
// CreateBuyFactor stores a purchase factor (invoice) atomically: within one
// transaction it inserts the motors and their line items, resolves (or
// creates) the customer, writes the factor row, stocks the motors into the
// shop inventory, records the equities as DEBTOR payables with their due
// dates, and logs the paid amount as an EQUITY transaction. On success the
// committed factor is returned with the generated IDs filled in.
//
// NOTE(review): an empty factor.Motors or factor.Equities leaves the bulk
// INSERT without any VALUES tuple, which will fail at the database — confirm
// callers always supply at least one of each.
func (m *Mysql) CreateBuyFactor(ctx context.Context, factor models.Factor, shopID models.ID) (*models.Factor, error) {
	tx, err := m.db.Begin()
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateFactor >> db.Begin() >> %w", err)
	}
	// No-op after a successful Commit; undoes everything on early return.
	defer tx.Rollback()
	// Build the bulk INSERTs for motors and items in a single pass.
	motorStatment := "INSERT IGNORE INTO motors(id, model_name, pelak_number," +
		" body_number,color, model_year) VALUES "
	motorVals := ""
	motorArgs := make([]interface{}, 0)
	iSmtm := "INSERT INTO items(id,pelak_number, factor_number) VALUES"
	itemVals := ""
	itemsArgs := make([]interface{}, 0)
	for i, motor := range factor.Motors {
		motorVals += "(?,?,?,?,?,?),"
		// Assign the generated ID on the slice so it is visible to the
		// inventory loop below and in the returned factor.
		factor.Motors[i].ID = models.NewID()
		motorArgs = append(motorArgs, factor.Motors[i].ID, motor.ModelName, motor.PelakNumber,
			motor.BodyNumber, motor.Color, motor.ModelYear)
		itemVals += "(?,?,?),"
		itemsArgs = append(itemsArgs, models.NewID(), motor.PelakNumber, factor.FactorNumber)
	}
	// Drop the trailing comma of each VALUES list.
	motorStatment += strings.TrimRight(motorVals, ",")
	iSmtm += strings.TrimRight(itemVals, ",")
	mStmt, err := tx.PrepareContext(ctx, motorStatment)
	if err != nil {
		return nil, fmt.Errorf("mysql >> Preparecontext() >> %w", err)
	}
	defer mStmt.Close()
	_, err = mStmt.ExecContext(ctx, motorArgs...)
	if err != nil {
		return nil, fmt.Errorf("mysql >> ExecContext() >> %w", err)
	}
	itemsStmt, err := tx.PrepareContext(ctx, iSmtm)
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateBuyFactor >> PrepareContext >> %w", err)
	}
	defer itemsStmt.Close()
	// Resolve the customer: a zero ID means "look up by mobile, create if
	// unknown"; a non-zero ID is loaded to fill in the full customer record.
	if factor.Customer.ID.ID() == 0 {
		c, e := m.getCustomerByMobile(ctx, factor.Customer.Mobile)
		if e != nil {
			if errors.Is(e, sql.ErrNoRows) {
				id, err := m.CreateCustomerWithTX(ctx, tx, factor.Customer)
				if err != nil {
					return nil, fmt.Errorf("mysql >> CreateFactor >> %w", err)
				}
				factor.Customer.ID = *id
			} else {
				return nil, e
			}
		}
		if e == nil {
			factor.Customer.ID = c.ID
		}
	} else if factor.Customer.ID.ID() != 0 { // condition is always true here (else of == 0)
		customer, err := m.GetCustomerByID(ctx, factor.Customer.ID)
		if err != nil {
			return nil, fmt.Errorf("mysql >> CreateBuyFactor >> %w", err)
		}
		factor.Customer = *customer
	}
	factorID := models.NewID()
	factorStmt, err := tx.PrepareContext(ctx, "INSERT INTO factors(id, customer_id, type, shop_id,factor_number,"+
		"total_amount, payed_amount, created_at) VALUES(?,?,?,?,?,?,?,?)")
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateFactor >> PrepareContext() >> %w", err)
	}
	defer factorStmt.Close()
	_, err = factorStmt.ExecContext(ctx, factorID, factor.Customer.ID, models.BUY, shopID, factor.FactorNumber,
		factor.TotalAmount, factor.PayedAmount, factor.CreatedAt)
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateFactor >> ExecContext() >> %w", err)
	}
	// Items reference the factor via factor_number, so they are written after
	// the factor row.
	_, err = itemsStmt.ExecContext(ctx, itemsArgs...)
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateBuyFactor >> ExecContext() >> %w", err)
	}
	// Stock every purchased motor into this shop's inventory.
	inventoryStmt := "INSERT IGNORE INTO shop_inventory(id, shop_id, motor_id, factor_number) VALUES "
	inventoryVlas := ""
	inventoryArgs := make([]interface{}, 0)
	for _, motor := range factor.Motors {
		inventoryVlas += "(?,?,?,?),"
		inventoryArgs = append(inventoryArgs, models.NewID(), shopID, motor.ID, factor.FactorNumber)
	}
	inventoryStmt += strings.TrimRight(inventoryVlas, ",")
	iStmt, err := tx.PrepareContext(ctx, inventoryStmt)
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateFactor >> PrepareContext() >> %w", err)
	}
	defer iStmt.Close()
	_, err = iStmt.ExecContext(ctx, inventoryArgs...)
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateFactor >> ExecContext() >> %w", err)
	}
	// Record the payment schedule: each equity becomes a DEBTOR payable that
	// clears at its DueDate.
	paysStmt := "INSERT INTO shop_payable(id, customer_id, factor_number, shop_id, " +
		"amount, status, clear_date) VALUES "
	payVals := ""
	payArgs := make([]interface{}, 0)
	for i, pay := range factor.Equities {
		payVals += "(?,?,?,?,?,?,?),"
		factor.Equities[i].ID = models.NewID()
		factor.Equities[i].Status = models.DEBTOR
		payArgs = append(payArgs, factor.Equities[i].ID, factor.Customer.ID, factor.FactorNumber, shopID,
			pay.Amount, factor.Equities[i].Status, pay.DueDate)
	}
	paysStmt += strings.TrimRight(payVals, ",")
	pStmt, err := tx.PrepareContext(ctx, paysStmt)
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateFactor >> PrepareContext() >> %w", err)
	}
	defer pStmt.Close()
	_, err = pStmt.ExecContext(ctx, payArgs...)
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateFactor >> ExecContext() >> %w", err)
	}
	// Log the up-front payment as an EQUITY transaction in the shop ledger.
	txStmt, err := tx.PrepareContext(ctx, "INSERT INTO transactions(id, shop_id, type, subject,"+
		"amount, description,factor_number,created_at) VALUES(?,?,?,?,"+
		"?,?,?,?)")
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateBuyFactor >> PrepareContext() >> %w", err)
	}
	defer txStmt.Close()
	_, err = txStmt.ExecContext(ctx, models.NewID(), shopID, models.PAYED, models.EQUITY,
		factor.PayedAmount, "",factor.FactorNumber, factor.CreatedAt)
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateBuyFactor >> ExecContext() >> %w", err)
	}
	err = tx.Commit()
	if err != nil {
		return nil, fmt.Errorf("mysql >> CreateFactor >> tx.Commit() >> %w", err)
	}
	return &models.Factor{
		ID: factorID,
		FactorNumber: factor.FactorNumber,
		PayedAmount: factor.PayedAmount,
		TotalAmount: factor.TotalAmount,
		Motors: factor.Motors,
		Customer: factor.Customer,
		Equities: factor.Equities,
		CreatedAt: factor.CreatedAt,
		UpdatedAt: factor.UpdatedAt,
	}, nil
}
|
package selector
import (
"context"
"errors"
"math/rand"
)
// RandomSelect is a node-selection strategy that picks nodes at random.
type RandomSelect struct{}

// Name returns the identifier of this selection strategy.
func (r *RandomSelect) Name() string {
	const strategy = "random"
	return strategy
}
// Select returns one node chosen at random from the candidate set, with each
// node replicated Weight times in the lookup table (a zero weight counts as
// one). Returns an error when nodes is empty.
//
// NOTE(review): Weight is a package-level value applied to every node alike;
// if per-node weighting was intended this should read node.Weight — confirm.
func (r *RandomSelect) Select(ctx context.Context, nodes []*Node) (*Node, error) {
	if len(nodes) == 0 {
		return nil, errors.New("NO_NODES")
	}
	indexMap := make(map[int]*Node)
	index := 0
	for _, node := range nodes {
		// Work on a local copy: the original wrote back to the shared
		// package-level Weight, which is a data race when Select is called
		// concurrently and permanently mutates global state.
		weight := Weight
		if weight == 0 {
			weight = 1
		}
		for i := 0; i < weight; i++ {
			indexMap[index] = node
			index++
		}
	}
	return indexMap[rand.Intn(index)], nil
}
|
package events
import (
"fmt"
"github.com/Phala-Network/go-substrate-rpc-client/v3/types"
"github.com/vedhavyas/go-subkey/scale"
)
// BlockRewardInfo mirrors the chain's block reward event payload: a
// randomness seed plus the online and compute targets for the block.
type BlockRewardInfo struct {
	Seed types.U256 `json:"seed"`
	OnlineTarget types.U256 `json:"onlineTarget"`
	ComputeTarget types.U256 `json:"computeTarget"`
}
// PayoutReason is a one-byte enum describing why a payout occurred; Decode
// accepts only the two values below.
type PayoutReason byte

const (
	OnlineReward PayoutReason = 0
	ComputeReward PayoutReason = 1
)
// Decode reads one byte from the SCALE stream and validates it as a known
// PayoutReason variant.
func (v *PayoutReason) Decode(decoder scale.Decoder) error {
	b, err := decoder.ReadOneByte()
	// Check the read error before interpreting the byte; the original
	// validated a possibly-garbage value first and returned err last.
	if err != nil {
		return err
	}
	switch vb := PayoutReason(b); vb {
	case OnlineReward, ComputeReward:
		*v = vb
	default:
		// The original message named the wrong type ("VoteThreshold").
		return fmt.Errorf("unknown PayoutReason enum: %v", vb)
	}
	return nil
}
// Encode writes the reason as its single-byte SCALE representation.
func (v PayoutReason) Encode(encoder scale.Encoder) error {
	raw := byte(v)
	return encoder.PushByte(raw)
}
|
// +build windows
package adasync
import (
"fmt"
"testing"
"time"
)
// TestScanDrives exercises drive hot-plug detection interactively: it asks a
// human to insert and then remove a drive and checks that detectDrives
// reports exactly one added and one missing drive. Skipped by default.
func TestScanDrives(t *testing.T) {
	t.Skip() //annoying test requiring human interaction
	drives = make([]string, 0) // reset the package-level drive cache
	detectDrives()
	fmt.Println(drives)
	fmt.Println("Plug in drive")
	time.Sleep(15 * time.Second) // give the tester time to insert a drive
	newDrives, _ := detectDrives()
	if len(newDrives) != 1 {
		t.Error("No new drive detected")
	}
	fmt.Println(newDrives, drives)
	fmt.Println("Remove drive")
	time.Sleep(15 * time.Second) // give the tester time to remove the drive
	_, missingDrives := detectDrives()
	if len(missingDrives) != 1 {
		t.Error("No drive missing")
	}
}
// TestFullAndQuick runs fullScan, waits for a human to plug in a drive, then
// runs quickScan; results are printed for manual inspection. Skipped by
// default.
func TestFullAndQuick(t *testing.T) {
	t.Skip() //annoying test requiring human interaction
	drives = make([]string, 0) // reset the package-level drive cache
	fmt.Println(fullScan())
	fmt.Println("Plug in drive")
	time.Sleep(15 * time.Second) // give the tester time to insert a drive
	fmt.Println(quickScan())
}
|
package godash
import (
"fmt"
"reflect"
)
// Filter out elements that fail the predicate.
//
// Input of type slice is supported as of now.
// Output is a slice in which filtered-in elements are stored.
// PredicateFn function is applied on each element of input to determine to filter or not
//
// Validations:
//
// 1. Input and Output's slice should be of same type
// 2. Predicate must be a function taking exactly one argument and returning one value
// 3. Predicate's return argument is always boolean.
// 4. Predicate's input argument should be input/output slice's element type.
//
// Validation errors are returned to the caller.
func Filter(in, out, predicateFn interface{}) error {
	input := reflect.ValueOf(in)
	output := reflect.ValueOf(out)
	if err := validateOut(output); err != nil {
		return err
	}
	if input.Type() != output.Elem().Type() {
		return fmt.Errorf("input(%s) and output(%s) should be of the same Type", input.Type(), output.Elem().Type())
	}
	predicate := reflect.ValueOf(predicateFn)
	// Validate the predicate's shape before touching In(0)/Out(0); the
	// original panicked on a non-function or zero-argument predicate.
	if predicate.Kind() != reflect.Func {
		return fmt.Errorf("predicate should be a function and not a (%s)", predicate.Kind())
	}
	if predicate.Type().NumIn() != 1 {
		return fmt.Errorf("predicate function should take exactly one argument")
	}
	if predicate.Type().NumOut() != 1 {
		return fmt.Errorf("predicate function should return only one return value - a boolean")
	}
	if predicateType := predicate.Type().Out(0).Kind(); predicateType != reflect.Bool {
		return fmt.Errorf("predicate function should return only a (boolean) and not a (%s)", predicateType)
	}
	if input.Kind() != reflect.Slice {
		return fmt.Errorf("not implemented")
	}
	if input.Type().Elem().Kind() != predicate.Type().In(0).Kind() {
		return fmt.Errorf(
			"predicate function's first argument has to be the type (%s) instead of (%s)",
			input.Type().Elem(),
			predicate.Type().In(0),
		)
	}
	// Collect every element for which the predicate reports true.
	result := reflect.MakeSlice(output.Elem().Type(), 0, input.Len())
	for i := 0; i < input.Len(); i++ {
		arg := input.Index(i)
		if predicate.Call([]reflect.Value{arg})[0].Bool() {
			result = reflect.Append(result, arg)
		}
	}
	output.Elem().Set(result)
	return nil
}
|
package api
// CurrentLoginName reports the username the client is authenticated as on
// the given GitHub hostname.
func CurrentLoginName(client *Client, hostname string) (string, error) {
	var resp struct {
		Viewer struct {
			Login string
		}
	}
	err := client.Query(hostname, "UserCurrent", &resp, nil)
	return resp.Viewer.Login, err
}
// CurrentUserID reports the node ID of the user the client is authenticated
// as on the given GitHub hostname.
func CurrentUserID(client *Client, hostname string) (string, error) {
	var resp struct {
		Viewer struct {
			ID string
		}
	}
	err := client.Query(hostname, "UserCurrent", &resp, nil)
	return resp.Viewer.ID, err
}
|
// Copyright 2020 Insolar Network Ltd.
// All rights reserved.
// This material is licensed under the Insolar License version 1.0,
// available at https://github.com/insolar/block-explorer/blob/master/LICENSE.md.
package main
import (
"context"
"fmt"
"github.com/insolar/insconfig"
"gopkg.in/gormigrate.v1"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
"github.com/insolar/block-explorer/configuration"
"github.com/insolar/block-explorer/instrumentation/belogger"
"github.com/insolar/block-explorer/migrations"
)
// main loads the DB configuration (env prefix "migrate"), connects to
// PostgreSQL through GORM and applies the project's gormigrate migrations.
//
// NOTE(review): if Fatalf terminates the process the deferred db.Close will
// not run — confirm belogger's Fatalf semantics (the explicit return after
// it suggests it may not exit).
func main() {
	cfg := &configuration.DB{}
	params := insconfig.Params{
		EnvPrefix: "migrate",
		ConfigPathGetter: &insconfig.DefaultPathGetter{},
	}
	insConfigurator := insconfig.New(params)
	if err := insConfigurator.Load(cfg); err != nil {
		panic(err)
	}
	fmt.Println("Starts with configuration:\n", insConfigurator.ToYaml(cfg))
	ctx := context.Background()
	log := belogger.FromContext(ctx)
	db, err := gorm.Open("postgres", cfg.URL)
	if err != nil {
		log.Fatalf("Error while connecting to database: %s", err.Error())
		return
	}
	defer db.Close()
	// Log every SQL statement through the project logger.
	db = db.LogMode(true)
	db.SetLogger(belogger.NewGORMLogAdapter(log))
	m := gormigrate.New(db, migrations.MigrationOptions(), migrations.Migrations())
	if err = m.Migrate(); err != nil {
		log.Fatalf("Could not migrate: %v", err)
		return
	}
	log.Info("migrated successfully!")
}
|
package ml
import (
"bytes"
"fmt"
tf "github.com/galeone/tensorflow/tensorflow/go"
"github.com/galeone/tensorflow/tensorflow/go/op"
tg "github.com/galeone/tfgo"
"image"
"io"
"io/ioutil"
"os"
"strings"
)
// Coco bundles the loaded SavedModel with its class labels.
type Coco struct {
	model *tg.Model
	labels []string
}

// path is the SavedModel directory (note the trailing slash); Load also
// reads labels.txt from the same directory.
const path = "files/models/my_model/"
// NewCoco returns a Coco object; call Load before using Predict.
func NewCoco() *Coco {
	coco := new(Coco)
	return coco
}
// readLabels loads the newline-separated class labels used to translate
// model output indices into human-readable names.
func readLabels(labelsFile string) ([]string, error) {
	// os.ReadFile replaces the deprecated ioutil.ReadFile; %w keeps the
	// underlying error inspectable with errors.Is/As.
	fileBytes, err := os.ReadFile(labelsFile)
	if err != nil {
		return nil, fmt.Errorf("unable to read labels file: %w", err)
	}
	return strings.Split(string(fileBytes), "\n"), nil
}
// Load loads the my_model SavedModel and the labels that accompany it.
func (c *Coco) Load() error {
	c.model = tg.LoadModel(path, []string{"serve"}, nil)
	labels, err := readLabels(path + "labels.txt")
	c.labels = labels
	if err != nil {
		return fmt.Errorf("Error loading labels file: %v", err)
	}
	return nil
}
// Predict runs object detection on a JPEG image given as raw bytes and wraps
// the model output with the loaded labels.
//
// NOTE(review): the error from makeTensorFromBytes is discarded; on a bad
// image the tensor is nil and the session call presumably fails — consider
// surfacing the error to callers.
func (c *Coco) Predict(data []byte) *ObjectDetectionResponse {
	tensor, _ := makeTensorFromBytes(data)
	output := c.model.Exec(
		[]tf.Output{
			c.model.Op("StatefulPartitionedCall", 0),
		},
		map[tf.Output]*tf.Tensor{
			c.model.Op("serving_default_input_1", 0): tensor,
		},
	)
	outcome := NewObjectDetectionResponse(output, c.labels)
	return outcome
}
// makeTensorFromBytes converts raw JPEG bytes into a model-input tensor by
// running the decode/resize/normalize graph (makeTransformImageGraph) over a
// batch of one image.
func makeTensorFromBytes(bytes []byte) (*tf.Tensor, error) {
	// bytes to tensor
	tensor, err := tf.NewTensor(string(bytes))
	if err != nil {
		return nil, err
	}
	// create batch
	graph, input, output, err := makeTransformImageGraph("jpeg")
	if err != nil {
		return nil, err
	}
	// Execute that graph create the batch of that image
	session, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer session.Close()
	batch, err := session.Run(
		map[tf.Output]*tf.Tensor{input: tensor},
		[]tf.Output{output},
		nil)
	if err != nil {
		return nil, err
	}
	// The graph has a single output: the preprocessed image batch.
	return batch[0], nil
}
// makeBatch uses ExpandDims to convert the tensor into a batch of size 1:
// the graph decodes a JPEG string placeholder, casts to float and prepends
// a leading batch dimension.
func makeBatch() (graph *tf.Graph, input, output tf.Output, err error) {
	s := op.NewScope()
	input = op.Placeholder(s, tf.String)
	decode := op.DecodeJpeg(s, input, op.DecodeJpegChannels(3)) // 3 channels = RGB
	output = op.ExpandDims(s,
		op.Cast(s, decode, tf.Float),
		op.Const(s.SubScope("make_batch"), int32(0)))
	graph, err = s.Finalize()
	return graph, input, output, err
}
// makeTensorFromImage runs the decode/resize/normalize graph for the given
// image format ("png", anything else decodes as JPEG) over the buffered
// image bytes and returns the resulting batch-of-one tensor.
func makeTensorFromImage(imageBuffer *bytes.Buffer, imageFormat string) (*tf.Tensor, error) {
	tensor, err := tf.NewTensor(imageBuffer.String())
	if err != nil {
		return nil, err
	}
	graph, input, output, err := makeTransformImageGraph(imageFormat)
	if err != nil {
		return nil, err
	}
	session, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer session.Close()
	normalized, err := session.Run(
		map[tf.Output]*tf.Tensor{input: tensor},
		[]tf.Output{output},
		nil)
	if err != nil {
		return nil, err
	}
	return normalized[0], nil
}
// makeTransformImageGraph builds the preprocessing graph: decode (PNG or
// JPEG), cast to float, batch of one, bilinear resize to 224x224, then
// (value-Mean)/Scale with Mean=0 and Scale=255, i.e. pixels end up in [0,1].
func makeTransformImageGraph(imageFormat string) (graph *tf.Graph, input, output tf.Output, err error) {
	const (
		H, W = 224, 224
		Mean = float32(0)
		Scale = float32(255)
	)
	s := op.NewScope()
	input = op.Placeholder(s, tf.String)
	// Decode PNG or JPEG
	var decode tf.Output
	if imageFormat == "png" {
		decode = op.DecodePng(s, input, op.DecodePngChannels(3))
	} else {
		decode = op.DecodeJpeg(s, input, op.DecodeJpegChannels(3))
	}
	// Div and Sub perform (value-Mean)/Scale for each pixel
	output = op.Div(s,
		op.Sub(s,
			// Resize to 224x224 with bilinear interpolation
			op.ResizeBilinear(s,
				// Create a batch containing a single image
				op.ExpandDims(s,
					// Use decoded pixel values
					op.Cast(s, decode, tf.Float),
					op.Const(s.SubScope("make_batch"), int32(0))),
				op.Const(s.SubScope("size"), []int32{H, W})),
			op.Const(s.SubScope("mean"), Mean)),
		op.Const(s.SubScope("scale"), Scale))
	graph, err = s.Finalize()
	return graph, input, output, err
}
// normalizeImage reads a JPEG from body and returns it as a normalized
// batch-of-one float tensor (see getNormalizedGraph for the exact pipeline).
func normalizeImage(body io.ReadCloser) (*tf.Tensor, error) {
	var buf bytes.Buffer
	// Propagate read failures instead of silently normalizing a truncated
	// image (the original discarded io.Copy's error).
	if _, err := io.Copy(&buf, body); err != nil {
		return nil, err
	}
	tensor, err := tf.NewTensor(buf.String())
	if err != nil {
		return nil, err
	}
	graph, input, output, err := getNormalizedGraph()
	if err != nil {
		return nil, err
	}
	session, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	// The original leaked the session; every other helper in this file
	// closes it with defer.
	defer session.Close()
	normalized, err := session.Run(
		map[tf.Output]*tf.Tensor{
			input: tensor,
		},
		[]tf.Output{
			output,
		},
		nil)
	if err != nil {
		return nil, err
	}
	return normalized[0], nil
}
// getNormalizedGraph creates a graph to decode, resize and normalize an
// image in the Inception style: JPEG decode, batch of one, bilinear resize
// to 224x224, then subtract the Inception mean (117). Unlike
// makeTransformImageGraph it does not divide by a scale.
func getNormalizedGraph() (graph *tf.Graph, input, output tf.Output, err error) {
	s := op.NewScope()
	input = op.Placeholder(s, tf.String)
	// 3 return RGB image
	decode := op.DecodeJpeg(s, input, op.DecodeJpegChannels(3))
	// Sub: returns x - y element-wise
	output = op.Sub(s,
		// make it 224x224: inception specific
		op.ResizeBilinear(s,
			// inserts a dimension of 1 into a tensor's shape.
			op.ExpandDims(s,
				// cast image to float type
				op.Cast(s, decode, tf.Float),
				op.Const(s.SubScope("make_batch"), int32(0))),
			op.Const(s.SubScope("size"), []int32{224, 224})),
		// mean = 117: inception specific
		op.Const(s.SubScope("mean"), float32(117)))
	graph, err = s.Finalize()
	return graph, input, output, err
}
// makeTensorFromImageForInception loads an exactly-224x224 image file and
// converts it on the CPU (no TF graph) into a [1][224][224][3] float32
// tensor using (value-117)/1 normalization; channels are written in
// B, G, R order as noted below.
func makeTensorFromImageForInception(filename string) (*tf.Tensor, error) {
	const (
		// Some constants specific to the pre-trained model at:
		// https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip
		//
		// - The model was trained after with images scaled to 224x224 pixels.
		// - The colors, represented as R, G, B in 1-byte each were converted to
		//   float using (value - Mean)/Std.
		//
		// If using a different pre-trained model, the values will have to be adjusted.
		H, W = 224, 224
		Mean = 117
		Std = float32(1)
	)
	file, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	img, _, err := image.Decode(file)
	if err != nil {
		return nil, err
	}
	// The image must already be the model's native size; no resizing is done.
	sz := img.Bounds().Size()
	if sz.X != W || sz.Y != H {
		return nil, fmt.Errorf("input image is required to be %dx%d pixels, was %dx%d", W, H, sz.X, sz.Y)
	}
	// 4-dimensional input:
	// - 1st dimension: Batch size (the model takes a batch of images as
	//                  input, here the "batch size" is 1)
	// - 2nd dimension: Rows of the image
	// - 3rd dimension: Columns of the row
	// - 4th dimension: Colors of the pixel as (B, G, R)
	// Thus, the shape is [1, 224, 224, 3]
	var ret [1][H][W][3]float32
	for y := 0; y < H; y++ {
		for x := 0; x < W; x++ {
			// Respect the image's origin offset when sampling pixels.
			px := x + img.Bounds().Min.X
			py := y + img.Bounds().Min.Y
			r, g, b, _ := img.At(px, py).RGBA()
			// RGBA returns 16-bit channels; >>8 reduces them to 8-bit.
			ret[0][y][x][0] = float32(int(b>>8)-Mean) / Std
			ret[0][y][x][1] = float32(int(g>>8)-Mean) / Std
			ret[0][y][x][2] = float32(int(r>>8)-Mean) / Std
		}
	}
	return tf.NewTensor(ret)
}
|
package main
import (
"fmt"
"github.com/OlaleyeJumoke/GoInterfaceAndStruct/organization"
)
// main demonstrates the organization package: it constructs a Person, sets a
// Twitter handler, and shows value-equality of comparable structs.
func main() {
	p := organization.NewPerson("Jumoke", "Olaleye", organization.NewEuropeanUnionIdentifier("123456789", "Germany"))
	fmt.Println(p)
	fmt.Println(p.FullName())
	err := p.SetTwitterHandler("@jumoke")
	if err != nil {
		// Fixed grammar in the user-facing message ("An error occur" -> "occurred").
		fmt.Printf("An error occurred while setting twitter handler %s\n", err.Error())
	}
	// Structs with only comparable fields support == directly.
	name1 := Name{first: "Jumoke", last: "Olaleye"}
	name2 := Name{first: "Jumoke", last: "Olaleye"}
	if name1 == name2 {
		println("we match")
	}
}
// Name holds a first and last name; both fields are comparable, so two Name
// values can be compared with == (as done in main).
type Name struct {
	first string
	last string
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// main wires the solver to standard input and output.
func main() {
	solve(os.Stdin, os.Stdout)
}
// solve parses the input (n, then k, then k lines "r x y") and prints the
// answer computed by search.
func solve(stdin io.Reader, stdout io.Writer) {
	sc := bufio.NewScanner(stdin)
	readInt := func() int {
		sc.Scan()
		v, _ := strconv.Atoi(sc.Text())
		return v
	}
	n := readInt()
	k := readInt()
	r := make([]int, 0, k)
	x := make([]int, 0, k)
	y := make([]int, 0, k)
	for i := 0; i < k; i++ {
		sc.Scan()
		fields := strings.Split(sc.Text(), " ")
		ri, _ := strconv.Atoi(fields[0])
		xi, _ := strconv.Atoi(fields[1])
		yi, _ := strconv.Atoi(fields[2])
		r = append(r, ri)
		x = append(x, xi)
		y = append(y, yi)
	}
	fmt.Fprintln(stdout, search(n, k, r, x, y))
}
// unionFind is a disjoint-set structure with path compression and union by
// rank. The slices are shared through the value receiver, so mutating
// methods work on value copies of the struct.
type unionFind struct {
	par []int
	rank []int
}

// newUnionFind builds a disjoint-set structure over n singleton elements.
func newUnionFind(n int) unionFind {
	par := make([]int, n)
	for i := range par {
		par[i] = i
	}
	return unionFind{par: par, rank: make([]int, n)}
}

// root returns the representative of x's set, compressing the walked path.
func (u unionFind) root(x int) int {
	r := x
	for u.par[r] != r {
		r = u.par[r]
	}
	// Path compression: point every node on the walk directly at the root.
	for u.par[x] != r {
		u.par[x], x = r, u.par[x]
	}
	return r
}

// unite merges the sets containing x and y (no-op if already joined).
func (u unionFind) unite(x, y int) {
	rx, ry := u.root(x), u.root(y)
	if rx == ry {
		return
	}
	// Union by rank: hang the shallower tree under the deeper one.
	if u.rank[rx] < u.rank[ry] {
		rx, ry = ry, rx
	}
	u.par[ry] = rx
	if u.rank[rx] == u.rank[ry] {
		u.rank[rx]++
	}
}

// same reports whether x and y belong to the same set.
func (u unionFind) same(x, y int) bool {
	return u.root(x) == u.root(y)
}
// search counts how many of the k statements contradict earlier, accepted
// ones. It keeps a union-find over 3*n elements where xi, xi+n and xi+2n
// represent "item xi in relative class 0/1/2": ri == 1 claims xi and yi are
// in the same class, any other ri claims yi's class is one step after xi's.
// Statements whose (1-based) indices fall outside 1..n count as wrong.
func search(n, k int, r, x, y []int) (ans int) {
	u := newUnionFind(n * 3)
	for ki := 0; ki < k; ki++ {
		xi := x[ki] - 1
		yi := y[ki] - 1
		ri := r[ki]
		// Out-of-range indices are invalid statements by definition.
		if xi < 0 || n <= xi || yi < 0 || n <= yi {
			ans++
			continue
		}
		if ri == 1 {
			// "Same class": contradiction if xi/yi are already known to be
			// in different (shifted) classes.
			if u.same(xi, yi+n) || u.same(xi, yi+n*2) {
				ans++
			} else {
				u.unite(xi, yi)
				u.unite(xi+n, yi+n)
				u.unite(xi+n*2, yi+n*2)
			}
		} else {
			// "yi one class after xi": contradiction if already same class
			// or shifted the other way around.
			if u.same(xi, yi) || u.same(xi, yi+n*2) {
				ans++
			} else {
				u.unite(xi, yi+n)
				u.unite(xi+n, yi+n*2)
				u.unite(xi+n*2, yi)
			}
		}
	}
	return
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"github.com/shirou/gopsutil/net"
log "github.com/sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"net/http"
"time"
)
// netIndicator counts cumulative per-interface packet/byte/error/drop
// deltas, labelled by interface ("net"), measure and operation.
var netIndicator = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "sreagent_net",
		Help: "Net Stats",
	}, []string{"net","measure","operation"} )
// netRates exposes the instantaneous per-interface rates (pps, dropps, mbps)
// computed between consecutive samples.
var netRates = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "sreagent_net_rates",
		Help: "Net Throughput",
	}, []string{"netif", "unit", "direction"} )
// PluginConfig holds the parsed alert thresholds (config "alert" >> "drop").
var PluginConfig map[string]map[string]map[string]interface{}
// PluginData is the latest per-interface IOCounters sample keyed by
// interface name; PluginDataPrev is the previous sample used for deltas.
var PluginData,
	PluginDataPrev map[string]interface{}
// TScurrent/TSprevious are the current/previous sample timestamps (ns).
var TScurrent,
	TSprevious int64
// PDropRecv/PDropSent accumulate the overall drop rates of the last sample.
var PDropRecv,
	PDropSent float64
// PluginMeasure samples per-interface network IO counters, feeds the deltas
// since the previous sample into the Prometheus counters, sets the derived
// per-second rates on the gauges, accumulates overall drop rates into
// PDropRecv/PDropSent, and returns the sample as JSON plus its timestamp in
// (fractional) seconds.
func PluginMeasure() ([]byte, []byte, float64) {
	// Get measurement of IOCounters
	TScurrent = time.Now().UnixNano()
	netio, _ := net.IOCounters(true)
	for netidx := range netio {
		PluginData[netio[netidx].Name] = netio[netidx]
	}
	Δts := TScurrent - TSprevious // nanoseconds!
	if Δts <= 0 {
		// Two samples inside the clock resolution would divide by zero below.
		Δts = 1
	}
	PDropRecv = 0.0
	PDropSent = 0.0
	for netid := range PluginData {
		prevRaw, present := PluginDataPrev[netid]
		if !present {
			continue
		}
		cur := PluginData[netid].(net.IOCountersStat)
		prev := prevRaw.(net.IOCountersStat)
		incPrecv := cur.PacketsRecv - prev.PacketsRecv
		incPsent := cur.PacketsSent - prev.PacketsSent
		incBrecv := cur.BytesRecv - prev.BytesRecv
		incBsent := cur.BytesSent - prev.BytesSent
		incEin := cur.Errin - prev.Errin
		incEout := cur.Errout - prev.Errout
		incDin := cur.Dropin - prev.Dropin
		incDout := cur.Dropout - prev.Dropout
		// Update metrics related to the plugin
		netIndicator.WithLabelValues(netid, "packets", "received").Add(float64(incPrecv))
		netIndicator.WithLabelValues(netid, "packets", "sent").Add(float64(incPsent))
		netIndicator.WithLabelValues(netid, "bytes", "received").Add(float64(incBrecv))
		netIndicator.WithLabelValues(netid, "bytes", "sent").Add(float64(incBsent))
		netIndicator.WithLabelValues(netid, "errors", "received").Add(float64(incEin))
		netIndicator.WithLabelValues(netid, "errors", "sent").Add(float64(incEout))
		netIndicator.WithLabelValues(netid, "packets", "dropin").Add(float64(incDin))
		netIndicator.WithLabelValues(netid, "packets", "dropout").Add(float64(incDout))
		// Per-second rates over the elapsed interval.
		ppsrecv := float64(incPrecv) * 1e9 / float64(Δts)
		ppssent := float64(incPsent) * 1e9 / float64(Δts)
		droprecv := float64(incDin) * 1e9 / float64(Δts)
		dropsent := float64(incDout) * 1e9 / float64(Δts)
		bpsrecv := float64(8*incBrecv) * 1e9 / (float64(Δts) * 1024.0 * 1024.0)
		bpssent := float64(8*incBsent) * 1e9 / (float64(Δts) * 1024.0 * 1024.0)
		netRates.WithLabelValues(netid, "pps", "received").Set(ppsrecv)
		netRates.WithLabelValues(netid, "pps", "sent").Set(ppssent)
		netRates.WithLabelValues(netid, "dropps", "received").Set(droprecv)
		netRates.WithLabelValues(netid, "dropps", "sent").Set(dropsent)
		netRates.WithLabelValues(netid, "mbps", "received").Set(bpsrecv)
		netRates.WithLabelValues(netid, "mbps", "sent").Set(bpssent)
		PDropRecv += droprecv
		PDropSent += dropsent
	}
	// Save the current sample as the new baseline. BUG FIX: the original only
	// refreshed keys already present in PluginDataPrev, so interfaces that
	// appeared after InitPlugin were never rate-tracked.
	for netid := range PluginData {
		PluginDataPrev[netid] = PluginData[netid]
	}
	TSprevious = TScurrent
	myMeasure, _ := json.Marshal(PluginData)
	return myMeasure, []byte(""), float64(TScurrent) / 1e9
}
// PluginAlert grades the drop rates accumulated by the last PluginMeasure
// call (PDropRecv/PDropSent) against the configured thresholds and returns
// (message, level, flag, error). The measure argument is currently unused;
// the commented-out code sketches the intended JSON-based variant. A
// placeholder errors.New("no error") is returned when nothing fired.
//
// NOTE(review): the chained map lookups / float64 assertions panic if the
// config lacks alert.drop.engineered/design — confirm InitPlugin always ran
// with a complete config.
func PluginAlert(measure []byte) (string, string, bool, error) {
	// log.WithFields(log.Fields{"MyMeasure": string(MyMeasure[:]), "measure": string(measure[:])}).Info("PluginAlert")
	// var m interface{}
	// err := json.Unmarshal(measure, &m)
	// if err != nil { return "unknown", "", true, err }
	alertMsg := ""
	alertLvl := ""
	alertFlag := false
	alertErr := errors.New("no error")
	// Check that the Dropped rates are good
	switch {
	case PDropSent > PluginConfig["alert"]["drop"]["engineered"].(float64):
		alertLvl = "fatal"
		alertMsg += "Overall Packet Drop sent above engineered point "
		alertFlag = true
		alertErr = errors.New("excessive drop in sending")
		// return now, looks bad
		return alertMsg, alertLvl, alertFlag, alertErr
	case PDropRecv > PluginConfig["alert"]["drop"]["engineered"].(float64):
		alertLvl = "fatal"
		alertMsg += "Overall Packet Drop recv above engineered point "
		alertFlag = true
		alertErr = errors.New("excessive drop in receiving")
		// return now, looks bad
		return alertMsg, alertLvl, alertFlag, alertErr
	case PDropSent > PluginConfig["alert"]["drop"]["design"].(float64):
		alertLvl = "warn"
		alertMsg += "Overall Packet Drop sent above design point "
		alertFlag = true
		alertErr = errors.New("moderately high packet drop sent")
	case PDropRecv > PluginConfig["alert"]["drop"]["design"].(float64):
		alertLvl = "warn"
		alertMsg += "Overall Packet Drop recv above design point "
		alertFlag = true
		alertErr = errors.New("moderately high packet drop recv")
	}
	return alertMsg, alertLvl, alertFlag, alertErr
}
// InitPlugin parses the JSON config into PluginConfig, takes an initial
// IOCounters sample as the rate baseline for PluginMeasure, and registers
// the Prometheus collectors. Call once at startup: MustRegister panics if
// the collectors are registered twice.
func InitPlugin(config string) {
	if PluginData == nil {
		PluginData = make(map[string]interface{}, 20)
	}
	if PluginDataPrev == nil {
		PluginDataPrev = make(map[string]interface{}, 20)
	}
	if PluginConfig == nil {
		PluginConfig = make(map[string]map[string]map[string]interface{}, 20)
	}
	err := json.Unmarshal([]byte(config), &PluginConfig)
	if err != nil {
		log.WithFields(log.Fields{"config": config}).Error("failed to unmarshal config")
	}
	PDropRecv = 0.0
	PDropSent = 0.0
	TSprevious = time.Now().UnixNano()
	// Seed the previous sample so the first PluginMeasure has a baseline.
	netio, _ := net.IOCounters(true)
	for netidx := range netio { PluginDataPrev[netio[netidx].Name] = netio[netidx] }
	// Register metrics with prometheus
	prometheus.MustRegister(netIndicator)
	prometheus.MustRegister(netRates)
	log.WithFields(log.Fields{"pluginconfig": PluginConfig, "plugindata": PluginData}).Info("InitPlugin")
}
// main is a standalone harness for the plugin: it serves Prometheus metrics
// on :8999/metrics, initializes the plugin with an inline config, then runs
// 100 measure/alert cycles 200ms apart, logging every tick.
func main() {
	// Thresholds consumed by PluginAlert (alert >> drop).
	config := `
	{
	    "alert":
	    {
	        "drop":
	        {
	            "low": 0.00,
	            "design": 1.0,
	            "engineered": 10.0
	        }
	    }
	}
	`
	//--------------------------------------------------------------------------//
	// time to start a prometheus metrics server
	// and export any metrics on the /metrics endpoint.
	http.Handle("/metrics", promhttp.Handler())
	go func() {
		http.ListenAndServe(":8999", nil)
	}()
	//--------------------------------------------------------------------------//
	InitPlugin(config)
	log.WithFields(log.Fields{"PluginConfig": PluginConfig}).Info("InitPlugin")
	tickd := 200 * time.Millisecond
	for i := 1; i <= 100; i++ {
		tick := time.Now().UnixNano()
		measure, measureraw, measuretimestamp := PluginMeasure()
		alertmsg, alertlvl, isAlert, err := PluginAlert(measure)
		fmt.Printf("Iteration #%d tick %d %v \n", i, tick,PluginData)
		log.WithFields(log.Fields{"timestamp": measuretimestamp,
			"measure": string(measure[:]),
			"measureraw": string(measureraw[:]),
			"PluginData": PluginData,
			"alertMsg": alertmsg,
			"alertLvl": alertlvl,
			"isAlert": isAlert,
			"AlertErr": err,
		}).Debug("Tick")
		time.Sleep(tickd)
	}
}
|
package crd
import (
"context"
"log"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
api_v1 "github.com/cyberark/secretless-broker/pkg/apis/secretless.io/v1"
secretlessClientset "github.com/cyberark/secretless-broker/pkg/k8sclient/clientset/versioned"
)
// ResourceEventHandler is the interface for handling CRD push notification
// callbacks: implementations receive the added or deleted Configuration, or
// the (old, new) pair on update.
type ResourceEventHandler interface {
	CRDAdded(*api_v1.Configuration)
	CRDDeleted(*api_v1.Configuration)
	CRDUpdated(*api_v1.Configuration, *api_v1.Configuration)
}
// RegisterCRDListener registers a CRD push-notification handler to the
// available k8s cluster: it builds an informer over secretless
// Configurations in the given namespace and forwards add/delete/update
// events whose object name matches configSpec to resourceEventHandler. The
// informer goroutine runs until process exit (wait.NeverStop).
func RegisterCRDListener(namespace string, configSpec string, resourceEventHandler ResourceEventHandler) error {
	log.Printf("%s: Registering CRD watcher...", PluginName)
	clientConfig, err := NewKubernetesConfig()
	if err != nil {
		return err
	}
	clientset, err := secretlessClientset.NewForConfig(clientConfig)
	if err != nil {
		return err
	}
	// TODO: Watch for CRD availability
	// TODO: We might not want to listen in on all namespace changes
	watchList := &cache.ListWatch{
		ListFunc: func(listOpts meta_v1.ListOptions) (result runtime.Object, err error) {
			return clientset.SecretlessV1().Configurations(namespace).List(context.TODO(), listOpts)
		},
		WatchFunc: func(listOpts meta_v1.ListOptions) (watch.Interface, error) {
			return clientset.SecretlessV1().Configurations(namespace).Watch(context.TODO(), listOpts)
		},
	}
	_, controller := cache.NewInformer(
		watchList,
		&api_v1.Configuration{},
		CRDForcedRefreshInterval,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				configObj := obj.(*api_v1.Configuration)
				// Only the configuration named configSpec is of interest.
				if configObj.ObjectMeta.Name != configSpec {
					return
				}
				log.Printf("%s: Add configuration event", PluginName)
				log.Println(configObj.ObjectMeta.Name)
				resourceEventHandler.CRDAdded(configObj)
			},
			DeleteFunc: func(obj interface{}) {
				configObj := obj.(*api_v1.Configuration)
				if configObj.ObjectMeta.Name != configSpec {
					return
				}
				log.Printf("%s: Delete configuration event", PluginName)
				resourceEventHandler.CRDDeleted(configObj)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				oldConfigObj := oldObj.(*api_v1.Configuration)
				// Filter on the old object's name; the new object is passed
				// through unfiltered alongside it.
				if oldConfigObj.ObjectMeta.Name != configSpec {
					return
				}
				log.Printf("%s: Update/refresh configuration event", PluginName)
				newConfigObj := newObj.(*api_v1.Configuration)
				resourceEventHandler.CRDUpdated(oldConfigObj, newConfigObj)
			},
		},
	)
	go controller.Run(wait.NeverStop)
	return nil
}
|
package set1
import (
"errors"
"fmt"
"math"
)
// hammingDistance returns the number of differing bits between two
// equal-length byte strings, or an error when the lengths differ.
func hammingDistance(s1 []byte, s2 []byte) (int, error) {
	if len(s1) != len(s2) {
		return -1, errors.New("Undefined for strings of unequal length")
	}
	distance := 0
	for i := range s1 {
		// Count set bits of the XOR directly instead of rendering both
		// inputs as "0"/"1" strings (the original built two 8*n-char strings
		// via repeated Sprintf concatenation, which is quadratic).
		// Kernighan's trick: x &= x-1 clears the lowest set bit, so the
		// inner loop runs once per differing bit.
		for x := s1[i] ^ s2[i]; x != 0; x &= x - 1 {
			distance++
		}
	}
	return distance, nil
}
// bytesToBinary renders s as a string of '0'/'1' characters, eight bits per
// byte, most significant bit first (equivalent to fmt's "%.8b" per byte).
func bytesToBinary(s []byte) string {
	// Build into a pre-sized buffer: the original concatenated via
	// fmt.Sprintf in a loop, which is quadratic in the input length.
	buf := make([]byte, 0, len(s)*8)
	for _, c := range s {
		for bit := 7; bit >= 0; bit-- {
			if c&(1<<uint(bit)) != 0 {
				buf = append(buf, '1')
			} else {
				buf = append(buf, '0')
			}
		}
	}
	return string(buf)
}
// breakVigenere recovers the repeating-key XOR key of text: it detects the
// key size, transposes the ciphertext into one block per key byte, solves
// each block as single-byte XOR, and returns the decrypted text together
// with the recovered key.
func breakVigenere(text []byte) ([]byte, []byte) {
	keysize := detectVigenereKeySize(text)
	blocks := ChunkArray(text, keysize)
	// Transpose: block i collects the i-th byte of every chunk, i.e. all
	// bytes enciphered with the same key byte.
	var transpose [][]byte
	for i := 0; i < keysize; i++ {
		var block []byte
		for j := 0; j < len(blocks); j++ {
			if i < len(blocks[j]) {
				block = append(block, blocks[j][i])
			}
		}
		transpose = append(transpose, block)
	}
	// Solve each transposed block independently as single-byte XOR; the best
	// key byte per block concatenates into the full key.
	var finalKey []byte
	for _, block := range transpose {
		_, blockKey, _ := DecryptSingleByteXOR(block, nil)
		finalKey = append(finalKey, blockKey)
	}
	result := RepeatingKeyXOR(text, finalKey)
	return result, finalKey
}
/**
* Algorithm from http://cryptopals.com/sets/1/challenges/6
*
* 1. For each KEYSIZE, take the first KEYSIZE worth of bytes, and the
* second KEYSIZE worth of bytes, and find the edit distance between
* them. Normalize this result by dividing by KEYSIZE.
* 2. The KEYSIZE with the smallest normalized edit distance is probably
* the key. You could proceed perhaps with the smallest 2-3 KEYSIZE
* values. Or take 4 KEYSIZE blocks instead of 2 and average the
* distances.
* 3. Now that you probably know the KEYSIZE: break the ciphertext into
* blocks of KEYSIZE length.
* 4. Now transpose the blocks: make a block that is the first byte of
* every block, and a block that is the second byte of every block,
* and so on.
* 5. Solve each block as if it was single-character XOR. You already
* have code to do this.
* 6. For each block, the single-byte XOR key that produces the best
* looking histogram is the repeating-key XOR key byte for that block.
* Put them together and you have the key.
*/
// detectVigenereKeySize guesses the repeating-key XOR key size (2..40) by
// comparing the normalized Hamming distance of two consecutive 4*keysize
// blocks and keeping the key size with the smallest distance. Returns 0 if
// the text is too short for even the smallest key size.
func detectVigenereKeySize(text []byte) int {
	detectedKeySize := 0
	smallestEditDistance := float32(math.MaxFloat32)
	startAt := 2
	endAt := 40
	for keysize := startAt; keysize <= endAt; keysize++ {
		// 4 keysize length for each block (the wording from step 2 was not very clear about this)
		blocklength := keysize * 4
		// Skip key sizes the text cannot supply two full blocks for; the
		// original sliced out of range and panicked on short inputs.
		if len(text) < blocklength*2 {
			break
		}
		firstWorth := text[0:blocklength]
		secondWorth := text[blocklength : blocklength*2]
		editDistance, _ := hammingDistance(firstWorth, secondWorth)
		normalizedEditDistance := float32(editDistance) / float32(keysize)
		if normalizedEditDistance < smallestEditDistance {
			smallestEditDistance = normalizedEditDistance
			detectedKeySize = keysize
		}
	}
	return detectedKeySize
}
// ChunkArray splits array into consecutive chunks of the given size; the
// last chunk holds any remainder and may be shorter. The chunks alias the
// input's backing array — no bytes are copied.
func ChunkArray(array []byte, size int) [][]byte {
	var chunks [][]byte
	for start := 0; start < len(array); start += size {
		end := start + size
		if end > len(array) {
			end = len(array)
		}
		chunks = append(chunks, array[start:end])
	}
	return chunks
}
|
package gw
import (
"fmt"
"github.com/oceanho/gw/libs/gwreflect"
"reflect"
"sync"
)
// ObjectTyper is one registration in the DI container: the component's
// reflected type and value, the dependencies of its New(...) constructor,
// and the constructor itself. newAPI holds the zero reflect.Value
// (NullReflectValue) when the registration resolves directly to ActualValue.
type ObjectTyper struct {
	Name        string
	DependOn    []TyperDependency
	Typer       reflect.Type
	ActualValue reflect.Value
	newAPI      reflect.Value
	IsPtr       bool
}
// Well-known DI registration names for the framework's built-in components.
// Each constant is the package-qualified name of the corresponding gw type
// and is used as the lookup key in the container's typer map.
const (
	IStoreName               = "github.com/oceanho/gw.IStore"
	IPasswordSignerName      = "github.com/oceanho/gw.IPasswordSigner"
	IPermissionManagerName   = "github.com/oceanho/gw.IPermissionManager"
	IPermissionCheckerName   = "github.com/oceanho/gw.IPermissionChecker"
	ISessionStateManagerName = "github.com/oceanho/gw.ISessionStateManager"
	ICryptoHashName          = "github.com/oceanho/gw.ICryptoHash"
	ICryptoProtectName       = "github.com/oceanho/gw.ICryptoProtect"
	IdentifierGeneratorName  = "github.com/oceanho/gw.IdentifierGenerator"
	IAuthManagerName         = "github.com/oceanho/gw.IAuthManager"
	IUserManagerName         = "github.com/oceanho/gw.IUserManager"
	ServerStateName          = "github.com/oceanho/gw.ServerState"
	HostServerName           = "github.com/oceanho/gw.HostServer"
	IEventManagerName        = "github.com/oceanho/gw.IEventManager"
)

// NullReflectValue is the zero reflect.Value; ObjectTyper.newAPI is set to
// it to mark registrations that resolve directly to a stored instance.
// BuiltinComponentTyper is the reflected type of the BuiltinComponent
// aggregate registration.
var (
	NullReflectValue      = reflect.ValueOf(nil)
	BuiltinComponentTyper = reflect.TypeOf(BuiltinComponent{})
)
// objectTypers builds the container's initial registration table, mapping
// each built-in component's well-known name to an ObjectTyper that resolves
// directly to the server state's existing instance.
func (ss ServerState) objectTypers() map[string]ObjectTyper {
	builtins := map[string]interface{}{
		IStoreName:               ss.Store(),
		HostServerName:           ss.s,
		ServerStateName:          ss,
		IUserManagerName:         ss.UserManager(),
		IAuthManagerName:         ss.AuthManager(),
		ICryptoHashName:          ss.CryptoHash(),
		ICryptoProtectName:       ss.CryptoProtect(),
		IEventManagerName:        ss.EventManager(),
		IPasswordSignerName:      ss.PasswordSigner(),
		IdentifierGeneratorName:  ss.IDGenerator(),
		IPermissionManagerName:   ss.PermissionManager(),
		IPermissionCheckerName:   ss.PermissionChecker(),
		ISessionStateManagerName: ss.SessionStateManager(),
	}
	typers := make(map[string]ObjectTyper, len(builtins))
	for name, instance := range builtins {
		typers[name] = newNilApiObjectTyper(name, instance)
	}
	return typers
}
// newNilApiObjectTyper wraps an already-constructed value as an ObjectTyper
// that has no New(...) constructor: resolution returns the value as-is.
func newNilApiObjectTyper(name string, value interface{}) ObjectTyper {
	var typer ObjectTyper
	typer.Name = name
	typer.IsPtr = false
	typer.newAPI = NullReflectValue
	typer.Typer = reflect.TypeOf(value)
	typer.ActualValue = reflect.ValueOf(value)
	return typer
}
// BuiltinComponent aggregates every built-in framework service into a single
// injectable struct, so user code can depend on one component instead of
// listing each service in its own constructor.
type BuiltinComponent struct {
	Store               IStore
	UserManager         IUserManager
	AuthManager         IAuthManager
	SessionStateManager ISessionStateManager
	PermissionManager   IPermissionManager
	PermissionChecker   IPermissionChecker
	CryptoHash          ICryptoHash
	CryptoProtect       ICryptoProtect
	PasswordSigner      IPasswordSigner
	IDGenerator         IdentifierGenerator
}
// New is the DI constructor for BuiltinComponent: the container invokes it
// with every built-in dependency and receives a fully-populated copy.
func (bc BuiltinComponent) New(
	Store IStore,
	UserManager IUserManager,
	AuthManager IAuthManager,
	SessionStateManager ISessionStateManager,
	PermissionManager IPermissionManager,
	PermissionChecker IPermissionChecker,
	CryptoHash ICryptoHash,
	CryptoProtect ICryptoProtect,
	PasswordSigner IPasswordSigner,
	IDGenerator IdentifierGenerator,
) BuiltinComponent {
	return BuiltinComponent{
		Store:               Store,
		UserManager:         UserManager,
		AuthManager:         AuthManager,
		SessionStateManager: SessionStateManager,
		PermissionManager:   PermissionManager,
		PermissionChecker:   PermissionChecker,
		CryptoHash:          CryptoHash,
		CryptoProtect:       CryptoProtect,
		PasswordSigner:      PasswordSigner,
		IDGenerator:         IDGenerator,
	}
}
// TyperDependency identifies one constructor parameter: its reflected type,
// its registration name, and whether the constructor expects a pointer.
type TyperDependency struct {
	Name  string
	IsPtr bool
	Typer reflect.Type
}

// DIConfig customizes container behavior: the constructor method name to
// look for on registered components, and the function performing resolution.
type DIConfig struct {
	NewFuncName string
	ResolveFunc func(di interface{}, state interface{}, typerName string) interface{}
}
// defaultDIConfig is the DIConfig used by DefaultDIProvider: components are
// constructed via a method named "New", and resolution walks the dependency
// graph rooted at the requested typer name.
var defaultDIConfig = DIConfig{
	NewFuncName: "New",
	ResolveFunc: func(diImpl interface{}, state interface{}, typerName string) interface{} {
		// diImpl must be the *DefaultDIProviderImpl owning the registry;
		// the assertion panics for any other concrete type.
		var di = diImpl.(*DefaultDIProviderImpl)
		if di == nil {
			panic("diImpl not are DefaultDIProviderImpl")
		}
		// state, when non-nil, must be an IStore; it overrides the
		// registered IStore for this resolution (per-request store).
		var store interface{}
		if state != nil {
			store = state.(IStore)
		}
		var objectTyper, ok = di.objectTypers[typerName]
		if !ok {
			panic(fmt.Sprintf("object typer(%s) not found", typerName))
		}
		var result reflect.Value
		// NOTE(review): == on reflect.Value compares the struct, not the
		// represented value. It works here because newAPI is assigned
		// NullReflectValue itself, but !newAPI.IsValid() is the
		// conventional zero check — confirm before relying on this.
		if objectTyper.newAPI == NullReflectValue {
			// No constructor: resolve to the stored instance.
			result = objectTyper.ActualValue
		} else {
			// Resolve each constructor parameter (substituting the live
			// store for IStore dependencies), then invoke New(...).
			var values []reflect.Value
			for _, typerDp := range objectTyper.DependOn {
				if typerDp.Name == IStoreName && store != nil {
					values = append(values, reflect.ValueOf(store))
				} else {
					values = append(values, resolver(di, typerDp, store))
				}
			}
			result = objectTyper.newAPI.Call(values)[0]
		}
		return result.Interface()
	},
}
// IDIProvider is the dependency-injection container contract: registering
// constructible components and resolving them by name or reflected type,
// optionally with a caller-supplied state (an IStore) for the resolution.
type IDIProvider interface {
	Register(actual ...interface{}) bool
	RegisterWithTyper(typers ...reflect.Type) bool
	RegisterWithName(typerName string, actual interface{}) bool
	Resolve(typerName string) interface{}
	ResolveByTyper(typer reflect.Type) interface{}
	ResolveWithState(state interface{}, typerName string) interface{}
	ResolveByTyperWithState(state interface{}, typer reflect.Type) interface{}
}

// DefaultDIProviderImpl is the built-in IDIProvider, backed by two maps:
// registration name -> ObjectTyper and reflected type -> registration name.
// locker serializes registrations; reads during resolution are unlocked.
type DefaultDIProviderImpl struct {
	locker       sync.Mutex
	config       DIConfig
	state        *ServerState
	objectTypers map[string]ObjectTyper
	typerMappers map[reflect.Type]string
}
// DefaultDIProvider creates the framework's default DI container, seeded
// with the server state's built-in components plus the BuiltinComponent
// aggregate registration.
func DefaultDIProvider(state *ServerState) IDIProvider {
	typers := state.objectTypers()
	mappers := make(map[reflect.Type]string, len(typers))
	for _, typer := range typers {
		mappers[typer.Typer] = typer.Name
	}
	provider := &DefaultDIProviderImpl{
		state:        state,
		objectTypers: typers,
		typerMappers: mappers,
		config:       defaultDIConfig,
	}
	provider.Register(BuiltinComponent{})
	return provider
}
// Register registers each value under the name derived from its New(...)
// constructor's return type, stopping at the first failure.
func (d *DefaultDIProviderImpl) Register(actual ...interface{}) bool {
	for _, component := range actual {
		if ok := d.RegisterWithName("", component); !ok {
			return false
		}
	}
	return true
}
// RegisterWithTyper registers a freshly allocated *T for each given type T,
// stopping at the first failure.
func (d *DefaultDIProviderImpl) RegisterWithTyper(typers ...reflect.Type) bool {
	for _, typer := range typers {
		instance := reflect.New(typer).Interface()
		if ok := d.RegisterWithName("", instance); !ok {
			return false
		}
	}
	return true
}
// RegisterWithName registers actual in the container under name. When name
// is empty, the registration name is derived from the package-qualified
// name of the New(...) constructor's return type. actual must expose a
// method named by config.NewFuncName (default "New") that returns exactly
// one value; the method's parameters become the registration's
// dependencies. Panics when either requirement is violated.
func (d *DefaultDIProviderImpl) RegisterWithName(name string, actual interface{}) bool {
	var actualValue = reflect.ValueOf(actual)
	var newMethod = actualValue.MethodByName(d.config.NewFuncName)
	if newMethod.Kind() != reflect.Func {
		panic(fmt.Sprintf("typer(%s) has no %s(...) APIs.",
			gwreflect.GetPkgFullName(reflect.TypeOf(actual)), d.config.NewFuncName))
	}
	var newMethodTyper = newMethod.Type()
	var newMethodNumIn = newMethodTyper.NumIn()
	var newMethodNumOut = newMethodTyper.NumOut()
	if newMethodNumOut != 1 {
		// Fixed format verb: actual is an arbitrary value, not a string, so
		// %s was a go vet printf violation and printed badly for
		// non-Stringer types; %v formats any value.
		panic(fmt.Errorf("actual(%v) named typer New(...) func should be has only one return values", actual))
	}
	var outTyper = newMethodTyper.Out(0)
	var virtualName = gwreflect.GetPkgFullName(outTyper)
	if name != "" {
		virtualName = name
	}
	// Record each constructor parameter as a dependency to resolve later.
	var newFunParamObjectTypers = make([]TyperDependency, newMethodNumIn)
	for idx := 0; idx < newMethodNumIn; idx++ {
		newFunParamObjectTypers[idx] = typeToObjectTyperName(newMethodTyper.In(idx))
	}
	var objectTyper = ObjectTyper{
		newAPI:      newMethod,
		Name:        virtualName,
		DependOn:    newFunParamObjectTypers,
		ActualValue: actualValue,
		Typer:       reflect.TypeOf(actual),
	}
	// Serialize map writes; resolution reads these maps without the lock.
	d.locker.Lock()
	defer d.locker.Unlock()
	d.objectTypers[virtualName] = objectTyper
	d.typerMappers[outTyper] = virtualName
	return true
}
// Resolve resolves typerName using the server state's default store.
func (d *DefaultDIProviderImpl) Resolve(typerName string) interface{} {
	return d.ResolveWithState(d.state.Store(), typerName)
}

// ResolveByTyper resolves the component registered for typer using the
// server state's default store.
func (d *DefaultDIProviderImpl) ResolveByTyper(typer reflect.Type) interface{} {
	return d.ResolveByTyperWithState(d.state.Store(), typer)
}

// ResolveByTyperWithState resolves typer with an explicit state (an IStore).
// Types with no recorded mapping fall back to their package-qualified name.
func (d *DefaultDIProviderImpl) ResolveByTyperWithState(state interface{}, typer reflect.Type) interface{} {
	var typerName, ok = d.typerMappers[typer]
	if !ok {
		typerName = gwreflect.GetPkgFullName(typer)
	}
	return d.ResolveWithState(state, typerName)
}

// ResolveWithState delegates resolution to the configured ResolveFunc.
func (d *DefaultDIProviderImpl) ResolveWithState(state interface{}, typerName string) interface{} {
	return d.config.ResolveFunc(d, state, typerName)
}
// helpers

// resolver recursively constructs the component described by
// typerDependency, resolving each of its constructor's dependencies first.
// state, when non-nil, must be an IStore and is substituted for IStore
// dependencies so a per-request store overrides the registered one. Panics
// if any transitive dependency is not registered.
func resolver(defaultDiImpl *DefaultDIProviderImpl, typerDependency TyperDependency, state interface{}) reflect.Value {
	var store interface{}
	if state != nil {
		store = state.(IStore)
	}
	var values []reflect.Value
	var objectTyper, ok = defaultDiImpl.objectTypers[typerDependency.Name]
	if !ok {
		panic(fmt.Sprintf("missing typer(%s)", typerDependency.Name))
	}
	// NOTE(review): == on reflect.Value compares struct identity rather
	// than the represented value; !newAPI.IsValid() is the conventional
	// zero check — confirm before changing.
	if objectTyper.newAPI == NullReflectValue {
		// No constructor registered: resolve to the stored instance.
		return objectTyper.ActualValue
	}
	for _, dp := range objectTyper.DependOn {
		dp := dp // shadow the loop variable used in the recursive call
		if dp.Name == IStoreName && store != nil {
			values = append(values, reflect.ValueOf(store))
		} else if _, ok := defaultDiImpl.objectTypers[dp.Name]; ok {
			values = append(values, resolver(defaultDiImpl, dp, store))
		} else {
			panic(fmt.Sprintf("object typer(%s) not found", dp.Name))
		}
	}
	var result = objectTyper.newAPI.Call(values)[0]
	// Coerce the constructed value to the pointer-ness the dependent's
	// constructor parameter expects.
	switch result.Kind() {
	case reflect.Ptr:
		if typerDependency.IsPtr {
			return result
		} else {
			// Dependency wants a value: dereference the pointer.
			return reflect.ValueOf(result.Elem().Interface())
		}
	default:
		if !typerDependency.IsPtr {
			return result
		} else {
			// Dependency wants a pointer: box the value in a new allocation.
			var typeValue = reflect.New(reflect.TypeOf(result.Interface()))
			typeValue.Elem().Set(result)
			return typeValue
		}
	}
}
// typeToObjectTyperName converts a constructor parameter type into the
// TyperDependency used to look it up in the container.
func typeToObjectTyperName(typer reflect.Type) TyperDependency {
	var dep TyperDependency
	dep.Typer = typer
	dep.Name = gwreflect.GetPkgFullName(typer)
	dep.IsPtr = typer.Kind() == reflect.Ptr
	return dep
}
|
// Copyright 2021 Red Hat, Inc. and/or its affiliates
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// Type is the kind of MongoDB topology the resource deploys.
type Type string

const (
	// ReplicaSet deploys MongoDB as a replica set.
	ReplicaSet Type = "ReplicaSet"
)

// Phase is the lifecycle phase reported in the resource status.
type Phase string

const (
	// Running means the deployment is up and serving.
	Running Phase = "Running"
	// Failed means the deployment could not be reconciled.
	Failed Phase = "Failed"
	// Pending means the deployment is still being created or updated.
	Pending Phase = "Pending"
)
// MongoDBCommunitySpec defines the desired state of MongoDB
type MongoDBCommunitySpec struct {
	// Members is the number of members in the replica set
	// +optional
	Members int `json:"members"`
	// Type defines which type of MongoDB deployment the resource should create
	// +kubebuilder:validation:Enum=ReplicaSet
	Type Type `json:"type"`
	// Version defines which version of MongoDB will be used
	Version string `json:"version"`
	// Arbiters is the number of arbiters (each counted as a member) in the replica set
	// +optional
	Arbiters int `json:"arbiters"`
	// FeatureCompatibilityVersion configures the feature compatibility version that will
	// be set for the deployment
	// +optional
	FeatureCompatibilityVersion string `json:"featureCompatibilityVersion,omitempty"`
	// ReplicaSetHorizons Add this parameter and values if you need your database
	// to be accessed outside of Kubernetes. This setting allows you to
	// provide different DNS settings within the Kubernetes cluster and
	// from outside the Kubernetes cluster. The Kubernetes Operator uses split horizon
	// DNS for replica set members. This feature allows communication both
	// within the Kubernetes cluster and from outside Kubernetes.
	// +optional
	ReplicaSetHorizons ReplicaSetHorizonConfiguration `json:"replicaSetHorizons,omitempty"`
	// Security configures security features, such as TLS, and authentication settings for a deployment
	// +required
	Security Security `json:"security"`
	// Users specifies the MongoDB users that should be configured in your deployment
	// +required
	Users []MongoDBUser `json:"users"`
	// StatefulSetConfiguration holds optional overrides that are merged into
	// the operator-generated StatefulSet.
	// +optional
	StatefulSetConfiguration StatefulSetConfiguration `json:"statefulSet,omitempty"`
	// AdditionalMongodConfig is additional configuration that can be passed to
	// each data-bearing mongod at runtime. Uses the same structure as the mongod
	// configuration file: https://docs.mongodb.com/manual/reference/configuration-options/
	// +kubebuilder:validation:Type=object
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +nullable
	AdditionalMongodConfig MongodConfiguration `json:"additionalMongodConfig,omitempty"`
}

// ReplicaSetHorizonConfiguration holds the split horizon DNS settings for
// replica set members.
type ReplicaSetHorizonConfiguration []map[string]string
// CustomRole defines a custom MongoDB role.
type CustomRole struct {
	// The name of the role.
	Role string `json:"role"`
	// The database of the role.
	DB string `json:"db"`
	// The privileges to grant the role.
	Privileges []Privilege `json:"privileges"`
	// An array of roles from which this role inherits privileges.
	// +optional
	Roles []Role `json:"roles"`
	// The authentication restrictions the server enforces on the role.
	// +optional
	AuthenticationRestrictions []AuthenticationRestriction `json:"authenticationRestrictions,omitempty"`
}

// Privilege defines the actions a role is allowed to perform on a given resource.
type Privilege struct {
	Resource Resource `json:"resource"`
	Actions  []string `json:"actions"`
}

// Resource specifies the resources upon which a privilege permits actions.
// See https://docs.mongodb.com/manual/reference/resource-document for more.
type Resource struct {
	// +optional
	DB *string `json:"db,omitempty"`
	// +optional
	Collection *string `json:"collection,omitempty"`
	// +optional
	Cluster bool `json:"cluster,omitempty"`
	// +optional
	AnyResource bool `json:"anyResource,omitempty"`
}

// AuthenticationRestriction specifies a list of IP addresses and CIDR ranges users
// are allowed to connect to or from.
type AuthenticationRestriction struct {
	ClientSource  []string `json:"clientSource"`
	ServerAddress []string `json:"serverAddress"`
}
// StatefulSetConfiguration holds the optional custom StatefulSet
// that should be merged into the operator created one.
type StatefulSetConfiguration struct {
	// +kubebuilder:pruning:PreserveUnknownFields
	SpecWrapper StatefulSetSpecWrapper `json:"spec"`
}

// StatefulSetSpecWrapper is a wrapper around StatefulSetSpec with a custom implementation
// of MarshalJSON and UnmarshalJSON which delegate to the underlying Spec to avoid CRD pollution.
type StatefulSetSpecWrapper struct {
	Spec appsv1.StatefulSetSpec `json:"-"`
}

// DeepCopy returns a new wrapper holding a struct copy of Spec.
// NOTE(review): reference fields nested inside the spec (slices, maps,
// pointers) remain shared with the receiver, so this copy is shallow with
// respect to them — confirm that is intentional.
func (m *StatefulSetSpecWrapper) DeepCopy() *StatefulSetSpecWrapper {
	return &StatefulSetSpecWrapper{
		Spec: m.Spec,
	}
}
// MongodConfiguration holds the optional mongod configuration
// that should be merged with the operator created one.
//
// The CRD generator does not support map[string]interface{}
// on the top level and hence we need to work around this with
// a wrapping struct.
type MongodConfiguration struct {
	Object map[string]interface{} `json:"-"`
}

// DeepCopy returns a deep copy of the configuration; the nested Object map
// is fully cloned via runtime.DeepCopyJSON.
func (m *MongodConfiguration) DeepCopy() *MongodConfiguration {
	return &MongodConfiguration{
		Object: runtime.DeepCopyJSON(m.Object),
	}
}
// MongoDBUser defines a MongoDB user: the database it lives in, its roles,
// and where its password and SCRAM credentials are stored.
type MongoDBUser struct {
	// Name is the username of the user
	Name string `json:"name"`
	// DB is the database the user is stored in. Defaults to "admin"
	// +optional
	DB string `json:"db"`
	// PasswordSecretRef is a reference to the secret containing this user's password
	PasswordSecretRef SecretKeyReference `json:"passwordSecretRef"`
	// Roles is an array of roles assigned to this user
	Roles []Role `json:"roles"`
	// ScramCredentialsSecretName appended by string "scram-credentials" is the name of the secret object created by the mongoDB operator for storing SCRAM credentials
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
	ScramCredentialsSecretName string `json:"scramCredentialsSecretName"`
}

// SecretKeyReference is a reference to the secret containing the user's password
type SecretKeyReference struct {
	// Name is the name of the secret storing this user's password
	Name string `json:"name"`
	// Key is the key in the secret storing this password. Defaults to "password"
	// +optional
	Key string `json:"key"`
}

// Role is the database role this user should have
type Role struct {
	// DB is the database the role can act on
	DB string `json:"db"`
	// Name is the name of the role
	Name string `json:"name"`
}
// Security configures authentication, TLS, and custom roles for a deployment.
type Security struct {
	// +optional
	Authentication Authentication `json:"authentication"`
	// TLS configuration for both client-server and server-server communication
	// +optional
	TLS TLS `json:"tls"`
	// User-specified custom MongoDB roles that should be configured in the deployment.
	// +optional
	Roles []CustomRole `json:"roles,omitempty"`
}

// TLS is the configuration used to set up TLS encryption
type TLS struct {
	Enabled bool `json:"enabled"`
	// Optional configures if TLS should be required or optional for connections
	// +optional
	Optional bool `json:"optional"`
	// CertificateKeySecret is a reference to a Secret containing a private key and certificate to use for TLS.
	// The key and cert are expected to be PEM encoded and available at "tls.key" and "tls.crt".
	// This is the same format used for the standard "kubernetes.io/tls" Secret type, but no specific type is required.
	// +optional
	CertificateKeySecret LocalObjectReference `json:"certificateKeySecretRef"`
	// CaConfigMap is a reference to a ConfigMap containing the certificate for the CA which signed the server certificates
	// The certificate is expected to be available under the key "ca.crt"
	// +optional
	CaConfigMap LocalObjectReference `json:"caConfigMapRef"`
}

// LocalObjectReference is a reference to another Kubernetes object by name.
// TODO: Replace with a type from the K8s API. CoreV1 has an equivalent
// "LocalObjectReference" type but it contains a TODO in its
// description that we don't want in our CRD.
type LocalObjectReference struct {
	Name string `json:"name"`
}
// Authentication configures which authentication mechanisms are enabled and
// whether externally created users are preserved.
type Authentication struct {
	// Modes is an array specifying which authentication methods should be enabled.
	Modes []AuthMode `json:"modes"`
	// IgnoreUnknownUsers set to true will ensure any users added manually (not through the CRD)
	// will not be removed.
	// TODO: defaults will work once we update to v1 CRD.
	// +optional
	// +kubebuilder:default:=true
	// +nullable
	IgnoreUnknownUsers *bool `json:"ignoreUnknownUsers,omitempty"`
}

// AuthMode is the name of a supported authentication mechanism.
// +kubebuilder:validation:Enum=SCRAM;SCRAM-SHA-256;SCRAM-SHA-1
type AuthMode string

// MongoDBCommunityStatus defines the observed state of MongoDB
type MongoDBCommunityStatus struct {
	MongoURI                   string `json:"mongoUri"`
	Phase                      Phase  `json:"phase"`
	CurrentStatefulSetReplicas int    `json:"currentStatefulSetReplicas"`
	CurrentMongoDBMembers      int    `json:"currentMongoDBMembers"`
	Message                    string `json:"message,omitempty"`
}
// MongoDBCommunity is the Schema for the mongodbs API
// (duplicate +kubebuilder:subresource:status marker removed)
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=mongodbcommunity,scope=Namespaced,shortName=mdbc,singular=mongodbcommunity
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Current state of the MongoDB deployment"
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Version of MongoDB server"
type MongoDBCommunity struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   MongoDBCommunitySpec   `json:"spec,omitempty"`
	Status MongoDBCommunityStatus `json:"status,omitempty"`
}

// MongoDBCommunityList contains a list of MongoDB
// +kubebuilder:object:root=true
type MongoDBCommunityList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []MongoDBCommunity `json:"items"`
}
// init registers the MongoDBCommunity API types with the scheme builder so
// that clients and the operator can encode and decode them.
func init() {
	SchemeBuilder.Register(&MongoDBCommunity{}, &MongoDBCommunityList{})
}
|
package miembro
import (
log "commodus/internal/logger"
model "commodus/model"
miembro "commodus/model/miembro"
http "net/http"
schema "github.com/gorilla/schema"
echo "github.com/labstack/echo/v4"
)
// Constants

// systemError is the generic error payload returned to clients whenever an
// unexpected (non-validation) failure occurs.
var systemError model.Error = model.Error{
	Slug:    "internal",
	Message: "Failed to save due to system error...",
}

// Validators

// ListQuery validator: carries the pagination parameters of the list
// endpoint (page is 0-based; count is capped at 500 by validation).
type ListQuery struct {
	Page  int `schema:"page" validate:"gte=0"`
	Count int `schema:"count" validate:"gte=0,lte=500"`
}

// UpdateQuery carries the updatable fields of a Miembro; both are required.
type UpdateQuery struct {
	Name      string `json:"name" validate:"required"`
	Telephone string `json:"telephone" validate:"required"`
}
// HandleCreate handler for creating Miembro's. Binds the request body into
// a new Miembro, persists it, and echoes the saved entity back as JSON.
// Unexpected failures return 500 with the generic system error.
func HandleCreate(c echo.Context) (err error) {
	// Fixed typo in the log message ("recieved" -> "received").
	log.Info("received request to create new miembro")
	m := new(miembro.Miembro)
	if err = c.Bind(m); err != nil {
		// Log the cause (matching HandleList's convention); the client only
		// ever sees the generic system error.
		log.Error("Failed to bind miembro %v", err)
		return c.JSON(http.StatusInternalServerError, systemError)
	}
	if err = m.Save(); err != nil {
		log.Error("Failed to save miembro %v", err)
		return c.JSON(http.StatusInternalServerError, systemError)
	}
	return c.JSON(http.StatusOK, m)
}
// HandleList handler for listing Miembro's. Pagination is taken from the
// page/count query parameters (validated via ListQuery); validation errors
// return 400, anything unexpected returns 500.
func HandleList(c echo.Context) (err error) {
	log.Info("received request to list miembros por favor")
	query := new(ListQuery)
	decoder := schema.NewDecoder()
	r := c.Request()
	if err := r.ParseForm(); err != nil {
		log.Error("Failed to parse form %v", err)
		return c.JSON(http.StatusInternalServerError, systemError)
	}
	if err = decoder.Decode(query, r.Form); err != nil {
		log.Error("Failed to decode form %v", err)
		return c.JSON(http.StatusInternalServerError, systemError)
	}
	if err = c.Validate(query); err != nil {
		return c.JSON(
			http.StatusBadRequest,
			model.Error{
				Slug:    "invalid",
				Message: err.Error(),
			},
		)
	}
	miembros, err := miembro.List(query.Page, query.Count)
	// Bug fix: the error from miembro.List was previously ignored, so a
	// failed lookup still returned 200 with an empty/nil body.
	if err != nil {
		log.Error("Failed to list miembros %v", err)
		return c.JSON(http.StatusInternalServerError, systemError)
	}
	return c.JSON(http.StatusOK, miembros)
}
// HandleUpdate handler for updating members. The member UUID comes from the
// :id path parameter; name and telephone come from the JSON body. Returns
// 204 on success, 400 on validation or update failure.
func HandleUpdate(c echo.Context) (err error) {
	uuid := c.Param("id")
	query := new(UpdateQuery)
	if err = c.Bind(query); err != nil {
		return c.JSON(http.StatusInternalServerError, systemError)
	}
	if err = c.Validate(query); err != nil {
		return c.JSON(
			http.StatusBadRequest,
			model.Error{
				Slug:    "invalid",
				Message: err.Error(),
			},
		)
	}
	// Bug fix: Name was previously populated from query.Telephone, silently
	// overwriting every updated member's name with their phone number.
	m := miembro.Miembro{Uuid: uuid, Telephone: query.Telephone, Name: query.Name}
	if err = m.Update(); err != nil {
		return c.JSON(
			http.StatusBadRequest,
			model.Error{
				Slug:    "invalid",
				Message: err.Error(),
			},
		)
	}
	return c.JSON(http.StatusNoContent, nil)
}
|
package agent
import (
"agent/net"
)
// SSHAgent pumps shell commands from the Commands channel to a remote host
// over a persistent SSH connection driven by Run.
type SSHAgent struct {
	Commands  chan string    // buffered queue of commands awaiting execution
	Client    *net.SSHClient // underlying SSH connection
	quit_chan chan bool      // signals the Run loop to exit
}
// New builds an SSHAgent that connects to host as user with the given
// private key, with its command queue ready for Send/Run.
func New(user string, privateKey string, host string) *SSHAgent {
	a := &SSHAgent{
		quit_chan: make(chan bool),
		Client:    net.NewSSHClient(user, privateKey, host),
	}
	a.init()
	return a
}
// init (re)creates the command queue with a fixed buffer of 100 entries;
// called from New and again from Shutdown so the agent can be restarted.
func (a *SSHAgent) init() {
	a.Commands = make(chan string, 100)
}
// Send queues cmd for execution. A full buffer is treated as a sign the
// remote end is stuck, so the agent shuts itself down instead of blocking.
func (a *SSHAgent) Send(cmd string) {
	// NOTE(review): the cap/len check and the send below are not atomic, so
	// concurrent Send calls could still block on a just-filled channel —
	// confirm single-producer usage.
	if cap(a.Commands) == len(a.Commands) {
		//buffer full, the remote server has something wrong
		a.Shutdown()
	} else {
		a.Commands <- cmd
	}
}
// Shutdown stops the Run loop, closes the SSH connection, and resets the
// command queue so the agent can be run again.
func (a *SSHAgent) Shutdown() {
	// NOTE(review): this send blocks until Run's select receives it;
	// calling Shutdown while Run is not active would hang — verify call
	// sites.
	a.quit_chan <- true
	a.Client.Close()
	a.init()
}
// Run starts a goroutine that executes queued commands one at a time over
// the SSH client, passing each command's output and error to callback,
// until Shutdown signals quit_chan.
func (a *SSHAgent) Run(callback func(output string, err error)) {
	go func() {
		for {
			select {
			case <-a.quit_chan:
				return
			case cmd := <-a.Commands:
				result, err := a.Client.Run(cmd)
				callback(result, err)
			}
		}
	}()
}
|
package rcl
import (
"math"
"sort"
)
// Pos is offset in bytes in current file
type Pos int

// Position is the human-readable file/line/column location for a Pos.
type Position struct {
	File   string
	Offset int // offset in bytes, pos from 0
	Line   int // line number, pos from 1
	Column int // column number
}

// File maps byte offsets to line/column positions for one source file.
type File struct {
	name  string
	lines []int // offset of the first character for each line
}
// Position converts byte offset p into a file/line/column Position.
// Line and Column are 1-based; Offset is 0-based.
func (f *File) Position(p Pos) Position {
	offset := int(p)
	// Find the first line whose starting offset is beyond p; the line
	// containing p is the one just before it.
	i := sort.Search(len(f.lines), func(i int) bool {
		return f.lines[i] > offset
	})
	// Bug fix: the original used f.lines[i] — the start of the NEXT line —
	// which made the line number one too large, the column negative, and
	// panicked (index out of range) for any offset on the final line,
	// where i == len(f.lines). The line start we need is f.lines[i-1].
	line := i
	column := offset + 1
	if i > 0 {
		column = offset - f.lines[i-1] + 1
	}
	return Position{
		File:   f.name,
		Offset: offset,
		Line:   line,
		Column: column,
	}
}
// Node is the interface implemented by every AST node.
type Node interface {
	Pos() Pos                     // Pos is start position
	End() Pos                     // End is end position. The actual range is [Pos, End), value of range is +1 of last character in token.
	Accept(visitor Visitor) error // Accept should call corresponding visitor method based on node type
}

// baseNode carries the [pos, end) source range shared by all node types.
type baseNode struct {
	pos int
	end int
}

// Pos returns the node's start offset.
func (b *baseNode) Pos() Pos {
	return Pos(b.pos)
}

// End returns the offset one past the node's last character.
func (b *baseNode) End() Pos {
	return Pos(b.end)
}
// Null is the null literal node.
type Null struct {
	baseNode
}

// Accept dispatches to visitor.VisitNull.
func (n *Null) Accept(visitor Visitor) error {
	return visitor.VisitNull(n)
}

// Bool is a boolean literal node.
type Bool struct {
	baseNode
	Val bool
}

// Accept dispatches to visitor.VisitBool.
func (b *Bool) Accept(visitor Visitor) error {
	return visitor.VisitBool(b)
}
// NumberType discriminates how a Number's 64-bit payload is interpreted.
type NumberType byte

const (
	NumberTypeUnknown NumberType = iota
	NumberTypeInt
	NumberTypeDouble
)

// String returns the lowercase name of the number type. As with the
// original slice lookup, values outside the declared constants panic with
// an index-out-of-range error.
func (t NumberType) String() string {
	var names = [...]string{
		NumberTypeUnknown: "unknown",
		NumberTypeInt:     "int",
		NumberTypeDouble:  "double",
	}
	return names[t]
}
// Number is a numeric literal. Val holds either the integer itself or, for
// doubles, the raw IEEE-754 bit pattern (see Double).
type Number struct {
	baseNode
	Val  int64
	Type NumberType
}

// Accept dispatches to visitor.VisitNumber.
func (n *Number) Accept(visitor Visitor) error {
	return visitor.VisitNumber(n)
}

// Int returns the integer value; panics if the node is not an int.
func (n *Number) Int() int64 {
	if n.Type != NumberTypeInt {
		panic("number type is not int but " + n.Type.String())
	}
	return n.Val
}

// Double returns the float value reconstructed from the IEEE-754 bits
// stored in Val; panics if the node is not a double.
func (n *Number) Double() float64 {
	if n.Type != NumberTypeDouble {
		panic("number type is not double but " + n.Type.String())
	}
	return math.Float64frombits(uint64(n.Val))
}
// String is a string literal node.
type String struct {
	baseNode
	Val string
}

// Accept dispatches to visitor.VisitString.
func (s *String) Accept(visitor Visitor) error {
	return visitor.VisitString(s)
}

// Array is an ordered list of values.
type Array struct {
	baseNode
	Values []Node
}

// Accept dispatches to visitor.VisitArray.
func (a *Array) Accept(visitor Visitor) error {
	return visitor.VisitArray(a)
}

// Object is a key/value mapping; Keys[i] corresponds to Values[i].
type Object struct {
	baseNode
	Keys   []*String
	Values []Node
}

// Accept dispatches to visitor.VisitObject.
func (o *Object) Accept(visitor Visitor) error {
	return visitor.VisitObject(o)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package printpreview provides support for controlling Chrome print preview
// directly through the UI.
package printpreview
import (
"context"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/event"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/chrome/uiauto/state"
"chromiumos/tast/local/input"
)
// Layout represents the layout setting in Chrome print preview.
type Layout int

const (
	// Portrait represents the portrait layout setting.
	Portrait Layout = iota
	// Landscape represents the landscape layout setting.
	Landscape
)

// PrintPreviewNode is the node for the top-level dialog. It is used as the
// ancestor finder when scoping searches to the preview window.
var PrintPreviewNode *nodewith.Finder = nodewith.Name("Print").Role(role.Window).ClassName("RootView")
// Print sets focus on the print button in Chrome print preview and injects the
// ENTER key to start printing. This is more reliable than clicking the print
// button since notifications often block it from view.
func Print(ctx context.Context, tconn *chrome.TestConn) error {
	ui := uiauto.New(tconn)
	button := nodewith.Name("Print").Role(role.Button)
	focus := uiauto.Combine("find and focus print button",
		ui.WithTimeout(10*time.Second).WaitUntilExists(button),
		ui.WithTimeout(10*time.Second).FocusAndWait(button),
	)
	if err := focus(ctx); err != nil {
		return err
	}
	kb, err := input.Keyboard(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get the keyboard")
	}
	defer kb.Close()
	// With the button focused, ENTER triggers it regardless of overlays.
	if err := kb.Accel(ctx, "enter"); err != nil {
		return errors.Wrap(err, "failed to type enter")
	}
	return nil
}
// SelectPrinter interacts with Chrome print preview to select the printer with
// the given printerName. It expands the destination drop-down, opens the
// "See more destinations" dialog, and clicks the matching printer entry.
func SelectPrinter(ctx context.Context, tconn *chrome.TestConn, printerName string) error {
	// Find and expand the destination list. The exact name may change based on which
	// printer was previously selected, but it will always start with "Destination ".
	dataList := nodewith.NameStartingWith("Destination ").Role(role.PopUpButton)
	ui := uiauto.New(tconn)
	if err := uiauto.Combine("find and click destination list",
		ui.WithTimeout(10*time.Second).WaitUntilExists(dataList),
		ui.LeftClick(dataList),
	)(ctx); err != nil {
		return err
	}
	// Find and click the See more... menu item.
	seeMore := nodewith.Name("See more destinations").Role(role.MenuItem)
	if err := uiauto.Combine("find and click See more... menu item",
		ui.WithTimeout(10*time.Second).WaitUntilExists(seeMore),
		ui.LeftClick(seeMore),
	)(ctx); err != nil {
		return err
	}
	// Find and select the printer. First() guards against multiple static
	// text nodes matching the same destination name.
	printerList := nodewith.Name("Print Destinations")
	printer := nodewith.Name(printerName).Role(role.StaticText).Ancestor(printerList).First()
	if err := uiauto.Combine("find and click printer",
		ui.WithTimeout(10*time.Second).WaitUntilExists(printer),
		ui.LeftClick(printer),
	)(ctx); err != nil {
		return err
	}
	return nil
}
// SetLayout interacts with Chrome print preview to change the layout setting to
// the provided layout. The option is chosen with keyboard navigation
// (search+left / search+right) rather than clicking.
func SetLayout(ctx context.Context, tconn *chrome.TestConn, layout Layout) error {
	// Find and expand the layout list.
	layoutList := nodewith.Name("Layout").Role(role.ComboBoxSelect)
	ui := uiauto.New(tconn)
	if err := uiauto.Combine("find and click layout list",
		ui.WithTimeout(10*time.Second).WaitUntilExists(layoutList),
		ui.LeftClick(layoutList),
	)(ctx); err != nil {
		return err
	}
	// Find the landscape layout option to verify the layout list has expanded.
	landscapeOption := nodewith.Name("Landscape").Role(role.ListBoxOption)
	if err := ui.WithTimeout(10 * time.Second).WaitUntilExists(landscapeOption)(ctx); err != nil {
		return errors.Wrap(err, "failed to wait for layout list to expand")
	}
	// Select the desired layout.
	kb, err := input.Keyboard(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get the keyboard")
	}
	defer kb.Close()
	var accelerator string
	// NOTE(review): no default case — a Layout value other than the two
	// declared constants leaves accelerator empty and kb.Accel is called
	// with "" below; confirm callers only pass Portrait or Landscape.
	switch layout {
	case Portrait:
		accelerator = "search+left"
	case Landscape:
		accelerator = "search+right"
	}
	if err := kb.Accel(ctx, accelerator); err != nil {
		return errors.Wrap(err, "failed to type accelerator")
	}
	if err := kb.Accel(ctx, "enter"); err != nil {
		return errors.Wrap(err, "failed to type enter")
	}
	return nil
}
// SetPages interacts with Chrome print preview to set the selected pages.
// It expands the "Pages" list, picks the "Custom" option via the keyboard,
// and types the given page range (e.g. "1-5, 8") into the text field.
func SetPages(ctx context.Context, tconn *chrome.TestConn, pages string) error {
	// Find and expand the pages list.
	pageList := nodewith.Name("Pages").Role(role.ComboBoxSelect)
	ui := uiauto.New(tconn)
	if err := uiauto.Combine("find and click page list",
		ui.WithTimeout(10*time.Second).WaitUntilExists(pageList),
		ui.LeftClick(pageList),
	)(ctx); err != nil {
		return err
	}
	// Find the custom pages option to verify the pages list has expanded.
	customOption := nodewith.Name("Custom").Role(role.ListBoxOption)
	if err := ui.WithTimeout(10 * time.Second).WaitUntilExists(customOption)(ctx); err != nil {
		return errors.Wrap(err, "failed to wait for pages list to expand")
	}
	// Select "Custom" and set the desired page range.
	kb, err := input.Keyboard(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get the keyboard")
	}
	defer kb.Close()
	if err := kb.Accel(ctx, "search+right"); err != nil {
		// Fixed error message: the key typed here is search+right, not end.
		return errors.Wrap(err, "failed to type search+right")
	}
	if err := kb.Accel(ctx, "enter"); err != nil {
		return errors.Wrap(err, "failed to type enter")
	}
	// Wait for the custom pages text field to appear and become focused (this
	// happens automatically).
	textField := nodewith.Name("e.g. 1-5, 8, 11-13").Role(role.TextField).State(state.Focused, true)
	if err := ui.WithTimeout(10 * time.Second).WaitUntilExists(textField)(ctx); err != nil {
		return errors.Wrap(err, "failed to find custom pages text field")
	}
	if err := kb.Type(ctx, pages); err != nil {
		return errors.Wrap(err, "failed to type pages")
	}
	return nil
}
// WaitForPrintPreview waits for Print Preview to finish loading after it's
// initially opened.
func WaitForPrintPreview(tconn *chrome.TestConn) uiauto.Action {
	ui := uiauto.New(tconn)
	loading := nodewith.Name("Loading preview")
	failed := nodewith.Name("Print preview failed")
	destination := nodewith.Name("Destination").Role(role.StaticText).Ancestor(PrintPreviewNode)
	noop := func(context.Context) error { return nil }
	return uiauto.Combine("wait for Print Preview to finish loading",
		uiauto.NamedAction("wait for Print Preview to appear", ui.WithTimeout(10*time.Second).WaitUntilExists(destination)),
		// "Loading preview" may appear only briefly; if the preview finished
		// before we looked, the WaitUntilExists failure is swallowed via
		// IfSuccessThen with a no-op success action so the test does not fail.
		uiauto.IfSuccessThen(ui.WithTimeout(5*time.Second).WaitUntilExists(loading), noop),
		// Once the loading text disappears, the preview has finished loading.
		ui.WithTimeout(30*time.Second).WaitUntilGone(loading),
		ui.Gone(failed),
	)
}
// ExpandMoreSettings expands the "More settings" section of the print
// settings window. Does nothing if the section is already expanded.
func ExpandMoreSettings(ctx context.Context, tconn *chrome.TestConn) error {
	ui := uiauto.New(tconn)
	moreSettings := nodewith.Name("More settings").Role(role.Button)
	advancedSettings := nodewith.Name("Advanced settings").Role(role.Button)

	// The "Advanced settings" button is only reachable while the section is
	// expanded, so its presence means there is nothing left to do.
	alreadyExpanded, err := ui.IsNodeFound(ctx, advancedSettings)
	if err != nil {
		return err
	}
	if alreadyExpanded {
		return nil
	}

	// Otherwise expand the section by activating the "More settings" button
	// and waiting for its Expanded event to fire.
	return uiauto.Combine("find and click more settings button",
		ui.WithTimeout(10*time.Second).WaitUntilExists(moreSettings),
		ui.EnsureFocused(moreSettings),
		ui.WaitForEvent(moreSettings, event.Expanded, ui.DoDefault(moreSettings)),
	)(ctx)
}
|
package main

import (
	"fmt"
	"os"

	"github.com/carlpett/winlsa"
)

// main enumerates all Windows logon sessions and prints the session data
// (logon id, type, user, session and SID) for each one.
func main() {
	// Dot import replaced with a regular import: dot imports hide where
	// identifiers come from and are discouraged outside of tests.
	luids, err := winlsa.GetLogonSessions()
	if err != nil {
		fmt.Println("GetLogonSessions:", err)
		os.Exit(1)
	}
	for _, luid := range luids {
		sd, err := winlsa.GetLogonSessionData(&luid)
		if err != nil {
			fmt.Println("LsaGetLogonSessionData:", err)
			os.Exit(1)
		}
		fmt.Printf("logonid: %v\nlogontype: %v (%d)\nusername: %s\nsession: %v\nsid: %s\n\n", luid, sd.LogonType, sd.LogonType, sd.UserName, sd.Session, sd.Sid)
	}
}
|
package main
import (
"encoding/json"
_ "encoding/json"
"fmt"
gorilla "github.com/gorilla/schema"
//"github.com/json-iterator/go"
"log"
"net/http"
_ "net/http/pprof"
)
// decoder converts submitted url.Values form data into Go structs
// (gorilla/schema); shared package-wide so the struct metadata cache is reused.
var decoder = gorilla.NewDecoder()
// User holds the customer attributes submitted with a promotion request and
// evaluated by the rule* functions below.
type User struct {
	Years int // years the account has existed
	Balance float32 // current account balance
	Rating float32 // customer rating score
	Age int // customer age in years
	AccountType string // one of "Blue", "Gold" or "Platinum"
}
// main starts the promotions HTTP service on :8080 with a single endpoint.
func main() {
	fmt.Println("Service started!")

	// Register the promotions handler on a dedicated mux.
	mux := http.NewServeMux()
	mux.HandleFunc("/Promotions", getPromotions)

	//go func() {
	//	log.Println(http.ListenAndServe("localhost:8080", nil))
	//}()

	// ListenAndServe always returns a non-nil error, so log.Fatal fires
	// only when the listener fails.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
// getPromotions handles requests on /Promotions: it decodes the submitted
// form into a User, validates it, evaluates every promotion rule and writes
// the names of all matching promotions as a JSON array. Invalid input yields
// 400 Bad Request.
func getPromotions(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		http.Error(w, "Bad Input", http.StatusBadRequest)
		return
	}

	var inputUser User
	if err := decoder.Decode(&inputUser, r.Form); err != nil {
		http.Error(w, "Bad Input", http.StatusBadRequest)
		return
	}
	if !validateUser(inputUser) {
		http.Error(w, "Bad Input", http.StatusBadRequest)
		return
	}

	// The rules are independent; a user can receive several promotions.
	var results []string
	if ruleMillennial(inputUser) {
		results = append(results, "Millennial Madness")
	}
	if ruleOldies(inputUser) {
		results = append(results, "Golden Oldies")
	}
	if ruleLoyalty(inputUser) {
		results = append(results, "Loyalty Bonus")
	}
	if ruleValued(inputUser) {
		results = append(results, "Valued Customer")
	}
	if len(results) == 0 {
		results = append(results, "No Promotions!")
	}

	w.Header().Set("Content-Type", "application/json")
	// The encode error was previously discarded; headers are already written
	// at this point, so the best we can do is log the failure.
	if err := json.NewEncoder(w).Encode(results); err != nil {
		log.Printf("encoding promotions response: %v", err)
	}
}
// validateUser reports whether the submitted user data is usable for rule
// evaluation: Years, Age and Rating must be positive and the account type
// must be a known tier. Balance is not validated here.
func validateUser(user User) bool {
	// Return the condition directly instead of if/return true/return false.
	return user.Years > 0 && user.Age > 0 && user.Rating > 0 && isValidAccountType(user.AccountType)
}
// isValidAccountType reports whether category names one of the supported
// account tiers.
func isValidAccountType(category string) bool {
	return category == "Blue" || category == "Gold" || category == "Platinum"
}
// ruleMillennial qualifies customers aged 21-35 (inclusive) who either have
// a rating of at least 600 or a balance above 10000.
func ruleMillennial(user User) bool {
	inAgeBand := 21 <= user.Age && user.Age <= 35
	return inAgeBand && (user.Rating >= 600 || user.Balance > 10000)
}
// ruleOldies qualifies customers aged 65 or older who have a decent rating
// or balance, plus either long tenure or a premium account tier.
func ruleOldies(user User) bool {
	if user.Age < 65 {
		return false
	}
	if user.Rating < 500 && user.Balance <= 5000 {
		return false
	}
	return user.Years >= 10 || user.AccountType == "Gold" || user.AccountType == "Platinum"
}
// ruleLoyalty qualifies customers whose account is more than five years old.
func ruleLoyalty(user User) bool {
	// Return the condition directly instead of if/return true/return false.
	return user.Years > 5
}
// ruleValued qualifies customers in good standing who matched none of the
// other promotion rules.
func ruleValued(user User) bool {
	if !ruleGoodStanding(user) {
		return false
	}
	return !ruleMillennial(user) && !ruleOldies(user) && !ruleLoyalty(user)
}
// ruleGoodStanding reports whether the customer is in good standing: a
// Platinum account, a rating above 500, or a non-negative balance.
// NOTE(review): the Balance >= 0 clause makes this true for nearly every
// account; confirm a stricter threshold wasn't intended.
func ruleGoodStanding(user User) bool {
	switch {
	case user.AccountType == "Platinum":
		return true
	case user.Rating > 500:
		return true
	default:
		return user.Balance >= 0
	}
}
|
package main
import (
"fmt"
"io"
"os"
"github.com/Cloud-Foundations/Dominator/lib/log"
)
// listGroupMethodsSubcommand lists the service methods of the group named by
// args[1], read from the source in args[0], writing one method per line to
// standard output.
// NOTE(review): args is indexed without a length check; the CLI dispatcher
// presumably guarantees two arguments — confirm.
func listGroupMethodsSubcommand(args []string, logger log.DebugLogger) error {
	if err := listGroupSMs(os.Stdout, args[0], args[1], logger); err != nil {
		// Wrap with %w (lowercase message) so callers can unwrap the cause.
		return fmt.Errorf("error listing group methods: %w", err)
	}
	return nil
}
// listGroupSMs writes the service methods of groupname, looked up in the
// database identified by source, one per line to writer.
func listGroupSMs(writer io.Writer, source, groupname string,
	logger log.DebugLogger) error {
	db, err := getDB(source, logger)
	if err != nil {
		return err
	}
	methods, err := db.GetGroupServiceMethods(groupname)
	if err != nil {
		return err
	}
	for _, method := range methods {
		fmt.Fprintln(writer, method)
	}
	return nil
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package optbuilder
import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
)
// buildExport builds an EXPORT statement.
//
// The input query is built in a fresh scope so it cannot reference columns
// from inScope; the file name and each option value are built as scalar
// expressions resolved with type string. The returned scope exposes the
// fixed EXPORT result columns rather than the input query's columns.
func (b *Builder) buildExport(export *tree.Export, inScope *scope) (outScope *scope) {
	// We don't allow the input statement to reference outer columns, so we
	// pass a "blank" scope rather than inScope.
	emptyScope := b.allocScope()
	inputScope := b.buildStmt(export.Query, nil /* desiredTypes */, emptyScope)
	// The destination file is a string-typed scalar expression.
	texpr := emptyScope.resolveType(export.File, types.String)
	fileName := b.buildScalar(
		texpr, emptyScope, nil /* outScope */, nil /* outCol */, nil, /* colRefs */
	)
	options := b.buildKVOptions(export.Options, emptyScope)
	// EXPORT produces a fixed set of summary columns (colinfo.ExportColumns).
	outScope = inScope.push()
	b.synthesizeResultColumns(outScope, colinfo.ExportColumns)
	outScope.expr = b.factory.ConstructExport(
		inputScope.expr.(memo.RelExpr),
		fileName,
		options,
		&memo.ExportPrivate{
			FileFormat: export.FileFormat,
			Columns:    colsToColList(outScope.cols),
			Props:      inputScope.makePhysicalProps(),
		},
	)
	return outScope
}
// buildKVOptions lowers a list of KV options into scalar expressions. Option
// values are resolved as strings; an option without a value becomes a NULL
// of type string.
func (b *Builder) buildKVOptions(opts tree.KVOptions, inScope *scope) memo.KVOptionsExpr {
	result := make(memo.KVOptionsExpr, len(opts))
	for i := range opts {
		result[i].Key = string(opts[i].Key)
		if opts[i].Value == nil {
			result[i].Value = b.factory.ConstructNull(types.String)
			continue
		}
		typed := inScope.resolveType(opts[i].Value, types.String)
		result[i].Value = b.buildScalar(
			typed, inScope, nil /* outScope */, nil /* outCol */, nil, /* colRefs */
		)
	}
	return result
}
|
// Copyright 2017 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package containerd
import (
ctx "context"
"sync"
"time"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
dTypes "github.com/docker/engine-api/types"
dTypesEvents "github.com/docker/engine-api/types/events"
"github.com/sirupsen/logrus"
)
// watcherState holds global close flag, per-container queues for events and
// ignore toggles.
//
// The embedded lock.Mutex guards the events map; every method acquires it
// before reading or mutating the map.
type watcherState struct {
	lock.Mutex

	// eventQueueBufferSize is the capacity used when creating a
	// per-container event channel.
	eventQueueBufferSize int

	// events maps a container ID to the buffered channel consumed by that
	// container's handler goroutine.
	events map[string]chan dTypesEvents.Message
}
// newWatcherState returns an empty watcherState whose per-container event
// queues will be created with the given buffer size.
func newWatcherState(eventQueueBufferSize int) *watcherState {
	ws := &watcherState{
		eventQueueBufferSize: eventQueueBufferSize,
		events:               make(map[string]chan dTypesEvents.Message),
	}
	return ws
}
// enqueueByContainerID starts a handler for this container, if needed, and
// enqueues a copy of the event if it is non-nil. Passing in a nil event will
// only start the handler. These handlers can be reaped via
// watcherState.reapEmpty.
// This parallelism is desirable to respond to events faster; each event might
// require talking to an outside daemon (docker) and a single noisy container
// might starve others.
func (ws *watcherState) enqueueByContainerID(containerID string, e *dTypesEvents.Message) {
	ws.Lock()
	defer ws.Unlock()
	if _, found := ws.events[containerID]; !found {
		// Use the buffer size this watcherState was configured with rather
		// than the identically-named package-level value; the struct field
		// was previously stored but never read.
		q := make(chan dTypesEvents.Message, ws.eventQueueBufferSize)
		ws.events[containerID] = q
		go processContainerEvents(q)
	}
	if e != nil {
		ws.events[containerID] <- *e
	}
}
// handlingContainerID returns whether there is a goroutine already consuming
// events for this id.
func (ws *watcherState) handlingContainerID(id string) bool {
	ws.Lock()
	defer ws.Unlock()
	if _, ok := ws.events[id]; ok {
		return true
	}
	return false
}
// reapEmpty deletes empty queues from the map. This also causes the handler
// goroutines to exit. It is expected to be called periodically to avoid the
// map growing over time.
func (ws *watcherState) reapEmpty() {
	ws.Lock()
	defer ws.Unlock()
	for id, queue := range ws.events {
		if len(queue) > 0 {
			continue
		}
		// Closing the drained channel terminates its handler goroutine.
		close(queue)
		delete(ws.events, id)
	}
}
// syncWithRuntime is used by the daemon to synchronize changes between Docker and
// Cilium. This includes identities, labels, etc.
//
// It lists the currently running containers (via the package-level
// dockerClient, bounded by a 10-second timeout) and, for every container
// that is neither ignored nor already being handled, starts an event handler
// and triggers create handling. It blocks until all spawned handlers return.
func (ws *watcherState) syncWithRuntime() {
	var wg sync.WaitGroup
	timeoutCtx, cancel := ctx.WithTimeout(ctx.Background(), 10*time.Second)
	defer cancel()
	// All: false restricts the listing to running containers.
	cList, err := dockerClient.ContainerList(timeoutCtx, dTypes.ContainerListOptions{All: false})
	if err != nil {
		log.WithError(err).Error("Failed to retrieve the container list")
		return
	}
	for _, cont := range cList {
		if ignoredContainer(cont.ID) {
			continue
		}
		if alreadyHandled := ws.handlingContainerID(cont.ID); !alreadyHandled {
			log.WithFields(logrus.Fields{
				logfields.ContainerID: shortContainerID(cont.ID),
			}).Debug("Found unwatched container")
			wg.Add(1)
			go func(wg *sync.WaitGroup, id string) {
				defer wg.Done()
				ws.enqueueByContainerID(id, nil) // ensure a handler is running for future events
				handleCreateContainer(id, false)
			}(&wg, cont.ID)
		}
	}
	// Wait for all spawned go routines handling container creations to exit
	wg.Wait()
}
|
package odoo
import (
"fmt"
)
// ProcurementRule represents procurement.rule model.
//
// All tags previously spelled the option as "omptempty"; corrected to the
// standard "omitempty" so unset (nil) fields are actually omitted when the
// struct is marshaled for XML-RPC calls.
type ProcurementRule struct {
	LastUpdate             *Time      `xmlrpc:"__last_update,omitempty"`
	Action                 *Selection `xmlrpc:"action,omitempty"`
	Active                 *Bool      `xmlrpc:"active,omitempty"`
	CompanyId              *Many2One  `xmlrpc:"company_id,omitempty"`
	CreateDate             *Time      `xmlrpc:"create_date,omitempty"`
	CreateUid              *Many2One  `xmlrpc:"create_uid,omitempty"`
	Delay                  *Int       `xmlrpc:"delay,omitempty"`
	DisplayName            *String    `xmlrpc:"display_name,omitempty"`
	GroupId                *Many2One  `xmlrpc:"group_id,omitempty"`
	GroupPropagationOption *Selection `xmlrpc:"group_propagation_option,omitempty"`
	Id                     *Int       `xmlrpc:"id,omitempty"`
	LocationId             *Many2One  `xmlrpc:"location_id,omitempty"`
	LocationSrcId          *Many2One  `xmlrpc:"location_src_id,omitempty"`
	Name                   *String    `xmlrpc:"name,omitempty"`
	PartnerAddressId       *Many2One  `xmlrpc:"partner_address_id,omitempty"`
	PickingTypeId          *Many2One  `xmlrpc:"picking_type_id,omitempty"`
	ProcureMethod          *Selection `xmlrpc:"procure_method,omitempty"`
	Propagate              *Bool      `xmlrpc:"propagate,omitempty"`
	PropagateWarehouseId   *Many2One  `xmlrpc:"propagate_warehouse_id,omitempty"`
	RouteId                *Many2One  `xmlrpc:"route_id,omitempty"`
	RouteSequence          *Int       `xmlrpc:"route_sequence,omitempty"`
	Sequence               *Int       `xmlrpc:"sequence,omitempty"`
	WarehouseId            *Many2One  `xmlrpc:"warehouse_id,omitempty"`
	WriteDate              *Time      `xmlrpc:"write_date,omitempty"`
	WriteUid               *Many2One  `xmlrpc:"write_uid,omitempty"`
}
// ProcurementRules represents array of procurement.rule model.
type ProcurementRules []ProcurementRule

// ProcurementRuleModel is the odoo model name.
const ProcurementRuleModel = "procurement.rule"
// Many2One convert ProcurementRule to *Many2One.
// NOTE(review): pr.Id is used unconditionally; callers must ensure the
// record was read with its id populated.
func (pr *ProcurementRule) Many2One() *Many2One {
	return NewMany2One(pr.Id.Get(), "")
}
// CreateProcurementRule creates a new procurement.rule model and returns its id.
func (c *Client) CreateProcurementRule(pr *ProcurementRule) (int64, error) {
	ids, err := c.CreateProcurementRules([]*ProcurementRule{pr})
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	default:
		return -1, nil
	}
}
// CreateProcurementRules creates new procurement.rule models and returns
// their ids. (The comment previously named the singular function.)
func (c *Client) CreateProcurementRules(prs []*ProcurementRule) ([]int64, error) {
	var vv []interface{}
	for _, v := range prs {
		vv = append(vv, v)
	}
	return c.Create(ProcurementRuleModel, vv)
}
// UpdateProcurementRule updates an existing procurement.rule record.
// The record to update is identified by pr.Id.
func (c *Client) UpdateProcurementRule(pr *ProcurementRule) error {
	return c.UpdateProcurementRules([]int64{pr.Id.Get()}, pr)
}
// UpdateProcurementRules updates existing procurement.rule records.
// All records (represented by ids) will be updated by pr values.
func (c *Client) UpdateProcurementRules(ids []int64, pr *ProcurementRule) error {
	return c.Update(ProcurementRuleModel, ids, pr)
}
// DeleteProcurementRule deletes an existing procurement.rule record by id.
func (c *Client) DeleteProcurementRule(id int64) error {
	return c.DeleteProcurementRules([]int64{id})
}
// DeleteProcurementRules deletes existing procurement.rule records by ids.
func (c *Client) DeleteProcurementRules(ids []int64) error {
	return c.Delete(ProcurementRuleModel, ids)
}
// GetProcurementRule gets procurement.rule existing record.
func (c *Client) GetProcurementRule(id int64) (*ProcurementRule, error) {
	prs, err := c.GetProcurementRules([]int64{id})
	if err != nil {
		return nil, err
	}
	if prs == nil || len(*prs) == 0 {
		return nil, fmt.Errorf("id %v of procurement.rule not found", id)
	}
	return &(*prs)[0], nil
}
// GetProcurementRules gets procurement.rule existing records.
func (c *Client) GetProcurementRules(ids []int64) (*ProcurementRules, error) {
	result := &ProcurementRules{}
	err := c.Read(ProcurementRuleModel, ids, nil, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindProcurementRule finds procurement.rule record by querying it with criteria.
func (c *Client) FindProcurementRule(criteria *Criteria) (*ProcurementRule, error) {
	result := &ProcurementRules{}
	if err := c.SearchRead(ProcurementRuleModel, criteria, NewOptions().Limit(1), result); err != nil {
		return nil, err
	}
	if result != nil && len(*result) > 0 {
		return &(*result)[0], nil
	}
	return nil, fmt.Errorf("procurement.rule was not found with criteria %v", criteria)
}
// FindProcurementRules finds procurement.rule records by querying it
// and filtering it with criteria and options.
func (c *Client) FindProcurementRules(criteria *Criteria, options *Options) (*ProcurementRules, error) {
	prs := &ProcurementRules{}
	if err := c.SearchRead(ProcurementRuleModel, criteria, options, prs); err != nil {
		return nil, err
	}
	return prs, nil
}
// FindProcurementRuleIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindProcurementRuleIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(ProcurementRuleModel, criteria, options)
	if err == nil {
		return ids, nil
	}
	// Preserve the historical behavior of returning an empty (non-nil)
	// slice alongside the error.
	return []int64{}, err
}
// FindProcurementRuleId finds record id by querying it with criteria.
func (c *Client) FindProcurementRuleId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(ProcurementRuleModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, fmt.Errorf("procurement.rule was not found with criteria %v and options %v", criteria, options)
	}
	return ids[0], nil
}
|
package controller
import (
"fmt"
"github.com/saxon134/go-utils/saData"
"github.com/saxon134/go-utils/saData/saHit"
"github.com/saxon134/go-utils/saOrm"
"github.com/saxon134/workflow/api"
"github.com/saxon134/workflow/db"
"github.com/saxon134/workflow/db/chat"
"github.com/saxon134/workflow/db/job"
"github.com/saxon134/workflow/db/user"
"github.com/saxon134/workflow/errs"
"github.com/saxon134/workflow/interface/ioJob"
"strings"
)
// JobList returns the jobs visible to the calling account. Managers (role
// list containing "1" or "2") can see all jobs and filter by user; other
// users only see jobs they lead. When a demand id is supplied, all of that
// demand's jobs are returned without paging.
func JobList(c *api.Context) (resp *api.Response, err error) {
	var in = new(ioJob.ListRequest)
	c.Bind(in)

	var usrObj = new(user.TblUser)
	db.MySql.Table(user.TBNUser).Where("id = ?", c.Account.Id).First(usrObj)
	if usrObj.Id <= 0 {
		return nil, errs.New(errs.ErrorUnauthorized)
	}

	// Management-role check.
	var isManager = strings.Contains(usrObj.Roles, "1") || strings.Contains(usrObj.Roles, "2")

	var ary = make([]*job.TblJob, 0, c.Paging.Limit)
	var query = db.MySql.Table(job.TBNJob).Order("status asc, p asc, id desc").Limit(c.Paging.Limit)
	if in.DemandId > 0 {
		// Previously this branch executed Find() immediately, issuing a
		// redundant query before the remaining filters were applied (its
		// result was overwritten below); now it only adds the condition.
		query.Where("demand_id = ?", in.DemandId)
	}

	// Non-managers may only list jobs they lead.
	if isManager == false {
		query.Where("leader = ?", c.Account.Id)
	}
	// Managers may filter by a specific leader.
	if isManager && in.UserId > 0 {
		query.Where("leader = ?", in.UserId)
	}
	if in.Keyword != "" {
		// Parameterized LIKE instead of fmt.Sprintf concatenation, which was
		// open to SQL injection through the user-supplied keyword.
		query.Where("title like ?", "%"+in.Keyword+"%")
	}
	if in.Status != 0 {
		query.Where("status = ?", in.Status)
	} else {
		query.Where("status = 1")
	}

	// With a demand id, return everything at once; otherwise page.
	if in.DemandId > 0 {
		err = query.Find(&ary).Error
		if db.MySql.IsError(err) {
			return nil, errs.New(err)
		}
	} else {
		if c.Paging.Total <= 0 {
			// Capture the Count error; previously a stale err value was
			// checked here, so Count failures went unnoticed.
			err = query.Count(&c.Paging.Total).Error
			if db.MySql.IsError(err) {
				return nil, errs.New(err)
			}
		}
		if c.Paging.Total <= int64(c.Paging.Offset) {
			return &api.Response{Result: nil}, nil
		}
		err = query.Find(&ary).Error
		if db.MySql.IsError(err) {
			return nil, errs.New(err)
		}
	}

	// Assemble the response items.
	var resAry = make([]*ioJob.ListItem, 0, len(ary))
	for _, v := range ary {
		resAry = append(resAry, &ioJob.ListItem{TblJob: v})
	}
	return &api.Response{Result: resAry}, nil
}
// JobSave creates or updates a job record bound from the request body.
// Content is required; a new job is assigned the caller as leader and a
// default status of 1. When a job is completed, a chat record is appended to
// its demand.
func JobSave(c *api.Context) (resp *api.Response, err error) {
	var in = new(job.TblJob)
	c.Bind(in)
	if in.Content == "" {
		return nil, errs.New(errs.ErrorParams)
	}
	// New record: the creator becomes the leader; status defaults to 1.
	if in.Id <= 0 {
		in.Leader = c.Account.Id
		in.CreateAt = saOrm.Now()
		if in.Status == 0 {
			in.Status = 1
		}
	}
	// Derive the project id from the demand when it is missing.
	if in.ProjectId <= 0 && in.DemandId > 0 {
		db.MySql.Raw("select project_id from demand where id = ?", in.DemandId).Scan(&in.ProjectId)
		if in.ProjectId <= 0 {
			return nil, errs.New("缺少项目ID")
		}
	}
	// Within the same demand, job content (names) must be unique among
	// non-deleted jobs.
	if in.DemandId > 0 {
		var existId int64
		db.MySql.Raw("select id from job where demand_id = ? and content = ? and status <> -1", in.DemandId, in.Content).Scan(&existId)
		if existId > 0 && existId != in.Id {
			return nil, errs.New(errs.ErrorExisted)
		}
	}
	// Status-transition check: a finished job (status 2) may not change status.
	var jobStatus int
	if in.Id > 0 {
		db.MySql.Raw("select status from job where id = ?", in.Id).Scan(&jobStatus)
		if jobStatus == 2 && in.Status != 2 {
			return nil, errs.New("任务已完成,状态不可变更")
		}
	}
	// When completing a job, record a chat message on its demand.
	// NOTE(review): && binds tighter than ||, so this condition parses as
	// (Id>0 && DemandId>0 && Status==2 && jobStatus==1) || jobStatus==-1.
	// If "(jobStatus == 1 || jobStatus == -1)" was intended, parentheses are
	// missing — confirm before changing.
	if in.Id > 0 && in.DemandId > 0 && in.Status == 2 && jobStatus == 1 || jobStatus == -1 {
		var demandStatus int
		db.MySql.Raw("select status from demand where id = ?", in.DemandId).Scan(&demandStatus)
		// Chat content: "completed job <truncated title>" (first 15 chars,
		// with "..." appended when the title is longer).
		var content = fmt.Sprintf("完成任务【%s】", saData.SubStr(in.Content, 0, 15)+saHit.Str(saData.StrLen(in.Content) > 15, "...", ""))
		var obj = &chat.TblChat{
			CreateAt:     saOrm.Now(),
			Creator:      c.Account.Id,
			DemandId:     in.DemandId,
			Content:      content,
			DemandStatus: demandStatus,
			Status:       1,
			Level:        1,
		}
		db.MySql.Table(chat.TBNChat).Save(obj)
	}
	err = db.MySql.Save(in).Error
	if err != nil {
		return nil, errs.New(err)
	}
	return &api.Response{Result: in}, nil
}
|
/*
Copyright 2020 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helm
import (
"bufio"
"bytes"
"testing"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/deploy/types"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
// TestParseReleaseManifests verifies that parseReleaseManifests yields one
// artifact per manifest document, using the document's metadata.namespace
// when present and falling back to the helm namespace ("testNamespace")
// otherwise; unparseable input yields nil.
func TestParseReleaseManifests(t *testing.T) {
	tests := []struct {
		description string
		yaml        []byte
		expected    []types.Artifact
	}{
		{
			description: "parse valid release info yaml with single artifact with namespace",
			yaml: []byte(`# Source: skaffold-helm/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: skaffold-helm-skaffold-helm
  namespace: test
  labels:
    app: skaffold-helm
    chart: skaffold-helm-0.1.0
    release: skaffold-helm
    heritage: Tiller
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: nginx
  selector:
    app: skaffold-helm
    release: skaffold-helm`),
			expected: []types.Artifact{{Namespace: "test"}},
		},
		{
			description: "parse valid release info yaml with single artifact without namespace sets helm namespace",
			yaml: []byte(`# Source: skaffold-helm/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: skaffold-helm-skaffold-helm
  labels:
    app: skaffold-helm
    chart: skaffold-helm-0.1.0
    release: skaffold-helm
    heritage: Tiller
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: nginx
  selector:
    app: skaffold-helm
    release: skaffold-helm`),
			expected: []types.Artifact{{
				Namespace: "testNamespace",
			}},
		},
		{
			description: "parse valid release info yaml with multiple artifacts",
			yaml: []byte(`# Source: skaffold-helm/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: skaffold-helm-skaffold-helm
  labels:
    app: skaffold-helm
    chart: skaffold-helm-0.1.0
    release: skaffold-helm
    heritage: Tiller
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: nginx
  selector:
    app: skaffold-helm
    release: skaffold-helm
---
# Source: skaffold-helm/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: skaffold-helm-skaffold-helm
  namespace: test
  labels:
    app: skaffold-helm
    chart: skaffold-helm-0.1.0
    release: skaffold-helm
    heritage: Tiller
  annotations:
spec:
  rules:
    - http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: skaffold-helm-skaffold-helm
                port:
                  number: 80`),
			expected: []types.Artifact{{Namespace: "testNamespace"}, {Namespace: "test"}},
		},
		{
			description: "parse invalid release info yaml",
			yaml:        []byte(`invalid release info`),
			expected:    nil,
		},
	}
	// Feed each case's raw YAML through a reader and compare the parsed
	// artifacts, ignoring the decoded runtime objects.
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			r := bufio.NewReader(bytes.NewBuffer(test.yaml))
			actual := parseReleaseManifests("testNamespace", r)
			t.CheckDeepEqual(test.expected, actual, cmpopts.IgnoreFields(types.Artifact{}, "Obj"))
		})
	}
}
|
package google
import (
"go.opencensus.io/trace"
"github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/tracing"
)
// Config carries the settings used to construct the Google tracer installed
// by NewTracer.
type Config struct {
	ProjectID string // GCP project the tracer reports to
	MetricPrefix string // prefix passed through to the tracer
	Fraction float64 // sampling fraction for trace.ProbabilitySampler
}
// NewTracer validates the environment, installs a Google tracer configured
// from c as the global tracing.T, and starts it. It fails with an invalid-
// argument error when GOOGLE_APPLICATION_CREDENTIALS is not usable.
func (c *Config) NewTracer() error {
	if !envIsSet() {
		return errors.ThrowInvalidArgument(nil, "GOOGL-sdh3a", "env not properly set, GOOGLE_APPLICATION_CREDENTIALS is misconfigured or missing")
	}
	// Probability sampling: roughly Fraction of traces are sampled.
	tracing.T = &Tracer{projectID: c.ProjectID, metricPrefix: c.MetricPrefix, sampler: trace.ProbabilitySampler(c.Fraction)}
	return tracing.T.Start()
}
|
package main
import (
//"encoding/binary"
//"fmt"
"math/rand"
)
// _Fbase_107__rand_init seeds math/rand with a generated 64-bit value.
// NOTE(review): generated code — _FgenRand_int64__ is presumably defined
// elsewhere in the generated output.
func _Fbase_107__rand_init() {
	rand.Seed(_FgenRand_int64__())
} // _Fbase_107__rand_init
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cellular
import (
"context"
"time"
"chromiumos/tast/common/shillconst"
"chromiumos/tast/common/testexec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/cellular"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/modemmanager"
"chromiumos/tast/local/shill"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// autoconnectTestParams configures one parameterized variant of the test.
type autoconnectTestParams struct {
	// autoconnectState is the AutoConnect value applied to the cellular
	// service before suspend; after resume the service is expected to be
	// Online when true and Idle when false.
	autoconnectState bool
}
// init registers both parameterized variants of the suspend/resume
// autoconnect test (autoconnect enabled and disabled).
func init() {
	testing.AddTest(&testing.Test{
		Func:         ShillCellularSuspendResumeAutoconnect,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verifies that cellular maintains autoconnect state around Suspend/Resume",
		Contacts: []string{
			"danielwinkler@google.com",
			"chromeos-cellular-team@google.com",
		},
		Attr:    []string{"group:cellular", "cellular_unstable", "cellular_sim_active"},
		Fixture: "cellular",
		Timeout: 2 * time.Minute,
		// TODO(b/217106877): Skip on herobrine as S/R is unstable
		HardwareDeps: hwdep.D(hwdep.SkipOnPlatform("herobrine")),
		SoftwareDeps: []string{"chrome"},
		Params: []testing.Param{{
			Name: "enabled",
			Val: autoconnectTestParams{
				autoconnectState: true,
			},
		}, {
			Name: "disabled",
			Val: autoconnectTestParams{
				autoconnectState: false,
			},
		}},
	})
}
// ShillCellularSuspendResumeAutoconnect sets the cellular service's
// AutoConnect property per the test parameters, suspends and resumes the
// device, logs into Chrome, and verifies the service ends up in the state
// implied by the autoconnect setting (Online when enabled, Idle otherwise).
func ShillCellularSuspendResumeAutoconnect(ctx context.Context, s *testing.State) {
	// Keep the original context for cleanup while ctx is shortened below.
	cleanupCtx := ctx
	params := s.Param().(autoconnectTestParams)
	// Expected final service state keyed by the autoconnect setting.
	expectedStates := map[bool]string{true: shillconst.ServiceStateOnline,
		false: shillconst.ServiceStateIdle}
	if _, err := modemmanager.NewModemWithSim(ctx); err != nil {
		s.Fatal("Could not find MM dbus object with a valid sim: ", err)
	}
	helper, err := cellular.NewHelper(ctx)
	if err != nil {
		s.Fatal("Failed to create cellular.Helper: ", err)
	}
	// Disable Ethernet and/or WiFi if present and defer re-enabling.
	// Shill documentation shows that autoconnect will only be used if there
	// is no other service available, so it is necessary to only have
	// cellular available.
	if enableFunc, err := helper.Manager.DisableTechnologyForTesting(ctx, shill.TechnologyEthernet); err != nil {
		s.Fatal("Unable to disable Ethernet: ", err)
	} else if enableFunc != nil {
		// Reserve time at the end of the test for re-enabling Ethernet.
		newCtx, cancel := ctxutil.Shorten(ctx, shill.EnableWaitTime)
		defer cancel()
		defer enableFunc(ctx)
		ctx = newCtx
	}
	if enableFunc, err := helper.Manager.DisableTechnologyForTesting(ctx, shill.TechnologyWifi); err != nil {
		s.Fatal("Unable to disable Wifi: ", err)
	} else if enableFunc != nil {
		// Reserve time at the end of the test for re-enabling WiFi.
		newCtx, cancel := ctxutil.Shorten(ctx, shill.EnableWaitTime)
		defer cancel()
		defer enableFunc(ctx)
		ctx = newCtx
	}
	// Enable and get service to set autoconnect based on test parameters.
	if _, err := helper.Enable(ctx); err != nil {
		s.Fatal("Failed to enable modem: ", err)
	}
	if _, err := helper.SetServiceAutoConnect(ctx, params.autoconnectState); err != nil {
		s.Fatal("Failed to enable AutoConnect: ", err)
	}
	// Request suspend for 10 seconds.
	if err := testexec.CommandContext(ctx, "powerd_dbus_suspend", "--suspend_for_sec=10").Run(); err != nil {
		s.Fatal("Failed to perform system suspend: ", err)
	}
	// The reconnection will not occur from the login screen, so we log in.
	cr, err := chrome.New(ctx)
	if err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	defer cr.Close(cleanupCtx)
	// chrome.Chrome.Close() will not log the user out.
	defer upstart.RestartJob(ctx, "ui")
	if err := helper.WaitForEnabledState(ctx, true); err != nil {
		s.Fatal("Cellular not enabled after resume")
	}
	service, err := helper.FindServiceForDevice(ctx)
	if err != nil {
		s.Fatal("Unable to find Cellular Service for Device: ", err)
	}
	// Ensure service's state matches expectations.
	if err := service.WaitForProperty(ctx, shillconst.ServicePropertyState, expectedStates[params.autoconnectState], 60*time.Second); err != nil {
		s.Fatal("Failed to get service state: ", err)
	}
}
|
package util
import (
"bytes"
"encoding/json"
"fmt"
"math"
"math/rand"
"os"
"strconv"
"time"
"reflect"
)
// Default bounds used by the random helpers below when the caller supplies
// no explicit limit or length.
const (
	defaultRandomInt64Limit = int64(10000)
	defaultRandomInt32Limit = int32(10000)
	defaultRandomStringLength = int(6)
)
// MinInt returns the smaller of a and b.
// The previous implementation round-tripped through float64, which silently
// corrupts values above 2^53; a plain comparison is exact for all ints.
func MinInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// MinInts returns the minimum of a and any additional values.
func MinInts(a int, others ...int) int {
	minSoFar := a
	for _, candidate := range others {
		if candidate < minSoFar {
			minSoFar = candidate
		}
	}
	return minSoFar
}
// MaxInt returns the larger of a and b.
// The previous implementation round-tripped through float64, which silently
// corrupts values above 2^53; a plain comparison is exact for all ints.
func MaxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}
// MinInt64 returns the smaller of a and b.
// The previous implementation round-tripped through float64, which silently
// corrupts values above 2^53; a plain comparison is exact for all int64s.
func MinInt64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}
// MaxInt64 returns the larger of a and b.
// The previous implementation round-tripped through float64, which silently
// corrupts values above 2^53; a plain comparison is exact for all int64s.
func MaxInt64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}
// SprintObj returns obj marshaled as JSON. On marshaling failure the error
// is printed and an empty string is returned (original best-effort behavior
// preserved).
func SprintObj(obj interface{}) string {
	// Local renamed from "bytes", which shadowed the imported bytes package.
	data, err := json.Marshal(obj)
	if err != nil {
		fmt.Println("err while marshaling:", err)
	}
	return string(data)
}
// GetRandomInt64 : helper function to get random int64
// len(args) == 0: get random between(inclusive) 1 ~ defaultRandomInt64Limit
// len(args) == 1: get random between(inclusive) 1 ~ args[0] (can be negative)
// len(args) >= 2: get random between(inclusive) args[0] ~ args[1]
// NOTE(review): the generated value actually lies in (minV, maxV] — the
// lower bound is exclusive; documented wording kept for compatibility.
func GetRandomInt64(args ...int64) int64 {
	rand.Seed(time.Now().UTC().UnixNano())
	var minV int64
	var maxV int64
	switch {
	case len(args) == 0:
		maxV = defaultRandomInt64Limit
	case len(args) == 1:
		minV = int64(math.Min(0, float64(args[0])))
		maxV = int64(math.Max(0, float64(args[0])))
	default:
		minV = int64(math.Min(float64(args[0]), float64(args[1])))
		maxV = int64(math.Max(float64(args[0]), float64(args[1])))
	}
	// Guard against a zero span: rand.Int63n panics when its argument is
	// <= 0, so e.g. GetRandomInt64(5, 5) or GetRandomInt64(0) used to panic.
	if maxV == minV {
		return maxV
	}
	return rand.Int63n(maxV-minV) + minV + 1
}
// GetRandomInt32 : helper function to get random int32
// len(args) == 0: get random between(inclusive) 1 ~ defaultRandomInt32Limit
// len(args) == 1: get random between(inclusive) 1 ~ args[0] (can be negative)
// len(args) >= 2: get random between(inclusive) args[0] ~ args[1] (inclusive)
// NOTE(review): the generated value actually lies in (minV, maxV] — the
// lower bound is exclusive; documented wording kept for compatibility.
func GetRandomInt32(args ...int32) int32 {
	rand.Seed(time.Now().UTC().UnixNano())
	var minV int32
	var maxV int32
	switch {
	case len(args) == 0:
		maxV = defaultRandomInt32Limit
	case len(args) == 1:
		minV = int32(math.Min(0, float64(args[0])))
		maxV = int32(math.Max(0, float64(args[0])))
	default:
		minV = int32(math.Min(float64(args[0]), float64(args[1])))
		maxV = int32(math.Max(float64(args[0]), float64(args[1])))
	}
	// Guard against a zero span: rand.Int31n panics when its argument is
	// <= 0, so e.g. GetRandomInt32(5, 5) or GetRandomInt32(0) used to panic.
	if maxV == minV {
		return maxV
	}
	return rand.Int31n(maxV-minV) + minV + 1
}
// GetRandomString returns random string of given length; when no length is
// supplied, defaultRandomStringLength is used.
func GetRandomString(lengths ...int) string {
	rand.Seed(time.Now().UTC().UnixNano())
	length := defaultRandomStringLength
	if len(lengths) > 0 {
		length = lengths[0]
	}
	const wordDict = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
	return GetRandomStringWithDict(length, wordDict)
}
// GetRandomStringWithDict returns random string of given length with custom dictionary.
func GetRandomStringWithDict(length int, wordDict string) string {
	rand.Seed(time.Now().UTC().UnixNano())
	n := len(wordDict)
	b := bytes.NewBuffer(nil)
	b.Grow(length)
	for i := 0; i < length; i++ {
		// WriteByte avoids the per-character string allocation of the
		// previous WriteString(string(wordDict[pos])) pattern.
		b.WriteByte(wordDict[rand.Intn(n)])
	}
	return b.String()
}
// round rounds f to the nearest integral value, with halves rounding up.
func round(f float64) float64 {
	shifted := f + 0.5
	return math.Floor(shifted)
}
// RoundTime will round a time to certain accuracy d, returning a UTC time
// whose sub-second offset is the nearest multiple of d.
func RoundTime(t time.Time, d time.Duration) time.Time {
	seconds := t.Unix()
	// Round only the nanosecond offset to the nearest multiple of d.
	multiples := round(float64(t.Nanosecond()) / float64(d))
	roundedOffset := int64(multiples) * int64(d)
	return time.Unix(seconds, roundedOffset).UTC()
}
// LoadJSONFromFile read json file and parse it to object.
func LoadJSONFromFile(filePath string, obj interface{}) error {
	file, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer file.Close()
	// Stream-decode straight from the file into obj.
	return json.NewDecoder(file).Decode(obj)
}
// AbsInt64 returns the absolute value of i.
//
// The previous implementation round-tripped through math.Abs(float64(i)),
// which silently loses precision for |i| > 2^53. Integer comparison is
// exact. Note math.MinInt64 still has no representable absolute value
// (negating it overflows), unchanged from before.
func AbsInt64(i int64) int64 {
	if i < 0 {
		return -i
	}
	return i
}
// GetLenOfInt64 returns the number of characters in the base-10
// representation of i, counting a leading '-' for negative values.
func GetLenOfInt64(i int64) int {
	return len(strconv.FormatInt(i, 10))
}
// StringListEqual reports whether a and b contain the same strings with the
// same multiplicities, regardless of order (multiset equality).
func StringListEqual(a, b []string) bool {
	return reflect.DeepEqual(StringListToMapInt(a), StringListToMapInt(b))
}

// StringListToMapInt counts the occurrences of each string in strs.
func StringListToMapInt(strs []string) map[string]int {
	counts := make(map[string]int, len(strs))
	for _, key := range strs {
		counts[key]++
	}
	return counts
}
|
package main
import (
"struct-package/management"
"fmt"
)
// main demonstrates the struct-package/management types: it builds two
// users, collects them into a group, and prints their display strings.
func main() {
	// NOTE(review): both users are created with ID 3 — presumably the second
	// should have a distinct ID; confirm against management.User's field
	// order (looks like ID, first name, last name, email, active — verify).
	user := management.User{ 3, "Adam", "Nasrudin", "Adam@gmail.com", true}
	user2 := management.User{ 3, "Alifah", "Nurdianti", "alifah@gmail.com", true}
	users := []management.User{user, user2}
	// Positional fields: group name, owner, members, active (inferred — verify).
	group := management.Group{"Group Belajar", user, users, true}
	fmt.Println(user.Display())
	fmt.Println(user2.Display())
	group.DisplayGroup()
}
package memutil
import (
"fmt"
"github.com/wetware/ww/internal/mem"
capnp "zombiezen.com/go/capnproto2"
)
// Alloc creates a new capnp message backed by arena a and returns a fresh
// root mem.Any value allocated in its first segment. On failure the zero
// mem.Any is returned along with a wrapped error.
func Alloc(a capnp.Arena) (mem.Any, error) {
	// NewMessage returns (msg, first segment, err); only the segment is
	// needed here to allocate the root value.
	_, seg, err := capnp.NewMessage(a)
	if err != nil {
		return mem.Any{}, fmt.Errorf("alloc error: %w", err)
	}
	// TODO(performance): we might not always want to allocate a _root_ value,
	// e.g. if the value is to be assigned to a vector index.
	// Investigate the implications of root vs non-root and
	// consider providing a mechanism for non-root allocation.
	return mem.NewRootAny(seg)
}
// Bytes returns the raw data of the segment holding any. The slice aliases
// the message memory, so mutations are visible to other readers of the
// same segment.
func Bytes(any mem.Any) []byte { return any.Segment().Data() }

// IsNil reports whether any holds the union's nil variant.
func IsNil(any mem.Any) bool { return any.Which() == mem.Any_Which_nil }
|
package goc
import "sort"
// item is one cache entry plus the logical timestamp of its last access.
type item struct {
	key     string
	val     interface{}
	lastVis int64
}

// lruCache is an approximate LRU cache with lazy eviction: entries carry a
// logical-clock access time and, instead of evicting on every insert, the
// cache is compacted by an occasional gc() sweep.
//
// Not safe for concurrent use.
type lruCache struct {
	cap, maxCap int   // current entry count / target size after gc
	logicClock  int64 // monotonically increasing access counter
	m           map[string]item
}

// set stores val under key and marks it most recently used.
func (c *lruCache) set(key string, val interface{}) {
	// Count only genuinely new keys: the previous version incremented cap
	// unconditionally, so overwriting an existing key inflated the count
	// and triggered gc sweeps far earlier than intended.
	if _, ok := c.m[key]; !ok {
		c.cap++
	}
	c.logicClock++
	c.m[key] = item{key, val, c.logicClock}
	// Discards the least recently used items in lazy method.
	// The amortized complexity for each set operation is O(logn), n being
	// maxCap: gc runs after O(n) operations and each gc costs O(n*logn).
	if c.cap > 2*c.maxCap+1024 {
		c.gc()
	}
}

// get returns the value stored under key and refreshes its access time.
func (c *lruCache) get(key string) (interface{}, bool) {
	if entry, ok := c.m[key]; ok {
		// Updating the last visiting time.
		c.logicClock++
		entry.lastVis = c.logicClock
		c.m[key] = entry
		return entry.val, true
	}
	return nil, false
}

// flush removes key from the cache; absent keys are ignored.
func (c *lruCache) flush(key string) {
	// Guard so flushing an unknown (or already-flushed) key cannot corrupt
	// the count — the previous version decremented cap unconditionally.
	if _, ok := c.m[key]; ok {
		c.cap--
		delete(c.m, key)
	}
}

// byLastVisDesc orders items by access time, most recent first.
type byLastVisDesc []item

func (a byLastVisDesc) Len() int           { return len(a) }
func (a byLastVisDesc) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byLastVisDesc) Less(i, j int) bool { return a[i].lastVis > a[j].lastVis }

// gc rebuilds the map, keeping only the maxCap most recently used entries.
func (c *lruCache) gc() {
	items := make([]item, 0, len(c.m))
	for _, v := range c.m {
		items = append(items, v)
	}
	sort.Sort(byLastVisDesc(items))
	c.m = make(map[string]item, c.maxCap)
	for i := 0; i < len(items) && i < c.maxCap; i++ {
		c.m[items[i].key] = items[i]
	}
	c.cap = len(c.m)
}

// newLruCache returns an empty cache that gc() will shrink to maxCap entries.
func newLruCache(maxCap int) *lruCache {
	c := &lruCache{}
	c.maxCap = maxCap
	c.m = make(map[string]item)
	return c
}
|
package unload
import (
"bufio"
"io"
"net/textproto"
"sync"
)
// see https://golang.org/src/net
var (
bufioReaderPool sync.Pool
textprotoReaderPool sync.Pool
)
func newTextprotoReader(br *bufio.Reader) *textproto.Reader {
if v := textprotoReaderPool.Get(); v != nil {
tr := v.(*textproto.Reader)
tr.R = br
return tr
}
return textproto.NewReader(br)
}
func putTextprotoReader(r *textproto.Reader) {
r.R = nil
textprotoReaderPool.Put(r)
}
func newBufioReader(r io.Reader) *bufio.Reader {
if v := bufioReaderPool.Get(); v != nil {
br := v.(*bufio.Reader)
br.Reset(r)
return br
}
return bufio.NewReader(r)
}
func putBufioReader(br *bufio.Reader) {
br.Reset(nil)
bufioReaderPool.Put(br)
}
|
package hash
import "github.com/VIVelev/btcd/crypto/hash/xripemd160"
// Implementing SHA-256 from scratch was fun, however, for RIPEMD160
// I am taking an existing implementation.

// Ripemd160 returns the RIPEMD-160 digest of data as a fixed 20-byte array.
func Ripemd160(data []byte) [20]byte {
	h := xripemd160.New()
	// hash.Hash.Write is documented to never return an error.
	h.Write(data)
	var ret [20]byte
	copy(ret[:], h.Sum(nil))
	return ret
}
|
/* ######################################################################
# Author: (zhengfei@fcadx.cn)
# Created Time: 2019-07-03 17:49:25
# File Name: handlers_test.go
# Description:
####################################################################### */
//go test -v -tags consul ./handlers -run TestGetByIds -pwd=`pwd` -registry=false
package handlers
import (
"context"
"flag"
"fmt"
"os"
"path"
"reflect"
"regexp"
"runtime"
"strings"
"testing"
"time"
"__PROJECT_NAME__/libs/config"
"__PROJECT_NAME__/models"
"github.com/ant-libs-go/util"
"github.com/cihub/seelog"
"github.com/smallnest/rpcx/client"
. "github.com/smartystreets/goconvey/convey"
)
// Defaults for the -pwd and -node flags below.
var (
	defaultPwd, _ = os.Getwd()
	defaultNode   = "/__PROJECT_NAME__" // consul node; project name is templated
)

// Test-wide state and command-line flags. cli is only populated when the
// -registry flag selects remote (rpcx via consul) invocation.
var (
	cli      client.XClient
	handler  = NewServiceImpl()
	pwd      = flag.String("pwd", defaultPwd, "work directory")
	cfg      = flag.String("cfg", "conf/app.toml", "config file, relative path")
	log      = flag.Bool("log", false, "show log?")
	node     = flag.String("node", defaultNode, "consul node name")
	registry = flag.Bool("registry", false, "do you want use registry?")
)
// TestMain wires up configuration, logging, models and (optionally) the
// rpcx/consul client before running the package's tests.
//
// NOTE(review): os.Exit(m.Run()) bypasses deferred calls, so the
// `defer seelog.Flush()` and `defer cli.Close()` below never execute;
// consider capturing m.Run()'s code, running cleanup, then exiting.
func TestMain(m *testing.M) {
	flag.Parse()
	fmt.Printf("Using configuration: %s\n", *cfg)
	fmt.Printf("Using registry: %v\n", *registry)
	// configuration: resolve a relative -cfg path against -pwd.
	if strings.HasPrefix(*cfg, "/") == false {
		*cfg = path.Join(*pwd, *cfg)
	}
	if err := config.SetFileAndLoad(*cfg); err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
	// logger: disabled unless -log is given, in which case the seelog
	// configuration referenced by the app config is loaded.
	seelog.ReplaceLogger(seelog.Disabled)
	if *log == true {
		logFile := config.Get().Basic.LogFile
		fmt.Printf("Using log configuration %s\n", logFile)
		if strings.HasPrefix(logFile, "/") == false {
			logFile = path.Join(*pwd, logFile)
		}
		var err error
		var logger seelog.LoggerInterface
		if logger, err = seelog.LoggerFromConfigAsFile(logFile); err != nil {
			fmt.Printf("Log configuration parse error: %s\n", err)
			os.Exit(-1)
		}
		seelog.ReplaceLogger(logger)
	}
	defer seelog.Flush()
	// init models; silence SQL echo unless logging is enabled.
	models.Init()
	if *log == false {
		models.Orm.ShowSQL(false)
	}
	// Optionally discover the "Server" service through consul and invoke it
	// remotely instead of calling the handler in-process.
	if *registry == true {
		d := client.NewConsulDiscovery(*node, "Server", []string{"127.0.0.1:8500"}, nil)
		cli = client.NewXClient("Server", client.Failover, client.RandomSelect, d, client.DefaultOption)
		defer cli.Close()
	}
	os.Exit(m.Run())
}
// buildCommonHeader returns a request header populated with fixed test
// metadata and the current unix timestamp.
func buildCommonHeader() *libs.Header {
	return &libs.Header{
		Requester: "test-client",
		Timestamp: time.Now().Unix(),
		Version:   1,
		Operator:  1, // operator id used by the suite — semantics defined by libs.Header
		Metadata:  map[string]string{}}
}
// Call invokes handler method fn with req/resp — remotely through the rpcx
// client when -registry is set, otherwise directly via reflection — then
// validates the application-level response code in the Header field.
//
// NOTE(review): in the reflection branch any error value returned by fn is
// discarded, and the header check below can overwrite a remote-call error;
// verify both are intentional.
func Call(fn, req, resp interface{}) (err error) {
	if *registry == true {
		// Derive the rpcx method name from the bound method's symbol name,
		// e.g. ".../handlers.(*ServiceImpl).GetByIds-fm" -> "GetByIds".
		fname := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
		re, _ := regexp.Compile(`\.(?P<fn>[\w]+)-fm$`)
		ret, _ := util.FindStringSubmatch(re, fname)
		err = cli.Call(context.Background(), ret["fn"], req, resp)
	} else {
		args := []reflect.Value{reflect.ValueOf(context.Background()), reflect.ValueOf(req), reflect.ValueOf(resp)}
		reflect.ValueOf(fn).Call(args)
	}
	// The response struct is expected to expose a *libs.Header field named
	// "Header" carrying the application status code.
	header := reflect.ValueOf(resp).Elem().FieldByName("Header").Interface().(*libs.Header)
	if header.Code != libs.ResponseCode_OK {
		err = fmt.Errorf("response code is %s, not is ok", header.Code)
	}
	fmt.Print(fmt.Sprintf(". err: %+v -> ", err))
	return
}
// TestGetByIds exercises the GetByIds handler (in-process or over rpcx,
// depending on -registry) with two fixed ids and asserts a clean response.
func TestGetByIds(t *testing.T) {
	req := &libs.GetByIdsRequest{
		Header: buildCommonHeader(),
		// Presumably ids of records present in the test database — verify.
		Body: []int32{108, 109},
	}
	resp := &libs.GetByIdsResponse{}
	Convey("TestGetByIds", t, func() {
		Convey("TestGetByIds should return nil", func() {
			So(Call(handler.GetByIds, req, resp), ShouldBeNil)
		})
	})
}
|
package main
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"io/ioutil"
"log"
"net/http"
"runtime"
"time"
)
// getFuturesAccountBalance fetches USDT-M futures balances from Binance
// (signed GET /fapi/v2/balance) and returns the raw response body; on a
// transport error log.Fatalf terminates the process.
//
// SECURITY(review): the API key and secret are hard-coded below; move them
// to configuration or environment variables before this ships anywhere.
func getFuturesAccountBalance() []byte {
	timeStamp := makeTimestamp()
	secret := "BfqSCwpNCslkepaOO7dTejFRz5thaGiTUBX1p4fZp6sDPDuJrtmNt6Wse9hMpTOF"
	// The query string doubles as the HMAC-SHA256 signing payload.
	data := fmt.Sprintf("timestamp=%d", timeStamp)
	h := hmac.New(sha256.New, []byte(secret))
	h.Write([]byte(data))
	signature := hex.EncodeToString(h.Sum(nil))
	req, _ := http.NewRequest("GET", fmt.Sprintf("https://fapi.binance.com/fapi/v2/balance?%s&signature=%s", data, signature), nil)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Add("X-MBX-APIKEY", "klGMQA5VZzL5dhi2DuR4agiYgVZaF8gxmQ0ZEuYkyfURRymazrIYtIBd2TtEheRp")
	client := &http.Client{}
	response, err := client.Do(req)
	if err != nil {
		log.Fatalf("An Error Occured %v", err)
		return nil
	}
	// Close the body so the transport can reuse the connection (the
	// original leaked it).
	defer response.Body.Close()
	body, _ := ioutil.ReadAll(response.Body)
	// NOTE(review): this prints the response-body object, not its contents;
	// sibling functions use log.Println(string(body)) — confirm intent.
	fmt.Println(response.Body)
	// The original had an unreachable runtime.Caller/Log tail after the
	// if/else (both branches returned first); it has been removed.
	return body
}
// changeMarginType switches symbol's margin type to ISOLATED via the signed
// POST /fapi/v1/marginType endpoint and logs the response body.
//
// SECURITY(review): hard-coded API credentials; externalize before release.
func changeMarginType(symbol string) {
	timeStamp := makeTimestamp()
	secret := "BfqSCwpNCslkepaOO7dTejFRz5thaGiTUBX1p4fZp6sDPDuJrtmNt6Wse9hMpTOF"
	// Fixed: "&timestamp" had been HTML-entity-mangled into "×tamp"
	// ("&times;" -> "×"), producing an invalid query and signature payload
	// that Binance rejects.
	data := fmt.Sprintf("symbol=%s&marginType=ISOLATED&timestamp=%d", symbol, timeStamp)
	// Sign the raw query string with HMAC-SHA256 over the secret.
	h := hmac.New(sha256.New, []byte(secret))
	h.Write([]byte(data))
	signature := hex.EncodeToString(h.Sum(nil))
	req, _ := http.NewRequest("POST", fmt.Sprintf("https://fapi.binance.com/fapi/v1/marginType?%s&signature=%s", data, signature), nil)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Add("X-MBX-APIKEY", "klGMQA5VZzL5dhi2DuR4agiYgVZaF8gxmQ0ZEuYkyfURRymazrIYtIBd2TtEheRp")
	client := &http.Client{}
	response, err := client.Do(req)
	if err != nil {
		log.Fatalf("An Error Occured %v", err)
	} else {
		// Close the body so the keep-alive connection can be reused
		// (the original leaked it).
		defer response.Body.Close()
		body, _ := ioutil.ReadAll(response.Body)
		log.Println(string(body))
	}
	// Record the call site asynchronously.
	_, file, line, _ := runtime.Caller(0)
	go Log("changeMarginType", fmt.Sprintf("<%v> %v", line, file))
}
// changeInitialLeverage sets symbol's initial leverage via the signed
// POST /fapi/v1/leverage endpoint and logs the response body.
//
// SECURITY(review): hard-coded API credentials; externalize before release.
func changeInitialLeverage(symbol string, lev int) {
	timeStamp := makeTimestamp()
	secret := "BfqSCwpNCslkepaOO7dTejFRz5thaGiTUBX1p4fZp6sDPDuJrtmNt6Wse9hMpTOF"
	// Fixed: "&timestamp" had been HTML-entity-mangled into "×tamp"
	// ("&times;" -> "×"), producing an invalid query and signature payload.
	data := fmt.Sprintf("symbol=%s&leverage=%d&timestamp=%d", symbol, lev, timeStamp)
	// Sign the raw query string with HMAC-SHA256 over the secret.
	h := hmac.New(sha256.New, []byte(secret))
	h.Write([]byte(data))
	signature := hex.EncodeToString(h.Sum(nil))
	req, _ := http.NewRequest("POST", fmt.Sprintf("https://fapi.binance.com/fapi/v1/leverage?%s&signature=%s", data, signature), nil)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Add("X-MBX-APIKEY", "klGMQA5VZzL5dhi2DuR4agiYgVZaF8gxmQ0ZEuYkyfURRymazrIYtIBd2TtEheRp")
	client := &http.Client{}
	response, err := client.Do(req)
	if err != nil {
		log.Fatalf("An Error Occured %v", err)
	} else {
		// Close the body so the keep-alive connection can be reused.
		defer response.Body.Close()
		body, _ := ioutil.ReadAll(response.Body)
		log.Println(string(body))
	}
	// Record the call site asynchronously.
	_, file, line, _ := runtime.Caller(0)
	go Log("changeInitialLeverage", fmt.Sprintf("<%v> %v", line, file))
}
// newOrder places a signed GTC order on Binance futures (POST
// /fapi/v1/order) and returns the raw response body. stopPrice == "0"
// means "no stop price" and omits that parameter.
//
// SECURITY(review): hard-coded API credentials; externalize before release.
func newOrder(symbol, side, typeVar, quantity, price, reduceOnly, stopPrice string) []byte {
	timeStamp := makeTimestamp()
	secret := "BfqSCwpNCslkepaOO7dTejFRz5thaGiTUBX1p4fZp6sDPDuJrtmNt6Wse9hMpTOF"
	// Fixed: "&timestamp" had been HTML-entity-mangled into "×tamp"
	// ("&times;" -> "×") in both branches, producing an invalid query and
	// signature payload.
	var data string
	if stopPrice != "0" {
		data = fmt.Sprintf("symbol=%s&side=%s&type=%s&timeInForce=GTC&quantity=%s&price=%s&timestamp=%d&reduceOnly=%s&stopPrice=%s", symbol, side, typeVar, quantity, price, timeStamp, reduceOnly, stopPrice)
	} else {
		data = fmt.Sprintf("symbol=%s&side=%s&type=%s&timeInForce=GTC&quantity=%s&price=%s&timestamp=%d&reduceOnly=%s", symbol, side, typeVar, quantity, price, timeStamp, reduceOnly)
	}
	// Sign the raw query string with HMAC-SHA256 over the secret.
	h := hmac.New(sha256.New, []byte(secret))
	h.Write([]byte(data))
	signature := hex.EncodeToString(h.Sum(nil))
	req, _ := http.NewRequest("POST", fmt.Sprintf("https://fapi.binance.com/fapi/v1/order?%s&signature=%s", data, signature), nil)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Add("X-MBX-APIKEY", "klGMQA5VZzL5dhi2DuR4agiYgVZaF8gxmQ0ZEuYkyfURRymazrIYtIBd2TtEheRp")
	client := &http.Client{}
	response, err := client.Do(req)
	if err != nil {
		log.Fatalf("An Error Occured %v", err)
		return nil
	}
	// Close the body so the keep-alive connection can be reused
	// (the original leaked it).
	defer response.Body.Close()
	body, _ := ioutil.ReadAll(response.Body)
	log.Println(string(body))
	return body
}
// startUserDataStream is currently a stub: the POST /fapi/v1/listenKey
// request is commented out, so the function only records its call site
// asynchronously via Log.
func startUserDataStream() {
	// req, _ := http.NewRequest("POST", "https://fapi.binance.com/fapi/v1/listenKey", nil)
	// req.Header.Set("Content-Type", "application/json")
	// req.Header.Add("X-MBX-APIKEY", "klGMQA5VZzL5dhi2DuR4agiYgVZaF8gxmQ0ZEuYkyfURRymazrIYtIBd2TtEheRp")
	// client := &http.Client{}
	// response, err := client.Do(req)
	// if err != nil {
	// 	log.Fatalf("An Error Occured %v", err)
	// } else {
	// 	body, _ := ioutil.ReadAll(response.Body)
	// 	log.Println(string(body))
	// }
	// fmt.Println("newOrder")
	// return nil
	_, file, line, _ := runtime.Caller(0)
	go Log("startUserDataStream", fmt.Sprintf("<%v> %v", line, file))
}
// accountTradeList is currently a stub: the signed GET /fapi/v1/userTrades
// request is commented out, so the function only records its call site
// asynchronously via Log.
//
// NOTE(review): the commented-out query strings contained the same
// "&timestamp" -> "×tamp" HTML-entity mangling seen elsewhere in this
// file; it is corrected below in case the code is resurrected.
func accountTradeList() {
	// timeStamp := makeTimestamp()
	// secret := "BfqSCwpNCslkepaOO7dTejFRz5thaGiTUBX1p4fZp6sDPDuJrtmNt6Wse9hMpTOF"
	// data := fmt.Sprintf("symbol=BTCUSDT&limit=500&timestamp=%d", timeStamp)
	// // Create a new HMAC by defining the hash type and the key (as byte array)
	// h := hmac.New(sha256.New, []byte(secret))
	// // Write Data to it
	// h.Write([]byte(data))
	// // Get result and encode as hexadecimal string
	// signature := hex.EncodeToString(h.Sum(nil))
	// req, _ := http.NewRequest("GET", fmt.Sprintf("https://fapi.binance.com/fapi/v1/userTrades?symbol=BTCUSDT&limit=500&timestamp=%d&signature=%s", timeStamp, signature), nil)
	// req.Header.Set("Content-Type", "application/json")
	// req.Header.Add("X-MBX-APIKEY", "klGMQA5VZzL5dhi2DuR4agiYgVZaF8gxmQ0ZEuYkyfURRymazrIYtIBd2TtEheRp")
	// client := &http.Client{}
	// response, err := client.Do(req)
	// if err != nil {
	// 	log.Fatalf("An Error Occured %v", err)
	// } else {
	// 	body, _ := ioutil.ReadAll(response.Body)
	// 	log.Println(string(body))
	// }
	_, file, line, _ := runtime.Caller(0)
	go Log("accountTradeList", fmt.Sprintf("<%v> %v", line, file))
}
// cancelAllOpenOrders cancels every open order for symbol via the signed
// DELETE /fapi/v1/allOpenOrders endpoint and returns the raw response body.
//
// SECURITY(review): hard-coded API credentials; externalize before release.
func cancelAllOpenOrders(symbol string) []byte {
	timeStamp := makeTimestamp()
	secret := "BfqSCwpNCslkepaOO7dTejFRz5thaGiTUBX1p4fZp6sDPDuJrtmNt6Wse9hMpTOF"
	// Fixed: "&timestamp" had been HTML-entity-mangled into "×tamp"
	// ("&times;" -> "×"), producing an invalid query and signature payload.
	data := fmt.Sprintf("symbol=%s&timestamp=%d", symbol, timeStamp)
	// Sign the raw query string with HMAC-SHA256 over the secret.
	h := hmac.New(sha256.New, []byte(secret))
	h.Write([]byte(data))
	signature := hex.EncodeToString(h.Sum(nil))
	req, _ := http.NewRequest("DELETE", fmt.Sprintf("https://fapi.binance.com/fapi/v1/allOpenOrders?%s&signature=%s", data, signature), nil)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Add("X-MBX-APIKEY", "klGMQA5VZzL5dhi2DuR4agiYgVZaF8gxmQ0ZEuYkyfURRymazrIYtIBd2TtEheRp")
	client := &http.Client{}
	response, err := client.Do(req)
	if err != nil {
		log.Fatalf("An Error Occured %v", err)
		return nil
	}
	// Close the body so the keep-alive connection can be reused
	// (the original leaked it).
	defer response.Body.Close()
	body, _ := ioutil.ReadAll(response.Body)
	log.Println(string(body))
	return body
}
// cancelOrders is currently a stub: the signed DELETE /fapi/v1/order
// request is commented out, so the function records its call site via Log
// and always returns nil.
//
// NOTE(review): the commented-out query string contained the same
// "&timestamp" -> "×tamp" HTML-entity mangling seen elsewhere in this
// file; it is corrected below in case the code is resurrected.
func cancelOrders(symbol, orderID string) []byte {
	// timeStamp := makeTimestamp()
	// secret := "BfqSCwpNCslkepaOO7dTejFRz5thaGiTUBX1p4fZp6sDPDuJrtmNt6Wse9hMpTOF"
	// data := fmt.Sprintf("symbol=%s&orderId=%s&timestamp=%d", symbol, orderID, timeStamp)
	// // Create a new HMAC by defining the hash type and the key (as byte array)
	// h := hmac.New(sha256.New, []byte(secret))
	// // Write Data to it
	// h.Write([]byte(data))
	// // Get result and encode as hexadecimal string
	// signature := hex.EncodeToString(h.Sum(nil))
	// req, _ := http.NewRequest("DELETE", fmt.Sprintf("https://fapi.binance.com/fapi/v1/order?%s&signature=%s", data, signature), nil)
	// req.Header.Set("Content-Type", "application/json")
	// req.Header.Add("X-MBX-APIKEY", "klGMQA5VZzL5dhi2DuR4agiYgVZaF8gxmQ0ZEuYkyfURRymazrIYtIBd2TtEheRp")
	// client := &http.Client{}
	// response, err := client.Do(req)
	// if err != nil {
	// 	log.Fatalf("An Error Occured %v", err)
	// 	return nil
	// } else {
	// 	body, _ := ioutil.ReadAll(response.Body)
	// 	log.Println(string(body))
	// 	return body
	// }
	_, file, line, _ := runtime.Caller(0)
	go Log("cancelOrders", fmt.Sprintf("<%v> %v", line, file))
	return nil
}
// makeTimestamp returns the current wall-clock time in milliseconds since
// the Unix epoch, the format Binance expects in signed-request timestamps.
func makeTimestamp() int64 {
	const nanosPerMilli = int64(time.Millisecond)
	return time.Now().UnixNano() / nanosPerMilli
}
|
package cache
import (
"bytes"
"math"
"strconv"
"testing"
)
// TestRender checks the exact JSON payload produced by serializer.render
// for two entries, including one whose values and counters are all zero.
func TestRender(t *testing.T) {
	s := new(serializer)
	s.add("I1", &storeEntry{Values: []int64{4, 8}, Counters: []uint32{4, 4}, Cnt: 4})
	s.add("I2", &storeEntry{Values: []int64{0, 0}, Counters: []uint32{0, 0}, Cnt: 0})
	want := `{"status":"ok","data":{"I1":{"values":[4,8],"counters":[4,4]},"I2":{"values":[0,0],"counters":[0,0]}}}`
	have := s.render()
	if !bytes.Equal(have, []byte(want)) {
		t.Fatalf("\nHave: %s\nWant: %s\n", have, want)
	}
}
// TestFixedLengthCapacity fills ten entries with maximal uint32/int64
// values so every numeric field renders at its widest, then checks that
// fixedLengthCapacity's size estimate matches the actual rendered length
// minus the variable part tracked in s.capacity.
func TestFixedLengthCapacity(t *testing.T) {
	u32 := uint32(math.MaxUint32)
	i64 := int64(math.MaxInt64)
	s := new(serializer)
	for i := 0; i < 10; i++ {
		s.add("I"+strconv.Itoa(i), &storeEntry{Counters: []uint32{u32, u32}, Values: []int64{i64, i64}, Cnt: u32})
	}
	want := len(s.render()) - s.capacity
	have := fixedLengthCapacity(len(s.rows), len(s.rows[0].e.Counters))
	if have != want {
		t.Fatalf("\nHave: %d\nWant: %d\n", have, want)
	}
}
|
package leetcode
import "sort"
func rearrangeBarcodes(barcodes []int) []int {
m := map[int]int{}
for _, v := range barcodes {
m[v]++
}
content := make([][]int, 0, len(m))
for k, v := range m {
content = append(content, []int{k, v})
}
sort.Slice(content, func(i, j int) bool {
return content[i][1] > content[j][1]
})
idx := 0
for i := 0; i < len(barcodes); i += 2 {
if content[idx][1] == 0 {
idx++
}
content[idx][1]--
barcodes[i] = content[idx][0]
}
for i := 1; i < len(barcodes); i += 2 {
if content[idx][1] == 0 {
idx++
}
content[idx][1]--
barcodes[i] = content[idx][0]
}
return barcodes
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cli provides a CLI interface for a mutating Kubernetes webhook.
package cli
import (
"flag"
"fmt"
"net"
"net/http"
"os"
"strconv"
"strings"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/webhook/pkg/injector"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8snet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// Command-line flags controlling where the admission webhook listens and
// which namespaces it applies to.
var (
	address   = flag.String("address", "", "The ip address the admission webhook serves on. If unspecified, a public address is selected automatically.")
	port      = flag.Int("port", 0, "The port the admission webhook serves on.")
	podLabels = flag.String("pod-namespace-labels", "", "A comma-separated namespace label selector, the admission webhook will only take effect on pods in selected namespaces, e.g. `label1,label2`.")
)
// Main runs the webhook: it parses flags, delegates to run(), and exits
// with status 1 after logging if anything fails.
func Main() {
	flag.Parse()
	if err := run(); err != nil {
		log.Warningf("%v", err)
		os.Exit(1)
	}
}
// run builds an in-cluster Kubernetes client, registers the mutating
// webhook configuration for the selected namespaces, then serves the
// HTTPS admission endpoint until the server stops.
func run() error {
	log.Infof("Starting %s\n", injector.Name)

	// Create client config (requires running inside a cluster).
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return fmt.Errorf("create in cluster config: %w", err)
	}

	// Create clientset.
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return fmt.Errorf("create kubernetes client: %w", err)
	}

	if err := injector.CreateConfiguration(clientset, parsePodLabels()); err != nil {
		return fmt.Errorf("create webhook configuration: %w", err)
	}

	// Blocks until the HTTPS server exits.
	if err := startWebhookHTTPS(clientset); err != nil {
		return fmt.Errorf("start webhook https server: %w", err)
	}

	return nil
}
// parsePodLabels converts the comma-separated -pod-namespace-labels flag
// into a LabelSelector requiring each listed label to exist on the
// namespace.
//
// Empty entries are skipped: previously, leaving the flag unset produced a
// bogus `"" Exists` requirement because strings.Split("", ",") yields [""].
// An unset flag now yields an empty selector (matches all namespaces),
// consistent with the flag's documentation.
func parsePodLabels() *metav1.LabelSelector {
	rv := &metav1.LabelSelector{}
	for _, s := range strings.Split(*podLabels, ",") {
		key := strings.TrimSpace(s)
		if key == "" {
			continue
		}
		rv.MatchExpressions = append(rv.MatchExpressions, metav1.LabelSelectorRequirement{
			Key:      key,
			Operator: "Exists",
		})
	}
	return rv
}
// startWebhookHTTPS serves the admission endpoint over TLS on
// *address:*port, auto-selecting a host interface IP when -address is
// unset. It blocks until the server exits; http.ErrServerClosed is
// treated as a clean shutdown, anything else is an error.
//
// NOTE(review): the clientset parameter is currently unused here.
func startWebhookHTTPS(clientset kubernetes.Interface) error {
	log.Infof("Starting HTTPS handler")
	defer log.Infof("Stopping HTTPS handler")
	if *address == "" {
		ip, err := k8snet.ChooseHostInterface()
		if err != nil {
			return fmt.Errorf("select ip address: %w", err)
		}
		*address = ip.String()
	}
	// Every request path is admitted through the injector.
	mux := http.NewServeMux()
	mux.Handle("/", http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			injector.Admit(w, r)
		}))
	server := &http.Server{
		// Listen on all addresses.
		Addr:      net.JoinHostPort(*address, strconv.Itoa(*port)),
		TLSConfig: injector.GetTLSConfig(),
		Handler:   mux,
	}
	// Certificates come from TLSConfig, hence the empty file arguments.
	if err := server.ListenAndServeTLS("", ""); err != http.ErrServerClosed {
		return fmt.Errorf("start HTTPS handler: %w", err)
	}
	return nil
}
|
package metrics
import "github.com/cerana/cerana/provider"
// Metrics is a provider of system info and metrics functionality.
type Metrics struct{}

// RegisterTasks registers all of Metrics's task handlers with the server,
// one task name per subsystem (cpu, disk, host, memory, network).
func (m *Metrics) RegisterTasks(server *provider.Server) {
	server.RegisterTask("metrics-cpu", m.CPU)
	server.RegisterTask("metrics-disk", m.Disk)
	server.RegisterTask("metrics-host", m.Host)
	server.RegisterTask("metrics-memory", m.Memory)
	server.RegisterTask("metrics-network", m.Network)
}
|
// Copyright 2017 The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dataselect
// PropertyName is used to get the value of certain property of data cell.
// For example if we want to get the namespace of certain Deployment we can use DeploymentCell.GetProperty(NamespaceProperty)
type PropertyName string

// List of all property names supported by the UI.
//
// NOTE(review): LabelProperty and MicroservicesConfigLabelProperty share
// the value "label" — confirm the duplication is intentional, since both
// selectors resolve to the same property key.
const (
	NameProperty                       = "name"
	NameLengthProperty                 = "nameLength"
	CreationTimestampProperty          = "creationTimestamp"
	NamespaceProperty                  = "namespace"
	StatusProperty                     = "status"
	ScopeProperty                      = "scope"
	KindProperty                       = "kind"
	DisplayNameProperty                = "displayName"
	DisplayEnNameProperty              = "displayEnName"
	DisplayZhNameProperty              = "displayZhName"
	DomainProperty                     = "domain"
	LabelProperty                      = "label"
	SecretTypeProperty                 = "secretType"
	ProjectProperty                    = "project"
	ProductNameProperty                = "productName"
	PipelineConfigProperty             = "pipelineConfig"
	CodeRepoServiceProperty            = "codeRepoService"
	CodeRepoBindingProperty            = "codeRepoBinding"
	CodeRepositoryProperty             = "codeRepository"
	CodeQualityBindingProperty         = "codeQualityBinding"
	ExactNameProperty                  = "exactName"
	LabelEqualProperty                 = "labelEq"
	JenkinsProperty                    = "jenkins"
	JenkinsBindingProperty             = "jenkinsBinding"
	StartedAtProperty                  = "startedAt"
	PipelineCreationTimestampProperty  = "pipelineCreationTimestamp"
	ImageRegistryProperty              = "imageRegistry"
	ImageRegistryBindingProperty       = "imageRegistryBinding"
	ImageRepositoryProperty            = "imageRepository"
	LatestCommitAt                     = "latestCommitAt"
	MicroservicesConfigProfileProperty = "profile"
	MicroservicesConfigLabelProperty   = "label"
	CategoryProperty                   = "category"
	MultiBranchCategoryProperty        = "multiBranchCategory"
	MultiBranchNameProperty            = "multiBranchName"
	PipelineStatusProperty             = "pipelineStatus"
	ASMHostName                        = "asmHost"
)
|
package main
// https://golang.org/pkg/fmt/
// https://golang.org/pkg/time/
import (
"fmt"
"time"
)
// main prints a greeting followed by the current wall-clock time.
func main() {
	now := time.Now()
	fmt.Println("Welcome to the playground!")
	fmt.Println("The time is ", now)
}
|
package exec
import (
"github.com/araddon/qlbridge/expr"
)
// exec.Visitor implements standard Sql Visit() patterns to create
// a job Builder.
// An implementation of Visitor() will be be able to execute/run a Statement.
// Each method below handles one SQL statement kind, returning an
// implementation-defined result plus an error.
type Visitor interface {
	VisitPreparedStmt(stmt *expr.PreparedStatement) (interface{}, error)
	VisitSelect(stmt *expr.SqlSelect) (interface{}, error)
	VisitInsert(stmt *expr.SqlInsert) (interface{}, error)
	VisitDelete(stmt *expr.SqlDelete) (interface{}, error)
	VisitUpdate(stmt *expr.SqlUpdate) (interface{}, error)
	VisitShow(stmt *expr.SqlShow) (interface{}, error)
	VisitDescribe(stmt *expr.SqlDescribe) (interface{}, error)
}
|
package main
// Leetcode 1111. (medium)
func maxDepthAfterSplit(seq string) []int {
l, r := 0, 0
res := make([]int, len(seq))
for i := range seq {
if seq[i] == '(' {
res[i] = l
l = (l + 1) % 2
} else {
res[i] = r
r = (r + 1) % 2
}
}
return res
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.