text stringlengths 11 4.05M |
|---|
package main
import (
"flag"
"github.com/fanan/netease_download/netease"
"log"
"os"
)
var playlistID = flag.Int64("l", 22914865, "playlistID")
var downloadDir = flag.String("d", os.ExpandEnv("$HOME/Downloads/"), "download dir")
// main downloads every track of the configured NetEase playlist into
// the directory given by the -d flag, aborting on the first error.
func main() {
	flag.Parse()
	pl := netease.NewPlayList(*playlistID)
	// Use os.Stat rather than os.Lstat so that a download directory which
	// is a symlink to a real directory is accepted. Lstat reports on the
	// link itself and therefore failed the IsDir check below.
	fi, err := os.Stat(*downloadDir)
	if err != nil {
		log.Fatal(err)
	}
	if !fi.IsDir() {
		log.Fatalf("%s is not a directory", *downloadDir)
	}
	if err := pl.Parse(); err != nil {
		log.Fatal(err)
	}
	n := len(pl.Result.Tracks)
	for idx, track := range pl.Result.Tracks {
		log.Printf("start downloading %d/%d -- %s\n", idx+1, n, track.Name)
		if err := track.Parse(); err != nil {
			log.Fatal(err)
		}
		if err := track.Download(*downloadDir); err != nil {
			log.Fatal(err)
		}
	}
	log.Println("Downloading finished!")
}
|
// ReadAppsInfo
package main
import (
_ "bytes"
"encoding/json"
"fmt"
"internal/syscall/windows/registry"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"unsafe"
)
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
GetModuleFileNameProc = kernel32.NewProc("GetModuleFileNameW")
)
// ItemApp describes one entry of the BlueStacks apps.json manifest.
//
// NOTE(review): json.Unmarshal can only populate exported fields, so the
// unexported sPackage field is never filled from apps.json and will always
// be the empty string when used later (e.g. in the shortcut arguments).
// It likely needs to be exported with a `json` tag — confirm against the
// actual apps.json schema.
type ItemApp struct {
	Img      string // icon file name, e.g. "foo.png" (extension stripped by main)
	Name     string // display name of the app
	System   string
	sPackage string // unexported: NOT set by json.Unmarshal (see note above)
	Appstore string
	Activity string // activity identifier passed to the shortcut helper
	Version  string
}
var (
Item []ItemApp
)
func IsFile(f string) bool {
fi, err := os.Stat(f)
return err == nil && !fi.IsDir()
}
// GetModulePath returns the directory containing the current executable,
// obtained via the Windows GetModuleFileNameW API, or "" on failure.
func GetModulePath() string {
	var buf [syscall.MAX_PATH]uint16
	ret, _, _ := GetModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
	if ret == 0 {
		// The Win32 call signals failure with a zero return value.
		return ""
	}
	return filepath.Dir(syscall.UTF16ToString(buf[:]))
}
// GetBluestackDataInfo reads the string value named skey from the
// HKLM\SOFTWARE\BlueStacks registry key.
//
// The original implementation called log.Fatal on every failure, which
// terminates the process and made the (string, error) signature meaningless;
// the `return "", err` after log.Fatal was unreachable dead code. Errors are
// now returned to the caller instead.
func GetBluestackDataInfo(skey string) (string, error) {
	key, err := registry.OpenKey(registry.LOCAL_MACHINE, "SOFTWARE\\BlueStacks", registry.QUERY_VALUE)
	if err != nil {
		return "", err
	}
	defer key.Close()
	val, _, err := key.GetStringValue(skey)
	if err != nil {
		return "", err
	}
	return val, nil
}
// main reads the BlueStacks apps.json manifest and, for every app whose
// icon file exists, invokes an external helper executable that creates a
// desktop shortcut for it.
func main() {
	// The original discarded this error; if the registry lookup fails we
	// cannot locate apps.json, so abort with a useful message.
	dataDir, err := GetBluestackDataInfo("DataDir")
	if err != nil {
		log.Fatal(err)
	}
	sPath := fmt.Sprintf("%s\\UserData\\Gadget\\apps.json", dataDir)
	data, err := ioutil.ReadFile(sPath)
	if err != nil {
		log.Fatal(err)
	}
	// The original exited silently (status 0) on malformed JSON; report it.
	if err := json.Unmarshal(data, &Item); err != nil {
		log.Fatal(err)
	}
	for i := range Item {
		iconName := Item[i].Img
		// Strip the file extension if present. The original sliced with
		// strings.LastIndex unconditionally, which panics (index -1) when
		// the image name contains no ".".
		if dot := strings.LastIndex(iconName, "."); dot >= 0 {
			iconName = iconName[:dot]
		}
		iconPath := fmt.Sprintf("%s\\UserData\\Library\\Icons\\%s.ico", dataDir, iconName)
		if !IsFile(iconPath) {
			continue
		}
		exePath := fmt.Sprintf("%s\\DaeseongLib.exe", GetModulePath())
		lnkPath := fmt.Sprintf("%s\\%s.lnk", GetModulePath(), Item[i].Name)
		arg := fmt.Sprintf("%s;%s;%s", Item[i].Name, Item[i].sPackage, Item[i].Activity)
		exeName := Item[i].Name
		// TODO: how to call CoCreateInstance from Go?
		//CreateShortCuttor(sExePath, sLnkPath, sArg, sExeName, sIconPath)
		param := fmt.Sprintf("%s|%s|%s|%s|%s", exePath, lnkPath, arg, exeName, iconPath)
		runPath := "C:\\Go\\src\\DaeseongLib\\CreateShortCutBluestackIcon.exe"
		cmd := exec.Command("cmd", "/c", runPath, param)
		// The original ignored the helper's exit status entirely; log a
		// failure but keep processing the remaining apps (best effort).
		if err := cmd.Run(); err != nil {
			log.Printf("creating shortcut for %s: %v", Item[i].Name, err)
		}
	}
	fmt.Println("complete")
}
|
// Package main implements a client for Greeter service.
package main
import (
"context"
"flag"
"fmt"
"log"
"time"
"google.golang.org/grpc"
pb "google.golang.org/grpc/examples/helloworld/helloworld"
"google.golang.org/grpc/peer"
_ "google.golang.org/grpc/xds/experimental"
)
var (
address = flag.String("address", "localhost:50051", "server address, with port")
name = flag.String("name", "world", "name to greet")
dialTimeout = flag.Duration("dialTimeout", 10*time.Second, "timeout for creating grpc.ClientConn, format: 100ms, or 10s, or 2h")
rpcTimeout = flag.Duration("rpcTimeout", time.Second, "timeout for each RPC, format: 100ms, or 10s, or 2h")
totalTime = flag.Duration("time", time.Hour, "total time the binary runs, format: 100ms, or 10s, or 2h")
xds = flag.Bool("xds", true, "do xds or not")
)
// main connects to the Greeter service (optionally via the experimental xds
// resolver) and issues one SayHello RPC per second until totalTime elapses.
func main() {
	flag.Parse()
	// Establish the connection, bounded by dialTimeout.
	dialCtx, dialCancel := context.WithTimeout(context.Background(), *dialTimeout)
	defer dialCancel()
	target := *address
	if *xds {
		target = "xds-experimental:///" + target
	}
	conn, err := grpc.DialContext(dialCtx, target, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	client := pb.NewGreeterClient(conn)
	deadline := time.After(*totalTime)
	for {
		// Each RPC gets its own timeout; the peer records who answered.
		rpcCtx, rpcCancel := context.WithTimeout(context.Background(), *rpcTimeout)
		remote := new(peer.Peer)
		reply, err := client.SayHello(rpcCtx, &pb.HelloRequest{Name: *name}, grpc.Peer(remote))
		rpcCancel()
		if err != nil {
			log.Fatalf("could not greet: %v", err)
		}
		fmt.Printf("Greeting: %s, from %v\n", reply.GetMessage(), remote.Addr)
		// Stop once the overall run time has elapsed; otherwise pace the RPCs.
		select {
		case <-deadline:
			return
		default:
		}
		time.Sleep(time.Second)
	}
}
|
package objects
// NodeStatistics is the structure for node statistics. It is serialized to
// JSON using the per-field tags below.
type NodeStatistics struct {
	ID                uint    `json:"id"`                // node identifier
	Type              string  `json:"type"`              // node type/category
	Name              string  `json:"name"`              // human-readable node name
	Status            string  `json:"status"`            // current status label
	ErrMsg            string  `json:"errMsg"`            // error message, empty when no error
	StartTime         float32 `json:"startTime"`         // presumably a timestamp in seconds — confirm units with producers
	EndTime           float32 `json:"endTime"`           // see StartTime note on units
	Duration          float32 `json:"duration"`          // elapsed time between start and end
	FreeMemoryInitial float32 `json:"freeMemoryInitial"` // free memory before the node ran
	FreeMemoryFinal   float32 `json:"freeMemoryFinal"`   // free memory after the node ran
	FreeDiscInitial   float32 `json:"freeDiscInitial"`   // free disk before the node ran
	FreeDiscFinal     float32 `json:"freeDiscFinal"`     // free disk after the node ran
}
|
package main
import (
"fmt"
)
// ReverseInt returns n with its decimal digits reversed, e.g. 123 -> 321.
// Trailing zeros are dropped (100 -> 1) and 0 maps to 0.
//
// The original implementation returned 0 for any negative input because its
// digit loop only ran while n > 0; this version generalizes to negatives by
// reversing the magnitude and restoring the sign (-123 -> -321), which is
// backward compatible for all non-negative inputs.
func ReverseInt(n int) int {
	neg := n < 0
	if neg {
		n = -n
	}
	reversed := 0
	for n > 0 {
		// Peel off the least-significant digit and append it to the result.
		reversed = reversed*10 + n%10
		n /= 10
	}
	if neg {
		return -reversed
	}
	return reversed
}
// main demonstrates ReverseInt on a sample value.
func main() {
	result := ReverseInt(123)
	fmt.Println(result)
}
|
package rp_kit
import (
"github.com/go-redis/redis"
"github.com/go-xorm/xorm"
"github.com/limitedlee/microservice/common/config"
"testing"
)
// Test_NewRedisEngine verifies that a redis engine built from the configured
// DSN can be pinged.
func Test_NewRedisEngine(t *testing.T) {
	type args struct {
		dsn string
	}
	cases := []struct {
		name string
		args args
	}{
		{
			name: "创建redis连接",
			args: args{dsn: config.GetString("redis.Addr") + "|" + config.GetString("redis.Password") + "|" + config.GetString("redis.DB")},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			engine := NewRedisEngine(tc.args.dsn)
			// A successful Ping proves the connection is usable.
			if err := engine.Engine.(*redis.Client).Ping().Err(); err != nil {
				t.Error(err)
			}
		})
	}
}
// Test_NewDBEngine verifies that a database engine built from the configured
// DSN can be pinged.
func Test_NewDBEngine(t *testing.T) {
	type args struct {
		dsn string
	}
	cases := []struct {
		name string
		args args
	}{
		{
			name: "创建mysql连接",
			args: args{dsn: config.GetString("mysql.test")},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			engine := NewDBEngine(tc.args.dsn)
			// A successful Ping proves the connection is usable.
			if err := engine.Engine.(*xorm.Engine).Ping(); err != nil {
				t.Error(err)
			}
		})
	}
}
|
package main
import (
goflag "flag"
"fmt"
"html/template"
"log"
"net/http"
"os"
"github.com/ricoberger/sealed-secrets-web/pkg/secrets"
"github.com/ricoberger/sealed-secrets-web/pkg/version"
"github.com/bitnami-labs/flagenv"
"github.com/bitnami-labs/pflagenv"
flag "github.com/spf13/pflag"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/tools/clientcmd"
)
const (
flagEnvPrefix = "SEALED_SECRETS"
)
var (
disableLoadSecrets = flag.Bool("disable-load-secrets", false, "Disable the loading of existing secrets")
kubesealArgs = flag.String("kubeseal-arguments", "", "Arguments which are passed to kubeseal")
outputFormat = flag.String("format", "json", "Output format for sealed secret. Either json or yaml")
webExternalUrl = flag.String("web-external-url", "", "The URL under which the Sealed Secrets Web Interface is externally reachable (for example, if it is served via a reverse proxy).")
printVersion = flag.Bool("version", false, "Print version information and exit")
clientConfig clientcmd.ClientConfig
sHandler *secrets.Handler
indexTmpl = template.Must(template.ParseFiles("static/index.html"))
)
// init wires up flag handling: environment-variable overrides for the
// standard flag set, kubectl-style kubeconfig override flags on the pflag
// set, and finally the merge of the goflag set into pflag.
//
// NOTE: the call order here is significant — env vars are applied to the
// goflag set first, the pflag set is populated (including env overrides via
// pflagenv), and only then is the goflag set merged into flag.CommandLine.
func init() {
	flagenv.SetFlagsFromEnv(flagEnvPrefix, goflag.CommandLine)
	// The "usual" clientcmd/kubectl flags
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
	overrides := clientcmd.ConfigOverrides{}
	kflags := clientcmd.RecommendedConfigOverrideFlags("")
	flag.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster")
	clientcmd.BindOverrideFlags(&overrides, flag.CommandLine, kflags)
	// Deferred loading: the kubeconfig is resolved lazily, possibly
	// prompting interactively on stdin.
	clientConfig = clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin)
	pflagenv.SetFlagsFromEnv(flagEnvPrefix, flag.CommandLine)
	// Standard goflags (glog in particular)
	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
}
// main parses flags, optionally prints version information, initializes the
// secrets handler, registers the HTTP routes, and serves the Sealed Secrets
// web interface on :8080.
func main() {
	// We are using the same flags as the kubeseal command-line tool.
	// The flags are passed to the kubeseal client to seal the secrets.
	flag.Parse()
	goflag.CommandLine.Parse([]string{})
	if *printVersion {
		v, err := version.Print("sealed secrets web")
		if err != nil {
			log.Fatalf("Could not get version information: %s", err.Error())
		}
		fmt.Println(v)
		return
	}
	var err error
	sHandler, err = secrets.NewHandler(clientConfig, *outputFormat)
	if err != nil {
		log.Fatalf("Could not initialize secrets handler: %s", err.Error())
	}
	http.HandleFunc("/", indexHandler)
	http.HandleFunc("/_health", healthHandler)
	http.HandleFunc("/api/seal", sealHandler)
	http.HandleFunc("/api/secrets", secretsHandler)
	http.HandleFunc("/api/base64", base64Handler)
	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
	// Start the web interface.
	fmt.Printf("Starting \"Sealed Secrets\" Web Interface %s\n", version.Info())
	fmt.Printf("Build context %s\n", version.BuildContext())
	fmt.Printf("Listening on %s\n", ":8080")
	// ListenAndServe always returns a non-nil error. The original merely
	// logged it and exited with status 0; exit non-zero so supervisors can
	// detect the failure.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package main
import (
// 如果需要用到不同目录的go方法,则需要导入相应包
"calc"
"fmt"
)
// init prints a marker so the package-initialisation order is observable:
// imported packages' init functions (e.g. calc's) run before this one.
func init() {
	fmt.Println("main init...")
}
// main demonstrates calling exported functions from another package (calc)
// and an unexported sibling function, and documents the init order.
func main() {
	// Only capitalised (exported) identifiers of package calc are callable here.
	sum := calc.Add(10, 20)
	fmt.Println("a = ", sum)
	diff := calc.Minus(20, 10)
	fmt.Println("b = ", diff)
	// Functions in the same package can be called directly.
	test()
	// Expected output:
	// calc init...
	// main init...
	// a = 30
	// b = 10
	// test...
	// Note: when the program starts, the init functions of imported packages
	// run first, then the main package's init, and finally main itself.
}
|
package email
// Action is a call-to-action element of an email: explanatory text plus the
// button the recipient is asked to click.
type Action struct {
	Message string // text shown alongside the button
	Button  Button // the clickable button (declared elsewhere in this package)
}
|
package namecheap
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"github.com/hashicorp/go-cleanhttp"
)
var (
debug = os.Getenv("DEBUG") != ""
)
const (
namecheapApiUrl = "https://api.namecheap.com/xml.response"
sandboxApiUrl = "https://api.sandbox.namecheap.com/xml.response"
)
// New returns a Client instance configured from the NAMECHEAP_* environment
// variables.
func New() (*Client, error) {
	var (
		username = os.Getenv("NAMECHEAP_USERNAME")
		apiuser  = os.Getenv("NAMECHEAP_API_USER")
		token    = os.Getenv("NAMECHEAP_TOKEN")
		ip       = os.Getenv("NAMECHEAP_IP") // TODO(adam): attempt local read?
	)
	// Any non-empty value other than "false" enables the sandbox endpoint.
	sbx := os.Getenv("NAMECHEAP_USE_SANDBOX")
	return NewClient(username, apiuser, token, ip, sbx != "" && sbx != "false")
}
// NewClient creates a Client instance from the provided configuration;
// typically users call New() with environment variables set instead.
func NewClient(username string, apiuser string, token string, ip string, useSandbox bool) (*Client, error) {
	if username == "" || apiuser == "" || token == "" || ip == "" {
		// Deliberately reports only the token's length, never its value.
		return nil, fmt.Errorf("ERROR: missing configuration - username=%q, apiuser=%q, token=%d, ip=%q", username, apiuser, len(token), ip)
	}
	// TODO(adam): parse `ip`, ipv4 only? is ipv6 allowed?
	endpoint := namecheapApiUrl
	if useSandbox {
		endpoint = sandboxApiUrl
	}
	return &Client{
		Token:    token,
		ApiUser:  apiuser,
		Username: username,
		Ip:       ip,
		URL:      endpoint,
		Http:     cleanhttp.DefaultClient(),
	}, nil
}
// Client provides a client to the Namecheap API. All request credentials
// (token, users, and the whitelisted IP) are attached to every request by
// NewRequest.
type Client struct {
	// Access Token (sent as the ApiKey request parameter)
	Token string
	// ApiUser
	ApiUser string // TODO(adam): What's this for? difference with Username?
	// Username
	Username string
	// URL of the API endpoint to use (production or sandbox)
	URL string
	// IP that is whitelisted with Namecheap (sent as ClientIp)
	Ip string
	// HttpClient is the client to use. A client with
	// default values will be used if not provided.
	Http *http.Client
}
// NewRequest creates a POST request to the Namecheap API endpoint carrying
// the given parameters, plus the client credentials, as a form-encoded body.
// The passed-in map is mutated: the credential keys are written into it.
func (c *Client) NewRequest(body map[string]string) (*http.Request, error) {
	u, err := url.Parse(c.URL)
	if err != nil {
		return nil, fmt.Errorf("Error parsing base URL: %s", err)
	}
	// Global parameters required on every API call.
	body["Username"] = c.Username
	body["ApiKey"] = c.Token
	body["ApiUser"] = c.ApiUser
	body["ClientIp"] = c.Ip
	// encodeBody cannot fail. The original re-checked the stale url.Parse
	// error here ("Error encoding request body"), which was dead code and
	// has been removed.
	rBody := c.encodeBody(body)
	// Build the request
	req, err := http.NewRequest("POST", u.String(), bytes.NewBufferString(rBody))
	if err != nil {
		return nil, fmt.Errorf("Error creating request: %s", err)
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Add("Content-Length", strconv.Itoa(len(rBody)))
	return req, nil
}
// decode unmarshals the XML read from reader into obj. When the DEBUG
// environment variable is set, the raw payload is printed first.
func (c *Client) decode(reader io.Reader, obj interface{}) error {
	if debug {
		bs, err := ioutil.ReadAll(reader)
		if err != nil {
			return err
		}
		fmt.Printf("DEBUG: %q\n", string(bs))
		// Rebuild the reader so decoding still sees the full body.
		reader = bytes.NewReader(bs)
	}
	return xml.NewDecoder(reader).Decode(&obj)
}
// encodeBody form-encodes the given key/value pairs (application/x-www-form-urlencoded).
func (c *Client) encodeBody(body map[string]string) string {
	values := url.Values{}
	for k, v := range body {
		values.Set(k, v)
	}
	return values.Encode()
}
|
package bstest
import (
. "gx/ipfs/QmbgbNxC1PMyS2gbx7nf2jKNG7bZAfYJJebdK4ptBBWCz1/go-blockservice"
mockrouting "gx/ipfs/QmRJvdmKJoDcQEhhTt5NYXJPQFnJYPo1kfapxtjZLfDDqH/go-ipfs-routing/mock"
delay "gx/ipfs/QmUe1WCHkQaz4UeNKiHDUBV2T6i9prc3DniqyHPXyfGaUq/go-ipfs-delay"
bitswap "gx/ipfs/QmYJ48z7NEzo3u2yCvUvNtBQ7wJWd5dX2nxxc7FeA6nHq1/go-bitswap"
tn "gx/ipfs/QmYJ48z7NEzo3u2yCvUvNtBQ7wJWd5dX2nxxc7FeA6nHq1/go-bitswap/testnet"
)
// Mocks returns |n| connected mock Blockservices backed by an in-memory
// bitswap test network with zero added latency.
func Mocks(n int) []BlockService {
	network := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
	gen := bitswap.NewTestSessionGenerator(network)
	var services []BlockService
	for _, inst := range gen.Instances(n) {
		services = append(services, New(inst.Blockstore(), inst.Exchange))
	}
	return services
}
|
package poly
import (
"fmt"
"github.com/renproject/secp256k1"
"github.com/renproject/shamir/shamirutil"
)
// Poly represents a polynomial in the field defined by the elliptic curve
// secp256k1. That is, the field of integers modulo n where n is the order of
// the secp256k1 group.
//
// A Poly can be indexed into, where index `i` will be the `i`th coefficient.
// For example, the constant term is index 0.
//
// Since this type just aliases a slice, all of the considerations of using a
// slice apply. For example, attempting to access coefficients outside of the
// length bound will panic, as will using a polynomial in a context where its
// capacity is too small (e.g. copying a polynomial that is too large for the
// destination's capacity). The fact that this type is an alias also means that
// it is possible to directly modify properties of the slice and the underlying
// memory. To ensure the correct functioning of this type, these manual
// modifications should be avoided, and rather the provided methods used.
type Poly []secp256k1.Fn
// NewFromSlice constructs a polynomial whose coefficients are copied from the
// given slice: index i of the slice becomes the x^i coefficient. Leading zero
// coefficients are then trimmed, so the resulting degree may be less than
// len(coeffs)-1. The input slice is not retained or modified.
func NewFromSlice(coeffs []secp256k1.Fn) Poly {
	p := make(Poly, len(coeffs))
	copy(p, coeffs)
	p.removeLeadingZeros()
	return p
}
// NewWithCapacity constructs a new polynomial with the given capacity,
// initialised to the zero polynomial.
//
// NOTE: This function will panic if the argument is less than 1.
func NewWithCapacity(c int) Poly {
	p := Poly(make([]secp256k1.Fn, c))
	// Truncate to degree 0 and clear the constant term.
	p.Zero()
	return p
}
// String implements the Stringer interface, rendering the polynomial as
// "c0 + c1 x + c2 x^2 + ...".
func (p Poly) String() string {
	// The constant term is always printed, even when zero.
	c := *p.Coefficient(0)
	s := fmt.Sprintf("%v", c.Int())
	for i := 1; i <= p.Degree(); i++ {
		c = *p.Coefficient(i)
		switch i {
		case 1:
			s += fmt.Sprintf(" + %v x", c.Int())
		default:
			s += fmt.Sprintf(" + %v x^%v", c.Int(), i)
		}
	}
	return s
}
// Degree returns the degree of the polynomial: the exponent of the highest
// stored term. For example, 3x^2 + 2x + 1 has degree 2.
func (p Poly) Degree() int {
	// A polynomial stored as a coefficient slice has one coefficient per
	// term, so the degree is one less than the slice length.
	n := len(p)
	return n - 1
}
// Coefficient returns a pointer to the `i`th coefficient of the polynomial.
//
// NOTE: If `i` is greater than the degree of the polynomial, this function
// will panic.
func (p Poly) Coefficient(i int) *secp256k1.Fn {
	coeff := &p[i]
	return coeff
}
// Set copies a given polynomial into the destination polynomial. Since the
// memory is copied, the argument will remain unchanged.
func (p *Poly) Set(a Poly) {
	// Resize the destination first so every coefficient of a has a slot.
	p.setLenByDegree(a.Degree())
	for i := range a {
		(*p)[i] = a[i]
	}
}
// IsZero returns true if the polynomial is the zero polynomial, and false
// otherwise. The zero polynomial is defined to have degree 0 and a constant
// term that is equal to 0 (the additive identity in the field).
func (p *Poly) IsZero() bool {
	// Work on a copy of the slice header so the caller's length is untouched.
	truncated := *p
	truncated.removeLeadingZeros()
	if truncated.Degree() != 0 {
		return false
	}
	return truncated.Coefficient(0).IsZero()
}
// Eq returns true if the two polynomials are equal and false if they are not.
// Equality of polynomials is defined as all coefficients being equal, after
// discounting leading zero coefficients on either side.
func (p *Poly) Eq(other Poly) bool {
	// Trim both operands (local slice headers only) before comparing.
	trimmed := *p
	trimmed.removeLeadingZeros()
	other.removeLeadingZeros()
	// Different degrees can never be equal.
	if trimmed.Degree() != other.Degree() {
		return false
	}
	// Compare coefficient by coefficient.
	for i := range trimmed {
		if !trimmed.Coefficient(i).Eq(other.Coefficient(i)) {
			return false
		}
	}
	return true
}
// Zero sets the polynomial to the zero polynomial (additive identity): the
// polynomial of degree 0 whose constant term is also 0.
func (p *Poly) Zero() {
	// Keep exactly one coefficient and clear it.
	(*p) = (*p)[:1]
	(*p)[0].Clear()
}
// setLenByDegree resizes the underlying slice so it holds a polynomial of the
// given degree: a degree-d polynomial needs exactly d+1 coefficients.
func (p *Poly) setLenByDegree(d int) {
	*p = (*p)[:d+1]
}
// removeLeadingZeros ensures that the x^deg(p) coefficient of the polynomial
// is non-zero by possibly reducing its Degree(). Degree-0 (and empty)
// polynomials are left untouched.
func (p *Poly) removeLeadingZeros() {
	for {
		d := p.Degree()
		if d <= 0 || !p.Coefficient(d).IsZero() {
			return
		}
		p.setLenByDegree(d - 1)
	}
}
// Evaluate computes the value of the polynomial at the given point using
// Horner's method, starting from the highest coefficient.
func (p *Poly) Evaluate(x secp256k1.Fn) secp256k1.Fn {
	acc := *p.Coefficient(p.Degree())
	for i := p.Degree() - 1; i >= 0; i-- {
		// acc = acc*x + c_i
		acc.Mul(&acc, &x)
		acc.Add(&acc, p.Coefficient(i))
	}
	return acc
}
// ScalarMul computes the multiplication of the input polynomial by the input
// scale factor from the field that the polynomial is defined over. This
// function is safe for aliasing: the argument may be an alias of the caller.
//
// NOTE: If the destination polynomial doesn't have sufficient capacity to
// store the result, this function will panic. To ensure that the destination
// has enough capacity, it is enough to ensure that the capacity is at least as
// big as `deg(a) + 1`.
func (p *Poly) ScalarMul(a Poly, s secp256k1.Fn) {
	switch {
	case s.IsZero():
		// Multiplying by zero collapses everything to the zero polynomial.
		p.Zero()
	case s.IsOne():
		// Multiplying by one is a plain copy.
		p.Set(a)
	default:
		p.setLenByDegree(a.Degree())
		for i := range *p {
			p.Coefficient(i).Mul(a.Coefficient(i), &s)
		}
	}
}
// Add computes the addition of the two input polynomials and stores the result
// in the caller. This function is safe for aliasing: either (and possibly
// both) of the input polynomials may be an alias of the caller.
//
// NOTE: If the destination polynomial doesn't have sufficient capacity to
// store the result, this function will panic. To ensure that the destination
// has enough capacity, it is enough to ensure that the capacity is at least as
// big as `max(deg(a), deg(b)) + 1`. It is possible that the result will have
// degree smaller than this, but this will only happen in the case that some of
// the leading terms cancel.
func (p *Poly) Add(a, b Poly) {
	if a.Degree() > b.Degree() {
		p.setLenByDegree(a.Degree())
		// Coefficients up to deg(b) are the sum of both inputs.
		for i := range b {
			p.Coefficient(i).Add(a.Coefficient(i), b.Coefficient(i))
		}
		// The higher-order coefficients come from a alone.
		copy((*p)[b.Degree()+1:], a[b.Degree()+1:])
	} else {
		p.setLenByDegree(b.Degree())
		// Coefficients up to deg(a) are the sum of both inputs.
		for i := range a {
			p.Coefficient(i).Add(a.Coefficient(i), b.Coefficient(i))
		}
		// The higher-order coefficients come from b alone.
		copy((*p)[a.Degree()+1:], b[a.Degree()+1:])
	}
	if a.Degree() == b.Degree() {
		// Account for the fact that the leading coefficients of a and b may
		// have cancelled each other
		p.removeLeadingZeros()
	}
}
// AddScaled computes the addition of the first polynomial and a scaled version
// of the second polynomial (i.e. a + s*b) and stores the result in the caller.
// This is equivalent to doing the scaling and then the addition separately,
// but allows for better memory efficiency. This function is safe for aliasing:
// either (and possibly both) of the input polynomials may be an alias of the
// caller.
//
// NOTE: If the destination polynomial doesn't have sufficient capacity to
// store the result, this function will panic. To ensure that the destination
// has enough capacity, it is enough to ensure that the capacity is at least as
// big as `max(deg(a), deg(b)) + 1`. It is possible that the result will have
// degree smaller than this, but this will only happen in the case that some of
// the leading terms cancel.
func (p *Poly) AddScaled(a, b Poly, s secp256k1.Fn) {
	// Scratch value reused for every s*b_i product.
	var scaled secp256k1.Fn
	if a.Degree() > b.Degree() {
		p.setLenByDegree(a.Degree())
		// p_i = a_i + s*b_i for the overlapping coefficients.
		for i := range b {
			scaled.Mul(&s, b.Coefficient(i))
			p.Coefficient(i).Add(a.Coefficient(i), &scaled)
		}
		// The remaining higher-order coefficients come from a alone.
		copy((*p)[b.Degree()+1:], a[b.Degree()+1:])
	} else {
		p.setLenByDegree(b.Degree())
		for i := range a {
			scaled.Mul(&s, b.Coefficient(i))
			p.Coefficient(i).Add(a.Coefficient(i), &scaled)
		}
		// The remaining higher-order coefficients are s*b_i.
		for i := a.Degree() + 1; i <= b.Degree(); i++ {
			p.Coefficient(i).Mul(b.Coefficient(i), &s)
		}
	}
	if a.Degree() == b.Degree() {
		// Account for the fact that the leading coefficients of a and b may
		// have cancelled each other
		p.removeLeadingZeros()
	}
}
// Sub subtracts the second polynomial from the first polynomial (i.e. a - b)
// and stores the result in the destination polynomial. This function is safe
// for aliasing: either (and possibly both) of the input polynomials may be an
// alias of the caller.
//
// NOTE: If the destination polynomial doesn't have sufficient capacity to
// store the result, this function will panic. To ensure that the destination
// has enough capacity, it is enough to ensure that the capacity is at least as
// big as `max(deg(a), deg(b)) + 1`. It is possible that the result will have
// degree smaller than this, but this will only happen in the case that some of
// the leading terms cancel.
func (p *Poly) Sub(a, b Poly) {
	// Temporary value to store the negative of the coefficients from b
	var neg secp256k1.Fn
	if a.Degree() > b.Degree() {
		p.setLenByDegree(a.Degree())
		// p_i = a_i + (-b_i) for the overlapping coefficients.
		for i := range b {
			neg.Negate(b.Coefficient(i))
			p.Coefficient(i).Add(a.Coefficient(i), &neg)
		}
		// The remaining coefficients are just those of a
		copy((*p)[b.Degree()+1:], a[b.Degree()+1:])
	} else {
		p.setLenByDegree(b.Degree())
		for i := range a {
			neg.Negate(b.Coefficient(i))
			p.Coefficient(i).Add(a.Coefficient(i), &neg)
		}
		// The remaining terms are negatives of the coefficients of b
		for i := a.Degree() + 1; i <= b.Degree(); i++ {
			p.Coefficient(i).Negate(b.Coefficient(i))
		}
	}
	if a.Degree() == b.Degree() {
		// Account for the fact that the leading coefficients of a and b may
		// have cancelled each other
		p.removeLeadingZeros()
	}
}
// Neg computes the negation of the polynomial and stores it in the destination
// polynomial. This function is safe for aliasing: the argument may be an alias
// of the caller.
//
// NOTE: If the destination polynomial doesn't have sufficient capacity to
// store the result, this function will panic. To ensure that the destination
// has enough capacity, it is enough to ensure that the capacity is at least as
// big as `deg(a) + 1`.
func (p *Poly) Neg(a Poly) {
	// Match the destination length to the source degree first.
	p.setLenByDegree(a.Degree())
	for i := 0; i < len(*p); i++ {
		p.Coefficient(i).Negate(a.Coefficient(i))
	}
}
// Mul computes the product of the two polynomials and stores the result in the
// destination polynomial. This function is not safe when the two input
// polynomials are aliases of each other as well as the destination; in this
// case the multiplication will give an incorrect result. The exception to this
// is when the polynomial has degree 0. Otherwise, either input polynomial may
// individually be an alias of the destination polynomial or be aliases of
// each other (but not the destination) and still be safe.
//
// NOTE: If the destination polynomial doesn't have sufficient capacity to
// store the result, this function will panic. To ensure that the destination
// has enough capacity, it is enough to ensure that the capacity is at least as
// big as `deg(a) + deg(b) + 1`.
func (p *Poly) Mul(a, b Poly) {
	// Short circuit if either polynomial is zero
	if a.IsZero() || b.IsZero() {
		p.Zero()
		return
	}
	// In order to allow for the case that p == a or p == b, we need to make
	// sure that we do not clobber coefficients before we have finished using
	// them. To do this, we populate the higher Degree() coefficients first.
	// However, we need to consider that, for instance, the coefficient for the
	// x^1 term is equal to a0b1 + a1b0, which clearly uses the x^1 coefficient
	// of both a and b. We therefore need to check which of a and b are aliased
	// by p, and make sure to use the higher Degree() coefficient of the
	// aliased polynomial first in our sum, before it gets clobbered.
	//
	// We need to check that the slices point to the same memory. Go doesn't
	// allow comparison of slices other than to the nil value, so we use the
	// following workaround wherein we compare the addresses of the first
	// elements of the slices instead.
	aliasedA := p.Coefficient(0) == a.Coefficient(0)
	p.setLenByDegree(a.Degree() + b.Degree())
	var aStart, bStart, numTerms int
	var ab secp256k1.Fn
	// If p aliases a, then we need to count down in the coefficients of a to
	// avoid clobbering values that we will need to use
	if aliasedA {
		// Each output coefficient i is the convolution sum of a_j*b_(i-j).
		for i := a.Degree() + b.Degree(); i >= 0; i-- {
			aStart = shamirutil.Min(a.Degree(), i)
			bStart = shamirutil.Max(0, i-a.Degree())
			numTerms = shamirutil.Min(aStart, b.Degree()-bStart)
			// Account for the fact that initially the memory might not be
			// zeroed
			ab.Mul(a.Coefficient(aStart), b.Coefficient(bStart))
			*p.Coefficient(i) = ab
			for j := 1; j <= numTerms; j++ {
				// Count down in a and up in b
				ab.Mul(a.Coefficient(aStart-j), b.Coefficient(bStart+j))
				p.Coefficient(i).Add(p.Coefficient(i), &ab)
			}
		}
	} else {
		// It is possible that p does not alias either a or b here, but in
		// this case either of the branches would work so we don't need to
		// consider this case separately
		for i := a.Degree() + b.Degree(); i >= 0; i-- {
			aStart = shamirutil.Max(0, i-b.Degree())
			bStart = shamirutil.Min(b.Degree(), i)
			numTerms = shamirutil.Min(a.Degree()-aStart, bStart)
			// Account for the fact that initially the memory might not be
			// zeroed
			ab.Mul(a.Coefficient(aStart), b.Coefficient(bStart))
			*p.Coefficient(i) = ab
			for j := 1; j <= numTerms; j++ {
				// Count up in a and down in b
				ab.Mul(a.Coefficient(aStart+j), b.Coefficient(bStart-j))
				p.Coefficient(i).Add(p.Coefficient(i), &ab)
			}
		}
	}
}
// Divide computes the division of `a` by `b`, storing the quotient in `q` and
// the remainder in `r`. That is, after calling this function, the polynomials
// should satisfy `a = bq + r`. Note that if either `q` or `r` are aliased by
// either `a` or `b`, the result will be incorrect. This is also true if `q` is
// an alias of `r`. The inputs `a` and `b` are not modified and can therefore
// also be aliases of each other.
//
// NOTE: If the destination polynomials (i.e. `q` and `r`) don't have
// sufficient capacity to store the result, this function will panic. To ensure
// that these polynomials have enough capacity, it is sufficient to ensure that
// `q` has a capacity of at least `deg(a) - deg(b) + 1`, and that `r` has a
// capacity of at least `deg(a) + 1`.
func Divide(a, b Poly, q, r *Poly) {
	// Short circuit when the division is trivial
	if b.Degree() > a.Degree() {
		q.Zero()
		r.Set(a)
		return
	}
	// Standard polynomial long division: repeatedly cancel the leading term
	// of the running remainder using the leading term of b.
	var c, s, bs, cInv secp256k1.Fn
	var d, diff int
	r.Set(a)
	d = b.Degree()
	// c is the leading coefficient of b; its inverse is computed once and
	// reused for every cancellation step.
	c = *b.Coefficient(b.Degree())
	q.setLenByDegree(r.Degree() - d)
	cInv.Inverse(&c)
	for r.Degree() >= d {
		// s is the factor that cancels r's current leading term.
		s.Mul(&cInv, r.Coefficient(r.Degree()))
		// q = q + sx^(deg(r) - d)
		diff = r.Degree() - d
		*q.Coefficient(diff) = s
		// r = r - b sx^(deg(r) - d)
		for i := range b {
			bs.Mul(&s, b.Coefficient(i))
			bs.Negate(&bs)
			r.Coefficient(diff+i).Add(r.Coefficient(diff+i), &bs)
		}
		// The leading term has been cancelled by construction; drop it and
		// trim any further zeros that the subtraction produced.
		r.setLenByDegree(r.Degree() - 1)
		r.removeLeadingZeros()
	}
	// In the case that r = 0, we need to fix the data representation
	if len(*r) == 0 {
		r.Zero()
	}
}
|
/*
* Copyright (c) 2020 - present Kurtosis Technologies LLC.
* All Rights Reserved.
*/
package testsuite
/*
An interface which the user implements to register their tests.
*/
type TestSuite interface {
	// Get all the tests in the test suite, keyed by name; this is where users
	// will "register" their tests.
	GetTests() map[string]Test
	// Determines how many IP addresses will be available in the Docker network created for each test, which determines
	// the maximum number of services that can be created in the test. The maximum number of services that each
	// test can have = 2 ^ network_width_bits
	GetNetworkWidthBits() uint32
}
|
/*
A word nest is created by taking a starting word, and generating a new string by placing the word inside itself. This process is then repeated.
Nesting 3 times with the word "incredible":
start = incredible
first = incre|incredible|dible
second = increin|incredible|credibledible
third = increinincr|incredible|ediblecredibledible
The final nest is "increinincrincredibleediblecredibledible" (depth = 3).
Given a starting word and the final word nest, return the depth of the word nest.
*/
package main
import (
"fmt"
"strings"
)
// main runs the word-nest depth computation over a fixed set of examples,
// printing both the iterative and closed-form answer for each (the two
// values should agree for every valid nest).
func main() {
	test("incredible", "increinincrincredibleediblecredibledible")
	test("floor", "floor")
	test("code", "cocodccococodededeodeede")
	test("engagement", "engenengagemengagemeengagementntentgagementagement")
	test("passage", "passpassageage")
	test("factory", "ffacfactofactfafactoryctoryoryrytoryactory")
	test("deny", "ddededdddenyenyenyenynynyeny")
	test("jinx", "jijijjijjijijjinxinxnxnxinxnxinxnxnx")
	test("deal", "dedddealealealal")
	test("paradox", "parparaparadoxdoxadox")
	test("meet", "mmememmeeteeteteteet")
	test("last", "lalastst")
	test("silence", "sisilsisilencelenceencelence")
	test("inflate", "inflate")
	test("ruin", "rurrurrrrrrururuinininuinuinuinuinuininuinin")
	test("episode", "episoepisepisepiepiepiepisoepisodedesodesodesodeodeodede")
	test("dictate", "dictadicdidictdiddictadictadictateteteictatectateatectatetatete")
	test("caller", "callcacacalccallcacaccallerallerllerllererallerlerllerllerer")
	test("sweater", "sweatsweswsweatereateraterer")
	test("measure", "measumememeasumemmeasmmeasureeasureureeasureasurereasureasurere")
	test("relieve", "relierelierelrelierrelieveelieveveieveveve")
	test("home", "hohohohhohohhhohhomeomemeomeomememeomemememe")
	test("profession", "profesprofessionsion")
	test("continuous", "contcontcontinuoconcocontinuousntinuoustinuoususinuousinuous")
}
// test prints the nesting depth of nest n for word w, computed both
// iteratively and in closed form.
func test(w, n string) {
	iterative, closed := wordnestit(w, n), wordnestclosed(w, n)
	fmt.Println(iterative, closed)
}
// wordnestit computes the nesting depth iteratively: it repeatedly deletes
// one occurrence of w from n until only w itself remains, counting the
// deletions. It returns -1 if n cannot be reduced to w this way.
func wordnestit(w, n string) int {
	depth := 0
	for n != w {
		i := strings.Index(n, w)
		if i < 0 {
			// w no longer occurs in n, so n is not a nest of w.
			return -1
		}
		// Excise this occurrence of w and keep unwrapping.
		n = n[:i] + n[i+len(w):]
		depth++
	}
	return depth
}
// wordnestclosed computes the nesting depth in closed form: each nesting step
// adds exactly one more copy of w, so the depth is the number of copies of w
// that fit in n, minus one.
func wordnestclosed(w, n string) int {
	copies := len(n) / len(w)
	return copies - 1
}
|
// Package robo is a set of utils for exploring unknown areas
package robo
|
// Package logutil contains functionality for working with logs.
package logutil
|
package utils
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_Set exercises the basic add/contains/remove lifecycle of a StringSet.
func Test_Set(t *testing.T) {
	s := NewStringSet()
	assert.Len(t, s.Iter(), 0)
	assert.False(t, s.Contains("test"))
	s.Add("test")
	assert.True(t, s.Contains("test"))
	// Adding a duplicate must not grow the set.
	s.Add("test")
	assert.Len(t, s.Iter(), 1)
	s.Remove("test")
	assert.False(t, s.Contains("test"))
	assert.Len(t, s.Iter(), 0)
}
// Test_SetPreFill checks that constructor arguments are stored in the set.
func Test_SetPreFill(t *testing.T) {
	s := NewStringSet("test")
	assert.Len(t, s.Iter(), 1)
	assert.True(t, s.Contains("test"))
}
// Test_SetChain checks that Add returns the set, allowing call chaining.
func Test_SetChain(t *testing.T) {
	s := NewStringSet("test").Add("test2")
	assert.Len(t, s.Iter(), 2)
	assert.True(t, s.Contains("test"))
	assert.True(t, s.Contains("test2"))
}
func Test_CopyStringSet(t *testing.T) {
set := NewStringSet("test")
set2 := CopyStringSet(set)
assert.Equal(t, 1, len(set.Iter()))
assert.True(t, set.Contains("test"))
assert.Equal(t, 1, len(set2.Iter()))
assert.True(t, set2.Contains("test"))
set2.Remove("test")
assert.Equal(t, 1, len(set.Iter()))
assert.True(t, set.Contains("test"))
assert.Equal(t, 0, len(set2.Iter()))
assert.False(t, set2.Contains("test"))
} |
package utils
import (
"fmt"
"github.com/fatih/color"
"os"
)
// PrintErrorAndExit prints message in red to stdout, then terminates the
// process with exit status 1.
func PrintErrorAndExit(message string) {
	color.Set(color.FgRed)
	fmt.Fprintln(os.Stdout, message)
	// Reset the terminal color explicitly: a deferred Unset would never run
	// because os.Exit skips deferred calls.
	color.Unset()
	os.Exit(1)
}
|
package golog
// Publishes log-category messages.
// "errors"
// messageQueue publishes log entries. It is currently a skeleton; the
// embedded base queue below is not wired up yet.
type messageQueue struct {
	// messagequeue.Base
}
// PushType describes a single log message to be published to the queue.
type PushType struct {
	Appid    string   // identifier of the application that produced the log
	LogId    string   // unique id of this log entry
	LogTime  string   // timestamp of the entry (string format — TODO confirm layout)
	LogText  string   // message body
	LogLevel LogLevel // severity; LogLevel is declared elsewhere in this package
}
// newMessageQueueInstance returns a fresh messageQueue publisher.
func newMessageQueueInstance() *messageQueue {
	return &messageQueue{}
}
// publishLog publishes one log entry to the queue. It is currently a stub
// that always reports success. The receiver was renamed from the
// non-idiomatic `this` to the conventional short form.
func (mq *messageQueue) publishLog(content PushType) error {
	return nil
}
|
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"iboxctl/pkg/root"
"iboxctl/pkg/tools"
"iboxctl/pkg/udpsend"
)
// Command state for the "start" subcommand.
var (
	stop = make(chan struct{}) // NOTE(review): declared but never used in this file — confirm before removing
	ch   string                // channel/server id, populated by the --ch flag
	ip   string                // target ip; currently unused (ping call below is commented out)

	// ipCmd implements `start`: it prints the parsed flags and sends the
	// "power_ch<ch>" payload over UDP.
	ipCmd = &cobra.Command{
		Use:   "start",
		Short: "start server",
		RunE: func(c *cobra.Command, args []string) error {
			tools.PrintFlags(c.Flags())
			fmt.Printf("start server %s\n", ch)
			//ping.StartPing(ip)
			udpsend.Exec("power_ch" + ch)
			return nil
		},
	}
)
// init registers the --ch flag and attaches the command to the root command.
func init() {
	ipCmd.PersistentFlags().StringVar(&ch, "ch", "", "server's id")
	//ipCmd.PersistentFlags().StringVar(&ip, "ip", "","server's id")
	root.AddCmd(ipCmd)
}
/*
* This file is part of impacca. Copyright (C) 2013 and above Shogun <shogun@cowtech.it>.
* Licensed under the MIT license, which can be found at https://choosealicense.com/licenses/mit.
*/
package utils
import (
"bufio"
"fmt"
"io"
"os"
"os/exec"
"regexp"
"strings"
"sync"
"syscall"
)
// debugMatcher recognizes case-insensitive truthy strings ("true", "yes", "y", "t", "1").
var debugMatcher = regexp.MustCompile("(?i)^(true|yes|y|t|1)$")
// ExecutionResult represents a command execution result
type ExecutionResult struct {
	ExitCode int    // process exit status (0 on success)
	Stdout   string // buffered standard output
	Stderr   string // buffered standard error
	Error    error  // non-exit failure (e.g. command could not be started)
}
// Verify checks the command executed properly and exited with exit code 0
// NOTE(review): assumes Fatal terminates the process, so at most one branch
// ever takes effect — confirm Fatal's behavior.
func (e ExecutionResult) Verify(executableName, failureMessage string) {
	if e.Error != nil {
		Fatal("%s: {errorPrimary}%s{-}", failureMessage, e.Error.Error())
	} else if e.ExitCode != 0 {
		Fatal("%s: %s failed with code {errorPrimary}%d{-}.", failureMessage, executableName, e.ExitCode)
	}
}
// lineStartMatcher matches the beginning of every line (multi-line mode).
// Compiled once at package level instead of on every wrapOutput call, and
// with MustCompile instead of a silently ignored Compile error.
var lineStartMatcher = regexp.MustCompile("(?m)(^)")

// wrapOutput prefixes every line of output with a chain marker plus the ANSI
// "move to column 4" escape, visually indenting subprocess output.
func wrapOutput(output string) string {
	return lineStartMatcher.ReplaceAllString(output, "⛓️\x1b[4G$1")
}
// showAndBufferOutput drains source line by line, appending each line to
// *buffer and (when destination is non-nil) echoing it wrapped to destination.
// FIX: wg.Done is now deferred — previously a panic or early return from the
// scan loop would have left the WaitGroup counter hanging, deadlocking the
// caller's wg.Wait().
func showAndBufferOutput(wg *sync.WaitGroup, source io.ReadCloser, buffer *string, destination *os.File) {
	defer wg.Done()
	defer source.Close()
	scanner := bufio.NewScanner(source)
	for scanner.Scan() {
		line := scanner.Text()
		if destination != nil {
			fmt.Fprintln(destination, wrapOutput(line))
		}
		*buffer += line + "\n"
	}
}
// Execute executes a command, buffering stdout/stderr into the result and
// (when ShowDebug or showOutput is set) mirroring them to the terminal.
// A non-zero exit is reported via ExitCode with Error reset to nil; any other
// failure is left in Error.
func Execute(showOutput bool, cmd string, args ...string) (result ExecutionResult) {
	gitCmd := exec.Command(cmd, args...)
	// Pipe stdout and stderr
	var destinationOut, destinationErr *os.File
	var wg sync.WaitGroup
	wg.Add(2)
	if ShowDebug || showOutput {
		destinationOut = os.Stdout
		destinationErr = os.Stderr
	}
	// NOTE(review): pipe-creation errors are ignored; a nil pipe would make the
	// reader goroutines fail — confirm this can't happen in practice.
	commandStdout, _ := gitCmd.StdoutPipe()
	commandStderr, _ := gitCmd.StderrPipe()
	go showAndBufferOutput(&wg, commandStdout, &result.Stdout, destinationOut)
	go showAndBufferOutput(&wg, commandStderr, &result.Stderr, destinationErr)
	// Execute the command
	Debug("Executing: %s %s", cmd, strings.Join(args, " "))
	result.Error = gitCmd.Run()
	// Wait until both reader goroutines have drained their pipes.
	wg.Wait()
	// The command exited with errors, copy the exit code
	if result.Error != nil {
		if exitError, casted := result.Error.(*exec.ExitError); casted {
			result.Error = nil // Reset the error since it just a command failure
			result.ExitCode = exitError.Sys().(syscall.WaitStatus).ExitStatus()
		}
	}
	if showOutput {
		FinishStep(result.ExitCode)
	}
	return
}
// GitMustBeClean aborts (via Fatal) when `git status --short` reports any
// uncommitted change in the working copy; reason is interpolated into the
// abort message.
func GitMustBeClean(reason string) {
	res := Execute(false, "git", "status", "--short")
	res.Verify("git", "Cannot check repository status")
	// Any output at all from --short means pending changes.
	if len(res.Stdout) > 0 {
		Fatal("Cannot {errorPrimary}%s{-} as the working directory is not clean. Please commit all local changes and try again.", reason)
	}
}
|
/*
* Get the details of a specific network in a given data center for a given account.
*/
package main
import (
"flag"
"fmt"
"os"
"path"
"strings"
"github.com/grrtrr/clcv2"
"github.com/grrtrr/clcv2/clcv2cli"
"github.com/grrtrr/clcv2/utils"
"github.com/grrtrr/exit"
"github.com/olekukonko/tablewriter"
)
// main parses -l <location> plus one hex network ID argument, then queries
// and pretty-prints the network's details for the current account.
func main() {
	var location = flag.String("l", os.Getenv("CLC_LOCATION"), "Data centre alias (needed to resolve IDs)")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s [options] -l <Location> <Network-ID (hex)>\n", path.Base(os.Args[0]))
		flag.PrintDefaults()
	}
	flag.Parse()
	/* Location is required (despite hex id), an empty location leads to a "404 Not Found" response. */
	if flag.NArg() != 1 || *location == "" {
		flag.Usage()
		os.Exit(1)
	}
	client, err := clcv2cli.NewCLIClient()
	if err != nil {
		exit.Fatal(err.Error())
	}
	// Always query all IPs. It does not increase the overhead much and is the most comprehensive variant.
	// Other possible query types are: "claimed" , "free", and "none" (does not list any IP addresses).
	details, err := client.GetNetworkDetails(*location, flag.Arg(0), "all")
	if err != nil {
		exit.Fatalf("failed to query network details of %s: %s", flag.Arg(0), err)
	}
	printNetworkDetails(details)
}
// printNetworkDetails pretty-prints @details: a summary table (CIDR, gateway,
// free IPs, type, VLAN) followed, when applicable, by a table of claimed IPs.
func printNetworkDetails(details clcv2.NetworkDetails) {
	var table = tablewriter.NewWriter(os.Stdout)
	var claimed []clcv2.IpAddressDetails
	var free []string

	// Split the address list into claimed entries and free addresses.
	for _, addr := range details.IpAddresses {
		if addr.Claimed {
			claimed = append(claimed, addr)
		} else {
			free = append(free, addr.Address)
		}
	}

	fmt.Printf("Details of network %q", details.Name)
	if details.Description != details.Name {
		fmt.Printf(" (%s)", details.Description)
	}
	fmt.Printf(", ID %s:\n", details.Id)
	table.SetHeader([]string{"CIDR", "Gateway", fmt.Sprintf("Free IPs (%d)", len(free)), "Type", "VLAN"})
	table.Append([]string{
		details.Cidr,
		details.Gateway,
		strings.Join(utils.CollapseIpRanges(free), ", "),
		details.Type,
		fmt.Sprint(details.Vlan),
	})
	table.Render()

	if len(claimed) > 0 {
		table = tablewriter.NewWriter(os.Stdout)
		table.SetAutoFormatHeaders(false)
		table.SetAlignment(tablewriter.ALIGN_RIGHT)
		table.SetAutoWrapText(false)
		table.SetHeader([]string{"Address", "Claimed", "Server", "Type"})
		// FIX: previously this ranged over details.IpAddresses, so the
		// "claimed" table listed every IP (free ones included) even though
		// the claimed slice had just been computed for exactly this purpose.
		for _, i := range claimed {
			table.Append([]string{i.Address, fmt.Sprint(i.Claimed), i.Server, i.Type})
		}
		table.Render()
	}
}
|
package boltdb
import (
"github.com/coreos/bbolt"
. "github.com/smartystreets/goconvey/convey"
"io/ioutil"
"testing"
)
// init opens the shared test database before any test in this package runs.
// NOTE(review): the Open error (if any) is not surfaced here — confirm Open
// fails loudly on a missing/corrupt db file.
func init() {
	Open("../db/test.db")
}
// TestKeys lists the keys of the "test" bucket and logs them.
func TestKeys(t *testing.T) {
	if keys, err := Keys("test"); err != nil {
		t.Fatal("get keys err, ", err)
	} else {
		t.Log(keys)
	}
}
// TestGet fetches "test.name", dumps the payload to pay.json, and logs it.
func TestGet(t *testing.T) {
	payload, err := Get("test.name")
	switch {
	case err != nil:
		t.Fatal(err)
	case payload == nil:
		t.Fatal("get empty")
	}
	// Best-effort dump for manual inspection; write errors are ignored here.
	ioutil.WriteFile("pay.json", payload, 0700)
	t.Log(string(payload))
}
// TestSet covers Set on a fresh bucket: normal writes, invalid key/value
// combinations, and writing to a bucket that no longer exists.
func TestSet(t *testing.T) {
	bucket := "bucket111"
	if err := CreateBucket(bucket); err != nil {
		t.Fatalf("create bucket error, %v", err)
	}
	Convey("set key (normal test)", t, func() {
		So(Set(bucket, "k1", []byte("v1")), ShouldBeNil)
		So(Set(bucket, "k1", []byte("v2")), ShouldBeNil)
		So(Set(bucket, "k1", []byte("")), ShouldBeNil)
	})
	Convey("set key (special test)", t, func() {
		So(Set(bucket, "", []byte("v1")), ShouldNotBeNil)
		So(Set(bucket, "k1", nil), ShouldNotBeNil)
	})
	if err := DeleteBucket(bucket); err != nil {
		t.Fatalf("delete bucket error: %v", err)
	}
	Convey("set key (bucket does not exist)", t, func() {
		So(Set("notSuchBucket", "k1", []byte("v1")), ShouldEqual, ErrBucketNotExist)
	})
}
// TestCreateBucket creates buckets with ASCII, digit-leading, and non-ASCII
// names, cleans them up, and checks that an empty name is rejected.
func TestCreateBucket(t *testing.T) {
	Convey("test create bucket", t, func() {
		names := []string{"bucket1", "1bucket1", "中文"}
		for _, name := range names {
			So(CreateBucket(name), ShouldBeNil)
		}
		for _, name := range names {
			_ = DeleteBucket(name)
		}
		So(CreateBucket(""), ShouldEqual, bbolt.ErrBucketNameRequired)
	})
}
// TestDeleteBucket checks the three delete outcomes: an existing bucket is
// removed, deleting an unknown bucket errors, and an empty name is rejected.
func TestDeleteBucket(t *testing.T) {
	Convey("test delete bucket", t, func() {
		_ = CreateBucket("test")
		So(DeleteBucket("test"), ShouldBeNil)
		So(DeleteBucket("bucket1222121"), ShouldBeError)
		So(DeleteBucket(""), ShouldEqual, bbolt.ErrBucketNameRequired)
	})
}
// TestBuckets creates a couple of buckets and verifies they show up in the
// Buckets listing, then removes them again.
func TestBuckets(t *testing.T) {
	Convey("test lookup bucket", t, func() {
		names := []string{"lookB", "lookB2"}
		// create bucket
		for _, name := range names {
			_ = CreateBucket(name)
		}
		result, err := Buckets()
		So(err, ShouldBeNil)
		// check the specific bucket if it does exist
		for _, name := range names {
			So(result, ShouldContain, name)
		}
		// delete created buckets
		for _, name := range names {
			_ = DeleteBucket(name)
		}
	})
}
// TestDeleteKey removes key "name" from the "test" bucket.
func TestDeleteKey(t *testing.T) {
	if err := DeleteKey("test", "name"); err != nil {
		t.Error(err)
	}
}
|
package controllers
import (
"crypto/sha1"
"fmt"
"log"
"mick/models"
"net/http"
"text/template"
"github.com/jinzhu/gorm"
)
// CreateAdmin interactively creates the first Admin account (pseudo plus a
// SHA-1 hash of the password) when none exists yet, then starts the server.
// NOTE(review): unsalted SHA-1 is not a safe password hash — consider
// bcrypt/scrypt/argon2 before exposing this to real users.
func CreateAdmin(db *gorm.DB) {
	var admin models.Admin
	if db.Find(&admin).RecordNotFound() {
		var pseudo string
		var password string
		var result string
		var shaPassword []byte
		fmt.Println("Il n'existe actuellement aucun Admin. On va donc en créer un ;-)")
		// Prompt for the pseudo until the user confirms with "o" (oui);
		// result is reset so the next loop starts unconfirmed.
		for {
			if result == "o" {
				result = ""
				break
			} else {
				fmt.Printf("Rendre le pseudo : ")
				fmt.Scanf("%s", &pseudo)
				fmt.Printf("Tu as choisi \" %s \". Es tu bien sûr? (o/n) ", pseudo)
				fmt.Scanf("%s", &result)
			}
		}
		// Prompt for the password until confirmed, then persist the admin
		// with the hashed password.
		for {
			if result == "o" {
				db.Save(&models.Admin{Pseudo: pseudo, Password: string(shaPassword)})
				break
			} else {
				fmt.Printf("Passons au mot de passe : ")
				fmt.Scanf("%s", &password)
				h := sha1.New()
				h.Write([]byte(password))
				shaPassword = h.Sum(nil)
				fmt.Printf("Tu as choisi \" %s \". Es tu bien sûr? (o/n)\n Si oui, dans ce cas il sera crypté et sera : \" %x \" ", password, shaPassword)
				fmt.Scanf("%s", &result)
			}
		}
	}
	fmt.Println("Tout est ok. Amuses toi bien maintenant")
	StartServer()
}
// AdminHandler serves the admin photo-listing page.
type AdminHandler int

// ServeHTTP renders every stored photo with its album name, image, and legend.
func (h AdminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	photos := []models.Photo{}
	db, err := ConnDataBase()
	if err != nil {
		log.Println(err)
	}
	defer db.Close()
	db.Find(&photos)
	// FIX: the per-photo closing tag was "</figure" (missing '>'), emitting
	// invalid HTML for every entry.
	// NOTE(review): <div class="container"> is never closed — confirm whether
	// a matching </div> before </body> is wanted.
	const page = `
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>{{ .Titre }}</title>
{{ .CSS }}
</head>
<body>
<div class="container">
<h1>{{ .Titre }}</h1>
{{ range $p := .photo }}
<figure>
<h4>{{ $p.Album }}</h4>
<img src="/images/{{ $p.Name }}" style="max-width: 300px; height: auto" />
<figcaption>
<p>{{ $p.Legend }}</p>
</figcaption>
</figure>
{{ end }}
</body>
</html>
`
	t, err := template.New("page").Parse(page)
	check(err)
	model := map[string]interface{}{
		"Titre": "Photos",
		"photo": photos,
	}
	err = t.Execute(w, model)
	check(err)
}
// page is the package-level admin album template: one edit form per existing
// album plus a creation form. Used by AdminAlbumHandler.ServeHTTP below.
const page = `
{{ range $a := .album }}
<h4>{{ $a.Name }}</h4>
<form action="/admin/album/edit/{{ $a.Id }}" method="POST">
<div>Nom<br/><input type="text" name="Name" value={{ $a.Name }}></div>
<input type="submit" class="edit" value="Valider">
</form>
{{ end }}
<form action="/admin/album/new" method="POST">
<div>Nom<br/><input type="text" name="Name"></div>
<input type="submit" class="new" value="Valider">
</form>
`
type AdminAlbumHandler int
func (h AdminAlbumHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
albums := []models.Album{}
db, err := ConnDataBase()
if err != nil {
log.Println(err)
}
defer db.Close()
db.Find(&albums)
t, err := template.New("page").Parse(page)
check(err)
model := map[string]interface{}{
"Titre": "Admin Albums",
"album": albums,
}
err = t.Execute(w, model)
check(err)
}
type AdminNewAlbumHandler int
func (h AdminNewAlbumHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method == "POST" {
album := models.Album{}
db, err := ConnDataBase()
if err != nil {
log.Println(err)
}
defer db.Close()
r.ParseForm()
album.Name = r.FormValue("Name")
db.Save(&album)
http.Redirect(w, r, "/admin/album", 302)
} else {
http.Redirect(w, r, "/admin/album", 302)
}
}
|
package slice
// Array values ("arrays" below) have a fixed length, while slice values ("slices" below) are variable-length.
import (
"strconv"
"reflect"
"fmt"
"testing"
)
// TestGetArrayLen demonstrates length versus capacity for slices created via
// make (with and without an explicit capacity), a literal, and re-slicing.
func TestGetArrayLen(t *testing.T) {
	s1 := make([]int, 5)
	fmt.Printf("The length of s1:%d\n", len(s1))
	fmt.Printf("The capacity of s1:%d\n", cap(s1))
	fmt.Printf("The value of s1:%d\n", s1)

	s2 := make([]int, 5, 8)
	fmt.Printf("The length of s2:%d\n", len(s2))
	fmt.Printf("The capacity of s2:%d\n", cap(s2))
	fmt.Printf("The value of s2:%d\n", s2)

	s3 := []int{1, 2, 3, 4, 5}
	fmt.Printf("The length of s3:%d\n", len(s3))
	fmt.Printf("The capacity of s3:%d\n", cap(s3))
	fmt.Printf("The value of s3:%d\n", s3)

	// s4 shares s3's backing array; its capacity runs to the end of s3.
	s4 := s3[2:4]
	fmt.Printf("The length of s4:%d\n", len(s4))
	fmt.Printf("The capacity of s4:%d\n", cap(s4))
	fmt.Printf("The value of s4:%d\n", s4[0:3])
	fmt.Printf("The most value of s4:%d\n", s4[0:cap(s4)])
}
// func Testsliceparam
// Testsliceparam writes "hello" into element [1][1] of the passed
// array-of-slices and returns it. The outer array is copied at the call, but
// the inner slices share backing storage with the caller, so the write is
// visible outside as well.
// NOTE(review): a Test-prefixed function with a non-test signature makes
// `go test` fail with "wrong signature for Testsliceparam" — consider
// renaming (kept as-is here to preserve the external interface).
func Testsliceparam(complexArray [3][]string) [3][]string {
	complexArray[1][1] = "hello"
	// return complexArray1
	return complexArray
}
// TestInitArray shows array initialization with an inferred length ([...]).
func TestInitArray(t *testing.T) {
	nums := [...]int{1, 2, 3, 4}
	for _, n := range nums {
		t.Log(n)
	}
}
// TestArraySection shows slicing a whole array with the full-range operator.
func TestArraySection(t *testing.T) {
	nums := [...]int{1, 2, 3, 4}
	section := nums[:]
	t.Log(section)
}
// TestArrayType shows that arrays of different lengths are distinct types.
func TestArrayType(t *testing.T) {
	arrayA := [...]int{1, 2, 3}
	arrayB := [...]int{1, 2, 3, 4}
	typeA, typeB := reflect.TypeOf(arrayA), reflect.TypeOf(arrayB)
	// [3]int and [4]int are different types, so this prints false.
	fmt.Println(typeA == typeB)
	fmt.Println(typeA)
	fmt.Println(typeB)
}
// TestArrayStringAdd converts each numeric string to an int and logs the
// original value.
// FIX: the Atoi error was silently discarded and its result only fed an
// empty `if val < 9 {}` branch (dead code); the error is now surfaced.
func TestArrayStringAdd(t *testing.T) {
	str1 := []string{"2", "3", "9", "5"}
	for _, value := range str1 {
		val, err := strconv.Atoi(value)
		if err != nil {
			t.Fatalf("Atoi(%q): %v", value, err)
		}
		_ = val // conversion result is otherwise unused, as in the original
		t.Log(value)
	}
}
package p2p
import (
"fmt"
"github.com/hashicorp/memberlist"
"github.com/jlingohr/p2pvstream/fileutil"
"github.com/jlingohr/p2pvstream/hls"
"github.com/jlingohr/p2pvstream/message"
"github.com/jlingohr/p2pvstream/settings"
"github.com/jlingohr/p2pvstream/streaming"
"github.com/jlingohr/p2pvstream/stringutil"
"io/ioutil"
"log"
"math/rand"
"strconv"
"strings"
"sync"
"time"
)
// Seeder gossips segments of a streamed file to cluster peers via memberlist.
type Seeder struct {
	mlist         *memberlist.Memberlist           // cluster membership
	broadcasts    *memberlist.TransmitLimitedQueue // outgoing gossip queue
	nodeToSegNum  map[string]int                   // Node name to segment number map
	segNumToNodes map[int][]*memberlist.Node       // Segment number to nodes map
	filePartition map[int][]int                    // segment number -> file indices it covers
	m             sync.RWMutex                     // guards the maps above
	config        settings.Config
	file          streaming.StreamedFile
}
// NewSeeder creates a memberlist-backed seeder: it binds to the configured
// address, applies the optional gossip/probe tuning, initializes the
// node/segment bookkeeping (this node starts in pseudo-segment -1), and
// computes the static file partition.
func NewSeeder(config settings.Config) (*Seeder, error) {
	addrParts := strings.Split(config.SeedBindAddress, ":")
	if len(addrParts) != 2 {
		log.Fatal("Invalid address supplied")
	}
	bindAddr := addrParts[0]
	bindPort := addrParts[1]
	c := memberlist.DefaultWANConfig()
	// NOTE(review): Atoi error ignored — a malformed port silently becomes 0.
	c.BindPort, _ = strconv.Atoi(bindPort)
	c.BindAddr = bindAddr
	c.PushPullInterval = 0 // disable push/pull
	c.Name = config.NodeName
	// Zero-valued tuning options keep the memberlist WAN defaults.
	if config.ClusterGossipInterval != 0 {
		c.GossipInterval = time.Duration(config.ClusterGossipInterval) * time.Millisecond
	}
	if config.ClusterProbeInterval != 0 {
		c.ProbeInterval = time.Duration(config.ClusterProbeInterval) * time.Millisecond
	}
	if config.ClusterProbeTimeout != 0 {
		c.ProbeTimeout = time.Duration(config.ClusterProbeTimeout) * time.Millisecond
	}
	if config.ClusterSuspicionMult != 0 {
		c.SuspicionMult = config.ClusterSuspicionMult
	}
	m, err := memberlist.Create(c)
	if err != nil {
		return &Seeder{}, err
	}
	b := &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return m.NumMembers()
		},
		RetransmitMult: 3,
	}
	nodeToSegNum := map[string]int{config.NodeName: -1}
	segNumToNodes := map[int][]*memberlist.Node{-1: m.Members()}
	s := &Seeder{
		broadcasts:    b,
		mlist:         m,
		nodeToSegNum:  nodeToSegNum,
		segNumToNodes: segNumToNodes,
		m:             sync.RWMutex{},
		config:        config,
	}
	s.file, err = s.getFile()
	if err != nil {
		return &Seeder{}, err
	}
	s.filePartition = s.getFilePartition()
	// The seeder itself acts as both delegate and event handler.
	c.Delegate = s
	c.Events = s
	log.Println("File Partition:", s.filePartition)
	return s, nil
}
// getFile builds the streamed-file descriptor for the configured seed file:
// the extension-less name plus a segment count derived from the number of
// files in the HLS directory minus one (presumably excluding the playlist —
// confirm).
func (s *Seeder) getFile() (streaming.StreamedFile, error) {
	name := stringutil.RemoveFilenameExt(s.config.SeedFilename)
	count, err := fileutil.FileCount(hls.SEED_HLS_PATH + name)
	if err != nil {
		log.Println(err)
		return streaming.StreamedFile{}, err
	}
	file := streaming.StreamedFile{
		NoExtFilename: name,
		SegCount:      count - 1,
	}
	return file, nil
}
// getFilePartition splits the file's SegCount media indices into
// config.NumSegments contiguous groups. Each group gets at least
// SegCount/NumSegments indices; the remainder is spread by giving some
// groups one extra index (tracked via old/newRemainder).
// NOTE(review): the remainder-distribution logic is intricate — verify the
// group sizes against expectations before changing anything here.
func (s *Seeder) getFilePartition() map[int][]int {
	clusterPartition := map[int][]int{}
	minCount := s.file.SegCount / s.config.NumSegments
	remainder := s.file.SegCount % s.config.NumSegments
	segment := 0
	minCounter := 1
	oldRemainder := remainder
	newRemainder := remainder
	for index := 0; index < s.file.SegCount; index++ {
		if minCounter == 1 {
			// First index of a new group starts a fresh slice.
			clusterPartition[segment] = []int{index}
		} else {
			clusterPartition[segment] = append(clusterPartition[segment], index)
		}
		if minCount == 0 {
			// More groups than indices: one index per group.
			minCounter = 1
			segment++
			continue
		}
		if minCount <= minCounter {
			if remainder == 0 {
				minCounter = 1
				segment++
				continue
			}
			if newRemainder == 0 || newRemainder != oldRemainder {
				minCounter = 1
				segment++
				oldRemainder = newRemainder
				continue
			}
			newRemainder--
		}
		minCounter++
	}
	return clusterPartition
}
/////////////////////////////////////////////////////////////////////////////
// seeder goroutines
// Start launches the seeder's background goroutines.
func (s *Seeder) Start() {
	// todo start future seeding goroutines here
	go s.automateStream()
}
// automateStream pushes one segment every SeedFrequency milliseconds, forever
// (there is no stop signal; the goroutine lives as long as the process).
func (s *Seeder) automateStream() {
	for {
		s.streamSegment()
		time.Sleep(time.Duration(s.config.SeedFrequency) * time.Millisecond)
	}
}
// streamSegment sends one random file part from the most advanced segment to
// a random peer in the least advanced segment (never to itself).
func (s *Seeder) streamSegment() {
	// find current least and most advanced segment
	mem := s.mlist.Members()
	if len(mem) < 2 {
		// Nothing to do with no peers.
		return
	}
	leastAdvancedSeg, mostAdvancedSeg := s.findLeastMostAdvancedSegment()
	// find random node from least advanced segment
	s.m.RLock()
	mem = s.segNumToNodes[leastAdvancedSeg]
	s.m.RUnlock()
	randNode := s.findRandomNode(mem)
	// NOTE(review): if the chosen segment's node list contains only this
	// node, this retry loop never terminates — confirm that cannot happen.
	for randNode.Name == s.config.NodeName {
		randNode = s.findRandomNode(mem)
	}
	// find random segment part from most advanced segment
	segIdx, randSeg := s.findRandomSegment(mostAdvancedSeg)
	if segIdx == -1 {
		return
	}
	// sendSegment
	p2pMsg, err := message.CreateSegmentResponse(s.config.NodeName, segIdx, randSeg)
	if err != nil {
		return
	}
	s.sendSegment(randNode, p2pMsg)
	s.PrintString(fmt.Sprintf("[Stream Segment] || From: %s || To: %s || File Index: %d", s.config.NodeName, randNode.Name, segIdx))
}
// findLeastMostAdvancedSegment scans the segment map and returns the lowest
// and highest segment numbers that have at least one node. The seeder's own
// pseudo-segment -1 is excluded from the "least" side but can be returned as
// "most" only if no real segment exists.
// NOTE(review): 10000 is a magic upper-bound sentinel — it is returned
// unchanged when every segment list is empty.
func (s *Seeder) findLeastMostAdvancedSegment() (int, int) {
	s.m.RLock()
	defer s.m.RUnlock()
	leastAdvancedSeg := 10000
	mostAdvancedSeg := -1
	for segment, nodeList := range s.segNumToNodes {
		if len(nodeList) == 0 {
			continue
		}
		if segment != -1 && segment < leastAdvancedSeg {
			leastAdvancedSeg = segment
		}
		if segment > mostAdvancedSeg {
			mostAdvancedSeg = segment
		}
	}
	return leastAdvancedSeg, mostAdvancedSeg
}
/////////////////////////////////////////////////////////////////////////////
// streamer handle joining & leaving nodes
// NotifyJoin is the memberlist event hook for a node joining the cluster;
// the bookkeeping runs asynchronously.
func (s *Seeder) NotifyJoin(node *memberlist.Node) {
	s.PrintString(fmt.Sprintf("[Seeder Notify Join] || Node: %s", node.Name))
	go s.handleNotifyJoin(node)
}
// handleNotifyJoin registers the new node in segment 0, then (concurrently)
// sends it the current cluster status and one advanced segment part.
func (s *Seeder) handleNotifyJoin(node *memberlist.Node) {
	s.m.Lock()
	s.nodeToSegNum[node.Name] = 0
	s.segNumToNodes[0] = append(s.segNumToNodes[0], node)
	s.m.Unlock()
	go s.sendNodeClusterStatus(node)
	go s.sendAdvancedSegmentPart(node)
}
// sendAdvancedSegmentPart sends the given node one random file part from the
// currently most advanced segment; it is a no-op when none is available.
func (s *Seeder) sendAdvancedSegmentPart(node *memberlist.Node) {
	_, mostAdvancedSeg := s.findLeastMostAdvancedSegment()
	idx, randSeg := s.findRandomSegment(mostAdvancedSeg)
	if idx == -1 {
		return
	}
	p2p, err := message.CreateSegmentResponse(node.Name, idx, randSeg)
	if err != nil {
		return
	}
	s.sendSegment(node, p2p)
	s.PrintString(fmt.Sprintf("[ Sent Advanced ] || From: %s || To: %s || File Index: %d", s.config.NodeName, node.Name, idx))
}
// sendNodeClusterStatus sends the node a snapshot of the node→segment map so
// a joiner learns the current cluster layout.
func (s *Seeder) sendNodeClusterStatus(node *memberlist.Node) {
	s.m.RLock()
	msg := message.CreateNodeClusterStatusMsg(s.nodeToSegNum)
	s.m.RUnlock()
	p2pMsg, err := message.CreateP2PMsg(msg)
	if err != nil {
		return
	}
	p2pMsg.MsgType = message.NodeClusterStatusMsg
	s.sendSegment(node, p2pMsg)
	s.PrintString(fmt.Sprintf("[Sent NodeClusterStatus] || Node: %s", node.Name))
}
// NotifyLeave is the memberlist event hook for a departing node; the
// bookkeeping runs asynchronously.
func (s *Seeder) NotifyLeave(node *memberlist.Node) {
	s.PrintString(fmt.Sprintf("[Seeder Notify Leave] || Node: %s", node.Name))
	go s.handleNotifyLeave(node)
}
// handleNotifyLeave removes the departed node from both bookkeeping maps and
// logs the resulting layout.
func (s *Seeder) handleNotifyLeave(node *memberlist.Node) {
	s.m.Lock()
	defer s.m.Unlock()
	if segment, present := s.nodeToSegNum[node.Name]; present {
		// remove node from nodeToSegNum
		delete(s.nodeToSegNum, node.Name)
		// remove node from segNumToNodes
		segmentNodes := s.segNumToNodes[segment]
		for index, segNode := range segmentNodes {
			if segNode.Name == node.Name {
				s.segNumToNodes[segment] = append(segmentNodes[:index], segmentNodes[index+1:]...)
				break
			}
		}
	}
	s.PrintNodesSegment()
}
// NotifyUpdate is the memberlist event hook for node metadata updates;
// nothing to do beyond logging.
func (s *Seeder) NotifyUpdate(node *memberlist.Node) {
	s.PrintString(fmt.Sprintf("[Seeder Notify Update] || Node: %s", node.Name))
}
/////////////////////////////////////////////////////////////////////////////
// helper functions
// sendSegment encodes a P2P message and sends it reliably to one node.
// NOTE(review): both the encode error's effect (silent drop) and the ignored
// SendReliable error mean failed sends vanish without trace — confirm this
// best-effort behavior is intended.
func (s *Seeder) sendSegment(node *memberlist.Node, p2pMsg message.P2PMessage) {
	byt, err := message.EncodeMessage(p2pMsg)
	if err != nil {
		return
	}
	s.mlist.SendReliable(node, byt)
}
// findRandomSegment picks a random media-file index belonging to the given
// cluster segment, reads that .ts part from disk, and returns (index, data);
// it returns (-1, empty) when the segment is out of range, has no files, or
// the read fails.
func (s *Seeder) findRandomSegment(segment int) (int, []byte) {
	if segment == s.config.NumSegments || segment < 0 {
		return -1, []byte{}
	}
	// find available partitions to retrieve from this segment
	s.m.RLock()
	availableSegmentPartition := s.filePartition[segment]
	s.m.RUnlock()
	numAvailableSegmentPartition := len(availableSegmentPartition)
	if numAvailableSegmentPartition == 0 {
		return -1, []byte{}
	}
	// FIX: previously a fresh rand.Source was seeded with time.Now().Unix()
	// on every call, so all calls within the same second chose the same
	// "random" element. The package-level rand source avoids that and is
	// safe for concurrent use.
	fileIdx := availableSegmentPartition[rand.Intn(numAvailableSegmentPartition)]
	// retrieve file part from HD
	dirPath := fmt.Sprintf("%s/%s/", hls.SEED_HLS_PATH, s.file.NoExtFilename)
	cachedFilepath := dirPath + s.file.NoExtFilename + strconv.Itoa(fileIdx) + ".ts"
	data, err := ioutil.ReadFile(cachedFilepath)
	if err != nil {
		log.Println(err)
		return -1, []byte{}
	}
	return fileIdx, data
}
// findRandomNode returns a uniformly random member of the given node list.
// FIX: previously a new rand.Source seeded with time.Now().Unix() was built
// per call, so every call within the same second returned the same node —
// which also made streamSegment's "not myself" retry loop spin for up to a
// second (or forever within that second). The package-level rand source is
// safe for concurrent use and varies between calls.
func (s *Seeder) findRandomNode(members []*memberlist.Node) *memberlist.Node {
	return members[rand.Intn(len(members))]
}
// changeNodeCluster moves nodeName into the given segment: it is removed
// from its previous segment's node list (if it was tracked) and appended to
// the new one, and the node→segment map is updated. A no-op when the node is
// already in that segment or the segment equals NumSegments (past the end).
func (s *Seeder) changeNodeCluster(nodeName string, segment int) {
	if segment == s.config.NumSegments {
		return
	}
	s.m.Lock()
	defer s.m.Unlock()
	// get nodeName's memberlist.Node
	// NOTE(review): if the node is not in the member list this stays an empty
	// &memberlist.Node{}, which is still appended below — confirm intended.
	nodeNode := &memberlist.Node{}
	for _, node := range s.mlist.Members() {
		if node.Name == nodeName {
			nodeNode = node
			break
		}
	}
	previousSegmentNum, present := s.nodeToSegNum[nodeName]
	if present {
		// if previous segment is the same as the given segment, no node segment change occurred
		if previousSegmentNum == segment {
			return
		}
		// remove node from prev segment
		prevSegmentNodes := s.segNumToNodes[previousSegmentNum]
		for index, node := range prevSegmentNodes {
			if node.Name == nodeName {
				s.segNumToNodes[previousSegmentNum] = append(prevSegmentNodes[:index], prevSegmentNodes[index+1:]...)
				break
			}
		}
	} else {
		// check segment already exists in segNumToNodes
		if _, segmentPresent := s.segNumToNodes[segment]; !segmentPresent {
			s.segNumToNodes[segment] = []*memberlist.Node{}
		}
	}
	// set node to new cluster
	s.segNumToNodes[segment] = append(s.segNumToNodes[segment], nodeNode)
	s.nodeToSegNum[nodeName] = segment
	log.Printf("%s changed segment number from %d to %d.\n", nodeName, previousSegmentNum, segment)
	s.PrintNodesSegment()
}
/////////////////////////////////////////////////////////////////////////////
// handle rsp
// Delegate protocol implementation for Seeder
// NodeMeta implements memberlist.Delegate; this seeder attaches no metadata.
func (s *Seeder) NodeMeta(limit int) []byte {
	return []byte{}
}
// NotifyMsg implements memberlist.Delegate: it decodes the incoming bytes as
// a P2PMessage and dispatches handling to a goroutine.
func (s *Seeder) NotifyMsg(b []byte) {
	// all messages sent/broadcast must be wrapped into P2PMessage
	msg, err := message.DecodeP2PMessage(b)
	if err != nil {
		log.Println(err)
		return
	}
	go s.handleMsg(msg)
}
// GetBroadcasts implements memberlist.Delegate by draining the transmit queue.
func (s *Seeder) GetBroadcasts(overhead, limit int) [][]byte {
	return s.broadcasts.GetBroadcasts(overhead, limit)
}
// LocalState implements memberlist.Delegate.
func (s *Seeder) LocalState(join bool) []byte {
	//stub, we don't sync state
	return []byte{}
}
// MergeRemoteState implements memberlist.Delegate.
func (s *Seeder) MergeRemoteState(buf []byte, join bool) {
	//stub, we don't sync state
}
// handleMsg dispatches a decoded P2P message; the seeder only reacts to
// cluster-change notifications, everything else is logged and dropped.
func (s *Seeder) handleMsg(msg message.P2PMessage) {
	switch msg.MsgType {
	case message.ClusterChangeMsg:
		changeCluster, err := message.DecodeClusterChange(msg.Payload)
		if err != nil {
			log.Println(err)
			return
		}
		s.PrintString(fmt.Sprintf("[Received ClusterNodeChange] Node: %s %s %d", changeCluster.NodeName, "Cluster:", changeCluster.Cluster))
		s.changeNodeCluster(changeCluster.NodeName, changeCluster.Cluster)
	default:
		log.Println("Unhandled message type in Seeder:", msg.MsgType)
	}
}
/////////////////////////////////////////////////////////////////////////////
// handle rsp
// PrintNodesSegment logs every known node with its current segment number.
// FIX: the redundant strconv.Itoa(int(cluster)) round-trip was dropped —
// cluster is already an int and Println formats it identically.
func (s *Seeder) PrintNodesSegment() {
	for node, cluster := range s.nodeToSegNum {
		log.Println("[PrintNodesSegment] || Node: ", node, "|| Cluster:", cluster)
	}
}
// PrintMembers logs the name of every current cluster member.
func (s *Seeder) PrintMembers() {
	log.Println()
	log.Println("Member List:")
	for _, mem := range s.mlist.Members() {
		log.Println(mem.Name)
	}
}
// PrintString logs msg preceded by a blank line for readability.
func (s *Seeder) PrintString(msg string) {
	log.Println()
	log.Println(msg)
}
|
package main
import (
"flag"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"strconv"
)
// Flags holds the parsed command-line options (all pointers are populated by
// the flag package in setupCLArgs and are never nil after Parse).
type Flags struct {
	dest  *string // local destination path for the downloaded file
	addr  *string // remote server base URL
	id    *int    // file id on the server
	seek  *int    // position to seek to in the remote file
	chunk *int    // read-buffer / chunk size in bytes
}
// Boundary is the fixed multipart boundary agreed with the remote server.
const Boundary = "DellvinBlackDellvinBlackDellvinBlackDellvinBlack"
// main downloads a file from the remote server as a multipart stream and
// writes each part either to the destination file or to standard output.
func main() {
	f := setupCLArgs()
	// NOTE(review): flag.String/flag.Int never return nil, so this guard can
	// never fire; kept for interface stability.
	if f.addr == nil || f.id == nil {
		return
	}
	addr := prepareURL(f)
	tr := http.DefaultTransport
	client := http.Client{
		Transport: tr,
	}
	resp, err := client.Get(addr)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("Response status:", resp.Status)
	if resp.StatusCode == 200 {
		var fd *os.File
		if f.dest != nil {
			fd, err = os.Create(*f.dest)
			if err != nil {
				fmt.Println(err.Error())
				return
			}
			// FIX: the created file was never closed.
			defer fd.Close()
		}
		buf := make([]byte, *f.chunk)
		partReader := multipart.NewReader(resp.Body, Boundary)
		for {
			part, err := partReader.NextPart()
			if err == io.EOF {
				break
			}
			// FIX: a non-EOF NextPart error previously fell through with a
			// nil part, panicking on part.Read below.
			if err != nil {
				fmt.Println(err.Error())
				return
			}
			var n int
			for {
				n, err = part.Read(buf)
				if err == io.EOF {
					break
				} else if err != nil {
					fmt.Println(err.Error())
					return
				}
				if fd != nil {
					fd.Write(buf[:n])
				} else {
					// FIX: was fmt.Printf(string(...)) — file data containing
					// '%' would be interpreted as a format string.
					fmt.Print(string(buf[:n]))
				}
			}
			// Flush any bytes returned together with io.EOF (n may be > 0).
			if fd != nil {
				fd.Write(buf[:n])
			} else {
				fmt.Print(string(buf[:n]))
			}
		}
		fmt.Println("Done")
	} else {
		var body = make([]byte, 256)
		resp.Body.Read(body)
		fmt.Println(string(body))
	}
}
// setupCLArgs registers the command-line flags and parses os.Args into a
// Flags value.
func setupCLArgs() Flags {
	var f Flags
	f.dest = flag.String("dest", "./static/file.txt", "a destination for downloaded file")
	f.addr = flag.String("addr", "http://localhost:8080", "address of remote server")
	f.id = flag.Int("id", 18, "file id")
	f.seek = flag.Int("seek", 32, "pos to seek in file")
	f.chunk = flag.Int("chunk", 4096, "size of chunk")
	flag.Parse()
	return f
}
// prepareURL builds the download URL: <addr>/download/<id>[/<chunk>][/<seek>].
func prepareURL(f Flags) string {
	u := fmt.Sprintf("%s/download/%d", *f.addr, *f.id)
	if f.chunk != nil {
		u += "/" + strconv.Itoa(*f.chunk)
	}
	if f.seek != nil {
		u += "/" + strconv.Itoa(*f.seek)
	}
	return u
}
|
package main
import (
"context"
"crypto/rand"
"crypto/tls"
"encoding/binary"
"flag"
"fmt"
"io"
"os"
"github.com/hawkinsw/qperf/utils"
quic "github.com/lucas-clemente/quic-go"
)
// k is one kibibyte; used to size the default send buffer.
const k = 1024
// transmit fills a bufferSize-byte buffer with random data once, then writes
// it to the stream in a tight loop until done is closed or a write fails.
// NOTE(review): stream is a pointer to quic.Stream; if quic.Stream is an
// interface, passing it by value would be more conventional — confirm.
func transmit(done <-chan struct{}, bufferSize uint64, stream *quic.Stream) {
	sendBuffer := make([]byte, bufferSize, bufferSize)
	count, err := io.ReadFull(rand.Reader, sendBuffer)
	if err != nil || count == 0 {
		fmt.Printf("Could not create a %d buffer of random data: %s\n", bufferSize, err)
		os.Exit(1)
	}
transmitLoop:
	for {
		select {
		case <-done:
			// Experiment finished; stop transmitting.
			break transmitLoop
		default:
			_, err := (*stream).Write(sendBuffer)
			if err != nil {
				// Peer closed or stream error: stop transmitting.
				break transmitLoop
			}
		}
	}
}
// main dials the QUIC server, streams random data on a data stream, and
// reads back (on a control stream) the 64-bit count of bytes the server
// measured, printing it before shutting everything down.
func main() {
	// NOTE(review): experimentDone is never closed, so transmit only stops
	// when its stream write fails (e.g. on shutdown) — confirm intended.
	experimentDone := make(chan struct{})
	tlsConf := &tls.Config{
		InsecureSkipVerify: true,
		NextProtos:         []string{"quic-echo-example"},
	}
	serverAddr := flag.String("server", "localhost", "Server hostname or IP address.")
	serverPort := flag.Uint64("port", 4242, "Server port number")
	sendBufferSize := flag.Uint64("bufferSize", 3*k, "The write buffer size.")
	flag.Parse()
	session, err := quic.DialAddr(utils.CreateCompleteNetworkAddress(*serverAddr, *serverPort), tlsConf, nil)
	if err != nil {
		fmt.Printf("Could not open a QUIC session to %s on port %d: %s\n", *serverAddr, *serverPort, err)
		os.Exit(1)
	}
	dataStream, err := session.OpenStreamSync(context.Background())
	if err != nil {
		fmt.Printf("Could not open a QUIC data stream to %s on port %d: %s\n", *serverAddr, *serverPort, err)
		os.Exit(1)
	}
	controlStream, err := session.OpenStreamSync(context.Background())
	if err != nil {
		fmt.Printf("Could not open a QUIC control stream to %s on port %d: %s\n", *serverAddr, *serverPort, err)
		os.Exit(1)
	}
	controlBuffer := make([]byte, 8, 8)
	go transmit(experimentDone, *sendBufferSize, &dataStream)
	// Streams are only opened once data is sent. Write 8 bytes to force the server
	// to accept a control stream.
	controlStream.Write(controlBuffer)
	// Now, wait for the server to send us back a 64-bit number saying how many bytes
	// we transferred during the experiment.
	io.ReadFull(controlStream, controlBuffer)
	fmt.Printf("control stream's buffer: %v\n", binary.LittleEndian.Uint64(controlBuffer))
	controlStream.Close()
	dataStream.Close()
	session.CloseWithError(quic.ApplicationErrorCode(quic.NoError), "")
	os.Exit(0)
}
|
package middleware
import (
"fmt"
log "proximity/pkg/utils/logger"
"github.com/gin-gonic/gin"
pkgErrors "github.com/pkg/errors"
)
// HandlePanic is a gin middleware that recovers from panics raised by
// downstream handlers, logs them with a stack trace, and responds with
// HTTP 500. FIX: corrected the "occured" → "occurred" typo in the log and
// response messages.
func HandlePanic(c *gin.Context) {
	defer func(c *gin.Context) {
		r := recover()
		var stackTrace string
		if r != nil {
			err, ok := r.(error)
			if ok {
				// Logs the error
				stackTrace = fmt.Sprintf("%+v", pkgErrors.New(err.Error()))
				log.Error("GO-BOILERPLATE.PANIC", "Unexpected panic occurred", log.Priority1, nil, map[string]interface{}{"error": err.Error(), "stackTrace": stackTrace})
				// Notice error in apm
				// apm.APM.NoticeError(apm.FromContext(c), err)
				// Forms error message
				c.JSON(500, gin.H{
					"message":    "Panic: Unexpected error occurred.",
					"error":      err.Error(),
					"stackTrace": stackTrace,
				})
			} else {
				// Logs the error (recovered value was not an error)
				log.Error("GO-BOILERPLATE.PANIC", "Panic recovery failed to parse error", log.Priority1, nil, map[string]interface{}{"error": r})
				// Notice error in apm
				// apm.APM.NoticeError(apm.FromContext(c), errors.New("GO-BOILERPLATE.UNRECOVERED.PANIC"))
				// Forms error message
				c.JSON(500, gin.H{
					"message": "Panic: Unexpected error occurred, failed to parse error",
				})
			}
		}
	}(c)
	c.Next()
}
|
package log
import (
"log"
"github.com/b2wdigital/goignite/pkg/config"
)
// Configuration keys for console and file logging.
const (
	ConsoleEnabled = "log.console.enabled"
	ConsoleLevel   = "log.console.level"
	FileEnabled    = "log.file.enabled"
	FileLevel      = "log.file.level"
	FilePath       = "log.file.path"
	FileName       = "log.file.name"
	FileMaxSize    = "log.file.maxsize"
	FileCompress   = "log.file.compress"
	FileMaxAge     = "log.file.maxage"
)
// init registers the default logging configuration entries with their
// descriptions.
func init() {
	log.Println("getting configurations for logging")
	config.Add(ConsoleEnabled, true, "enable/disable console logging")
	config.Add(ConsoleLevel, "INFO", "console log level")
	config.Add(FileEnabled, false, "enable/disable file logging")
	// FIX: description previously read "console log level" (copy-paste).
	config.Add(FileLevel, "INFO", "file log level")
	config.Add(FilePath, "/tmp", "log path")
	// NOTE(review): default filename "application.l" looks truncated — should
	// this be "application.log"? Confirm before changing.
	config.Add(FileName, "application.l", "log filename")
	config.Add(FileMaxSize, 100, "log file max size (MB)")
	config.Add(FileCompress, true, "log file compress")
	config.Add(FileMaxAge, 28, "log file max age (days)")
}
|
package job
// The different states that a job can be in.
const (
	StateNew        = "new"
	StateQueued     = "queued"
	StateInProgress = "in progress"
	StateComplete   = "complete" // terminal
	StateError      = "error"    // terminal
	StatePassed     = "passed"   // terminal
	StateFailed     = "failed"   // terminal
)

// DoneStates represents states that a job doesn't transition out of, i.e. once the job is in one of these states,
// it's done.
var DoneStates = []string{StateComplete, StateError, StatePassed, StateFailed}
// Job represents test details and metadata of a test run (aka Job), that is usually associated with a particular test
// execution instance (e.g. VM).
type Job struct {
ID string `json:"id"`
Passed bool `json:"passed"`
Status string `json:"status"`
Error string `json:"error"`
BrowserShortVersion string `json:"browser_short_version"`
BaseConfig struct {
PlatformName string `json:"platformName"`
PlatformVersion string `json:"platformVersion"`
DeviceName string `json:"deviceName"`
} `json:"base_config"`
// IsRDC flags a job started as a RDC run.
IsRDC bool `json:"-"`
}
// Done returns true if the job status is one of DoneStates. False otherwise.
func Done(status string) bool {
for _, s := range DoneStates {
if s == status {
return true
}
}
return false
}
|
package utils
import (
"os"
"log"
)
// Package-level loggers; call InitLogger before using any of them.
var (
	InfoLog  *log.Logger
	ErrorLog *log.Logger
	DebugLog *log.Logger
	LogFile  *os.File
)

// InitLogger wires the three severity loggers to stdout with
// date/time/short-file prefixes. Call SetLogFile afterwards to redirect
// their output to a file.
func InitLogger() {
	flags := log.Ldate | log.Ltime | log.Lshortfile
	InfoLog = log.New(os.Stdout, "INFO: ", flags)
	ErrorLog = log.New(os.Stdout, "ERROR: ", flags)
	DebugLog = log.New(os.Stdout, "DEBUG: ", flags)
}
func SetLogFile(filename string) {
var err error
LogFile, err = os.OpenFile(filename, os.O_RDWR | os.O_CREATE | os.O_APPEND, 0666)
if err != nil {
ErrorLog.Fatalf("Error opening logfile: %v", err)
}
InfoLog.SetOutput(LogFile)
ErrorLog.SetOutput(LogFile)
DebugLog.SetOutput(LogFile)
}
func CloseLogfile() {
LogFile.Close()
} |
package service
import (
"crypto/md5"
"errors"
"fmt"
"hash"
"math/big"
"net"
"strconv"
"strings"
"github.com/ms2008/poemoon/conf"
"github.com/ms2008/poemoon/utils"
)
// Single-byte protocol constants used when assembling authentication
// and keep-alive packets. Semantics are inferred from the names —
// confirm against the campus-network protocol documentation.
const (
	_codeIn       = byte(0x03)
	_codeOut      = byte(0x06)
	_type         = byte(0x01)
	_eof          = byte(0x00)
	_controlCheck = byte(0x20)
	_adapterNum   = byte(0x05)
	_ipDog        = byte(0x01)
)
// Fixed packet fragments and the big.Int "magic" factors used by
// checkSum (1968, 0xffffffff) and crc (711).
var (
	_delimiter   = []byte{0x00, 0x00, 0x00, 0x00}
	_emptyIP     = []byte{0, 0, 0, 0}
	_primaryDNS  = []byte{202, 204, 176, 1}
	_dhcpServer  = []byte{222, 29, 188, 254}
	_authVersion = []byte{0x1f, 0x00}
	_magic1      = big.NewInt(1968)
	_magic2      = big.NewInt(int64(0xffffffff))
	_magic3      = big.NewInt(int64(711))
)
// Service holds one authentication session: the configuration, a scratch
// MD5 hasher, protocol salts/tails captured from server responses, and
// the UDP connection to the auth server. The [n:m] comments record the
// byte offsets in server packets each field is copied from.
type Service struct {
	Conf           *conf.Config
	md5Ctx         hash.Hash
	salt           []byte // [4:8]
	clientIP       []byte // [20:24]
	md5a           []byte
	tail1          []byte
	tail2          []byte
	keepAliveVer   []byte // [28:30]
	conn           *net.UDPConn
	ChallengeTimes int
	Count          int
}
// New create service instance and return.
//
// NOTE(review): on ResolveUDPAddr/DialUDP failure this only logs and
// still returns the half-initialized service (s.conn may be nil), so
// callers must tolerate that. Also, `log` here is not the stdlib logger
// — it must come from a project logging package not visible in this
// file's imports; verify the import list.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		Conf:           c,
		md5Ctx:         md5.New(),
		md5a:           make([]byte, 16),
		tail1:          make([]byte, 16),
		tail2:          make([]byte, 4),
		keepAliveVer:   []byte{0xdc, 0x02},
		clientIP:       make([]byte, 4),
		salt:           make([]byte, 4),
		ChallengeTimes: 0,
		Count:          0,
	}
	var (
		err     error
		udpAddr *net.UDPAddr
	)
	// Resolve and dial the authentication server's UDP endpoint.
	if udpAddr, err = net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%s", c.AuthServer, c.Port)); err != nil {
		log.Error("net.ResolveUDPAddr(udp4, %s) error(%v) ", fmt.Sprintf("%s:%s", c.AuthServer, c.Port), err)
	}
	if s.conn, err = net.DialUDP("udp", nil, udpAddr); err != nil {
		log.Error("net.DialUDP(udp, %v, %v) error(%v)", nil, udpAddr, err)
	}
	return
}
// Close tears down the service's UDP connection.
func (s *Service) Close() {
	s.conn.Close()
}
// md5 hashes the concatenation of items with the shared hasher and
// resets it afterwards so the next call starts from a clean state.
func (s *Service) md5(items ...[]byte) []byte {
	defer s.md5Ctx.Reset()
	for _, item := range items {
		s.md5Ctx.Write(item)
	}
	return s.md5Ctx.Sum(nil)
}
// mac parses s.Conf.Mac (e.g. "aa:bb:cc:dd:ee:ff") into its six raw
// bytes. Returns an error when the address does not contain exactly 12
// hex digits after the colons are stripped.
func (s *Service) mac() (ret []byte, err error) {
	// check mac: a valid address leaves exactly 12 hex characters.
	as := strings.Replace(s.Conf.Mac, ":", "", -1)
	if len(as) != 12 {
		err = errors.New("length of mac address is not correct")
		// Bug fix: return immediately. The original fell through after
		// setting err, and as[i:i+2] below panics on a short string.
		return
	}
	ret = make([]byte, 0, 6)
	for i := 0; i < 12; i += 2 {
		var v uint64
		if v, err = strconv.ParseUint(as[i:i+2], 16, 8); err != nil {
			log.Error("strconv.ParseUint(%v, 16, 8) error(%v)", as[i:i+2], err)
			ret = nil
			return
		}
		ret = append(ret, byte(v))
	}
	return
}
// ror XORs each password byte with the corresponding md5a byte and
// rotates the result left by three bits (assumes len(md5a) >=
// len(password)).
func (s *Service) ror(md5a, password []byte) []byte {
	out := make([]byte, len(password))
	for i, p := range password {
		x := md5a[i] ^ p
		out[i] = x<<3 + x>>5 // 8-bit rotate-left by 3
	}
	return out
}
// checkSum computes the 4-byte protocol checksum of data: XOR-fold the
// input into a byte-reversed 4-byte accumulator seeded with 1234,
// multiply by the magic 1968 and add 0xffffffff, then emit the low
// four bytes of the product in reversed (little-endian) order.
func (s *Service) checkSum(data []byte) (ret []byte) {
	// 1234 = 0x_00_00_04_d2
	sum := []byte{0x00, 0x00, 0x04, 0xd2}
	l := len(data)
	i := 0
	//0123_4567_8901_23
	for ; i+3 < l; i = i + 4 {
		// Each 4-byte word is folded in byte-reversed:
		//abcd ^ 3210
		//abcd ^ 7654
		//abcd ^ 1098
		sum[0] ^= data[i+3]
		sum[1] ^= data[i+2]
		sum[2] ^= data[i+1]
		sum[3] ^= data[i]
	}
	if i < l {
		// Trailing bytes (fewer than 4 remain, e.g. bytes _23 when
		// i=12, len=14): right-align them in a temp word, then fold.
		tmp := make([]byte, 4)
		for j := 3; j >= 0 && i < l; j-- {
			//j=3 tmp = 0 0 0 2 i=12 13
			//j=2 tmp = 0 0 3 2 i=13 14
			tmp[j] = data[i]
			i++
		}
		for j := 0; j < 4; j++ {
			sum[j] ^= tmp[j]
		}
	}
	// (sum * 1968) + 0xffffffff, big-endian bytes of the result.
	var x = big.NewInt(int64(0))
	tmpBytes := x.SetBytes(sum).Mul(x, _magic1).Add(x, _magic2).Bytes()
	l = len(tmpBytes)
	i = 0
	// Copy the low four bytes out in reversed order (zero-padded when
	// the product is shorter than four bytes).
	ret = make([]byte, 4)
	for j := l - 1; j >= 0 && i < 4; j-- {
		ret[i] = tmpBytes[j]
		i++
	}
	return
}
// extra reports whether this round is due for the "extra" keep-alive
// packet, i.e. once every 21 counts.
func (s *Service) extra() bool {
	return (s.Count % 21) == 0
}
// crc computes the 4-byte keep-alive CRC of buf: XOR-fold consecutive
// byte pairs (swapped) into a 2-byte sum, multiply by the magic 711,
// and return the product's low bytes reversed into a zero-padded
// 4-byte slice. A trailing odd byte of buf is ignored.
func (s *Service) crc(buf []byte) (ret []byte) {
	sum := make([]byte, 2)
	l := len(buf)
	for i := 0; i < l-1; i += 2 {
		sum[0] ^= buf[i+1]
		sum[1] ^= buf[i]
	}
	// sum * 711, big-endian bytes of the product.
	x := big.NewInt(int64(0))
	tmpBytes := x.SetBytes(sum).Mul(x, _magic3).Bytes()
	ret = make([]byte, 4)
	l = len(tmpBytes)
	// Reverse into ret; unused high bytes stay zero.
	for i := 0; i < 4 && l > 0; i++ {
		l--
		ret[i] = tmpBytes[l]
	}
	return
}
|
package scanner
import (
"go/token"
"h12.io/gombi/experiment/gre/scan"
)
// Token kinds private to this scanner, numbered from 1000 so they never
// collide with go/token's own token values.
const (
	tNewline = 1000 + iota
	tWhitespace
	tLineComment
	tGeneralCommentSL
	tGeneralCommentML
	tRawStringLit
	tInterpretedStringLit
	tSkip
)
var (
	// Shorthand for the scan package's pattern combinators.
	c     = scan.Char
	s     = scan.Str
	con   = scan.Con
	or    = scan.Or
	merge = scan.Merge

	// Character classes, mirroring the Go spec's lexical grammar.
	illegal       = c(`\x00`)
	any           = illegal.Negate()
	newline       = c(`\n`)
	unicodeChar   = any.Exclude(newline)
	unicodeLetter = c(`\p{L}`)
	unicodeDigit  = c(`\p{Nd}`)
	//unicodeLetter = c(`A-Za-z0-9۰۱۸६४ŝ`)
	//unicodeDigit = c(`0-99876`)
	letter       = merge(unicodeLetter, c(`_`))
	decimalDigit = c(`0-9`)
	octalDigit   = c(`0-7`)
	hexDigit     = c(`0-9A-Fa-f`)
	empty        = s(``)

	// Whitespace and comments. Single-line /* */ comments (no newline
	// inside) are distinguished from multi-line ones so the caller can
	// treat them differently (e.g. for semicolon insertion).
	whitespaces      = c(` \t\r`).OneOrMore()
	lineComment      = con(s(`//`), unicodeChar.ZeroOrMore(), or(newline, empty))
	generalCommentSL = con(s(`/*`), any.Exclude(newline).ZeroOrMore().Ungreedy(), s(`*/`))
	generalCommentML = con(s(`/*`), any.ZeroOrMore().Ungreedy(), s(`*/`))

	identifier = con(letter, or(letter, unicodeDigit).ZeroOrMore())

	// Numeric literals (int, float, imaginary).
	intLit       = or(hexLit, decimalLit, octalLit)
	decimalLit   = con(c(`1-9`), decimalDigit.ZeroOrMore())
	octalLit     = con(s(`0`), octalDigit.ZeroOrMore())
	hexLit       = con(s(`0`), c(`xX`), hexDigit.OneOrMore())
	floatLit     = or(floatLit1, floatLit2, floatLit3)
	floatLit1    = con(decimals, s(`.`), decimals.ZeroOrOne(), exponent.ZeroOrOne())
	floatLit2    = con(decimals, exponent)
	floatLit3    = con(s(`.`), decimals, exponent.ZeroOrOne())
	decimals     = decimalDigit.OneOrMore()
	exponent     = con(c(`eE`), c(`\+\-`).ZeroOrOne(), decimals)
	imaginaryLit = con(or(decimals, floatLit), c(`i`))

	// Rune and string literals, including escape sequences.
	runeLit              = con(c(`'`), or(unicodeValue, byteValue), c(`'`))
	unicodeValue         = or(unicodeChar, littleUValue, bigUValue, escapedChar)
	unicodeStrValue      = or(unicodeChar.Exclude(c(`"`)), littleUValue, bigUValue, escapedChar)
	byteValue            = or(octalByteValue, hexByteValue)
	octalByteValue       = con(s(`\`), octalDigit.Repeat(3))
	hexByteValue         = con(s(`\x`), hexDigit.Repeat(2))
	littleUValue         = con(s(`\u`), hexDigit.Repeat(4))
	bigUValue            = con(s(`\U`), hexDigit.Repeat(8))
	escapedChar          = con(s(`\`), c(`abfnrtv\\'"`))
	rawStringLit         = con(s("`"), or(unicodeChar.Exclude(c("`")), newline).ZeroOrMore(), s("`"))
	interpretedStringLit = con(s(`"`), or(unicodeStrValue, byteValue).ZeroOrMore(), s(`"`))

	// matcher maps patterns to token values. Order matters: keywords
	// come before identifier, and longer operators before operators
	// that are their prefixes (e.g. `==` before `=`, `&^=` before `&^`).
	matcher = scan.NewMapMatcher(scan.MM{
		{newline, tNewline},
		{whitespaces, tWhitespace},
		{s(`if`), int(token.IF)},
		{s(`break`), int(token.BREAK)},
		{s(`case`), int(token.CASE)},
		{s(`chan`), int(token.CHAN)},
		{s(`const`), int(token.CONST)},
		{s(`continue`), int(token.CONTINUE)},
		{s(`default`), int(token.DEFAULT)},
		{s(`defer`), int(token.DEFER)},
		{s(`else`), int(token.ELSE)},
		{s(`fallthrough`), int(token.FALLTHROUGH)},
		{s(`for`), int(token.FOR)},
		{s(`func`), int(token.FUNC)},
		{s(`goto`), int(token.GOTO)},
		{s(`go`), int(token.GO)},
		{s(`import`), int(token.IMPORT)},
		{s(`interface`), int(token.INTERFACE)},
		{s(`map`), int(token.MAP)},
		{s(`package`), int(token.PACKAGE)},
		{s(`range`), int(token.RANGE)},
		{s(`return`), int(token.RETURN)},
		{s(`select`), int(token.SELECT)},
		{s(`struct`), int(token.STRUCT)},
		{s(`switch`), int(token.SWITCH)},
		{s(`type`), int(token.TYPE)},
		{s(`var`), int(token.VAR)},
		{identifier, int(token.IDENT)},
		{lineComment, tLineComment},
		{generalCommentSL, tGeneralCommentSL},
		{generalCommentML, tGeneralCommentML},
		{runeLit, int(token.CHAR)},
		{imaginaryLit, int(token.IMAG)},
		{floatLit, int(token.FLOAT)},
		{intLit, int(token.INT)},
		{rawStringLit, tRawStringLit},
		{interpretedStringLit, tInterpretedStringLit},
		{s(`...`), int(token.ELLIPSIS)},
		{s(`.`), int(token.PERIOD)},
		{s(`(`), int(token.LPAREN)},
		{s(`)`), int(token.RPAREN)},
		{s(`{`), int(token.LBRACE)},
		{s(`}`), int(token.RBRACE)},
		{s(`,`), int(token.COMMA)},
		{s(`==`), int(token.EQL)},
		{s(`=`), int(token.ASSIGN)},
		{s(`:=`), int(token.DEFINE)},
		{s(`:`), int(token.COLON)},
		{s(`+=`), int(token.ADD_ASSIGN)},
		{s(`++`), int(token.INC)},
		{s(`+`), int(token.ADD)},
		{s(`-=`), int(token.SUB_ASSIGN)},
		{s(`--`), int(token.DEC)},
		{s(`-`), int(token.SUB)},
		{s(`*=`), int(token.MUL_ASSIGN)},
		{s(`*`), int(token.MUL)},
		{s(`/=`), int(token.QUO_ASSIGN)},
		{s(`/`), int(token.QUO)},
		{s(`%=`), int(token.REM_ASSIGN)},
		{s(`%`), int(token.REM)},
		{s(`|=`), int(token.OR_ASSIGN)},
		{s(`||`), int(token.LOR)},
		{s(`|`), int(token.OR)},
		{s(`^=`), int(token.XOR_ASSIGN)},
		{s(`^`), int(token.XOR)},
		{s(`<<=`), int(token.SHL_ASSIGN)},
		{s(`<<`), int(token.SHL)},
		{s(`>>=`), int(token.SHR_ASSIGN)},
		{s(`>>`), int(token.SHR)},
		{s(`&^=`), int(token.AND_NOT_ASSIGN)},
		{s(`&^`), int(token.AND_NOT)},
		{s(`&=`), int(token.AND_ASSIGN)},
		{s(`&&`), int(token.LAND)},
		{s(`&`), int(token.AND)},
		{s(`!=`), int(token.NEQ)},
		{s(`!`), int(token.NOT)},
		{s(`<=`), int(token.LEQ)},
		{s(`<-`), int(token.ARROW)},
		{s(`<`), int(token.LSS)},
		{s(`>=`), int(token.GEQ)},
		{s(`>`), int(token.GTR)},
		{s(`[`), int(token.LBRACK)},
		{s(`]`), int(token.RBRACK)},
		{s(`;`), int(token.SEMICOLON)},
		// Catch-all: any other single character is illegal.
		{scan.Pat(`.`), int(token.ILLEGAL)},
	})
)
// init completes the matcher with the sentinel token values it reports
// for end-of-input and for unmatched input.
func init() {
	matcher.EOF = int(token.EOF)
	matcher.Illegal = int(token.ILLEGAL)
}
// gombiScanner embeds scan.Scanner so Go-specific scanning helpers can
// be attached to it.
type gombiScanner struct {
	scan.Scanner
}

// newGombiScanner returns a scanner wired to the package-level Go token
// matcher.
func newGombiScanner() gombiScanner {
	return gombiScanner{scan.Scanner{Matcher: matcher}}
}
|
package main
import (
"flag"
"fmt"
"io"
"net"
"os"
)
// Client is a TCP chat client bound to a single server connection.
type Client struct {
	ServerIP   string
	ServerPort int
	Name       string
	conn       net.Conn
	flag       int // currently selected menu mode (999 = not chosen yet)
}

// NewClient dials the chat server at serverIP:serverPort and returns a
// ready-to-use client. Returns nil when the connection cannot be
// established, so callers can (and do) nil-check the result.
func NewClient(serverIP string, serverPort int) *Client {
	client := &Client{
		ServerIP:   serverIP,
		ServerPort: serverPort,
		flag:       999,
	}
	// Connect to the server.
	conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", serverIP, serverPort))
	if err != nil {
		fmt.Println("net.dial error:", err)
		// Bug fix: previously a half-initialized client (nil conn) was
		// returned here, so main's nil check could never fire and later
		// writes would panic. Return nil to signal failure.
		return nil
	}
	client.conn = conn
	return client
}
// DealResponse copies everything the server sends straight to stdout.
// io.Copy blocks until the connection is closed, so this is meant to
// run in its own goroutine — no manual read loop is needed.
func (client *Client) DealResponse() {
	io.Copy(os.Stdout, client.conn)
}
// menu prints the mode menu, reads the user's choice from stdin, and
// stores it in client.flag. Returns false (after a warning) when the
// input is outside 0..3.
func (client *Client) menu() bool {
	fmt.Println("1.公聊模式")
	fmt.Println("2.私聊模式")
	fmt.Println("3.更新用户名")
	fmt.Println("0.退出")

	var choice int
	fmt.Scanln(&choice)
	if choice < 0 || choice > 3 {
		fmt.Println("请输入合法范围内数字")
		return false
	}
	client.flag = choice
	return true
}
// PublicChat repeatedly reads lines from stdin and sends each non-empty
// line (newline-terminated) to the server, until the user types "exit"
// or a write fails.
func (client *Client) PublicChat() {
	for {
		fmt.Println("请输入聊天内容,exit退出")
		var chatMsg string
		fmt.Scanln(&chatMsg)
		if chatMsg == "exit" {
			return
		}
		// Skip empty input; only real messages are sent.
		if len(chatMsg) == 0 {
			continue
		}
		if _, err := client.conn.Write([]byte(chatMsg + "\n")); err != nil {
			fmt.Println("conn write err :", err)
			return
		}
	}
}
// selectUsers asks the server for the list of online users by sending
// the "who" command.
func (client *Client) selectUsers() {
	if _, err := client.conn.Write([]byte("who\n")); err != nil {
		fmt.Println("conn write err :", err)
	}
}
// privateChat runs the one-to-one chat mode: it lists online users,
// asks for a target user name, then relays each message to the server
// as "to|<user>|<msg>", until the user types exit at either prompt.
func (client *Client) privateChat() {
	var remoteName, chatMsg string
	// Show online users and pick the first chat target.
	client.selectUsers()
	fmt.Println("请输入聊天对象[用户名],exit退出")
	fmt.Scanln(&remoteName)
	for remoteName != "exit" {
		fmt.Println("请输入消息内容,exit退出")
		fmt.Scanln(&chatMsg)
		for chatMsg != "exit" {
			if len(chatMsg) != 0 {
				// NOTE(review): the trailing "\n\n" differs from the
				// single "\n" PublicChat sends — confirm the server
				// protocol expects the doubled newline here.
				sendMsg := "to|" + remoteName + "|" + chatMsg + "\n\n"
				_, err := client.conn.Write([]byte(sendMsg))
				if err != nil {
					fmt.Println("conn write err : ", err)
					break
				}
			}
			chatMsg = ""
			fmt.Println("请输入消息内容,exit退出")
			fmt.Scanln(&chatMsg)
		}
		// Bug fix: this re-prompt was previously placed after the outer
		// loop, so once a message session ended the loop spun forever on
		// the same target; now the user can pick a new target or exit.
		client.selectUsers()
		fmt.Println("请输入聊天对象[用户名],exit退出")
		fmt.Scanln(&remoteName)
	}
}
// rename reads a new username from stdin, stores it on the client, and
// sends a rename request. Returns false when the request could not be
// written to the server.
func (client *Client) rename() bool {
	fmt.Println("请输入新用户名")
	fmt.Scanln(&client.Name)
	if _, err := client.conn.Write([]byte("rename|" + client.Name + "\n")); err != nil {
		fmt.Println("conn.write error:", err)
		return false
	}
	return true
}
// Run drives the interactive loop: show the menu, then dispatch to the
// selected mode, until the user chooses 0 (quit).
func (client *Client) Run() {
	for client.flag != 0 {
		// Re-prompt until a valid menu choice is entered.
		for !client.menu() {
		}
		switch client.flag {
		case 1:
			client.PublicChat() // public chat
		case 2:
			client.privateChat() // private chat
		case 3:
			client.rename() // change username
		}
	}
}
// Command-line configuration, e.g.: ./client -ip 127.0.0.1 -port 8888
var serverIP string
var serverPort int

func init() {
	flag.StringVar(&serverIP, "ip", "127.0.0.1", "设置服务器的IP,默认是127.0.0.1")
	flag.IntVar(&serverPort, "port", 8888, "设置服务器的端口,默认是8888")
}
// main parses the flags, connects to the chat server, starts the
// background goroutine that prints server replies, and runs the
// interactive menu loop.
func main() {
	flag.Parse()
	client := NewClient(serverIP, serverPort)
	if client == nil {
		fmt.Println("链接服务器失败")
		return
	}
	// Stream server replies to stdout in the background.
	go client.DealResponse()
	fmt.Println("连接服务器成功")
	client.Run()
}
|
package match
import (
t "testing"
e "github.com/briancraig/game-server/match/entity"
)
// TestCanIAddSomePlayers assures that we can add players
func TestCanIAddSomePlayers(t *t.T) {
	playerOne := e.New()
	playerTwo := e.New()
	squad := NewTeam()
	squad.Add(playerOne)
	squad.Add(playerTwo)
	if squad.Size() != 2 {
		t.Fatal("el tamaño del equipo es ", squad.Size(), " y no 2")
	}
}
// TestCanIGetTheRivals assures the correct set/get of rivals
func TestCanIGetTheRivals(t *t.T) {
	ours := NewTeam()
	rivals := NewTeam()
	ours.SetEnemy(rivals) // the rival
	if ours.GetEnemy() != rivals {
		t.Fail()
	}
}
|
package main
// Expects blockartlib.go to be in the ./blockartlib/ dir, relative to
// this art-app.go file
import (
	"bufio"
	"crypto/ecdsa"
	"crypto/x509"
	"encoding/hex"
	"fmt"
	"os"

	"./blockartlib"
)
// success records the overall test outcome. It is package-scoped so
// checkShapeOverlap can flip it; as a local in main (the original) it
// was unreachable from that helper, which did not compile.
var success = true

// main exercises blockartlib's shape-overlap detection: each AddShape
// call below draws a line overlapping an existing one, so every call is
// expected to fail with ShapeOverlapError.
func main() {
	minerAddr := "127.0.0.1:8081"
	privKeyString := "3081a40201010430abb996d825e0a92b470d34f506eca5294a9198922ca4b18941d83150ceb4fe919b2bc6e5fb0c98deb727bcbd431e2de9a00706052b81040022a1640362000468e7eb38ea0d892ee056dd19d5c0358b91ae1995130886215f733375b0f52033b9584aa77fc4cbf8dace46b7f8f603cafeca0927956fcb6c72bb829cf04f7287faf09e9c96304925547178039bc4c7b17d94d51de01a98060e755bffdc8015bd"
	privateKeyBytes, _ := hex.DecodeString(privKeyString)
	privKey, _ := x509.ParseECPrivateKey(privateKeyBytes)
	// Open a canvas. The canvas settings are unused here (the original
	// bound them to a named variable, an unused-variable compile error).
	canvas, _, err := blockartlib.OpenCanvas(minerAddr, privKey)
	if checkError(err) != nil {
		return
	}
	success = true
	validateNum := 2
	// Bug fix throughout: the repeated `_, _, _, err :=` below were
	// redeclarations of err (no new variables), which does not compile;
	// they are plain assignments now.
	// Case l
	// Add an overlapping line (perpendicular)
	_, _, _, err = canvas.AddShape(validateNum, blockartlib.PATH, "M 100 101 l 2 0", "transparent", "black")
	checkShapeOverlap(err)
	// Add an overlapping line (parallel)
	_, _, _, err = canvas.AddShape(validateNum, blockartlib.PATH, "M 101 100 l 0 1", "transparent", "black")
	checkShapeOverlap(err)
	// Case h
	// Add an overlapping line (perpendicular)
	_, _, _, err = canvas.AddShape(validateNum, blockartlib.PATH, "M 101 100 v 2", "transparent", "black")
	checkShapeOverlap(err)
	// Add an overlapping line (parallel)
	_, _, _, err = canvas.AddShape(validateNum, blockartlib.PATH, "M 100 101 h 1", "transparent", "black")
	checkShapeOverlap(err)
	// Case v
	// Add an overlapping line (perpendicular)
	_, _, _, err = canvas.AddShape(validateNum, blockartlib.PATH, "M 200 201 h 2", "transparent", "black")
	checkShapeOverlap(err)
	// Add an overlapping line (parallel)
	_, _, _, err = canvas.AddShape(validateNum, blockartlib.PATH, "M 201 200 v 1", "transparent", "black")
	checkShapeOverlap(err)
	// Close the canvas; the remaining ink is not checked by this test.
	_, err = canvas.CloseCanvas()
	if checkError(err) != nil {
		return
	}
	if success {
		fmt.Println("Test case passed")
	} else {
		fmt.Println("Test case failed")
	}
}
// checkError prints err to stderr and returns it; returns nil when err
// is nil, so it can be used inline: if checkError(err) != nil { ... }.
func checkError(err error) error {
	if err != nil {
		// Bug fix: the original used Fprintf with no format verb, which
		// rendered the message as an %!(EXTRA ...) artifact.
		fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
		return err
	}
	return nil
}
// checkShapeOverlap marks the test failed unless err is a
// blockartlib.ShapeOverlapError — the expected outcome of every
// AddShape call in this test.
func checkShapeOverlap(err error) {
	// Bug fix: the original negated the type assertion's value
	// (!err.(T)), which does not compile; use the comma-ok form.
	_, isOverlap := err.(blockartlib.ShapeOverlapError)
	if err == nil || !isOverlap {
		fmt.Println("Expected ShapeOverlapError")
		success = false
	}
}
|
package xclient
import (
"math"
"math/rand"
"sync"
"time"
)
// SelectMode identifies the load-balancing strategy used by Get.
type SelectMode int

const (
	RandomSelect     SelectMode = iota // pick a random server
	RoundRobinSelect                   // pick servers in rotation
)
// Discovery abstracts service discovery for the client:
//   - Refresh pulls a fresh server list from the registry center.
//   - Update replaces the server list manually.
//   - Get selects one server instance per the load-balancing mode.
//   - GetAll returns every known server.
type Discovery interface {
	Refresh() error
	Update(servers []string) error
	Get(mod SelectMode) (string, error)
	GetAll() ([]string, error)
}
// MultiServerDiscovery a discovery for multi servers. (without registry center)
type MultiServerDiscovery struct {
	r       *rand.Rand // private RNG for the random strategy
	mu      sync.Mutex // protect following
	servers []string
	index   int // record selected position for robin
}

// NewMultiServerDiscovery builds a discovery over the given server
// addresses. The round-robin cursor starts at a random offset so that
// independent clients do not all hit the first server.
func NewMultiServerDiscovery(servers []string) *MultiServerDiscovery {
	d := &MultiServerDiscovery{
		r:       rand.New(rand.NewSource(time.Now().UnixNano())),
		servers: servers,
	}
	d.index = d.r.Intn(math.MaxInt32 - 1)
	return d
}
// Compile-time assertion that *MultiServerDiscovery satisfies Discovery.
var _ Discovery = (*MultiServerDiscovery)(nil)
|
package metadata
import (
"incognito-chain/common"
"incognito-chain/privacy"
)
// ReturnStakingMetadata ties a return-staking payout to the original
// staking transaction (TxID) and the staker's payment address.
// SharedRandom, when set via SetSharedRandom, is folded into Hash.
type ReturnStakingMetadata struct {
	MetadataBase
	TxID          string
	StakerAddress privacy.PaymentAddress
	SharedRandom  []byte
}
// NewReturnStaking builds a ReturnStakingMetadata for the given staking
// transaction ID, staker payment address, and metadata type.
func NewReturnStaking(txID string, producerAddress privacy.PaymentAddress, metaType int) *ReturnStakingMetadata {
	return &ReturnStakingMetadata{
		MetadataBase:  MetadataBase{Type: metaType},
		TxID:          txID,
		StakerAddress: producerAddress,
	}
}
// Hash returns the canonical hash of this metadata: staker address +
// TxID + optional shared-random bytes + the base metadata hash.
func (sbsRes ReturnStakingMetadata) Hash() *common.Hash {
	record := sbsRes.StakerAddress.String() + sbsRes.TxID
	if len(sbsRes.SharedRandom) > 0 {
		record += string(sbsRes.SharedRandom)
	}
	// final hash
	record += sbsRes.MetadataBase.Hash().String()
	hash := common.HashH([]byte(record))
	return &hash
}
// SetSharedRandom stores the shared random bytes that Hash folds into
// the metadata hash.
func (sbsRes *ReturnStakingMetadata) SetSharedRandom(r []byte) {
	sbsRes.SharedRandom = r
}
package main
import (
"fmt"
"github.com/hjcian/ds/stack"
)
// main demonstrates the LIFO behaviour of stack.ItemStack: push three
// values, then pull four times (one more than pushed, to show the
// empty-stack result).
func main() {
	st := stack.NewItemStack()
	fmt.Println(123)
	st.Push(123)
	st.Push(456)
	st.Push(789)
	fmt.Println("init: ", st)
	for i := 0; i < 4; i++ {
		fmt.Println("pull: ", st.Pull())
	}
	fmt.Println("left: ", st)
}
|
package mocks
import (
"github.com/MagalixCorp/magalix-agent/v3/entities"
"github.com/MagalixCorp/magalix-agent/v3/kuber"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// EntitiesWatcherMock is a canned-data stand-in for the entities
// watcher: Entities backs GetAllEntitiesByGvrk, and Parents (keyed by
// kuber.GetEntityKey) backs GetParents.
type EntitiesWatcherMock struct {
	Entities map[kuber.GroupVersionResourceKind][]unstructured.Unstructured
	Parents  map[string]*kuber.ParentController
}
// GetAllEntitiesByGvrk returns the canned entity map; it never errors.
func (ew *EntitiesWatcherMock) GetAllEntitiesByGvrk() (map[kuber.GroupVersionResourceKind][]unstructured.Unstructured, []error) {
	return ew.Entities, nil
}
// GetParents looks up the recorded parent controller for the entity
// identified by namespace/kind/name.
func (ew *EntitiesWatcherMock) GetParents(namespace string, kind string, name string) (*kuber.ParentController, bool) {
	key := kuber.GetEntityKey(namespace, kind, name)
	parent, ok := ew.Parents[key]
	return parent, ok
}
// AddResourceEventsHandler is a no-op in the mock.
func (ew *EntitiesWatcherMock) AddResourceEventsHandler(handler entities.ResourceEventsHandler) {}
|
// Copyright (C) 2015 Scaleway. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE.md file.
package cli
import "fmt"
// CommandListOpts holds a list of parameters
type CommandListOpts struct {
	Values *[]string
}

// NewListOpts create an empty CommandListOpts
func NewListOpts() CommandListOpts {
	var values []string
	return CommandListOpts{Values: &values}
}

// String renders the underlying list with fmt's default slice format.
func (opts *CommandListOpts) String() string {
	return fmt.Sprintf("%v", *opts.Values)
}

// Set appends value to the list; it always succeeds (flag.Value contract).
func (opts *CommandListOpts) Set(value string) error {
	*opts.Values = append(*opts.Values, value)
	return nil
}
|
package solutions
/*
* @lc app=leetcode id=204 lang=golang
*
* [204] Count Primes
*/
/*
Your runtime beats 65.27 % of golang submissions
Your memory usage beats 50.3 % of golang submissions (12.8 MB)
*/
// @lc code=start
// countPrimes returns the number of primes strictly less than n, using
// a sieve of Eratosthenes. O(n log log n) time, O(n) space.
func countPrimes(n int) int {
	// No primes below 2. This also guards the sieve allocation against
	// negative n (the original panicked on make([]bool, n+1) for n < -1).
	if n < 2 {
		return 0
	}
	// composite[i] == true means i was marked as a multiple of a
	// smaller prime. (Sized n, not n+1: index n is never inspected.)
	composite := make([]bool, n)
	count := 0
	for i := 2; i < n; i++ {
		if composite[i] {
			continue
		}
		count++
		for m := i * 2; m < n; m += i {
			composite[m] = true
		}
	}
	return count
}
// @lc code=end
|
package tests
import (
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/kv/codec"
"github.com/iotaledger/wasp/packages/solo"
"github.com/stretchr/testify/require"
"testing"
)
// Artifact path, names, and view/parameter keys for the inccounter
// proof-of-concept contract exercised by the tests below.
var incFile = "wasm/inccounter_bg.wasm"

const incName = "inccounter"
const incDescription = "IncCounter, a PoC smart contract"

var incHname = coretypes.Hn(incName)

const varCounter = "counter"
const varNumRepeats = "numRepeats"
// TestIncSoloInc deploys the inccounter contract, posts a single
// "increment" request, and checks the counter view reads back 1.
func TestIncSoloInc(t *testing.T) {
	al := solo.New(t, false, false)
	chain := al.NewChain(nil, "chain1")
	err := chain.DeployWasmContract(nil, incName, incFile)
	require.NoError(t, err)
	// One IOTA is transferred to make the request valid on-ledger.
	req := solo.NewCallParams(incName, "increment").
		WithTransfer(balance.ColorIOTA, 1)
	_, err = chain.PostRequest(req, nil)
	require.NoError(t, err)
	ret, err := chain.CallView(incName, "getCounter")
	require.NoError(t, err)
	counter, _, err := codec.DecodeInt64(ret.MustGet(varCounter))
	require.NoError(t, err)
	require.EqualValues(t, 1, counter)
}
// TestIncSoloRepeatMany posts one "repeatMany" request with 2 repeats:
// the initial call plus two self-posted repeats yield a counter of 3
// once the chain's backlog drains.
func TestIncSoloRepeatMany(t *testing.T) {
	al := solo.New(t, false, false)
	chain := al.NewChain(nil, "chain1")
	err := chain.DeployWasmContract(nil, incName, incFile)
	require.NoError(t, err)
	req := solo.NewCallParams(incName, "repeatMany", varNumRepeats, 2).
		WithTransfer(balance.ColorIOTA, 1)
	_, err = chain.PostRequest(req, nil)
	require.NoError(t, err)
	// Wait for the self-posted repeat requests to be processed.
	chain.WaitForEmptyBacklog()
	ret, err := chain.CallView(incName, "getCounter")
	require.NoError(t, err)
	counter, _, err := codec.DecodeInt64(ret.MustGet(varCounter))
	require.NoError(t, err)
	require.EqualValues(t, 3, counter)
}
|
package profilopedia
import "fmt"
// Profile represents a profilopedia profile entry; it has no fields yet.
//
// NOTE(review): this file's "fmt" import is unused, which is a compile
// error in Go — remove it or reference it.
type Profile struct {
}
|
package utils
import (
"github.com/bwmarrin/discordgo"
"os"
"strings"
)
// IsCanUseOpCommand reports whether user's ID appears in the
// DISCORD_BOT_OP_LIST environment variable (a comma-separated list of
// operator user IDs).
func IsCanUseOpCommand(user *discordgo.User) bool {
	ops := strings.Split(os.Getenv("DISCORD_BOT_OP_LIST"), ",")
	for _, id := range ops {
		if id == user.ID {
			return true
		}
	}
	return false
}
|
package c26_ctr_bitflipping
import (
"math/rand"
"testing"
"time"
)
// TestExploitAdmin runs the CTR bit-flipping exploit against an oracle
// built on a fresh random key and expects it to forge admin rights.
func TestExploitAdmin(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	key := make([]byte, 16)
	rand.Read(key)

	enc := DefaultEnc(key)
	isAdmin, err := ExploitAdmin(enc)
	if err != nil {
		t.Fatalf("Exploit error: %s\n", err)
	}
	if !isAdmin {
		t.Errorf("Exploit don't work")
	}
}
|
package pathfileops
import "testing"
// TestFileOpsCollection_InsertFileOpsAtIndex_01 inserts a new FileOps
// at interior index 2 of a 5-element collection and verifies the
// length grows to 6 and the inserted element is retrievable at index 2.
func TestFileOpsCollection_InsertFileOpsAtIndex_01(t *testing.T) {
	sf := make([]string, 5, 10)
	sf[0] = "../../filesfortest/levelfilesfortest/level_0_0_test.txt"
	sf[1] = "../../filesfortest/levelfilesfortest/level_0_1_test.txt"
	sf[2] = "../../filesfortest/levelfilesfortest/level_0_2_test.txt"
	sf[3] = "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	sf[4] = "../../filesfortest/levelfilesfortest/level_0_4_test.txt"
	df := make([]string, 5, 10)
	df[0] = "../../dirmgrtests/level_0_0_test.txt"
	df[1] = "../../dirmgrtests/level_0_1_test.txt"
	df[2] = "../../dirmgrtests/level_0_2_test.txt"
	df[3] = "../../dirmgrtests/level_0_3_test.txt"
	df[4] = "../../dirmgrtests/level_0_4_test.txt"
	fh := FileHelper{}
	fOpsCol := FileOpsCollection{}.New()
	for i := 0; i < 5; i++ {
		err := fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i])
		if err != nil {
			t.Errorf("Error returned by fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i]). "+
				"i='%v' Error='%v' ", i, err.Error())
			return
		}
	}
	arrayLen := fOpsCol.GetNumOfFileOps()
	if arrayLen != 5 {
		t.Errorf("Error: Expected intial array length='5'. "+
			"Instead, array length='%v' ", arrayLen)
	}
	// # 2 - build the source file manager
	origPath := fh.AdjustPathSlash("../../logTest/CmdrX/CmdrX.log")
	origAbsPath, err := fh.MakeAbsolutePath(origPath)
	if err != nil {
		t.Errorf("Error returned by (1) fh.MakeAbsolutePath(origPath). origPath= '%v' Error='%v'", origPath, err.Error())
	}
	fmgrSrc, err := FileMgr{}.NewFromPathFileNameExtStr(origAbsPath)
	if err != nil {
		t.Errorf("%v", err.Error())
	}
	// # 3 - build the destination file manager
	origPath = fh.AdjustPathSlash("../../dirmgrtests/CmdrX.log")
	origAbsPath, err = fh.MakeAbsolutePath(origPath)
	if err != nil {
		t.Errorf("Error returned by (2) fh.MakeAbsolutePath(origPath). origPath= '%v' Error='%v'", origPath, err.Error())
	}
	fmgrDst, err := FileMgr{}.NewFromPathFileNameExtStr(origAbsPath)
	if err != nil {
		t.Errorf("%v", err.Error())
	}
	fOp, err := FileOps{}.NewByFileMgrs(fmgrSrc, fmgrDst)
	// Bug fix: this error was previously ignored and immediately
	// overwritten by the next assignment.
	if err != nil {
		t.Errorf("Error returned by FileOps{}.NewByFileMgrs(fmgrSrc, fmgrDst). "+
			"Error='%v'", err.Error())
		return
	}
	err = fOpsCol.InsertFileOpsAtIndex(fOp, 2)
	if err != nil {
		t.Errorf("Error returned by fOpsCol.InsertFileOpsAtIndex(fOp, 2). "+
			"Error='%v'", err.Error())
	}
	arrayLen = fOpsCol.GetNumOfFileOps()
	if arrayLen != 6 {
		t.Errorf("Error: Expected after insertion array length='6'. "+
			"Instead, array length='%v' ", arrayLen)
	}
	fOpVerify, err := fOpsCol.PeekFileOpsAtIndex(2)
	// Bug fix: this error was previously ignored; a failed peek would
	// have produced a misleading "not equal" failure below.
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekFileOpsAtIndex(2). "+
			"Error='%v'", err.Error())
		return
	}
	if !fOp.Equal(&fOpVerify) {
		t.Error("Expected original file operation to be equal to extracted file operation. " +
			"They are NOT equal!")
	}
}
// TestFileOpsCollection_InsertFileOpsAtIndex_02 inserts a new FileOps
// at interior index 3 of a 5-element collection and verifies the
// length grows to 6 and the inserted element is retrievable at index 3.
func TestFileOpsCollection_InsertFileOpsAtIndex_02(t *testing.T) {
	sf := make([]string, 5, 10)
	sf[0] = "../../filesfortest/levelfilesfortest/level_0_0_test.txt"
	sf[1] = "../../filesfortest/levelfilesfortest/level_0_1_test.txt"
	sf[2] = "../../filesfortest/levelfilesfortest/level_0_2_test.txt"
	sf[3] = "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	sf[4] = "../../filesfortest/levelfilesfortest/level_0_4_test.txt"
	df := make([]string, 5, 10)
	df[0] = "../../dirmgrtests/level_0_0_test.txt"
	df[1] = "../../dirmgrtests/level_0_1_test.txt"
	df[2] = "../../dirmgrtests/level_0_2_test.txt"
	df[3] = "../../dirmgrtests/level_0_3_test.txt"
	df[4] = "../../dirmgrtests/level_0_4_test.txt"
	fh := FileHelper{}
	fOpsCol := FileOpsCollection{}.New()
	for i := 0; i < 5; i++ {
		err := fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i])
		if err != nil {
			t.Errorf("Error returned by fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i]). "+
				"i='%v' Error='%v' ", i, err.Error())
			return
		}
	}
	arrayLen := fOpsCol.GetNumOfFileOps()
	if arrayLen != 5 {
		t.Errorf("Error: Expected intial array length='5'. "+
			"Instead, array length='%v' ", arrayLen)
	}
	// # 2 - build the source file manager
	origPath := fh.AdjustPathSlash("../../logTest/CmdrX/CmdrX.log")
	origAbsPath, err := fh.MakeAbsolutePath(origPath)
	if err != nil {
		t.Errorf("Error returned by (1) fh.MakeAbsolutePath(origPath). origPath= '%v' Error='%v'", origPath, err.Error())
	}
	fmgrSrc, err := FileMgr{}.NewFromPathFileNameExtStr(origAbsPath)
	if err != nil {
		t.Errorf("%v", err.Error())
	}
	// # 3 - build the destination file manager
	origPath = fh.AdjustPathSlash("../../dirmgrtests/CmdrX.log")
	origAbsPath, err = fh.MakeAbsolutePath(origPath)
	if err != nil {
		t.Errorf("Error returned by (2) fh.MakeAbsolutePath(origPath). origPath= '%v' Error='%v'", origPath, err.Error())
	}
	fmgrDst, err := FileMgr{}.NewFromPathFileNameExtStr(origAbsPath)
	if err != nil {
		t.Errorf("%v", err.Error())
	}
	fOp, err := FileOps{}.NewByFileMgrs(fmgrSrc, fmgrDst)
	// Bug fix: this error was previously ignored and immediately
	// overwritten by the next assignment.
	if err != nil {
		t.Errorf("Error returned by FileOps{}.NewByFileMgrs(fmgrSrc, fmgrDst). "+
			"Error='%v'", err.Error())
		return
	}
	err = fOpsCol.InsertFileOpsAtIndex(fOp, 3)
	if err != nil {
		t.Errorf("Error returned by fOpsCol.InsertFileOpsAtIndex(fOp, 3). "+
			"Error='%v'", err.Error())
	}
	arrayLen = fOpsCol.GetNumOfFileOps()
	if arrayLen != 6 {
		t.Errorf("Error: Expected after insertion array length='6'. "+
			"Instead, array length='%v' ", arrayLen)
	}
	fOpVerify, err := fOpsCol.PeekFileOpsAtIndex(3)
	// Bug fix: this error was previously ignored; a failed peek would
	// have produced a misleading "not equal" failure below.
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekFileOpsAtIndex(3). "+
			"Error='%v'", err.Error())
		return
	}
	if !fOp.Equal(&fOpVerify) {
		t.Error("Expected original file operation to be equal to extracted file operation. " +
			"They are NOT equal!")
	}
}
// TestFileOpsCollection_InsertFileOpsAtIndex_03 inserts at index 99 —
// far past the end — which must append: the length grows to 6 and the
// new element lands at the last index (5).
func TestFileOpsCollection_InsertFileOpsAtIndex_03(t *testing.T) {
	sf := make([]string, 5, 10)
	sf[0] = "../../filesfortest/levelfilesfortest/level_0_0_test.txt"
	sf[1] = "../../filesfortest/levelfilesfortest/level_0_1_test.txt"
	sf[2] = "../../filesfortest/levelfilesfortest/level_0_2_test.txt"
	sf[3] = "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	sf[4] = "../../filesfortest/levelfilesfortest/level_0_4_test.txt"
	df := make([]string, 5, 10)
	df[0] = "../../dirmgrtests/level_0_0_test.txt"
	df[1] = "../../dirmgrtests/level_0_1_test.txt"
	df[2] = "../../dirmgrtests/level_0_2_test.txt"
	df[3] = "../../dirmgrtests/level_0_3_test.txt"
	df[4] = "../../dirmgrtests/level_0_4_test.txt"
	fh := FileHelper{}
	fOpsCol := FileOpsCollection{}.New()
	for i := 0; i < 5; i++ {
		err := fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i])
		if err != nil {
			t.Errorf("Error returned by fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i]). "+
				"i='%v' Error='%v' ", i, err.Error())
			return
		}
	}
	arrayLen := fOpsCol.GetNumOfFileOps()
	if arrayLen != 5 {
		t.Errorf("Error: Expected intial array length='5'. "+
			"Instead, array length='%v' ", arrayLen)
	}
	// # 2 - build the source file manager
	origPath := fh.AdjustPathSlash("../../logTest/CmdrX/CmdrX.log")
	origAbsPath, err := fh.MakeAbsolutePath(origPath)
	if err != nil {
		t.Errorf("Error returned by (1) fh.MakeAbsolutePath(origPath). origPath= '%v' Error='%v'", origPath, err.Error())
	}
	fmgrSrc, err := FileMgr{}.NewFromPathFileNameExtStr(origAbsPath)
	if err != nil {
		t.Errorf("%v", err.Error())
	}
	// # 3 - build the destination file manager
	origPath = fh.AdjustPathSlash("../../dirmgrtests/CmdrX.log")
	origAbsPath, err = fh.MakeAbsolutePath(origPath)
	if err != nil {
		t.Errorf("Error returned by (2) fh.MakeAbsolutePath(origPath). origPath= '%v' Error='%v'", origPath, err.Error())
	}
	fmgrDst, err := FileMgr{}.NewFromPathFileNameExtStr(origAbsPath)
	if err != nil {
		t.Errorf("%v", err.Error())
	}
	fOp, err := FileOps{}.NewByFileMgrs(fmgrSrc, fmgrDst)
	// Bug fix: this error was previously ignored and immediately
	// overwritten by the next assignment.
	if err != nil {
		t.Errorf("Error returned by FileOps{}.NewByFileMgrs(fmgrSrc, fmgrDst). "+
			"Error='%v'", err.Error())
		return
	}
	err = fOpsCol.InsertFileOpsAtIndex(fOp, 99)
	if err != nil {
		t.Errorf("Error returned by fOpsCol.InsertFileOpsAtIndex(fOp, 99). "+
			"Error='%v'", err.Error())
	}
	arrayLen = fOpsCol.GetNumOfFileOps()
	if arrayLen != 6 {
		t.Errorf("Error: Expected after insertion array length='6'. "+
			"Instead, array length='%v' ", arrayLen)
	}
	fOpVerify, err := fOpsCol.PeekFileOpsAtIndex(5)
	// Bug fix: this error was previously ignored; a failed peek would
	// have produced a misleading "not equal" failure below.
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekFileOpsAtIndex(5). "+
			"Error='%v'", err.Error())
		return
	}
	if !fOp.Equal(&fOpVerify) {
		t.Error("Expected original file operation to be equal to extracted file operation. " +
			"They are NOT equal!")
	}
}
// TestFileOpsCollection_InsertFileOpsAtIndex_04 verifies that inserting
// at a negative index (-3) is rejected with an error.
func TestFileOpsCollection_InsertFileOpsAtIndex_04(t *testing.T) {
	sf := make([]string, 5, 10)
	sf[0] = "../../filesfortest/levelfilesfortest/level_0_0_test.txt"
	sf[1] = "../../filesfortest/levelfilesfortest/level_0_1_test.txt"
	sf[2] = "../../filesfortest/levelfilesfortest/level_0_2_test.txt"
	sf[3] = "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	sf[4] = "../../filesfortest/levelfilesfortest/level_0_4_test.txt"
	df := make([]string, 5, 10)
	df[0] = "../../dirmgrtests/level_0_0_test.txt"
	df[1] = "../../dirmgrtests/level_0_1_test.txt"
	df[2] = "../../dirmgrtests/level_0_2_test.txt"
	df[3] = "../../dirmgrtests/level_0_3_test.txt"
	df[4] = "../../dirmgrtests/level_0_4_test.txt"
	fh := FileHelper{}
	fOpsCol := FileOpsCollection{}.New()
	for i := 0; i < 5; i++ {
		err := fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i])
		if err != nil {
			t.Errorf("Error returned by fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i]). "+
				"i='%v' Error='%v' ", i, err.Error())
			return
		}
	}
	arrayLen := fOpsCol.GetNumOfFileOps()
	if arrayLen != 5 {
		t.Errorf("Error: Expected intial array length='5'. "+
			"Instead, array length='%v' ", arrayLen)
	}
	// # 2 - build the source file manager
	origPath := fh.AdjustPathSlash("../../logTest/CmdrX/CmdrX.log")
	origAbsPath, err := fh.MakeAbsolutePath(origPath)
	if err != nil {
		t.Errorf("Error returned by (1) fh.MakeAbsolutePath(origPath). origPath= '%v' Error='%v'", origPath, err.Error())
	}
	fmgrSrc, err := FileMgr{}.NewFromPathFileNameExtStr(origAbsPath)
	if err != nil {
		t.Errorf("%v", err.Error())
	}
	// # 3 - build the destination file manager
	origPath = fh.AdjustPathSlash("../../dirmgrtests/CmdrX.log")
	origAbsPath, err = fh.MakeAbsolutePath(origPath)
	if err != nil {
		t.Errorf("Error returned by (2) fh.MakeAbsolutePath(origPath). origPath= '%v' Error='%v'", origPath, err.Error())
	}
	fmgrDst, err := FileMgr{}.NewFromPathFileNameExtStr(origAbsPath)
	if err != nil {
		t.Errorf("%v\n", err.Error())
	}
	fOp, err := FileOps{}.NewByFileMgrs(fmgrSrc, fmgrDst)
	// Bug fix: this error was previously ignored.
	if err != nil {
		t.Errorf("Error returned by FileOps{}.NewByFileMgrs(fmgrSrc, fmgrDst). "+
			"Error='%v'", err.Error())
		return
	}
	err = fOpsCol.InsertFileOpsAtIndex(fOp, -3)
	if err == nil {
		t.Error("Error: Expected an error return from err = fOpsCol." +
			"InsertFileOpsAtIndex(fOp, -3). NO ERROR WAS RETURNED!! ")
	}
}
// TestFileOpsCollection_InsertFileOpsAtIndex_05 verifies that
// InsertFileOpsAtIndex succeeds at index 0 when the collection's
// internal fileOps slice is nil (an uninitialized collection).
func TestFileOpsCollection_InsertFileOpsAtIndex_05(t *testing.T) {

	fOpsCol := FileOpsCollection{}

	srcFile := "../../filesfortest/levelfilesfortest/level_0_0_test.txt"

	destFile := "../../dirmgrtests/level_0_0_test.txt"

	fOp, err := FileOps{}.NewByPathFileNameExtStrs(srcFile, destFile)

	if err != nil {
		// Fixed: message previously blamed fOpsCol.AddByPathFileNameExtStrs,
		// but the call above is FileOps{}.NewByPathFileNameExtStrs.
		t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(srcFile, destFile).\n"+
			"Error='%v'\n", err.Error())
		return
	}

	// Force the nil-slice code path inside InsertFileOpsAtIndex.
	fOpsCol.fileOps = nil

	err = fOpsCol.InsertFileOpsAtIndex(fOp, 0)

	if err != nil {
		t.Errorf("Error returned by fOpsCol.InsertFileOpsAtIndex(fOp, 0)\n"+
			"Error='%v'\n", err.Error())
	}
}
// TestFileOpsCollection_InsertFileOpsAtIndex_06 inserts a FileOps object
// at index 0 of an already-populated collection and expects no error.
func TestFileOpsCollection_InsertFileOpsAtIndex_06(t *testing.T) {

	sf := make([]string, 5)

	sf[0] = "../../filesfortest/levelfilesfortest/level_0_0_test.txt"
	sf[1] = "../../filesfortest/levelfilesfortest/level_0_1_test.txt"
	sf[2] = "../../filesfortest/levelfilesfortest/level_0_2_test.txt"
	sf[3] = "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	sf[4] = "../../filesfortest/levelfilesfortest/level_0_4_test.txt"

	df := make([]string, 5)

	df[0] = "../../dirmgrtests/level_0_0_test.txt"
	df[1] = "../../dirmgrtests/level_0_1_test.txt"
	df[2] = "../../dirmgrtests/level_0_2_test.txt"
	df[3] = "../../dirmgrtests/level_0_3_test.txt"
	df[4] = "../../dirmgrtests/level_0_4_test.txt"

	fOpsCol := FileOpsCollection{}.New()

	// Seed the collection with five file operations.
	for i := 0; i < 5; i++ {

		err := fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i])

		if err != nil {
			t.Errorf("Error returned by fOpsCol.AddByPathFileNameExtStrs(sf[i], df[i]).\n"+
				"i='%v'\nError='%v'\n", i, err.Error())
			return
		}
	}

	srcFile := "../../filesfortest/levelfilesfortest/level_0_0_test.txt"

	destFile := "../../dirmgrtests/level_0_0_test.txt"

	fOp, err := FileOps{}.NewByPathFileNameExtStrs(srcFile, destFile)

	if err != nil {
		// Fixed: message previously blamed fOpsCol.AddByPathFileNameExtStrs,
		// but the call above is FileOps{}.NewByPathFileNameExtStrs.
		t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(srcFile, destFile).\n"+
			"Error='%v'\n", err.Error())
		return
	}

	// Insert at the head of the populated collection.
	err = fOpsCol.InsertFileOpsAtIndex(fOp, 0)

	if err != nil {
		t.Errorf("Error returned by fOpsCol.InsertFileOpsAtIndex(fOp, 0)\n"+
			"Error='%v'\n", err.Error())
	}
}
// TestFileOpsCollection_NewFromFileMgrCollection_01 builds a
// FileOpsCollection from ten file managers and checks that the destination
// directory of the final FileOps entry matches the target directory.
func TestFileOpsCollection_NewFromFileMgrCollection_01(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_5_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_6_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_7_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_8_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_9_test.txt",
	}

	baseDir := "../../filesfortest/levelfilesfortest"

	baseDMgr, err := DirMgr{}.New(baseDir)
	if err != nil {
		t.Errorf("Error returned by DirMgr{}.New(baseDir)\nbaseDir='%v'\nError='%v'\n",
			baseDir, err.Error())
	}

	targetDir := "../../dirmgrtests"

	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Error returned by DirMgr{}.New(targetDir)\ntargetDir='%v'\nError='%v'\n",
			targetDir, err.Error())
	}

	fMgrCol := FileMgrCollection{}

	for i, srcPath := range srcPaths {
		if err := fMgrCol.AddFileMgrByPathFileNameExt(srcPath); err != nil {
			t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(sf[%v])\nsf[%v]='%v'\nError='%v'\n",
				i, i, srcPath, err.Error())
			return
		}
	}

	fOpsCol, err := FileOpsCollection{}.NewFromFileMgrCollection(&fMgrCol, &baseDMgr, &targetDMgr)
	if err != nil {
		t.Errorf("Error returned by FileOpsCollection{}.NewFromFileMgrCollection(&fMgrCol, &baseDMgr, &targetDMgr)\nbaseDMgr='%v'\ntargetDMgr='%v'\nError='%v'\n",
			baseDMgr.GetAbsolutePath(), targetDMgr.GetAbsolutePath(), err.Error())
		return
	}

	lastFOp, err := fOpsCol.PeekFileOpsAtIndex(9)
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekFileOpsAtIndex(9)\nError='%v'\n", err.Error())
		return
	}

	dstFMgr := lastFOp.GetDestination()
	dstDMgr := dstFMgr.GetDirMgr()

	if !targetDMgr.EqualAbsPaths(&dstDMgr) {
		t.Errorf("ERROR: Expected targetDMgr==actualDstDMgr.\nInstead, they ARE NOT EQUAL!\ntargetDMgr='%v'\nactualDstDMgr='%v'\n",
			targetDMgr.GetAbsolutePath(), dstDMgr.GetAbsolutePath())
	}
}
// TestFileOpsCollection_NewFromFileMgrCollection_02 builds a
// FileOpsCollection via a receiver whose internal fileOps slice is nil,
// then verifies the destination directory of the last FileOps entry.
func TestFileOpsCollection_NewFromFileMgrCollection_02(t *testing.T) {

	sf := make([]string, 10)

	sf[0] = "../../filesfortest/levelfilesfortest/level_0_0_test.txt"
	sf[1] = "../../filesfortest/levelfilesfortest/level_0_1_test.txt"
	sf[2] = "../../filesfortest/levelfilesfortest/level_0_2_test.txt"
	sf[3] = "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	sf[4] = "../../filesfortest/levelfilesfortest/level_0_4_test.txt"
	sf[5] = "../../filesfortest/levelfilesfortest/level_0_5_test.txt"
	sf[6] = "../../filesfortest/levelfilesfortest/level_0_6_test.txt"
	sf[7] = "../../filesfortest/levelfilesfortest/level_0_7_test.txt"
	sf[8] = "../../filesfortest/levelfilesfortest/level_0_8_test.txt"
	sf[9] = "../../filesfortest/levelfilesfortest/level_0_9_test.txt"

	baseDir := "../../filesfortest/levelfilesfortest"

	baseDMgr, err := DirMgr{}.New(baseDir)

	if err != nil {
		t.Errorf("Error returned by DirMgr{}.New(baseDir)\n"+
			"baseDir='%v'\n"+
			"Error='%v'\n", baseDir, err.Error())
	}

	targetDir := "../../dirmgrtests"

	targetDMgr, err := DirMgr{}.New(targetDir)

	if err != nil {
		t.Errorf("Error returned by DirMgr{}.New(targetDir)\n"+
			"targetDir='%v'\n"+
			"Error='%v'\n", targetDir, err.Error())
	}

	fMgrCol := FileMgrCollection{}

	for i := 0; i < 10; i++ {

		err := fMgrCol.AddFileMgrByPathFileNameExt(sf[i])

		if err != nil {
			t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(sf[%v])\n"+
				"sf[%v]='%v'\n"+
				"Error='%v'\n", i, i, sf[i], err.Error())
			return
		}
	}

	fOpsCollection := FileOpsCollection{}
	fOpsCollection.fileOps = nil

	// Fixed: the original invoked FileOpsCollection{}.NewFromFileMgrCollection
	// on a fresh literal, leaving 'fOpsCollection' (and its nil fileOps setup)
	// as unused dead code. Invoke the method on 'fOpsCollection' so the
	// nil-slice receiver state prepared above is actually exercised
	// (mirrors the receiver pattern used in Test..._03).
	fOpsCol, err := fOpsCollection.NewFromFileMgrCollection(&fMgrCol, &baseDMgr, &targetDMgr)

	if err != nil {
		t.Errorf("Error returned by fOpsCollection.NewFromFileMgrCollection("+
			"&fMgrCol, &baseDMgr, &targetDMgr)\n"+
			"baseDMgr='%v'\n"+
			"targetDMgr='%v'\n"+
			"Error='%v'\n", baseDMgr.GetAbsolutePath(), targetDMgr.GetAbsolutePath(), err.Error())
		return
	}

	fOp, err := fOpsCol.PeekFileOpsAtIndex(9)

	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekFileOpsAtIndex(9)\n"+
			"Error='%v'\n", err.Error())
		return
	}

	actualDstFMgr := fOp.GetDestination()

	actualDstDMgr := actualDstFMgr.GetDirMgr()

	if !targetDMgr.EqualAbsPaths(&actualDstDMgr) {
		t.Errorf("ERROR: Expected targetDMgr==actualDstDMgr.\n"+
			"Instead, they ARE NOT EQUAL!\n"+
			"targetDMgr='%v'\n"+
			"actualDstDMgr='%v'\n", targetDMgr.GetAbsolutePath(), actualDstDMgr.GetAbsolutePath())
	}
}
// TestFileOpsCollection_NewFromFileMgrCollection_03 expects an error when
// the supplied FileMgrCollection contains no file managers.
func TestFileOpsCollection_NewFromFileMgrCollection_03(t *testing.T) {

	baseDir := "../../filesfortest/levelfilesfortest"

	baseDMgr, err := DirMgr{}.New(baseDir)
	if err != nil {
		t.Errorf("Error returned by DirMgr{}.New(baseDir)\nbaseDir='%v'\nError='%v'\n",
			baseDir, err.Error())
	}

	targetDir := "../../dirmgrtests"

	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Error returned by DirMgr{}.New(targetDir)\ntargetDir='%v'\nError='%v'\n",
			targetDir, err.Error())
	}

	emptyFMgrCol := FileMgrCollection{}
	emptyFMgrCol.fileMgrs = nil

	fOpsCollection := FileOpsCollection{}
	fOpsCollection.fileOps = nil

	if _, err = fOpsCollection.NewFromFileMgrCollection(&emptyFMgrCol, &baseDMgr, &targetDMgr); err == nil {
		t.Error("Expected an error return from FileOpsCollection{}.NewFromFileMgrCollection(&fMgrCol, &baseDMgr, &targetDMgr)\nbecause the 'fMgrCol' is EMPTY!\n")
		return
	}
}
// TestFileOpsCollection_NewFromFileMgrCollection_04 expects an error when
// the source base directory argument is nil.
func TestFileOpsCollection_NewFromFileMgrCollection_04(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_5_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_6_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_7_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_8_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_9_test.txt",
	}

	targetDir := "../../dirmgrtests"

	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Error returned by DirMgr{}.New(targetDir)\ntargetDir='%v'\nError='%v'\n",
			targetDir, err.Error())
	}

	fMgrCol := FileMgrCollection{}

	for i, srcPath := range srcPaths {
		if err := fMgrCol.AddFileMgrByPathFileNameExt(srcPath); err != nil {
			t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(sf[%v])\nsf[%v]='%v'\nError='%v'\n",
				i, i, srcPath, err.Error())
			return
		}
	}

	fOpsCollection := FileOpsCollection{}
	fOpsCollection.fileOps = nil

	if _, err = fOpsCollection.NewFromFileMgrCollection(&fMgrCol, nil, &targetDMgr); err == nil {
		t.Error("Expected an error return from fOpsCollection.NewFromFileMgrCollection(&fMgrCol, nil, &targetDMgr)\nbecause 'sourceBaseDir' is 'nil'!\nHowever, NO ERROR WAS RETURNED!!!\n")
		return
	}
}
// TestFileOpsCollection_NewFromFileMgrCollection_05 expects an error when
// the target base directory argument is nil.
func TestFileOpsCollection_NewFromFileMgrCollection_05(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_5_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_6_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_7_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_8_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_9_test.txt",
	}

	baseDir := "../../filesfortest/levelfilesfortest"

	baseDMgr, err := DirMgr{}.New(baseDir)
	if err != nil {
		t.Errorf("Error returned by DirMgr{}.New(baseDir)\nbaseDir='%v'\nError='%v'\n",
			baseDir, err.Error())
	}

	fMgrCol := FileMgrCollection{}

	for i, srcPath := range srcPaths {
		if err := fMgrCol.AddFileMgrByPathFileNameExt(srcPath); err != nil {
			t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(sf[%v])\nsf[%v]='%v'\nError='%v'\n",
				i, i, srcPath, err.Error())
			return
		}
	}

	fOpsCollection := FileOpsCollection{}
	fOpsCollection.fileOps = nil

	if _, err = fOpsCollection.NewFromFileMgrCollection(&fMgrCol, &baseDMgr, nil); err == nil {
		t.Error("Expected an error return from fOpsCollection.NewFromFileMgrCollection(&fMgrCol, &baseDMgr, nil)\nbecause 'targetBaseDir' is 'nil'!\nHowever, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PopFileOpsAtIndex_01 pops the middle element
// (index 2) and verifies both the popped FileOps object and the
// reduced collection length.
func TestFileOpsCollection_PopFileOpsAtIndex_01(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	var wantFOp FileOps

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		// Capture a copy of the element that will later be popped.
		if i == 2 {
			wantFOp = newFOp.CopyOut()
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	gotFOp, err := col.PopFileOpsAtIndex(2)
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PopFileOpsAtIndex(2)\nError='%v'\n", err.Error())
		return
	}

	if !wantFOp.Equal(&gotFOp) {
		t.Errorf("ERROR: Expected expectedFOp == actualFOp.\n"+
			"However, Equal returned expectedFOp != actualFOp.\n"+
			"expectedFOp.source='%v'\nexpectedFOp.destination='%v'\nactualFOp.source='%v'\nactualFOp.destination='%v'\n",
			wantFOp.source.GetAbsolutePathFileName(),
			wantFOp.destination.GetAbsolutePathFileName(),
			gotFOp.source.GetAbsolutePathFileName(),
			gotFOp.destination.GetAbsolutePathFileName())
	}

	if col.GetNumOfFileOps() != 4 {
		t.Errorf("ERROR: Expected that after PopFileOpsAtIndex operation, Number Of FileOps='4'.\n"+
			"Instead, Number Of FileOps='%v'\n", col.GetNumOfFileOps())
	}
}
// TestFileOpsCollection_PopFileOpsAtIndex_02 pops the first element
// (index 0) and verifies both the popped FileOps object and the
// reduced collection length.
func TestFileOpsCollection_PopFileOpsAtIndex_02(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	var wantFOp FileOps

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		// Capture a copy of the element that will later be popped.
		if i == 0 {
			wantFOp = newFOp.CopyOut()
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	gotFOp, err := col.PopFileOpsAtIndex(0)
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PopFileOpsAtIndex(0)\nError='%v'\n", err.Error())
		return
	}

	if !wantFOp.Equal(&gotFOp) {
		t.Errorf("ERROR: Expected expectedFOp == actualFOp.\n"+
			"However, Equal returned expectedFOp != actualFOp.\n"+
			"expectedFOp.source='%v'\nexpectedFOp.destination='%v'\nactualFOp.source='%v'\nactualFOp.destination='%v'\n",
			wantFOp.source.GetAbsolutePathFileName(),
			wantFOp.destination.GetAbsolutePathFileName(),
			gotFOp.source.GetAbsolutePathFileName(),
			gotFOp.destination.GetAbsolutePathFileName())
	}

	if col.GetNumOfFileOps() != 4 {
		t.Errorf("ERROR: Expected that after PopFileOpsAtIndex operation, Number Of FileOps='4'.\n"+
			"Instead, Number Of FileOps='%v'\n", col.GetNumOfFileOps())
	}
}
// TestFileOpsCollection_PopFileOpsAtIndex_03 pops the last element
// (index 4) and verifies both the popped FileOps object and the
// reduced collection length.
func TestFileOpsCollection_PopFileOpsAtIndex_03(t *testing.T) {

	sf := make([]string, 5)

	sf[0] = "../../filesfortest/levelfilesfortest/level_0_0_test.txt"
	sf[1] = "../../filesfortest/levelfilesfortest/level_0_1_test.txt"
	sf[2] = "../../filesfortest/levelfilesfortest/level_0_2_test.txt"
	sf[3] = "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	sf[4] = "../../filesfortest/levelfilesfortest/level_0_4_test.txt"

	df := make([]string, 5)

	df[0] = "../../dirmgrtests/level_0_0_test.txt"
	df[1] = "../../dirmgrtests/level_0_1_test.txt"
	df[2] = "../../dirmgrtests/level_0_2_test.txt"
	df[3] = "../../dirmgrtests/level_0_3_test.txt"
	df[4] = "../../dirmgrtests/level_0_4_test.txt"

	fOpsCol := FileOpsCollection{}.New()

	var expectedFOp FileOps

	for i := 0; i < 5; i++ {

		fOp, err := FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])

		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\n"+
				"i='%v'\n"+
				"Error='%v'\n", i, err.Error())
			return
		}

		// Capture a copy of the element that will later be popped.
		if i == 4 {
			expectedFOp = fOp.CopyOut()
		}

		err = fOpsCol.AddByFileOps(fOp)

		if err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). "+
				"i='%v'\n"+
				"sf[i]='%v'\n"+
				"df[i]='%v'\n"+
				"Error='%v'\n", i, sf[i], df[i], err.Error())
			return
		}
	}

	actualFOp, err := fOpsCol.PopFileOpsAtIndex(4)

	if err != nil {
		// Fixed: message previously referenced index '0'; the call uses index '4'.
		t.Errorf("Error returned by fOpsCol.PopFileOpsAtIndex(4)\n"+
			"Error='%v'\n", err.Error())
		return
	}

	if !expectedFOp.Equal(&actualFOp) {
		t.Errorf("ERROR: Expected expectedFOp == actualFOp.\n"+
			"However, Equal returned expectedFOp != actualFOp.\n"+
			"expectedFOp.source='%v'\n"+
			"expectedFOp.destination='%v'\n"+
			"actualFOp.source='%v'\n"+
			"actualFOp.destination='%v'\n",
			expectedFOp.source.GetAbsolutePathFileName(),
			expectedFOp.destination.GetAbsolutePathFileName(),
			actualFOp.source.GetAbsolutePathFileName(),
			actualFOp.destination.GetAbsolutePathFileName())
	}

	if fOpsCol.GetNumOfFileOps() != 4 {
		t.Errorf("ERROR: Expected that after PopFileOpsAtIndex operation, Number Of FileOps='4'.\n"+
			"Instead, Number Of FileOps='%v'\n", fOpsCol.GetNumOfFileOps())
	}
}
// TestFileOpsCollection_PopFileOpsAtIndex_04 expects an error when the
// requested pop index is negative.
func TestFileOpsCollection_PopFileOpsAtIndex_04(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	if _, err := col.PopFileOpsAtIndex(-1); err == nil {
		t.Error("Expected an error return from fOpsCol.PopFileOpsAtIndex(-1)\n" +
			"because the index '-1' is INVALID!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PopFileOpsAtIndex_05 expects an error when the
// requested pop index exceeds the collection bounds.
func TestFileOpsCollection_PopFileOpsAtIndex_05(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	if _, err := col.PopFileOpsAtIndex(99); err == nil {
		t.Error("Expected an error return from fOpsCol.PopFileOpsAtIndex(99)\n" +
			"because the index '99' is INVALID!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PopFileOpsAtIndex_06 expects an error when popping
// from a collection whose internal fileOps slice is nil (empty collection).
func TestFileOpsCollection_PopFileOpsAtIndex_06(t *testing.T) {

	emptyCol := FileOpsCollection{}
	emptyCol.fileOps = nil

	if _, err := emptyCol.PopFileOpsAtIndex(2); err == nil {
		t.Error("Expected an error return from fOpsCol.PopFileOpsAtIndex(2)\n" +
			"because the File Ops Collection is EMPTY!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PopFirstFileOps_01 pops the first element of a
// five-element collection and verifies the popped FileOps object and the
// remaining element count.
func TestFileOpsCollection_PopFirstFileOps_01(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	var wantFileOps FileOps

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		// Capture a copy of the element that will be popped first.
		if i == 0 {
			wantFileOps = newFOp.CopyOut()
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	gotFOp, err := col.PopFirstFileOps()
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PopFirstFileOps()\nError='%v'\n", err.Error())
		return
	}

	if !wantFileOps.Equal(&gotFOp) {
		t.Error("ERROR: Expected expectedFileOps.Equal(&actualFOp)=='true'.\n" +
			"Instead, expectedFileOps.Equal(&actualFOp)=='false' !!\n")
	}

	if col.GetNumOfFileOps() != 4 {
		t.Errorf("ERROR: Expected the remaining File Operations object to be '4'.\n"+
			"Instead, Number Of FileOps='%v'\n", col.GetNumOfFileOps())
	}
}
// TestFileOpsCollection_PopFirstFileOps_02 expects an error when popping
// the first element of an empty collection.
func TestFileOpsCollection_PopFirstFileOps_02(t *testing.T) {

	emptyCol := FileOpsCollection{}
	emptyCol.fileOps = nil

	if _, err := emptyCol.PopFirstFileOps(); err == nil {
		t.Error("ERROR: Expected an error return from fOpsCol.PopFirstFileOps()\n" +
			"because the File Ops Collection is EMPTY!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PopLastFileOps_01 pops the last element of a
// five-element collection and verifies the popped FileOps object and the
// remaining element count.
func TestFileOpsCollection_PopLastFileOps_01(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	var wantFileOps FileOps

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		// Capture a copy of the element that will be popped last.
		if i == 4 {
			wantFileOps = newFOp.CopyOut()
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	gotFOp, err := col.PopLastFileOps()
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PopLastFileOps()\nError='%v'\n", err.Error())
		return
	}

	if !wantFileOps.Equal(&gotFOp) {
		t.Error("ERROR: Expected expectedFileOps.Equal(&actualFOp)=='true'.\n" +
			"Instead, expectedFileOps.Equal(&actualFOp)=='false' !!\n")
	}

	if col.GetNumOfFileOps() != 4 {
		t.Errorf("ERROR: Expected the remaining File Operations object to be '4'.\n"+
			"Instead, Number Of FileOps='%v'\n", col.GetNumOfFileOps())
	}
}
// TestFileOpsCollection_PopLastFileOps_02 expects an error when popping
// the last element of an empty collection.
func TestFileOpsCollection_PopLastFileOps_02(t *testing.T) {

	emptyCol := FileOpsCollection{}
	emptyCol.fileOps = nil

	if _, err := emptyCol.PopLastFileOps(); err == nil {
		t.Error("ERROR: Expected an error return from fOpsCol.PopLastFileOps()\n" +
			"because the File Ops Collection is EMPTY!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PeekFileOpsAtIndex_01 peeks at the middle element
// (index 2) and verifies it matches the FileOps object stored there.
func TestFileOpsCollection_PeekFileOpsAtIndex_01(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	var wantFileOps FileOps

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		// Capture a copy of the element that will later be peeked.
		if i == 2 {
			wantFileOps = newFOp.CopyOut()
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	gotFOp, err := col.PeekFileOpsAtIndex(2)
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekFileOpsAtIndex(2)\nError='%v'\n", err.Error())
		return
	}

	if !wantFileOps.Equal(&gotFOp) {
		t.Error("ERROR: Expected expectedFileOps.Equal(&actualFOp)=='true'.\n" +
			"Instead, expectedFileOps.Equal(&actualFOp)=='false' !!\n")
	}
}
// TestFileOpsCollection_PeekFileOpsAtIndex_02 peeks at the first element
// (index 0) and verifies it matches the FileOps object stored there.
func TestFileOpsCollection_PeekFileOpsAtIndex_02(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	var wantFileOps FileOps

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		// Capture a copy of the element that will later be peeked.
		if i == 0 {
			wantFileOps = newFOp.CopyOut()
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	gotFOp, err := col.PeekFileOpsAtIndex(0)
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekFileOpsAtIndex(0)\nError='%v'\n", err.Error())
		return
	}

	if !wantFileOps.Equal(&gotFOp) {
		t.Error("ERROR: Expected expectedFileOps.Equal(&actualFOp)=='true'.\n" +
			"Instead, expectedFileOps.Equal(&actualFOp)=='false' !!\n")
	}
}
// TestFileOpsCollection_PeekFileOpsAtIndex_03 peeks at the last element
// (index 4) and verifies it matches the FileOps object stored there.
func TestFileOpsCollection_PeekFileOpsAtIndex_03(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	var wantFileOps FileOps

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		// Capture a copy of the element that will later be peeked.
		if i == 4 {
			wantFileOps = newFOp.CopyOut()
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	gotFOp, err := col.PeekFileOpsAtIndex(4)
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekFileOpsAtIndex(4)\nError='%v'\n", err.Error())
		return
	}

	if !wantFileOps.Equal(&gotFOp) {
		t.Error("ERROR: Expected expectedFileOps.Equal(&actualFOp)=='true'.\n" +
			"Instead, expectedFileOps.Equal(&actualFOp)=='false' !!\n")
	}
}
// TestFileOpsCollection_PeekFileOpsAtIndex_04 expects an error when the
// requested peek index is negative.
func TestFileOpsCollection_PeekFileOpsAtIndex_04(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	if _, err := col.PeekFileOpsAtIndex(-1); err == nil {
		t.Error("Expected an error return from fOpsCol.PeekFileOpsAtIndex(-1)\n" +
			"because the index, '-1', is INVALID!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PeekFileOpsAtIndex_05 expects an error when the
// requested peek index exceeds the collection bounds.
func TestFileOpsCollection_PeekFileOpsAtIndex_05(t *testing.T) {

	srcPaths := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}

	dstPaths := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}

	col := FileOpsCollection{}.New()

	for i := range srcPaths {

		newFOp, err := FileOps{}.NewByPathFileNameExtStrs(srcPaths[i], dstPaths[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\ni='%v'\nError='%v'\n",
				i, err.Error())
			return
		}

		if err = col.AddByFileOps(newFOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, srcPaths[i], dstPaths[i], err.Error())
			return
		}
	}

	if _, err := col.PeekFileOpsAtIndex(99); err == nil {
		t.Error("Expected an error return from fOpsCol.PeekFileOpsAtIndex(99)\n" +
			"because the index, '99', is INVALID!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PeekFileOpsAtIndex_06 verifies that peeking into an
// empty (nil) collection returns an error.
//
// The sf/df path fixtures built in the original version were assigned but
// never read by this test, so the dead setup has been removed.
func TestFileOpsCollection_PeekFileOpsAtIndex_06(t *testing.T) {
	fOpsCol := FileOpsCollection{}
	fOpsCol.fileOps = nil
	_, err := fOpsCol.PeekFileOpsAtIndex(0)
	if err == nil {
		t.Error("Expected an error return from fOpsCol.PeekFileOpsAtIndex(0)\n" +
			"because the File Operations Collections is EMPTY!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PeekFirstFileOps_01 verifies that PeekFirstFileOps
// returns a copy of the first FileOps element added to the collection.
func TestFileOpsCollection_PeekFirstFileOps_01(t *testing.T) {
	sf := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}
	df := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}
	fOpsCol := FileOpsCollection{}.New()
	var expectedFileOps FileOps
	for i := range sf {
		fOp, err := FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\n"+
				"i='%v'\nError='%v'\n", i, err.Error())
			return
		}
		if i == 0 {
			// The first element added is what PeekFirstFileOps must return.
			expectedFileOps = fOp.CopyOut()
		}
		if err = fOpsCol.AddByFileOps(fOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). "+
				"i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, sf[i], df[i], err.Error())
			return
		}
	}
	actualFOp, err := fOpsCol.PeekFirstFileOps()
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekFirstFileOps()\n"+
			"Error='%v'\n", err.Error())
		return
	}
	if !expectedFileOps.Equal(&actualFOp) {
		t.Error("ERROR: Expected expectedFileOps.Equal(&actualFOp)=='true'.\n" +
			"Instead, expectedFileOps.Equal(&actualFOp)=='false' !!\n")
	}
}
// TestFileOpsCollection_PeekFirstFileOps_02 verifies that peeking the first
// element of an empty (nil) collection returns an error.
func TestFileOpsCollection_PeekFirstFileOps_02(t *testing.T) {
	var fOpsCol FileOpsCollection
	fOpsCol.fileOps = nil
	if _, err := fOpsCol.PeekFirstFileOps(); err == nil {
		t.Error("ERROR: Expected an error return from fOpsCol.PeekFirstFileOps()\n" +
			"because the File Ops Collection is EMPTY!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
// TestFileOpsCollection_PeekLastFileOps_01 verifies that PeekLastFileOps
// returns a copy of the last FileOps element added to the collection.
func TestFileOpsCollection_PeekLastFileOps_01(t *testing.T) {
	sf := []string{
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt",
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt",
	}
	df := []string{
		"../../dirmgrtests/level_0_0_test.txt",
		"../../dirmgrtests/level_0_1_test.txt",
		"../../dirmgrtests/level_0_2_test.txt",
		"../../dirmgrtests/level_0_3_test.txt",
		"../../dirmgrtests/level_0_4_test.txt",
	}
	fOpsCol := FileOpsCollection{}.New()
	var expectedFileOps FileOps
	lastIdx := len(sf) - 1
	for i := range sf {
		fOp, err := FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])
		if err != nil {
			t.Errorf("Error returned by FileOps{}.NewByPathFileNameExtStrs(sf[i], df[i])\n"+
				"i='%v'\nError='%v'\n", i, err.Error())
			return
		}
		if i == lastIdx {
			// The final element added is what PeekLastFileOps must return.
			expectedFileOps = fOp.CopyOut()
		}
		if err = fOpsCol.AddByFileOps(fOp); err != nil {
			t.Errorf("Error returned by fOpsCol.AddByFileOps(fOp). "+
				"i='%v'\nsf[i]='%v'\ndf[i]='%v'\nError='%v'\n",
				i, sf[i], df[i], err.Error())
			return
		}
	}
	actualFOp, err := fOpsCol.PeekLastFileOps()
	if err != nil {
		t.Errorf("Error returned by fOpsCol.PeekLastFileOps()\n"+
			"Error='%v'\n", err.Error())
		return
	}
	if !expectedFileOps.Equal(&actualFOp) {
		t.Error("ERROR: Expected expectedFileOps.Equal(&actualFOp)=='true'.\n" +
			"Instead, expectedFileOps.Equal(&actualFOp)=='false' !!\n")
	}
}
// TestFileOpsCollection_PeekLastFileOps_02 verifies that peeking the last
// element of an empty (nil) collection returns an error.
func TestFileOpsCollection_PeekLastFileOps_02(t *testing.T) {
	var fOpsCol FileOpsCollection
	fOpsCol.fileOps = nil
	if _, err := fOpsCol.PeekLastFileOps(); err == nil {
		t.Error("ERROR: Expected an error return from fOpsCol.PeekLastFileOps()\n" +
			"because the File Ops Collection is EMPTY!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
}
|
package command
import (
"github.com/ross-weir/gort/pkg/config"
"github.com/spf13/cobra"
)
// Runner wires together the root cobra command, build metadata and the
// application configuration for the CLI.
type Runner struct {
	// rootCmd is the top-level cobra command; subcommands hang off it.
	rootCmd *cobra.Command
	// version, commit and date identify the build; supplied by the caller of
	// NewRunner (presumably injected at link time — confirm with the build).
	version, commit, date string
	// cfg holds the application configuration.
	cfg *config.Config
}
// NewRunner builds a Runner carrying the supplied build metadata, creates a
// fresh configuration and registers the root and version commands.
func NewRunner(version, commit, date string) *Runner {
	runner := new(Runner)
	runner.version = version
	runner.commit = commit
	runner.date = date
	runner.cfg = config.New()
	runner.initRoot()
	runner.initVersion()
	return runner
}
// Execute runs the root command and returns whatever error cobra produces.
func (r *Runner) Execute() error {
	return r.rootCmd.Execute()
}
|
package crypto
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"math/big"
)
// ECDH key-agreement helpers.
// Reference: https://asecuritysite.com/encryption/goecdh
var curve = elliptic.P256()
func GenECDHPrivKey() (*ecdsa.PrivateKey, error) {
// Todo: Generate ecdh key in the TPM or at least use randomness generated by the TPM
// Kernel entropy pool could be exhausted in a embedded system causing delay
privKey, err := ecdsa.GenerateKey(curve, rand.Reader)
return privKey, err
}
// concat(X, Y)
func ECCPubKeyToBytes(pubKey *ecdsa.PublicKey) []byte {
var keyBytes []byte
keyBytes = append(keyBytes, pubKey.X.Bytes() ...)
keyBytes = append(keyBytes, pubKey.Y.Bytes() ...)
return keyBytes
}
// BytesToECCPubKey reconstructs a P-256 public key from the 64-byte
// concat(X, Y) form: X from bytes[0:32], Y from bytes[32:64].
func BytesToECCPubKey(bytes []byte) *ecdsa.PublicKey {
	var x, y big.Int
	x.SetBytes(bytes[0:32])
	y.SetBytes(bytes[32:64])
	return &ecdsa.PublicKey{
		Curve: curve,
		X:     &x,
		Y:     &y,
	}
}
// ComputeSessionKey derives a 32-byte session key via ECDH: it multiplies the
// peer's public point by our private scalar, then returns
// SHA-256(sharedX || sharedY || clientRand || serverRand).
// NOTE(review): X.Bytes()/Y.Bytes() strip leading zero bytes, so the hashed
// key material is not fixed-width; two peers running this same code still
// agree, but this will not interoperate with implementations that pad
// coordinates to the curve size — confirm intent before changing.
func ComputeSessionKey(receiverPubKey *ecdsa.PublicKey, myPrivKey *ecdsa.PrivateKey, clientRand [32]byte, serverRand [32]byte) []byte {
	// Classic ECDH: sharedPoint = myPrivateScalar * peerPublicPoint.
	sharedKeyX, sharedKeyY := receiverPubKey.Curve.ScalarMult(receiverPubKey.X, receiverPubKey.Y, myPrivKey.D.Bytes())
	var keyMaterial []byte
	keyMaterial = append(sharedKeyX.Bytes(), sharedKeyY.Bytes() ...)
	keyMaterial = append(keyMaterial, clientRand[:] ...)
	keyMaterial = append(keyMaterial, serverRand[:] ...)
	digest := sha256.Sum256(keyMaterial)
	return digest[:]
}
|
package main
import(
"fmt"
"strings"
"sort"
"strconv"
)
// sortRunes adapts a []rune to sort.Interface so a string's runes can be
// ordered by code point via sort.Sort.
type sortRunes []rune

// Less reports whether the rune at i sorts before the rune at j.
func (s sortRunes) Less(i, j int) bool { return s[i] < s[j] }

// Swap exchanges the runes at positions i and j.
func (s sortRunes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Len returns the number of runes in the slice.
func (s sortRunes) Len() int { return len(s) }

// SortString returns a copy of s with its runes sorted in ascending
// code-point order.
func SortString(s string) string {
	runes := []rune(s)
	sort.Sort(sortRunes(runes))
	return string(runes)
}
func main(){
numbers := 43206523032
strNum := strconv.Itoa(numbers)
split := strings.Split(strNum, "0")
split1 := SortString(split[0])
split2 := SortString(split[1])
split3 := SortString(split[2])
resp := split1 + split2 + split3
reps, _ := strconv.Atoi(resp)
fmt.Println(reps)
} |
package cdkey
import (
"math/rand"
"strconv"
"strings"
"time"
pkgBean "webapi/bean"
)
const letterBytes = "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"
const (
letterIdxBits = 6 // 6 bits to represent a letter index
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
var src = rand.NewSource(time.Now().UnixNano())
func RandStringBytesMaskImprSrc(n int) string {
b := make([]byte, n)
// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
if remain == 0 {
cache, remain = src.Int63(), letterIdxMax
}
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
b[i] = letterBytes[idx]
i--
}
cache >>= letterIdxBits
remain--
}
return string(b)
}
// ParseGiftString parses a comma-separated list of "itemID:count" pairs
// (e.g. "101:2,205:1") into a map of item ID to count.
// Malformed entries and entries with a non-positive ID or count are silently
// skipped, preserving the original best-effort behavior.
//
// Cleanup: strings.Split never returns an empty slice, so the original
// "len > 0" guard was dead code; the manual index loop is now a range loop.
func ParseGiftString(plain string) map[int32]int32 {
	results := make(map[int32]int32)
	for _, pair := range strings.Split(plain, ",") {
		kv := strings.Split(pair, ":")
		if len(kv) != 2 {
			continue
		}
		// Parse errors yield 0, which the positivity check below filters out.
		id, _ := strconv.ParseInt(kv[0], 10, 32)
		count, _ := strconv.ParseInt(kv[1], 10, 32)
		if id > 0 && count > 0 {
			results[int32(id)] = int32(count)
		}
	}
	return results
}
// generateCDKeyString returns num distinct random CD-key strings of the
// given length, using a set to reject duplicates.
// NOTE(review): if num exceeds the number of possible keys for the given
// length this loops forever — confirm callers bound num sensibly.
//
// Cleanup: `for v, _ := range` is unidiomatic (`for v := range` suffices),
// and the manual index counter is replaced with append.
func generateCDKeyString(num, length int) []string {
	unique := make(map[string]bool, num)
	for len(unique) != num {
		unique[RandStringBytesMaskImprSrc(length)] = true
	}
	results := make([]string, 0, num)
	for key := range unique {
		results = append(results, key)
	}
	return results
}
// GenerateWithGiftID creates num unique CD keys of length 9 bound to an
// existing gift id and persists them through pkgBean. It loops because a
// batch insert may store fewer keys than requested (presumably on collisions
// with keys already in the store — confirm against BatchAddCDKeyWithGift).
// NOTE(review): the (error, int, []string) return order is unidiomatic Go
// (error should come last) but is kept for caller compatibility.
func GenerateWithGiftID(category uint32, channel string, num int, deadline int64, gift int) (error, int, []string) {
	results := []string{}
	for num > len(results) {
		// Generate only as many candidates as are still missing.
		cdkeys := generateCDKeyString(num-len(results), 9)
		err, batch := pkgBean.BatchAddCDKeyWithGift(category, channel, gift, deadline, cdkeys)
		if err != nil {
			return err, 0, nil
		}
		results = append(results, batch...)
	}
	return nil, gift, results
}
// Generate first persists a gift record for the given resource map, then
// generates num CD keys bound to the newly created gift id via
// GenerateWithGiftID. The error-first return order mirrors GenerateWithGiftID.
func Generate(category uint32, channel string, num int, deadline int64, resources map[int32]int32) (error, int, []string) {
	err, bean := pkgBean.AddCDKeyGift(resources)
	if err != nil {
		return err, 0, nil
	}
	return GenerateWithGiftID(category, channel, num, deadline, bean.Gift)
}
|
//go:build go1.18
package parquet_test
import (
"sort"
"testing"
"github.com/segmentio/parquet-go"
)
// TestDedupeRowReader verifies that parquet.DedupeRowReader collapses
// consecutive equal rows: 1000 rows with each value repeated three times
// must dedupe down to one row per distinct value.
func TestDedupeRowReader(t *testing.T) {
	type Row struct {
		Value int32 `parquet:"value"`
	}
	// Input: 0,0,0,1,1,1,... — each value appears three times in a row.
	rows := make([]Row, 1000)
	for i := range rows {
		rows[i].Value = int32(i / 3)
	}
	// Expected result: the distinct rows, sorted by value.
	dedupeMap := make(map[Row]struct{}, len(rows))
	for _, row := range rows {
		dedupeMap[row] = struct{}{}
	}
	dedupeRows := make([]Row, 0, len(dedupeMap))
	for row := range dedupeMap {
		dedupeRows = append(dedupeRows, row)
	}
	sort.Slice(dedupeRows, func(i, j int) bool {
		return dedupeRows[i].Value < dedupeRows[j].Value
	})
	buffer1 := parquet.NewRowBuffer[Row]()
	buffer1.Write(rows)
	buffer1Rows := buffer1.Rows()
	defer buffer1Rows.Close()
	// Copy through the dedupe reader into a second buffer, comparing rows on
	// the "value" column in ascending order.
	buffer2 := parquet.NewRowBuffer[Row]()
	_, err := parquet.CopyRows(buffer2,
		parquet.DedupeRowReader(buffer1Rows,
			buffer1.Schema().Comparator(parquet.Ascending("value")),
		),
	)
	if err != nil {
		t.Fatal(err)
	}
	// Read may return io.EOF alongside the final rows; only n matters here.
	reader := parquet.NewGenericRowGroupReader[Row](buffer2)
	defer reader.Close()
	n, _ := reader.Read(rows)
	assertRowsEqual(t, dedupeRows, rows[:n])
}
// TestDedupeRowWriter mirrors TestDedupeRowReader but applies deduplication
// on the write side via parquet.DedupeRowWriter.
func TestDedupeRowWriter(t *testing.T) {
	type Row struct {
		Value int32 `parquet:"value"`
	}
	// Input: each value repeated three times.
	rows := make([]Row, 1000)
	for i := range rows {
		rows[i].Value = int32(i / 3)
	}
	// Expected result: the distinct rows, sorted by value.
	dedupeMap := make(map[Row]struct{}, len(rows))
	for _, row := range rows {
		dedupeMap[row] = struct{}{}
	}
	dedupeRows := make([]Row, 0, len(dedupeMap))
	for row := range dedupeMap {
		dedupeRows = append(dedupeRows, row)
	}
	sort.Slice(dedupeRows, func(i, j int) bool {
		return dedupeRows[i].Value < dedupeRows[j].Value
	})
	buffer1 := parquet.NewRowBuffer[Row]()
	buffer1.Write(rows)
	buffer1Rows := buffer1.Rows()
	defer buffer1Rows.Close()
	// This time the dedupe wrapper sits on the destination writer.
	buffer2 := parquet.NewRowBuffer[Row]()
	_, err := parquet.CopyRows(
		parquet.DedupeRowWriter(buffer2,
			buffer1.Schema().Comparator(parquet.Ascending("value")),
		),
		buffer1Rows,
	)
	if err != nil {
		t.Fatal(err)
	}
	// Read may return io.EOF alongside the final rows; only n matters here.
	reader := parquet.NewGenericRowGroupReader[Row](buffer2)
	defer reader.Close()
	n, _ := reader.Read(rows)
	assertRowsEqual(t, dedupeRows, rows[:n])
}
|
package core
// ModeState enumerates the application's operating mode.
// Fix: the generate directive previously invoked "string"; the Go tool that
// generates String() methods for integer types is "stringer".
//go:generate stringer -type=ModeState
type ModeState uint8
// InputData carries the validated inputs used by the core logic.
type InputData struct {
	// Mode selects the processing mode (see ModeState).
	Mode ModeState
	PrivateKey string
	PublicKey string
}
// RawInputData is the unvalidated counterpart of InputData: Mode is still a
// free-form string (presumably parsed into State — confirm with the caller).
type RawInputData struct {
	Mode string
	State ModeState
	PrivateKey string
	PublicKey string
}
|
package main
import (
"fmt"
"git.code.oa.com/fip-team/fiorm"
"github.com/gin-gonic/gin"
"github.com/spf13/viper"
"os"
"xinxin/service/util"
log "github.com/sirupsen/logrus"
)
// SECURITY(review): the go:generate commands below embed a live database
// host, user and password in version control — rotate this credential and
// source it from an environment variable or local config before reuse.
//go:generate db2struct.exe -host=119.29.87.223 -port=3306 -dbname=shichang -user=root -password=dceb66c7d2408b87f9cb5bcbd16316f59c1ddf06e4837297b9c2ffe08fa1f5504dc4821d4628 -key=shichang -table=basic_daily_data
//go:generate db2struct.exe -host=119.29.87.223 -port=3306 -dbname=shichang -user=root -password=dceb66c7d2408b87f9cb5bcbd16316f59c1ddf06e4837297b9c2ffe08fa1f5504dc4821d4628 -key=shichang -table=basic_daily_history
//go:generate db2struct.exe -host=119.29.87.223 -port=3306 -dbname=shichang -user=root -password=dceb66c7d2408b87f9cb5bcbd16316f59c1ddf06e4837297b9c2ffe08fa1f5504dc4821d4628 -key=shichang -table=goods
//go:generate db2struct.exe -host=119.29.87.223 -port=3306 -dbname=shichang -user=root -password=dceb66c7d2408b87f9cb5bcbd16316f59c1ddf06e4837297b9c2ffe08fa1f5504dc4821d4628 -key=shichang -table=report_file
//go:generate db2struct.exe -host=119.29.87.223 -port=3306 -dbname=shichang -user=root -password=dceb66c7d2408b87f9cb5bcbd16316f59c1ddf06e4837297b9c2ffe08fa1f5504dc4821d4628 -key=shichang -table=report_notify
//go:generate db2struct.exe -host=119.29.87.223 -port=3306 -dbname=shichang -user=root -password=dceb66c7d2408b87f9cb5bcbd16316f59c1ddf06e4837297b9c2ffe08fa1f5504dc4821d4628 -key=shichang -table=user
//go:generate db2struct.exe -host=119.29.87.223 -port=3306 -dbname=shichang -user=root -password=dceb66c7d2408b87f9cb5bcbd16316f59c1ddf06e4837297b9c2ffe08fa1f5504dc4821d4628 -key=shichang -table=vendor
//go:generate db2struct.exe -host=119.29.87.223 -port=3306 -dbname=shichang -user=root -password=dceb66c7d2408b87f9cb5bcbd16316f59c1ddf06e4837297b9c2ffe08fa1f5504dc4821d4628 -key=shichang -table=vendor_price
//go:generate db2struct.exe -host=119.29.87.223 -port=3306 -dbname=shichang -user=root -password=dceb66c7d2408b87f9cb5bcbd16316f59c1ddf06e4837297b9c2ffe08fa1f5504dc4821d4628 -key=shichang -table=vendor_price_history
func main(){
// Creates a router without any middleware by default
r := gin.New()
// Recovery middleware recovers from any panics and writes a 500 if there was one.
r.Use(gin.Recovery())
UseRouters(r)
initLog()
initConfig()
initORM()
r.Run(":8081")
}
// initConfig loads the "config" file from the working directory, allowing
// CONFIG_*-prefixed environment variables to override values; the process
// exits if the file cannot be read.
func initConfig() {
	viper.SetEnvPrefix("config")
	viper.AutomaticEnv()
	viper.SetConfigName("config")
	viper.AddConfigPath("./")
	if err := viper.ReadInConfig(); err != nil {
		fmt.Println(fmt.Errorf("Fatal error when reading config file:%s", err))
		os.Exit(1)
	}
}
// initORM reads the "db" section of the loaded configuration into fiorm
// settings and opens the database connection.
func initORM() {
	dbSettings := &fiorm.DbSettings{}
	util.ReadKey("db", dbSettings)
	fiorm.InitDB(dbSettings)
}
func initLog(){
var filename string = "./logs/log.txt"
// Create the log file if doesn't exist. And append to it if it already exists.
f, err := os.OpenFile(filename, os.O_WRONLY | os.O_APPEND | os.O_CREATE, 0644)
Formatter := new(log.TextFormatter)
// You can change the Timestamp format. But you have to use the same date and time.
// "2006-02-02 15:04:06" Works. If you change any digit, it won't work
// ie "Mon Jan 2 15:04:05 MST 2006" is the reference time. You can't change it
Formatter.TimestampFormat = "02-01-2006 15:04:05"
Formatter.FullTimestamp = true
log.SetFormatter(Formatter)
if err != nil {
// Cannot open log file. Logging to stderr
fmt.Println(err)
}else{
log.SetOutput(f)
}
} |
package server
import (
"context"
golog "log"
"net/http"
"time"
"github.com/gin-gonic/gin"
"github.com/henglory/Demo_Golang_v0.0.1/config"
"github.com/henglory/Demo_Golang_v0.0.1/service"
)
// errorResponse is the JSON error payload returned to HTTP clients.
// NOTE(review): not referenced in this file — presumably used by the demo
// handlers; confirm before removing.
type errorResponse struct {
	StatusCode int64 `json:"statusCode"`
	StatusDesc string `json:"statusDesc"`
	Request string `json:"request"`
}
// Server owns the HTTP listener lifecycle around the business service.
type Server struct {
	// srv is created lazily by ginStart; it is nil until Start has run.
	srv *http.Server
	// s is the business service the HTTP handlers delegate to.
	s service.Service
}
// NewServer wraps the given service in a Server; the HTTP listener itself is
// created later by Start.
func NewServer(s service.Service) *Server {
	return &Server{s: s}
}
// Start launches the HTTP listener on its own goroutine and returns
// immediately; call Close to stop the server.
func (server *Server) Start() {
	go server.ginStart()
}
// Close shuts down the underlying HTTP server if one has been started.
// Fix: guarding against a nil srv prevents a nil-pointer panic when Close is
// called before ginStart has constructed the server (or if Start never ran).
func (server *Server) Close() {
	if server.srv != nil {
		server.srv.Close()
	}
}
// readiness is the JSON body returned by the /readiness probe.
type readiness struct {
	Success bool `json:"success"`
}
// ginStart configures the gin router and blocks serving HTTP until the
// server is closed or fails.
//
// Fix: the receiver is now *Server. With the original value receiver the
// assignment to server.srv mutated a copy, so Close() always observed a nil
// srv and panicked.
func (server *Server) ginStart() {
	gin.SetMode(gin.ReleaseMode)
	r := gin.Default()
	r.GET("/readiness", func(c *gin.Context) {
		c.JSON(200, readiness{Success: true})
	})
	r.POST("/demo1", func(c *gin.Context) {
		demo1(server.s, c)
	})
	r.POST("/demo2", func(c *gin.Context) {
		demo2(server.s, c)
	})
	server.srv = &http.Server{
		Addr:    config.ServicePort,
		Handler: r,
	}
	if err := server.srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		golog.Fatalf("listen: %s\n", err)
	}
	// NOTE(review): ListenAndServe only returns once the server is already
	// closed or has failed, so this shutdown sequence is effectively dead
	// code; kept pending a proper graceful-shutdown refactor.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := server.srv.Shutdown(ctx); err != nil {
		golog.Fatal("Server Shutdown:", err)
	}
}
|
package ch05
// Separate_0_1_2 counts the occurrences of 0's, 1's and 2's in in.
// Per the stated precondition the input contains only 0, 1 and 2; any other
// value falls into the default branch and is counted as a 2, matching the
// original behavior.
func Separate_0_1_2(in []int) (zeros, ones, twos int) {
	for _, val := range in {
		// A switch is the idiomatic form of this three-way classification.
		switch val {
		case 0:
			zeros++
		case 1:
			ones++
		default:
			twos++
		}
	}
	return zeros, ones, twos
}
|
package types
import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
var _ paramtypes.ParamSet = &Params{}
// Parameter keys
var (
KeyOracleRelayers = []byte("OracleRelayers")
)
// ParamKeyTable returns the parameter key table, registering every pair
// declared by Params.ParamSetPairs.
func ParamKeyTable() paramtypes.KeyTable {
	return paramtypes.NewKeyTable().RegisterParamSet(&Params{})
}
// NewParams creates a new Params instance with an empty (non-nil) relayer
// whitelist.
func NewParams() Params {
	return Params{
		Relayers: []string{},
	}
}
// ParamSetPairs returns the parameter set pairs; each pair binds a store key
// to a Params field together with its validation function.
func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs {
	// TODO: @albert, add the rest of the parameters
	return paramtypes.ParamSetPairs{
		paramtypes.NewParamSetPair(KeyOracleRelayers, &p.Relayers, validateRelayers),
	}
}
// DefaultParams returns a default set of parameters.
// It delegates to NewParams so the default relayer list is an empty,
// non-nil slice; the zero-value Params previously returned here carried a
// nil Relayers slice, inconsistent with NewParams (nil and empty slices can
// serialize differently).
func DefaultParams() Params {
	return NewParams()
}
// Validate performs basic validation on auction parameters, currently just
// checking the relayer whitelist.
func (p Params) Validate() error {
	return validateRelayers(p.Relayers)
}
// validateRelayers checks that i is a []string whose entries are all valid
// bech32 account addresses.
func validateRelayers(i interface{}) error {
	relayers, ok := i.([]string)
	if !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	for _, addr := range relayers {
		if _, err := sdk.AccAddressFromBech32(addr); err != nil {
			return err
		}
	}
	return nil
}
|
package pgsql
import (
"database/sql"
"database/sql/driver"
"time"
)
// DateRangeArrayFromTimeArray2Slice returns a driver.Valuer that produces a PostgreSQL daterange[] from the given Go [][2]time.Time.
// Each pair is rendered as [lower,upper) with zero times treated as unbounded.
func DateRangeArrayFromTimeArray2Slice(val [][2]time.Time) driver.Valuer {
	return dateRangeArrayFromTimeArray2Slice{val: val}
}
// DateRangeArrayToTimeArray2Slice returns an sql.Scanner that converts a PostgreSQL daterange[] into a Go [][2]time.Time and sets it to val.
// Unbounded range ends become zero time.Time values.
func DateRangeArrayToTimeArray2Slice(val *[][2]time.Time) sql.Scanner {
	return dateRangeArrayToTimeArray2Slice{val: val}
}
// dateRangeArrayFromTimeArray2Slice adapts a [][2]time.Time to driver.Valuer.
type dateRangeArrayFromTimeArray2Slice struct {
	val [][2]time.Time
}
// Value implements driver.Valuer, rendering the ranges as a PostgreSQL
// daterange[] literal such as {"[2006-01-02,2006-03-04)","(,)"}.
// Semantics: a nil slice maps to SQL NULL; an empty slice to '{}'; a zero
// lower bound becomes an unbounded '('; a zero upper bound is left empty;
// the upper bound is always exclusive ')'.
func (v dateRangeArrayFromTimeArray2Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{'{', '}'}, nil
	}
	out := []byte{'{'}
	for _, a := range v.val {
		// Lower bound: inclusive '[' with a formatted date, or unbounded '('.
		if !a[0].IsZero() {
			out = append(out, '"', '[')
			out = append(out, []byte(a[0].Format(dateLayout))...)
		} else {
			out = append(out, '"', '(')
		}
		out = append(out, ',')
		// Upper bound: empty means unbounded; always closed with ')'.
		if !a[1].IsZero() {
			out = append(out, []byte(a[1].Format(dateLayout))...)
		}
		out = append(out, ')', '"', ',')
	}
	out[len(out)-1] = '}' // replace last "," with "}"
	return out, nil
}
// dateRangeArrayToTimeArray2Slice adapts a *[][2]time.Time to sql.Scanner.
type dateRangeArrayToTimeArray2Slice struct {
	val *[][2]time.Time
}
// Scan implements sql.Scanner, parsing a PostgreSQL daterange[] value into
// *v.val as [lower, upper) time pairs; unbounded ends are left as zero
// time.Time values, and a NULL source nils out the destination slice.
//
// Fix: the nil-source branch previously assigned nil to the receiver copy's
// pointer field (v.val = nil), which never reached the caller; it now clears
// the destination slice itself (*v.val = nil), mirroring how the non-nil
// path writes through the pointer.
func (v dateRangeArrayToTimeArray2Slice) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseQuotedStringArray(data)
	ranges := make([][2]time.Time, len(elems))
	for i, elem := range elems {
		var t0, t1 time.Time
		arr := pgParseRange(elem)
		// An empty bound string means the range is unbounded on that side.
		if len(arr[0]) > 0 {
			if t0, err = pgparsedate(arr[0]); err != nil {
				return err
			}
		}
		if len(arr[1]) > 0 {
			if t1, err = pgparsedate(arr[1]); err != nil {
				return err
			}
		}
		ranges[i][0] = t0
		ranges[i][1] = t1
	}
	*v.val = ranges
	return nil
}
|
package model
// Holder is the persisted record of a certificate holder; the same field
// names are used for the JSON API and the BSON (MongoDB) representation.
// NOTE(review): all timestamps are stored as strings — confirm the expected
// format with the writers of these documents.
type Holder struct {
	UserName string `json:"userName" bson:"userName"`
	SerialNum string `json:"serialNum" bson:"serialNum"`
	CertificateName string `json:"certificateName" bson:"certificateName"`
	IssueTime string `json:"issueTime" bson:"issueTime"`
	IssuingUnit string `json:"issuingUnit" bson:"issuingUnit"`
	IdCard string `json:"idCard" bson:"idCard"`
	Phone string `json:"phone" bson:"phone"`
	CreateTime string `json:"createTime" bson:"createTime"`
	CertificateImg string `json:"certificateImg" bson:"certificateImg"`
}
|
package committer_test
import (
"fmt"
"github.com/TangoEnSkai/committer-go/committer"
"testing"
)
// TestCheckLength exercises committer.CheckLength with a message that is too
// short, one of acceptable length, and one that is too long.
// NOTE(review): the local minLength/maxLength constants mirror limits that
// presumably live inside the committer package — confirm they stay in sync.
func TestCheckLength(t *testing.T) {
	const (
		minLength = 10
		maxLength = 60
		shortCommit = "short"
		validLengthCommit = "the commit length is good enough"
		longCommit = "this commit is toooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo long"
	)
	type args struct {
		m committer.Message
	}
	tests := []struct {
		name string
		args args
		wantErrStr string
		wantOk bool
	}{
		{
			name: "success/valid commit message",
			args: args{
				m: validLengthCommit,
			},
			wantErrStr: "",
			wantOk: true,
		},
		{
			name: "failed/too short",
			args: args{
				m: shortCommit,
			},
			wantErrStr: fmt.Sprintf("\tthe commit `%s` is too short: got(%d), need >= (%d)", shortCommit, len(shortCommit), minLength),
			wantOk: false,
		},
		{
			name: "failed/too long",
			args: args{
				m: longCommit,
			},
			wantErrStr: fmt.Sprintf("\tthe commit `%s` is too long: got(%d), need <= (%d)", longCommit, len(longCommit), maxLength),
			wantOk: false,
		},
	}
	for _, tc := range tests {
		// Pin the range variable so parallel/closure use is safe (pre-Go 1.22 semantics).
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			gotErrStr, gotOk := committer.CheckLength(tc.args.m)
			if gotErrStr != tc.wantErrStr {
				t.Errorf("CheckLength() gotErrStr = %v, want %v", gotErrStr, tc.wantErrStr)
			}
			if gotOk != tc.wantOk {
				t.Errorf("CheckLength() gotOk = %v, want %v", gotOk, tc.wantOk)
			}
		})
	}
}
|
package entity
// QueryLoanListItem is one row of a loan-list query result.
// NOTE(review): all four columns are declared int — presumably dates are
// stored as yyyymmdd integers and amounts in minor currency units; confirm
// against the table schema.
type QueryLoanListItem struct {
	// Loan origination date.
	XdCol4 int `db:"xd_col4" json:"xd_col4" field:"xd_col4"`
	// Maturity (due) date.
	XdCol5 int `db:"xd_col5" json:"xd_col5" field:"xd_col5"`
	// Loan amount.
	XdCol6 int `db:"xd_col6" json:"xd_col6" field:"xd_col6"`
	// Outstanding balance.
	XdCol7 int `db:"xd_col7" json:"xd_col7" field:"xd_col7"`
}
|
package provider
import (
"time"
res "github.com/lucasvmiguel/goauth/auth/resource"
)
var (
	// expireAccessDatetime / expireRefreshDatetime are absolute instants
	// computed once at package init (i.e. relative to process start), not
	// rolling windows from token creation time.
	// NOTE(review): every token therefore expires at the same wall-clock
	// moment regardless of when it was issued — confirm this is intentional;
	// a per-token time.Now().Add(...) would be the usual approach.
	expireAccessDatetime = time.Now().Add(24 * time.Hour) //1 day
	expireRefreshDatetime = time.Now().Add(8760 * time.Hour) //1 year
	tokenType = "Bearer"
)
// Provider should be implemented by any user/token store (in-memory map,
// database, ...).
type Provider interface {
	// InsertUser stores a user keyed by its tokens and returns the record.
	InsertUser(string, string, interface{}) (*res.User, error)
	// RemoveUser deletes the given user from the store.
	RemoveUser(*res.User) error
	// UserByAccessT looks a user up by access token.
	UserByAccessT(string) (*res.User, error)
	// UserByRefreshT looks a user up by refresh token.
	UserByRefreshT(string) (*res.User, error)
	// Debug dumps provider state for diagnostics.
	Debug()
}
|
package main
import (
"testing"
)
// TestShortestDistance checks the search result for the sample neighborhood:
// the shortest route from Kruthika's abode to Craig's haunt must cost 31.
func TestShortestDistance(t *testing.T) {
	neighborhood := []Street{
		{From: "Kruthika's abode", To: "Mark's crib", Distance: 9},
		{From: "Kruthika's abode", To: "Greg's casa", Distance: 4},
		{From: "Kruthika's abode", To: "Matt's pad", Distance: 18},
		{From: "Kruthika's abode", To: "Brian's apartment", Distance: 8},
		{From: "Brian's apartment", To: "Wesley's condo", Distance: 7},
		{From: "Brian's apartment", To: "Cam's dwelling", Distance: 17},
		{From: "Greg's casa", To: "Cam's dwelling", Distance: 13},
		{From: "Greg's casa", To: "Mike's digs", Distance: 19},
		{From: "Greg's casa", To: "Matt's pad", Distance: 14},
		{From: "Wesley's condo", To: "Kirk's farm", Distance: 10},
		{From: "Wesley's condo", To: "Nathan's flat", Distance: 11},
		{From: "Wesley's condo", To: "Bryce's den", Distance: 6},
		{From: "Matt's pad", To: "Mark's crib", Distance: 19},
		{From: "Matt's pad", To: "Nathan's flat", Distance: 15},
		{From: "Matt's pad", To: "Craig's haunt", Distance: 14},
		{From: "Mark's crib", To: "Kirk's farm", Distance: 9},
		{From: "Mark's crib", To: "Nathan's flat", Distance: 12},
		{From: "Bryce's den", To: "Craig's haunt", Distance: 10},
		{From: "Bryce's den", To: "Mike's digs", Distance: 9},
		{From: "Mike's digs", To: "Cam's dwelling", Distance: 20},
		{From: "Mike's digs", To: "Nathan's flat", Distance: 12},
		{From: "Cam's dwelling", To: "Craig's haunt", Distance: 18},
		{From: "Nathan's flat", To: "Kirk's farm", Distance: 3},
	}
	start := "Kruthika's abode"
	target := "Craig's haunt"
	// Index the street list once into an adjacency structure.
	nodes := formatData(neighborhood)
	// Seed per-location path data relative to the start location.
	pathData := initPathDataMap(nodes, &start)
	// Compute shortest routes from the start to all other locations.
	findPath(start, pathData, nodes)
	if got := pathData[target].ShortestDistance; got != 31 {
		t.Errorf("Shortest distance was incorrect, got: %v, want: %v.", got, 31)
	}
}
func TestPathResultString(t *testing.T) {
var Neighborhood = []Street{
Street{From: "Kruthika's abode", To: "Mark's crib", Distance: 9},
Street{From: "Kruthika's abode", To: "Greg's casa", Distance: 4},
Street{From: "Kruthika's abode", To: "Matt's pad", Distance: 18},
Street{From: "Kruthika's abode", To: "Brian's apartment", Distance: 8},
Street{From: "Brian's apartment", To: "Wesley's condo", Distance: 7},
Street{From: "Brian's apartment", To: "Cam's dwelling", Distance: 17},
Street{From: "Greg's casa", To: "Cam's dwelling", Distance: 13},
Street{From: "Greg's casa", To: "Mike's digs", Distance: 19},
Street{From: "Greg's casa", To: "Matt's pad", Distance: 14},
Street{From: "Wesley's condo", To: "Kirk's farm", Distance: 10},
Street{From: "Wesley's condo", To: "Nathan's flat", Distance: 11},
Street{From: "Wesley's condo", To: "Bryce's den", Distance: 6},
Street{From: "Matt's pad", To: "Mark's crib", Distance: 19},
Street{From: "Matt's pad", To: "Nathan's flat", Distance: 15},
Street{From: "Matt's pad", To: "Craig's haunt", Distance: 14},
Street{From: "Mark's crib", To: "Kirk's farm", Distance: 9},
Street{From: "Mark's crib", To: "Nathan's flat", Distance: 12},
Street{From: "Bryce's den", To: "Craig's haunt", Distance: 10},
Street{From: "Bryce's den", To: "Mike's digs", Distance: 9},
Street{From: "Mike's digs", To: "Cam's dwelling", Distance: 20},
Street{From: "Mike's digs", To: "Nathan's flat", Distance: 12},
Street{From: "Cam's dwelling", To: "Craig's haunt", Distance: 18},
Street{From: "Nathan's flat", To: "Kirk's farm", Distance: 3},
}
startLocation := "Kruthika's abode"
targetLocation := "Craig's haunt"
// formats the data into a map so the input array is looped over only once
NodeMap := formatData(Neighborhood)
// holds PathData for each location
LocationPathData := initPathDataMap(NodeMap, &startLocation)
// loops over the NodeMap and determines the shortest route from the startLocation to all other locations
findPath(startLocation, LocationPathData, NodeMap)
path := ReturnPath(LocationPathData, targetLocation)
correctPath := "[\"Kruthika's abode\", \"Brian's apartment\", \"Wesley's condo\", \"Bryce's den\", \"Craig's haunt\"]"
if path != correctPath {
t.Errorf("Shortest distance was incorrect, got: %v, want: %v.", path, correctPath)
}
} |
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-08-07 09:41
# @File : lt_155_Min_Stack_test.go.go
# @Description :
# @Attention :
*/
package stack
import (
"fmt"
"testing"
)
// TestMinStack_GetMin pushes -2, 0, -3 and checks GetMin/Top across a Pop,
// mirroring the LeetCode 155 example sequence.
// Fix: the original only printed values and called methods without asserting
// anything, so it could never fail; it now verifies each expected result.
func TestMinStack_GetMin(t *testing.T) {
	stack := Constructor()
	stack.Push(-2)
	stack.Push(0)
	stack.Push(-3)
	fmt.Println(stack.GetMin())
	if got := stack.GetMin(); got != -3 {
		t.Errorf("GetMin() = %v, want -3", got)
	}
	stack.Pop()
	if got := stack.Top(); got != 0 {
		t.Errorf("Top() = %v, want 0", got)
	}
	if got := stack.GetMin(); got != -2 {
		t.Errorf("GetMin() after Pop = %v, want -2", got)
	}
}
|
// Copyright (c) 2020 Cisco and/or its affiliates.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package main_test
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
)
// TestHealthCheck asserts that the forwarder under test reports SERVING for
// the "networkservice.NetworkService" gRPC health-check service.
func (f *ForwarderTestSuite) TestHealthCheck() {
	ctx, cancel := context.WithTimeout(f.ctx, contextTimeout)
	defer cancel()
	healthClient := grpc_health_v1.NewHealthClient(f.sutCC)
	// WaitForReady blocks the RPC until the connection is up or ctx expires.
	healthResponse, err := healthClient.Check(ctx,
		&grpc_health_v1.HealthCheckRequest{
			Service: "networkservice.NetworkService",
		},
		grpc.WaitForReady(true),
	)
	f.NoError(err)
	f.Require().NotNil(healthResponse)
	f.Equal(grpc_health_v1.HealthCheckResponse_SERVING, healthResponse.Status)
}
|
/*
Copyright 2020-2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multicluster
import (
"context"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
clustercommon "github.com/oam-dev/cluster-gateway/pkg/common"
"github.com/oam-dev/kubevela/apis/types"
)
// This suite exercises virtual-cluster discovery: reading clusters from
// cluster-gateway secrets and OCM ManagedClusters, listing/filtering them,
// and the clusterNameMapper fallback paths (VirtualCluster CRD registered
// vs. not, and API errors).
var _ = Describe("Test Virtual Cluster", func() {
	It("Test Virtual Cluster", func() {
		ClusterGatewaySecretNamespace = "vela-system"
		ctx := context.Background()
		Expect(k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ClusterGatewaySecretNamespace}})).Should(Succeed())
		By("Initialize Secrets")
		// A well-formed cluster secret: credential type, endpoint type, an
		// extra label for filtering, and an alias annotation.
		Expect(k8sClient.Create(ctx, &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: "test-cluster",
				Namespace: ClusterGatewaySecretNamespace,
				Labels: map[string]string{
					clustercommon.LabelKeyClusterCredentialType: string(v1alpha1.CredentialTypeX509Certificate),
					clustercommon.LabelKeyClusterEndpointType: string(v1alpha1.ClusterEndpointTypeConst),
					"key": "value",
				},
				Annotations: map[string]string{v1alpha1.AnnotationClusterAlias: "test-alias"},
			},
		})).Should(Succeed())
		// Valid cluster secret but without the extra "key" label.
		Expect(k8sClient.Create(ctx, &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: "cluster-no-label",
				Namespace: ClusterGatewaySecretNamespace,
				Labels: map[string]string{
					clustercommon.LabelKeyClusterCredentialType: string(v1alpha1.CredentialTypeX509Certificate),
				},
			},
		})).Should(Succeed())
		// Secret without a credential-type label: not a valid cluster.
		Expect(k8sClient.Create(ctx, &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: "cluster-invalid",
				Namespace: ClusterGatewaySecretNamespace,
			},
		})).Should(Succeed())
		By("Test Get Virtual Cluster From Cluster Secret")
		vc, err := GetVirtualCluster(ctx, k8sClient, "test-cluster")
		Expect(err).Should(Succeed())
		Expect(vc.Type).Should(Equal(v1alpha1.CredentialTypeX509Certificate))
		Expect(vc.Labels["key"]).Should(Equal("value"))
		_, err = GetVirtualCluster(ctx, k8sClient, "cluster-not-found")
		Expect(err).ShouldNot(Succeed())
		Expect(err.Error()).Should(ContainSubstring("no such cluster"))
		_, err = GetVirtualCluster(ctx, k8sClient, "cluster-invalid")
		Expect(err).ShouldNot(Succeed())
		Expect(err.Error()).Should(ContainSubstring("not a valid cluster"))
		By("Add OCM ManagedCluster")
		// ManagedCluster without client configs: must be rejected.
		Expect(k8sClient.Create(ctx, &clusterv1.ManagedCluster{
			ObjectMeta: metav1.ObjectMeta{
				Name: "ocm-bad-cluster",
				Namespace: ClusterGatewaySecretNamespace,
			},
		})).Should(Succeed())
		Expect(k8sClient.Create(ctx, &clusterv1.ManagedCluster{
			ObjectMeta: metav1.ObjectMeta{
				Name: "ocm-cluster",
				Namespace: ClusterGatewaySecretNamespace,
				Labels: map[string]string{"key": "value"},
				Annotations: map[string]string{v1alpha1.AnnotationClusterAlias: "ocm-alias"},
			},
			Spec: clusterv1.ManagedClusterSpec{
				ManagedClusterClientConfigs: []clusterv1.ClientConfig{{URL: "test-url"}},
			},
		})).Should(Succeed())
		By("Test Get Virtual Cluster From OCM")
		_, err = GetVirtualCluster(ctx, k8sClient, "ocm-bad-cluster")
		Expect(err).ShouldNot(Succeed())
		Expect(err.Error()).Should(ContainSubstring("has no client config"))
		vc, err = GetVirtualCluster(ctx, k8sClient, "ocm-cluster")
		Expect(err).Should(Succeed())
		Expect(vc.Type).Should(Equal(types.CredentialTypeOCMManagedCluster))
		By("Test List Virtual Clusters")
		// 4 = local + test-cluster + cluster-no-label + ocm-cluster.
		vcs, err := ListVirtualClusters(ctx, k8sClient)
		Expect(err).Should(Succeed())
		Expect(len(vcs)).Should(Equal(4))
		// Only test-cluster and ocm-cluster carry the key=value label.
		vcs, err = FindVirtualClustersByLabels(ctx, k8sClient, map[string]string{"key": "value"})
		Expect(err).Should(Succeed())
		Expect(len(vcs)).Should(Equal(2))
		By("Test virtual cluster list for clusterNameMapper")
		// fakeClient serves a canned VirtualClusterList (see below) and can
		// simulate API errors and an unregistered CRD.
		cli := fakeClient{Client: k8sClient}
		cnm, err := NewClusterNameMapper(ctx, cli)
		Expect(err).Should(Succeed())
		Expect(cnm.GetClusterName("example")).Should(Equal("example (example-alias)"))
		Expect(cnm.GetClusterName("no-alias")).Should(Equal("no-alias"))
		cli.returnBadRequest = true
		_, err = NewClusterNameMapper(ctx, cli)
		Expect(err).Should(Satisfy(errors.IsBadRequest))
		cli.returnBadRequest = false
		cli.virtualClusterNotRegistered = true
		// With the CRD unregistered the mapper falls back to secrets/OCM.
		cnm, err = NewClusterNameMapper(ctx, cli)
		Expect(err).Should(Succeed())
		Expect(cnm.GetClusterName("example")).Should(Equal("example"))
		Expect(cnm.GetClusterName("test-cluster")).Should(Equal("test-cluster (test-alias)"))
		Expect(cnm.GetClusterName("ocm-cluster")).Should(Equal("ocm-cluster (ocm-alias)"))
		cli.returnBadRequest = true
		cli.virtualClusterNotRegistered = true
		_, err = NewClusterNameMapper(ctx, cli)
		Expect(err).ShouldNot(Succeed())
	})
	It("Test Cluster Version Get and Set", func() {
		ClusterGatewaySecretNamespace = "vela-system2"
		ctx := context.Background()
		Expect(k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ClusterGatewaySecretNamespace}})).Should(Succeed())
		cv, err := GetVersionInfoFromCluster(ctx, "local", cfg)
		Expect(err).Should(BeNil())
		Expect(cv.Minor).Should(Not(BeEquivalentTo("")))
		Expect(cv.Major).Should(BeEquivalentTo("1"))
	})
})
// fakeClient wraps a real client and lets tests inject failures into List:
// a BadRequest error, or a scheme-not-registered error for VirtualCluster
// lists.
type fakeClient struct {
	client.Client
	returnBadRequest            bool
	virtualClusterNotRegistered bool
}
// List serves canned VirtualCluster fixtures (or injected errors) and
// delegates every other list type to the embedded client.
func (c fakeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	// When only returnBadRequest is set, every List call fails up front.
	if !c.virtualClusterNotRegistered && c.returnBadRequest {
		return errors.NewBadRequest("")
	}
	if src, ok := list.(*v1alpha1.VirtualClusterList); ok {
		// Simulate a cluster where the VirtualCluster kind is not
		// registered in the scheme at all.
		if c.virtualClusterNotRegistered {
			return runtime.NewNotRegisteredErrForKind("", schema.GroupVersionKind{})
		}
		// Canned fixture: one aliased cluster and one without an alias.
		objs := &v1alpha1.VirtualClusterList{Items: []v1alpha1.VirtualCluster{{
			ObjectMeta: metav1.ObjectMeta{Name: "example"},
			Spec:       v1alpha1.VirtualClusterSpec{Alias: "example-alias"},
		}, {
			ObjectMeta: metav1.ObjectMeta{Name: "no-alias"},
		}}}
		objs.DeepCopyInto(src)
		return nil
	}
	// Non-VirtualCluster lists can still be forced to fail (this branch is
	// reached when virtualClusterNotRegistered is also set).
	if c.returnBadRequest {
		return errors.NewBadRequest("")
	}
	return c.Client.List(ctx, list, opts...)
}
|
package mutex_learn
import (
"fmt"
"math/rand"
"sync"
"sync/atomic"
"testing"
"time"
"unsafe"
)
// Constants copied from sync.Mutex's internal state encoding.
const (
	mutexLocked = 1 << iota // bit 0: lock is held
	mutexWoken              // bit 1: a waiter has been woken
	mutexStarving           // bit 2: mutex is in starvation mode
	mutexWaiterShift = iota // waiter count starts at this bit
)

// Mutex embeds sync.Mutex so state-inspection helpers can be added.
type Mutex struct {
	sync.Mutex
}

// Count returns the total number of goroutines holding or waiting for the
// lock: the waiter count stored above mutexWaiterShift, plus one if the
// lock is currently held. The state word is read without taking the lock,
// so the result is only a snapshot of that instant.
func (m *Mutex) Count() int {
	// Read sync.Mutex's unexported state word (its first int32) via unsafe.
	state := atomic.LoadInt32((*int32)(unsafe.Pointer(&m.Mutex)))
	count := state >> mutexWaiterShift // number of waiters
	// BUG FIX: the holder bit must come from the ORIGINAL state. The old
	// code masked the already-shifted value, which tested bit 0 of the
	// waiter count instead of the lock bit.
	count += state & mutexLocked // +1 if the lock is held
	return int(count)
}
// IsLocked reports whether the lock is currently held (snapshot read).
func (m *Mutex) IsLocked() bool {
	return atomic.LoadInt32((*int32)(unsafe.Pointer(&m.Mutex)))&mutexLocked != 0
}
// IsWoken reports whether a waiter has been woken (snapshot read).
func (m *Mutex) IsWoken() bool {
	return atomic.LoadInt32((*int32)(unsafe.Pointer(&m.Mutex)))&mutexWoken != 0
}
// IsStarving reports whether the mutex is in starvation mode (snapshot read).
func (m *Mutex) IsStarving() bool {
	return atomic.LoadInt32((*int32)(unsafe.Pointer(&m.Mutex)))&mutexStarving != 0
}
/*
The first CAS is a fast path: if nobody holds or competes for the lock, this
goroutine takes it and returns immediately. If the lock is already held, or
another woken goroutine is about to take it, or the mutex is starving, we
return false without competing. Otherwise the lock is free but the state has
other bits set (e.g. waiters), so we attempt one more CAS; other goroutines
may also be racing for the lock at this moment, so success is not guaranteed.
*/
// TryLock attempts to acquire the lock without blocking and reports whether
// it succeeded.
func (m *Mutex) TryLock() bool {
	// Fast path: grab a completely idle mutex (state == 0).
	if atomic.CompareAndSwapInt32((*int32)(unsafe.Pointer(&m.Mutex)), 0, mutexLocked) {
		return true
	}
	// Locked, woken, or starving: do not compete this time.
	old := atomic.LoadInt32((*int32)(unsafe.Pointer(&m.Mutex)))
	if old&(mutexLocked|mutexStarving|mutexWoken) != 0 {
		return false
	}
	// The lock is free but contended state bits exist: try once to claim it
	// while preserving the rest of the state word.
	new := old | mutexLocked
	return atomic.CompareAndSwapInt32((*int32)(unsafe.Pointer(&m.Mutex)), old, new)
}
// TestVar prints the mutex state-bit constants for manual inspection.
func TestVar(t *testing.T) {
	for _, v := range []int{mutexLocked, mutexWaiterShift} {
		fmt.Println(v)
	}
}
// TestTryLock runs the lock/TryLock race demonstration once.
func TestTryLock(t *testing.T) {
	try()
}
// try starts a goroutine that holds the lock for a random 0s or 1s, waits
// one second, then attempts TryLock and reports the outcome on stdout.
func try() {
	var mu Mutex
	go func() { // hold the lock for a while
		mu.Lock()
		time.Sleep(time.Duration(rand.Intn(2)) * time.Second)
		mu.Unlock()
	}()
	time.Sleep(time.Second)
	ok := mu.TryLock() // non-blocking acquire attempt
	if ok { // acquired
		fmt.Println("got the lock")
		// do something
		mu.Unlock()
		return
	}
	// the lock was still held or contended
	fmt.Println("can't get the lock")
}
/*
How to obtain metrics such as the waiter count:

sync.Mutex holds two fields, state and sema; its first four bytes (an int32)
are the state field. state encodes whether the lock is held, whether it is
starving, whether a waiter has been woken, and how many goroutines are
waiting. The field is unexported, so we read it through unsafe.

Note that the state is read without holding the lock, so the value is a
transient snapshot: it may already be stale once decoded. That is fine — it
describes the lock at the moment of the call.
*/
// TestLockGoRoutineCount launches 1000 goroutines that contend on one lock,
// then prints the decoded lock state after a second.
func TestLockGoRoutineCount(t *testing.T) {
	var mu Mutex
	for i := 0; i < 1000; i++ { // start 1000 contending goroutines
		go func() {
			mu.Lock()
			time.Sleep(time.Second)
			mu.Unlock()
		}()
	}
	time.Sleep(time.Second)
	// print the decoded lock state
	fmt.Printf("waitings: %d, isLocked: %t, woken: %t, starving: %t\n", mu.Count(), mu.IsLocked(), mu.IsWoken(), mu.IsStarving())
}
package merrors
// Error codes returned by the API. ERR_OCT_SUCCESS must stay 0; the rest
// are sequential iota values, so new codes should only be appended at the
// very end to keep existing values stable. (The ALL_CAPS names predate Go
// naming conventions and are kept for compatibility with callers.)
const (
	ERR_OCT_SUCCESS = iota
	ERR_DB_ERR
	ERR_NOT_ENOUGH_PARAS
	ERR_TOO_MANY_PARAS
	ERR_UNACCP_PARAS
	ERR_CMD_ERR
	ERR_COMMON_ERR
	ERR_SEGMENT_NOT_EXIST
	ERR_SEGMENT_ALREADY_EXIST
	ERR_TIMEOUT
	ERR_SYSCALL_ERR
	ERR_SYSTEM_ERR
	ERR_NO_SUCH_API
	ERR_NOT_IMPLEMENTED
	// User
	ERR_USER_NOT_EXIST
	ERR_USER_ALREADY_EXIST
	ERR_PASSWORD_DONT_MATCH
	ERR_USER_NOT_LOGIN
	ERR_USER_GROUPS_NOT_EMPTY
	// User Group
	ERR_USERGROUP_NOT_EXIST
	ERR_USERGROUP_ALREADY_EXIST
	ERR_USERGROUP_USERS_NOT_EMPTY
)
// GErrors maps each error code to its English message (see GetMsg).
var GErrors = map[int]string{
	ERR_OCT_SUCCESS:           "Command Success",
	ERR_DB_ERR:                "Database Error",
	ERR_NOT_ENOUGH_PARAS:      "No Enough Paras",
	ERR_TOO_MANY_PARAS:        "Too Many Paras",
	ERR_UNACCP_PARAS:          "Unaccept Paras",
	ERR_CMD_ERR:               "Command Error",
	ERR_COMMON_ERR:            "Common Error",
	ERR_SEGMENT_NOT_EXIST:     "Segment Not Exist",
	ERR_SEGMENT_ALREADY_EXIST: "Segment Already Exist",
	ERR_TIMEOUT:               "Timeout Error",
	ERR_SYSCALL_ERR:           "System Call Error",
	ERR_SYSTEM_ERR:            "System Error",
	ERR_NO_SUCH_API:           "No Such API",
	ERR_NOT_IMPLEMENTED:       "Function not Implemented",
	// User
	ERR_USER_NOT_EXIST:        "User Not Exist",
	ERR_USER_ALREADY_EXIST:    "User Already Exist",
	ERR_PASSWORD_DONT_MATCH:   "User And Password Not Match",
	ERR_USER_NOT_LOGIN:        "User Not Login",
	ERR_USER_GROUPS_NOT_EMPTY: "Groups under Account must be empty",
	// User group
	ERR_USERGROUP_NOT_EXIST:       "User Group Not Exist",
	ERR_USERGROUP_ALREADY_EXIST:   "User Group Already Exist",
	ERR_USERGROUP_USERS_NOT_EMPTY: "Users under Group must be empty",
}
// GErrorsCN maps each error code to its Chinese message (see GetMsgCN).
// Keep its key set in sync with GErrors.
var GErrorsCN = map[int]string{
	ERR_OCT_SUCCESS:           "操作成功",
	ERR_DB_ERR:                "数据库错误",
	ERR_NOT_ENOUGH_PARAS:      "参数不足",
	ERR_TOO_MANY_PARAS:        "太多参数",
	ERR_UNACCP_PARAS:          "参数不合法",
	ERR_CMD_ERR:               "命令执行错误",
	ERR_COMMON_ERR:            "通用错误",
	ERR_SEGMENT_NOT_EXIST:     "对象不存在",
	ERR_SEGMENT_ALREADY_EXIST: "对象已存在",
	ERR_TIMEOUT:               "超时错误",
	ERR_SYSCALL_ERR:           "系统调用错误",
	ERR_SYSTEM_ERR:            "系统错误",
	ERR_NO_SUCH_API:           "无此API",
	ERR_NOT_IMPLEMENTED:       "功能未实现",
	// User
	ERR_USER_NOT_EXIST:        "用户不存在",
	ERR_USER_ALREADY_EXIST:    "用户已经存在",
	ERR_PASSWORD_DONT_MATCH:   "用户和密码不匹配",
	ERR_USER_NOT_LOGIN:        "用户未登录",
	ERR_USER_GROUPS_NOT_EMPTY: "该账号下的用户组不为空",
	// User group
	ERR_USERGROUP_NOT_EXIST:       "用户组不存在",
	ERR_USERGROUP_ALREADY_EXIST:   "用户组已经存在",
	ERR_USERGROUP_USERS_NOT_EMPTY: "该用户组下的用户不为空",
}
// MirageError is a serializable application error carrying a numeric code
// and a human-readable message.
type MirageError struct {
	ErrorNo  int    `json:"no"`
	ErrorMsg string `json:"msg"`
}

// NewError builds a MirageError from the given code and message.
func NewError(code int, message string) *MirageError {
	e := MirageError{
		ErrorNo:  code,
		ErrorMsg: message,
	}
	return &e
}
// GetMsg returns the English message for errorNo ("" if unknown).
func GetMsg(errorNo int) string {
	return GErrors[errorNo]
}

// GetMsgCN returns the Chinese message for errorNo ("" if unknown).
func GetMsgCN(errorNo int) string {
	return GErrorsCN[errorNo]
}
|
package controller
import (
"github.com/patrickeasters/ipa-cert-operator/pkg/controller/ipacert"
)
func init() {
	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
	// Register the ipacert controller's Add hook. AddToManagerFuncs is
	// declared elsewhere in this package.
	AddToManagerFuncs = append(AddToManagerFuncs, ipacert.Add)
}
|
package main
import (
"log"
"html/template"
"fmt"
"net/http"
)
func helloWorld(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello World")
}
func login(w http.ResponseWriter, r *http.Request) {
fmt.Println("method:", r.Method)
if r.Method == "GET" {
//template.ParseFiles("")創建模板回傳到t 並解析文本""
t, _ := template.ParseFiles("login.html")
//t.Execute執行解析後的模板,執行過成為合併、替換文本的{{.}}
log.Println(t.Execute(w, "HelloWorld"))
} else {
r.ParseForm()//server端解析資料表單
fmt.Println("username:", r.Form["username"])
//r.Form 接收來自客戶端,html定義,符合 "username" "password"的資訊
fmt.Println("password:", r.Form["password"])
}
}
// main wires the two handlers and serves HTTP on :8080 until the listener
// fails.
func main() {
	http.HandleFunc("/", helloWorld)
	http.HandleFunc("/login", login)
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal("ListenAndServe:", err)
	}
}
package main
import "testing"
// BenchmarkFib measures fib(30); fib is defined elsewhere in this package.
func BenchmarkFib(b *testing.B) {
	for i := 0; i < b.N; i++ {
		fib(30)
	}
}
|
package endpoints
import (
"encoding/json"
"github.com/pquerna/ffjson/ffjson"
"github.com/valyala/fasthttp"
"log"
"technodb-final/app/dbhandlers"
"technodb-final/app/models"
)
// UserCreate handles user creation for /user/{nickname}: decodes the profile
// from the body and answers 201 with the created user, 409 with the
// conflicting users, or 400 on bad input / other errors.
func UserCreate(ctx *fasthttp.RequestCtx) {
	var user models.User
	nickname := ctx.UserValue("nickname").(string)
	user.Nickname = &nickname
	err := json.Unmarshal(ctx.PostBody(), &user)
	if err != nil {
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		//ctx.WriteString(err.Error())
		return
	}
	users, err := dbhandlers.UserCreate(&user)
	if err == nil {
		resp, _ := json.Marshal(users)
		// NOTE(review): slicing off the first and last byte assumes users
		// marshals to a one-element JSON array whose brackets are stripped
		// to return the bare object — confirm against dbhandlers.UserCreate.
		ctx.SetBody(resp[1 : len(resp)-1])
		ctx.SetStatusCode(fasthttp.StatusCreated)
	} else if err == dbhandlers.UserErrors["conflict"] {
		// On conflict the full list of clashing users is returned as-is.
		resp, _ := json.Marshal(users)
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusConflict)
	} else {
		//ctx.WriteString(err.Error())
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
	}
}
// UserUpdate handles profile updates for /user/{nickname}: decodes the new
// fields and answers 200 with the updated user, 404 when the user does not
// exist, 409 on a conflicting update, or 400 on a malformed body.
func UserUpdate(ctx *fasthttp.RequestCtx) {
	var user models.User
	nickname := ctx.UserValue("nickname").(string)
	user.Nickname = &nickname
	if err := json.Unmarshal(ctx.PostBody(), &user); err != nil {
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		//ctx.WriteString(err.Error())
		return
	}
	res, err := dbhandlers.UpdateUser(&user)
	resp, _ := json.Marshal(res)
	if err == dbhandlers.UserErrors["conflict"] {
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusConflict)
	} else if err == dbhandlers.UserErrors["none"] {
		// "none" presumably means "user not found"; the error value itself
		// is marshalled as the 404 body — TODO confirm against dbhandlers.
		resp, _ = ffjson.Marshal(err)
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusNotFound)
	} else if err == nil {
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusOK)
	} else {
		// Unexpected DB error: logged; response keeps its default status.
		log.Print(err)
	}
}
// UserInfo handles GET of a user profile by nickname: answers 200 with the
// user, or 404 (with the error marshalled as the body) when not found.
func UserInfo(ctx *fasthttp.RequestCtx) {
	nickname := ctx.UserValue("nickname").(string)
	res, err := dbhandlers.GetUserByNickname(nickname)
	resp, _ := json.Marshal(res)
	if err == dbhandlers.UserErrors["conflict"] {
		// NOTE(review): a conflict branch on a pure read looks odd —
		// confirm GetUserByNickname can actually return this error.
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusConflict)
	} else if err == dbhandlers.UserErrors["none"] {
		resp, _ = ffjson.Marshal(err)
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusNotFound)
	} else if err == nil {
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusOK)
	} else {
		// Unexpected DB error: logged; response keeps its default status.
		log.Print(err)
	}
}
|
package main
// Entity is a named game object placed in a world.
type Entity struct {
	name     string
	glyph    *Glyph // visual representation; type declared elsewhere
	world    *World // owning world
	position *Point // current location
}

// Move teleports the entity to point; no bounds or collision checks here.
func (entity *Entity) Move(point *Point) {
	entity.position = point
}

// func (entity *Entity) MoveBy(offset ...int) {
// 	entity.position = entity.position.Add(offset...)
// }

// NewEntity returns an Entity with only its name set; glyph, world and
// position stay nil until assigned by the caller.
func NewEntity(name string) *Entity {
	return &Entity{name: name}
}
|
package main
import "fmt"
// Demo prints a fixed greeting to stdout.
func Demo() {
	fmt.Print("HI\n")
}
|
/*
* @lc app=leetcode id=102 lang=golang
*
* [102] Binary Tree Level Order Traversal
*
* https://leetcode.com/problems/binary-tree-level-order-traversal/description/
*
* algorithms
* Medium (50.99%)
* Likes: 1966
* Dislikes: 54
* Total Accepted: 467.3K
* Total Submissions: 916.3K
* Testcase Example: '[3,9,20,null,null,15,7]'
*
* Given a binary tree, return the level order traversal of its nodes' values.
* (ie, from left to right, level by level).
*
*
* For example:
* Given binary tree [3,9,20,null,null,15,7],
*
* 3
* / \
* 9 20
* / \
* 15 7
*
*
*
* return its level order traversal as:
*
* [
* [3],
* [9,20],
* [15,7]
* ]
*
*
*/
// @lc code=start
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// printNodes records the values of one tree level into results (skipping
// empty levels) and returns the nodes that make up the next level.
func printNodes(list []*TreeNode, results *[][]int) []*TreeNode {
	next := make([]*TreeNode, 0, len(list)*2)
	values := make([]int, 0, len(list))
	for _, node := range list {
		values = append(values, node.Val)
		for _, child := range []*TreeNode{node.Left, node.Right} {
			if child != nil {
				next = append(next, child)
			}
		}
	}
	if len(values) > 0 {
		*results = append(*results, values)
	}
	return next
}
// levelOrder returns the tree's values grouped by depth, left to right.
func levelOrder(root *TreeNode) [][]int {
	result := [][]int{}
	if root == nil {
		return result
	}
	// Process one level per iteration; printNodes yields the next level.
	for level := []*TreeNode{root}; len(level) > 0; {
		level = printNodes(level, &result)
	}
	return result
}
// @lc code=end
|
/*
Take the code from the previous exercise, then store the values of type person in a map
with the key of last name. Access each value in the map. Print out the values, ranging over the slice.
*/
package main
import "fmt"
// person is a simple record used by main to demonstrate maps of structs.
type person struct {
	firstname               string
	lastname                string
	favoriteIceCreamFlavors []string
}
// main builds two people, indexes them by last name, and prints each map
// entry (iteration order is unspecified).
func main() {
	people := []person{
		{
			"Jotaro",
			"Kujo",
			[]string{"Vanilla", "Tripple Caramel Chunk", "Cookie Dough"},
		},
		{
			"Joseph",
			"Joestar",
			[]string{"NewYork Super Fudge Chunck", "Cherry Garcia", "Chocolate Fudge Brownie"},
		},
	}
	m := make(map[string]person, len(people))
	for _, p := range people {
		m[p.lastname] = p
	}
	for k, v := range m {
		fmt.Printf("Key %v have the value: %+v \n", k, v)
	}
}
|
package webscraper
import (
"fmt"
"testing"
)
// TestGetLinksWithDivClass checks that GetLinksWithDivClass extracts the
// anchor hrefs nested under divs carrying the given class.
func TestGetLinksWithDivClass(t *testing.T) {
	// GIVEN
	pageStr := `
<html>
<head>
<title>THIS IS THE TITLE</title>
</head>
<body>
<div class="product ">
<div class="productInner">
<div class="productInfoWrapper">
<div class="productInfo">
<h3>
<a href="http://www.sainsburys.co.uk/shop/gb/groceries/ripe---ready/sainsburys-avocado--ripe---ready-x2" >
Sainsbury's Avocado, Ripe & Ready x2
<img src="http://c2.sainsburys.co.uk/wcsstore7.20.1.145/ExtendedSitesCatalogAssetStore/images/catalog/productImages/22/0000001600322/0000001600322_M.jpeg" alt="" />
</a>
</h3>
</div>
</div>
</div>
</div>
<div class="product ">
<div class="productInner">
<div class="productInfoWrapper">
<div class="productInfo">
<h3>
<a href="http://www.sainsburys.co.uk/shop/gb/groceries/ripe---ready/sainsburys-avocados--ripe---ready-x4" >
Sainsbury's Avocados, Ripe & Ready x4
<img src="http://c2.sainsburys.co.uk/wcsstore7.20.1.145/ExtendedSitesCatalogAssetStore/images/catalog/productImages/15/0000000184915/0000000184915_M.jpeg" alt="" />
</a>
</h3>
</div>
</div>
</div>
</div>
</body>
</html>
`
	// WHEN
	urls := GetLinksWithDivClass([]byte(pageStr), "productInfo")
	fmt.Println(len(urls))
	for _, url := range urls {
		fmt.Println("urls", url)
	}
	// THEN
	expects := []string{
		"http://www.sainsburys.co.uk/shop/gb/groceries/ripe---ready/sainsburys-avocado--ripe---ready-x2",
		"http://www.sainsburys.co.uk/shop/gb/groceries/ripe---ready/sainsburys-avocados--ripe---ready-x4",
	}
	// Fail cleanly on a count mismatch: the old version indexed urls[i]
	// blindly and panicked when fewer URLs than expected were returned.
	if len(urls) != len(expects) {
		t.Fatalf("expected %d urls, got %d", len(expects), len(urls))
	}
	for i, expect := range expects {
		if urls[i] != expect {
			t.Errorf("Test %d failed, Expected '%s', got '%s'", i, expect, urls[i])
		}
	}
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/gkehub/beta/gkehub_beta_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta"
)
// FeatureServer implements the gRPC interface for Feature.
// It is stateless: each RPC builds its own client from the request's
// service account file via createConfigFeature.
type FeatureServer struct{}
// ProtoToFeatureResourceStateStateEnum converts a FeatureResourceStateStateEnum enum from its proto representation.
func ProtoToGkehubBetaFeatureResourceStateStateEnum(e betapb.GkehubBetaFeatureResourceStateStateEnum) *beta.FeatureResourceStateStateEnum {
if e == 0 {
return nil
}
if n, ok := betapb.GkehubBetaFeatureResourceStateStateEnum_name[int32(e)]; ok {
e := beta.FeatureResourceStateStateEnum(n[len("GkehubBetaFeatureResourceStateStateEnum"):])
return &e
}
return nil
}
// ProtoToFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum converts a FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum enum from its proto representation.
func ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(e betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum) *beta.FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum {
if e == 0 {
return nil
}
if n, ok := betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum_name[int32(e)]; ok {
e := beta.FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(n[len("GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum"):])
return &e
}
return nil
}
// ProtoToFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum converts a FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum enum from its proto representation.
func ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(e betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum) *beta.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum {
if e == 0 {
return nil
}
if n, ok := betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum_name[int32(e)]; ok {
e := beta.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(n[len("GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum"):])
return &e
}
return nil
}
// ProtoToFeatureStateStateCodeEnum converts a FeatureStateStateCodeEnum enum from its proto representation.
func ProtoToGkehubBetaFeatureStateStateCodeEnum(e betapb.GkehubBetaFeatureStateStateCodeEnum) *beta.FeatureStateStateCodeEnum {
if e == 0 {
return nil
}
if n, ok := betapb.GkehubBetaFeatureStateStateCodeEnum_name[int32(e)]; ok {
e := beta.FeatureStateStateCodeEnum(n[len("GkehubBetaFeatureStateStateCodeEnum"):])
return &e
}
return nil
}
// ProtoToFeatureResourceState converts a FeatureResourceState object from its proto representation.
func ProtoToGkehubBetaFeatureResourceState(p *betapb.GkehubBetaFeatureResourceState) *beta.FeatureResourceState {
if p == nil {
return nil
}
obj := &beta.FeatureResourceState{
State: ProtoToGkehubBetaFeatureResourceStateStateEnum(p.GetState()),
HasResources: dcl.Bool(p.GetHasResources()),
}
return obj
}
// ProtoToFeatureSpec converts a FeatureSpec object from its proto representation.
func ProtoToGkehubBetaFeatureSpec(p *betapb.GkehubBetaFeatureSpec) *beta.FeatureSpec {
if p == nil {
return nil
}
obj := &beta.FeatureSpec{
Multiclusteringress: ProtoToGkehubBetaFeatureSpecMulticlusteringress(p.GetMulticlusteringress()),
Fleetobservability: ProtoToGkehubBetaFeatureSpecFleetobservability(p.GetFleetobservability()),
}
return obj
}
// ProtoToFeatureSpecMulticlusteringress converts a FeatureSpecMulticlusteringress object from its proto representation.
func ProtoToGkehubBetaFeatureSpecMulticlusteringress(p *betapb.GkehubBetaFeatureSpecMulticlusteringress) *beta.FeatureSpecMulticlusteringress {
if p == nil {
return nil
}
obj := &beta.FeatureSpecMulticlusteringress{
ConfigMembership: dcl.StringOrNil(p.GetConfigMembership()),
}
return obj
}
// ProtoToFeatureSpecFleetobservability converts a FeatureSpecFleetobservability object from its proto representation.
func ProtoToGkehubBetaFeatureSpecFleetobservability(p *betapb.GkehubBetaFeatureSpecFleetobservability) *beta.FeatureSpecFleetobservability {
if p == nil {
return nil
}
obj := &beta.FeatureSpecFleetobservability{
LoggingConfig: ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfig(p.GetLoggingConfig()),
}
return obj
}
// ProtoToFeatureSpecFleetobservabilityLoggingConfig converts a FeatureSpecFleetobservabilityLoggingConfig object from its proto representation.
func ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfig(p *betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfig) *beta.FeatureSpecFleetobservabilityLoggingConfig {
if p == nil {
return nil
}
obj := &beta.FeatureSpecFleetobservabilityLoggingConfig{
DefaultConfig: ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(p.GetDefaultConfig()),
FleetScopeLogsConfig: ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(p.GetFleetScopeLogsConfig()),
}
return obj
}
// ProtoToFeatureSpecFleetobservabilityLoggingConfigDefaultConfig converts a FeatureSpecFleetobservabilityLoggingConfigDefaultConfig object from its proto representation.
func ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(p *betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfig) *beta.FeatureSpecFleetobservabilityLoggingConfigDefaultConfig {
if p == nil {
return nil
}
obj := &beta.FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{
Mode: ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(p.GetMode()),
}
return obj
}
// ProtoToFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig converts a FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig object from its proto representation.
func ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(p *betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) *beta.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig {
if p == nil {
return nil
}
obj := &beta.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{
Mode: ProtoToGkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(p.GetMode()),
}
return obj
}
// ProtoToFeatureState converts a FeatureState object from its proto representation.
func ProtoToGkehubBetaFeatureState(p *betapb.GkehubBetaFeatureState) *beta.FeatureState {
if p == nil {
return nil
}
obj := &beta.FeatureState{
State: ProtoToGkehubBetaFeatureStateState(p.GetState()),
}
return obj
}
// ProtoToFeatureStateState converts a FeatureStateState object from its proto representation.
func ProtoToGkehubBetaFeatureStateState(p *betapb.GkehubBetaFeatureStateState) *beta.FeatureStateState {
if p == nil {
return nil
}
obj := &beta.FeatureStateState{
Code: ProtoToGkehubBetaFeatureStateStateCodeEnum(p.GetCode()),
Description: dcl.StringOrNil(p.GetDescription()),
UpdateTime: dcl.StringOrNil(p.GetUpdateTime()),
}
return obj
}
// ProtoToFeature converts a Feature resource from its proto representation.
// NOTE(review): Labels are not copied here even though FeatureToProto writes
// them on the way out — confirm whether this asymmetry is intentional in
// the generator.
func ProtoToFeature(p *betapb.GkehubBetaFeature) *beta.Feature {
	obj := &beta.Feature{
		Name:          dcl.StringOrNil(p.GetName()),
		ResourceState: ProtoToGkehubBetaFeatureResourceState(p.GetResourceState()),
		Spec:          ProtoToGkehubBetaFeatureSpec(p.GetSpec()),
		State:         ProtoToGkehubBetaFeatureState(p.GetState()),
		CreateTime:    dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:    dcl.StringOrNil(p.GetUpdateTime()),
		DeleteTime:    dcl.StringOrNil(p.GetDeleteTime()),
		Project:       dcl.StringOrNil(p.GetProject()),
		Location:      dcl.StringOrNil(p.GetLocation()),
	}
	return obj
}
// FeatureResourceStateStateEnumToProto converts a FeatureResourceStateStateEnum enum to its proto representation.
func GkehubBetaFeatureResourceStateStateEnumToProto(e *beta.FeatureResourceStateStateEnum) betapb.GkehubBetaFeatureResourceStateStateEnum {
if e == nil {
return betapb.GkehubBetaFeatureResourceStateStateEnum(0)
}
if v, ok := betapb.GkehubBetaFeatureResourceStateStateEnum_value["FeatureResourceStateStateEnum"+string(*e)]; ok {
return betapb.GkehubBetaFeatureResourceStateStateEnum(v)
}
return betapb.GkehubBetaFeatureResourceStateStateEnum(0)
}
// FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumToProto converts a FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum enum to its proto representation.
func GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumToProto(e *beta.FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum) betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum {
if e == nil {
return betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(0)
}
if v, ok := betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum_value["FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum"+string(*e)]; ok {
return betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(v)
}
return betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(0)
}
// FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumToProto converts a FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum enum to its proto representation.
func GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumToProto(e *beta.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum) betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum {
if e == nil {
return betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(0)
}
if v, ok := betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum_value["FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum"+string(*e)]; ok {
return betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(v)
}
return betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(0)
}
// FeatureStateStateCodeEnumToProto converts a FeatureStateStateCodeEnum enum to its proto representation.
func GkehubBetaFeatureStateStateCodeEnumToProto(e *beta.FeatureStateStateCodeEnum) betapb.GkehubBetaFeatureStateStateCodeEnum {
if e == nil {
return betapb.GkehubBetaFeatureStateStateCodeEnum(0)
}
if v, ok := betapb.GkehubBetaFeatureStateStateCodeEnum_value["FeatureStateStateCodeEnum"+string(*e)]; ok {
return betapb.GkehubBetaFeatureStateStateCodeEnum(v)
}
return betapb.GkehubBetaFeatureStateStateCodeEnum(0)
}
// FeatureResourceStateToProto converts a FeatureResourceState object to its proto representation.
func GkehubBetaFeatureResourceStateToProto(o *beta.FeatureResourceState) *betapb.GkehubBetaFeatureResourceState {
if o == nil {
return nil
}
p := &betapb.GkehubBetaFeatureResourceState{}
p.SetState(GkehubBetaFeatureResourceStateStateEnumToProto(o.State))
p.SetHasResources(dcl.ValueOrEmptyBool(o.HasResources))
return p
}
// FeatureSpecToProto converts a FeatureSpec object to its proto representation.
func GkehubBetaFeatureSpecToProto(o *beta.FeatureSpec) *betapb.GkehubBetaFeatureSpec {
if o == nil {
return nil
}
p := &betapb.GkehubBetaFeatureSpec{}
p.SetMulticlusteringress(GkehubBetaFeatureSpecMulticlusteringressToProto(o.Multiclusteringress))
p.SetFleetobservability(GkehubBetaFeatureSpecFleetobservabilityToProto(o.Fleetobservability))
return p
}
// FeatureSpecMulticlusteringressToProto converts a FeatureSpecMulticlusteringress object to its proto representation.
func GkehubBetaFeatureSpecMulticlusteringressToProto(o *beta.FeatureSpecMulticlusteringress) *betapb.GkehubBetaFeatureSpecMulticlusteringress {
if o == nil {
return nil
}
p := &betapb.GkehubBetaFeatureSpecMulticlusteringress{}
p.SetConfigMembership(dcl.ValueOrEmptyString(o.ConfigMembership))
return p
}
// FeatureSpecFleetobservabilityToProto converts a FeatureSpecFleetobservability object to its proto representation.
func GkehubBetaFeatureSpecFleetobservabilityToProto(o *beta.FeatureSpecFleetobservability) *betapb.GkehubBetaFeatureSpecFleetobservability {
if o == nil {
return nil
}
p := &betapb.GkehubBetaFeatureSpecFleetobservability{}
p.SetLoggingConfig(GkehubBetaFeatureSpecFleetobservabilityLoggingConfigToProto(o.LoggingConfig))
return p
}
// FeatureSpecFleetobservabilityLoggingConfigToProto converts a FeatureSpecFleetobservabilityLoggingConfig object to its proto representation.
func GkehubBetaFeatureSpecFleetobservabilityLoggingConfigToProto(o *beta.FeatureSpecFleetobservabilityLoggingConfig) *betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfig {
if o == nil {
return nil
}
p := &betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfig{}
p.SetDefaultConfig(GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigToProto(o.DefaultConfig))
p.SetFleetScopeLogsConfig(GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigToProto(o.FleetScopeLogsConfig))
return p
}
// FeatureSpecFleetobservabilityLoggingConfigDefaultConfigToProto converts a FeatureSpecFleetobservabilityLoggingConfigDefaultConfig object to its proto representation.
func GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigToProto(o *beta.FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) *betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfig {
if o == nil {
return nil
}
p := &betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfig{}
p.SetMode(GkehubBetaFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumToProto(o.Mode))
return p
}
// FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigToProto converts a FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig object to its proto representation.
func GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigToProto(o *beta.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) *betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig {
if o == nil {
return nil
}
p := &betapb.GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{}
p.SetMode(GkehubBetaFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumToProto(o.Mode))
return p
}
// FeatureStateToProto converts a FeatureState object to its proto representation.
func GkehubBetaFeatureStateToProto(o *beta.FeatureState) *betapb.GkehubBetaFeatureState {
if o == nil {
return nil
}
p := &betapb.GkehubBetaFeatureState{}
p.SetState(GkehubBetaFeatureStateStateToProto(o.State))
return p
}
// FeatureStateStateToProto converts a FeatureStateState object to its proto representation.
func GkehubBetaFeatureStateStateToProto(o *beta.FeatureStateState) *betapb.GkehubBetaFeatureStateState {
if o == nil {
return nil
}
p := &betapb.GkehubBetaFeatureStateState{}
p.SetCode(GkehubBetaFeatureStateStateCodeEnumToProto(o.Code))
p.SetDescription(dcl.ValueOrEmptyString(o.Description))
p.SetUpdateTime(dcl.ValueOrEmptyString(o.UpdateTime))
return p
}
// FeatureToProto converts a Feature resource to its proto representation.
func FeatureToProto(resource *beta.Feature) *betapb.GkehubBetaFeature {
	p := &betapb.GkehubBetaFeature{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetResourceState(GkehubBetaFeatureResourceStateToProto(resource.ResourceState))
	p.SetSpec(GkehubBetaFeatureSpecToProto(resource.Spec))
	p.SetState(GkehubBetaFeatureStateToProto(resource.State))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	p.SetDeleteTime(dcl.ValueOrEmptyString(resource.DeleteTime))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	// Copy labels into a fresh map so the proto does not alias the
	// resource's map.
	mLabels := make(map[string]string, len(resource.Labels))
	for k, r := range resource.Labels {
		mLabels[k] = r
	}
	p.SetLabels(mLabels)
	return p
}
// applyFeature handles the gRPC request by passing it to the underlying Feature Apply() method.
func (s *FeatureServer) applyFeature(ctx context.Context, c *beta.Client, request *betapb.ApplyGkehubBetaFeatureRequest) (*betapb.GkehubBetaFeature, error) {
	// Decode the requested resource, apply it, and re-encode the result.
	p := ProtoToFeature(request.GetResource())
	res, err := c.ApplyFeature(ctx, p)
	if err != nil {
		return nil, err
	}
	r := FeatureToProto(res)
	return r, nil
}
// ApplyGkehubBetaFeature handles the gRPC request by passing it to the underlying Feature Apply() method.
func (s *FeatureServer) ApplyGkehubBetaFeature(ctx context.Context, request *betapb.ApplyGkehubBetaFeatureRequest) (*betapb.GkehubBetaFeature, error) {
	cl, err := createConfigFeature(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyFeature(ctx, cl, request)
}
// DeleteGkehubBetaFeature handles the gRPC request by passing it to the underlying Feature Delete() method.
func (s *FeatureServer) DeleteGkehubBetaFeature(ctx context.Context, request *betapb.DeleteGkehubBetaFeatureRequest) (*emptypb.Empty, error) {
	cl, err := createConfigFeature(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	// Delete returns no payload; any error propagates alongside Empty.
	return &emptypb.Empty{}, cl.DeleteFeature(ctx, ProtoToFeature(request.GetResource()))
}
// ListGkehubBetaFeature handles the gRPC request by passing it to the underlying FeatureList() method.
func (s *FeatureServer) ListGkehubBetaFeature(ctx context.Context, request *betapb.ListGkehubBetaFeatureRequest) (*betapb.ListGkehubBetaFeatureResponse, error) {
	cl, err := createConfigFeature(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListFeature(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	// Convert each listed resource back into its proto form.
	var protos []*betapb.GkehubBetaFeature
	for _, r := range resources.Items {
		rp := FeatureToProto(r)
		protos = append(protos, rp)
	}
	p := &betapb.ListGkehubBetaFeatureResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigFeature builds a beta client configured with the given
// service account credentials file.
// (Parameter renamed from snake_case to Go-idiomatic mixedCaps; the
// signature is otherwise unchanged.)
func createConfigFeature(ctx context.Context, serviceAccountFile string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return beta.NewClient(conf), nil
}
|
package router
import (
"fmt"
"gopkg.in/macaron.v1"
"gopkg.in/mgo.v2/bson"
"qiniupkg.com/x/log.v7"
"tech/model"
"tech/modules/page"
)
// All loads every video series for the front page and stores the result
// (or an error message) in the template data.
func All(ctx *macaron.Context) {
	var serieses []model.Series
	if err := page.Page(ctx, model.SERIES, &bson.M{}, &serieses); err != nil {
		ctx.Data["message"] = fmt.Sprintf("%v", err)
		log.Error(err)
		return
	}
	ctx.Data["data"] = serieses
}
|
package utils
import (
"fmt"
"net/http"
)
// Render executes the named template's "layout" with props, after merging
// any *Props stored under the request-context key "data" into props.
// Unknown template names produce a 404; execution errors produce a 500.
func Render(w http.ResponseWriter, r *http.Request, filename string, props interface{}) {
	tmpl := templates[filename]
	if tmpl == nil {
		NotFound(w, r)
		return
	}
	if data := r.Context().Value("data"); data != nil {
		for key, value := range *data.(*Props) {
			(*props.(*Props))[key] = value
		}
	}
	if err := tmpl.ExecuteTemplate(w, "layout", props); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// NotAuthorized redirects the client to the signup page with a 307
// Temporary Redirect (preserves the request method on redirect).
// The magic number 307 is replaced with the named stdlib constant.
func NotAuthorized(w http.ResponseWriter, r *http.Request) {
	http.Redirect(w, r, "/signup", http.StatusTemporaryRedirect)
}
// Forbidden replies with status 403 and a plain-text body.
func Forbidden(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusForbidden)
	fmt.Fprint(w, "403 forbidden")
}
// NotFound replies with status 404 and a plain-text body.
func NotFound(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusNotFound)
	fmt.Fprint(w, "404 not found")
}
// BadRequest replies with status 400 and a plain-text body.
func BadRequest(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusBadRequest)
	fmt.Fprint(w, "400 bad request")
}
|
package main
import (
"bufio"
"encoding/json"
"flag"
"log"
"net/http"
"os"
"strings"
"sync"
"github.com/hpcloud/tail"
"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/abh/geodns/countries"
"github.com/abh/geodns/querylog"
)
// TODO:
// Add vendor yes/no
// add server region tag (identifier)?
const UserAgent = "geodns-logs/2.0"
// main tails a GeoDNS query log file (JSON lines) given as the first
// positional argument, feeds each line to processChan, and serves the
// resulting Prometheus counters on :8054/metrics.
func main() {
	log.Printf("Starting %q", UserAgent)
	identifierFlag := flag.String("identifier", "", "identifier (hostname, pop name or similar)")
	// verboseFlag := flag.Bool("verbose", false, "verbose output")
	flag.Parse()
	// serverID is the first comma-separated token of -identifier,
	// falling back to the machine hostname.
	var serverID string
	// var serverGroups []string
	if len(*identifierFlag) > 0 {
		ids := strings.Split(*identifierFlag, ",")
		serverID = ids[0]
		if len(ids) > 1 {
			// serverGroups = ids[1:]
		}
	}
	if len(serverID) == 0 {
		var err error
		serverID, err = os.Hostname()
		if err != nil {
			log.Printf("Could not get hostname: %s", err)
			os.Exit(2)
		}
	}
	// NOTE(review): serverID is computed but not referenced below in this
	// function — presumably consumed elsewhere or vestigial; confirm.
	// "queries" is a package-level CounterVec (declared elsewhere in this
	// package); it is initialized here before any log line is processed.
	queries = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "dns_logs_total",
			Help: "Number of served queries",
		},
		[]string{"zone", "vendor", "usercc", "poolcc", "qtype"},
	)
	prometheus.MustRegister(queries)
	buildInfo := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "geodns_logs_build_info",
			Help: "GeoDNS logs build information (in labels)",
		},
		[]string{"Version"},
	)
	prometheus.MustRegister(buildInfo)
	buildInfo.WithLabelValues(UserAgent).Set(1)
	http.Handle("/metrics", promhttp.Handler())
	go func() {
		err := http.ListenAndServe(":8054", nil)
		if err != nil {
			log.Printf("could not start http server: %s", err)
		}
	}()
	if len(flag.Args()) < 1 {
		log.Printf("filename to process required")
		os.Exit(2)
	}
	filename := flag.Arg(0)
	logf, err := tail.TailFile(filename, tail.Config{
		// Location: &tail.SeekInfo{-1, 0},
		Poll: true, // inotify is flaky on EL6, so try this ...
		ReOpen: true,
		MustExist: false,
		Follow: true,
	})
	if err != nil {
		log.Printf("Could not tail '%s': %s", filename, err)
	}
	in := make(chan string)
	// NOTE(review): if processChan returns early on a bad line, the send
	// below blocks forever since nothing drains "in" — confirm intended.
	go processChan(in, nil)
	for line := range logf.Lines {
		if line.Err != nil {
			log.Printf("Error tailing file: %s", line.Err)
		}
		in <- line.Text
	}
}
// extraValidLabels is the set of labels accepted as pool identifiers even
// though they are not country, continent or region codes.
var extraValidLabels = map[string]struct{}{
	"uk":       {},
	"_status":  {},
	"_country": {},
	"www":      {},
	"nag-test": {},
}
// validCC reports whether label is a known country, continent, region
// group, region-group member, or one of the explicitly whitelisted
// extra labels.
func validCC(label string) bool {
	_, isCountry := countries.CountryContinent[label]
	_, isContinent := countries.ContinentCountries[label]
	_, isRegion := countries.RegionGroupRegions[label]
	_, isRegionGroup := countries.RegionGroups[label]
	_, isExtra := extraValidLabels[label]
	return isCountry || isContinent || isRegion || isRegionGroup || isExtra
}
// getPoolCC extracts a country/continent/region code from a DNS label.
// It returns (cc, true) when any dot-separated element is a valid code,
// ("", true) when the label is empty or is a lone single-character
// pool-number element (one of 0-4), and ("", false) otherwise.
func getPoolCC(label string) (string, bool) {
	l := dns.SplitDomainName(label)
	// log.Printf("LABEL: %+v", l)
	if len(l) == 0 {
		return "", true
	}
	for _, cc := range l {
		if validCC(cc) {
			return cc, true
		}
	}
	// NOTE(review): when the first element is a single digit 0-4 but more
	// elements follow, this falls through to the failure return below —
	// presumably intentional, but worth confirming.
	if len(l[0]) == 1 && strings.ContainsAny(l[0], "01234") {
		if len(l) == 1 {
			return "", true
		}
	}
	// log.Printf("LABEL '%s' unhandled cc...", label)
	return "", false
}
// processChan consumes JSON querylog lines from in and accumulates them
// into stats. It returns on the first unmarshal or Add error, or nil
// once in is closed and drained.
//
// Fix: wg.Done() is now deferred. Previously it was only called on the
// success path, so an early error return left processFile's wg.Wait()
// blocked forever.
func processChan(in chan string, wg *sync.WaitGroup) error {
	if wg != nil {
		defer wg.Done()
	}
	e := querylog.Entry{}
	stats := NewStats()
	for line := range in {
		if err := json.Unmarshal([]byte(line), &e); err != nil {
			log.Printf("Can't unmarshal '%s': %s", line, err)
			return err
		}
		e.Name = strings.ToLower(e.Name)
		// fmt.Printf("%s %s\n", e.Origin, e.Name)
		if err := stats.Add(&e); err != nil {
			return err
		}
	}
	return nil
}
// processFile reads the given file line by line, feeds the lines to a
// processChan consumer goroutine, and waits for it to finish.
//
// Fix: the opened file handle is now closed on all return paths; it was
// previously leaked.
// NOTE(review): the out parameter is currently unused — kept for
// interface compatibility with callers.
func processFile(file string, out chan<- *Stats) error {
	fh, err := os.Open(file)
	if err != nil {
		return err
	}
	defer fh.Close()
	in := make(chan string)
	wg := sync.WaitGroup{}
	wg.Add(1)
	go processChan(in, &wg)
	scanner := bufio.NewScanner(fh)
	for scanner.Scan() {
		in <- scanner.Text()
	}
	if err := scanner.Err(); err != nil {
		log.Println("reading standard input:", err)
	}
	close(in)
	wg.Wait()
	return nil
}
|
package router
import (
"context"
"fmt"
"net/http"
"github.com/julienschmidt/httprouter"
)
// Make sure the Router conforms with the Router interface.
// (This runs at package init and constructs a throwaway router.)
var _ Router = newMuxHTTPRouter(context.Background())
// muxHTTPRouter adapts julienschmidt/httprouter to the Router interface.
type muxHTTPRouter struct {
	ctx context.Context // base context installed on every request in ServeHTTP
	mux *httprouter.Router
}
// newMuxHTTPRouter returns a Router backed by a fresh httprouter mux
// that uses ctx as the base context for every request it serves.
func newMuxHTTPRouter(ctx context.Context) Router {
	r := &muxHTTPRouter{ctx: ctx}
	r.mux = httprouter.New()
	return r
}
// Add registers handler h for the given HTTP method and path. Route
// parameters and query-string values are copied into the request
// context before the handler runs.
//
// Fix: r.URL.Query() re-parses the raw query string on every call; it
// was previously invoked once per key inside the loop. Parse it once.
func (m *muxHTTPRouter) Add(method, path string, h http.Handler) error {
	if err := validateAddParams(method, path, h); err != nil {
		return err
	}
	m.mux.Handle(method, path,
		func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
			ctx := r.Context()
			for i := range p {
				ctx = ctxWithParamValue(ctx, p[i].Key, p[i].Value)
			}
			query := r.URL.Query()
			for k := range query {
				ctx = ctxWithQueryValue(ctx, k, query.Get(k))
			}
			h.ServeHTTP(w, r.WithContext(ctx))
		})
	return nil
}
// Set404 installs h as the handler invoked when no route matches.
// It rejects a nil handler.
//
// Fix: the previous error, fmt.Errorf("%v handler", h), always rendered
// as "<nil> handler" since h is nil at that point; use a clear message.
func (m *muxHTTPRouter) Set404(h http.Handler) error {
	if h == nil {
		return fmt.Errorf("nil handler")
	}
	m.mux.NotFound = h
	return nil
}
// Set405 installs h as the handler invoked when the path matches but
// the method does not. It rejects a nil handler.
//
// Fix: the previous error, fmt.Errorf("%v handler", h), always rendered
// as "<nil> handler" since h is nil at that point; use a clear message.
func (m *muxHTTPRouter) Set405(h http.Handler) error {
	if h == nil {
		return fmt.Errorf("nil handler")
	}
	m.mux.MethodNotAllowed = h
	return nil
}
// ServeHTTP dispatches the request to the underlying httprouter mux.
// NOTE(review): the request's own context (and with it any inbound
// cancellation/deadline) is replaced wholesale by the router's base
// context here — confirm that dropping the inbound context is intended.
func (m *muxHTTPRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	r = r.WithContext(m.ctx)
	m.mux.ServeHTTP(w, r)
}
|
package servers
// UpdatePublicIpAddressReq is the payload for a public-IP-address update:
// the port definitions to expose and the allowed source restrictions.
type UpdatePublicIpAddressReq struct {
	Ports []PortDef
	SourceRestrictions []SourceRestrictionDef
}
// PortDef describes a protocol and a port (or the Port..PortTo range).
type PortDef struct {
	Protocol string
	Port int
	PortTo int
}
// SourceRestrictionDef limits access to a single source CIDR block.
type SourceRestrictionDef struct {
	Cidr string
}
|
package model
// Discards is a pile of discarded cards, embedding Cards for its
// collection behavior.
type Discards struct {
	Cards
}
// Put appends card c to the discard pile.
func (up *Discards) Put(c Card) {
	up.Cards = append(up.Cards, c)
}
|
package udig
import (
"github.com/stretchr/testify/assert"
"testing"
)
// Test_DissectDomainsFrom_By_simple_domain: a bare registrable domain is
// extracted verbatim.
func Test_DissectDomainsFrom_By_simple_domain(t *testing.T) {
	// Execute.
	domains := dissectDomainsFromString("example.com")
	// Assert.
	assert.Len(t, domains, 1)
	assert.Equal(t, "example.com", domains[0])
}
// Test_DissectDomainsFrom_By_subdomain: hyphenated subdomains survive intact.
func Test_DissectDomainsFrom_By_subdomain(t *testing.T) {
	// Execute.
	domains := dissectDomainsFromString("example.domain-hyphen.com")
	// Assert.
	assert.Len(t, domains, 1)
	assert.Equal(t, "example.domain-hyphen.com", domains[0])
}
// Test_DissectDomainsFrom_By_www_subdomain: a leading "www." is stripped.
func Test_DissectDomainsFrom_By_www_subdomain(t *testing.T) {
	// Execute.
	domains := dissectDomainsFromString("www.example.domain-hyphen.com")
	// Assert.
	assert.Len(t, domains, 1)
	assert.Equal(t, "example.domain-hyphen.com", domains[0])
}
// Test_DissectDomainsFrom_By_exotic_tld: long TLDs such as .museum work.
func Test_DissectDomainsFrom_By_exotic_tld(t *testing.T) {
	// Execute.
	domains := dissectDomainsFromString("www.example.domain-hyphen.museum")
	// Assert.
	assert.Len(t, domains, 1)
	assert.Equal(t, "example.domain-hyphen.museum", domains[0])
}
// Test_DissectDomainsFrom_By_complex_domain: underscores and digits in
// labels are preserved.
func Test_DissectDomainsFrom_By_complex_domain(t *testing.T) {
	// Execute.
	domains := dissectDomainsFromString("external.asd1230-123.asd_internal.asd.gm-_ail.aero")
	// Assert.
	assert.Len(t, domains, 1)
	assert.Equal(t, "external.asd1230-123.asd_internal.asd.gm-_ail.aero", domains[0])
}
// Test_DissectDomainsFrom_By_complex_url_in_text: a domain is extracted
// from a full URL (credentials, port, path) embedded in free text.
func Test_DissectDomainsFrom_By_complex_url_in_text(t *testing.T) {
	// Execute.
	domains := dissectDomainsFromString("Hello world: https://user:password@external.asd1230-123.asd_internal.asd.gm-_ail.aero:8080/foo/bar.html is really cool\nURL")
	// Assert.
	assert.Len(t, domains, 1)
	assert.Equal(t, "external.asd1230-123.asd_internal.asd.gm-_ail.aero", domains[0])
}
// Test_DissectDomainsFrom_By_multiple_urls: every URL in the text yields
// a domain, in order of appearance.
func Test_DissectDomainsFrom_By_multiple_urls(t *testing.T) {
	// Execute.
	domains := dissectDomainsFromString("Hello world: https://user:password@external.asd1230-123.asd_internal.asd.gm-_ail.aero:8080/foo/bar.html is really cool\nURL and this is another one http://www.foo-bar_baz.co")
	// Assert.
	assert.Len(t, domains, 2)
	assert.Equal(t, "external.asd1230-123.asd_internal.asd.gm-_ail.aero", domains[0])
	assert.Equal(t, "foo-bar_baz.co", domains[1])
}
// Test_DissectDomainsFrom_By_invalid_domain: the malformed "bad.-" prefix
// is dropped and only the valid trailing domain is returned.
func Test_DissectDomainsFrom_By_invalid_domain(t *testing.T) {
	// Execute.
	domains := dissectDomainsFromString("bad.-example.com")
	// Assert.
	assert.Len(t, domains, 1)
	assert.Equal(t, "example.com", domains[0])
}
// Test_isDomainRelated_By_same_domain: identical domains are related,
// symmetrically.
func Test_isDomainRelated_By_same_domain(t *testing.T) {
	// Setup.
	domainA := "example.com"
	domainB := domainA
	// Execute.
	res1 := isDomainRelated(domainA, domainB, false)
	res2 := isDomainRelated(domainB, domainA, false)
	// Assert.
	assert.Equal(t, true, res1)
	assert.Equal(t, true, res2)
}
// Test_isDomainRelated_By_subdomain: a domain and its subdomain are
// related in both directions.
func Test_isDomainRelated_By_subdomain(t *testing.T) {
	// Setup.
	domainA := "example.com"
	domainB := "sub.example.com"
	// Execute.
	res1 := isDomainRelated(domainA, domainB, false)
	res2 := isDomainRelated(domainB, domainA, false)
	// Assert.
	assert.Equal(t, true, res1)
	assert.Equal(t, true, res2)
}
// Test_isDomainRelated_By_domain_with_different_TLD: in non-strict mode
// the TLD is ignored, so example.com relates to sub.example.net.
func Test_isDomainRelated_By_domain_with_different_TLD(t *testing.T) {
	// Setup.
	domainA := "example.com"
	domainB := "sub.example.net"
	// Execute.
	res1 := isDomainRelated(domainA, domainB, false)
	res2 := isDomainRelated(domainB, domainA, false)
	// Assert.
	assert.Equal(t, true, res1)
	assert.Equal(t, true, res2)
}
// Test_isDomainRelated_By_domain_with_different_TLD_strict: strict mode
// requires the TLD to match as well.
func Test_isDomainRelated_By_domain_with_different_TLD_strict(t *testing.T) {
	// Setup.
	domainA := "example.com"
	domainB := "sub.example.net"
	// Execute.
	res1 := isDomainRelated(domainA, domainB, true)
	res2 := isDomainRelated(domainB, domainA, true)
	// Assert.
	assert.Equal(t, false, res1)
	assert.Equal(t, false, res2)
}
// Test_isDomainRelated_By_TLDs: bare TLDs never relate, even to themselves.
func Test_isDomainRelated_By_TLDs(t *testing.T) {
	// Setup.
	domainA := "com"
	domainB := "com"
	// Execute.
	res := isDomainRelated(domainA, domainB, false)
	// Assert.
	assert.Equal(t, false, res)
}
// Test_isDomainRelated_By_invalid_domain: an unparsable domain ("." here)
// is related to nothing, in either argument position.
func Test_isDomainRelated_By_invalid_domain(t *testing.T) {
	// Setup.
	domainA := "."
	domainB := "example.com"
	// Execute.
	res1 := isDomainRelated(domainA, domainB, false)
	res2 := isDomainRelated(domainB, domainA, false)
	// Assert.
	assert.Equal(t, false, res1)
	assert.Equal(t, false, res2)
}
|
package node
import (
"github.com/sherifabdlnaby/prism/app/component"
"github.com/sherifabdlnaby/prism/pkg/job"
"go.uber.org/zap"
)
// core is the job-processing strategy a Node delegates to (read-only,
// read-write, stream, output, or dummy).
type core interface {
	process(j job.Job)
	processStream(j job.Job)
}
// newBase wires the fields shared by all node kinds: identity, the
// processing core, successor nodes, the async-job factory, the input
// job channel, the owned resource, and a logger named after the node ID.
func newBase(id ID, core core, async bool, nexts []Next,
	createAsync createAsyncFunc, jobChan <-chan job.Job, resource *component.Resource,
	logger zap.SugaredLogger) *Node {
	return &Node{
		ID: id,
		async: async,
		nexts: nexts,
		core: core,
		createAsyncJob: createAsync,
		resource: resource,
		logger: *logger.Named(string(id)), // store a copy of the named child logger
		receiveJobChan: jobChan,
	}
}
// NewReadOnly constructs a new node around a read-only processor.
func NewReadOnly(ID ID, processor *component.ProcessorReadOnly, async bool, nexts []Next,
	createAsync createAsyncFunc, jobChan <-chan job.Job, logger zap.SugaredLogger) *Node {
	core := &readOnly{processor: processor}
	base := newBase(ID, core, async, nexts, createAsync, jobChan, &processor.Resource, logger)
	core.Node = base
	return core.Node
}
// NewReadWrite constructs a new node around a read-write processor.
func NewReadWrite(ID ID, processor *component.ProcessorReadWrite, async bool, nexts []Next,
	createAsync createAsyncFunc, jobChan <-chan job.Job, logger zap.SugaredLogger) *Node {
	core := &readWrite{processor: processor}
	base := newBase(ID, core, async, nexts, createAsync, jobChan, &processor.Resource, logger)
	core.Node = base
	return core.Node
}
// NewReadWriteStream constructs a new node around a streaming read-write
// processor.
func NewReadWriteStream(ID ID, processor *component.ProcessorReadWriteStream, async bool, nexts []Next,
	createAsync createAsyncFunc, jobChan <-chan job.Job, logger zap.SugaredLogger) *Node {
	core := &readWriteStream{processor: processor}
	base := newBase(ID, core, async, nexts, createAsync, jobChan, &processor.Resource, logger)
	core.Node = base
	return core.Node
}
// NewOutput constructs a new node around an output component.
func NewOutput(ID ID, out *component.Output, async bool, nexts []Next,
	createAsync createAsyncFunc, jobChan <-chan job.Job, logger zap.SugaredLogger) *Node {
	core := &output{output: out}
	base := newBase(ID, core, async, nexts, createAsync, jobChan, &out.Resource, logger)
	core.Node = base
	return core.Node
}
// NewDummy constructs a placeholder node: empty ID, synchronous, no
// async-job factory and no resource.
func NewDummy(nexts []Next, jobChan <-chan job.Job, logger zap.SugaredLogger) *Node {
	core := &dummy{}
	base := newBase("", core, false, nexts, nil, jobChan, nil, logger)
	core.Node = base
	return core.Node
}
|
package carbon
import (
"testing"
"time"
"github.com/Kretech/xgo/test"
)
// TestUnixOf checks UnixOf at the epoch under both the Shanghai (UTC+8)
// and UTC package-level default locations selected via In().
func TestUnixOf(t *testing.T) {
	In(Shanghai)
	test.AssertEqual(t, UnixOf(0, 0).Format("Y-m-d H:i:s"), "1970-01-01 08:00:00")
	In(time.UTC)
	test.AssertEqual(t, UnixOf(0, 0).In(Shanghai).Format("Y-m-d H:i:s"), "1970-01-01 08:00:00")
	test.AssertEqual(t, UnixOf(0, 0).Format("Y-m-d H:i:s"), "1970-01-01 00:00:00")
	In(time.UTC)
	test.AssertEqual(t, UnixOf(0, 0).Time().String(), "1970-01-01 00:00:00 +0000 UTC")
	test.AssertEqual(t, UnixOf(0, 0).Time().Unix(), 0)
	test.AssertEqual(t, UnixOf(0, 0).Format("Y-m-d H:i:s"), "1970-01-01 00:00:00")
	// NOTE(review): this compares a time.Time against a string; it
	// presumably relies on AssertEqual stringifying both sides — confirm.
	test.AssertEqual(t, UnixOf(0, 0).In(Shanghai).Time(), "1970-01-01 08:00:00 +0800 CST")
	test.AssertEqual(t, UnixOf(0, 0).In(Shanghai).Time().Unix(), 0)
	test.AssertEqual(t, UnixOf(0, 0).In(Shanghai).Format("Y-m-d H:i:s"), "1970-01-01 08:00:00")
}
// TestParse is a smoke test only: it checks that Parse accepts an
// absolute timestamp and a relative "+1 day" without asserting results.
func TestParse(t *testing.T) {
	Parse("2012-1-1 01:02:03")
	Parse("+1 day")
}
// TestCarbon_Format checks PHP-style format tokens: m/d are zero-padded,
// n/j are not.
func TestCarbon_Format(t *testing.T) {
	In(Shanghai)
	test.AssertEqual(t, UnixOf(1524379525, 0).Format(`Y-m-d`), `2018-04-22`)
	test.AssertEqual(t, UnixOf(1524379525, 0).Format(`Y-n-j`), `2018-4-22`)
	test.AssertEqual(t, UnixOf(1523031400, 0).Format(`Y-n-j`), `2018-4-7`)
	test.AssertEqual(t, UnixOf(1524379525, 0).Format(`Y-m-d H:i:s`), `2018-04-22 14:45:25`)
}
// TestCarbon_In checks that In returns a converted copy and leaves the
// receiver untouched.
func TestCarbon_In(t *testing.T) {
	t1 := Now()
	test.AssertEqual(t, t1.Time().Location(), time.Local)
	t2 := t1.In(Shanghai)
	// t1 is unchanged
	test.AssertEqual(t, t1.Time().Location(), time.Local)
	// t2 was converted successfully
	test.AssertEqual(t, t2.Time().Location(), Shanghai)
}
// TestCarbon_Sub checks subtracting a time.Duration from a parsed time.
func TestCarbon_Sub(t *testing.T) {
	t1 := TParse("Y-m-d H:i:s", "2018-01-02 09:00:00")
	t2 := t1.Sub(time.Hour)
	test.AssertEqual(t, t2.Format("Y-m-d H:i:s"), "2018-01-02 08:00:00")
}
|
package pie_test
import (
"github.com/elliotchance/pie/v2"
"github.com/stretchr/testify/assert"
"testing"
)
// TestInts checks pie.Ints: a nil slice stays nil and float64 inputs are
// converted to int with the fractional part dropped.
// NOTE(review): all fractions here are < .5, so these cases do not
// distinguish truncation from rounding — consider adding e.g. 453.9.
func TestInts(t *testing.T) {
	assert.Equal(t, []int(nil), pie.Ints([]int(nil)))
	assert.Equal(t,
		[]int{92, 823, 453},
		pie.Ints([]float64{92.384, 823.324, 453}))
}
|
package gsysint
import (
"unsafe"
)
/*
* defined constants
*/
// Goroutine (G) and processor (P) status values, mirroring the Go
// runtime's internal constants; they must stay in sync with the runtime
// version being targeted.
const (
	// G status
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	_Gidle = iota // 0
	_Grunnable // 1 runnable and on a run queue
	_Grunning // 2
	_Gsyscall // 3
	_Gwaiting // 4
	_Gmoribund_unused // 5 currently unused, but hardcoded in gdb scripts
	_Gdead // 6
	_Genqueue // 7 Only the Gscanenqueue is used.
	_Gcopystack // 8 in this state when newstack is moving the stack
	// the following encode that the GC is scanning the stack and what to do when it is done
	_Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state,
	// _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make Grunnable (it is already on run queue)
	_Gscanrunning = _Gscan + _Grunning // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
	_Gscansyscall = _Gscan + _Gsyscall // 0x1003 When scanning completes make it Gsyscall
	_Gscanwaiting = _Gscan + _Gwaiting // 0x1004 When scanning completes make it Gwaiting
	// _Gscanmoribund_unused, // not possible
	// _Gscandead, // not possible
	_Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on runqueue
)
const (
	// P status
	_Pidle = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)
// Mutex mirrors the runtime's internal mutual-exclusion lock.
type Mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
// Note mirrors the runtime's one-time notification primitive.
type Note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
// FuncVal mirrors the runtime's funcval: a function pointer header,
// followed in memory by any fn-specific (closure) data.
type FuncVal struct {
	fn uintptr
	// variable-size, fn-specific data here
}
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type Guintptr uintptr
// ptr converts the stored address back to a *G.
//go:nosplit
func (gp Guintptr) ptr() *G { return (*G)(unsafe.Pointer(gp)) }
// set stores g's address without a write barrier.
//go:nosplit
func (gp *Guintptr) set(g *G) { *gp = Guintptr(unsafe.Pointer(g)) }
//go:nosplit
//func (gp *guintptr) cas(old, new guintptr) bool {
//	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
//}
// Puintptr is a *P typed as uintptr to bypass write barriers.
type Puintptr uintptr
// Muintptr is a *M typed as uintptr to bypass write barriers.
type Muintptr uintptr
// ptr converts the stored address back to a *M.
//go:nosplit
func (mp Muintptr) ptr() *M { return (*M)(unsafe.Pointer(mp)) }
// set stores m's address without a write barrier.
//go:nosplit
func (mp *Muintptr) set(m *M) { *mp = Muintptr(unsafe.Pointer(m)) }
// Uintreg is a register-sized value; fixed to uint64 here.
// NOTE(review): the runtime sizes uintreg per-arch — confirm this copy
// is only used on 64-bit targets.
type Uintreg uint64
// GoBuf mirrors the runtime's gobuf: the execution context saved and
// restored when switching goroutines.
type GoBuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	sp uintptr
	pc uintptr
	g Guintptr
	ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
	ret Uintreg
	lr uintptr
	bp uintptr // for GOEXPERIMENT=framepointer
}
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
// Sudog mirrors the runtime's sudog: a g waiting in a wait list
// (channel send/receive, select).
type Sudog struct {
	g *G
	selectdone *uint32
	next *Sudog
	prev *Sudog
	elem unsafe.Pointer // data element
	releasetime int64
	nrelease int32 // -1 for acquire
	waitlink *Sudog // g.waiting list
}
// GCStats mirrors the runtime's per-M garbage-collection statistics.
type GCStats struct {
	// the struct must consist of only uint64's,
	// because it is casted to uint64[].
	nhandoff uint64
	nhandoffcnt uint64
	nprocyield uint64
	nosyield uint64
	nsleep uint64
}
// LibCall mirrors the runtime's libcall: the parameter/result block for
// a call into a C library or system DLL.
type LibCall struct {
	fn uintptr
	n uintptr // number of parameters
	args uintptr // parameters
	r1 uintptr // return values
	r2 uintptr
	err uintptr // error number
}
// describes how to handle callback
type WinCallBackContext struct {
	gobody unsafe.Pointer // go function to call
	argsize uintptr // callback arguments size (in bytes)
	restorestack uintptr // adjust stack on return by (in bytes) (386 only)
	cleanstack bool
}
// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type Stack struct {
	lo uintptr
	hi uintptr
}
// stkbar records the state of a G's stack barrier.
type StkBar struct {
	savedLRPtr uintptr // location overwritten by stack barrier PC
	savedLRVal uintptr // value overwritten at savedLRPtr
}
/*
 * deferred subroutine calls
 */
// Defer mirrors the runtime's _defer record for one deferred call.
type Defer struct {
	siz int32
	started bool
	sp uintptr // sp at time of defer
	pc uintptr
	fn *FuncVal
	_panic *Panic // panic that is running defer
	link *Defer
}
/*
 * panics
 */
// Panic mirrors the runtime's _panic record for an in-flight panic.
type Panic struct {
	argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg interface{} // argument to panic
	link *Panic // link to earlier panic
	recovered bool // whether this panic is over
	aborted bool // the panic was aborted
}
// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker
// and with package debug/gosym and with symtab.go in package runtime.
type Func struct {
	entry uintptr // start pc
	nameoff int32 // function name
	args int32 // in/out args size
	_ int32 // previously legacy frame size; kept for layout compatibility
	pcsp int32
	pcfile int32
	pcln int32
	npcdata int32
	nfuncdata int32
}
/*
 * stack traces
 */
// StkFrame mirrors the runtime's stkframe: one frame during a stack walk.
type StkFrame struct {
	fn *Func // function being run
	pc uintptr // program counter within fn
	continpc uintptr // program counter where execution can continue, or 0 if not
	lr uintptr // program counter at caller aka link register
	sp uintptr // stack pointer at pc
	fp uintptr // stack pointer at caller aka frame pointer
	varp uintptr // top of local variables
	argp uintptr // pointer to function arguments
	arglen uintptr // number of bytes at argp
	argmap *BitVector // force use of this argmap
}
// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
// MCache is a placeholder here: the real mcache fields are omitted.
type MCache struct {
	// ...
}
// G mirrors the runtime's g: the descriptor of a single goroutine.
// Field order and types must match the target runtime exactly.
type G struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack Stack // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink
	_panic *Panic // innermost panic - offset known to liblink
	_defer *Defer // innermost defer
	m *M // current m; offset known to arm liblink
	stackAlloc uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
	sched GoBuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stkbar []StkBar // stack barriers, from low to high (see top of mstkbar.go)
	stkbarPos uintptr // index of lowest stack barrier not hit
	stktopsp uintptr // expected sp at top of stack, to check in traceback
	param unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32
	stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid int64
	waitsince int64 // approx time when the g become blocked
	waitreason string // if status==Gwaiting
	schedlink Guintptr
	preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault bool // panic (instead of crash) on unexpected fault address
	preemptscan bool // preempted g does scan for gc
	gcscandone bool // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan
	throwsplit bool // must not split stack
	raceignore int8 // ignore race detection events
	sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks int64 // cputicks when syscall has returned (for tracing)
	sysexitseq uint64 // trace seq when syscall has returned (for tracing)
	lockedm *M
	sig uint32
	writebuf []byte
	sigcode0 uintptr
	sigcode1 uintptr
	sigpc uintptr
	gopc uintptr // pc of go statement that created this goroutine
	startpc uintptr // pc of goroutine function
	racectx uintptr
	waiting *Sudog // sudog structures this g is waiting on (that have a valid elem ptr)
	// Per-G gcController state
	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64
}
// M mirrors the runtime's m: an OS-thread descriptor.
// Field order and types must match the target runtime exactly.
type M struct {
	g0 *G // goroutine with scheduling stack
	morebuf GoBuf // gobuf arg to morestack
	divmod uint32 // div/mod denominator for arm - known to liblink
	// Fields not known to debuggers.
	procid uint64 // for debuggers, but offset not hard-coded
	gsignal *G // signal-handling g
	sigmask SigSet // storage for saved signal mask
	tls [6]uintptr // thread-local storage (for x86 extern register)
	mstartfn func()
	curg *G // current running goroutine
	caughtsig Guintptr // goroutine running during fatal signal
	p Puintptr // attached p for executing go code (nil if not executing go code)
	nextp Puintptr
	id int32
	mallocing int32
	throwing int32
	preemptoff string // if != "", keep curg running on this m
	locks int32
	softfloat int32
	dying int32
	profilehz int32
	helpgc int32
	spinning bool // m is out of work and is actively looking for work
	blocked bool // m is blocked on a note
	inwb bool // m is executing a write barrier
	newSigstack bool // minit on C thread called sigaltstack
	printlock int8
	fastrand uint32
	ncgocall uint64 // number of cgo calls in total
	ncgo int32 // number of cgo calls currently in progress
	park Note
	alllink *M // on allm
	schedlink Muintptr
	machport uint32 // return address for mach ipc (os x)
	mcache *MCache
	lockedg *G
	createstack [32]uintptr // stack that created this thread.
	freglo [16]uint32 // d[i] lsb and f[i]
	freghi [16]uint32 // d[i] msb and f[i+16]
	fflag uint32 // floating point compare flags
	locked uint32 // tracking for lockosthread
	nextwaitm uintptr // next m waiting for lock
	gcstats GCStats
	needextram bool
	traceback uint8
	waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock unsafe.Pointer
	waittraceev byte
	waittraceskip int
	startingtrace bool
	syscalltick uint32
	//#ifdef GOOS_windows
	thread uintptr // thread handle
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall LibCall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg Guintptr
	syscall LibCall // stores syscall parameters on windows
	//#endif
	// mOS holds platform-specific per-thread state (defined elsewhere
	// in this package).
	mOS
}
|
package common
import (
"bytes"
"io"
"sync"
"github.com/sirupsen/logrus"
)
// LogTracer traces log output: every Write is fanned out to an internal
// buffer, a logrus entry, and any extra tracer writers; Read drains the
// internal buffer.
type LogTracer struct {
	buffer *bytes.Buffer // retains written data so it can be read back via Read
	writer io.Writer // MultiWriter over buffer, the logrus tracer and tracers
	mutex sync.Mutex // serializes Read and Write
	tracers []io.Writer // extra writers; closed by Close unless wrapped in DonCloseWriter
}
// NewLogTracer creates a LogTracer that copies every write into an
// internal buffer, a logrus entry tagged with id, and the given tracers.
func NewLogTracer(id string, tracers ...io.Writer) *LogTracer {
	buf := &bytes.Buffer{}
	sinks := append([]io.Writer{buf, newLogrusTracer(id)}, tracers...)
	return &LogTracer{
		buffer:  buf,
		writer:  io.MultiWriter(sinks...),
		tracers: tracers,
	}
}
// Read implements io.Reader by draining the internal buffer under the
// tracer's mutex.
func (l *LogTracer) Read(p []byte) (int, error) {
	l.mutex.Lock()
	n, err := l.buffer.Read(p)
	l.mutex.Unlock()
	return n, err
}
// Write implements io.Writer, fanning p out to every configured sink
// (internal buffer, logrus entry, extra tracers) under the mutex.
func (l *LogTracer) Write(p []byte) (int, error) {
	l.mutex.Lock()
	n, err := l.writer.Write(p)
	l.mutex.Unlock()
	return n, err
}
// Close implements io.Closer: it closes every extra tracer that is an
// io.Closer, skipping any wrapped in DonCloseWriter, and returns the
// first close error encountered.
func (l *LogTracer) Close() error {
	for _, tracer := range l.tracers {
		if _, skip := tracer.(DonCloseWriter); skip {
			continue
		}
		closer, ok := tracer.(io.Closer)
		if !ok {
			continue
		}
		if err := closer.Close(); err != nil {
			return err
		}
	}
	return nil
}
// logrusTracer adapts a logrus entry to io.Writer so log output can be
// mirrored into structured logging.
type logrusTracer struct {
	entry *logrus.Entry
}
// newLogrusTracer returns a tracer whose entries carry the given job run
// id in the "JobRunID" field.
func newLogrusTracer(id string) *logrusTracer {
	return &logrusTracer{
		entry: logrus.WithFields(logrus.Fields{
			"JobRunID": id,
		}),
	}
}
// Write implements io.Writer by logging each chunk at Info level.
func (l *logrusTracer) Write(p []byte) (int, error) {
	l.entry.Info(string(p))
	return len(p), nil
}
// DonCloseWriter wraps an io.Writer so that LogTracer.Close skips it.
type DonCloseWriter struct {
	io.Writer
}
// Close closes the wrapped writer when it is an io.Closer and is a no-op
// otherwise. Implementing io.Closer lets DonCloseWriter be used as the
// log tracer for PhistageTask.
func (dcw DonCloseWriter) Close() error {
	closer, ok := dcw.Writer.(io.Closer)
	if !ok {
		return nil
	}
	return closer.Close()
}
// ClosableDiscard is an io.WriteCloser that throws away everything
// written to it; Close is a no-op.
var ClosableDiscard io.WriteCloser = discard{}
// discard is the zero-size implementation behind ClosableDiscard.
type discard struct{}
// Write reports the full length as written without storing anything.
func (discard) Write(p []byte) (int, error) {
	return len(p), nil
}
// Close always succeeds.
func (discard) Close() error {
	return nil
}
|
// Copyright 2018 The Netstack Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,!amd64
package rawfile
import (
"syscall"
"unsafe"
)
// blockingPoll issues poll(2) directly via syscall.Syscall so the calling
// thread blocks in the kernel instead of going through the Go netpoller.
// timeout follows poll(2) semantics (milliseconds; negative blocks
// indefinitely). It returns the number of ready descriptors and the raw
// errno (0 on success).
func blockingPoll(fds *pollEvent, nfds int, timeout int64) (int, syscall.Errno) {
	n, _, e := syscall.Syscall(syscall.SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
	return int(n), e
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"bufio"
"fmt"
"sort"
"strings"
"github.com/cloudspannerecosystem/harbourbridge/schema"
"github.com/cloudspannerecosystem/harbourbridge/spanner/ddl"
)
// GenerateReport analyzes schema and data conversion stats and writes a
// detailed report to w and returns a brief summary (as a string).
//
// badWrites maps source table names to counts of rows that converted
// successfully but could not be written to Spanner. printTableReports
// and printUnexpecteds toggle the per-table and debugging sections.
func GenerateReport(driverName string, conv *Conv, w *bufio.Writer, badWrites map[string]int64, printTableReports bool, printUnexpecteds bool) string {
	reports := AnalyzeTables(conv, badWrites)
	summary := GenerateSummary(conv, reports, badWrites)
	writeHeading(w, "Summary of Conversion")
	w.WriteString(summary)
	ignored := IgnoredStatements(conv)
	w.WriteString("\n")
	if len(ignored) > 0 {
		justifyLines(w, fmt.Sprintf("Note that the following source DB statements "+
			"were detected but ignored: %s.",
			strings.Join(ignored, ", ")), 80, 0)
		w.WriteString("\n\n")
	}
	// Statement-level stats only exist for dump-file drivers (e.g.
	// pg_dump, mysqldump), where we parse the statements ourselves.
	isDump := strings.Contains(driverName, "dump")
	statementsMsg := ""
	if isDump {
		statementsMsg = "stats on the " + driverName + " statements processed, followed by "
	}
	justifyLines(w, "The remainder of this report provides "+statementsMsg+
		"a table-by-table listing of schema and data conversion details. "+
		"For background on the schema and data conversion process used, "+
		"and explanations of the terms and notes used in this "+
		"report, see HarbourBridge's README.", 80, 0)
	w.WriteString("\n\n")
	if isDump {
		writeStmtStats(driverName, conv, w)
	}
	if printTableReports {
		for _, t := range reports {
			h := fmt.Sprintf("Table %s", t.SrcTable)
			if t.SrcTable != t.SpTable {
				h = h + fmt.Sprintf(" (mapped to Spanner table %s)", t.SpTable)
			}
			writeHeading(w, h)
			w.WriteString(rateConversion(t.rows, t.badRows, t.Cols, t.Warnings, t.SyntheticPKey != "", false, conv.SchemaMode()))
			w.WriteString("\n")
			for _, x := range t.Body {
				fmt.Fprintf(w, "%s\n", x.Heading)
				for i, l := range x.Lines {
					justifyLines(w, fmt.Sprintf("%d) %s.\n", i+1, l), 80, 3)
				}
				w.WriteString("\n")
			}
		}
	}
	if printUnexpecteds {
		writeUnexpectedConditions(driverName, conv, w)
	}
	return summary
}
// tableReport summarizes schema and data conversion quality for one
// source table and its mapped Spanner table.
type tableReport struct {
	SrcTable string // Source DB table name.
	SpTable  string // Name of the Spanner table it maps to.
	rows     int64  // Total data rows encountered for this table.
	badRows  int64  // Rows that failed conversion or failed to write.
	Cols     int64  // Number of columns in the source table.
	Warnings int64  // Count of schema-conversion warnings (see analyzeCols).
	SyntheticPKey string // Empty string means no synthetic primary key was needed.
	Body []tableReportBody
}

// tableReportBody is one titled section (e.g. "Warnings", "Notes") of a
// table report.
type tableReportBody struct {
	Heading string   // Section title; pluralized when Lines has more than one entry.
	Lines   []string // Individual issue descriptions.
}
// AnalyzeTables builds a conversion report for every table in the
// source schema. Tables are processed in alphabetical order so that
// they appear in alphabetical order in report.txt.
func AnalyzeTables(conv *Conv, badWrites map[string]int64) (r []tableReport) {
	names := make([]string, 0, len(conv.SrcSchema))
	for name := range conv.SrcSchema {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		r = append(r, buildTableReport(conv, name, badWrites))
	}
	return r
}
// buildTableReport assembles the tableReport for a single source table:
// the table-name mapping, per-column schema issues, a note for any
// synthetic primary key, and (when data conversion ran) row statistics.
func buildTableReport(conv *Conv, srcTable string, badWrites map[string]int64) tableReport {
	spTable, err := GetSpannerTable(conv, srcTable)
	srcSchema, ok1 := conv.SrcSchema[srcTable]
	spSchema, ok2 := conv.SpSchema[spTable]
	tr := tableReport{SrcTable: srcTable, SpTable: spTable}
	if err != nil || !ok1 || !ok2 {
		m := "bad source-DB-to-Spanner table mapping or Spanner schema"
		conv.Unexpected("report: " + m)
		// gofmt -s form: the element type is elided inside the slice literal.
		tr.Body = []tableReportBody{{Heading: "Internal error: " + m}}
		return tr
	}
	issues, cols, warnings := analyzeCols(conv, srcTable, spTable)
	tr.Cols = cols
	tr.Warnings = warnings
	if pk, ok := conv.SyntheticPKeys[spTable]; ok {
		tr.SyntheticPKey = pk.Col
		tr.Body = buildTableReportBody(conv, srcTable, issues, spSchema, srcSchema, &pk.Col)
	} else {
		tr.Body = buildTableReportBody(conv, srcTable, issues, spSchema, srcSchema, nil)
	}
	// Row stats only exist when data conversion ran (not schema-only mode).
	if !conv.SchemaMode() {
		fillRowStats(conv, srcTable, badWrites, &tr)
	}
	return tr
}
// buildTableReportBody formats the per-column schema issues of srcTable
// into report sections, one per severity ("Warning" then "Note").
// Issues flagged batch in IssueDB are reported at most once per table.
// A non-nil syntheticPK adds a warning about the auto-added primary-key
// column, which has no matching source column.
func buildTableReportBody(conv *Conv, srcTable string, issues map[string][]SchemaIssue, spSchema ddl.CreateTable, srcSchema schema.Table, syntheticPK *string) []tableReportBody {
	var body []tableReportBody
	for _, p := range []struct {
		heading  string
		severity severity
	}{
		{"Warning", warning},
		{"Note", note},
	} {
		// Print out issues is alphabetical column order.
		var cols []string
		for t := range issues {
			cols = append(cols, t)
		}
		sort.Strings(cols)
		var l []string
		if syntheticPK != nil {
			// Warnings about synthetic primary keys must be handled as a special case
			// because we have a Spanner column with no matching source DB col.
			// Much of the generic code for processing issues assumes we have both.
			if p.severity == warning {
				l = append(l, fmt.Sprintf("Column '%s' was added because this table didn't have a primary key. Spanner requires a primary key for every table", *syntheticPK))
			}
		}
		// issueBatcher tracks batched issues already reported for this table.
		issueBatcher := make(map[SchemaIssue]bool)
		for _, srcCol := range cols {
			for _, i := range issues[srcCol] {
				// Only report issues matching this section's severity.
				if IssueDB[i].severity != p.severity {
					continue
				}
				if IssueDB[i].batch {
					if issueBatcher[i] {
						// Have already reported a previous instance of this
						// (batched) issue, so skip this one.
						continue
					}
					issueBatcher[i] = true
				}
				spCol, err := GetSpannerCol(conv, srcTable, srcCol, true)
				if err != nil {
					conv.Unexpected(err.Error())
				}
				srcType := srcSchema.ColDefs[srcCol].Type.Print()
				spType := spSchema.ColDefs[spCol].T.PrintColumnDefType()
				// A note on case: Spanner types are case insensitive, but
				// default to upper case. In particular, the Spanner AST uses
				// upper case, so spType is upper case. Many source DBs
				// default to lower case. When printing source DB and
				// Spanner types for comparison purposes, this can be distracting.
				// Hence we switch to lower-case for Spanner types here.
				// TODO: add logic to choose case for Spanner types based
				// on case of srcType.
				spType = strings.ToLower(spType)
				// Issue-specific phrasing; default covers generic type mappings.
				switch i {
				case DefaultValue:
					l = append(l, fmt.Sprintf("%s e.g. column '%s'", IssueDB[i].Brief, srcCol))
				case ForeignKey:
					l = append(l, fmt.Sprintf("Column '%s' uses foreign keys which HarbourBridge does not support yet", srcCol))
				case AutoIncrement:
					l = append(l, fmt.Sprintf("Column '%s' is an autoincrement column. %s", srcCol, IssueDB[i].Brief))
				case Timestamp:
					// Avoid the confusing "timestamp is mapped to timestamp" message.
					l = append(l, fmt.Sprintf("Some columns have source DB type 'timestamp without timezone' which is mapped to Spanner type timestamp e.g. column '%s'. %s", srcCol, IssueDB[i].Brief))
				case Datetime:
					l = append(l, fmt.Sprintf("Some columns have source DB type 'datetime' which is mapped to Spanner type timestamp e.g. column '%s'. %s", srcCol, IssueDB[i].Brief))
				case Widened:
					l = append(l, fmt.Sprintf("%s e.g. for column '%s', source DB type %s is mapped to Spanner type %s", IssueDB[i].Brief, srcCol, srcType, spType))
				default:
					l = append(l, fmt.Sprintf("Column '%s': type %s is mapped to %s. %s", srcCol, srcType, spType, IssueDB[i].Brief))
				}
			}
		}
		if len(l) == 0 {
			// Nothing at this severity: omit the section entirely.
			continue
		}
		heading := p.heading
		if len(l) > 1 {
			heading = heading + "s"
		}
		body = append(body, tableReportBody{Heading: heading, Lines: l})
	}
	return body
}
// fillRowStats populates tr's row-count fields for srcTable from
// conv.Stats and badWrites, reporting inconsistent counters via
// conv.Unexpected.
func fillRowStats(conv *Conv, srcTable string, badWrites map[string]int64, tr *tableReport) {
	// total: all rows we encountered during processing.
	// converted: rows we successfully converted.
	// failedConv: rows we failed to convert.
	// failedWrite: rows we converted, but could not write to Spanner.
	total := conv.Stats.Rows[srcTable]
	converted := conv.Stats.GoodRows[srcTable]
	failedConv := conv.Stats.BadRows[srcTable]
	failedWrite := badWrites[srcTable]
	if total != converted+failedConv || failedWrite > converted {
		conv.Unexpected(fmt.Sprintf("Inconsistent row counts for table %s: %d %d %d %d\n", srcTable, total, converted, failedConv, failedWrite))
	}
	tr.rows = total
	tr.badRows = failedConv + failedWrite
}
// IssueDB provides a description and severity for each schema issue.
// Note on batch: for some issues, we'd like to report just the first instance
// in a table and suppress other instances i.e. adding more instances
// of the issue in the same table has little value and could be very noisy.
// This is controlled via 'batch': if true, we count only the first instance
// for assessing warnings, and we give only the first instance in the report.
// TODO: add links in these descriptions to further documentation
// e.g. for timestamp description.
var IssueDB = map[SchemaIssue]struct {
	Brief    string // Short description of issue.
	severity severity
	batch    bool // Whether multiple instances of this issue are combined.
}{
	DefaultValue:          {Brief: "Some columns have default values which Spanner does not support", severity: warning, batch: true},
	ForeignKey:            {Brief: "Spanner does not support foreign keys", severity: warning},
	MultiDimensionalArray: {Brief: "Spanner doesn't support multi-dimensional arrays", severity: warning},
	NoGoodType:            {Brief: "No appropriate Spanner type", severity: warning},
	Numeric:               {Brief: "Spanner does not support numeric. This type mapping could lose precision and is not recommended for production use", severity: warning},
	NumericThatFits:       {Brief: "Spanner does not support numeric, but this type mapping preserves the numeric's specified precision", severity: note},
	Decimal:               {Brief: "Spanner does not support decimal. This type mapping could lose precision and is not recommended for production use", severity: warning},
	DecimalThatFits:       {Brief: "Spanner does not support decimal, but this type mapping preserves the decimal's specified precision", severity: note},
	Serial:                {Brief: "Spanner does not support autoincrementing types", severity: warning},
	AutoIncrement:         {Brief: "Spanner does not support auto_increment attribute", severity: warning},
	Timestamp:             {Brief: "Spanner timestamp is closer to PostgreSQL timestamptz", severity: note, batch: true},
	Datetime:              {Brief: "Spanner timestamp is closer to MySQL timestamp", severity: note, batch: true},
	Time:                  {Brief: "Spanner does not support time/year types", severity: note, batch: true},
	Widened:               {Brief: "Some columns will consume more storage in Spanner", severity: note, batch: true},
}
// severity classifies schema issues for reporting and rating purposes.
type severity int

const (
	warning severity = iota // Affects the conversion-quality rating.
	note                    // Informational only.
)
// analyzeCols returns information about the quality of schema mappings
// for table 'srcTable'. It assumes 'srcTable' is in the conv.SrcSchema map.
// It returns the per-column issue map, the number of source columns, and
// the warning count (see the counting note in the body).
func analyzeCols(conv *Conv, srcTable, spTable string) (map[string][]SchemaIssue, int64, int64) {
	srcSchema := conv.SrcSchema[srcTable]
	m := make(map[string][]SchemaIssue)
	warnings := int64(0)
	// warningBatcher collects batched warnings so each kind counts once
	// per table, no matter how many columns exhibit it.
	warningBatcher := make(map[SchemaIssue]bool)
	// Note on how we count warnings when there are multiple warnings
	// per column and/or multiple warnings per table.
	// non-batched warnings: count at most one warning per column.
	// batched warnings: count at most one warning per table.
	for c, l := range conv.Issues[srcTable] {
		colWarning := false
		m[c] = l
		for _, i := range l {
			switch {
			case IssueDB[i].severity == warning && IssueDB[i].batch:
				warningBatcher[i] = true
			case IssueDB[i].severity == warning && !IssueDB[i].batch:
				colWarning = true
			}
		}
		if colWarning {
			warnings++
		}
	}
	warnings += int64(len(warningBatcher))
	return m, int64(len(srcSchema.ColDefs)), warnings
}
// rateSchema returns a one-line rating of source-DB-to-Spanner schema
// conversion quality. 'cols' and 'warnings' are respectively the number
// of columns converted and the warnings encountered (both weighted by
// number of data rows). 'missingPKey' indicates whether the source DB
// schema had a primary key. 'summary' selects overall vs per-table
// phrasing for the primary-key message.
func rateSchema(cols, warnings int64, missingPKey, summary bool) string {
	pkMsg := "missing primary key"
	if summary {
		pkMsg = "some missing primary keys"
	}
	if cols == 0 {
		return "NONE (no schema found)"
	}
	switch {
	case warnings == 0:
		if missingPKey {
			return fmt.Sprintf("GOOD (all columns mapped cleanly, but %s)", pkMsg)
		}
		return "EXCELLENT (all columns mapped cleanly)"
	case good(cols, warnings):
		if missingPKey {
			return fmt.Sprintf("GOOD (most columns mapped cleanly, but %s)", pkMsg)
		}
		return "GOOD (most columns mapped cleanly)"
	case ok(cols, warnings):
		if missingPKey {
			return fmt.Sprintf("OK (some columns did not map cleanly + %s)", pkMsg)
		}
		return "OK (some columns did not map cleanly)"
	default:
		if missingPKey {
			return fmt.Sprintf("POOR (many columns did not map cleanly + %s)", pkMsg)
		}
		return "POOR (many columns did not map cleanly)"
	}
}
// rateData returns a one-line rating of data conversion quality given
// total rows and rows lost to conversion or write failures.
func rateData(rows int64, badRows int64) string {
	if rows == 0 {
		return "NONE (no data rows found)"
	}
	if badRows == 0 {
		return fmt.Sprintf("EXCELLENT (all %d rows written to Spanner)", rows)
	}
	// Detail suffix for partial success, e.g. " (99.000% of 200 rows ...)".
	detail := fmt.Sprintf(" (%s%% of %d rows written to Spanner)", pct(rows, badRows), rows)
	switch {
	case good(rows, badRows):
		return "GOOD" + detail
	case ok(rows, badRows):
		return "OK" + detail
	default:
		return "POOR" + detail
	}
}
// good reports whether failures stay strictly below 5% of the total.
// Integer division means the threshold rounds down (so totals under 20
// can never rate "good" with any failures, and never with zero either
// when total < 20, since 0 < 0 is false).
func good(total, badCount int64) bool {
	threshold := total / 20
	return badCount < threshold
}
// ok reports whether failures stay strictly below a third of the total.
// Integer division means the threshold rounds down.
func ok(total, badCount int64) bool {
	threshold := total / 3
	return badCount < threshold
}
// rateConversion combines the schema rating and, unless schemaOnly is
// set, the data rating into a short multi-line summary.
func rateConversion(rows, badRows, cols, warnings int64, missingPKey, summary bool, schemaOnly bool) string {
	var b strings.Builder
	fmt.Fprintf(&b, "Schema conversion: %s.\n", rateSchema(cols, warnings, missingPKey, summary))
	if !schemaOnly {
		fmt.Fprintf(&b, "Data conversion: %s.\n", rateData(rows, badRows))
	}
	return b.String()
}
// GenerateSummary produces the overall conversion rating across all
// tables, weighting each table's schema stats by its row count.
func GenerateSummary(conv *Conv, r []tableReport, badWrites map[string]int64) string {
	var (
		cols        int64
		warnings    int64
		missingPKey bool
	)
	for _, t := range r {
		// Weight column stats by table size; empty tables count as if
		// they had a single row so they still contribute.
		weight := t.rows
		if weight == 0 {
			weight = 1
		}
		cols += t.Cols * weight
		warnings += t.Warnings * weight
		if t.SyntheticPKey != "" {
			missingPKey = true
		}
	}
	// Don't use tableReport for rows/badRows stats because tableReport
	// provides per-table stats for each table in the schema i.e. it omits
	// rows for tables not in the schema. To handle this corner-case, use
	// the source of truth for row stats: conv.Stats.
	rows := conv.Rows()
	badRows := conv.BadRows() // Bad rows encountered during data conversion.
	// Add in rows that converted but could not be written to Spanner.
	for _, n := range badWrites {
		badRows += n
	}
	return rateConversion(rows, badRows, cols, warnings, missingPKey, true, conv.SchemaMode())
}
// IgnoredStatements lists (sorted, human-readable) the kinds of source
// DB statements that were seen in the input but deliberately not
// converted. Both PostgreSQL- and MySQL-style statement names map to
// the same descriptions.
func IgnoredStatements(conv *Conv) (l []string) {
	descriptions := map[string]string{
		"CreateFunctionStmt":  "functions",
		"CreateSeqStmt":       "sequences",
		"CreateSequenceStmt":  "sequences",
		"CreatePLangStmt":     "procedures",
		"CreateProcedureStmt": "procedures",
		"CreateTrigStmt":      "triggers",
		"IndexStmt":           "(non-primary) indexes",
		"CreateIndexStmt":     "(non-primary) indexes",
		"ViewStmt":            "views",
		"CreateViewStmt":      "views",
	}
	for s := range conv.Stats.Statement {
		if desc, found := descriptions[s]; found {
			l = append(l, desc)
		}
	}
	sort.Strings(l)
	return l
}
// writeStmtStats writes the "Statements Processed" section: a per-type
// breakdown (schema/data/skip/error counts) of all statements seen in
// the dump input, sorted by statement name, plus a pointer to the
// relevant parser library's documentation.
func writeStmtStats(driverName string, conv *Conv, w *bufio.Writer) {
	// stat pairs a statement type with its total occurrence count,
	// used only for sorting the output rows.
	type stat struct {
		statement string
		count     int64
	}
	var l []stat
	for s, x := range conv.Stats.Statement {
		l = append(l, stat{s, x.Schema + x.Data + x.Skip + x.Error})
	}
	// Sort by alphabetical order of statements.
	sort.Slice(l, func(i, j int) bool {
		return l[i].statement < l[j].statement
	})
	writeHeading(w, "Statements Processed")
	w.WriteString("Analysis of statements in " + driverName + " output, broken down by statement type.\n")
	w.WriteString(" schema: statements successfully processed for Spanner schema information.\n")
	w.WriteString(" data: statements successfully processed for data.\n")
	w.WriteString(" skip: statements not relevant for Spanner schema or data.\n")
	w.WriteString(" error: statements that could not be processed.\n")
	w.WriteString(" --------------------------------------\n")
	fmt.Fprintf(w, " %6s %6s %6s %6s %s\n", "schema", "data", "skip", "error", "statement")
	w.WriteString(" --------------------------------------\n")
	for _, x := range l {
		s := conv.Stats.Statement[x.statement]
		fmt.Fprintf(w, " %6d %6d %6d %6d %s\n", s.Schema, s.Data, s.Skip, s.Error, x.statement)
	}
	// Point readers at the parser whose statement names appear above.
	if driverName == "pg_dump" {
		w.WriteString("See github.com/pganalyze/pg_query_go for definitions of statement types\n")
		w.WriteString("(pganalyze/pg_query_go is the library we use for parsing pg_dump output).\n")
		w.WriteString("\n")
	} else if driverName == "mysqldump" {
		w.WriteString("See https://github.com/pingcap/parser for definitions of statement types\n")
		w.WriteString("(pingcap/parser is the library we use for parsing mysqldump output).\n")
		w.WriteString("\n")
	}
}
// writeUnexpectedConditions writes the "Unexpected Conditions"
// debugging section: a count of each unexpected condition recorded in
// conv.Stats.Unexpected, prefaced by a driver-specific explanation,
// plus a note about statement-boundary reparse events if any occurred.
func writeUnexpectedConditions(driverName string, conv *Conv, w *bufio.Writer) {
	// reparseInfo is shared between the empty and non-empty cases so the
	// reparse note is always printed last.
	reparseInfo := func() {
		if conv.Stats.Reparsed > 0 {
			fmt.Fprintf(w, "Note: there were %d %s reparse events while looking for statement boundaries.\n\n", conv.Stats.Reparsed, driverName)
		}
	}
	writeHeading(w, "Unexpected Conditions")
	if len(conv.Stats.Unexpected) == 0 {
		w.WriteString("There were no unexpected conditions encountered during processing.\n\n")
		reparseInfo()
		return
	}
	// Driver-specific preamble explaining why unexpected AST nodes occur.
	switch driverName {
	case "mysqldump":
		w.WriteString("For debugging only. This section provides details of unexpected conditions\n")
		w.WriteString("encountered as we processed the mysqldump data. In particular, the AST node\n")
		w.WriteString("representation used by the pingcap/parser library used for parsing\n")
		w.WriteString("mysqldump output is highly permissive: almost any construct can appear at\n")
		w.WriteString("any node in the AST tree. The list details all unexpected nodes and\n")
		w.WriteString("conditions.\n")
	case "pg_dump":
		w.WriteString("For debugging only. This section provides details of unexpected conditions\n")
		w.WriteString("encountered as we processed the pg_dump data. In particular, the AST node\n")
		w.WriteString("representation used by the pganalyze/pg_query_go library used for parsing\n")
		w.WriteString("pg_dump output is highly permissive: almost any construct can appear at\n")
		w.WriteString("any node in the AST tree. The list details all unexpected nodes and\n")
		w.WriteString("conditions.\n")
	default:
		w.WriteString("For debugging only. This section provides details of unexpected conditions\n")
		w.WriteString("encountered as we processed the " + driverName + " data. The list details\n")
		w.WriteString("all unexpected conditions\n")
	}
	w.WriteString(" --------------------------------------\n")
	fmt.Fprintf(w, " %6s %s\n", "count", "condition")
	w.WriteString(" --------------------------------------\n")
	// Map iteration order is random; output order of conditions is
	// therefore non-deterministic (acceptable for a debug section).
	for s, n := range conv.Stats.Unexpected {
		fmt.Fprintf(w, " %6d %s\n", n, s)
	}
	w.WriteString("\n")
	reparseInfo()
}
// justifyLines writes s out to w, adding newlines between words
// to keep line length under 'limit'. Newlines are indented
// 'indent' spaces.
func justifyLines(w *bufio.Writer, s string, limit int, indent int) {
n := 0
startOfLine := true
words := strings.Split(s, " ") // This only handles spaces (newlines, tabs ignored).
for _, x := range words {
if n+len(x) > limit && !startOfLine {
w.WriteString("\n")
w.WriteString(strings.Repeat(" ", indent))
n = indent
startOfLine = true
}
if startOfLine {
w.WriteString(x)
n += len(x)
} else {
w.WriteString(" " + x)
n += len(x) + 1
}
startOfLine = false
}
}
// pct formats (total-bad)/total as a percentage string, showing more
// decimal places as the value approaches 100% so near-perfect rates
// don't round up to a misleading "100". Zero bad rows (or a zero
// total) always reads as "100".
func pct(total, bad int64) string {
	if bad == 0 || total == 0 {
		return "100"
	}
	p := 100.0 * float64(total-bad) / float64(total)
	switch {
	case p > 99.9:
		return fmt.Sprintf("%2.5f", p)
	case p > 95.0:
		return fmt.Sprintf("%2.3f", p)
	default:
		return fmt.Sprintf("%2.0f", p)
	}
}
func writeHeading(w *bufio.Writer, s string) {
w.WriteString(strings.Join([]string{
"----------------------------\n",
s, "\n",
"----------------------------\n"}, ""))
}
|
package accounts
import (
"github.com/iotaledger/wasp/packages/coretypes/coreutil"
"github.com/iotaledger/wasp/packages/hashing"
)
const (
	// Name is the registered name of the core accounts contract.
	Name        = "accounts"
	description = "Chain account ledger contract"
)

var (
	// Interface describes the accounts contract; its program hash is
	// derived deterministically from the contract name.
	Interface = &coreutil.ContractInterface{
		Name:        Name,
		Description: description,
		ProgramHash: hashing.HashStrings(Name),
	}
)
// init registers the contract's entry points on Interface: view
// (read-only) functions for balances, total assets and accounts, and
// full functions for deposits and withdrawals. The handlers
// (initialize, getBalance, ...) are defined elsewhere in this package.
func init() {
	Interface.WithFunctions(initialize, []coreutil.ContractFunctionInterface{
		coreutil.ViewFunc(FuncBalance, getBalance),
		coreutil.ViewFunc(FuncTotalAssets, getTotalAssets),
		coreutil.ViewFunc(FuncAccounts, getAccounts),
		coreutil.Func(FuncDeposit, deposit),
		coreutil.Func(FuncWithdrawToAddress, withdrawToAddress),
		coreutil.Func(FuncWithdrawToChain, withdrawToChain),
	})
}
// Entry-point names and request-parameter keys of the accounts contract.
const (
	FuncBalance           = "balance"
	FuncTotalAssets       = "totalAssets"
	FuncDeposit           = "deposit"
	FuncWithdrawToAddress = "withdrawToAddress"
	FuncWithdrawToChain   = "withdrawToChain"
	FuncAccounts          = "accounts"
	// ParamAgentID is the request parameter carrying the target agent ID.
	ParamAgentID = "a"
)
|
package main
//region Usings
import "github.com/ravendb/ravendb-go-client"
//endregion
var globalDocumentStore *ravendb.DocumentStore
// main initializes the shared document store, renames the company
// stored as "companies/5-A", then releases the store. Errors from the
// setup and edit steps are discarded, matching the example's original
// behavior.
func main() {
	_, _ = createDocumentStore()
	_ = editDocument("newCompanyName")
	globalDocumentStore.Close()
}
// createDocumentStore lazily initializes the process-wide DocumentStore
// pointed at the local RavenDB node and the "testGO" database.
// Repeated calls return the already-initialized store.
func createDocumentStore() (*ravendb.DocumentStore, error) {
	if globalDocumentStore != nil {
		return globalDocumentStore, nil
	}
	store := ravendb.NewDocumentStore([]string{"http://localhost:8080"}, "testGO")
	if err := store.Initialize(); err != nil {
		return nil, err
	}
	globalDocumentStore = store
	return globalDocumentStore, nil
}
//region Demo
// editDocument loads the company document "companies/5-A", sets its
// Name to companyName, and saves the change back to the server in one
// session. A missing document is treated as a no-op success.
func editDocument(companyName string) error {
	session, err := globalDocumentStore.OpenSession("")
	if err != nil {
		return err
	}
	defer session.Close()
	//region Step_1
	// Load the document into a typed struct; company stays nil if the
	// document does not exist.
	var company *Company
	err = session.Load(&company, "companies/5-A")
	if err != nil {
		return err
	}
	if company == nil {
		return nil
	}
	//endregion
	//region Step_2
	company.Name = companyName
	//endregion
	//region Step_3
	// Persist all changes tracked by this session in one round trip.
	err = session.SaveChanges()
	if err != nil {
		return err
	}
	//endregion
	return nil
}
// Company is the document model stored in RavenDB; the client maps the
// document identity into ID.
type Company struct {
	ID    string
	Name  string
	Phone string
}
//endregion
|
package main
import (
"fmt"
"math/rand"
"time"
)
// Metric is a single measurement payload to be uploaded in bulk.
type Metric string
// BulkUploadMessages will batch up to 10 messages from ch and send
// them to upload(). Rather than block for all 10 messages, it will
// call upload() directly with any number of Metrics if ch is empty.
// It returns once ch is closed and drained.
func BulkUploadMessages(ch <-chan Metric) {
	const batchLimit = 10
	batch := make([]Metric, 0, batchLimit)
	for first := range ch {
		// Start a fresh batch with the message just received, reusing
		// the slice's backing array between posts.
		batch = append(batch[:0], first)
	fill:
		for len(batch) < batchLimit {
			select {
			case m, open := <-ch:
				if !open {
					break fill
				}
				batch = append(batch, m)
			default:
				// Channel momentarily empty: ship what we have rather
				// than waiting for a full batch.
				break fill
			}
		}
		upload(batch)
	}
}
// upload simulates posting a batch by printing its size.
func upload(bulkPost []Metric) {
	size := len(bulkPost)
	fmt.Printf("HTTP POST len=%d\n", size)
}
// ProduceMetrics sends "hello world" 50 times to ch, occasionally
// (with probability 1/10 per send) sleeping for a nanosecond, and
// closes ch when done.
func ProduceMetrics(ch chan<- Metric) {
	defer close(ch)
	for sent := 0; sent < 50; sent++ {
		ch <- Metric("hello world")
		if rand.Intn(10) == 0 {
			time.Sleep(time.Nanosecond)
		}
	}
}
// main wires a buffered metric pipeline: one producer goroutine and a
// batching uploader that drains until the channel is closed.
func main() {
	metrics := make(chan Metric, 1024)
	go ProduceMetrics(metrics)
	BulkUploadMessages(metrics)
}
|
// @Description rbac中间件
// @Author jiangyang
// @Created 2020/11/17 11:32 上午
package middlewares
import (
"net/http"
"github.com/gin-gonic/gin"
core "github.com/comeonjy/util/ctx"
"github.com/comeonjy/util/errno"
"github.com/comeonjy/util/jwt"
"github.com/comeonjy/util/tool"
)
// Rbac returns a gin middleware enforcing role-based access control.
// It reads the "business" value previously stored on the context (by an
// earlier middleware — presumably after JWT parsing; confirm against
// the middleware chain) and, when checkFunc is non-nil, asks it to
// authorize the request URL; failures abort with HTTP 403. A nil
// checkFunc only requires that the business value exists.
func Rbac(checkFunc func(interface{}, string) error) func(context *gin.Context) {
	return func(context *gin.Context) {
		ctx := core.Context{
			Context: context,
		}
		// Reject outright if no business payload was attached upstream.
		bus, exists := ctx.Get("business")
		if !exists {
			ctx.Fail(errno.BusNotFound)
			return
		}
		if checkFunc != nil {
			if err := checkFunc(bus, ctx.Request.URL.String()); err != nil {
				ctx.Fail(err, http.StatusForbidden)
				return
			}
		}
		ctx.Next()
	}
}
// Example:
// checkFunc is a sample authorization callback for Rbac.
// bus: the business payload (type interface{}) stored on the ctx.
func checkFunc(bus interface{}, url string) error {
	// Decode the opaque payload into the JWT business struct.
	b := jwt.Business{}
	if err := tool.InterfaceToPointer(&b, bus); err != nil {
		return err
	}
	// TODO: actual permission check — ready to use out of the box.
	return nil
}
|
//一个简单的web服务
package main
import (
	"log"
	"net/http"
)
//请求相应
func hello66(res http.ResponseWriter, req *http.Request) {
res.Header().Set("Content-Type", "text/plain")
res.Write([]byte("Hello world\n"))
}
// main registers hello66 for all paths and serves HTTP on port 5000.
// ListenAndServe only returns on failure, so any return value is fatal;
// the original silently discarded it, hiding e.g. "port in use" errors.
func main() {
	http.HandleFunc("/", hello66)
	log.Fatal(http.ListenAndServe(":5000", nil))
}
|
package bmc
import (
pb "github.com/stopa323/kimbap/api/bmc"
)
// ConvertGofishPowerStateToProto translates a gofish power-state string
// into its protobuf PowerStatus enum; unrecognized strings map to
// PowerStatus_UNKNOWN.
func ConvertGofishPowerStateToProto(status string) pb.PowerStatus {
	states := map[string]pb.PowerStatus{
		"On":          pb.PowerStatus_ON,
		"Off":         pb.PowerStatus_OFF,
		"PoweringOn":  pb.PowerStatus_POWERING_ON,
		"PoweringOff": pb.PowerStatus_POWERING_OFF,
	}
	if v, found := states[status]; found {
		return v
	}
	return pb.PowerStatus_UNKNOWN
}
|
package linkedlist
// mergeTwoLists1 merges two sorted lists iteratively, reusing the
// existing nodes. Ties prefer nodes from l1. Returns the merged head.
func mergeTwoLists1(l1 *ListNode, l2 *ListNode) *ListNode {
	if l1 == nil {
		return l2
	}
	if l2 == nil {
		return l1
	}
	// Pick the smaller head as the start of the merged list.
	var head *ListNode
	if l1.Val <= l2.Val {
		head, l1 = l1, l1.Next
	} else {
		head, l2 = l2, l2.Next
	}
	tail := head
	// Repeatedly splice the smaller remaining node onto the tail.
	for l1 != nil && l2 != nil {
		if l1.Val <= l2.Val {
			tail.Next, l1 = l1, l1.Next
		} else {
			tail.Next, l2 = l2, l2.Next
		}
		tail = tail.Next
	}
	// At most one list still has nodes; attach its remainder.
	if l1 != nil {
		tail.Next = l1
	} else if l2 != nil {
		tail.Next = l2
	}
	return head
}
// mergeTwoLists2 merges two sorted lists recursively, reusing the
// existing nodes. Ties prefer nodes from l1.
func mergeTwoLists2(l1 *ListNode, l2 *ListNode) *ListNode {
	switch {
	case l1 == nil:
		return l2
	case l2 == nil:
		return l1
	case l1.Val > l2.Val:
		// l2's head is strictly smaller: it leads, and its tail is the
		// merge of everything else.
		l2.Next = mergeTwoLists2(l1, l2.Next)
		return l2
	default:
		l1.Next = mergeTwoLists2(l1.Next, l2)
		return l1
	}
}
// main demonstrates merging two sample sorted lists.
// NOTE(review): this file declares `package linkedlist`, so this `main`
// will not run as a program entry point — presumably leftover demo
// code; confirm whether it belongs in package main or a test.
// newListNodes and print are helpers defined elsewhere in the package.
func main() {
	l1 := newListNodes([]int{10, 20, 30, 40, 47, 81}, false)
	print(l1)
	l2 := newListNodes([]int{11, 20, 22, 23, 30, 50, 51, 60, 81}, false)
	print(l2)
	l := mergeTwoLists1(l1, l2)
	print(l)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.