text stringlengths 11 4.05M |
|---|
package gotoml
import (
"strconv"
"time"
)
// GetString looks up key and returns its raw string value.
// A KeyNotFoundError is returned when the key is absent.
func (m TOMLMap) GetString(key string) (s string, e error) {
	var ok bool
	if s, ok = m[key]; !ok {
		e = NewKeyNotFoundError(key, "string")
	}
	return
}
// GetBool looks up key and interprets its value as a boolean.
// Only the exact literals "true" and "false" are accepted; any other
// value yields an InvalidTypeError, and a missing key yields a
// KeyNotFoundError.
func (m TOMLMap) GetBool(key string) (b bool, e error) {
	raw, ok := m[key]
	if !ok {
		return false, NewKeyNotFoundError(key, "bool")
	}
	if raw == "true" {
		return true, nil
	}
	if raw == "false" {
		return false, nil
	}
	return false, NewInvalidTypeError(key, raw, "bool")
}
// GetInt64 looks up key and parses its value as a base-10 64-bit integer.
func (m TOMLMap) GetInt64(key string) (i int64, e error) {
	raw, ok := m[key]
	if !ok {
		return 0, NewKeyNotFoundError(key, "int64")
	}
	return strconv.ParseInt(raw, 10, 64)
}
// GetFloat64 looks up key and parses its value as a 64-bit float.
func (m TOMLMap) GetFloat64(key string) (f float64, e error) {
	raw, ok := m[key]
	if !ok {
		return 0, NewKeyNotFoundError(key, "float64")
	}
	return strconv.ParseFloat(raw, 64)
}
// GetTime looks up key and parses its value as an RFC 3339 timestamp.
func (m TOMLMap) GetTime(key string) (t time.Time, e error) {
	raw, ok := m[key]
	if !ok {
		return time.Time{}, NewKeyNotFoundError(key, "time")
	}
	return time.Parse(time.RFC3339, raw)
}
|
package main
import (
"fmt"
"os"
"strconv"
)
// sieve returns a slice of length max+1 where index i is true exactly
// when i is prime (Sieve of Eratosthenes).
//
// Fix: the original initialized every index to true and never cleared
// 0 and 1, so the returned slice wrongly marked them prime. Here only
// indices >= 2 start as candidates.
func sieve(max int) []bool {
	prime := make([]bool, max+1)
	for i := 2; i <= max; i++ {
		prime[i] = true
	}
	for p := 2; p*p <= max; p++ {
		if prime[p] {
			// Start crossing off at p*p; smaller multiples were
			// already removed by smaller primes.
			for multiple := p * p; multiple <= max; multiple += p {
				prime[multiple] = false
			}
		}
	}
	return prime
}
// main parses the single command-line argument as the sieve upper bound
// and prints every prime up to and including it.
func main() {
	args := os.Args[1:]
	if len(args) != 1 {
		fmt.Println("[usage error] - please provide an upper limit.")
		fmt.Println("Try again, such as: ~ ./sieve 123")
		os.Exit(-1)
	}
	// Fix: the original discarded Atoi's error, silently sieving up to 0
	// for non-numeric input. Reject bad input explicitly instead.
	max, err := strconv.Atoi(args[0])
	if err != nil {
		fmt.Printf("[usage error] - %q is not a valid integer.\n", args[0])
		os.Exit(-1)
	}
	fmt.Printf("[Sieve of Eratosthenes] - Find All Primes To An Upper Bound Of %v.\n", max)
	res := sieve(max)
	fmt.Print("Found all prime numbers up to max: ")
	for i := 2; i < len(res); i++ {
		if res[i] {
			fmt.Printf("%v ", i)
		}
	}
	fmt.Print("\n\n")
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//99. Recover Binary Search Tree
//Two elements of a binary search tree (BST) are swapped by mistake.
//Recover the tree without changing its structure.
//Example 1:
//Input: [1,3,null,null,2]
// 1
// /
// 3
// \
// 2
//Output: [3,1,null,null,2]
// 3
// /
// 1
// \
// 2
//Example 2:
//Input: [3,1,4,null,null,2]
// 3
// / \
//1 4
// /
// 2
//Output: [2,1,4,null,null,3]
// 2
// / \
//1 4
// /
// 3
//Follow up:
//A solution using O(n) space is pretty straight forward.
//Could you devise a constant space solution?
///**
// * Definition for a binary tree node.
// * type TreeNode struct {
// * Val int
// * Left *TreeNode
// * Right *TreeNode
// * }
// */
//func recoverTree(root *TreeNode) {
//}
// Time Is Money |
package main
import (
"fmt"
)
// func main() {
// // v := "/a/b/c/"
// // if strings.HasSuffix(v, "/") {
// // v = strings.TrimSuffix(v, "/")
// // }
// // fmt.Println(v)
//
// s := fmt.Sprintf("%%%s%%", 433)
// fmt.Println(string(s))
// }
// TestStruck is a tiny demo type that stores a single message.
type TestStruck struct {
	msg string
}

// Read prints the stored message, or a placeholder string when nothing
// has been written yet.
func (t *TestStruck) Read() {
	if t.msg == "" {
		fmt.Println("wwwwwwwwwwww")
		return
	}
	fmt.Println(t.msg)
}

// Write stores msg for a later Read.
func (t *TestStruck) Write(msg string) {
	t.msg = msg
}
// main demonstrates that a nil pointer compares equal to nil; the check
// short-circuits before any dereference could happen.
func main() {
	// Earlier experiments (calling Read on a zero-value TestStruck, a nil
	// *TestStruck, fmt.Sprintf formatting) were commented out; the only
	// live behavior is the nil-pointer check below.
	var l *string
	if l == nil {
		fmt.Println("nil=======")
		return
	}
}
|
package main
import (
"fmt"
"bytes"
)
// Writer consumes a byte slice and reports how many bytes were accepted,
// mirroring the io.Writer contract for this demo.
type Writer interface {
	Write([]byte) (int, error)
}

// Closer flushes and releases whatever a writer still holds.
type Closer interface {
	Close() (error)
}

// WriterCloser combines Writer and Closer via interface embedding.
type WriterCloser interface {
	Writer
	Closer
}
// BufferedWriterCloser accumulates written bytes in an internal buffer and
// flushes them to stdout in fixed 8-byte chunks.
type BufferedWriterCloser struct {
	buffer *bytes.Buffer
}

// Write appends data to the internal buffer, then drains and prints the
// buffer 8 bytes at a time while more than 8 bytes remain. It returns the
// number of bytes buffered from data.
func (bwc *BufferedWriterCloser) Write(data []byte) (int, error) {
	n, err := bwc.buffer.Write(data)
	if err != nil {
		return 0, err
	}
	v := make([]byte, 8)
	for bwc.buffer.Len() > 8 {
		// BUG FIX: the original read into data (the caller's input slice)
		// and then printed v, which was never filled — output was 8 NUL
		// bytes and the buffer drained in wrong-sized steps. Read into v
		// so the printed chunk is the chunk that was drained.
		if _, err := bwc.buffer.Read(v); err != nil {
			return 0, err
		}
		if _, err := fmt.Println(string(v)); err != nil {
			return 0, err
		}
	}
	return n, nil
}

// Close drains any bytes still buffered, 8 at a time, printing each chunk.
// NOTE(review): this prints the raw byte slice (decimal values) rather than
// text like Write does — possibly intentional for the demo; confirm.
func (bwc *BufferedWriterCloser) Close() error {
	for bwc.buffer.Len() > 0 {
		data := bwc.buffer.Next(8)
		if _, err := fmt.Println(data); err != nil {
			return err
		}
	}
	return nil
}

// NewBufferedWriterCloser returns a BufferedWriterCloser with an empty buffer.
func NewBufferedWriterCloser() *BufferedWriterCloser {
	return &BufferedWriterCloser{buffer: bytes.NewBuffer([]byte{})}
}
func main(){
var wc WriterCloser = NewBufferedWriterCloser()
wc.Write([]byte("Hey there, I'm a SDE Intern in Qube Cinemas"))
wc.Close()
} |
package message
const (
	// MaxMessageLength limit the Marshaled message length
	MaxMessageLength = 8192
)

// Message Type values, one per wire-format payload kind.
const (
	MessageTypeHello = iota
	MessageTypeVcardEx1
	MessageTypeVcardEx2
	MessageTypeMessage
	MessageTypeReceipt
)
|
package main
import (
"context"
"fmt"
"log"
"github.com/silviog1990/grpc-golang-course/streaming-client/ComputeAverage/computeaveragepb"
"google.golang.org/grpc"
)
// main dials the local ComputeAverage server and runs the client-stream demo.
func main() {
	conn, err := grpc.Dial("localhost:50000", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("could not connect to: %v", err)
	}
	defer conn.Close()
	doClientStream(computeaveragepb.NewComputeAverageServiceClient(conn))
}
// doClientStream streams a fixed set of integers to the server and prints
// the average the server computes after the stream is closed.
func doClientStream(c computeaveragepb.ComputeAverageServiceClient) {
	fmt.Println("Start client stream invocation")
	numbers := []int32{3, 5, 10, 2, 1, 8}
	stream, err := c.ComputeAverage(context.Background())
	if err != nil {
		log.Fatalf("error while calling ComputeAverage: %v", err)
	}
	for _, n := range numbers {
		req := &computeaveragepb.ComputeAverageRequest{
			N: n,
		}
		fmt.Printf("sending req: %v\n", req)
		// Fix: the original ignored Send's error; a broken stream would
		// only have surfaced as a confusing CloseAndRecv failure later.
		if err := stream.Send(req); err != nil {
			log.Fatalf("error while sending req: %v", err)
		}
	}
	resp, err := stream.CloseAndRecv()
	if err != nil {
		log.Fatalf("error while reading resp: %v\n", err)
	}
	fmt.Printf("Average: %v\n", resp)
}
|
package main
import (
"context"
"fmt"
"github.com/Whisker17/goMicroDemo/proto/model"
"github.com/Whisker17/goMicroDemo/proto/rpcapi"
"github.com/Whisker17/goMicroDemo/util"
"github.com/lpxxn/gomicrorpc/example2/lib"
"github.com/micro/go-micro"
"github.com/micro/go-micro/client"
"io"
"os"
"os/signal"
)
// main wires up the go-micro service, runs the four RPC demos, then blocks
// until an interrupt signal arrives.
func main() {
	// Service discovery (etcd registry) is disabled in this demo; the
	// registry options from the original are intentionally omitted.
	service := micro.NewService(
	//micro.Registry(reg),
	)
	service.Init()
	service.Client().Init(client.Retries(3),
		client.PoolSize(5))
	sayClent := rpcapi.NewSayService(util.ServiceName, service.Client())
	SayHello(sayClent)
	NotifyTopic(service)
	GetStreamValues(sayClent)
	TsBidirectionalStream(sayClent)
	// Fix: signal.Notify requires a buffered channel — it does not block
	// when delivering, so an unbuffered channel can miss the signal.
	st := make(chan os.Signal, 1)
	signal.Notify(st, os.Interrupt)
	<-st
	fmt.Println("server stopped.....")
}
// SayHello performs the plain unary Hello RPC and prints the response.
// Any RPC failure is fatal for this demo.
func SayHello(client rpcapi.SayService) {
	resp, err := client.Hello(context.Background(), &model.SayParam{Msg: "hello server"})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp)
}
// test stream
func GetStreamValues(client rpcapi.SayService) {
rspStream, err := client.Stream(context.Background(), &model.SRequest{Count: 10})
if err != nil {
panic(err)
}
idx := 1
for {
rsp, err := rspStream.Recv()
if err == io.EOF {
break
} else if err != nil {
panic(err)
}
fmt.Printf("test stream get idx %d data %v\n", idx, rsp)
idx++
}
// close the stream
if err := rspStream.Close(); err != nil {
fmt.Println("stream close err:", err)
}
fmt.Println("Read Value End")
}
// TsBidirectionalStream sends seven counter values over a bidirectional
// stream, reading one response after each send (lock-step), then closes
// the stream.
func TsBidirectionalStream(client rpcapi.SayService) {
	rspStream, err := client.BidirectionalStream(context.Background())
	if err != nil {
		panic(err)
	}
	for i := int64(0); i < 7; i++ {
		// A failed send aborts the loop but still falls through to Close.
		if err := rspStream.Send(&model.SRequest{Count: i}); err != nil {
			fmt.Println("send error", err)
			break
		}
		// One response is expected per request; EOF means the server
		// ended the conversation early.
		rsp, err := rspStream.Recv()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("test stream get idx %d data %v\n", i, rsp)
	}
	// close the stream
	if err := rspStream.Close(); err != nil {
		fmt.Println("stream close err:", err)
	}
	fmt.Println("TsBidirectionalStream: Read Value End")
}
// NotifyTopic publishes a random message to the demo topic.
func NotifyTopic(service micro.Service) {
	p := micro.NewPublisher(util.Topic, service.Client())
	// Fix: the original ignored Publish's error, so a dead broker failed
	// silently.
	if err := p.Publish(context.TODO(), &model.SayParam{Msg: lib.RandomStr(lib.Random(3, 10))}); err != nil {
		fmt.Println("publish err:", err)
	}
}
|
package common
import "strconv"
// PriceLevel is one rung of an order book: a price and the quantity
// resting at that price, both kept as the exchange's string encoding.
type PriceLevel struct {
	Price    string
	Quantity string
}

// Parse converts the level's Price and Quantity strings to float64 values
// and returns them. If the price fails to parse, both results are zero;
// if only the quantity fails, the parsed price is still returned alongside
// the error.
func (p *PriceLevel) Parse() (float64, float64, error) {
	price, err := strconv.ParseFloat(p.Price, 64)
	if err != nil {
		return 0, 0, err
	}
	qty, err := strconv.ParseFloat(p.Quantity, 64)
	if err != nil {
		return price, 0, err
	}
	return price, qty, nil
}
|
package main
import (
"context"
pb "github.com/little-go/practices/grpc/helloworld/proto"
zipkin "github.com/openzipkin/zipkin-go"
zipkingrpc "github.com/openzipkin/zipkin-go/middleware/grpc"
httpReporter "github.com/openzipkin/zipkin-go/reporter/http"
"google.golang.org/grpc"
"log"
"net"
)
const (
port = ":50051"
)
// server implements the Greeter gRPC service.
type server struct {
	pb.UnimplementedGreeterServer
}

// SayHello logs the caller's name and returns a greeting.
func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloResponse, error) {
	log.Printf("Received: %v", in.GetName())
	// Fix: "Hello" + name rendered as "HelloAlice"; add the missing space.
	return &pb.HelloResponse{Message: "Hello " + in.GetName()}, nil
}
// main starts a Greeter gRPC server on port 50051 with zipkin tracing
// enabled via a gRPC stats handler.
func main() {
	// simulate start service
	tcp, err := net.Listen("tcp", port)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	reporter := httpReporter.NewReporter("http://localhost:9411")
	// Fix: the reporter owns a background sender; close it on shutdown so
	// buffered spans are flushed (the original leaked it).
	defer reporter.Close()
	tracer, err := zipkin.NewTracer(reporter)
	if err != nil {
		log.Fatalf("failed to start zipkin: %v", err)
	}
	// new GRPC SERVER
	s := grpc.NewServer(grpc.StatsHandler(zipkingrpc.NewServerHandler(tracer)))
	// register our service
	pb.RegisterGreeterServer(s, new(server))
	// listen call
	if err := s.Serve(tcp); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package aiven
import (
"fmt"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)
var aivenAccountTeamMemberSchema = map[string]*schema.Schema{
"account_id": {
Type: schema.TypeString,
Description: "Account id",
Required: true,
ForceNew: true,
},
"team_id": {
Type: schema.TypeString,
Description: "Account team id",
Required: true,
ForceNew: true,
},
"user_email": {
Type: schema.TypeString,
Description: "Team invite user email",
Required: true,
ForceNew: true,
},
"invited_by_user_email": {
Type: schema.TypeString,
Description: "Team invited by user email",
Optional: true,
Computed: true,
},
"accepted": {
Type: schema.TypeBool,
Description: "Team member invitation status",
Optional: true,
Computed: true,
},
"create_time": {
Type: schema.TypeString,
Description: "Time of creation",
Optional: true,
Computed: true,
},
}
// resourceAccountTeamMember defines the Terraform resource for a single
// account team member: its schema, CRUD entry points, and import support.
// Note that Update reuses Create — a membership is re-invited rather than
// modified in place (all schema fields are ForceNew or Computed).
func resourceAccountTeamMember() *schema.Resource {
	return &schema.Resource{
		Create: resourceAccountTeamMemberCreate,
		Read: resourceAccountTeamMemberRead,
		Update: resourceAccountTeamMemberCreate,
		Delete: resourceAccountTeamMemberDelete,
		Exists: resourceAccountTeamMemberExists,
		Importer: &schema.ResourceImporter{
			State: resourceAccountTeamMemberState,
		},
		Schema: aivenAccountTeamMemberSchema,
	}
}
// resourceAccountTeamMemberCreate sends a team invitation for the
// configured user email, sets the composite resource ID, and reads the
// resource back to populate state.
func resourceAccountTeamMemberCreate(d *schema.ResourceData, m interface{}) error {
	client := m.(*aiven.Client)
	accountID := d.Get("account_id").(string)
	teamID := d.Get("team_id").(string)
	email := d.Get("user_email").(string)
	if err := client.AccountTeamMembers.Invite(accountID, teamID, email); err != nil {
		return err
	}
	d.SetId(buildResourceID(accountID, teamID, email))
	return resourceAccountTeamMemberRead(d, m)
}
// resourceAccountTeamMemberRead refreshes state for one team member. The
// user can appear in exactly one of two server-side lists: the pending
// invitations (invite sent, not yet accepted) or the member list (invite
// accepted). The "accepted" attribute is derived from which list the
// email is found in; if it is in neither, the read fails.
func resourceAccountTeamMemberRead(d *schema.ResourceData, m interface{}) error {
	var found bool
	client := m.(*aiven.Client)
	accountId, teamId, userEmail := splitResourceID3(d.Id())
	// First, look for a still-pending invitation.
	r, err := client.AccountTeamInvites.List(accountId, teamId)
	if err != nil {
		return err
	}
	for _, invite := range r.Invites {
		if invite.UserEmail == userEmail {
			found = true
			if err := d.Set("account_id", invite.AccountId); err != nil {
				return err
			}
			if err := d.Set("team_id", invite.TeamId); err != nil {
				return err
			}
			if err := d.Set("user_email", invite.UserEmail); err != nil {
				return err
			}
			if err := d.Set("invited_by_user_email", invite.InvitedByUserEmail); err != nil {
				return err
			}
			if err := d.Set("create_time", invite.CreateTime.String()); err != nil {
				return err
			}
			// if a user is in the invitations list, it means invitation was sent but not yet accepted
			if err := d.Set("accepted", false); err != nil {
				return err
			}
		}
	}
	// Not a pending invite — check the accepted-member list.
	if !found {
		rm, err := client.AccountTeamMembers.List(accountId, teamId)
		if err != nil {
			return err
		}
		for _, member := range rm.Members {
			if member.UserEmail == userEmail {
				found = true
				if err := d.Set("account_id", accountId); err != nil {
					return err
				}
				if err := d.Set("team_id", member.TeamId); err != nil {
					return err
				}
				if err := d.Set("user_email", member.UserEmail); err != nil {
					return err
				}
				if err := d.Set("create_time", member.CreateTime.String()); err != nil {
					return err
				}
				// when a user accepts an invitation, it will appear in the member's list
				// and disappear from invitations list
				if err := d.Set("accepted", true); err != nil {
					return err
				}
			}
		}
	}
	if !found {
		return fmt.Errorf("cannot find user invitation for %s", d.Id())
	}
	return nil
}
// resourceAccountTeamMemberDelete removes both the pending invitation and,
// if the user already accepted, the team membership itself. A 404 from
// either delete is treated as already-gone and ignored.
func resourceAccountTeamMemberDelete(d *schema.ResourceData, m interface{}) error {
	client := m.(*aiven.Client)
	accountId, teamId, userEmail := splitResourceID3(d.Id())
	// delete account team user invitation
	if err := client.AccountTeamInvites.Delete(accountId, teamId, userEmail); err != nil {
		// Fix: use the checked form of the assertion — the original
		// `err.(aiven.Error)` panics on any non-aiven error (e.g. a
		// transport failure).
		if e, ok := err.(aiven.Error); !ok || e.Status != 404 {
			return err
		}
	}
	r, err := client.AccountTeamMembers.List(accountId, teamId)
	if err != nil {
		return err
	}
	// delete account team member
	// Fix: the original loop variable `m` shadowed the meta parameter m.
	for _, member := range r.Members {
		if member.UserEmail != userEmail {
			continue
		}
		if err := client.AccountTeamMembers.Delete(splitResourceID3(d.Id())); err != nil {
			if e, ok := err.(aiven.Error); !ok || e.Status != 404 {
				return err
			}
		}
	}
	return nil
}
// resourceAccountTeamMemberExists reports whether the user appears either
// in the team's pending invitations or in its accepted member list.
func resourceAccountTeamMemberExists(d *schema.ResourceData, m interface{}) (bool, error) {
	client := m.(*aiven.Client)
	accountID, teamID, email := splitResourceID3(d.Id())
	members, err := client.AccountTeamMembers.List(accountID, teamID)
	if err != nil {
		return false, err
	}
	invites, err := client.AccountTeamInvites.List(accountID, teamID)
	if err != nil {
		return false, err
	}
	return isUserInMembersOrInvites(email, invites, members), nil
}
// isUserInMembersOrInvites reports whether email matches any pending
// invitation or any accepted member of the team.
func isUserInMembersOrInvites(
	email string,
	i *aiven.AccountTeamInvitesResponse,
	m *aiven.AccountTeamMembersResponse) bool {
	for _, inv := range i.Invites {
		if inv.UserEmail == email {
			return true
		}
	}
	for _, mem := range m.Members {
		if mem.UserEmail == email {
			return true
		}
	}
	return false
}
// resourceAccountTeamMemberState implements Terraform import by delegating
// to the regular read and returning the populated resource data.
func resourceAccountTeamMemberState(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
	if err := resourceAccountTeamMemberRead(d, m); err != nil {
		return nil, err
	}
	return []*schema.ResourceData{d}, nil
}
|
package remt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00100101 is the XML document wrapper for an ISO 20022
// remt.001.001.01 (RemittanceAdvice) message.
type Document00100101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:remt.001.001.01 Document"`
	Message *RemittanceAdviceV01 `xml:"RmtAdvc"`
}

// AddMessage allocates the document's RemittanceAdvice payload and
// returns it for the caller to populate.
func (d *Document00100101) AddMessage() *RemittanceAdviceV01 {
	d.Message = new(RemittanceAdviceV01)
	return d.Message
}
// The RemittanceAdvice message allows the originator to provide remittance details that can be associated with a payment.
type RemittanceAdviceV01 struct {
	// Set of characteristics shared by all remittance information included in the message.
	GroupHeader *iso20022.GroupHeader62 `xml:"GrpHdr"`
	// Provides information to enable the matching of an entry with the items that the associated payment is intended to settle, such as commercial invoices in an accounts' receivable system, tax obligations, or garnishment orders.
	RemittanceInformation []*iso20022.RemittanceInformation8 `xml:"RmtInf"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}

// AddGroupHeader allocates the message's group header and returns it.
func (r *RemittanceAdviceV01) AddGroupHeader() *iso20022.GroupHeader62 {
	r.GroupHeader = new(iso20022.GroupHeader62)
	return r.GroupHeader
}

// AddRemittanceInformation appends a fresh remittance-information block
// and returns it for the caller to populate.
func (r *RemittanceAdviceV01) AddRemittanceInformation() *iso20022.RemittanceInformation8 {
	newValue := new(iso20022.RemittanceInformation8)
	r.RemittanceInformation = append(r.RemittanceInformation, newValue)
	return newValue
}

// AddSupplementaryData appends a fresh supplementary-data block and
// returns it for the caller to populate.
func (r *RemittanceAdviceV01) AddSupplementaryData() *iso20022.SupplementaryData1 {
	newValue := new(iso20022.SupplementaryData1)
	r.SupplementaryData = append(r.SupplementaryData, newValue)
	return newValue
}
|
package main
import "io"
import "os"
import "fmt"
import "sync"
import "time"
import "strconv"
import "path/filepath"
import "math/rand"
import "github.com/bnclabs/gostore/api"
import "github.com/bnclabs/gostore/bubt"
import humanize "github.com/dustin/go-humanize"
func perfbubt() error {
paths := bubtpaths(options.npaths)
name := "dbperf"
//rnd := rand.New(rand.NewSource(int64(options.seed)))
msize, zsize := int64(options.msize), int64(options.zsize)
vsize, mmap := int64(options.vsize), options.mmap
bt, err := bubt.NewBubt(name, paths, msize, zsize, vsize)
if err != nil {
panic(err)
}
klen, vlen := int64(options.keylen), int64(options.vallen)
seed, n := int64(options.seed), int64(options.load)
iter := makeiterator(klen, vlen, n, delmod)
md := generatemeta(seed)
fmsg := "msize: %v zsize:%v vsize: %v mmap:%v mdsize:%v\n"
fmt.Printf(fmsg, msize, zsize, vsize, mmap, len(md))
now := time.Now()
bt.Build(iter, md)
took := time.Since(now).Round(time.Second)
bt.Close()
iter(true /*fin*/)
index, err := bubt.OpenSnapshot(name, paths, mmap)
if err != nil {
panic(err)
}
defer index.Destroy()
defer index.Close()
fmsg = "Took %v to build %v entries with footprint %v\n"
fmt.Printf(fmsg, took, n, humanize.Bytes(uint64(index.Footprint())))
if index.Count() != n {
panic(fmt.Errorf("expected %v, got %v", n, index.Count()))
} else if index.ID() != name {
panic(fmt.Errorf("expected %v, got %v", name, index.ID()))
}
var rwg sync.WaitGroup
finch := make(chan struct{})
if options.gets > 0 {
for i := 0; i < options.cpu; i++ {
go bubtGetter(index, n, seed, finch, &rwg)
rwg.Add(1)
}
}
if options.ranges > 0 {
for i := 0; i < options.cpu; i++ {
go bubtRanger(index, n, seed, finch, &rwg)
rwg.Add(1)
}
}
rwg.Wait()
close(finch)
time.Sleep(1 * time.Second)
index.Log()
//index.Validate()
fmsg = "BUBT total indexed %v items, footprint %v\n"
fmt.Printf(fmsg, index.Count(), index.Footprint())
return nil
}
type bubtgetfn = func(
*bubt.Snapshot, []byte, []byte) ([]byte, uint64, bool, bool)
var bubtgets = map[string][]bubtgetfn{
"get": []bubtgetfn{bubtGet1},
"view": []bubtgetfn{bubtGet2},
"all": []bubtgetfn{bubtGet1, bubtGet2},
}
func bubtGetter(
index *bubt.Snapshot, n, seed int64, finch chan struct{},
wg *sync.WaitGroup) {
var ngets, nmisses int64
var key []byte
g := Generatereadseq(int64(options.keylen), n, seed)
epoch, now, markercount := time.Now(), time.Now(), int64(10000000)
value := make([]byte, options.vallen)
rnd := rand.New(rand.NewSource(seed))
cs := bubtgets[options.getas]
bubtget := cs[rnd.Intn(len(cs))]
loop:
for {
ngets++
key = g(key, 0)
_, _, _, ok := bubtget(index, key, value)
if !ok {
nmisses++
}
ngm := (ngets + nmisses)
if ngm%markercount == 0 {
x := time.Since(now).Round(time.Second)
y := time.Since(epoch).Round(time.Second)
fmsg := "bubtGetter {%v items in %v} {%v:%v items in %v}\n"
fmt.Printf(fmsg, markercount, x, ngets, nmisses, y)
now = time.Now()
}
if ngm > int64(options.gets) {
break loop
}
}
took := time.Since(epoch).Round(time.Second)
wg.Done()
<-finch
fmsg := "at exit, bubtGetter %v:%v items in %v\n"
fmt.Printf(fmsg, ngets, nmisses, took)
}
// bubtGet1 reads key directly from the snapshot, filling value in place.
func bubtGet1(
	index *bubt.Snapshot, key, value []byte) ([]byte, uint64, bool, bool) {
	return index.Get(key, value)
}
// bubtGet2 reads key through a short-lived view transaction, aborting the
// view once the value is fetched. The seqno slot of the result is always 0.
func bubtGet2(
	index *bubt.Snapshot, key, value []byte) ([]byte, uint64, bool, bool) {
	v := index.View(0x1235)
	val, _, deleted, ok := v.Get(key, value)
	v.Abort()
	return val, 0, deleted, ok
}
type bubtrngfn = func(*bubt.Snapshot, []byte, []byte) int64
var bubtrngs = map[string][]bubtrngfn{
"vgn": []bubtrngfn{bubtRange1},
"vyn": []bubtrngfn{bubtRange2},
"all": []bubtrngfn{bubtRange1, bubtRange2},
}
func bubtRanger(
index *bubt.Snapshot, n, seed int64, finch chan struct{},
wg *sync.WaitGroup) {
var nranges int64
var key []byte
g := Generatereadseq(int64(options.keylen), n, seed)
rnd := rand.New(rand.NewSource(seed))
epoch, value := time.Now(), make([]byte, options.vallen)
ds := bubtrngs[options.rngas]
bubtrng := ds[rnd.Intn(len(ds))]
loop:
for {
key = g(key, 0)
n := bubtrng(index, key, value)
nranges += n
if nranges > int64(options.ranges) {
break loop
}
}
took := time.Since(epoch).Round(time.Second)
wg.Done()
<-finch
fmt.Printf("at exit, bubtRanger %v items in %v\n", nranges, took)
}
func bubtRange1(index *bubt.Snapshot, key, value []byte) (n int64) {
//fmt.Printf("bubtRange1 %q\n", key)
view := index.View(0x1236)
cur, err := view.OpenCursor(key)
if err != nil {
panic(err)
}
for i := 0; i < 100; i++ {
_, _, _, err := cur.GetNext()
if err == io.EOF {
continue
} else if err != nil {
panic(err)
}
n++
}
view.Abort()
return
}
func bubtRange2(index *bubt.Snapshot, key, value []byte) (n int64) {
//fmt.Printf("bubtRange2 %q\n", key)
view := index.View(0x1237)
cur, err := view.OpenCursor(key)
if err != nil {
panic(err)
}
for i := 0; i < 100; i++ {
_, _, _, _, err := cur.YNext(false /*fin*/)
if err == io.EOF {
continue
} else if err != nil {
panic(err)
}
n++
}
view.Abort()
return
}
func makeiterator(klen, vlen, entries, mod int64) api.EntryIterator {
g := Generateloads(klen, vlen, entries)
entry := &indexentry{
key: make([]byte, 0, 16), value: make([]byte, 0, 16),
seqno: 0, deleted: false, err: nil,
}
return func(fin bool) api.IndexEntry {
entry.key, entry.value = g(entry.key, entry.value)
if entry.key != nil {
entry.seqno += 1
x, _ := strconv.Atoi(Bytes2str(entry.key))
entry.deleted = false
if (int64(x) % 2) == mod {
entry.deleted = true
}
entry.err = nil
//fmt.Printf("iterate %q %q %v %v\n", key, value, seqno, deleted)
return entry
}
entry.key, entry.value = nil, nil
entry.seqno, entry.deleted, entry.err = 0, false, io.EOF
return entry
}
}
// generatemeta returns a deterministic pseudo-random metadata blob of up
// to 20000 lowercase ASCII letters, derived entirely from seed.
func generatemeta(seed int64) []byte {
	rng := rand.New(rand.NewSource(seed))
	blob := make([]byte, rng.Intn(20000))
	for i := range blob {
		blob[i] = byte('a' + rng.Intn(26))
	}
	return blob
}
// bubtpaths prepares npaths empty scratch directories under the system
// temp dir (named "1".."npaths") and returns their paths. Any previous
// contents are removed first; failures panic since this is benchmark setup.
func bubtpaths(npaths int) []string {
	tmp := os.TempDir()
	paths := make([]string, 0, npaths)
	for i := 1; i <= npaths; i++ {
		p := filepath.Join(tmp, fmt.Sprintf("%v", i))
		paths = append(paths, p)
		fmt.Printf("Path %v %q\n", i, p)
		if err := os.RemoveAll(p); err != nil {
			panic(err)
		}
		if err := os.MkdirAll(p, 0755); err != nil {
			panic(err)
		}
	}
	return paths
}
// indexentry is a reusable api.IndexEntry implementation backed by plain
// fields; the load iterator mutates a single instance in place to avoid
// per-entry allocation.
type indexentry struct {
	key []byte
	value []byte
	seqno uint64
	deleted bool
	err error
}

// ID returns the empty string; entries produced by the load iterator
// carry no identifier.
func (entry *indexentry) ID() string {
	return ""
}

// Key returns the key along with its seqno, deleted flag, and any pending
// error (io.EOF once the iterator is exhausted).
func (entry *indexentry) Key() ([]byte, uint64, bool, error) {
	return entry.key, entry.seqno, entry.deleted, entry.err
}

// Value returns the entry's value bytes.
func (entry *indexentry) Value() []byte {
	return entry.value
}

// Valueref returns (0, -1) — presumably meaning "no external value
// reference"; confirm against the api.IndexEntry contract.
func (entry *indexentry) Valueref() (valuelen uint64, vpos int64) {
	return 0, -1
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggfuncs
import (
"unsafe"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/chunk"
)
const (
// DefPartialResult4CumeDistSize is the size of partialResult4CumeDist
DefPartialResult4CumeDistSize = int64(unsafe.Sizeof(partialResult4CumeDist{}))
)
// cumeDist implements the CUME_DIST window function, combining the shared
// aggregate plumbing with an ORDER BY row comparer.
type cumeDist struct {
	baseAggFunc
	rowComparer
}

// partialResult4CumeDist buffers one partition's rows and tracks the
// emit position (curIdx) and the rank of the last peer group (lastRank).
type partialResult4CumeDist struct {
	curIdx int
	lastRank int
	rows []chunk.Row
}
// AllocPartialResult allocates an empty partial result and reports its
// base memory footprint.
func (*cumeDist) AllocPartialResult() (pr PartialResult, memDelta int64) {
	return PartialResult(&partialResult4CumeDist{}), DefPartialResult4CumeDistSize
}
// ResetPartialResult clears the partial result for reuse, keeping the
// rows slice's backing array to avoid reallocation.
func (*cumeDist) ResetPartialResult(pr PartialResult) {
	p := (*partialResult4CumeDist)(pr)
	p.curIdx, p.lastRank = 0, 0
	p.rows = p.rows[:0]
}
// UpdatePartialResult buffers the group's rows — CUME_DIST needs the whole
// partition before any value can be emitted. The memory delta is estimated
// at DefRowSize per buffered row.
func (*cumeDist) UpdatePartialResult(_ sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
	p := (*partialResult4CumeDist)(pr)
	p.rows = append(p.rows, rowsInGroup...)
	return int64(len(rowsInGroup)) * DefRowSize, nil
}
// AppendFinalResult2Chunk emits CUME_DIST for the row at p.curIdx:
// lastRank is advanced past every buffered row that compares equal to the
// current row, so the emitted value is rank-of-last-peer / partition-size.
// lastRank never moves backwards across calls, keeping a full partition
// pass O(n). Assumes p.rows is ordered by the window's ORDER BY — TODO
// confirm against the caller.
func (r *cumeDist) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
	p := (*partialResult4CumeDist)(pr)
	numRows := len(p.rows)
	for p.lastRank < numRows && r.compareRows(p.rows[p.curIdx], p.rows[p.lastRank]) == 0 {
		p.lastRank++
	}
	p.curIdx++
	chk.AppendFloat64(r.ordinal, float64(p.lastRank)/float64(numRows))
	return nil
}
|
package test
import (
"reflect"
"testing"
"unsafe"
"github.com/cilium/ebpf/internal/testutils"
)
// TestLoadingSpec verifies that the generated loader returns a non-nil
// spec without error, skipping on kernels that lack support.
func TestLoadingSpec(t *testing.T) {
	spec, err := loadTest()
	testutils.SkipIfNotSupported(t, err)
	switch {
	case err != nil:
		t.Fatal("Can't load spec:", err)
	case spec == nil:
		t.Fatal("Got a nil spec")
	}
}
// TestLoadingObjects loads the generated objects and checks that both the
// program (Filter) and the map (Map1) fields were populated; the objects
// are closed when the test ends.
func TestLoadingObjects(t *testing.T) {
	var objs testObjects
	err := loadTestObjects(&objs, nil)
	testutils.SkipIfNotSupported(t, err)
	if err != nil {
		t.Fatal("Can't load objects:", err)
	}
	defer objs.Close()
	if objs.Filter == nil {
		t.Error("Loading returns an object with nil programs")
	}
	if objs.Map1 == nil {
		t.Error("Loading returns an object with nil maps")
	}
}
// TestTypes checks the generated Go equivalents of the C enum and struct:
// enum values, type sizes, and field kinds must match the BTF definitions.
func TestTypes(t *testing.T) {
	if testEHOOPY != 0 {
		t.Error("Expected testEHOOPY to be 0, got", testEHOOPY)
	}
	if testEFROOD != 1 {
		// Fix: message said "to be 0" although the check is against 1.
		t.Error("Expected testEFROOD to be 1, got", testEFROOD)
	}
	e := testE(0)
	if size := unsafe.Sizeof(e); size != 4 {
		t.Error("Expected size of testE to be 4, got", size)
	}
	bf := testBarfoo{}
	if size := unsafe.Sizeof(bf); size != 16 {
		// Fix: message referred to exampleE although testBarfoo is checked.
		t.Error("Expected size of testBarfoo to be 16, got", size)
	}
	if reflect.TypeOf(bf.Bar).Kind() != reflect.Int64 {
		t.Error("Expected testBarfoo.Bar to be int64")
	}
	if reflect.TypeOf(bf.Baz).Kind() != reflect.Bool {
		t.Error("Expected testBarfoo.Baz to be bool")
	}
	if reflect.TypeOf(bf.Boo) != reflect.TypeOf(e) {
		t.Error("Expected testBarfoo.Boo to be testE")
	}
}
|
// res collects the combinations produced by genHelper; it is reset at the
// start of every generateParenthesis call. NOTE(review): package-level
// state makes this non-reentrant — fine for a single-threaded solution.
var res []string

// generateParenthesis returns every well-formed combination of n pairs of
// parentheses (LeetCode 22).
func generateParenthesis(n int) []string {
	res = make([]string, 0)
	genHelper(n, n, "")
	return res
}

// genHelper runs a DFS over partial strings: left and right count how many
// '(' and ')' characters may still be appended to cur.
func genHelper(left, right int, cur string) {
	if left == 0 && right == 0 {
		res = append(res, cur)
		return
	}
	switch {
	case left == 0:
		// Only closers remain.
		genHelper(left, right-1, cur+")")
	case left < right:
		// An unmatched '(' exists, so both moves are legal.
		genHelper(left, right-1, cur+")")
		genHelper(left-1, right, cur+"(")
	case left == right:
		// The prefix is balanced; only '(' keeps it well-formed.
		genHelper(left-1, right, cur+"(")
	}
}
|
package bidi
import (
"strings"
"github.com/gioui/uax/internal/tracing"
"golang.org/x/text/unicode/bidi"
)
// We create a set of bidi rules as layed out in UAX#9.
// To understand the rules it is probably best to consult the UAX algorithm
// description. Headers and rule names will be similar to names in UAX#9.
//
// Rules are structs which contain an action function. The rule's struct
// has an left-hand-side (LHS) part that will be matched against the input text
// (more specific: against bidi class clusters = scraps) and the rule's action will
// substitute the LHS with an appropriate RHS.
//
// Invariants:
// - The RHS must be of equal length to the LHS or shorter.
// - No LHS must be prefix of another rule's LHS, except for |LHS|=1 .
// - Every rule is required to either have RHS≠LHS or to return a jump offset ≠ 0,
// otherwise the parser will enter an infinite loop.
//
// All rules are hand-checked to not create circular identities and therefore
// are guaranteed to not send the parser into an infinite loop.
//
type bidiRule struct {
name string // name of the rule according to UAX#9
lhsLen int // number of symbols in the left hand side (LHS)
pass int // this is a 2-pass system, every rule is active during just one phase
action ruleAction // action to perform on match of LHS
}
// ruleAction is an action on bidi class intervals. Input is a slice of (consecutive)
// class intervals which have been matched. The action's task is to substitute all or some
// of the input intervals by one or more output intervals (reduce action). The ``cursor''
// will be positioned after the substitution by the parser, according to the second result
// of the action, an integer. This position hint will be negative most of the time, telling
// the parser to backtrack and try to re-apply other BiDi rules.
type ruleAction func([]scrap) ([]scrap, int, bool)
// Headers and header numbers of the following comment sections correspond to UAX#9.
// ---------------------------------------------------------------------------
// 3.3.4 Resolving Weak Types
// W1 – W3 are handled by the scanner.
// W1. Examine each nonspacing mark (NSM) in the isolating run sequence, and change the
// type of the NSM to Other Neutral if the previous character is an isolate
// initiator or PDI, and to the type of the previous character otherwise.
// W2. Search backward from each instance of a European number until the first strong type
// (R, L, AL, or sos) is found. If an AL is found, change the type of the
// European number to Arabic number.
// AL EN → AL AN
// AL NI EN → AL NI AN
// W3. Change all ALs to R.
// --- Actions ---
// W4. A single European separator between two European numbers changes to
// a European number. A single common separator between two numbers of the
// same type changes to that type.
// EN ES EN → EN EN EN
// EN CS EN → EN EN EN
// AN CS AN → AN AN AN
func ruleW4_1() (*bidiRule, []byte) {
lhs := makeLHS(bidi.EN, bidi.ES, bidi.EN)
return makeSquashRule("W4-1", lhs, bidi.EN, -2), lhs
}
func ruleW4_2() (*bidiRule, []byte) {
lhs := makeLHS(bidi.EN, bidi.CS, bidi.EN)
return makeSquashRule("W4-2", lhs, bidi.EN, -2), lhs
}
func ruleW4_3() (*bidiRule, []byte) {
lhs := makeLHS(bidi.AN, bidi.CS, bidi.AN)
return makeSquashRule("W4-3", lhs, bidi.AN, -2), lhs
}
// W5. A sequence of European terminators adjacent to European numbers
// changes to all European numbers.
// ET ET EN → EN EN EN
// EN ET ET → EN EN EN
// AN ET EN → AN EN EN
func ruleW5_1() (*bidiRule, []byte) { // W5-1 and W5-3
lhs := makeLHS(bidi.ET, bidi.EN)
return makeSquashRule("W5-1", lhs, bidi.EN, -2), lhs
}
func ruleW5_2() (*bidiRule, []byte) {
lhs := makeLHS(bidi.EN, bidi.ET)
return makeSquashRule("W5-2", lhs, bidi.EN, -2), lhs
}
// W6. Otherwise, separators and terminators change to Other Neutral.
// AN ET → AN ON
// L ES EN → L ON EN
// EN CS AN → EN ON AN
// ET AN → ON AN
func ruleW6_1() (*bidiRule, []byte) {
lhs := makeLHS(bidi.ET)
return makeSquashRule("W6-1", lhs, cNI, 0), lhs
}
func ruleW6_2() (*bidiRule, []byte) {
lhs := makeLHS(bidi.ES)
return makeSquashRule("W6-2", lhs, cNI, 0), lhs
}
func ruleW6_3() (*bidiRule, []byte) {
lhs := makeLHS(bidi.CS)
return makeSquashRule("W6-3", lhs, cNI, 0), lhs
}
func ruleW6x() (*bidiRule, []byte) {
lhs := makeLHS(cNI, cNI)
return makeSquashRule("W6-x", lhs, cNI, -1), lhs
}
// W7. Search backward from each instance of a European number until the
// first strong type (R, L, or sos) is found. If an L is found, then change the
// type of the European number to L.
// L NI EN → L NI L
// R NI EN → R NI EN
// ruleW7 builds UAX#9 rule W7: a European number whose nearest preceding
// strong type is L is itself treated as L.
//
//	L NI EN → L NI L
//	R NI EN → R NI EN
func ruleW7() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.EN)
	return &bidiRule{
		name: "W7",
		lhsLen: len(lhs),
		pass: 1,
		action: func(match []scrap) ([]scrap, int, bool) {
			// Context() appears to carry the preceding strong type for
			// this scrap — confirm against the scrap implementation.
			if match[0].Context() == bidi.L {
				L := match[:1]
				L[0].bidiclz = bidi.L
				return L, 0, true // replace EN with L, start again with L
			}
			return match, 1, true // step over EN
		},
	}, lhs
}
// ---------------------------------------------------------------------------
// 3.3.5 Resolving Neutral and Isolate Formatting Types
// func ruleN1_0() (*bidiRule, []byte) {
// lhs := makeLHS(cNI, cNI)
// return makeSquashRule("N1-0", lhs, bidi.L, 0), lhs
// }
// N1. A sequence of NIs takes the direction of the surrounding strong text if the text
// on both sides has the same direction. European and Arabic numbers act as if they
// were R in terms of their influence on NIs. The start-of-sequence (sos) and
// end-of-sequence (eos) types are used at isolating run sequence boundaries.
// L NI L → L L L (1)
// R NI R → R R R (2)
// R NI AN → R R AN (3)
// R NI EN → R R EN (4)
// AN NI R → AN R R (5)
// AN NI AN → AN R AN (6)
// AN NI EN → AN R EN (7)
// EN NI R → EN R R (8)
// EN NI AN → EN R AN (9)
// EN NI EN → EN R EN (10)
// ruleN1_1 squashes L NI L into a single L run (case 1 of rule N1 above).
func ruleN1_1() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.L, cNI, bidi.L)
	return makeSquashRule("N1-1", lhs, bidi.L, 0), lhs
}

// ruleN1_2 squashes R NI R into a single R run (case 2).
func ruleN1_2() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.R, cNI, bidi.R)
	return makeSquashRule("N1-2", lhs, bidi.R, 0), lhs
}
// ruleN1_3 handles R NI AN (case 3): the leading R and the NI are merged
// to R, while the trailing AN survives in the second slot.
func ruleN1_3() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.R, cNI, bidi.AN) // R NI → R
	return &bidiRule{
		name:   "N1-3",
		lhsLen: len(lhs),
		pass:   2,
		action: func(match []scrap) ([]scrap, int, bool) {
			// Merge R and NI into one R (see collapse).
			collapse(match[0], match[1], bidi.R)
			// Reuse slot 1 for the AN, adopting its children.
			match[1].bidiclz = bidi.AN
			match[1].appendAllChildrenOf(match[2])
			return match[:2], 1, false
		},
	}, lhs
}

// ruleN1_4 handles R NI EN (case 4); identical mechanics to N1-3 with EN
// instead of AN on the right-hand side.
func ruleN1_4() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.R, cNI, bidi.EN) // R NI → R
	return &bidiRule{
		name:   "N1-4",
		lhsLen: len(lhs),
		pass:   2,
		action: func(match []scrap) ([]scrap, int, bool) {
			collapse(match[0], match[1], bidi.R)
			match[1].bidiclz = bidi.EN
			match[1].appendAllChildrenOf(match[2])
			return match[:2], 1, false
		},
	}, lhs
}
// ruleN1_5 handles AN NI R (case 5): the NI and trailing R merge to R;
// the leading AN is untouched.
func ruleN1_5() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.AN, cNI, bidi.R) // NI R → R
	return &bidiRule{
		name:   "N1-5",
		lhsLen: len(lhs),
		pass:   2,
		action: func(match []scrap) ([]scrap, int, bool) {
			collapse(match[1], match[2], bidi.R)
			return match[:2], 1, false
		},
	}, lhs
}

// ruleN1_6 handles AN NI AN (case 6): only the middle NI flips to R.
func ruleN1_6() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.AN, cNI, bidi.AN) // NI → R
	return makeMidSwapRule("N1-6", lhs, bidi.R, 2), lhs
}

// ruleN1_7 handles AN NI EN (case 7).
func ruleN1_7() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.AN, cNI, bidi.EN) // NI → R
	return makeMidSwapRule("N1-7", lhs, bidi.R, 2), lhs
}

// ruleN1_8 handles EN NI R (case 8): NI and R merge to R.
func ruleN1_8() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.EN, cNI, bidi.R) // NI R → R
	return &bidiRule{
		name:   "N1-8",
		lhsLen: len(lhs),
		pass:   2,
		action: func(match []scrap) ([]scrap, int, bool) {
			collapse(match[1], match[2], bidi.R)
			return match[:2], 1, false
		},
	}, lhs
}

// ruleN1_9 handles EN NI AN (case 9).
func ruleN1_9() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.EN, cNI, bidi.AN) // NI → R
	return makeMidSwapRule("N1-9", lhs, bidi.R, 2), lhs
}

// ruleN1_10 handles EN NI EN (case 10).
func ruleN1_10() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.EN, cNI, bidi.EN) // NI → R
	return makeMidSwapRule("N1-10", lhs, bidi.R, 2), lhs
}
// N2. Any remaining NIs take the embedding direction.
// NI → e
// ruleN2 implements rule N2: any NI still unresolved after N1 takes the
// embedding direction (scrap.e()). jmp = -1 re-scans from the new scrap.
func ruleN2() (*bidiRule, []byte) {
	lhs := makeLHS(cNI) // NI → e
	return &bidiRule{
		name:   "N2",
		lhsLen: len(lhs),
		pass:   2,
		action: func(match []scrap) ([]scrap, int, bool) {
			ni := match[0]
			ni.bidiclz = ni.e()
			tracing.Debugf("rule N2: produced e=%s with context=%v", ni, ni.context)
			return []scrap{ni}, -1, false
		},
	}, lhs
}
// ruleL merges two adjacent L runs into one (cleanup after N-rules).
func ruleL() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.L, bidi.L)
	return makeSquashRule("L+L=L", lhs, bidi.L, 0), lhs
}

// ruleR merges two adjacent R runs into one.
func ruleR() (*bidiRule, []byte) {
	lhs := makeLHS(bidi.R, bidi.R)
	return makeSquashRule("R+R=R", lhs, bidi.R, 0), lhs
}
// ---------------------------------------------------------------------------
// makeSquashRule constructs a rule named name that replaces a match of lhs
// by a single scrap of class c, then jumps jmp positions. Rules whose name
// starts with "W" belong to pass 1; all others (N-rules etc.) to pass 2.
func makeSquashRule(name string, lhs []byte, c bidi.Class, jmp int) *bidiRule {
	pass := 2
	if strings.HasPrefix(name, "W") {
		pass = 1
	}
	return &bidiRule{
		name:   name,
		lhsLen: len(lhs),
		pass:   pass,
		action: squash(c, len(lhs), jmp),
	}
}
// squash returns a rule action that folds the first n matched scraps into
// one: the head scrap takes class c, extends to the last scrap's right
// boundary, and adopts the children of every merged scrap.
func squash(c bidi.Class, n int, jmp int) ruleAction {
	return func(match []scrap) ([]scrap, int, bool) {
		match[0].r = match[n-1].r
		match[0].bidiclz = c
		for _, merged := range match[1:n] {
			match[0].appendAllChildrenOf(merged)
		}
		return match[:1], jmp, false
	}
}
// makeMidSwapRule constructs a rule that changes only the class of the
// middle scrap of a 3-element match to c, leaving boundaries untouched.
// All mid-swap rules are N-rules and therefore run in pass 2.
func makeMidSwapRule(name string, lhs []byte, c bidi.Class, jmp int) *bidiRule {
	return &bidiRule{
		name:   name,
		lhsLen: len(lhs),
		pass:   2, // all mid-swap rules are Nx rules ⇒ pass 2
		action: func(match []scrap) ([]scrap, int, bool) {
			match[1].bidiclz = c // change class of middle interval
			return match, jmp, false
		},
	}
}
// makeLHS encodes a sequence of bidi classes as the byte string used for
// rule left-hand-side matching.
func makeLHS(toks ...bidi.Class) []byte {
	lhs := make([]byte, 0, len(toks))
	for _, cls := range toks {
		lhs = append(lhs, byte(cls))
	}
	return lhs
}
// appendChildren is an unimplemented stub; it only logs an error.
// Callers currently get no children copied from src to dest.
func appendChildren(dest scrap, src scrap) {
	tracing.Errorf("appendChildren(…) not yet implemented")
}
|
package main
import (
"go-study/rpcclient/funcs"
)
// main starts the RPC client implemented in the funcs package.
func main() {
	funcs.RpcClient()
}
|
// Copyright 2019 Radiation Detection and Imaging (RDI), LLC
// Use of this source code is governed by the BSD 3-clause
// license that can be found in the LICENSE file.
package message
import (
"context"
"encoding/json"
"log"
"github.com/go-redis/redis"
"github.com/gorilla/websocket"
)
// Msg is a generic message envelope published over redis pub/sub:
// a type tag, free-form string metadata, and an opaque payload.
type Msg struct {
	Type     string
	Metadata map[string]string
	Payload  []byte
}
// PublishJsonMsg marshals msg to JSON and publishes it on the given pub/sub
// channel. It returns a marshalling error or, unlike before, any error from
// the publish itself (previously discarded). The parameter was renamed from
// `redis`, which shadowed the imported redis package.
func PublishJsonMsg(client *redis.Client, channel string, msg *Msg) error {
	msgBytes, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	return client.Publish(channel, string(msgBytes)).Err()
}
// Cmd is a command received over pub/sub or a websocket: a command name
// plus free-form string metadata.
type Cmd struct {
	Command  string
	Metadata map[string]string
}

// Executer is implemented by anything that can carry out a Cmd.
// (Note: conventional spelling would be "Executor"; kept for compatibility.)
type Executer interface {
	Execute(*Cmd) error
}
// ReceivePubSubCmds connects to redis at addr, subscribes to the given
// pub/sub channel and streams decoded commands on the returned channel
// until ctx is cancelled or the subscription dies. The returned channel is
// closed when the listener goroutine exits.
//
// Fixes vs. the previous version: the explicit redisClient.Close() on the
// Receive error path duplicated the deferred Close; the message channel
// variable shadowed the `channel` string parameter; and a single malformed
// message silently terminated the whole listener — it is now logged and
// skipped. A closed message channel is also detected instead of spinning
// on nil messages.
func ReceivePubSubCmds(ctx context.Context, addr, channel string) <-chan *Cmd {
	cmds := make(chan *Cmd)
	go func() {
		defer close(cmds)
		redisClient := redis.NewClient(&redis.Options{Addr: addr})
		defer redisClient.Close()
		sub := redisClient.Subscribe(channel)
		// Receive confirms the subscription before we start looping.
		if _, err := sub.Receive(); err != nil {
			log.Println("sub.Receive():", err)
			return
		}
		defer sub.Close()
		log.Println("listening for commands on channel", channel)
		defer log.Println("done listening for commands on channel", channel)
		msgCh := sub.ChannelSize(10)
		for {
			select {
			case msg, ok := <-msgCh:
				if !ok {
					return // subscription channel closed
				}
				var cmd Cmd
				if err := json.Unmarshal([]byte(msg.Payload), &cmd); err != nil {
					log.Println("json.Unmarshal:", err)
					continue // skip malformed message, keep listening
				}
				cmds <- &cmd
			case <-ctx.Done():
				return
			}
		}
	}()
	return cmds
}
// ReceiveWsCmds reads JSON-encoded commands from the websocket connection
// and streams them on the returned channel until a read fails; the channel
// is closed when the reader goroutine exits. (ctx is currently unused.)
func ReceiveWsCmds(ctx context.Context, c *websocket.Conn) <-chan *Cmd {
	out := make(chan *Cmd)
	go func() {
		defer close(out)
		for {
			cmd := new(Cmd)
			if err := c.ReadJSON(cmd); err != nil {
				return
			}
			out <- cmd
		}
	}()
	return out
}
|
package models
// SuccessMessage is the JSON body returned on success,
// serialized as {"success": "..."}.
type SuccessMessage struct {
	Message string `json:"success"`
}
|
package main
import (
"bufio"
"fmt"
"net"
"os"
"strings"
)
import "project/errorDispose"
// main dials a TCP server on local port 7777 and forwards whitespace-trimmed
// lines read from stdin until stdin ends or a write fails.
//
// Fixes vs. the previous version: the error variable was named `error`,
// shadowing the predeclared identifier (renamed to err); on a failed dial
// the deferred Close would have dereferenced a nil connection (we now bail
// out first); and a stdin read error/EOF was ignored, causing a busy loop.
func main() {
	// Connect to the server over TCP.
	tcpAddr, _ := net.ResolveTCPAddr("tcp", ":7777")
	tcpConn, err := net.DialTCP("tcp", nil, tcpAddr)
	errorDispose.ErrorPrint(err, "链接错误")
	if err != nil {
		return // no usable connection; avoid Close on nil in the defer
	}
	defer func() {
		_ = tcpConn.Close()
		fmt.Println("链接关闭")
	}()
	textBufio := bufio.NewReader(os.Stdin)
	for {
		// Read a chunk from stdin.
		var text [1024]byte
		n, err := textBufio.Read(text[:])
		if err != nil {
			return // stdin closed or failed
		}
		// Trim surrounding whitespace before sending.
		textRemoveSpace := strings.TrimSpace(string(text[:n]))
		// Send the message.
		if _, err = tcpConn.Write([]byte(textRemoveSpace)); err != nil {
			fmt.Println("发送失败")
			return
		}
	}
}
|
package gotten_test
import (
"github.com/Hexilee/gotten"
"github.com/Hexilee/gotten/headers"
"github.com/stretchr/testify/assert"
"io"
"io/ioutil"
"net/http"
"reflect"
"strconv"
"testing"
)
type (
	// EmptyParams is a parameter struct with no fields, used to exercise
	// request generation without any path/query/body inputs.
	EmptyParams struct {
	}

	// EmptyService declares a single generated endpoint that returns the
	// raw *http.Request so tests can inspect headers and cookies.
	EmptyService struct {
		EmptyGet func(*EmptyParams) (*http.Request, error)
	}
)
// TestBuilder verifies cookie/header configuration on the builder: cookies
// added singly and in bulk must all appear on the generated request, and
// SetHeader must override a value previously set with AddHeader.
func TestBuilder(t *testing.T) {
	creator, err := gotten.NewBuilder().
		SetBaseUrl("https://mock.io").
		AddCookie(&http.Cookie{Name: "ga", Value: TestString}).
		AddCookies([]*http.Cookie{
			{Name: "ga_id", Value: TestString},
			{Name: "qsc_session", Value: TestString},
		}).AddHeader("HOST", "mock.io").
		SetHeader("HOST", "hexilee.me").
		Build()
	assert.Nil(t, err)
	var service EmptyService
	assert.Nil(t, creator.Impl(&service))
	req, err := service.EmptyGet(&EmptyParams{})
	assert.Nil(t, err)
	// Every configured cookie must be present on the request.
	cookie, err := req.Cookie("ga_id")
	assert.Nil(t, err)
	assert.Equal(t, TestString, cookie.Value)
	cookie, err = req.Cookie("ga")
	assert.Nil(t, err)
	assert.Equal(t, TestString, cookie.Value)
	cookie, err = req.Cookie("qsc_session")
	assert.Nil(t, err)
	assert.Equal(t, TestString, cookie.Value)
	// SetHeader replaces, rather than appends to, the AddHeader value.
	assert.Equal(t, "hexilee.me", req.Header.Get("HOST"))
}
// TestBuilder_AddUnmarshalFunc registers a custom []byte-based unmarshaler
// for text/plain responses (parsing the body as a bool) and checks that
// Response.Unmarshal dispatches to it.
func TestBuilder_AddUnmarshalFunc(t *testing.T) {
	type TextService struct {
		Get func(*struct{}) (gotten.Response, error) `path:"/text"`
	}
	creator, err := gotten.NewBuilder().
		SetBaseUrl("https://mock.io").
		AddUnmarshalFunc(func(data []byte, v interface{}) (err error) {
			var success bool
			success, err = strconv.ParseBool(string(data))
			if err == nil {
				// Only a *bool target is populated; other targets are ignored.
				value := reflect.ValueOf(v)
				if value.Kind() == reflect.Ptr && value.Elem().Kind() == reflect.Bool {
					value.Elem().SetBool(success)
				}
			}
			return
		}, new(gotten.CheckerFactory).WhenContentType(headers.MIMETextPlain).Create()).
		SetClient(mockClient).
		Build()
	assert.Nil(t, err)
	service := new(TextService)
	assert.Nil(t, creator.Impl(service))
	assert.NotNil(t, service.Get)
	resp, err := service.Get(nil)
	assert.Nil(t, err)
	var success bool
	assert.Nil(t, resp.Unmarshal(&success))
	assert.True(t, success)
}
// TestBuilder_AddReaderUnmarshalerFunc mirrors the previous test but uses
// the stream-based unmarshaler hook, which receives the response body as an
// io.ReadCloser instead of a byte slice.
func TestBuilder_AddReaderUnmarshalerFunc(t *testing.T) {
	type TextService struct {
		Get func(*struct{}) (gotten.Response, error) `path:"/text"`
	}
	creator, err := gotten.NewBuilder().
		SetBaseUrl("https://mock.io").
		AddReadUnmarshalFunc(func(body io.ReadCloser, _ http.Header, v interface{}) (err error) {
			var data []byte
			data, err = ioutil.ReadAll(body)
			// The unmarshaler owns the body and must close it.
			body.Close()
			if err == nil {
				var success bool
				success, err = strconv.ParseBool(string(data))
				if err == nil {
					value := reflect.ValueOf(v)
					if value.Kind() == reflect.Ptr && value.Elem().Kind() == reflect.Bool {
						value.Elem().SetBool(success)
					}
				}
			}
			return
		}, new(gotten.CheckerFactory).WhenContentType(headers.MIMETextPlain).Create()).
		SetClient(mockClient).
		Build()
	assert.Nil(t, err)
	service := new(TextService)
	assert.Nil(t, creator.Impl(service))
	assert.NotNil(t, service.Get)
	resp, err := service.Get(nil)
	assert.Nil(t, err)
	var success bool
	assert.Nil(t, resp.Unmarshal(&success))
	assert.True(t, success)
}
|
package main
import (
"fmt"
)
// main walks through Go's core collection types — arrays, slices, maps and
// structs — printing each step to illustrate value vs. reference semantics.
func main() {
	// Arrays: fixed length, element-by-element assignment.
	var nums [3]int
	for i := range nums {
		nums[i] = i + 1
	}
	fmt.Println(nums)

	// Array with a composite-literal initializer.
	primes := [3]int{1, 2, 3}
	fmt.Println(primes)

	// A slice over the whole array shares its backing storage, so writes
	// through either name are visible through both.
	view := primes[:]
	primes[1] = 42
	view[2] = 27
	fmt.Println(primes, view)

	// Slices proper: append may reallocate the backing array as it grows.
	vals := []int{1, 2, 3}
	vals = append(vals, 4, 8, 16)
	fmt.Println(vals)

	// Sub-slices share the parent's backing array.
	tail := vals[1:]
	head := vals[:2]
	mid := vals[1:2]
	fmt.Println(tail, head, mid)

	// Maps: lookup, update and delete by key.
	scores := map[string]int{"foo": 42}
	fmt.Println(scores)
	fmt.Println(scores["foo"])
	scores["foo"] = 27 // assignment via key
	fmt.Println(scores)
	delete(scores, "foo") // deletion via key
	fmt.Println(scores)

	// Structs: named aggregates of fields.
	type user struct {
		ID        int
		FirstName string
		LastName  string
	}
	var u user // zero value: all fields zeroed
	u.ID = 1
	u.FirstName = "Bob"
	u.LastName = "Builder"
	fmt.Println(u)
	// Struct composite literal with field names.
	u1 := user{
		ID:        2,
		FirstName: "Pete",
		LastName:  "Miller",
	}
	fmt.Println(u1)
}
|
//go:build !windows
// +build !windows

package analytics
import (
"os"
)
// sigterm looks up the process with the given pid and sends it
// os.Interrupt (SIGINT on Unix — note: not SIGTERM, despite the name).
// Lookup failures and signal delivery errors are silently ignored
// (best effort).
func sigterm(pid int) {
	p, err := os.FindProcess(pid)
	if err != nil {
		return
	}
	p.Signal(os.Interrupt)
}
|
package service
import (
"github.com/gorilla/mux"
"github.com/moorara/log"
)
// Mock is the interface for a mock type. Hash identifies a mock uniquely
// (used as the registry key), and RegisterRoutes installs the mock's HTTP
// handlers on a router.
type Mock interface {
	String() string
	Hash() uint64
	RegisterRoutes(*mux.Router)
}

// MockService provides functionalities to manage mocks, keyed by Hash().
// Note: the map is not synchronized; callers must not mutate concurrently.
type MockService struct {
	logger log.Logger
	mocks  map[uint64]Mock
}
// NewMockService creates a new instance of MockService with an empty
// mock registry.
func NewMockService(logger log.Logger) *MockService {
	svc := new(MockService)
	svc.logger = logger
	svc.mocks = make(map[uint64]Mock)
	return svc
}
// Add registers a new mock under its Hash key.
// A mock with the same hash is replaced.
func (s *MockService) Add(m Mock) {
	s.mocks[m.Hash()] = m
	s.logger.Debug("message", "mock added", "mock", m.String())
}
// Delete deregisters an existing mock by its Hash key; deleting an
// unregistered mock is a no-op.
func (s *MockService) Delete(m Mock) {
	delete(s.mocks, m.Hash())
	s.logger.Debug("message", "mock deleted", "mock", m.String())
}
// Router creates a fresh router with every registered mock's routes
// installed on it.
func (s *MockService) Router() *mux.Router {
	r := mux.NewRouter()
	for _, mock := range s.mocks {
		mock.RegisterRoutes(r)
	}
	return r
}
|
package oracle
import (
"github.com/InjectiveLabs/injective-oracle-scaffold/injective-chain/modules/oracle/keeper"
"github.com/InjectiveLabs/injective-oracle-scaffold/injective-chain/modules/oracle/types"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// InitGenesis initializes the oracle module state from genesis data.
// Currently only the module params are restored.
func InitGenesis(ctx sdk.Context, keeper keeper.Keeper, data types.GenesisState) {
	// TODO: @Albert initialize genesis properly
	keeper.SetParams(ctx, data.Params)
}

// ExportGenesis exports the oracle module state for a genesis file.
// Currently only the module params are exported.
func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState {
	// TODO: @Albert export genesis properly
	return &types.GenesisState{
		Params: k.GetParams(ctx),
	}
}
|
package rdq
import (
"context"
"errors"
"fmt"
"sync"
"time"
)
// RDQOptions holds the settings for a RedisDelayedQueue.
type RDQOptions struct {
	// Queue is the name of the ZSet in redis.
	Queue string
	// Redis is the redis client.
	Redis Redis
	// Now is a function returning the current time (useful for tests). By default time.Now.
	Now func() time.Time
	// PollInterval is how often to check whether an event is due. By default 1 second.
	PollInterval time.Duration
	// ReturnTimeout is the timeout for putting back an event that is not due yet. By default 100 msec.
	ReturnTimeout time.Duration
}
// NewRDQ creates a new RedisDelayedQueue. It panics on a missing queue name
// or redis client and fills in defaults for Now, PollInterval and
// ReturnTimeout.
func NewRDQ(opts *RDQOptions) *RedisDelayedQueue {
	switch {
	case opts.Queue == "":
		panic("queue can not be empty")
	case opts.Redis == nil:
		panic("redis can not be nil")
	}
	if opts.Now == nil {
		opts.Now = time.Now
	}
	if opts.PollInterval <= 0 {
		opts.PollInterval = time.Second
	}
	if opts.ReturnTimeout <= 0 {
		opts.ReturnTimeout = time.Millisecond * 100
	}
	return &RedisDelayedQueue{opts: *opts}
}
// ErrNil is returned by Redis implementations when there is no data
// (e.g. BZPOPMIN timed out with nothing to pop).
var ErrNil = errors.New("redis has no data")

// Redis is the minimal redis client surface the queue needs.
type Redis interface {
	BZPOPMIN(ctx context.Context, key string, timeout time.Duration) (float64, []byte, error)
	ZADD(ctx context.Context, key string, score float64, memeber []byte) error
}
// RedisDelayedQueue is a delayed queue persisted in redis (a ZSet scored by
// due time in UnixNano). You can call Add from multiple goroutines, but
// Pop must be called from a single goroutine.
type RedisDelayedQueue struct {
	opts RDQOptions
	// pollTimer is reused across Pop iterations to avoid per-loop allocations.
	pollTimer *time.Timer
	// wg tracks an in-flight Pop so Stop can wait for it.
	wg sync.WaitGroup
}
// AddAfter adds a new delayed event due after the given delay from now.
func (rdq *RedisDelayedQueue) AddAfter(ctx context.Context, delay time.Duration, item []byte) error {
	return rdq.Add(ctx, rdq.opts.Now().Add(delay), item)
}

// Add adds a new delayed event due at the absolute time at.
// The due time is stored as the ZSet score (UnixNano).
func (rdq *RedisDelayedQueue) Add(ctx context.Context, at time.Time, item []byte) error {
	return rdq.opts.Redis.ZADD(ctx, rdq.opts.Queue, float64(at.UnixNano()), item)
}
// Pop blocks until an event whose due time has arrived can be returned.
// Pop is not safe for concurrent use. If the earliest event is not due yet
// it is put back and Pop waits one PollInterval before retrying.
func (rdq *RedisDelayedQueue) Pop(ctx context.Context) (time.Time, []byte, error) {
	// Track the in-flight Pop so Stop() can wait for it.
	rdq.wg.Add(1)
	defer rdq.wg.Done()
	for {
		s, d, err := rdq.opts.Redis.BZPOPMIN(ctx, rdq.opts.Queue, rdq.opts.PollInterval)
		if err != nil {
			if errors.Is(err, ErrNil) {
				// Nothing to pop: keep polling unless the context is done.
				if ctx.Err() == nil {
					continue
				}
				return time.Time{}, nil, fmt.Errorf("error on waiting event: %w", ctx.Err())
			}
			return time.Time{}, nil, fmt.Errorf("error on waiting event: %w", err)
		}
		// The ZSet score encodes the due time as UnixNano.
		tt := time.Unix(0, int64(s))
		if tt.After(rdq.opts.Now()) {
			// Popped too early: return the event to the queue. Use a fresh
			// context with ReturnTimeout so the put-back can succeed even if
			// ctx is already cancelled.
			rctx, rcancel := context.WithTimeout(context.Background(), rdq.opts.ReturnTimeout)
			err = rdq.Add(rctx, tt, d)
			rcancel()
			if err != nil {
				return time.Time{}, nil, fmt.Errorf("error on returning event: %w", err)
			}
			// Reuse a single timer across iterations instead of time.After.
			if rdq.pollTimer == nil {
				rdq.pollTimer = time.NewTimer(rdq.opts.PollInterval)
			} else {
				rdq.pollTimer.Reset(rdq.opts.PollInterval)
			}
			select {
			case <-ctx.Done():
				// Drain the timer if Stop reports it already fired.
				if !rdq.pollTimer.Stop() {
					<-rdq.pollTimer.C
				}
				return time.Time{}, nil, ctx.Err()
			case <-rdq.pollTimer.C:
				rdq.pollTimer.Stop()
				continue
			}
		}
		return tt, d, nil
	}
}
// Stop blocks until any in-flight Pop has finished its processing.
// It does not cancel Pop; cancel its context to make it return.
func (rdq *RedisDelayedQueue) Stop() {
	rdq.wg.Wait()
}
|
package request
import "github.com/astaxie/beego/validation"
// TagAddRequestValid validates a tag-add request: the name is required and
// at most 100 characters, and state must be 0 or 1. The returned Validation
// carries any accumulated errors.
func TagAddRequestValid(name string, state int) validation.Validation {
	v := validation.Validation{}
	v.Required(name, "name").Message("标签名称不能为空")
	v.MaxSize(name, 100, "name").Message("标签名称最长为100字符")
	v.Range(state, 0, 1, "state").Message("标签状态只允许0或1")
	return v
}
|
/*
Copyright 2016 Padduck, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"encoding/json"
"flag"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"fmt"
"github.com/braintree/manners"
"github.com/gin-gonic/gin"
"github.com/pufferpanel/pufferd/config"
"github.com/pufferpanel/pufferd/data"
"github.com/pufferpanel/pufferd/data/templates"
"github.com/pufferpanel/pufferd/httphandlers"
"github.com/pufferpanel/pufferd/install"
"github.com/pufferpanel/pufferd/logging"
"github.com/pufferpanel/pufferd/migration"
"github.com/pufferpanel/pufferd/programs"
"github.com/pufferpanel/pufferd/routing"
"github.com/pufferpanel/pufferd/routing/server"
"github.com/pufferpanel/pufferd/sftp"
"github.com/pufferpanel/pufferd/utils"
"net/http"
"strings"
)
var (
VERSION = "nightly"
MAJORVERSION = "nightly"
BUILDDATE = "unknown"
GITHASH = "unknown"
)
// main is the pufferd entry point. It parses flags, handles the one-shot
// modes (version/license/migrate/install), then boots the daemon: loads
// templates and server programs, starts enabled servers, wires up the HTTP
// router, SFTP, and an update check, and finally serves HTTP(S).
func main() {
	var loggingLevel string
	var webPort int
	var webHost string
	var authRoot string
	var authToken string
	var runInstaller bool
	var version bool
	var license bool
	var migrate bool
	var configPath string
	flag.StringVar(&loggingLevel, "logging", "INFO", "Lowest logging level to display")
	flag.IntVar(&webPort, "webport", 5656, "Port to run web service on")
	flag.StringVar(&authRoot, "auth", "", "Base URL to the authorization server")
	flag.StringVar(&authToken, "token", "", "Authorization token")
	flag.BoolVar(&runInstaller, "install", false, "If installing instead of running")
	flag.BoolVar(&version, "version", false, "Get the version")
	flag.BoolVar(&license, "license", false, "View license")
	flag.BoolVar(&migrate, "migrate", false, "Migrate Scales data to pufferd")
	flag.StringVar(&configPath, "config", "config.json", "Path to pufferd config.json")
	// NOTE(review): webHost has no flag binding; it is only set from config
	// below — confirm whether a -webhost flag was intended.
	flag.Parse()
	versionString := fmt.Sprintf("pufferd %s (%s %s)", VERSION, BUILDDATE, GITHASH)
	// One-shot informational/maintenance modes; all of them exit below.
	if version {
		os.Stdout.WriteString(versionString + "\r\n")
	}
	if license {
		os.Stdout.WriteString(data.LICENSE + "\r\n")
	}
	if migrate {
		config.Load(configPath)
		templates.Initialize()
		programs.Initialize()
		migration.MigrateFromScales()
	}
	if license || version || migrate {
		return
	}
	logging.SetLevelByString(loggingLevel)
	logging.Init()
	gin.SetMode(gin.ReleaseMode)
	logging.Info(versionString)
	logging.Info("Logging set to " + loggingLevel)
	// Installer mode: render the bundled config template with the supplied
	// auth settings, write it out, register the system service, and exit.
	if runInstaller {
		if authRoot == "" {
			logging.Error("Authorization server root not passed")
			os.Exit(1)
		}
		if authToken == "" {
			logging.Error("Authorization token not passed")
			os.Exit(1)
		}
		config := data.CONFIG
		replacements := make(map[string]interface{})
		replacements["authurl"] = strings.TrimSuffix(authRoot, "/")
		replacements["authtoken"] = authToken
		replacements["webport"] = webPort
		configData := []byte(utils.ReplaceTokens(config, replacements))
		var prettyJson bytes.Buffer
		json.Indent(&prettyJson, configData, "", " ")
		err := ioutil.WriteFile(configPath, prettyJson.Bytes(), 0664)
		if err != nil {
			logging.Error("Error writing new config")
			os.Exit(1)
		}
		logging.Info("Config saved")
		logging.Info("Attempting to install service")
		install.InstallService(configPath)
		os.Exit(0)
	}
	// Normal daemon startup: ensure template/server folders exist and are
	// populated, then load and start the configured server programs.
	templates.Initialize()
	programs.Initialize()
	if _, err := os.Stat(templates.Folder); os.IsNotExist(err) {
		logging.Info("No template directory found, creating")
		err = os.MkdirAll(templates.Folder, 0755)
		if err != nil {
			logging.Error("Error creating template folder", err)
		}
	}
	if files, _ := ioutil.ReadDir(templates.Folder); len(files) == 0 {
		logging.Info("Templates being copied to " + templates.Folder)
		templates.CopyTemplates()
	}
	if _, err := os.Stat(programs.ServerFolder); os.IsNotExist(err) {
		logging.Info("No server directory found, creating")
		os.MkdirAll(programs.ServerFolder, 0755)
	}
	programs.LoadFromFolder()
	for _, element := range programs.GetAll() {
		if element.IsEnabled() {
			logging.Info("Starting server " + element.Id())
			element.Start()
			err := programs.Save(element.Id())
			if err != nil {
				logging.Error("Error saving server file", err)
			}
		}
	}
	// HTTP router and optional API request logging.
	r := gin.New()
	{
		r.Use(gin.Recovery())
		routing.RegisterRoutes(r)
		server.RegisterRoutes(r)
	}
	if config.GetOrDefault("log.api", "false") == "true" {
		r.Use(httphandlers.ApiLoggingHandler)
	}
	// Use TLS only when both cert and key are present in the data folder.
	var useHttps bool
	useHttps = false
	if _, err := os.Stat(filepath.Join("data", "https.pem")); os.IsNotExist(err) {
		logging.Warn("No HTTPS.PEM found in data folder, will use no http")
	} else if _, err := os.Stat(filepath.Join("data", "https.key")); os.IsNotExist(err) {
		logging.Warn("No HTTPS.KEY found in data folder, will use no http")
	} else {
		useHttps = true
	}
	sftp.Run()
	//check if there's an update
	if config.GetOrDefault("update-check", "true") == "true" {
		go func() {
			resp, err := http.Get("https://dl.pufferpanel.com/pufferd/" + MAJORVERSION + "/version.txt")
			if err != nil {
				return
			}
			defer resp.Body.Close()
			onlineVersion, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return
			}
			if string(onlineVersion) != GITHASH {
				logging.Warn("DL server reports a different hash than this version, an update may be available")
				logging.Warnf("Installed: %s", GITHASH)
				logging.Warnf("Online: %s", onlineVersion)
			}
		}()
	}
	// Config values override the flag-parsed webPort; webHost comes only
	// from config.
	webHost = config.GetOrDefault("webhost", "0.0.0.0")
	webPort, _ = strconv.Atoi(config.GetOrDefault("webport", "5656"))
	logging.Infof("Starting web access on %s:%d", webHost, webPort)
	var err error
	if useHttps {
		err = manners.ListenAndServeTLS(webHost+":"+strconv.FormatInt(int64(webPort), 10), filepath.Join("data", "https.pem"), filepath.Join("data", "https.key"), r)
	} else {
		err = manners.ListenAndServe(webHost+":"+strconv.FormatInt(int64(webPort), 10), r)
	}
	if err != nil {
		logging.Error("Error starting web service", err)
	}
}
|
package main
import (
"log"
"os"
app "github.com/chutommy/metal-price/api-server/app"
config "github.com/chutommy/metal-price/api-server/config"
_ "github.com/chutommy/metal-price/api-server/docs" // documentation
)
// @title Metal Price API
// @version 1.0
// @description This API returns the current price of precious metals in different currencies and weight units.
// @contact.name Tommy Chu
// @contact.email tommychu2256@gmail.com
// @schemes http
// @host localhost:8080
// @BasePath /
// main wires up the API server: loads config, initializes the app, defers
// cleanup, and runs until the server stops (Run's error is fatal).
func main() {
	logger := log.New(os.Stdout, "[SERVER] ", log.LstdFlags)
	// config
	cfg := config.GetConfig()
	// init app
	a := app.NewApp(logger)
	err := a.Init(cfg)
	if err != nil {
		logger.Panicf("initialize app: %v", err)
	}
	// Close resources on exit, logging (not failing on) each close error.
	defer func() {
		errs := a.Stop()
		for i, err := range errs {
			logger.Printf("close error %d: %v\n", i, err)
		}
	}()
	// run: Run blocks; any returned error is fatal (panics so the deferred
	// cleanup above still executes).
	logger.Panicf("run app: %v", a.Run())
}
|
package main
import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"log"
"net/http"
"os"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
"golang.org/x/oauth2"
)
// ClientID for Auth0 (public identifier, not a secret).
const ClientID = "RpIMZwjG6BQ9uR6I6IUOPLt4kdmN68Ck"

// Domain for Sharks SBYS Auth0
const Domain = "sharkssbys.auth0.com"

// store signs session cookies with SHARKS_COOKIE_ID.
// NOTE(review): if the env var is unset the key is empty — confirm the
// deployment always sets it.
var store = sessions.NewCookieStore([]byte(os.Getenv("SHARKS_COOKIE_ID")))
// main registers the Auth0 login/callback routes and serves on port 3000.
func main() {
	r := mux.NewRouter()
	r.HandleFunc("/callback", CallbackHandler)
	r.HandleFunc("/login", LoginHandler)
	log.Fatal(http.ListenAndServe(":3000", r))
}
// CallbackHandler handles the Auth0 callback: it verifies the state
// parameter against the session, exchanges the authorization code for
// tokens, fetches the user profile, stores everything in the auth session,
// and redirects to /user.
//
// Change vs. the previous version: outbound OAuth calls now use the
// request's context instead of context.TODO(), so they are cancelled if
// the client disconnects.
func CallbackHandler(w http.ResponseWriter, r *http.Request) {
	conf := &oauth2.Config{
		ClientID:     ClientID,
		ClientSecret: os.Getenv("AUTH0_SECRET"),
		RedirectURL:  "http://localhost:3000/callback",
		Scopes:       []string{"openid", "profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://" + Domain + "/authorize",
			TokenURL: "https://" + Domain + "/oauth/token",
		},
	}
	var ctx context.Context = r.Context()
	// CSRF protection: the state must match what LoginHandler stored.
	state := r.URL.Query().Get("state")
	session, err := store.Get(r, "state")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if state != session.Values["state"] {
		http.Error(w, "Invalid state parameter", http.StatusInternalServerError)
		return
	}
	code := r.URL.Query().Get("code")
	token, err := conf.Exchange(ctx, code)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Getting now the userInfo
	client := conf.Client(ctx, token)
	resp, err := client.Get("https://" + Domain + "/userinfo")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	var profile map[string]interface{}
	if err = json.NewDecoder(resp.Body).Decode(&profile); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	session, err = store.Get(r, "auth-session")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	session.Values["id_token"] = token.Extra("id_token")
	session.Values["access_token"] = token.AccessToken
	session.Values["profile"] = profile
	err = session.Save(r, w)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Redirect to logged in page
	http.Redirect(w, r, "/user", http.StatusSeeOther)
}
// LoginHandler logs users in using Auth0: it generates a random CSRF state,
// stores it in the session, and redirects to the Auth0 authorize URL.
//
// Change vs. the previous version: the crypto/rand Read error is now
// checked — proceeding with an all-zero state would defeat CSRF protection.
func LoginHandler(w http.ResponseWriter, r *http.Request) {
	conf := &oauth2.Config{
		ClientID:     ClientID,
		ClientSecret: os.Getenv("AUTH0_SECRET"),
		RedirectURL:  "http://localhost:3000/callback",
		Scopes:       []string{"openid", "profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://" + Domain + "/authorize",
			TokenURL: "https://" + Domain + "/oauth/token",
		},
	}
	aud := "https://" + Domain + "/userinfo"
	// Generate random state
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	state := base64.StdEncoding.EncodeToString(b)
	session, err := store.Get(r, "state")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	session.Values["state"] = state
	err = session.Save(r, w)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	audience := oauth2.SetAuthURLParam("audience", aud)
	url := conf.AuthCodeURL(state, audience)
	http.Redirect(w, r, url, http.StatusTemporaryRedirect)
}
|
package wuser
// User holds account data for the wuser package.
// NOTE(review): Password and Token are stored as plain strings here —
// confirm hashing/encryption happens before persistence.
type User struct {
	Name     string
	Phone    string
	Email    string
	UserName string
	Password string
	Token    string
}
|
//Package permuter provides a utility for permuting lists
package permuter
import ()
// Permute generates every permutation of original, invoking sink once per
// ordering (n! calls for n elements; exactly one call for an empty list).
// The slice passed to sink is original itself, permuted in place, so the
// sink must neither modify nor retain it. The previous version was an
// unimplemented stub; this uses the iterative form of Heap's algorithm.
func Permute(original []interface{}, sink func(permutation []interface{})) {
	n := len(original)
	// c[i] tracks how many swaps have been performed at level i.
	c := make([]int, n)
	sink(original)
	for i := 0; i < n; {
		if c[i] < i {
			// Even levels swap with index 0, odd levels with c[i].
			if i%2 == 0 {
				original[0], original[i] = original[i], original[0]
			} else {
				original[c[i]], original[i] = original[i], original[c[i]]
			}
			sink(original)
			c[i]++
			i = 0
		} else {
			c[i] = 0
			i++
		}
	}
}
|
package main
//#include <stdint.h>
//#include <stdlib.h>
//#include <string.h>
//#include "moc.h"
import "C"
import (
"runtime"
"strings"
"unsafe"
"github.com/therecipe/qt"
std_core "github.com/therecipe/qt/core"
)
// cGoUnpackString converts a packed C string to a Go string. A length of
// -1 marks a NUL-terminated string; otherwise len bytes are copied.
func cGoUnpackString(s C.struct_Moc_PackedString) string {
	if int(s.len) == -1 {
		return C.GoString(s.data)
	}
	return C.GoStringN(s.data, C.int(s.len))
}

// cGoUnpackBytes converts a packed C string to a byte slice, mirroring
// cGoUnpackString for binary data.
func cGoUnpackBytes(s C.struct_Moc_PackedString) []byte {
	if int(s.len) == -1 {
		gs := C.GoString(s.data)
		return *(*[]byte)(unsafe.Pointer(&gs))
	}
	return C.GoBytes(unsafe.Pointer(s.data), C.int(s.len))
}

// unpackStringList splits a string list encoded by the Qt binding layer
// on its "¡¦!" separator; an empty input yields an empty slice.
func unpackStringList(s string) []string {
	if len(s) == 0 {
		return make([]string, 0)
	}
	return strings.Split(s, "¡¦!")
}
// Auto-generated (therecipe/qt) pointer plumbing for the QtBridge QObject.

// QtBridge_ITF is implemented by QtBridge and anything embedding it.
type QtBridge_ITF interface {
	std_core.QObject_ITF
	QtBridge_PTR() *QtBridge
}

func (ptr *QtBridge) QtBridge_PTR() *QtBridge {
	return ptr
}

// Pointer returns the underlying C++ object pointer (nil-safe).
func (ptr *QtBridge) Pointer() unsafe.Pointer {
	if ptr != nil {
		return ptr.QObject_PTR().Pointer()
	}
	return nil
}

// SetPointer rebinds the wrapper to a C++ object pointer (nil-safe).
func (ptr *QtBridge) SetPointer(p unsafe.Pointer) {
	if ptr != nil {
		ptr.QObject_PTR().SetPointer(p)
	}
}

// PointerFromQtBridge extracts the C++ pointer from any QtBridge_ITF.
func PointerFromQtBridge(ptr QtBridge_ITF) unsafe.Pointer {
	if ptr != nil {
		return ptr.QtBridge_PTR().Pointer()
	}
	return nil
}
// NewQtBridgeFromPointer wraps a C++ pointer in a *QtBridge, reusing an
// already-registered Go wrapper from the qt registry when one exists.
func NewQtBridgeFromPointer(ptr unsafe.Pointer) (n *QtBridge) {
	if gPtr, ok := qt.Receive(ptr); !ok {
		n = new(QtBridge)
		n.SetPointer(ptr)
	} else {
		switch deduced := gPtr.(type) {
		case *QtBridge:
			n = deduced
		case *std_core.QObject:
			// Promote a plain QObject wrapper by embedding it.
			n = &QtBridge{QObject: *deduced}
		default:
			n = new(QtBridge)
			n.SetPointer(ptr)
		}
	}
	return
}
//export callbackQtBridgea35b5a_Constructor
func callbackQtBridgea35b5a_Constructor(ptr unsafe.Pointer) {
	this := NewQtBridgeFromPointer(ptr)
	qt.Register(ptr, this)
}

// Auto-generated signal plumbing for the "updateRun" signal: the exported
// callback dispatches from C++ into the registered Go handler.

//export callbackQtBridgea35b5a_UpdateRun
func callbackQtBridgea35b5a_UpdateRun(ptr unsafe.Pointer) {
	if signal := qt.GetSignal(ptr, "updateRun"); signal != nil {
		(*(*func())(signal))()
	}
}

// ConnectUpdateRun registers f as handler for the updateRun signal; an
// existing handler is chained so both run.
func (ptr *QtBridge) ConnectUpdateRun(f func()) {
	if ptr.Pointer() != nil {
		if !qt.ExistsSignal(ptr.Pointer(), "updateRun") {
			C.QtBridgea35b5a_ConnectUpdateRun(ptr.Pointer())
		}
		if signal := qt.LendSignal(ptr.Pointer(), "updateRun"); signal != nil {
			f := func() {
				(*(*func())(signal))()
				f()
			}
			qt.ConnectSignal(ptr.Pointer(), "updateRun", unsafe.Pointer(&f))
		} else {
			qt.ConnectSignal(ptr.Pointer(), "updateRun", unsafe.Pointer(&f))
		}
	}
}

// DisconnectUpdateRun removes the Go handler and the C++ connection.
func (ptr *QtBridge) DisconnectUpdateRun() {
	if ptr.Pointer() != nil {
		C.QtBridgea35b5a_DisconnectUpdateRun(ptr.Pointer())
		qt.DisconnectSignal(ptr.Pointer(), "updateRun")
	}
}

// UpdateRun emits the updateRun signal on the C++ side.
func (ptr *QtBridge) UpdateRun() {
	if ptr.Pointer() != nil {
		C.QtBridgea35b5a_UpdateRun(ptr.Pointer())
	}
}
// Auto-generated signal plumbing for "updateStop"; identical structure to
// the updateRun set above.

//export callbackQtBridgea35b5a_UpdateStop
func callbackQtBridgea35b5a_UpdateStop(ptr unsafe.Pointer) {
	if signal := qt.GetSignal(ptr, "updateStop"); signal != nil {
		(*(*func())(signal))()
	}
}

// ConnectUpdateStop registers f as handler for the updateStop signal,
// chaining any existing handler.
func (ptr *QtBridge) ConnectUpdateStop(f func()) {
	if ptr.Pointer() != nil {
		if !qt.ExistsSignal(ptr.Pointer(), "updateStop") {
			C.QtBridgea35b5a_ConnectUpdateStop(ptr.Pointer())
		}
		if signal := qt.LendSignal(ptr.Pointer(), "updateStop"); signal != nil {
			f := func() {
				(*(*func())(signal))()
				f()
			}
			qt.ConnectSignal(ptr.Pointer(), "updateStop", unsafe.Pointer(&f))
		} else {
			qt.ConnectSignal(ptr.Pointer(), "updateStop", unsafe.Pointer(&f))
		}
	}
}

// DisconnectUpdateStop removes the Go handler and the C++ connection.
func (ptr *QtBridge) DisconnectUpdateStop() {
	if ptr.Pointer() != nil {
		C.QtBridgea35b5a_DisconnectUpdateStop(ptr.Pointer())
		qt.DisconnectSignal(ptr.Pointer(), "updateStop")
	}
}

// UpdateStop emits the updateStop signal on the C++ side.
func (ptr *QtBridge) UpdateStop() {
	if ptr.Pointer() != nil {
		C.QtBridgea35b5a_UpdateStop(ptr.Pointer())
	}
}
// Auto-generated meta-type / QML registration wrappers. The method forms
// simply delegate to the same C calls as the package-level functions.

// QtBridge_QRegisterMetaType registers QtBridge with Qt's meta-type system.
func QtBridge_QRegisterMetaType() int {
	return int(int32(C.QtBridgea35b5a_QtBridgea35b5a_QRegisterMetaType()))
}

func (ptr *QtBridge) QRegisterMetaType() int {
	return int(int32(C.QtBridgea35b5a_QtBridgea35b5a_QRegisterMetaType()))
}

// QtBridge_QRegisterMetaType2 registers the type under an explicit name.
func QtBridge_QRegisterMetaType2(typeName string) int {
	var typeNameC *C.char
	if typeName != "" {
		typeNameC = C.CString(typeName)
		defer C.free(unsafe.Pointer(typeNameC))
	}
	return int(int32(C.QtBridgea35b5a_QtBridgea35b5a_QRegisterMetaType2(typeNameC)))
}

func (ptr *QtBridge) QRegisterMetaType2(typeName string) int {
	var typeNameC *C.char
	if typeName != "" {
		typeNameC = C.CString(typeName)
		defer C.free(unsafe.Pointer(typeNameC))
	}
	return int(int32(C.QtBridgea35b5a_QtBridgea35b5a_QRegisterMetaType2(typeNameC)))
}

// QtBridge_QmlRegisterType registers QtBridge as a QML type with defaults.
func QtBridge_QmlRegisterType() int {
	return int(int32(C.QtBridgea35b5a_QtBridgea35b5a_QmlRegisterType()))
}

func (ptr *QtBridge) QmlRegisterType() int {
	return int(int32(C.QtBridgea35b5a_QtBridgea35b5a_QmlRegisterType()))
}

// QtBridge_QmlRegisterType2 registers QtBridge as a QML type under the
// given uri/version/name.
func QtBridge_QmlRegisterType2(uri string, versionMajor int, versionMinor int, qmlName string) int {
	var uriC *C.char
	if uri != "" {
		uriC = C.CString(uri)
		defer C.free(unsafe.Pointer(uriC))
	}
	var qmlNameC *C.char
	if qmlName != "" {
		qmlNameC = C.CString(qmlName)
		defer C.free(unsafe.Pointer(qmlNameC))
	}
	return int(int32(C.QtBridgea35b5a_QtBridgea35b5a_QmlRegisterType2(uriC, C.int(int32(versionMajor)), C.int(int32(versionMinor)), qmlNameC)))
}

func (ptr *QtBridge) QmlRegisterType2(uri string, versionMajor int, versionMinor int, qmlName string) int {
	var uriC *C.char
	if uri != "" {
		uriC = C.CString(uri)
		defer C.free(unsafe.Pointer(uriC))
	}
	var qmlNameC *C.char
	if qmlName != "" {
		qmlNameC = C.CString(qmlName)
		defer C.free(unsafe.Pointer(qmlNameC))
	}
	return int(int32(C.QtBridgea35b5a_QtBridgea35b5a_QmlRegisterType2(uriC, C.int(int32(versionMajor)), C.int(int32(versionMinor)), qmlNameC)))
}
// Auto-generated list accessors used internally by the binding layer for
// dynamicPropertyNames and findChildren result lists (get/set by index and
// new-list construction).

func (ptr *QtBridge) __dynamicPropertyNames_atList(i int) *std_core.QByteArray {
	if ptr.Pointer() != nil {
		tmpValue := std_core.NewQByteArrayFromPointer(C.QtBridgea35b5a___dynamicPropertyNames_atList(ptr.Pointer(), C.int(int32(i))))
		// Free the QByteArray when the Go wrapper is collected.
		runtime.SetFinalizer(tmpValue, (*std_core.QByteArray).DestroyQByteArray)
		return tmpValue
	}
	return nil
}

func (ptr *QtBridge) __dynamicPropertyNames_setList(i std_core.QByteArray_ITF) {
	if ptr.Pointer() != nil {
		C.QtBridgea35b5a___dynamicPropertyNames_setList(ptr.Pointer(), std_core.PointerFromQByteArray(i))
	}
}

func (ptr *QtBridge) __dynamicPropertyNames_newList() unsafe.Pointer {
	return C.QtBridgea35b5a___dynamicPropertyNames_newList(ptr.Pointer())
}

func (ptr *QtBridge) __findChildren_atList2(i int) *std_core.QObject {
	if ptr.Pointer() != nil {
		tmpValue := std_core.NewQObjectFromPointer(C.QtBridgea35b5a___findChildren_atList2(ptr.Pointer(), C.int(int32(i))))
		// Invalidate the wrapper when the QObject is destroyed on the C++ side.
		if !qt.ExistsSignal(tmpValue.Pointer(), "destroyed") {
			tmpValue.ConnectDestroyed(func(*std_core.QObject) { tmpValue.SetPointer(nil) })
		}
		return tmpValue
	}
	return nil
}

func (ptr *QtBridge) __findChildren_setList2(i std_core.QObject_ITF) {
	if ptr.Pointer() != nil {
		C.QtBridgea35b5a___findChildren_setList2(ptr.Pointer(), std_core.PointerFromQObject(i))
	}
}

func (ptr *QtBridge) __findChildren_newList2() unsafe.Pointer {
	return C.QtBridgea35b5a___findChildren_newList2(ptr.Pointer())
}

func (ptr *QtBridge) __findChildren_atList3(i int) *std_core.QObject {
	if ptr.Pointer() != nil {
		tmpValue := std_core.NewQObjectFromPointer(C.QtBridgea35b5a___findChildren_atList3(ptr.Pointer(), C.int(int32(i))))
		if !qt.ExistsSignal(tmpValue.Pointer(), "destroyed") {
			tmpValue.ConnectDestroyed(func(*std_core.QObject) { tmpValue.SetPointer(nil) })
		}
		return tmpValue
	}
	return nil
}

func (ptr *QtBridge) __findChildren_setList3(i std_core.QObject_ITF) {
	if ptr.Pointer() != nil {
		C.QtBridgea35b5a___findChildren_setList3(ptr.Pointer(), std_core.PointerFromQObject(i))
	}
}

func (ptr *QtBridge) __findChildren_newList3() unsafe.Pointer {
	return C.QtBridgea35b5a___findChildren_newList3(ptr.Pointer())
}
func (ptr *QtBridge) __findChildren_atList(i int) *std_core.QObject {
if ptr.Pointer() != nil {
tmpValue := std_core.NewQObjectFromPointer(C.QtBridgea35b5a___findChildren_atList(ptr.Pointer(), C.int(int32(i))))
if !qt.ExistsSignal(tmpValue.Pointer(), "destroyed") {
tmpValue.ConnectDestroyed(func(*std_core.QObject) { tmpValue.SetPointer(nil) })
}
return tmpValue
}
return nil
}
func (ptr *QtBridge) __findChildren_setList(i std_core.QObject_ITF) {
if ptr.Pointer() != nil {
C.QtBridgea35b5a___findChildren_setList(ptr.Pointer(), std_core.PointerFromQObject(i))
}
}
func (ptr *QtBridge) __findChildren_newList() unsafe.Pointer {
return C.QtBridgea35b5a___findChildren_newList(ptr.Pointer())
}
func (ptr *QtBridge) __children_atList(i int) *std_core.QObject {
if ptr.Pointer() != nil {
tmpValue := std_core.NewQObjectFromPointer(C.QtBridgea35b5a___children_atList(ptr.Pointer(), C.int(int32(i))))
if !qt.ExistsSignal(tmpValue.Pointer(), "destroyed") {
tmpValue.ConnectDestroyed(func(*std_core.QObject) { tmpValue.SetPointer(nil) })
}
return tmpValue
}
return nil
}
func (ptr *QtBridge) __children_setList(i std_core.QObject_ITF) {
if ptr.Pointer() != nil {
C.QtBridgea35b5a___children_setList(ptr.Pointer(), std_core.PointerFromQObject(i))
}
}
func (ptr *QtBridge) __children_newList() unsafe.Pointer {
return C.QtBridgea35b5a___children_newList(ptr.Pointer())
}
func NewQtBridge(parent std_core.QObject_ITF) *QtBridge {
tmpValue := NewQtBridgeFromPointer(C.QtBridgea35b5a_NewQtBridge(std_core.PointerFromQObject(parent)))
if !qt.ExistsSignal(tmpValue.Pointer(), "destroyed") {
tmpValue.ConnectDestroyed(func(*std_core.QObject) { tmpValue.SetPointer(nil) })
}
return tmpValue
}
//export callbackQtBridgea35b5a_DestroyQtBridge
func callbackQtBridgea35b5a_DestroyQtBridge(ptr unsafe.Pointer) {
if signal := qt.GetSignal(ptr, "~QtBridge"); signal != nil {
(*(*func())(signal))()
} else {
NewQtBridgeFromPointer(ptr).DestroyQtBridgeDefault()
}
}
func (ptr *QtBridge) ConnectDestroyQtBridge(f func()) {
if ptr.Pointer() != nil {
if signal := qt.LendSignal(ptr.Pointer(), "~QtBridge"); signal != nil {
f := func() {
(*(*func())(signal))()
f()
}
qt.ConnectSignal(ptr.Pointer(), "~QtBridge", unsafe.Pointer(&f))
} else {
qt.ConnectSignal(ptr.Pointer(), "~QtBridge", unsafe.Pointer(&f))
}
}
}
func (ptr *QtBridge) DisconnectDestroyQtBridge() {
if ptr.Pointer() != nil {
qt.DisconnectSignal(ptr.Pointer(), "~QtBridge")
}
}
func (ptr *QtBridge) DestroyQtBridge() {
if ptr.Pointer() != nil {
C.QtBridgea35b5a_DestroyQtBridge(ptr.Pointer())
ptr.SetPointer(nil)
runtime.SetFinalizer(ptr, nil)
}
}
func (ptr *QtBridge) DestroyQtBridgeDefault() {
if ptr.Pointer() != nil {
C.QtBridgea35b5a_DestroyQtBridgeDefault(ptr.Pointer())
ptr.SetPointer(nil)
runtime.SetFinalizer(ptr, nil)
}
}
//export callbackQtBridgea35b5a_Event
func callbackQtBridgea35b5a_Event(ptr unsafe.Pointer, e unsafe.Pointer) C.char {
if signal := qt.GetSignal(ptr, "event"); signal != nil {
return C.char(int8(qt.GoBoolToInt((*(*func(*std_core.QEvent) bool)(signal))(std_core.NewQEventFromPointer(e)))))
}
return C.char(int8(qt.GoBoolToInt(NewQtBridgeFromPointer(ptr).EventDefault(std_core.NewQEventFromPointer(e)))))
}
func (ptr *QtBridge) EventDefault(e std_core.QEvent_ITF) bool {
if ptr.Pointer() != nil {
return int8(C.QtBridgea35b5a_EventDefault(ptr.Pointer(), std_core.PointerFromQEvent(e))) != 0
}
return false
}
//export callbackQtBridgea35b5a_EventFilter
func callbackQtBridgea35b5a_EventFilter(ptr unsafe.Pointer, watched unsafe.Pointer, event unsafe.Pointer) C.char {
if signal := qt.GetSignal(ptr, "eventFilter"); signal != nil {
return C.char(int8(qt.GoBoolToInt((*(*func(*std_core.QObject, *std_core.QEvent) bool)(signal))(std_core.NewQObjectFromPointer(watched), std_core.NewQEventFromPointer(event)))))
}
return C.char(int8(qt.GoBoolToInt(NewQtBridgeFromPointer(ptr).EventFilterDefault(std_core.NewQObjectFromPointer(watched), std_core.NewQEventFromPointer(event)))))
}
func (ptr *QtBridge) EventFilterDefault(watched std_core.QObject_ITF, event std_core.QEvent_ITF) bool {
if ptr.Pointer() != nil {
return int8(C.QtBridgea35b5a_EventFilterDefault(ptr.Pointer(), std_core.PointerFromQObject(watched), std_core.PointerFromQEvent(event))) != 0
}
return false
}
//export callbackQtBridgea35b5a_ChildEvent
func callbackQtBridgea35b5a_ChildEvent(ptr unsafe.Pointer, event unsafe.Pointer) {
if signal := qt.GetSignal(ptr, "childEvent"); signal != nil {
(*(*func(*std_core.QChildEvent))(signal))(std_core.NewQChildEventFromPointer(event))
} else {
NewQtBridgeFromPointer(ptr).ChildEventDefault(std_core.NewQChildEventFromPointer(event))
}
}
func (ptr *QtBridge) ChildEventDefault(event std_core.QChildEvent_ITF) {
if ptr.Pointer() != nil {
C.QtBridgea35b5a_ChildEventDefault(ptr.Pointer(), std_core.PointerFromQChildEvent(event))
}
}
//export callbackQtBridgea35b5a_ConnectNotify
func callbackQtBridgea35b5a_ConnectNotify(ptr unsafe.Pointer, sign unsafe.Pointer) {
if signal := qt.GetSignal(ptr, "connectNotify"); signal != nil {
(*(*func(*std_core.QMetaMethod))(signal))(std_core.NewQMetaMethodFromPointer(sign))
} else {
NewQtBridgeFromPointer(ptr).ConnectNotifyDefault(std_core.NewQMetaMethodFromPointer(sign))
}
}
func (ptr *QtBridge) ConnectNotifyDefault(sign std_core.QMetaMethod_ITF) {
if ptr.Pointer() != nil {
C.QtBridgea35b5a_ConnectNotifyDefault(ptr.Pointer(), std_core.PointerFromQMetaMethod(sign))
}
}
//export callbackQtBridgea35b5a_CustomEvent
func callbackQtBridgea35b5a_CustomEvent(ptr unsafe.Pointer, event unsafe.Pointer) {
if signal := qt.GetSignal(ptr, "customEvent"); signal != nil {
(*(*func(*std_core.QEvent))(signal))(std_core.NewQEventFromPointer(event))
} else {
NewQtBridgeFromPointer(ptr).CustomEventDefault(std_core.NewQEventFromPointer(event))
}
}
func (ptr *QtBridge) CustomEventDefault(event std_core.QEvent_ITF) {
if ptr.Pointer() != nil {
C.QtBridgea35b5a_CustomEventDefault(ptr.Pointer(), std_core.PointerFromQEvent(event))
}
}
//export callbackQtBridgea35b5a_DeleteLater
func callbackQtBridgea35b5a_DeleteLater(ptr unsafe.Pointer) {
if signal := qt.GetSignal(ptr, "deleteLater"); signal != nil {
(*(*func())(signal))()
} else {
NewQtBridgeFromPointer(ptr).DeleteLaterDefault()
}
}
func (ptr *QtBridge) DeleteLaterDefault() {
if ptr.Pointer() != nil {
C.QtBridgea35b5a_DeleteLaterDefault(ptr.Pointer())
runtime.SetFinalizer(ptr, nil)
}
}
//export callbackQtBridgea35b5a_Destroyed
func callbackQtBridgea35b5a_Destroyed(ptr unsafe.Pointer, obj unsafe.Pointer) {
if signal := qt.GetSignal(ptr, "destroyed"); signal != nil {
(*(*func(*std_core.QObject))(signal))(std_core.NewQObjectFromPointer(obj))
}
qt.Unregister(ptr)
}
//export callbackQtBridgea35b5a_DisconnectNotify
func callbackQtBridgea35b5a_DisconnectNotify(ptr unsafe.Pointer, sign unsafe.Pointer) {
if signal := qt.GetSignal(ptr, "disconnectNotify"); signal != nil {
(*(*func(*std_core.QMetaMethod))(signal))(std_core.NewQMetaMethodFromPointer(sign))
} else {
NewQtBridgeFromPointer(ptr).DisconnectNotifyDefault(std_core.NewQMetaMethodFromPointer(sign))
}
}
func (ptr *QtBridge) DisconnectNotifyDefault(sign std_core.QMetaMethod_ITF) {
if ptr.Pointer() != nil {
C.QtBridgea35b5a_DisconnectNotifyDefault(ptr.Pointer(), std_core.PointerFromQMetaMethod(sign))
}
}
//export callbackQtBridgea35b5a_ObjectNameChanged
func callbackQtBridgea35b5a_ObjectNameChanged(ptr unsafe.Pointer, objectName C.struct_Moc_PackedString) {
if signal := qt.GetSignal(ptr, "objectNameChanged"); signal != nil {
(*(*func(string))(signal))(cGoUnpackString(objectName))
}
}
//export callbackQtBridgea35b5a_TimerEvent
func callbackQtBridgea35b5a_TimerEvent(ptr unsafe.Pointer, event unsafe.Pointer) {
if signal := qt.GetSignal(ptr, "timerEvent"); signal != nil {
(*(*func(*std_core.QTimerEvent))(signal))(std_core.NewQTimerEventFromPointer(event))
} else {
NewQtBridgeFromPointer(ptr).TimerEventDefault(std_core.NewQTimerEventFromPointer(event))
}
}
func (ptr *QtBridge) TimerEventDefault(event std_core.QTimerEvent_ITF) {
if ptr.Pointer() != nil {
C.QtBridgea35b5a_TimerEventDefault(ptr.Pointer(), std_core.PointerFromQTimerEvent(event))
}
}
|
package c21_mt19937
// w: word size (in number of bits)
// n: degree of recurrence
// m: middle word, an offset used in the recurrence relation defining the series x, 1 ≤ m < n
// r: separation point of one word, or the number of bits of the lower bitmask, 0 ≤ r ≤ w - 1
// a: coefficients of the rational normal form twist matrix
// b, c: TGFSR(R) tempering bitmasks
// s, t: TGFSR(R) tempering bit shifts
// u, d, l: additional Mersenne Twister tempering bit shifts/masks
// Canonical MT19937 (32-bit Mersenne Twister) parameter set from
// Matsumoto & Nishimura; see the per-letter legend above.
const (
	wMT = 32         // w: word size in bits
	nMT = 624        // n: degree of recurrence (state size in words)
	mMT = 397        // m: middle-word offset used by the recurrence
	rMT = 31         // r: width of the lower bitmask / separation point
	aMT = 0x9908b0df // a: twist matrix coefficients
	uMT = 11         // u: tempering shift
	dMT = 0xffffffff // d: tempering mask
	sMT = 7          // s: tempering shift
	bMT = 0x9d2c5680 // b: tempering mask
	tMT = 15         // t: tempering shift
	cMT = 0xefc60000 // c: tempering mask
	lMT = 18         // l: tempering shift
	fMT = 1812433253 // f: seed-initialization multiplier (Knuth)

	lowerMask = (1 << rMT) - 1 // low r bits of a state word
	upperMask = 0x80000000     // high (w-r) bits of a state word
)
// MT19937 is a 32-bit Mersenne Twister pseudo-random number
// generator. It is not cryptographically secure; the exported
// Update/Temper functions exist presumably so the state can be
// reconstructed from observed outputs (cryptopals challenge 21/23).
type MT19937 struct {
	index uint32      // next state word to output; index >= nMT forces a twist
	mt    [nMT]uint32 // internal state block
}
// NewMT19937 returns a generator initialized from seed using the
// standard MT19937 seeding recurrence (multiplier fMT = 1812433253).
// index starts at nMT so the first ExtractNumber call twists the state.
func NewMT19937(seed uint32) *MT19937 {
	g := &MT19937{index: nMT}
	g.mt[0] = seed
	for i := uint32(1); i < nMT; i++ {
		prev := g.mt[i-1]
		g.mt[i] = fMT*(prev^(prev>>(wMT-2))) + i
	}
	return g
}
// Update replaces the generator's entire internal state and output
// index wholesale — e.g. with state words recovered by untempering
// 624 observed outputs (generator cloning).
func (obj *MT19937) Update(state [nMT]uint32, idx uint32) {
	obj.index = idx
	obj.mt = state
}
// ExtractNumber returns the next pseudo-random 32-bit value.
// When all nMT buffered words are consumed (index >= nMT — also the
// freshly seeded state), the state block is regenerated via twist.
func (obj *MT19937) ExtractNumber() uint32 {
	if obj.index >= nMT {
		obj.twist()
	}
	y := Temper(obj.mt[obj.index])
	obj.index += 1
	return y
}
// Temper applies the MT19937 output tempering transform to one raw
// state word and returns the tempered value. Parentheses are explicit
// here for readability (Go shifts and & share the same precedence
// level and associate left, so the meaning is unchanged).
func Temper(y uint32) uint32 {
	v := y
	v ^= (v >> uMT) & dMT
	v ^= (v << sMT) & bMT
	v ^= (v << tMT) & cMT
	v ^= v >> lMT
	return v
}
// twist regenerates the full nMT-word state block in place and resets
// the read index.
//
// Fixes over the previous revision:
//   - the loop now covers every state word (i < nMT); it previously
//     stopped at len(mt)-1, leaving mt[nMT-1] stale forever.
//   - (i+1)%nMT is parenthesized: the old "i+1%nMT" parsed as
//     i+(1%nMT) == i+1, which never wrapped to mt[0] and would index
//     out of range at i == nMT-1 once the loop bound was corrected.
func (obj *MT19937) twist() {
	for i := 0; i < nMT; i++ {
		x := (obj.mt[i] & upperMask) + (obj.mt[(i+1)%nMT] & lowerMask)
		xA := x >> 1
		if x%2 != 0 {
			xA ^= aMT
		}
		obj.mt[i] = obj.mt[(i+mMT)%nMT] ^ xA
	}
	obj.index = 0
}
|
package main
// Lab 1. Hello World
// Requirements:
// As a lonely person, I would like an application to greet the world
//
// Objective:
// 01 - Understand package main
// 02 - Be able to run, build, and install basic applications
//
// Steps:
// 01 - Import the 'fmt' (format) package
// 02 - Use fmt.Println to write "Hello Dinner Party" to the console
// 03 - Run the application from the console 'go run main.go' or 'go run .'
// 04 - Build the application 'go build -o "App1.exe" .'
// 05 - Verify that the output prints correctly
// 06 - Install the application
// 07 - Verify that the output prints correctly
// main is the lab entry point. Step 02 of the lab (printing
// "Hello Dinner Party" via fmt.Println) has not been implemented yet —
// the body is intentionally empty scaffolding; see the steps above.
func main() {
}
|
// +build ignore

package drm
//#cgo pkg-config: libdrm
//#include <linux/types.h>
//#include <stddef.h>
//#include <asm/ioctl.h>
//#include <libdrm/drm.h>
//#include <libdrm/drm_mode.h>
//#include <libdrm/drm_fourcc.h>
//#include <libdrm/drm_sarea.h>
import "C"
import "syscall"
// Go-visible aliases for the core libdrm types from drm.h. Each name
// mirrors one C typedef/struct/enum one-to-one.
type (
	Handle   C.drm_handle_t
	Context  C.drm_context_t
	Drawable C.drm_drawable_t
	Magic    C.drm_magic_t

	ClipRect     C.struct_drm_clip_rect
	DrawableInfo C.struct_drm_drawable_info
	TexRegion    C.struct_drm_tex_region
	HwLock       C.struct_drm_hw_lock
	Version      C.struct_drm_version
	Unique       C.struct_drm_unique
	List         C.struct_drm_list
	Block        C.struct_drm_block
	Control      C.struct_drm_control
	MapType      C.enum_drm_map_type
	MapFlags     C.enum_drm_map_flags
	CtxPrivmap   C.struct_ctx_priv_map
	Map          C.struct_drm_map
	Client       C.struct_drm_client
	StatType     C.enum_drm_stat_type
	Stats        C.struct_drm_stats
	LockFlags    C.enum_drm_lock_flags
	Lock         C.struct_drm_lock
	DmaFlags     C.enum_drm_dma_flags
	// NOTE(review): "BufDesk" looks like a typo for BufDesc (the C
	// struct is drm_buf_desc); renaming the exported type would break
	// callers, so it is only flagged here.
	BufDesk           C.struct_drm_buf_desc
	BufInfo           C.struct_drm_buf_info
	BufFree           C.struct_drm_buf_free
	BufPub            C.struct_drm_buf_pub
	BufMap            C.struct_drm_buf_map
	Dma               C.struct_drm_dma
	CtxFlags          C.enum_drm_ctx_flags
	Ctx               C.struct_drm_ctx
	CtxRes            C.struct_drm_ctx_res
	Draw              C.struct_drm_draw
	DrawableInfoType  C.drm_drawable_info_type_t
	UpdateDraw        C.struct_drm_update_draw
	Auth              C.struct_drm_auth
	IrqBusid          C.struct_drm_irq_busid
	VBlankSeqType     C.enum_drm_vblank_seq_type
	WaitVBlankRequest C.struct_drm_wait_vblank_request
	WaitVBlankReply   C.struct_drm_wait_vblank_reply
	WaitVBlank        C.union_drm_wait_vblank
	ModesetCtl        C.struct_drm_modeset_ctl
	AgpMode           C.struct_drm_agp_mode
	AgpBuffer         C.struct_drm_agp_buffer
	AgpBinding        C.struct_drm_agp_binding
	AgpInfo           C.struct_drm_agp_info
	ScatterGather     C.struct_drm_scatter_gather
	SetVersion        C.struct_drm_set_version
	GemClose          C.struct_drm_gem_close
	GemFlink          C.struct_drm_gem_flink
	GemOpen           C.struct_drm_gem_open
	GetCap            C.struct_drm_get_cap
	PrimeHandle       C.struct_drm_prime_handle
	Event             C.struct_drm_event
	EventVBlank       C.struct_drm_event_vblank
)
// Go-visible mirrors of the drm.h constants. Only addition over the
// previous revision: DMA_WAIT, the correctly spelled alias for the
// pre-existing DMA_WaIT typo (kept for backward compatibility).
const (
	NAME        = C.DRM_NAME
	MIN_ORDER   = C.DRM_MIN_ORDER
	MAX_ORDER   = C.DRM_MAX_ORDER
	RAM_PERCENT = C.DRM_RAM_PERCENT
	LOCK_HELD   = C._DRM_LOCK_HELD
	LOCK_CONT   = C._DRM_LOCK_CONT
	//drm_map_type
	FRAME_BUFFER   = C._DRM_FRAME_BUFFER
	REGISTERS      = C._DRM_REGISTERS
	SHM            = C._DRM_SHM
	AGP            = C._DRM_AGP
	SCATTER_GATHER = C._DRM_SCATTER_GATHER
	CONSISTENT     = C._DRM_CONSISTENT
	GEM            = C._DRM_GEM
	//drm_map_flags
	RESTRICTED      = C._DRM_RESTRICTED
	READ_ONLY       = C._DRM_READ_ONLY
	LOCKED          = C._DRM_LOCKED
	KERNEL          = C._DRM_KERNEL
	WRITE_COMBINING = C._DRM_WRITE_COMBINING
	CONTAINS_LOCK   = C._DRM_CONTAINS_LOCK
	REMOVABLE       = C._DRM_REMOVABLE
	DRIVER          = C._DRM_DRIVER
	//drm_stat_type
	STAT_LOCK      = C._DRM_STAT_LOCK
	STAT_OPENS     = C._DRM_STAT_OPENS
	STAT_CLOSES    = C._DRM_STAT_CLOSES
	STAT_IOCTLS    = C._DRM_STAT_IOCTLS
	STAT_LOCKS     = C._DRM_STAT_LOCKS
	STAT_UNLOCKS   = C._DRM_STAT_UNLOCKS
	STAT_VALUE     = C._DRM_STAT_VALUE
	STAT_BYTE      = C._DRM_STAT_BYTE
	STAT_COUNT     = C._DRM_STAT_COUNT
	STAT_IRQ       = C._DRM_STAT_IRQ
	STAT_PRIMARY   = C._DRM_STAT_PRIMARY
	STAT_SECONDARY = C._DRM_STAT_SECONDARY
	STAT_DMA       = C._DRM_STAT_DMA
	STAT_SPECIAL   = C._DRM_STAT_SPECIAL
	STAT_MISSED    = C._DRM_STAT_MISSED
	//drm_lock_flags
	LOCK_READY      = C._DRM_LOCK_READY
	LOCK_QUIESCENT  = C._DRM_LOCK_QUIESCENT
	LOCK_FLUSH      = C._DRM_LOCK_FLUSH
	LOCK_FLUSH_ALL  = C._DRM_LOCK_FLUSH_ALL
	HALT_ALL_QUEUES = C._DRM_HALT_ALL_QUEUES
	HALT_CUR_QUEUES = C._DRM_HALT_CUR_QUEUES
	//drm_dma_flags
	DMA_BLOCK        = C._DRM_DMA_BLOCK
	DMA_WHILE_LOCKED = C._DRM_DMA_WHILE_LOCKED
	DMA_PRIORITY     = C._DRM_DMA_PRIORITY
	// Deprecated: DMA_WaIT is a misspelling kept for backward
	// compatibility; use DMA_WAIT instead.
	DMA_WaIT       = C._DRM_DMA_WAIT
	DMA_WAIT       = C._DRM_DMA_WAIT
	DMA_SMALLER_OK = C._DRM_DMA_SMALLER_OK
	DMA_LARGER_OK  = C._DRM_DMA_LARGER_OK
	//drm_buf_desc.flags
	PAGE_ALIGN    = C._DRM_PAGE_ALIGN
	AGP_BUFFER    = C._DRM_AGP_BUFFER
	SG_BUFFER     = C._DRM_SG_BUFFER
	FB_BUFFER     = C._DRM_FB_BUFFER
	PCI_BUFFER_RO = C._DRM_PCI_BUFFER_RO
	//drm_ctx_flags
	CONTEXT_PRESERVED = C._DRM_CONTEXT_PRESERVED
	CONTEXT_2DONLY    = C._DRM_CONTEXT_2DONLY
	//drm_vblank_seq_type
	VBLANK_ABSOLUTE   = C._DRM_VBLANK_ABSOLUTE
	VBLANK_RELATIVE   = C._DRM_VBLANK_RELATIVE
	VBLANK_EVENT      = C._DRM_VBLANK_EVENT
	VBLANK_FLIP       = C._DRM_VBLANK_FLIP
	VBLANK_NEXTONMISS = C._DRM_VBLANK_NEXTONMISS
	VBLANK_SECONDARY  = C._DRM_VBLANK_SECONDARY
	VBLANK_SIGNAL     = C._DRM_VBLANK_SIGNAL
	VBLANK_TYPES_MASK = C._DRM_VBLANK_TYPES_MASK
	VBLANK_FLAGS_MASK = C._DRM_VBLANK_FLAGS_MASK
	PRE_MODESET       = C._DRM_PRE_MODESET
	POST_MODESET      = C._DRM_POST_MODESET
	CLOEXEC           = syscall.O_CLOEXEC
)
//drm_mode.h
const (
DISPLAY_INFO_LEN = C.DRM_DISPLAY_INFO_LEN
CONNECTOR_NAME_LEN = C.DRM_CONNECTOR_NAME_LEN
DISPLAY_MODE_LEN = C.DRM_DISPLAY_MODE_LEN
PROP_NAME_LEN = C.DRM_PROP_NAME_LEN
MODE_TYPE_BUILTIN = C.DRM_MODE_TYPE_BUILTIN
MODE_TYPE_CLOCK_C = C.DRM_MODE_TYPE_CLOCK_C
MODE_TYPE_CRTC_C = C.DRM_MODE_TYPE_CRTC_C
MODE_TYPE_PREFERRED = C.DRM_MODE_TYPE_PREFERRED
MODE_TYPE_DEFAULT = C.DRM_MODE_TYPE_DEFAULT
MODE_TYPE_USERDEF = C.DRM_MODE_TYPE_USERDEF
MODE_TYPE_DRIVER = C.DRM_MODE_TYPE_DRIVER
/* Video mode flags */
// bit compatible with the xorg definitions.
MODE_FLAG_PHSYNC = C.DRM_MODE_FLAG_PHSYNC
MODE_FLAG_NHSYNC = C.DRM_MODE_FLAG_NHSYNC
MODE_FLAG_PVSYNC = C.DRM_MODE_FLAG_PVSYNC
MODE_FLAG_NVSYNC = C.DRM_MODE_FLAG_NVSYNC
MODE_FLAG_INTERLACE = C.DRM_MODE_FLAG_INTERLACE
MODE_FLAG_DBLSCAN = C.DRM_MODE_FLAG_DBLSCAN
MODE_FLAG_CSYNC = C.DRM_MODE_FLAG_CSYNC
MODE_FLAG_PCSYNC = C.DRM_MODE_FLAG_PCSYNC
MODE_FLAG_NCSYNC = C.DRM_MODE_FLAG_NCSYNC
// hskew provided
MODE_FLAG_HSKEW = C.DRM_MODE_FLAG_HSKEW
MODE_FLAG_BCAST = C.DRM_MODE_FLAG_BCAST
MODE_FLAG_PIXMUX = C.DRM_MODE_FLAG_PIXMUX
MODE_FLAG_DBLCLK = C.DRM_MODE_FLAG_DBLCLK
MODE_FLAG_CLKDIV2 = C.DRM_MODE_FLAG_CLKDIV2
/* DPMS flags */
// bit compatible with the xorg definitions.
MODE_DPMS_ON = C.DRM_MODE_DPMS_ON
MODE_DPMS_STANDBY = C.DRM_MODE_DPMS_STANDBY
MODE_DPMS_SUSPEND = C.DRM_MODE_DPMS_SUSPEND
MODE_DPMS_OFF = C.DRM_MODE_DPMS_OFF
/* Scaling mode options */
// Unmodified timing (display or software can still scale)
MODE_SCALE_NONE = C.DRM_MODE_SCALE_NONE
// Full screen, ignore aspect
MODE_SCALE_FULLSCREEN = C.DRM_MODE_SCALE_FULLSCREEN
// Centered, no scaling
MODE_SCALE_CENTER = C.DRM_MODE_SCALE_CENTER
// Full screen, preserve aspect
MODE_SCALE_ASPECT = C.DRM_MODE_SCALE_ASPECT
/* Dithering mode options */
MODE_DITHERING_OFF = C.DRM_MODE_DITHERING_OFF
MODE_DITHERING_ON = C.DRM_MODE_DITHERING_ON
MODE_DITHERING_AUTO = C.DRM_MODE_DITHERING_AUTO
/* Dirty info options */
MODE_DIRTY_OFF = C.DRM_MODE_DIRTY_OFF
MODE_DIRTY_ON = C.DRM_MODE_DIRTY_ON
MODE_DIRTY_ANNOTATE = C.DRM_MODE_DIRTY_ANNOTATE
)
type (
ModeModeInfo C.struct_drm_mode_modeinfo
ModeCardRes C.struct_drm_mode_card_res
ModeCrtc C.struct_drm_mode_crtc
)
const (
MODE_PRESENT_TOP_FIELD = C.DRM_MODE_PRESENT_TOP_FIELD
MODE_PRESENT_BOTTOM_FIELD = C.DRM_MODE_PRESENT_BOTTOM_FIELD
)
type (
/* Planes blend with or override other bits on the CRTC */
ModeSetPlane C.struct_drm_mode_set_plane
ModeGetPlane C.struct_drm_mode_get_plane
ModeGetPlaneRes C.struct_drm_mode_get_plane_res
)
const (
MODE_ENCODER_NONE = C.DRM_MODE_ENCODER_NONE
MODE_ENCODER_DAC = C.DRM_MODE_ENCODER_DAC
MODE_ENCODER_TMDS = C.DRM_MODE_ENCODER_TMDS
MODE_ENCODER_LVDS = C.DRM_MODE_ENCODER_LVDS
MODE_ENCODER_TVDAC = C.DRM_MODE_ENCODER_TVDAC
)
type ModeGetEncoder C.struct_drm_mode_get_encoder
/*
This is for connectors with multiple signal types.
Try to match DRM_MODE_CONNECTOR_X as closely as possible.
*/
const (
MODE_SUBCONNECTOR_Automatic = C.DRM_MODE_SUBCONNECTOR_Automatic
MODE_SUBCONNECTOR_Unknown = C.DRM_MODE_SUBCONNECTOR_Unknown
MODE_SUBCONNECTOR_DVID = C.DRM_MODE_SUBCONNECTOR_DVID
MODE_SUBCONNECTOR_DVIA = C.DRM_MODE_SUBCONNECTOR_DVIA
MODE_SUBCONNECTOR_Composite = C.DRM_MODE_SUBCONNECTOR_Composite
MODE_SUBCONNECTOR_SVIDEO = C.DRM_MODE_SUBCONNECTOR_SVIDEO
MODE_SUBCONNECTOR_Component = C.DRM_MODE_SUBCONNECTOR_Component
MODE_SUBCONNECTOR_SCART = C.DRM_MODE_SUBCONNECTOR_SCART
MODE_CONNECTOR_Unknown = C.DRM_MODE_CONNECTOR_Unknown
MODE_CONNECTOR_VGA = C.DRM_MODE_CONNECTOR_VGA
MODE_CONNECTOR_DVII = C.DRM_MODE_CONNECTOR_DVII
MODE_CONNECTOR_DVID = C.DRM_MODE_CONNECTOR_DVID
MODE_CONNECTOR_DVIA = C.DRM_MODE_CONNECTOR_DVIA
MODE_CONNECTOR_Composite = C.DRM_MODE_CONNECTOR_Composite
MODE_CONNECTOR_SVIDEO = C.DRM_MODE_CONNECTOR_SVIDEO
MODE_CONNECTOR_LVDS = C.DRM_MODE_CONNECTOR_LVDS
MODE_CONNECTOR_Component = C.DRM_MODE_CONNECTOR_Component
MODE_CONNECTOR_9PinDIN = C.DRM_MODE_CONNECTOR_9PinDIN
MODE_CONNECTOR_DisplayPort = C.DRM_MODE_CONNECTOR_DisplayPort
MODE_CONNECTOR_HDMIA = C.DRM_MODE_CONNECTOR_HDMIA
MODE_CONNECTOR_HDMIB = C.DRM_MODE_CONNECTOR_HDMIB
MODE_CONNECTOR_TV = C.DRM_MODE_CONNECTOR_TV
MODE_CONNECTOR_eDP = C.DRM_MODE_CONNECTOR_eDP
)
type ModeGetConnector C.struct_drm_mode_get_connector
// Property flags for drm_mode_get_property.flags.
const (
	MODE_PROP_PENDING   = C.DRM_MODE_PROP_PENDING
	MODE_PROP_RANGE     = C.DRM_MODE_PROP_RANGE
	MODE_PROP_IMMUTABLE = C.DRM_MODE_PROP_IMMUTABLE
	// enumerated type with text strings
	MODE_PROP_ENUM = C.DRM_MODE_PROP_ENUM
	MODE_PROP_BLOB = C.DRM_MODE_PROP_BLOB
	// bitmask of enumerated types
	MODE_PROP_BITMASK = C.DRM_MODE_PROP_BITMASK
)
type (
ModePropertyEnum C.struct_drm_mode_property_enum
ModeGetProperty C.struct_drm_mode_get_property
ModeConnectorSetProperty C.struct_drm_mode_connector_set_property
)
const (
MODE_OBJECT_CRTC = C.DRM_MODE_OBJECT_CRTC
MODE_OBJECT_CONNECTOR = C.DRM_MODE_OBJECT_CONNECTOR
MODE_OBJECT_ENCODER = C.DRM_MODE_OBJECT_ENCODER
MODE_OBJECT_MODE = C.DRM_MODE_OBJECT_MODE
MODE_OBJECT_PROPERTY = C.DRM_MODE_OBJECT_PROPERTY
MODE_OBJECT_FB = C.DRM_MODE_OBJECT_FB
MODE_OBJECT_BLOB = C.DRM_MODE_OBJECT_BLOB
MODE_OBJECT_PLANE = C.DRM_MODE_OBJECT_PLANE
)
type (
ModeObjGetProperties C.struct_drm_mode_obj_get_properties
ModeObjSetProperty C.struct_drm_mode_obj_set_property
ModeGetBlob C.struct_drm_mode_get_blob
ModeFbCmd C.struct_drm_mode_fb_cmd
)
// MODE_FB_INTERLACED marks a framebuffer as interlaced.
const MODE_FB_INTERLACED = C.DRM_MODE_FB_INTERLACED
type ModeFbCmd2 C.struct_drm_mode_fb_cmd2
const (
MODE_FB_DIRTY_ANNOTATE_COPY = C.DRM_MODE_FB_DIRTY_ANNOTATE_COPY
MODE_FB_DIRTY_ANNOTATE_FILL = C.DRM_MODE_FB_DIRTY_ANNOTATE_FILL
MODE_FB_DIRTY_FLAGS = C.DRM_MODE_FB_DIRTY_FLAGS
)
type ModeFbDirtyCmd C.struct_drm_mode_fb_dirty_cmd
type ModeModeCmd C.struct_drm_mode_mode_cmd
const (
MODE_CURSOR_BO = C.DRM_MODE_CURSOR_BO
MODE_CURSOR_MOVE = C.DRM_MODE_CURSOR_MOVE
)
/*
Depending on the value in flags, different members are used.
CURSOR_BO uses crtc, width, height, and handle — a handle of 0 turns the cursor off.
CURSOR_MOVE uses crtc, x, and y.
*/
type (
ModeCursor C.struct_drm_mode_cursor
ModeCrtcLut C.struct_drm_mode_crtc_lut
)
const (
MODE_PAGE_FLIP_EVENT = C.DRM_MODE_PAGE_FLIP_EVENT
MODE_PAGE_FLIP_FLAGS = C.DRM_MODE_PAGE_FLIP_FLAGS
)
type (
	ModeCrtcPageFlip C.struct_drm_mode_crtc_page_flip
	// create a dumb scanout buffer
	ModeCreateDumb C.struct_drm_mode_create_dumb
	// set up for mmap of a dumb scanout buffer
	ModeMapDumb     C.struct_drm_mode_map_dumb
	ModeDestroyDumb C.struct_drm_mode_destroy_dumb
)
// Go-visible mirrors of the DRM_IOCTL_* request codes plus the event
// and capability constants. Additions over the previous revision:
// correctly spelled aliases IOCTL_AGP_ACQUIRE and
// IOCTL_MODE_DESTROY_DUMB for the two pre-existing misspellings,
// which are kept for backward compatibility.
const (
	IOCTL_BASE        = C.DRM_IOCTL_BASE
	IOCTL_VERSION     = C.DRM_IOCTL_VERSION
	IOCTL_GET_UNIQUE  = C.DRM_IOCTL_GET_UNIQUE
	IOCTL_GET_MAGIC   = C.DRM_IOCTL_GET_MAGIC
	IOCTL_IRQ_BUSID   = C.DRM_IOCTL_IRQ_BUSID
	IOCTL_GET_MAP     = C.DRM_IOCTL_GET_MAP
	IOCTL_GET_CLIENT  = C.DRM_IOCTL_GET_CLIENT
	IOCTL_GET_STATS   = C.DRM_IOCTL_GET_STATS
	IOCTL_SET_VERSION = C.DRM_IOCTL_SET_VERSION
	IOCTL_MODESET_CTL = C.DRM_IOCTL_MODESET_CTL
	IOCTL_GEM_CLOSE   = C.DRM_IOCTL_GEM_CLOSE
	IOCTL_GEM_FLINK   = C.DRM_IOCTL_GEM_FLINK
	IOCTL_GEM_OPEN    = C.DRM_IOCTL_GEM_OPEN
	IOCTL_GET_CAP     = C.DRM_IOCTL_GET_CAP
	IOCTL_SET_UNIQUE  = C.DRM_IOCTL_SET_UNIQUE
	IOCTL_AUTH_MAGIC  = C.DRM_IOCTL_AUTH_MAGIC
	IOCTL_BLOCK       = C.DRM_IOCTL_BLOCK
	IOCTL_UNBLOCK     = C.DRM_IOCTL_UNBLOCK
	IOCTL_CONTROL     = C.DRM_IOCTL_CONTROL
	IOCTL_ADD_MAP     = C.DRM_IOCTL_ADD_MAP
	IOCTL_ADD_BUFS    = C.DRM_IOCTL_ADD_BUFS
	IOCTL_MARK_BUFS   = C.DRM_IOCTL_MARK_BUFS
	IOCTL_INFO_BUFS   = C.DRM_IOCTL_INFO_BUFS
	IOCTL_MAP_BUFS    = C.DRM_IOCTL_MAP_BUFS
	IOCTL_FREE_BUFS   = C.DRM_IOCTL_FREE_BUFS
	IOCTL_RM_MAP      = C.DRM_IOCTL_RM_MAP

	IOCTL_SET_SAREA_CTX = C.DRM_IOCTL_SET_SAREA_CTX
	IOCTL_GET_SAREA_CTX = C.DRM_IOCTL_GET_SAREA_CTX

	IOCTL_SET_MASTER  = C.DRM_IOCTL_SET_MASTER
	IOCTL_DROP_MASTER = C.DRM_IOCTL_DROP_MASTER

	IOCTL_ADD_CTX    = C.DRM_IOCTL_ADD_CTX
	IOCTL_RM_CTX     = C.DRM_IOCTL_RM_CTX
	IOCTL_MOD_CTX    = C.DRM_IOCTL_MOD_CTX
	IOCTL_GET_CTX    = C.DRM_IOCTL_GET_CTX
	IOCTL_SWITCH_CTX = C.DRM_IOCTL_SWITCH_CTX
	IOCTL_NEW_CTX    = C.DRM_IOCTL_NEW_CTX
	IOCTL_RES_CTX    = C.DRM_IOCTL_RES_CTX
	IOCTL_ADD_DRAW   = C.DRM_IOCTL_ADD_DRAW
	IOCTL_RM_DRAW    = C.DRM_IOCTL_RM_DRAW
	IOCTL_DMA        = C.DRM_IOCTL_DMA
	IOCTL_LOCK       = C.DRM_IOCTL_LOCK
	IOCTL_UNLOCK     = C.DRM_IOCTL_UNLOCK
	IOCTL_FINISH     = C.DRM_IOCTL_FINISH

	IOCTL_PRIME_HANDLE_TO_FD = C.DRM_IOCTL_PRIME_HANDLE_TO_FD
	IOCTL_PRIME_FD_TO_HANDLE = C.DRM_IOCTL_PRIME_FD_TO_HANDLE

	// Deprecated: IOCTL_AGP_AQUIRE is a misspelling kept for backward
	// compatibility; use IOCTL_AGP_ACQUIRE instead.
	IOCTL_AGP_AQUIRE  = C.DRM_IOCTL_AGP_ACQUIRE
	IOCTL_AGP_ACQUIRE = C.DRM_IOCTL_AGP_ACQUIRE
	IOCTL_AGP_RELEASE = C.DRM_IOCTL_AGP_RELEASE
	IOCTL_AGP_ENABLE  = C.DRM_IOCTL_AGP_ENABLE
	IOCTL_AGP_INFO    = C.DRM_IOCTL_AGP_INFO
	IOCTL_AGP_ALLOC   = C.DRM_IOCTL_AGP_ALLOC
	IOCTL_AGP_FREE    = C.DRM_IOCTL_AGP_FREE
	IOCTL_AGP_BIND    = C.DRM_IOCTL_AGP_BIND
	IOCTL_AGP_UNBIND  = C.DRM_IOCTL_AGP_UNBIND

	IOCTL_SG_ALLOC = C.DRM_IOCTL_SG_ALLOC
	IOCTL_SG_FREE  = C.DRM_IOCTL_SG_FREE

	IOCTL_WAIT_VBLANK = C.DRM_IOCTL_WAIT_VBLANK
	IOCTL_UPDATE_DRAW = C.DRM_IOCTL_UPDATE_DRAW

	IOCTL_MODE_GETRESOURCES = C.DRM_IOCTL_MODE_GETRESOURCES
	IOCTL_MODE_GETCRTC      = C.DRM_IOCTL_MODE_GETCRTC
	IOCTL_MODE_SETCRTC      = C.DRM_IOCTL_MODE_SETCRTC
	IOCTL_MODE_CURSOR       = C.DRM_IOCTL_MODE_CURSOR
	IOCTL_MODE_GETGAMMA     = C.DRM_IOCTL_MODE_GETGAMMA
	IOCTL_MODE_SETGAMMA     = C.DRM_IOCTL_MODE_SETGAMMA
	IOCTL_MODE_GETENCODER   = C.DRM_IOCTL_MODE_GETENCODER
	IOCTL_MODE_GETCONNECTOR = C.DRM_IOCTL_MODE_GETCONNECTOR
	IOCTL_MODE_ATTACHMODE   = C.DRM_IOCTL_MODE_ATTACHMODE
	IOCTL_MODE_DETACHMODE   = C.DRM_IOCTL_MODE_DETACHMODE
	IOCTL_MODE_GETPROPERTY  = C.DRM_IOCTL_MODE_GETPROPERTY
	IOCTL_MODE_SETPROPERTY  = C.DRM_IOCTL_MODE_SETPROPERTY
	IOCTL_MODE_GETPROPBLOB  = C.DRM_IOCTL_MODE_GETPROPBLOB
	IOCTL_MODE_GETFB        = C.DRM_IOCTL_MODE_GETFB
	IOCTL_MODE_ADDFB        = C.DRM_IOCTL_MODE_ADDFB
	IOCTL_MODE_RMFB         = C.DRM_IOCTL_MODE_RMFB
	IOCTL_MODE_PAGE_FLIP    = C.DRM_IOCTL_MODE_PAGE_FLIP
	IOCTL_MODE_DIRTYFB      = C.DRM_IOCTL_MODE_DIRTYFB

	IOCTL_MODE_CREATE_DUMB = C.DRM_IOCTL_MODE_CREATE_DUMB
	IOCTL_MODE_MAP_DUMB    = C.DRM_IOCTL_MODE_MAP_DUMB
	// Deprecated: IOCTL_MODE_DESTROY_DUM is a truncated name kept for
	// backward compatibility; use IOCTL_MODE_DESTROY_DUMB instead.
	IOCTL_MODE_DESTROY_DUM       = C.DRM_IOCTL_MODE_DESTROY_DUMB
	IOCTL_MODE_DESTROY_DUMB      = C.DRM_IOCTL_MODE_DESTROY_DUMB
	IOCTL_MODE_GETPLANERESOURCES = C.DRM_IOCTL_MODE_GETPLANERESOURCES
	IOCTL_MODE_GETPLANE          = C.DRM_IOCTL_MODE_GETPLANE
	IOCTL_MODE_SETPLANE          = C.DRM_IOCTL_MODE_SETPLANE
	IOCTL_MODE_ADDFB2            = C.DRM_IOCTL_MODE_ADDFB2
	IOCTL_MODE_OBJ_GETPROPERTIES = C.DRM_IOCTL_MODE_OBJ_GETPROPERTIES
	IOCTL_MODE_OBJ_SETPROPERTY   = C.DRM_IOCTL_MODE_OBJ_SETPROPERTY

	COMMAND_BASE = C.DRM_COMMAND_BASE
	COMMAND_END  = C.DRM_COMMAND_END

	EVENT_VBLANK        = C.DRM_EVENT_VBLANK
	EVENT_FLIP_COMPLETE = C.DRM_EVENT_FLIP_COMPLETE

	CAP_DUMB_BUFFER          = C.DRM_CAP_DUMB_BUFFER
	CAP_VBLANK_HIGH_CRTC     = C.DRM_CAP_VBLANK_HIGH_CRTC
	CAP_DUMB_PREFERRED_DEPTH = C.DRM_CAP_DUMB_PREFERRED_DEPTH
	CAP_DUMB_PREFER_SHADOW   = C.DRM_CAP_DUMB_PREFER_SHADOW
	CAP_PRIME                = C.DRM_CAP_PRIME
	PRIME_CAP_IMPORT         = C.DRM_PRIME_CAP_IMPORT
	PRIME_CAP_EXPORT         = C.DRM_PRIME_CAP_EXPORT
)
//drm_fourcc.h
const (
FORMAT_BIG_ENDIAN = C.DRM_FORMAT_BIG_ENDIAN
FORMAT_C8 = C.DRM_FORMAT_C8
FORMAT_RGB332 = C.DRM_FORMAT_RGB332
FORMAT_BGR233 = C.DRM_FORMAT_BGR233
FORMAT_XRGB4444 = C.DRM_FORMAT_XRGB4444
FORMAT_XBGR4444 = C.DRM_FORMAT_XBGR4444
FORMAT_RGBX4444 = C.DRM_FORMAT_RGBX4444
FORMAT_BGRX4444 = C.DRM_FORMAT_BGRX4444
FORMAT_ARGB4444 = C.DRM_FORMAT_ARGB4444
FORMAT_ABGR4444 = C.DRM_FORMAT_ABGR4444
FORMAT_RGBA4444 = C.DRM_FORMAT_RGBA4444
FORMAT_BGRA4444 = C.DRM_FORMAT_BGRA4444
FORMAT_XRGB1555 = C.DRM_FORMAT_XRGB1555
FORMAT_XBGR1555 = C.DRM_FORMAT_XBGR1555
FORMAT_RGBX5551 = C.DRM_FORMAT_RGBX5551
FORMAT_BGRX5551 = C.DRM_FORMAT_BGRX5551
FORMAT_ARGB1555 = C.DRM_FORMAT_ARGB1555
FORMAT_ABGR1555 = C.DRM_FORMAT_ABGR1555
FORMAT_RGBA5551 = C.DRM_FORMAT_RGBA5551
FORMAT_BGRA5551 = C.DRM_FORMAT_BGRA5551
FORMAT_RGB565 = C.DRM_FORMAT_RGB565
FORMAT_BGR565 = C.DRM_FORMAT_BGR565
FORMAT_RGB888 = C.DRM_FORMAT_RGB888
FORMAT_BGR888 = C.DRM_FORMAT_BGR888
FORMAT_XRGB8888 = C.DRM_FORMAT_XRGB8888
FORMAT_XBGR8888 = C.DRM_FORMAT_XBGR8888
FORMAT_RGBX8888 = C.DRM_FORMAT_RGBX8888
FORMAT_BGRX8888 = C.DRM_FORMAT_BGRX8888
FORMAT_ARGB8888 = C.DRM_FORMAT_ARGB8888
FORMAT_ABGR8888 = C.DRM_FORMAT_ABGR8888
FORMAT_RGBA8888 = C.DRM_FORMAT_RGBA8888
FORMAT_BGRA8888 = C.DRM_FORMAT_BGRA8888
FORMAT_XRGB2101010 = C.DRM_FORMAT_XRGB2101010
FORMAT_XBGR2101010 = C.DRM_FORMAT_XBGR2101010
FORMAT_RGBX1010102 = C.DRM_FORMAT_RGBX1010102
FORMAT_BGRX1010102 = C.DRM_FORMAT_BGRX1010102
FORMAT_ARGB2101010 = C.DRM_FORMAT_ARGB2101010
FORMAT_ABGR2101010 = C.DRM_FORMAT_ABGR2101010
FORMAT_RGBA1010102 = C.DRM_FORMAT_RGBA1010102
FORMAT_BGRA1010102 = C.DRM_FORMAT_BGRA1010102
FORMAT_YUYV = C.DRM_FORMAT_YUYV
FORMAT_YVYU = C.DRM_FORMAT_YVYU
FORMAT_UYVY = C.DRM_FORMAT_UYVY
FORMAT_VYUY = C.DRM_FORMAT_VYUY
FORMAT_AYUV = C.DRM_FORMAT_AYUV
FORMAT_NV12 = C.DRM_FORMAT_NV12
FORMAT_NV21 = C.DRM_FORMAT_NV21
FORMAT_NV16 = C.DRM_FORMAT_NV16
FORMAT_NV61 = C.DRM_FORMAT_NV61
FORMAT_YUV410 = C.DRM_FORMAT_YUV410
FORMAT_YVU410 = C.DRM_FORMAT_YVU410
FORMAT_YUV411 = C.DRM_FORMAT_YUV411
FORMAT_YVU411 = C.DRM_FORMAT_YVU411
FORMAT_YUV420 = C.DRM_FORMAT_YUV420
FORMAT_YVU420 = C.DRM_FORMAT_YVU420
FORMAT_YUV422 = C.DRM_FORMAT_YUV422
FORMAT_YVU422 = C.DRM_FORMAT_YVU422
FORMAT_YUV444 = C.DRM_FORMAT_YUV444
FORMAT_YVU444 = C.DRM_FORMAT_YVU444
)
//drm_sarea.h
const (
SAREA_MAX = C.SAREA_MAX
SAREA_MAX_DRAWABLES = C.SAREA_MAX_DRAWABLES
SAREA_DRAWABLE_CLAIMED_ENTRY = C.SAREA_DRAWABLE_CLAIMED_ENTRY
)
type (
SareaDrawable C.struct_drm_sarea_drawable
/** SAREA frame */
SareaFrame C.struct_drm_sarea_frame
/** SAREA */
Sarea C.struct_drm_sarea
)
|
package types
import (
"github.com/segmentio/kafka-go"
)
// RelayMessage encapsulates a kafka message that is read by relay.Run()
type RelayMessage struct {
	Value   *kafka.Message       // the consumed Kafka message being relayed
	Options *RelayMessageOptions // per-message processing options (none defined yet)
}
// RelayMessageOptions contains any additional options necessary for processing of Kafka messages by the relayer.
// Currently empty — presumably a placeholder for future per-message settings.
type RelayMessageOptions struct {
}
// OffsetInfo encapsulates info related to the offset count in the
// consumed stream. (NOTE(review): the original doc comment was cut
// off mid-sentence — confirm the intended wording.)
type OffsetInfo struct {
	Count      int   // running count — presumably messages seen; confirm with caller
	LastOffset int64 // most recently observed Kafka offset
}
|
/*
Copyright 2017 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"crypto/tls"
"encoding/json"
"net/http"
"net/url"
"strings"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib"
"github.com/gravitational/teleport/lib/httplib"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/roundtrip"
"github.com/gravitational/trace"
log "github.com/sirupsen/logrus"
)
// UpsertTrustedCluster creates or toggles a Trusted Cluster relationship.
//
// Trust is (re-)established only on first creation of the resource —
// i.e. when no TrustedCluster with this name exists yet. Afterwards
// the resource's Enabled flag selects between enabling and disabling
// the relationship.
//
// Fixes over the previous revision: a non-NotFound error from the
// backend lookup is now returned instead of being silently treated as
// "resource already exists", and the non-idiomatic "== true"
// comparison is gone.
func (a *AuthServer) UpsertTrustedCluster(trustedCluster services.TrustedCluster) error {
	_, err := a.Presence.GetTrustedCluster(trustedCluster.GetName())
	if err != nil && !trace.IsNotFound(err) {
		return trace.Wrap(err)
	}
	establishTrust := trace.IsNotFound(err)

	if trustedCluster.GetEnabled() {
		if err := a.enableTrustedCluster(trustedCluster, establishTrust); err != nil {
			return trace.Wrap(err)
		}
		return nil
	}

	if err := a.disableTrustedCluster(trustedCluster, establishTrust); err != nil {
		return trace.Wrap(err)
	}
	return nil
}
// DeleteTrustedCluster removes services.CertAuthority, services.ReverseTunnel,
// and services.TrustedCluster resources.
//
// Each deletion tolerates NotFound (a partially deleted relationship
// is cleaned up as far as possible); any other error aborts the
// sequence.
func (a *AuthServer) DeleteTrustedCluster(name string) error {
	deletions := []func() error{
		func() error {
			return a.DeleteCertAuthority(services.CertAuthID{Type: services.HostCA, DomainName: name})
		},
		func() error {
			return a.DeleteCertAuthority(services.CertAuthID{Type: services.UserCA, DomainName: name})
		},
		func() error {
			return a.DeleteReverseTunnel(name)
		},
		func() error {
			return a.Presence.DeleteTrustedCluster(name)
		},
	}
	for _, del := range deletions {
		if err := del(); err != nil && !trace.IsNotFound(err) {
			return trace.Wrap(err)
		}
	}
	return nil
}
// enableTrustedCluster activates the trust relationship: it re-activates the
// remote cluster's certificate authorities, creates the reverse tunnel, and
// persists the updated trusted cluster resource. Statement order matters:
// disable always runs first so activation starts from a known state.
func (a *AuthServer) enableTrustedCluster(t services.TrustedCluster, establishTrust bool) error {
	log.Debugf("[TRUSTED CLUSTER] Enable Trusted Cluster: %v; Establish Trust: %v", t.GetEnabled(), establishTrust)
	// Start with a disabled cluster; we enable it afterwards. This also
	// performs the token exchange when establishTrust is set.
	err := a.disableTrustedCluster(t, establishTrust)
	if err != nil {
		return trace.Wrap(err)
	}
	// Activate the cert authorities. disableTrustedCluster above always
	// leaves them deactivated, so there is something to activate here.
	err = a.ActivateCertAuthority(services.CertAuthID{Type: services.UserCA, DomainName: t.GetName()})
	if err != nil {
		return trace.Wrap(err)
	}
	err = a.ActivateCertAuthority(services.CertAuthID{Type: services.HostCA, DomainName: t.GetName()})
	if err != nil {
		return trace.Wrap(err)
	}
	// Add the reverse tunnel (pointing at the remote cluster's tunnel
	// address) to the backend.
	reverseTunnel := services.NewReverseTunnel(
		t.GetName(),
		[]string{t.GetReverseTunnelAddress()},
	)
	err = a.UpsertReverseTunnel(reverseTunnel)
	if err != nil {
		return trace.Wrap(err)
	}
	// Persist the trusted cluster resource itself.
	err = a.Presence.UpsertTrustedCluster(t)
	if err != nil {
		return trace.Wrap(err)
	}
	return nil
}
// disableTrustedCluster deactivates the trust relationship: it deactivates
// the remote cert authorities, removes the reverse tunnel, and persists the
// resource. When establishTrust is true it first performs the token exchange
// with the remote cluster and stores the returned certificate authorities.
func (a *AuthServer) disableTrustedCluster(t services.TrustedCluster, establishTrust bool) error {
	log.Debugf("[TRUSTED CLUSTER] Disable Trusted Cluster: %v; Establish Trust: %v", t.GetEnabled(), establishTrust)
	// Do the token exchange only when we are establishing trust for the
	// first time (resource did not previously exist).
	if establishTrust {
		remoteCAs, err := a.establishTrust(t)
		if err != nil {
			return trace.Wrap(err)
		}
		err = a.addRemoteCAs(remoteCAs, t)
		if err != nil {
			return trace.Wrap(err)
		}
	}
	// Deactivate cert authorities. NotFound is tolerated: it occurs when the
	// cert authority is already disabled and you disable it again, or on the
	// disable pass that enableTrustedCluster always runs first.
	err := a.DeactivateCertAuthority(services.CertAuthID{Type: services.UserCA, DomainName: t.GetName()})
	if err != nil && !trace.IsNotFound(err) {
		return trace.Wrap(err)
	}
	err = a.DeactivateCertAuthority(services.CertAuthID{Type: services.HostCA, DomainName: t.GetName()})
	if err != nil && !trace.IsNotFound(err) {
		return trace.Wrap(err)
	}
	// Remove the reverse tunnel if it exists; NotFound is fine here too.
	err = a.DeleteReverseTunnel(t.GetName())
	if err != nil && !trace.IsNotFound(err) {
		return trace.Wrap(err)
	}
	// Persist the trusted cluster resource itself.
	err = a.Presence.UpsertTrustedCluster(t)
	if err != nil {
		return trace.Wrap(err)
	}
	return nil
}
// establishTrust performs the token exchange with the remote cluster: it
// sends our join token and local host CAs to the remote proxy and returns
// the certificate authorities the remote cluster hands back.
func (a *AuthServer) establishTrust(trustedCluster services.TrustedCluster) ([]services.CertAuthority, error) {
	domainName, err := a.GetDomainName()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Collect only the host certificate authorities that belong to this
	// cluster; CAs stored locally for other clusters are excluded.
	allLocalCAs, err := a.GetCertAuthorities(services.HostCA, false)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	var localCertAuthorities []services.CertAuthority
	for _, ca := range allLocalCAs {
		if ca.GetClusterName() == domainName {
			localCertAuthorities = append(localCertAuthorities, ca)
		}
	}
	// The validate request carries the join token plus our local CAs so the
	// remote cluster can verify us and record our authorities.
	validateRequest := ValidateTrustedClusterRequest{
		Token: trustedCluster.GetToken(),
		CAs:   localCertAuthorities,
	}
	log.Debugf("[TRUSTED CLUSTER] Sending validate request; token=%v, CAs=%v", validateRequest.Token, validateRequest.CAs)
	// The exchange goes through the remote cluster's proxy endpoint.
	validateResponse, err := a.sendValidateRequestToProxy(trustedCluster.GetProxyAddress(), &validateRequest)
	if err != nil {
		log.Error(err)
		if strings.Contains(err.Error(), "x509") {
			return nil, trace.AccessDenied("the trusted cluster uses misconfigured HTTP/TLS certificate.")
		}
		return nil, trace.Wrap(err)
	}
	log.Debugf("[TRUSTED CLUSTER] Received validate response; CAs=%v", validateResponse.CAs)
	return validateResponse.CAs, nil
}
// addRemoteCAs stores the remote cluster's certificate authorities in our
// backend under the trusted cluster's name, applying our role configuration.
func (a *AuthServer) addRemoteCAs(remoteCAs []services.CertAuthority, trustedCluster services.TrustedCluster) error {
	for _, ca := range remoteCAs {
		// Record the remote CA under the trusted cluster's name rather than
		// whatever name the remote side used.
		ca.SetName(trustedCluster.GetName())
		// Discard whatever roles the remote side sent and apply the role
		// configuration from the trusted cluster resource instead.
		ca.SetRoles(nil)
		if ca.GetType() == services.UserCA {
			for _, role := range trustedCluster.GetRoles() {
				ca.AddRole(role)
			}
			ca.SetRoleMap(trustedCluster.GetRoleMap())
		}
		// Create (not Upsert) so an existing local CA that happens to share
		// a name with the remote one cannot be silently overwritten.
		if err := a.CreateCertAuthority(ca); err != nil {
			return trace.Wrap(err)
		}
	}
	return nil
}
// validateTrustedCluster handles the remote side of the token exchange: it
// verifies the presented join token, stores the joining cluster's certificate
// authorities, and returns this cluster's own CAs in the response.
func (a *AuthServer) validateTrustedCluster(validateRequest *ValidateTrustedClusterRequest) (*ValidateTrustedClusterResponse, error) {
	domainName, err := a.GetDomainName()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Validate that we generated the token before accepting anything else.
	err = a.validateTrustedClusterToken(validateRequest.Token)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Log the remote certificate authorities we are about to add.
	log.Debugf("[TRUSTED CLUSTER] Received validate request: token=%v, CAs=%v", validateRequest.Token, validateRequest.CAs)
	// Token has been validated; upsert the given certificate authorities.
	for _, certAuthority := range validateRequest.CAs {
		// Reject a remote CA whose name collides with our own cluster name,
		// otherwise it could overwrite our own certificate authority.
		if certAuthority.GetName() == domainName {
			return nil, trace.AccessDenied("remote certificate authority has same name as cluster certificate authority: %v", domainName)
		}
		err = a.UpsertCertAuthority(certAuthority)
		if err != nil {
			return nil, trace.Wrap(err)
		}
	}
	// Export our own certificate authorities (host and user) and return them
	// to the joining cluster.
	validateResponse := ValidateTrustedClusterResponse{
		CAs: []services.CertAuthority{},
	}
	for _, caType := range []services.CertAuthType{services.HostCA, services.UserCA} {
		certAuthorities, err := a.GetCertAuthorities(caType, false)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		for _, certAuthority := range certAuthorities {
			// Only send CAs that belong to this cluster, not remote ones we
			// happen to have stored.
			if certAuthority.GetClusterName() == domainName {
				validateResponse.CAs = append(validateResponse.CAs, certAuthority)
			}
		}
	}
	// Log the local certificate authorities we are sending back.
	log.Debugf("[TRUSTED CLUSTER] Sending validate response: CAs=%v", validateResponse.CAs)
	return &validateResponse, nil
}
// validateTrustedClusterToken checks that the presented token was issued by
// this cluster, carries a trusted-cluster role, and is still within its TTL.
func (a *AuthServer) validateTrustedClusterToken(token string) error {
	roles, err := a.ValidateToken(token)
	if err != nil {
		return trace.AccessDenied("the remote server denied access: invalid cluster token")
	}
	// Either the current trusted-cluster role or the legacy token type is
	// acceptable.
	hasValidRole := roles.Include(teleport.RoleTrustedCluster) || roles.Include(teleport.LegacyClusterTokenType)
	if !hasValidRole {
		return trace.AccessDenied("role does not match")
	}
	if !a.checkTokenTTL(token) {
		return trace.AccessDenied("expired token")
	}
	return nil
}
// sendValidateRequestToProxy posts the validate request to the remote
// cluster's proxy over HTTPS and returns the decoded response.
// Receiver renamed s -> a for consistency with the other AuthServer methods.
func (a *AuthServer) sendValidateRequestToProxy(host string, validateRequest *ValidateTrustedClusterRequest) (*ValidateTrustedClusterResponse, error) {
	proxyAddr := url.URL{
		Scheme: "https",
		Host:   host,
	}
	var opts []roundtrip.ClientParam
	if lib.IsInsecureDevMode() {
		log.Warn("insecureSkipVerify is used to communicate with proxy. make sure you intend to run Teleport in insecure mode!")
		// Bug fix: the previous code type-asserted http.DefaultTransport and
		// mutated its TLSClientConfig, which disabled TLS verification for
		// EVERY client in the process that uses the default transport. Build
		// a dedicated transport instead; it still honors proxy settings from
		// the environment via ProxyFromEnvironment.
		insecureWebClient := &http.Client{
			Transport: &http.Transport{
				Proxy:           http.ProxyFromEnvironment,
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			},
		}
		opts = append(opts, roundtrip.HTTPClient(insecureWebClient))
	}
	clt, err := roundtrip.NewClient(proxyAddr.String(), teleport.WebAPIVersion, opts...)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	validateRequestRaw, err := validateRequest.ToRaw()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	out, err := httplib.ConvertResponse(clt.PostJSON(clt.Endpoint("webapi", "trustedclusters", "validate"), validateRequestRaw))
	if err != nil {
		return nil, trace.Wrap(err)
	}
	var validateResponseRaw *ValidateTrustedClusterResponseRaw
	err = json.Unmarshal(out.Bytes(), &validateResponseRaw)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	validateResponse, err := validateResponseRaw.ToNative()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return validateResponse, nil
}
// ValidateTrustedClusterRequest is the payload a joining cluster sends to the
// remote cluster to establish trust.
type ValidateTrustedClusterRequest struct {
	// Token is the join token issued by the remote cluster.
	Token string `json:"token"`
	// CAs are the joining cluster's local certificate authorities.
	CAs []services.CertAuthority `json:"certificate_authorities"`
}
// ToRaw marshals each certificate authority to bytes so the request can be
// serialized as JSON.
func (v *ValidateTrustedClusterRequest) ToRaw() (*ValidateTrustedClusterRequestRaw, error) {
	marshaler := services.GetCertAuthorityMarshaler()
	cas := make([][]byte, 0, len(v.CAs))
	for _, ca := range v.CAs {
		raw, err := marshaler.MarshalCertAuthority(ca)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		cas = append(cas, raw)
	}
	return &ValidateTrustedClusterRequestRaw{
		Token: v.Token,
		CAs:   cas,
	}, nil
}
// ValidateTrustedClusterRequestRaw is the wire form of
// ValidateTrustedClusterRequest with each CA pre-marshaled to raw bytes.
type ValidateTrustedClusterRequestRaw struct {
	// Token is the join token issued by the remote cluster.
	Token string `json:"token"`
	// CAs holds the marshaled certificate authorities.
	CAs [][]byte `json:"certificate_authorities"`
}
// ToNative unmarshals the raw certificate authorities back into their native
// representation.
func (v *ValidateTrustedClusterRequestRaw) ToNative() (*ValidateTrustedClusterRequest, error) {
	unmarshaler := services.GetCertAuthorityMarshaler()
	cas := make([]services.CertAuthority, 0, len(v.CAs))
	for _, raw := range v.CAs {
		ca, err := unmarshaler.UnmarshalCertAuthority(raw)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		cas = append(cas, ca)
	}
	return &ValidateTrustedClusterRequest{
		Token: v.Token,
		CAs:   cas,
	}, nil
}
// ValidateTrustedClusterResponse is what the remote cluster returns after a
// successful token exchange: its own certificate authorities.
type ValidateTrustedClusterResponse struct {
	// CAs are the remote cluster's certificate authorities.
	CAs []services.CertAuthority `json:"certificate_authorities"`
}
// ToRaw marshals each certificate authority to bytes so the response can be
// serialized as JSON.
func (v *ValidateTrustedClusterResponse) ToRaw() (*ValidateTrustedClusterResponseRaw, error) {
	marshaler := services.GetCertAuthorityMarshaler()
	cas := make([][]byte, 0, len(v.CAs))
	for _, ca := range v.CAs {
		raw, err := marshaler.MarshalCertAuthority(ca)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		cas = append(cas, raw)
	}
	return &ValidateTrustedClusterResponseRaw{
		CAs: cas,
	}, nil
}
// ValidateTrustedClusterResponseRaw is the wire form of
// ValidateTrustedClusterResponse with each CA pre-marshaled to raw bytes.
type ValidateTrustedClusterResponseRaw struct {
	// CAs holds the marshaled certificate authorities.
	CAs [][]byte `json:"certificate_authorities"`
}
// ToNative unmarshals the raw certificate authorities back into their native
// representation.
func (v *ValidateTrustedClusterResponseRaw) ToNative() (*ValidateTrustedClusterResponse, error) {
	unmarshaler := services.GetCertAuthorityMarshaler()
	cas := make([]services.CertAuthority, 0, len(v.CAs))
	for _, raw := range v.CAs {
		ca, err := unmarshaler.UnmarshalCertAuthority(raw)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		cas = append(cas, ca)
	}
	return &ValidateTrustedClusterResponse{
		CAs: cas,
	}, nil
}
|
package main
import (
"github.com/omise/omise-go"
"github.com/omise/omise-go/internal"
)
// client caches the omise API client so getClient only constructs it once.
var client *omise.Client
// getClient returns the cached omise API client, creating it from the
// configured public/secret key pair on first call.
// NOTE(review): lazy initialization is not goroutine-safe; presumably this
// CLI runs single-threaded — confirm before calling from goroutines.
func getClient() (*omise.Client, error) {
	if client == nil {
		newClient, err := omise.NewClient(config.PKey, config.SKey)
		if err != nil {
			return nil, err
		}
		client = newClient
	}
	return client, nil
}
// do executes the given API operation, decodes the response into result,
// and prints it via output.
func do(result interface{}, op internal.Operation) error {
	c, err := getClient()
	if err != nil {
		return err
	}
	if err := c.Do(result, op); err != nil {
		return err
	}
	return output(result)
}
|
package suites
import (
"os"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/authelia/authelia/v4/internal/utils"
)
//nolint:unparam
// waitUntilServiceLogDetected polls the docker logs of the given service
// until one of logPatterns appears or the timeout elapses.
func waitUntilServiceLogDetected(
	interval time.Duration,
	timeout time.Duration,
	dockerEnvironment *DockerEnvironment,
	service string,
	logPatterns []string) error {
	log.Debug("Waiting for service " + service + " to be ready...")
	// Re-check the last 20 log lines on every tick.
	return utils.CheckUntil(interval, timeout, func() (bool, error) {
		logs, err := dockerEnvironment.Logs(service, []string{"--tail", "20"})
		if err != nil {
			return false, err
		}
		for _, pattern := range logPatterns {
			if strings.Contains(logs, pattern) {
				return true, nil
			}
		}
		return false, nil
	})
}
// waitUntilAutheliaBackendIsReady waits for the backend's startup log line.
func waitUntilAutheliaBackendIsReady(dockerEnvironment *DockerEnvironment) error {
	const (
		interval = 5 * time.Second
		timeout  = 90 * time.Second
	)
	return waitUntilServiceLogDetected(interval, timeout, dockerEnvironment,
		"authelia-backend", []string{"Startup complete"})
}
// waitUntilAutheliaFrontendIsReady waits for the frontend dev server's
// ready/restart log lines.
func waitUntilAutheliaFrontendIsReady(dockerEnvironment *DockerEnvironment) error {
	const (
		interval = 5 * time.Second
		timeout  = 90 * time.Second
	)
	patterns := []string{"dev server running at", "ready in", "server restarted"}
	return waitUntilServiceLogDetected(interval, timeout, dockerEnvironment,
		"authelia-frontend", patterns)
}
// waitUntilK3DIsReady waits for the k3d API server's listen log line.
func waitUntilK3DIsReady(dockerEnvironment *DockerEnvironment) error {
	const (
		interval = 5 * time.Second
		timeout  = 90 * time.Second
	)
	return waitUntilServiceLogDetected(interval, timeout, dockerEnvironment,
		"k3d", []string{"API listen on [::]:2376"})
}
// waitUntilSambaIsReady waits for the Samba LDAP service's RUNNING log line.
func waitUntilSambaIsReady(dockerEnvironment *DockerEnvironment) error {
	const (
		interval = 5 * time.Second
		timeout  = 90 * time.Second
	)
	return waitUntilServiceLogDetected(interval, timeout, dockerEnvironment,
		"sambaldap", []string{"samba entered RUNNING state"})
}
// waitUntilAutheliaIsReady blocks until the Authelia services required by the
// given suite report readiness in their docker logs.
func waitUntilAutheliaIsReady(dockerEnvironment *DockerEnvironment, suite string) error {
	log.Info("Waiting for Authelia to be ready...")
	if err := waitUntilAutheliaBackendIsReady(dockerEnvironment); err != nil {
		return err
	}
	// The frontend dev server is skipped in CI and for the CLI suite.
	// NOTE(review): `t` is a package-level identifier defined elsewhere in
	// this package — presumably the string "true"; confirm against the
	// package constants.
	if os.Getenv("CI") != t && suite != "CLI" {
		if err := waitUntilAutheliaFrontendIsReady(dockerEnvironment); err != nil {
			return err
		}
	}
	// The ActiveDirectory suite additionally requires the Samba LDAP service.
	if suite == "ActiveDirectory" {
		if err := waitUntilSambaIsReady(dockerEnvironment); err != nil {
			return err
		}
	}
	log.Info("Authelia is now ready!")
	return nil
}
|
package pathfileops
import (
"errors"
"io"
"os"
"strings"
"testing"
"time"
)
// TestFileHelper_OpenFileReadOnly_01 copies a known source file to a scratch
// target, opens the target read-only, and verifies its contents round-trip.
func TestFileHelper_OpenFileReadOnly_01(t *testing.T) {
	fh := FileHelper{}
	// Bug fix: 'source' was first set to a hard-coded literal and then
	// immediately overwritten (dead store); use the shared constant directly.
	source := fh.AdjustPathSlash(alogtopTest2Text)
	target := fh.AdjustPathSlash("../../checkfiles/TestFileHelper_OpenFileReadOnly_01.txt")
	expected := "Top level test file # 2."
	if fh.DoesFileExist(target) {
		err := fh.DeleteDirFile(target)
		if err != nil {
			t.Errorf("Test Setup Error: Attempted deletion of preexisting "+
				"target file FAILED!\ntargetFile='%v'\nError='%v'\n",
				target, err.Error())
			return
		}
		if fh.DoesFileExist(target) {
			t.Errorf("Test Setup Error: Verification of target file deletion FAILED!\n"+
				"Target File still exists after attempted deletion!\ntargetFile='%v'\n",
				target)
			return
		}
	}
	err := fh.CopyFileByIo(source, target)
	if err != nil {
		t.Errorf("Test Setup Error: Copy of source file to target file FAILED!\n"+
			"sourceFile='%v'\ntargetFile='%v'\nError='%v'\n",
			source, target, err.Error())
		return
	}
	f, err := fh.OpenFileReadOnly(target)
	if err != nil {
		t.Errorf("Failed to open file: '%v'\nError='%v'",
			target, err.Error())
		return
	}
	// Renamed from 'bytes' to avoid shadowing the standard library package name.
	buf := make([]byte, 500)
	bytesRead, err := f.Read(buf)
	if err != nil {
		t.Errorf("Error returned from f.Read(bytes).\n"+
			"targetFile='%v'\nError='%v'\n", target, err.Error())
		_ = f.Close()
		_ = fh.DeleteDirFile(target)
		return
	}
	s := string(buf[0:bytesRead])
	if expected != s {
		t.Errorf("Expected read string='%v'. Instead read string='%v'",
			expected, s)
	}
	_ = f.Close()
	_ = fh.DeleteDirFile(target)
}
// TestFileHelper_OpenFileReadOnly_02 expects an error for an empty path string.
func TestFileHelper_OpenFileReadOnly_02(t *testing.T) {
	helper := FileHelper{}
	if _, err := helper.OpenFileReadOnly(""); err == nil {
		t.Error("Expected an error from fh.OpenFileReadOnly(\"\") " +
			"because the input parameter is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!")
	}
}
// TestFileHelper_OpenFileReadOnly_03 expects an error for an all-blank path.
func TestFileHelper_OpenFileReadOnly_03(t *testing.T) {
	helper := FileHelper{}
	if _, err := helper.OpenFileReadOnly("   "); err == nil {
		t.Error("Expected an error from fh.OpenFileReadOnly(\"\") " +
			"because the input parameter consists entirely of blank spaces.\n" +
			"However, NO ERROR WAS RETURNED!")
	}
}
// TestFileHelper_OpenFileReadOnly_04 expects an error for a nonexistent file.
func TestFileHelper_OpenFileReadOnly_04(t *testing.T) {
	helper := FileHelper{}
	missingFile := helper.AdjustPathSlash("../../filesfortest/levelfilesfortest/iDoNotExist.txt")
	if _, err := helper.OpenFileReadOnly(missingFile); err == nil {
		t.Error("Expected an error from fh.OpenFileReadOnly(targetFile) " +
			"because the input parameter 'targetFile' does not exist.\n" +
			"However, NO ERROR WAS RETURNED!")
	}
}
// TestFileHelper_OpenFileReadOnly_05 verifies that writing to a file opened
// read-only fails.
func TestFileHelper_OpenFileReadOnly_05(t *testing.T) {
	fh := FileHelper{}
	// Bug fix: 'source' was first set to a hard-coded literal and then
	// immediately overwritten (dead store); use the shared constant directly.
	source := fh.AdjustPathSlash(alogtopTest2Text)
	target := fh.AdjustPathSlash("../../checkfiles/TestFileHelper_OpenFileReadOnly_01.txt")
	if fh.DoesFileExist(target) {
		err := fh.DeleteDirFile(target)
		if err != nil {
			t.Errorf("Test Setup Error: Attempted deletion of preexisting "+
				"target file FAILED!\ntargetFile='%v'\nError='%v'\n",
				target, err.Error())
			return
		}
		if fh.DoesFileExist(target) {
			t.Errorf("Test Setup Error: Verification of target file deletion FAILED!\n"+
				"Target File still exists after attempted deletion!\ntargetFile='%v'\n",
				target)
			return
		}
	}
	err := fh.CopyFileByIo(source, target)
	if err != nil {
		t.Errorf("Test Setup Error: Copy of source file to target file FAILED!\n"+
			"sourceFile='%v'\ntargetFile='%v'\nError='%v'\n",
			source, target, err.Error())
		return
	}
	f, err := fh.OpenFileReadOnly(target)
	if err != nil {
		t.Errorf("Failed to open file: '%v'\nError='%v'",
			target, err.Error())
		return
	}
	testText := "Cannot write text to read-only file!"
	// A write on a read-only handle must fail.
	_, err = f.WriteString(testText)
	if err == nil {
		t.Errorf("Expected an error return from f.WriteString(testText) " +
			"because\n'f' references a read-only file. However, NO ERROR WAS RETURNED!\n")
	}
	err = f.Close()
	if err != nil {
		t.Errorf("Test Clean-up Error: Error return from f.Close().\n"+
			"File Name='%v'\nError='%v'\n",
			target, err.Error())
	}
	err = fh.DeleteDirFile(target)
	if err != nil {
		t.Errorf("Test Clean-up Error: Error return from fh.DeleteDirFile(target).\n"+
			"target='%v'\nError='%v'", target, err.Error())
	}
}
// TestFileHelper_OpenFileReadOnly_06 verifies that opening a directory
// read-only returns an error.
func TestFileHelper_OpenFileReadOnly_06(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../createFilesTest/Level01/Level02")
	fPtr, err := fh.OpenFileReadOnly(target)
	if err == nil {
		// gofmt fix: removed the stray trailing comma after the argument list.
		t.Error("Expected an error return from fh.OpenFileReadOnly(target)\n" +
			"because 'target' is a Directory and NOT a File!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
	if fPtr != nil {
		_ = fPtr.Close()
	}
}
// TestFileHelper_OpenFileReadWrite_01 creates a new file with
// OpenFileReadWrite(truncateFile=false), writes a test string, reads it back
// with ReadAt, and verifies the round trip.
func TestFileHelper_OpenFileReadWrite_01(t *testing.T) {
	fh := FileHelper{}
	targetFile := "../../checkfiles/scratchTestFileHelper_OpenFileForWriting_01.txt"
	targetFile = fh.AdjustPathSlash(targetFile)
	testString := "How now, brown cow!"
	if fh.DoesFileExist(targetFile) {
		err := fh.DeleteDirFile(targetFile)
		if err != nil {
			t.Errorf("ERROR: Test Setup attempted to delete 'targetFile'.\n"+
				"fh.DeleteDirFile(targetFile) returned an error!\n"+
				"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
			return
		}
		if fh.DoesFileExist(targetFile) {
			t.Errorf("ERROR: Test Setup attempted deletion of 'targetFile'.\n"+
				"'targetFile' STILL EXISTS!\n"+
				"targetFile='%v'\n", targetFile)
			return
		}
	}
	// truncateFile == false - targetFile does not yet exist!
	fPtr, err := fh.OpenFileReadWrite(targetFile, false)
	if err != nil {
		t.Errorf("Error returned by fh.OpenFileReadWrite(targetFile, false)\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		return
	}
	if fPtr == nil {
		t.Errorf("ERROR: File Pointer returned by fh.OpenFileReadWrite(targetFile)\n"+
			"is 'nil'!\ntargetFile='%v'", targetFile)
		return
	}
	bytesWritten, err := fPtr.WriteString(testString)
	// Bug fix: the error returned by WriteString was previously ignored.
	if err != nil {
		t.Errorf("Error returned by fPtr.WriteString(testString).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if bytesWritten != len(testString) {
		// Typo fix in message: "lenth" -> "length".
		t.Errorf("ERROR: Bytes written to 'targetFile' DO NOT EQUAL the length\n"+
			"of 'testString'.\ntargetFile='%v'\nBytesWritten='%v' Length of Test String='%v'\n",
			targetFile, bytesWritten, len(testString))
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	err = fPtr.Sync()
	if err != nil {
		t.Errorf("Error returned by fPtr.Sync() for 'targetFile'!\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	b := make([]byte, 500)
	bytesRead, err := fPtr.ReadAt(b, 0)
	if err != nil {
		// io.EOF is expected: the buffer is larger than the file contents.
		if err != io.EOF {
			t.Errorf("Non-EOF error returned by fPtr.ReadAt(b,0).\n"+
				"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
			_ = fPtr.Close()
			_ = fh.DeleteDirFile(targetFile)
			return
		}
	}
	if bytesRead != bytesWritten {
		t.Errorf("ERROR: The bytes written to 'targetFile' do NOT EQUAL the bytes\n"+
			"read from 'targetFile'.\ntargetFile='%v'\nBytes Read='%v' Bytes Written='%v'\n",
			targetFile, bytesRead, bytesWritten)
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	resultStr := string(b[0:bytesRead])
	if testString != resultStr {
		t.Errorf("ERROR: Expected read string='%v'.\nInstead, read string='%v'.\n",
			testString, resultStr)
	}
	err = fPtr.Close()
	if err != nil {
		t.Errorf("Test Clean-up Error: Error returned from fPtr.Close().\n"+
			"targetFile='%v'\nError='%v'", targetFile, err.Error())
	}
	err = fh.DeleteDirFile(targetFile)
	if err != nil {
		t.Errorf("Test Clean-up Error: Error returned from fh.DeleteDirFile(targetFile).\n"+
			"targetFile='%v'\nError='%v'", targetFile, err.Error())
	}
}
// TestFileHelper_OpenFileReadWrite_02 opens an existing file with
// truncateFile=true, writes a short string, and verifies the file now
// contains only that string (i.e. the original contents were truncated).
func TestFileHelper_OpenFileReadWrite_02(t *testing.T) {
	fh := FileHelper{}
	srcFile := "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	srcFile = fh.AdjustPathSlash(srcFile)
	targetFile := "../../checkfiles/scratchTestFileHelper_OpenFileForWriting_02.txt"
	targetFile = fh.AdjustPathSlash(targetFile)
	testString := "How now, brown cow!"
	err := fh.DeleteDirFile(targetFile)
	if err != nil {
		t.Errorf("Test Setup Error returned from fh.DeleteDirFile(targetFile).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		return
	}
	fInfo, err := os.Stat(srcFile)
	if err != nil {
		t.Errorf("ERROR: Test Setup Source File DOES NOT EXIST!\n"+
			"Source File='%v'\n", srcFile)
		return
	}
	sourceByteSize := fInfo.Size()
	if fh.DoesFileExist(targetFile) {
		err := fh.DeleteDirFile(targetFile)
		if err != nil {
			t.Errorf("ERROR: Test Setup attempted to delete 'targetFile'.\n"+
				"fh.DeleteDirFile(targetFile) returned an error!\n"+
				"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
			return
		}
		if fh.DoesFileExist(targetFile) {
			t.Errorf("ERROR: Test Setup attempted deletion of 'targetFile'.\n"+
				"'targetFile' STILL EXISTS!\n"+
				"targetFile='%v'\n", targetFile)
			return
		}
	}
	err = fh.CopyFileByIo(srcFile, targetFile)
	if err != nil {
		t.Errorf("Error returned by test setup op fh.CopyFileByIo(srcFile, targetFile).\n"+
			"srcFile='%v'\ntargetFile='%v'\nError='%v'\n",
			srcFile, targetFile, err.Error())
		return
	}
	if !fh.DoesFileExist(targetFile) {
		t.Errorf("Test Setup Failed! 'targetFile' does NOT EXIST!\n"+
			"targetFile='%v'\n", targetFile)
		return
	}
	// Open file with truncateFile=true
	fPtr, err := fh.OpenFileReadWrite(targetFile, true)
	if err != nil {
		t.Errorf("Error returned by fh.OpenFileReadWrite(targetFile)\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		return
	}
	if fPtr == nil {
		t.Errorf("ERROR: File Pointer returned by fh.OpenFileReadWrite(targetFile)\n"+
			"is 'nil'!\ntargetFile='%v'", targetFile)
		return
	}
	bytesWritten, err := fPtr.WriteString(testString)
	// Bug fix: the error returned by WriteString was previously ignored.
	if err != nil {
		t.Errorf("Error returned by fPtr.WriteString(testString).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if bytesWritten != len(testString) {
		// Typo fix in message: "lenth" -> "length".
		t.Errorf("ERROR: Bytes written to 'targetFile' DO NOT EQUAL the length\n"+
			"of 'testString'.\ntargetFile='%v'\nBytesWritten='%v' Length of Test String='%v'\n",
			targetFile, bytesWritten, len(testString))
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	err = fPtr.Sync()
	if err != nil {
		t.Errorf("Error returned by fPtr.Sync() for 'targetFile'!\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	b := make([]byte, 500)
	bytesRead, err := fPtr.ReadAt(b, 0)
	if err != nil {
		// io.EOF is expected: the buffer is larger than the file contents.
		if err != io.EOF {
			t.Errorf("Non-EOF error returned by fPtr.ReadAt(b,0).\n"+
				"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
			_ = fPtr.Close()
			_ = fh.DeleteDirFile(targetFile)
			return
		}
	}
	err = fPtr.Close()
	if err != nil {
		t.Errorf("Error returned after Read Operation on fPtr.Close()!\n"+
			"targetFile='%v'\nError='%v'", targetFile, err.Error())
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	fInfo, err = os.Stat(targetFile)
	if err != nil {
		t.Errorf("ERROR: os.Stat(targetFile) shows targetFile DOES NOT EXIST!\n"+
			"targetFile='%v'\n", targetFile)
		return
	}
	targetFileByteSize := fInfo.Size()
	// Truncation must have shrunk the file below the original source size.
	if sourceByteSize <= targetFileByteSize {
		// Typo fix in message: "Orginal" -> "Original".
		t.Errorf("ERROR: Original Source File Byte Size is less than new "+
			"'targetFile' Byte Size!\nSource File Byte Size='%v' "+
			"Target File Byte Size='%v'\ntargetFile='%v'\n",
			sourceByteSize, targetFileByteSize, targetFile)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if bytesRead != bytesWritten {
		t.Errorf("ERROR: The bytes written to 'targetFile' do NOT EQUAL the bytes\n"+
			"read from 'targetFile'.\ntargetFile='%v'\nBytes Read='%v' Bytes Written='%v'\n",
			targetFile, bytesRead, bytesWritten)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	resultStr := string(b[0:bytesRead])
	if testString != resultStr {
		t.Errorf("ERROR: Expected read string='%v'.\nInstead, read string='%v'.\n",
			testString, resultStr)
	}
	err = fh.DeleteDirFile(targetFile)
	if err != nil {
		t.Errorf("Test Clean-up Error: Error returned from fh.DeleteDirFile(targetFile).\n"+
			"targetFile='%v'\nError='%v'", targetFile, err.Error())
	}
}
// TestFileHelper_OpenFileReadWrite_03 expects an error for an empty path string.
func TestFileHelper_OpenFileReadWrite_03(t *testing.T) {
	fh := FileHelper{}
	emptyPath := ""
	fPtr, err := fh.OpenFileReadWrite(emptyPath, false)
	if err != nil {
		return // expected outcome: an empty path must be rejected
	}
	t.Error("ERROR: Expected an error return from fh.OpenFileReadWrite" +
		"(targetFile, false)\n" +
		"because 'targetFile' is an empty string.\n" +
		"However NO ERROR WAS RETURNED!!!\n")
	if fPtr != nil {
		if cerr := fPtr.Close(); cerr != nil {
			t.Errorf("Test Clean-up Error: Error returned from fPtr.Close().\n"+
				"targetFile='%v'\nError='%v'", emptyPath, cerr.Error())
		}
	}
}
// TestFileHelper_OpenFileReadWrite_04 expects an error for an all-blank path.
func TestFileHelper_OpenFileReadWrite_04(t *testing.T) {
	fh := FileHelper{}
	blankPath := "     "
	fPtr, err := fh.OpenFileReadWrite(blankPath, false)
	if err != nil {
		return // expected outcome: an all-blank path must be rejected
	}
	t.Error("ERROR: Expected an error return from fh.OpenFileReadWrite" +
		"(targetFile, false)\n" +
		"because the 'targetFile' parameter consists entirely of blank spaces.\n" +
		"However NO ERROR WAS RETURNED!!!\n")
	if fPtr != nil {
		if cerr := fPtr.Close(); cerr != nil {
			t.Errorf("Test Clean-up Error: Error returned from fPtr.Close().\n"+
				"targetFile='%v'\nError='%v'", blankPath, cerr.Error())
		}
	}
}
// TestFileHelper_OpenFileReadWrite_05 expects an error when the target path
// contains parent directories that do not exist.
func TestFileHelper_OpenFileReadWrite_05(t *testing.T) {
	fh := FileHelper{}
	missingDirFile := fh.AdjustPathSlash(
		"../../checkfiles/idontexist1/idontexist2/TestFileHelper_OpenFileReadWrite_05.txt")
	fPtr, err := fh.OpenFileReadWrite(missingDirFile, false)
	if err != nil {
		return // expected outcome: missing parent directories must be rejected
	}
	t.Error("ERROR: Expected an error return from fh.OpenFileReadWrite" +
		"(targetFile, false)\n" +
		"because the 'targetFile' parameter includes parent directories which DO NOT EXIST.\n" +
		"However NO ERROR WAS RETURNED!!!\n")
	if fPtr != nil {
		if cerr := fPtr.Close(); cerr != nil {
			t.Errorf("Test Clean-up Error: Error returned from fPtr.Close().\n"+
				"targetFile='%v'\nError='%v'", missingDirFile, cerr.Error())
		}
		if derr := fh.DeleteDirFile(missingDirFile); derr != nil {
			t.Errorf("Test Clean-up Error: Error returned from fh.DeleteDirFile(targetFile).\n"+
				"targetFile='%v'\nError='%v'", missingDirFile, derr.Error())
		}
	}
}
// TestFileHelper_OpenFileWriteOnly_01 verifies that reading from a file
// opened write-only fails.
func TestFileHelper_OpenFileWriteOnly_01(t *testing.T) {
	fh := FileHelper{}
	srcFile := "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	srcFile = fh.AdjustPathSlash(srcFile)
	targetFile := "../../checkfiles/TestFileHelper_OpenFileWriteOnly_01.txt"
	targetFile = fh.AdjustPathSlash(targetFile)
	if fh.DoesFileExist(targetFile) {
		err := fh.DeleteDirFile(targetFile)
		if err != nil {
			t.Errorf("ERROR: Test Setup attempted to delete 'targetFile'.\n"+
				"fh.DeleteDirFile(targetFile) returned an error!\n"+
				"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
			return
		}
		if fh.DoesFileExist(targetFile) {
			t.Errorf("ERROR: Test Setup attempted deletion of 'targetFile'.\n"+
				"'targetFile' STILL EXISTS!\n"+
				"targetFile='%v'\n", targetFile)
			return
		}
	}
	err := fh.CopyFileByIo(srcFile, targetFile)
	if err != nil {
		t.Errorf("Error returned by test setup op fh.CopyFileByIo(srcFile, targetFile).\n"+
			"srcFile='%v'\ntargetFile='%v'\nError='%v'\n",
			srcFile, targetFile, err.Error())
		return
	}
	if !fh.DoesFileExist(targetFile) {
		t.Errorf("Test Setup Failed! 'targetFile' does NOT EXIST!\n"+
			"targetFile='%v'\n", targetFile)
		return
	}
	fPtr, err := fh.OpenFileWriteOnly(targetFile, false)
	if err != nil {
		t.Errorf("Error returned from fh.OpenFileWriteOnly"+
			"(targetFile,false).\ntargetFile='%v'\nError='%v'\n",
			targetFile, err.Error())
		if fPtr != nil {
			_ = fPtr.Close()
		}
		err = fh.DeleteDirFile(targetFile)
		if err != nil {
			t.Errorf("After OpenFileWriteOnly() an error was returned by "+
				"fh.DeleteDirFile(targetFile)\ntargetFile='%v'\nError='%v'\n",
				targetFile, err.Error())
		}
		return
	}
	bytes := make([]byte, 3000)
	// A read on a write-only handle must fail.
	_, err = fPtr.Read(bytes)
	if err == nil {
		// Typo fix in message: "retun" -> "return".
		t.Errorf("Expected an error return from fPtr.Read(bytes) "+
			"because\nthe file pointer 'fPtr' was opened as 'Write-Only'!\n"+
			"targetFile='%v'\n", targetFile)
	}
	if fPtr != nil {
		err = fPtr.Close()
		if err != nil {
			t.Errorf("Test Clean-up Error returned by fPtr.Close().\n"+
				"targetFile='%v'\nError='%v'\n",
				targetFile, err.Error())
		}
	}
	err = fh.DeleteDirFile(targetFile)
	if err != nil {
		t.Errorf("Test Clean-up Error returned by fh.DeleteDirFile("+
			"targetFile)\ntargetFile='%v'\nError='%v'\n",
			targetFile, err.Error())
	}
}
// TestFileHelper_OpenFileWriteOnly_02 opens an existing file write-only with
// truncation, writes a test string, then reopens the file read-write and
// verifies the written contents replaced the original ones.
func TestFileHelper_OpenFileWriteOnly_02(t *testing.T) {
	fh := FileHelper{}
	srcFile := "../../filesfortest/levelfilesfortest/level_0_3_test.txt"
	srcFile = fh.AdjustPathSlash(srcFile)
	targetFile := "../../checkfiles/TestFileHelper_OpenFileWriteOnly_01.txt"
	targetFile = fh.AdjustPathSlash(targetFile)
	expectedStr := "How Now Brown Cow!"
	if fh.DoesFileExist(targetFile) {
		err := fh.DeleteDirFile(targetFile)
		if err != nil {
			t.Errorf("ERROR: Test Setup attempted to delete 'targetFile'.\n"+
				"fh.DeleteDirFile(targetFile) returned an error!\n"+
				"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
			return
		}
		if fh.DoesFileExist(targetFile) {
			t.Errorf("ERROR: Test Setup attempted deletion of 'targetFile'.\n"+
				"'targetFile' STILL EXISTS!\n"+
				"targetFile='%v'\n", targetFile)
			return
		}
	}
	err := fh.CopyFileByIo(srcFile, targetFile)
	if err != nil {
		t.Errorf("Error returned by test setup op fh.CopyFileByIo(srcFile, targetFile).\n"+
			"srcFile='%v'\ntargetFile='%v'\nError='%v'\n",
			srcFile, targetFile, err.Error())
		return
	}
	if !fh.DoesFileExist(targetFile) {
		t.Errorf("Test Setup Failed! 'targetFile' does NOT EXIST!\n"+
			"targetFile='%v'\n", targetFile)
		return
	}
	fPtr, err := fh.OpenFileWriteOnly(targetFile, true)
	if err != nil {
		// Bug fix: the message previously said "(targetFile,false)" although
		// this call passes 'true'.
		t.Errorf("Error returned from fh.OpenFileWriteOnly"+
			"(targetFile, true).\ntargetFile='%v'\nError='%v'\n",
			targetFile, err.Error())
		if fPtr != nil {
			_ = fPtr.Close()
		}
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if fPtr == nil {
		t.Errorf("ERROR: fh.OpenFileWriteOnly(targetFile,true)\n"+
			"returned a 'nil' file pointer!\ntargetFile='%v'\n", targetFile)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	bytesWritten, err := fPtr.WriteString(expectedStr)
	if err != nil {
		t.Errorf("Error returned by fPtr.WriteString(expectedStr).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	err = fPtr.Close()
	if err != nil {
		t.Errorf("Error returned by fPtr.Close() after writing bytes to file.\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if bytesWritten != len(expectedStr) {
		t.Errorf("Expected bytes written='%v'. Instead, bytes written='%v'.",
			bytesWritten, len(expectedStr))
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	// Reopen read-write to verify the truncate-and-write result.
	fPtr, err = fh.OpenFileReadWrite(targetFile, false)
	if err != nil {
		t.Errorf("Error returned by fh.OpenFileReadWrite(targetFile, false).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		if fPtr != nil {
			_ = fPtr.Close()
		}
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	bytes := make([]byte, 3000)
	bytesRead, err := fPtr.Read(bytes)
	if err != nil {
		t.Errorf("Error returned by fPtr.Read(bytes).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	err = fPtr.Close()
	if err != nil {
		t.Errorf("Error returned by fPtr.Close() after bytes read operation.\n"+
			"targetFile='%v'\nError='%v'", targetFile, err.Error())
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if bytesWritten != bytesRead {
		t.Errorf("Expected bytes read='%v'. Instead, bytes read='%v'\n",
			bytesWritten, bytesRead)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	strRead := string(bytes[0:bytesRead])
	if expectedStr != strRead {
		t.Errorf("Expected read string = '%v'\n"+
			"Instead, read string='%v'\n",
			expectedStr, strRead)
	}
	err = fh.DeleteDirFile(targetFile)
	if err != nil {
		t.Errorf("Test Clean-up Error: Error returned by fh.DeleteDirFile(targetFile).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
	}
	// Redundant trailing 'return' removed (staticcheck S1023).
}
// TestFileHelper_OpenFileWriteOnly_03 verifies that OpenFileWriteOnly with
// truncate=false creates a new file, that the written string can be read
// back intact, and that the write and read byte counts agree.
//
// Fixes two defects in the original:
//  1. The nil-pointer error message referred to "(targetFile,true)"
//     although the call passes 'false'.
//  2. The "Expected bytes written" Errorf printed its arguments in swapped
//     order (the actual value was labeled as expected).
func TestFileHelper_OpenFileWriteOnly_03(t *testing.T) {
	fh := FileHelper{}
	targetFile := fh.AdjustPathSlash("../../checkfiles/TestFileHelper_OpenFileWriteOnly_03.txt")
	expectedStr := "Now is the time for all good men to come to the aid of their country."
	// Test setup: remove any stale target file left by a prior run.
	if fh.DoesFileExist(targetFile) {
		err := fh.DeleteDirFile(targetFile)
		if err != nil {
			t.Errorf("ERROR: Test Setup attempted to delete 'targetFile'.\n"+
				"fh.DeleteDirFile(targetFile) returned an error!\n"+
				"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
			return
		}
		if fh.DoesFileExist(targetFile) {
			t.Errorf("ERROR: Test Setup attempted deletion of 'targetFile'.\n"+
				"'targetFile' STILL EXISTS!\n"+
				"targetFile='%v'\n", targetFile)
			return
		}
	}
	// Open write-only; truncate=false exercises the create-new-file path.
	fPtr, err := fh.OpenFileWriteOnly(targetFile, false)
	if err != nil {
		t.Errorf("Error returned from fh.OpenFileWriteOnly"+
			"(targetFile,false).\ntargetFile='%v'\nError='%v'\n",
			targetFile, err.Error())
		if fPtr != nil {
			_ = fPtr.Close()
		}
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if fPtr == nil {
		// Fixed: message previously claimed the call used 'true'.
		t.Errorf("ERROR: fh.OpenFileWriteOnly(targetFile,false)\n"+
			"returned a 'nil' file pointer!\ntargetFile='%v'\n", targetFile)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	bytesWritten, err := fPtr.WriteString(expectedStr)
	if err != nil {
		t.Errorf("Error returned by fPtr.WriteString(expectedStr).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	err = fPtr.Close()
	if err != nil {
		t.Errorf("Error returned by fPtr.Close() after writing bytes to file.\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if bytesWritten != len(expectedStr) {
		// Fixed: arguments were swapped (expected/actual reversed).
		t.Errorf("Expected bytes written='%v'. Instead, bytes written='%v'.",
			len(expectedStr), bytesWritten)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	// Re-open read-write and read the contents back.
	fPtr, err = fh.OpenFileReadWrite(targetFile, false)
	if err != nil {
		t.Errorf("Error returned by fh.OpenFileReadWrite(targetFile, false).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		if fPtr != nil {
			_ = fPtr.Close()
		}
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	bytes := make([]byte, 3000)
	bytesRead, err := fPtr.Read(bytes)
	if err != nil {
		t.Errorf("Error returned by fPtr.Read(bytes).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	err = fPtr.Close()
	if err != nil {
		t.Errorf("Error returned by fPtr.Close() after bytes read operation.\n"+
			"targetFile='%v'\nError='%v'", targetFile, err.Error())
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if bytesWritten != bytesRead {
		t.Errorf("Expected bytes read='%v'. Instead, bytes read='%v'\n",
			bytesWritten, bytesRead)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	strRead := string(bytes[0:bytesRead])
	if expectedStr != strRead {
		t.Errorf("Expected read string = '%v'\n"+
			"Instead, read string='%v'\n",
			expectedStr, strRead)
	}
	// Test cleanup: remove the file created above.
	err = fh.DeleteDirFile(targetFile)
	if err != nil {
		t.Errorf("Test Clean-up Error: Error returned by fh.DeleteDirFile(targetFile).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
	}
	return
}
// TestFileHelper_OpenFileWriteOnly_04 repeats the create-write-read cycle
// of _03 with a different payload string.
//
// Fixes three defects in the original:
//  1. The fixture file name was "..._03.txt" (copy/paste from test _03),
//     making the two tests share a file; it is now "..._04.txt".
//  2. The nil-pointer error message referred to "(targetFile,true)"
//     although the call passes 'false'.
//  3. The "Expected bytes written" Errorf printed its arguments in swapped
//     order (the actual value was labeled as expected).
func TestFileHelper_OpenFileWriteOnly_04(t *testing.T) {
	fh := FileHelper{}
	targetFile := fh.AdjustPathSlash("../../checkfiles/TestFileHelper_OpenFileWriteOnly_04.txt")
	expectedStr := "The cow jumped over the moon."
	// Test setup: remove any stale target file left by a prior run.
	if fh.DoesFileExist(targetFile) {
		err := fh.DeleteDirFile(targetFile)
		if err != nil {
			t.Errorf("ERROR: Test Setup attempted to delete 'targetFile'.\n"+
				"fh.DeleteDirFile(targetFile) returned an error!\n"+
				"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
			return
		}
		if fh.DoesFileExist(targetFile) {
			t.Errorf("ERROR: Test Setup attempted deletion of 'targetFile'.\n"+
				"'targetFile' STILL EXISTS!\n"+
				"targetFile='%v'\n", targetFile)
			return
		}
	}
	fPtr, err := fh.OpenFileWriteOnly(targetFile, false)
	if err != nil {
		t.Errorf("Error returned from fh.OpenFileWriteOnly"+
			"(targetFile,false).\ntargetFile='%v'\nError='%v'\n",
			targetFile, err.Error())
		if fPtr != nil {
			_ = fPtr.Close()
		}
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if fPtr == nil {
		// Fixed: message previously claimed the call used 'true'.
		t.Errorf("ERROR: fh.OpenFileWriteOnly(targetFile,false)\n"+
			"returned a 'nil' file pointer!\ntargetFile='%v'\n", targetFile)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	bytesWritten, err := fPtr.WriteString(expectedStr)
	if err != nil {
		t.Errorf("Error returned by fPtr.WriteString(expectedStr).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	err = fPtr.Close()
	if err != nil {
		t.Errorf("Error returned by fPtr.Close() after writing bytes to file.\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if bytesWritten != len(expectedStr) {
		// Fixed: arguments were swapped (expected/actual reversed).
		t.Errorf("Expected bytes written='%v'. Instead, bytes written='%v'.",
			len(expectedStr), bytesWritten)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	fPtr, err = fh.OpenFileReadWrite(targetFile, false)
	if err != nil {
		t.Errorf("Error returned by fh.OpenFileReadWrite(targetFile, false).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		if fPtr != nil {
			_ = fPtr.Close()
		}
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	bytes := make([]byte, 3000)
	bytesRead, err := fPtr.Read(bytes)
	if err != nil {
		t.Errorf("Error returned by fPtr.Read(bytes).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
		_ = fPtr.Close()
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	err = fPtr.Close()
	if err != nil {
		t.Errorf("Error returned by fPtr.Close() after bytes read operation.\n"+
			"targetFile='%v'\nError='%v'", targetFile, err.Error())
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	if bytesWritten != bytesRead {
		t.Errorf("Expected bytes read='%v'. Instead, bytes read='%v'\n",
			bytesWritten, bytesRead)
		_ = fh.DeleteDirFile(targetFile)
		return
	}
	strRead := string(bytes[0:bytesRead])
	if expectedStr != strRead {
		t.Errorf("Expected read string = '%v'\n"+
			"Instead, read string='%v'\n",
			expectedStr, strRead)
	}
	// Test cleanup: remove the file created above.
	err = fh.DeleteDirFile(targetFile)
	if err != nil {
		t.Errorf("Test Clean-up Error: Error returned by fh.DeleteDirFile(targetFile).\n"+
			"targetFile='%v'\nError='%v'\n", targetFile, err.Error())
	}
	return
}
// TestFileHelper_OpenFileWriteOnly_05 expects an error when the target
// file name is an empty string.
func TestFileHelper_OpenFileWriteOnly_05(t *testing.T) {
	fh := FileHelper{}
	if _, err := fh.OpenFileWriteOnly("", false); err == nil {
		t.Error("Expected an error return from fh.OpenFileWriteOnly(targetFile, false)\n" +
			"because parameter 'targetFile' is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!")
	}
}
// TestFileHelper_OpenFileWriteOnly_06 expects an error when the target
// file name contains only blank spaces.
func TestFileHelper_OpenFileWriteOnly_06(t *testing.T) {
	fh := FileHelper{}
	if _, err := fh.OpenFileWriteOnly("   ", false); err == nil {
		t.Error("Expected an error return from fh.OpenFileWriteOnly(targetFile, false)\n" +
			"because parameter 'targetFile' consists entirely of blank spaces.\n" +
			"However, NO ERROR WAS RETURNED!")
	}
}
// Verifies that an empty input path yields an empty result.
func TestFileHelper_RemovePathSeparatorFromEndOfPathString_01(t *testing.T) {
	fh := FileHelper{}
	result := fh.RemovePathSeparatorFromEndOfPathString("")
	if result != "" {
		t.Errorf("Expected result from fh.RemovePathSeparatorFromEndOfPathString(pathStr) to\n"+
			"equal an empty string because 'pathStr' is an empty string.\n"+
			"However, a valid string was returned! ERROR!\nresult='%v'", result)
	}
}
// Verifies that an all-blank input path yields an empty result.
func TestFileHelper_RemovePathSeparatorFromEndOfPathString_02(t *testing.T) {
	fh := FileHelper{}
	result := fh.RemovePathSeparatorFromEndOfPathString("   ")
	if result != "" {
		t.Errorf("Expected result from fh.RemovePathSeparatorFromEndOfPathString(pathStr) to\n"+
			"equal an empty string because 'pathStr' consists entirely of blank spaces.\n"+
			"However, a valid string was returned! ERROR!\nresult='%v'", result)
	}
}
// Verifies that a single trailing path separator is stripped.
func TestFileHelper_RemovePathSeparatorFromEndOfPathString_03(t *testing.T) {
	fh := FileHelper{}
	base := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/level_02_dir/level_03_dir/" +
		"level_04_dir")
	withSep := fh.AdjustPathSlash(base + "/")
	got := fh.RemovePathSeparatorFromEndOfPathString(withSep)
	if got != base {
		t.Errorf("Expected result from fh.RemovePathSeparatorFromEndOfPathString(pathStr) to\n"+
			"equal\npathStrBase='%v'.\n"+
			"Instead,\nnewPathStr='%v'", base, got)
	}
}
// Verifies that a path with no trailing separator is returned unchanged.
func TestFileHelper_RemovePathSeparatorFromEndOfPathString_04(t *testing.T) {
	fh := FileHelper{}
	base := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/level_02_dir/level_03_dir/" +
		"level_04_dir")
	got := fh.RemovePathSeparatorFromEndOfPathString(base)
	if got != base {
		t.Errorf("Expected result from fh.RemovePathSeparatorFromEndOfPathString(pathStr) to\n"+
			"equal\npathStrBase='%v'.\n"+
			"Instead,\nnewPathStr='%v'", base, got)
	}
}
// Verifies trailing-separator removal on a backslash-style path.
func TestFileHelper_RemovePathSeparatorFromEndOfPathString_05(t *testing.T) {
	fh := FileHelper{}
	base := "..\\filesfortest\\levelfilesfortest\\level_01_dir\\level_02_dir\\level_03_dir\\" +
		"level_04_dir"
	withSep := base + "\\"
	// NOTE(review): converting backslashes to forward slashes only when
	// os.PathSeparator IS '\\' looks inverted — confirm the intent against
	// RemovePathSeparatorFromEndOfPathString's separator handling.
	if os.PathSeparator == '\\' {
		withSep = strings.ReplaceAll(withSep, "\\", "/")
		base = strings.ReplaceAll(base, "\\", "/")
	}
	got := fh.RemovePathSeparatorFromEndOfPathString(withSep)
	if got != base {
		t.Errorf("Expected result from fh.RemovePathSeparatorFromEndOfPathString(pathStr) to\n"+
			"equal\npathStrBase='%v'.\n"+
			"Instead,\nnewPathStr='%v'", base, got)
	}
}
// Verifies a backslash-style path with no trailing separator is unchanged.
func TestFileHelper_RemovePathSeparatorFromEndOfPathString_06(t *testing.T) {
	fh := FileHelper{}
	base := "..\\filesfortest\\levelfilesfortest\\level_01_dir\\level_02_dir\\level_03_dir\\" +
		"level_04_dir"
	input := base
	// NOTE(review): see _05 — the direction of this conversion looks
	// inverted; confirm against the helper's separator handling.
	if os.PathSeparator == '\\' {
		input = strings.ReplaceAll(input, "\\", "/")
		base = strings.ReplaceAll(base, "\\", "/")
	}
	got := fh.RemovePathSeparatorFromEndOfPathString(input)
	if got != base {
		t.Errorf("Expected result from fh.RemovePathSeparatorFromEndOfPathString(pathStr) to\n"+
			"equal\npathStrBase='%v'.\n"+
			"Instead,\nnewPathStr='%v'", base, got)
	}
}
// TestFileHelper_SearchFileModeMatch_01 sets a "-r--r--r--" file-mode
// criterion and expects it to register as set while NOT matching the
// mode of the target test file.
func TestFileHelper_SearchFileModeMatch_01(t *testing.T) {
	fh := FileHelper{}
	target := "../../filesfortest/levelfilesfortest/level_01_dir/level_1_3_test.txt"
	info, err := fh.GetFileInfo(target)
	if err != nil {
		t.Errorf("Error returned by fh.GetFileInfo(targetFile).\n"+
			"targetFile='%v'\nError='%v'", target, err.Error())
		return
	}
	criteria := FileSelectionCriteria{}
	if err = criteria.SelectByFileMode.SetFileModeByTextCode("-r--r--r--"); err != nil {
		t.Errorf("Error returned by fileSelection.SelectByFileMode.SetFileModeByTextCode"+
			"(\"-r--r--r--\").\nError='%v'\n", err.Error())
		return
	}
	modeSet, modeMatch, err := fh.SearchFileModeMatch(info, criteria)
	if err != nil {
		t.Errorf("Error returned by fh.SearchFileModeMatch(fInfo, fileSelection).\n"+
			"Error='%v'\n", err.Error())
	}
	if !modeSet {
		t.Error("Expected isFileModeSet=='true'. Instead, it is 'false'!")
	}
	if modeMatch {
		t.Error("Expected isFileModeMatch=='false'. Instead, it is 'true'!")
	}
}
// TestFileHelper_SearchFileModeMatch_02 sets the criterion to the target
// file's own mode and expects both set and match to be true.
func TestFileHelper_SearchFileModeMatch_02(t *testing.T) {
	fh := FileHelper{}
	target := "../../filesfortest/levelfilesfortest/level_01_dir/level_1_3_test.txt"
	info, err := fh.GetFileInfo(target)
	if err != nil {
		t.Errorf("Error returned by fh.GetFileInfo(targetFile).\n"+
			"targetFile='%v'\nError='%v'", target, err.Error())
		return
	}
	criteria := FileSelectionCriteria{}
	if err = criteria.SelectByFileMode.SetByFileMode(info.Mode()); err != nil {
		t.Errorf("Error returned by fileSelection.SelectByFileMode.SetByFileMode"+
			"(fInfo.Mode()).\nError='%v'\n", err.Error())
		return
	}
	modeSet, modeMatch, err := fh.SearchFileModeMatch(info, criteria)
	if err != nil {
		t.Errorf("Error returned by fh.SearchFileModeMatch(fInfo, fileSelection).\n"+
			"Error='%v'\n", err.Error())
	}
	if !modeSet {
		t.Error("Expected isFileModeSet=='true'. Instead, it is 'false'!")
	}
	if !modeMatch {
		t.Error("Expected isFileModeMatch=='true'. Instead, it is 'false'!")
	}
}
// TestFileHelper_SearchFileModeMatch_03 passes a nil FileInfo and empty
// criteria; neither flag should be set.
func TestFileHelper_SearchFileModeMatch_03(t *testing.T) {
	fh := FileHelper{}
	var nilInfo os.FileInfo
	modeSet, modeMatch, err := fh.SearchFileModeMatch(nilInfo, FileSelectionCriteria{})
	if err != nil {
		t.Errorf("Error returned by fh.SearchFileModeMatch(fInfo, fileSelection).\n"+
			"Error='%v'\n", err.Error())
	}
	if modeSet {
		t.Error("Expected isFileModeSet=='false'. Instead, it is 'true'!")
	}
	if modeMatch {
		t.Error("Expected isFileModeMatch=='false'. Instead, it is 'true'!")
	}
}
// TestFileHelper_SwapBasePath_01 verifies a successful base-path swap on
// a matching target path.
func TestFileHelper_SwapBasePath_01(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_0_0_test.txt")
	oldBase := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest")
	newBase := fh.AdjustPathSlash("../../dirmgrtests")
	expected := fh.AdjustPathSlash("../../dirmgrtests/level_0_0_test.txt")
	got, err := fh.SwapBasePath(oldBase, newBase, target)
	if err != nil {
		t.Errorf("Error returned from FileHelper{}.SwapBasePath(...) "+
			"Error='%v' ", err.Error())
	}
	if got != expected {
		t.Errorf("Error: Expected newPath='%v'. Instead, newPath='%v' ",
			expected, got)
	}
}
// TestFileHelper_SwapBasePath_02 swaps base paths where 'oldBasePath'
// differs from the target only in letter case ("filesforTest") and still
// expects a successful swap.
func TestFileHelper_SwapBasePath_02(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_0_0_test.txt")
	oldBase := fh.AdjustPathSlash("../../filesforTest/levelfilesfortest")
	newBase := fh.AdjustPathSlash("../../dirmgrtests")
	expected := fh.AdjustPathSlash("../../dirmgrtests/level_0_0_test.txt")
	got, err := fh.SwapBasePath(oldBase, newBase, target)
	if err != nil {
		t.Errorf("Error returned from FileHelper{}.SwapBasePath(...) "+
			"Error='%v' ", err.Error())
	}
	if got != expected {
		t.Errorf("Error: Expected newPath='%v'. Instead, newPath='%v' ",
			expected, got)
	}
}
// TestFileHelper_SwapBasePath_03 expects an error because 'oldBasePath'
// is not a prefix of the target path.
func TestFileHelper_SwapBasePath_03(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../filesfortest/newfilesfortest/newerFileForTest_01.txt")
	oldBase := fh.AdjustPathSlash("../../filesforTest/levelfilesfortest")
	newBase := fh.AdjustPathSlash("../../dirmgrtests")
	if _, err := fh.SwapBasePath(oldBase, newBase, target); err == nil {
		t.Error("Expected an error return from FileHelper{}.SwapBasePath(...) " +
			"NO ERROR WAS GENERATED!")
	}
}
// TestFileHelper_SwapBasePath_04 expects an error for an empty
// 'oldBasePath'.
func TestFileHelper_SwapBasePath_04(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_0_0_test.txt")
	newBase := fh.AdjustPathSlash("../../dirmgrtests")
	if _, err := fh.SwapBasePath("", newBase, target); err == nil {
		t.Error("Expected an error return from fh.SwapBasePath(oldBasePath,newBasePath,targetPath)\n" +
			"because parameter 'oldBasePath' is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!!")
	}
}
// TestFileHelper_SwapBasePath_05 expects an error for an all-blank
// 'oldBasePath'.
func TestFileHelper_SwapBasePath_05(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_0_0_test.txt")
	newBase := fh.AdjustPathSlash("../../dirmgrtests")
	if _, err := fh.SwapBasePath("   ", newBase, target); err == nil {
		t.Error("Expected an error return from fh.SwapBasePath(oldBasePath,newBasePath,targetPath)\n" +
			"because parameter 'oldBasePath' consists entirely of blank spaces.\n" +
			"However, NO ERROR WAS RETURNED!!")
	}
}
// TestFileHelper_SwapBasePath_06 expects an error for an all-blank
// 'newBasePath'.
func TestFileHelper_SwapBasePath_06(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_0_0_test.txt")
	oldBase := fh.AdjustPathSlash("../../filesforTest/levelfilesfortest")
	if _, err := fh.SwapBasePath(oldBase, "   ", target); err == nil {
		t.Error("Expected an error return from fh.SwapBasePath(oldBasePath,newBasePath,targetPath)\n" +
			"because parameter 'newBasePath' consists entirely of blank spaces.\n" +
			"However, NO ERROR WAS RETURNED!!")
	}
}
// TestFileHelper_SwapBasePath_07 expects an error for an empty
// 'newBasePath'.
func TestFileHelper_SwapBasePath_07(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_0_0_test.txt")
	oldBase := fh.AdjustPathSlash("../../filesforTest/levelfilesfortest")
	if _, err := fh.SwapBasePath(oldBase, "", target); err == nil {
		t.Error("Expected an error return from fh.SwapBasePath(oldBasePath,newBasePath,targetPath)\n" +
			"because parameter 'newBasePath' is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!!")
	}
}
// TestFileHelper_SwapBasePath_08 expects an error for an all-blank
// 'targetPath'.
func TestFileHelper_SwapBasePath_08(t *testing.T) {
	fh := FileHelper{}
	oldBase := fh.AdjustPathSlash("../../filesforTest/levelfilesfortest")
	newBase := fh.AdjustPathSlash("../../dirmgrtests")
	if _, err := fh.SwapBasePath(oldBase, newBase, "  "); err == nil {
		t.Error("Expected an error return from fh.SwapBasePath(oldBasePath,newBasePath,targetPath)\n" +
			"because parameter 'targetPath' consists entirely of blank spaces.\n" +
			"However, NO ERROR WAS RETURNED!!")
	}
}
// TestFileHelper_SwapBasePath_09 expects an error for an empty
// 'targetPath'.
func TestFileHelper_SwapBasePath_09(t *testing.T) {
	fh := FileHelper{}
	oldBase := fh.AdjustPathSlash("../../filesforTest/levelfilesfortest")
	newBase := fh.AdjustPathSlash("../../dirmgrtests")
	if _, err := fh.SwapBasePath(oldBase, newBase, ""); err == nil {
		t.Error("Expected an error return from fh.SwapBasePath(oldBasePath,newBasePath,targetPath)\n" +
			"because parameter 'targetPath' is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!!")
	}
}
// TestFileHelper_SwapBasePath_10 expects an error because 'oldBasePath'
// is longer than the target path.
func TestFileHelper_SwapBasePath_10(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_0_0_test.txt")
	oldBase := fh.AdjustPathSlash("../../filesforTest/levelfilesfortest/dir01/dir02/dir03/dir05/dir06")
	newBase := fh.AdjustPathSlash("../../dirmgrtests")
	if _, err := fh.SwapBasePath(oldBase, newBase, target); err == nil {
		t.Error("Expected an error return from fh.SwapBasePath(oldBasePath,newBasePath,targetPath)\n" +
			"because parameter 'oldBasePath' is longer than 'targetBasePath.\n" +
			"However, NO ERROR WAS RETURNED!!")
	}
}
// TestFileHelper_SwapBasePath_11 expects an error because 'oldBasePath'
// does not occur at the beginning of the target path.
func TestFileHelper_SwapBasePath_11(t *testing.T) {
	fh := FileHelper{}
	target := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_0_0_test.txt")
	oldBase := fh.AdjustPathSlash("/levelfilesfortest")
	newBase := fh.AdjustPathSlash("../../dirmgrtests")
	if _, err := fh.SwapBasePath(oldBase, newBase, target); err == nil {
		t.Error("Expected an error return from fh.SwapBasePath(oldBasePath,newBasePath,targetPath)\n" +
			"because 'oldBasePath' does NOT begin at beginning of 'targetBasePath.\n" +
			"However, NO ERROR WAS RETURNED!!")
	}
}
// createALogTestBottomDir creates the alogTestBottomDir directory tree
// (if needed), removes any pre-existing alogFile in it, then creates the
// file and writes a timestamped sample line plus the file name.
//
// Fix: the original wrote the literal two characters "/n" after the
// timestamp where a newline escape "\n" was clearly intended.
func createALogTestBottomDir() error {
	fh := FileHelper{}
	targetDir, err1 := fh.MakeAbsolutePath(fh.AdjustPathSlash(alogTestBottomDir))
	if err1 != nil {
		return err1
	}
	// Ensure the full directory path exists.
	if !fh.DoesFileExist(targetDir) {
		err2 := fh.MakeDirAll(targetDir)
		if err2 != nil {
			return err2
		}
	}
	// Remove any stale copy of the target file before re-creating it.
	targetFile := fh.JoinPathsAdjustSeparators(targetDir, alogFile)
	if fh.DoesFileExist(targetFile) {
		err3 := fh.DeleteDirFile(targetFile)
		if err3 != nil {
			return err3
		}
	}
	f, err4 := fh.CreateFile(targetFile)
	if err4 != nil {
		return err4
	}
	t := time.Now().Local()
	nowTime := t.Format("2006-01-02 15:04:05.000000000")
	// Fixed: was "/n" (a slash and an 'n'), not a newline.
	_, err5 := f.WriteString("Sample Write - " + nowTime + "\n")
	if err5 != nil {
		_ = f.Close()
		return err5
	}
	_, err6 := f.WriteString("File Name: " + targetFile)
	if err6 != nil {
		_ = f.Close()
		return err6
	}
	_ = f.Close()
	return nil
}
// deleteALogTestBottomDirTargetDir removes the alogTestBottomDir
// directory tree if it exists. It returns an error when the delete fails
// or when the directory still exists after the delete attempt.
func deleteALogTestBottomDirTargetDir() error {
	fh := FileHelper{}
	targetDir, err := fh.MakeAbsolutePath(fh.AdjustPathSlash(alogTestBottomDir))
	if err != nil {
		return err
	}
	if !fh.DoesFileExist(targetDir) {
		return nil
	}
	if err = fh.DeleteDirPathAll(targetDir); err != nil {
		return err
	}
	if fh.DoesFileExist(targetDir) {
		return errors.New("File still exists:" + targetDir)
	}
	return nil
}
|
// +k8s:deepcopy-gen=package
// +groupName=amritgill.alpha.coveros.com
package v1alpha1
|
// This file was generated for SObject CustomPermissionDependency, API Version v43.0 at 2018-07-30 03:47:24.059760542 -0400 EDT m=+10.402757321
package sobjects
import (
"fmt"
"strings"
)
// CustomPermissionDependency models the Salesforce SObject of the same
// name (generated for API v43.0). Field tags use `force:",omitempty"` so
// zero-valued fields are omitted when the record is serialized.
type CustomPermissionDependency struct {
	BaseSObject // embedded common SObject fields/behavior
	CreatedById string `force:",omitempty"`
	CreatedDate string `force:",omitempty"`
	CustomPermissionId string `force:",omitempty"`
	Id string `force:",omitempty"`
	IsDeleted bool `force:",omitempty"`
	LastModifiedById string `force:",omitempty"`
	LastModifiedDate string `force:",omitempty"`
	RequiredCustomPermissionId string `force:",omitempty"`
	SystemModstamp string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type.
func (t *CustomPermissionDependency) ApiName() string {
	return "CustomPermissionDependency"
}
// String renders the record as a human-readable, multi-line summary.
// NOTE(review): t.Name presumably comes from the embedded BaseSObject —
// confirm, since CustomPermissionDependency declares no Name field.
func (t *CustomPermissionDependency) String() string {
	var b strings.Builder
	fmt.Fprintf(&b, "CustomPermissionDependency #%s - %s\n", t.Id, t.Name)
	fmt.Fprintf(&b, "\tCreatedById: %v\n", t.CreatedById)
	fmt.Fprintf(&b, "\tCreatedDate: %v\n", t.CreatedDate)
	fmt.Fprintf(&b, "\tCustomPermissionId: %v\n", t.CustomPermissionId)
	fmt.Fprintf(&b, "\tId: %v\n", t.Id)
	fmt.Fprintf(&b, "\tIsDeleted: %v\n", t.IsDeleted)
	fmt.Fprintf(&b, "\tLastModifiedById: %v\n", t.LastModifiedById)
	fmt.Fprintf(&b, "\tLastModifiedDate: %v\n", t.LastModifiedDate)
	fmt.Fprintf(&b, "\tRequiredCustomPermissionId: %v\n", t.RequiredCustomPermissionId)
	fmt.Fprintf(&b, "\tSystemModstamp: %v\n", t.SystemModstamp)
	return b.String()
}
// CustomPermissionDependencyQueryResponse wraps a SOQL query result whose
// records are CustomPermissionDependency SObjects.
type CustomPermissionDependencyQueryResponse struct {
	BaseQuery // embedded common query-response fields (paging, totals, ...)
	Records []CustomPermissionDependency `json:"Records" force:"records"`
}
|
package domain
import (
"fmt"
)
// Products is a slice of Product values with collection helpers.
type Products []Product
// ToMap indexes the product list by product code. Every product must have
// a non-empty Code; otherwise an error is returned and no map is
// produced. Each map entry points at a per-iteration copy of the slice
// element, so mutating a map value does not modify the slice.
//
// Idiom fixes: receiver renamed from the non-idiomatic 'this' to 'ps'
// (receiver names are not part of the call interface), and the map is
// pre-sized to the slice length.
func (ps Products) ToMap() (map[string]*Product, error) {
	result := make(map[string]*Product, len(ps))
	for i := range ps {
		v := ps[i]
		if len(v.Code) == 0 {
			return nil, fmt.Errorf("missing code for product, cannot convert to map")
		}
		result[v.Code] = &v
	}
	return result, nil
}
// Print writes a tab-separated code/name/price table to stdout.
//
// Idiom fix: receiver renamed from the non-idiomatic 'this' to 'ps'.
func (ps Products) Print() {
	fmt.Printf("code\tname\tprice\n")
	for _, v := range ps {
		fmt.Printf("%s\t%s\t%v\n", v.Code, v.Name, v.Price)
	}
}
|
// ˅
package main
import (
"fmt"
"os"
"github.com/lxn/walk"
. "github.com/lxn/walk/declarative"
)
// ˄
// AppLogin is the Mediator in a GoF Mediator-pattern demo: it owns the
// Colleague widgets of a login dialog and coordinates their
// enabled/disabled state via ColleagueChanged.
type AppLogin struct {
	// ˅
	// ˄
	radioLogin *ColleagueRadioButton // "Login" mode selector
	radioGuest *ColleagueRadioButton // "Guest" mode selector
	textUsername *ColleagueTextField // username entry
	textPassword *ColleagueTextField // password entry
	buttonOk *ColleagueButton // OK button (exits the app)
	buttonCancel *ColleagueButton // Cancel button (exits the app)
	// ˅
	// ˄
}
// NewAppLogin constructs an AppLogin mediator and immediately creates
// and wires up its Colleague widgets.
func NewAppLogin() *AppLogin {
	// ˅
	login := &AppLogin{}
	login.CreateColleagues()
	return login
	// ˄
}
// CreateColleagues builds the Colleague wrappers around walk widgets,
// registers this AppLogin as their mediator, and then declares and runs
// the main window. Note: mainWindow.Run() blocks until the window closes,
// so this method does not return while the dialog is on screen.
func (self *AppLogin) CreateColleagues() {
	// ˅
	// Create LineEdit, PushButton and RadioButton
	// The raw walk widget pointers are nil here; walk assigns them via
	// the AssignTo fields of the declarative tree below.
	var rd1 *walk.RadioButton
	var rd2 *walk.RadioButton
	var edit1 *walk.LineEdit
	var edit2 *walk.LineEdit
	var pb1 *walk.PushButton
	var pb2 *walk.PushButton
	self.radioGuest = NewColleagueRadioButton(rd1)
	self.radioLogin = NewColleagueRadioButton(rd2)
	self.textUsername = NewColleagueTextField(edit1)
	self.textPassword = NewColleagueTextField(edit2)
	self.buttonOk = NewColleagueButton(pb1)
	self.buttonCancel = NewColleagueButton(pb2)
	// Set mediators: every colleague reports state changes back to self.
	self.radioLogin.mediator = self
	self.radioGuest.mediator = self
	self.textUsername.mediator = self
	self.textPassword.mediator = self
	self.buttonOk.mediator = self
	self.buttonCancel.mediator = self
	// Create main window (declarative walk UI tree).
	mainWindow := MainWindow{
		Title: "Mediator Example",
		Size: Size{250, 200},
		Layout: VBox{},
		Children: []Widget{
			// Guest/Login mode selection.
			RadioButtonGroupBox{
				Layout: HBox{},
				Buttons: []RadioButton{
					RadioButton{
						AssignTo: &self.radioGuest.radioButton,
						Text: "Guest",
						OnClicked: self.radioGuest.OnClicked,
					},
					RadioButton{
						AssignTo: &self.radioLogin.radioButton,
						Text: "Login",
						OnClicked: self.radioLogin.OnClicked,
					},
				},
			},
			// Username row.
			Composite{
				Layout: HBox{},
				Children: []Widget{
					Label{
						Text: "Username:",
					},
					LineEdit{
						AssignTo: &self.textUsername.lineEdit,
						MinSize: Size{110, 0},
						OnTextChanged: self.textUsername.OnTextChanged,
					},
				},
			},
			// Password row (masked input).
			Composite{
				Layout: HBox{},
				Children: []Widget{
					Label{
						Text: "Password:",
					},
					LineEdit{
						AssignTo: &self.textPassword.lineEdit,
						MinSize: Size{110, 0},
						PasswordMode: true,
						OnTextChanged: self.textPassword.OnTextChanged,
					},
				},
			},
			// OK / Cancel buttons.
			Composite{
				Layout: HBox{},
				Children: []Widget{
					PushButton{
						AssignTo: &self.buttonOk.pushButton,
						Text: "OK",
						OnClicked: self.buttonOk.OnClicked,
					},
					PushButton{
						AssignTo: &self.buttonCancel.pushButton,
						Text: "Cancel",
						OnClicked: self.buttonCancel.OnClicked,
					},
				},
			},
		},
	}
	if _, err := mainWindow.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// ˄
}
// Change enable/disable of the Colleagues when notified from the Mediators.
//
// Behavior (unchanged): OK or Cancel exits the process; Guest mode
// disables the text fields and enables OK; Login mode enables the text
// fields and enables OK only when both fields are non-empty.
//
// Idiom fix only: redundant "== true"/"== false" boolean comparisons
// removed (staticcheck S1002); a comment typo ("Colleage") corrected.
func (self *AppLogin) ColleagueChanged() {
	// ˅
	if self.buttonOk.IsSelected() || self.buttonCancel.IsSelected() {
		os.Exit(0)
	} else {
		if self.radioGuest.IsSelected() { // Guest mode
			self.textUsername.SetActivation(false)
			self.textPassword.SetActivation(false)
			self.buttonOk.SetActivation(true)
		} else { // Login mode
			self.textUsername.SetActivation(true)
			self.textPassword.SetActivation(true)
			// Judge whether the changed Colleague is enabled or disabled
			if !self.textUsername.IsEmpty() && !self.textPassword.IsEmpty() {
				self.buttonOk.SetActivation(true)
			} else {
				self.buttonOk.SetActivation(false)
			}
		}
	}
	// ˄
}
// ˅
// ˄
|
package utils
import (
"encoding/json"
"github.com/riposa/utils/log"
"github.com/valyala/fasthttp"
url2 "net/url"
"time"
)
// requests is a stateless namespace type; its methods (Get, PostJson,
// PostForm, ...) are accessed through the exported package-level
// 'Requests' variable.
type requests struct {
	// nothing
}
// HTTPCallback is an optional hook invoked after each request completes.
// Do receives the raw fasthttp request and response; its return value is
// collected into HTTPResponse.CallbackOutput.
type HTTPCallback interface {
	Do(req *fasthttp.Request, resp *fasthttp.Response) interface{}
}
// HTTPResponse is the normalized result returned by the request helpers.
type HTTPResponse struct {
	status int // HTTP status code
	contentType []byte // raw Content-Type header value
	body []byte // response payload
	// CallbackOutput holds the return values of the HTTPCallback hooks
	// passed to the request call, in the order they were supplied.
	CallbackOutput []interface{}
}
// Status returns the HTTP status code of the response.
func (h *HTTPResponse) Status() int {
	return h.status
}
// ContentType returns the response Content-Type header as a string.
func (h *HTTPResponse) ContentType() string {
	return string(h.contentType)
}
// Body returns the raw response payload. The returned slice is the
// internal buffer; callers should not modify it.
func (h *HTTPResponse) Body() []byte {
	return h.body
}
// Json unmarshals the response body into v (which must be a pointer),
// returning any json.Unmarshal error.
func (h *HTTPResponse) Json(v interface{}) error {
	return json.Unmarshal(h.body, v)
}
// Text returns the response body decoded as a string.
func (h *HTTPResponse) Text() string {
	return string(h.body)
}
var (
	// Requests is the package-level entry point for issuing HTTP
	// requests, e.g. utils.Requests.Get(...).
	Requests requests
	// reqLogger is the shared logger used by all request helpers.
	reqLogger = log.New()
)
// Get performs an HTTP GET against url with the supplied query
// parameters and headers. Each optional HTTPCallback is invoked with the
// raw request/response and its return value appended to CallbackOutput.
// Timing for the transport and for the whole call is logged.
//
// BUG FIX: resp.Body() and resp.Header.ContentType() return slices that
// alias fasthttp's pooled buffers, which the deferred ReleaseResponse
// returns to the pool for reuse. The original stored those slices
// directly, so the returned HTTPResponse could be silently corrupted by
// a later request. The data is now copied before release.
func (r *requests) Get(url string, param map[string]string, headers map[string]string, callback ...HTTPCallback) (*HTTPResponse, error) {
	var result HTTPResponse
	var t1, t2, t3, t4 int64
	var cbCount int
	t1 = time.Now().UnixNano()
	u, err := url2.Parse(url)
	if err != nil {
		reqLogger.Exception(err)
		return nil, err
	}
	req := fasthttp.AcquireRequest()
	reqUri := fasthttp.AcquireURI()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)
	defer fasthttp.ReleaseURI(reqUri)
	reqUri.SetHost(u.Host)
	reqUri.SetPath(u.Path)
	reqUri.SetScheme(u.Scheme)
	// Build the encoded query string from the param map.
	queryValues := make(url2.Values, len(param))
	for k, v := range param {
		queryValues[k] = []string{v}
	}
	queryString := queryValues.Encode()
	reqUri.SetQueryString(queryString)
	req.SetRequestURIBytes(reqUri.FullURI())
	req.Header.SetMethod("GET")
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	t2 = time.Now().UnixNano()
	err = fasthttp.Do(req, resp)
	if err != nil {
		reqLogger.Error("[Request.Get] %s", err.Error())
		return nil, err
	}
	t3 = time.Now().UnixNano()
	for _, cb := range callback {
		result.CallbackOutput = append(result.CallbackOutput, cb.Do(req, resp))
		cbCount++
	}
	result.status = resp.StatusCode()
	// Copy out of fasthttp's pooled buffers before ReleaseResponse runs.
	result.body = append([]byte(nil), resp.Body()...)
	result.contentType = append([]byte(nil), resp.Header.ContentType()...)
	t4 = time.Now().UnixNano()
	reqLogger.Infof("[Request.Get] target url: %s, transport time cost: %.3fms, total time cost: %.3fms", string(req.URI().FullURI()), float64(t3-t2)/1e6, float64(t4-t1)/1e6)
	reqLogger.Infof("[Request.Get] %d callback triggered, check response on HTTPResponse.CallbackOutput", cbCount)
	return &result, nil
}
// PostJson performs an HTTP POST with a JSON-encoded body built from
// param. Headers and HTTPCallback hooks behave as in Get; timing is
// logged.
//
// BUG FIX: as in Get, resp.Body() / resp.Header.ContentType() alias
// pooled fasthttp buffers invalidated by the deferred ReleaseResponse;
// they are now copied before release.
func (r *requests) PostJson(url string, param interface{}, headers map[string]string, callback ...HTTPCallback) (*HTTPResponse, error) {
	var result HTTPResponse
	var t1, t2, t3, t4 int64
	var cbCount int
	t1 = time.Now().UnixNano()
	u, err := url2.Parse(url)
	if err != nil {
		reqLogger.Exception(err)
		return nil, err
	}
	req := fasthttp.AcquireRequest()
	reqUri := fasthttp.AcquireURI()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)
	defer fasthttp.ReleaseURI(reqUri)
	reqUri.SetHost(u.Host)
	reqUri.SetPath(u.Path)
	reqUri.SetScheme(u.Scheme)
	req.SetRequestURIBytes(reqUri.FullURI())
	req.Header.SetMethod("POST")
	req.Header.SetContentType("application/json")
	body, err := json.Marshal(param)
	if err != nil {
		reqLogger.Error("[Request.Post] %s", err.Error())
		return nil, err
	}
	req.SetBody(body)
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	t2 = time.Now().UnixNano()
	err = fasthttp.Do(req, resp)
	if err != nil {
		reqLogger.Error("[Request.Post] %s", err.Error())
		return nil, err
	}
	t3 = time.Now().UnixNano()
	for _, cb := range callback {
		result.CallbackOutput = append(result.CallbackOutput, cb.Do(req, resp))
		cbCount++
	}
	result.status = resp.StatusCode()
	// Copy out of fasthttp's pooled buffers before ReleaseResponse runs.
	result.body = append([]byte(nil), resp.Body()...)
	result.contentType = append([]byte(nil), resp.Header.ContentType()...)
	t4 = time.Now().UnixNano()
	reqLogger.Infof("[Request.Post] target url: %s, transport time cost: %.3fms, total time cost: %.3fms", string(req.URI().FullURI()), float64(t3-t2)/1e6, float64(t4-t1)/1e6)
	reqLogger.Infof("[Request.Post] %d callback triggered, check response on HTTPResponse.CallbackOutput", cbCount)
	return &result, nil
}
// PostForm performs an HTTP POST with an application/x-www-form-urlencoded
// body built from param. Headers and HTTPCallback hooks behave as in Get;
// timing is logged.
//
// BUG FIX: as in Get, resp.Body() / resp.Header.ContentType() alias
// pooled fasthttp buffers invalidated by the deferred ReleaseResponse;
// they are now copied before release.
func (r *requests) PostForm(url string, param map[string]string, headers map[string]string, callback ...HTTPCallback) (*HTTPResponse, error) {
	var result HTTPResponse
	var t1, t2, t3, t4 int64
	var cbCount int
	t1 = time.Now().UnixNano()
	u, err := url2.Parse(url)
	if err != nil {
		reqLogger.Exception(err)
		return nil, err
	}
	req := fasthttp.AcquireRequest()
	reqUri := fasthttp.AcquireURI()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)
	defer fasthttp.ReleaseURI(reqUri)
	reqUri.SetHost(u.Host)
	reqUri.SetPath(u.Path)
	reqUri.SetScheme(u.Scheme)
	req.SetRequestURIBytes(reqUri.FullURI())
	req.Header.SetMethod("POST")
	req.Header.SetContentType("application/x-www-form-urlencoded")
	// URL-encode the form fields into the request body.
	values := make(url2.Values, len(param))
	for k, v := range param {
		values[k] = []string{v}
	}
	body := values.Encode()
	req.SetBody([]byte(body))
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	t2 = time.Now().UnixNano()
	err = fasthttp.Do(req, resp)
	if err != nil {
		reqLogger.Error("[Request.Post] %s", err.Error())
		return nil, err
	}
	t3 = time.Now().UnixNano()
	for _, cb := range callback {
		result.CallbackOutput = append(result.CallbackOutput, cb.Do(req, resp))
		cbCount++
	}
	result.status = resp.StatusCode()
	// Copy out of fasthttp's pooled buffers before ReleaseResponse runs.
	result.body = append([]byte(nil), resp.Body()...)
	result.contentType = append([]byte(nil), resp.Header.ContentType()...)
	t4 = time.Now().UnixNano()
	reqLogger.Infof("[Request.Post] target url: %s, transport time cost: %.3fms, total time cost: %.3fms", string(req.URI().FullURI()), float64(t3-t2)/1e6, float64(t4-t1)/1e6)
	reqLogger.Infof("[Request.Post] %d callback triggered, check response on HTTPResponse.CallbackOutput", cbCount)
	return &result, nil
}
// PostJsonWithQueryString performs an HTTP POST with a JSON-encoded body
// built from param AND a URL query string built from query. Headers and
// HTTPCallback hooks behave as in Get; timing is logged.
//
// BUG FIX: as in Get, resp.Body() / resp.Header.ContentType() alias
// pooled fasthttp buffers invalidated by the deferred ReleaseResponse;
// they are now copied before release.
func (r *requests) PostJsonWithQueryString(url string, param interface{}, headers map[string]string, query map[string]string, callback ...HTTPCallback) (*HTTPResponse, error) {
	var result HTTPResponse
	var t1, t2, t3, t4 int64
	var cbCount int
	t1 = time.Now().UnixNano()
	u, err := url2.Parse(url)
	if err != nil {
		reqLogger.Exception(err)
		return nil, err
	}
	req := fasthttp.AcquireRequest()
	reqUri := fasthttp.AcquireURI()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)
	defer fasthttp.ReleaseURI(reqUri)
	reqUri.SetHost(u.Host)
	reqUri.SetPath(u.Path)
	reqUri.SetScheme(u.Scheme)
	// Build the encoded query string from the query map.
	queryValues := make(url2.Values, len(query))
	for k, v := range query {
		queryValues[k] = []string{v}
	}
	queryString := queryValues.Encode()
	reqUri.SetQueryString(queryString)
	req.SetRequestURIBytes(reqUri.FullURI())
	req.Header.SetMethod("POST")
	req.Header.SetContentType("application/json")
	body, err := json.Marshal(param)
	if err != nil {
		reqLogger.Exception(err)
		return nil, err
	}
	req.SetBody(body)
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	t2 = time.Now().UnixNano()
	err = fasthttp.Do(req, resp)
	if err != nil {
		reqLogger.Exception(err)
		return nil, err
	}
	t3 = time.Now().UnixNano()
	for _, cb := range callback {
		result.CallbackOutput = append(result.CallbackOutput, cb.Do(req, resp))
		cbCount++
	}
	result.status = resp.StatusCode()
	// Copy out of fasthttp's pooled buffers before ReleaseResponse runs.
	result.body = append([]byte(nil), resp.Body()...)
	result.contentType = append([]byte(nil), resp.Header.ContentType()...)
	t4 = time.Now().UnixNano()
	reqLogger.Infof("[Request.Post] target url: %s, transport time cost: %.3fms, total time cost: %.3fms", string(req.URI().FullURI()), float64(t3-t2)/1e6, float64(t4-t1)/1e6)
	reqLogger.Infof("[Request.Post] %d callback triggered, check response on HTTPResponse.CallbackOutput", cbCount)
	return &result, nil
}
|
package remove
/*import (
"encoding/base64"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"
cloudpkg "github.com/devspace-cloud/devspace/pkg/devspace/cloud"
cloudconfig "github.com/devspace-cloud/devspace/pkg/devspace/cloud/config"
cloudlatest "github.com/devspace-cloud/devspace/pkg/devspace/cloud/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/devspace/cloud/token"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/devspace-cloud/devspace/pkg/util/survey"
homedir "github.com/mitchellh/go-homedir"
"gotest.tools/assert"
)
type removeProviderTestCase struct {
name string
args []string
answers []string
graphQLResponses []interface{}
provider string
providerList []*cloudlatest.Provider
expectedErr string
}
func TestRunRemoveProvider(t *testing.T) {
claimAsJSON, _ := json.Marshal(token.ClaimSet{
Expiration: time.Now().Add(time.Hour).Unix(),
})
validEncodedClaim := base64.URLEncoding.EncodeToString(claimAsJSON)
for strings.HasSuffix(string(validEncodedClaim), "=") {
validEncodedClaim = strings.TrimSuffix(validEncodedClaim, "=")
}
testCases := []removeProviderTestCase{
removeProviderTestCase{
name: "Remove not existent provider",
providerList: []*cloudlatest.Provider{
&cloudlatest.Provider{},
},
args: []string{"Doesn'tExist"},
},
removeProviderTestCase{
name: "Remove existent provider",
providerList: []*cloudlatest.Provider{
&cloudlatest.Provider{
Name: "Exists",
},
},
args: []string{"Exists"},
},
}
log.SetInstance(&log.DiscardLogger{PanicOnExit: true})
for _, testCase := range testCases {
testRunRemoveProvider(t, testCase)
}
}
func testRunRemoveProvider(t *testing.T, testCase removeProviderTestCase) {
dir, err := ioutil.TempDir("", "test")
if err != nil {
t.Fatalf("Error creating temporary directory: %v", err)
}
wdBackup, err := os.Getwd()
if err != nil {
t.Fatalf("Error getting current working directory: %v", err)
}
err = os.Chdir(dir)
if err != nil {
t.Fatalf("Error changing working directory: %v", err)
}
homedir, err := homedir.Dir()
assert.NilError(t, err, "Error getting homedir in testCase %s", testCase.name)
relDir, err := filepath.Rel(homedir, dir)
assert.NilError(t, err, "Error getting relative dir path in testCase %s", testCase.name)
cloudconfig.DevSpaceProvidersConfigPath = filepath.Join(relDir, "Doesn'tExist")
cloudconfig.LegacyDevSpaceCloudConfigPath = filepath.Join(relDir, "Doesn'tExist")
providerConfig, err := cloudconfig.Load()
assert.NilError(t, err, "Error getting provider config in testCase %s", testCase.name)
providerConfig.Providers = testCase.providerList
for _, answer := range testCase.answers {
survey.SetNextAnswer(answer)
}
cloudpkg.DefaultGraphqlClient = &customGraphqlClient{
responses: testCase.graphQLResponses,
}
defer func() {
//Delete temp folder
err = os.Chdir(wdBackup)
if err != nil {
t.Fatalf("Error changing dir back: %v", err)
}
err = os.RemoveAll(dir)
if err != nil {
t.Fatalf("Error removing dir: %v", err)
}
}()
if len(testCase.args) == 0 {
testCase.args = []string{""}
}
err = (&providerCmd{
Name: testCase.provider,
}).RunRemoveCloudProvider(nil, testCase.args)
if testCase.expectedErr == "" {
assert.NilError(t, err, "Unexpected error in testCase %s.", testCase.name)
} else {
assert.Error(t, err, testCase.expectedErr, "Wrong or no error in testCase %s.", testCase.name)
}
}*/
|
package main
import (
"context"
"fmt"
"log"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
)
// main demonstrates reading a block, its transactions, and a single
// transaction by hash from an Ethereum node over RPC.
func main() {
	client, err := ethclient.Dial("https://mainnet.infura.io/v3/YOUR_PROJECT_ID")
	if err != nil {
		log.Fatal(err)
	}
	blockNumber := big.NewInt(1345675)
	block, err := client.BlockByNumber(context.Background(), blockNumber)
	if err != nil {
		log.Fatal(err)
	}
	// Transaction information
	for _, tx := range block.Transactions() {
		fmt.Println("Hash: ", tx.Hash().Hex())
		fmt.Println("Value: ", tx.Value().String())
		fmt.Println("Gas: ", tx.Gas())
		fmt.Println("Gas Price: ", tx.GasPrice().Uint64())
		fmt.Println("Nonce: ", tx.Nonce())
		fmt.Println("Data: ", tx.Data())
		// To() is nil for contract-creation transactions; calling Hex() on a
		// nil address would panic, so guard explicitly.
		if to := tx.To(); to != nil {
			fmt.Println("To: ", to.Hex())
		} else {
			fmt.Println("To: ", "contract creation")
		}
		// (Removed a stale `if err != nil` here: it re-checked the error from
		// BlockByNumber, which was already handled above.)
		if msg, err := tx.AsMessage(types.NewEIP155Signer(tx.ChainId()), big.NewInt(1)); err == nil {
			fmt.Println("From: ", msg.From().Hex())
		}
		// receipt, err := client.TransactionReceipt(context.Background(), tx.Hash())
		// if err != nil {
		//   log.Fatal(err)
		// }
		// fmt.Println("Receipt: ", receipt.Status)
		fmt.Println()
	}
	// Transaction count at specific block hash
	blockHash := common.HexToHash("0x9e8751ebb5069389b855bba72d94902cc385042661498a415979b7b6ee9ba4b9")
	count, err := client.TransactionCount(context.Background(), blockHash)
	if err != nil {
		log.Fatal(err)
	}
	// Just transaction hashes
	for idx := uint(0); idx < count; idx++ {
		tx, err := client.TransactionInBlock(context.Background(), blockHash, idx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(tx.Hash().Hex())
	}
	// Is pending
	txHash := common.HexToHash("0x5d49fcaa394c97ec8a9c3e7bd9e8388d420fb050a52083ca52ff24b3b65bc9c2")
	tx, isPending, err := client.TransactionByHash(context.Background(), txHash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tx.Hash().Hex())
	fmt.Println(isPending)
}
|
package mmap
import (
"testing"
"fmt"
)
// TestBitMap exercises set, query, print and clear operations on a small
// bitmap, printing intermediate state for manual inspection.
func TestBitMap(t *testing.T) {
	bm := NewBitMap(10)
	bm.SetBit(1)
	bm.SetBit(3)
	bm.SetBit(5)
	bm.SetBit(7)
	// Membership before clearing bit 5.
	fmt.Println("是否包含5?", bm.Contain(5))
	fmt.Println(bm.PrintBit())
	fmt.Println(bm.PrintNum())
	// Clearing bit 5 must flip the membership result.
	bm.Clear(5)
	fmt.Println("是否包含5?", bm.Contain(5))
}
|
// +build !linux
package sstable
// tryMMap is the non-linux fallback (see the build constraint above): it
// always returns errNotImplemented so callers fall back to regular file I/O.
func (t *SSTable) tryMMap() error {
	return errNotImplemented
}
// tryMunmap is a no-op on platforms without mmap support, since tryMMap
// never maps anything here.
func (t *SSTable) tryMunmap() {
}
|
package examples
import (
"fmt"
"github.com/corbym/gocrest/then"
"testing"
// import these to test a postgres container
"github.com/corbym/gocrest/is"
"github.com/cybernostics/cntest"
"github.com/cybernostics/cntest/postgres"
"github.com/jmoiron/sqlx"
)
// TestPostgresRunWith spins up a disposable postgres:11 container seeded
// with the fixture schema, verifies connectivity, and dumps every agent row.
func TestPostgresRunWith(t *testing.T) {
	cntest.PullImage("postgres", "11", cntest.FromDockerHub)
	cnt := postgres.Container(cntest.PropertyMap{"initdb_path": "../fixtures/testschema"}).
		WithImage("postgres:11")
	cntest.ExecuteWithRunningContainer(t, cnt, func(t *testing.T) {
		// Connect and make sure the database is actually reachable.
		db, err := cnt.DBConnect(cnt.MaxStartTimeSeconds)
		then.AssertThat(t, err, is.Nil())
		defer db.Close()
		then.AssertThat(t, db.Ping(), is.Nil())
		// Run a simple query through sqlx and print each result row.
		store := AgentStore{sqlx.NewDb(db, cnt.Props["driver"])}
		agents, err := store.GetAgents()
		then.AssertThat(t, err, is.Nil())
		for _, agent := range agents {
			fmt.Printf("%v\n", agent)
		}
	})
}
|
package gui
import (
"tetra/lib/geom"
"tetra/lib/glman"
)
// Pane3D is a pane for a 3D scene. It keeps its own projection (MatP) and
// view (MatV) matrices; the model matrix is reset to identity on each render.
type Pane3D struct {
	Pane
	MatP geom.Mat4 // projection matrix
	MatV geom.Mat4 // view matrix
}
// Init a new object: initializes the embedded Pane and resets both the
// projection and view matrices to identity.
func (pn *Pane3D) Init() {
	pn.Pane.Init()
	pn.MatP = geom.Mat4Ident()
	pn.MatV = geom.Mat4Ident()
}
// State to string. Currently stateless: always returns (nil, nil).
func (pn *Pane3D) State() ([]byte, error) {
	//return []byte(pn.btn.Text()), nil
	return nil, nil
}
// SetState from string. Currently a no-op that always succeeds.
func (pn *Pane3D) SetState(data []byte) error {
	//pn.btn.SetText(string(data))
	return nil
}
// Is3D reports whether pane is 3D scene; always true for Pane3D.
func (pn *Pane3D) Is3D() bool {
	return true
}
// Ortho set projection to orthographic with the given clip box.
func (pn *Pane3D) Ortho(xMin float32, xMax float32, yMin float32, yMax float32, zMin float32, zMax float32) {
	pn.MatP = geom.Mat4Ortho(xMin, xMax, yMin, yMax, zMin, zMax)
}
// Frustum set projection to special frustum.
func (pn *Pane3D) Frustum(left float32, right float32, bottom float32, top float32, near float32, far float32) {
	pn.MatP = geom.Mat4Frustum(left, right, bottom, top, near, far)
}
// Perspective set projection to perspective with the given vertical field of
// view, aspect ratio and near/far planes.
func (pn *Pane3D) Perspective(fovy float32, aspect float32, near float32, far float32) {
	pn.MatP = geom.Mat4Perspective(fovy, aspect, near, far)
}
// LookAt set view matrix to look at special point.
// NOTE(review): Vec3 is used unqualified while the matrix helpers come from
// package geom — confirm Vec3 is declared or aliased in this package.
func (pn *Pane3D) LookAt(eye Vec3, lookAt Vec3, up Vec3) {
	pn.MatV = geom.Mat4LookAt(eye, lookAt, up)
}
// Render the pane: saves the viewport, installs this pane's projection/view
// matrices (model reset to identity) and clip rectangle, renders all child
// panes, then restores all GL state in strict reverse (stack) order.
func (pn *Pane3D) Render() {
	viewportBak := glman.GetViewport()
	//fmt.Println(viewportBak)
	glman.SetViewport(pn.BoundsGLCoord())
	glman.StackMatP.Push()
	glman.StackMatV.Push()
	glman.StackMatM.Push()
	glman.StackMatP.Load(pn.MatP)
	glman.StackMatV.Load(pn.MatV)
	glman.StackMatM.Load(geom.Mat4Ident())
	glman.StackClip2D.Push()
	rect := pn.bounds
	glman.StackClip2D.Load(rect)
	for _, c := range pn.child {
		c.Render()
	}
	// Pops must mirror the pushes above exactly.
	glman.StackClip2D.Pop()
	glman.StackMatM.Pop()
	glman.StackMatV.Pop()
	glman.StackMatP.Pop()
	glman.SetViewport(viewportBak)
}
|
/*
You are given a binary array nums (0-indexed).
We define xi as the number whose binary representation is the subarray nums[0..i] (from most-significant-bit to least-significant-bit).
For example, if nums = [1,0,1], then x0 = 1, x1 = 2, and x2 = 5.
Return an array of booleans answer where answer[i] is true if xi is divisible by 5.
Example 1:
Input: nums = [0,1,1]
Output: [true,false,false]
Explanation: The input numbers in binary are 0, 01, 011; which are 0, 1, and 3 in base-10.
Only the first number is divisible by 5, so answer[0] is true.
Example 2:
Input: nums = [1,1,1]
Output: [false,false,false]
Constraints:
1 <= nums.length <= 10^5
nums[i] is either 0 or 1.
*/
package main
import (
"fmt"
"reflect"
)
// main runs the three example cases from the problem statement.
func main() {
	cases := []struct {
		in   []int
		want []bool
	}{
		{[]int{1, 0, 1}, []bool{false, false, true}},
		{[]int{0, 1, 1}, []bool{true, false, false}},
		{[]int{1, 1, 1}, []bool{false, false, false}},
	}
	for _, c := range cases {
		test(c.in, c.want)
	}
}
// assert panics when the given condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test runs pdb5 on a, prints the result, and asserts it equals r.
func test(a []int, r []bool) {
	got := pdb5(a)
	fmt.Println(got)
	assert(reflect.DeepEqual(got, r))
}
// pdb5 reports, for every prefix a[0..i] of the binary array a, whether the
// number formed by that prefix (most-significant bit first) is divisible by
// 5. Only the running remainder modulo 5 is tracked, so intermediate values
// never overflow.
func pdb5(a []int) []bool {
	if len(a) == 0 {
		return []bool{}
	}
	ans := make([]bool, 0, len(a))
	rem := 0
	for _, bit := range a {
		rem = (rem*2 + bit) % 5
		ans = append(ans, rem == 0)
	}
	return ans
}
|
package boost
import (
"github.com/caddyserver/caddy"
"strings"
"testing"
"time"
)
// TestSetup table-tests the Corefile parser: valid configs must yield the
// expected method/ping settings, invalid ones must produce a matching error.
func TestSetup(t *testing.T) {
	tests := []struct {
		input        string
		shouldErr    bool
		err          string
		method       Method
		pingCount    int
		pingInterval time.Duration
		pingTimeout  time.Duration
	}{
		{"boost", false, "", defaultMethod, defaultPingCount, defaultPingInterval, defaultPingTimeout},
		{"boost {\nmethod ping\n}", false, "", defaultMethod, defaultPingCount, defaultPingInterval, defaultPingTimeout},
		{"boost {\nping_count 5\n}", false, "", defaultMethod, 5, defaultPingInterval, defaultPingTimeout},
		{"boost {\nping_interval 0.5\n}", false, "", defaultMethod, defaultPingCount, 500 * time.Millisecond, defaultPingTimeout},
		{"boost {\nping_timeout 1.0\n}", false, "", defaultMethod, defaultPingCount, defaultPingInterval, time.Second},
		{"boost {\nping_count 0\n}", true, "can not be less than 1", 0, 0, 0, 0},
		{"boost {\nping_interval 0\n}", true, "must be greater than 0", 0, 0, 0, 0},
		{"boost {\nping_timeout 0\n}", true, "must be greater than 0", 0, 0, 0, 0},
	}
	for i, test := range tests {
		c := caddy.NewTestController("dns", test.input)
		b, err := parse(c)
		if test.shouldErr {
			if err == nil {
				// Was `t.Errorf("... but found %s", i, err, ...)` which
				// formatted a nil error; report the absence explicitly.
				t.Errorf("Test %d: expected error but found none for input %s", i, test.input)
			} else if !strings.Contains(err.Error(), test.err) {
				t.Errorf("Test %d: expected error to contain: %v, found error: %v, input: %s", i, test.err, err, test.input)
			}
			continue
		}
		if err != nil {
			t.Errorf("Test %d: expected no error but found one for input %s, got: %v", i, test.input, err)
			// b is not guaranteed valid when parse failed; comparing its
			// fields below could dereference nil.
			continue
		}
		if test.method != b.Method {
			t.Errorf("Test %d: expected %d, got: %d", i, test.method, b.Method)
		}
		if test.pingCount != b.PingCount {
			t.Errorf("Test %d: expected %d, got: %d", i, test.pingCount, b.PingCount)
		}
		if test.pingInterval != b.PingInterval {
			t.Errorf("Test %d: expected %s, got: %s", i, test.pingInterval, b.PingInterval)
		}
		if test.pingTimeout != b.PingTimeout {
			t.Errorf("Test %d: expected %s, got: %s", i, test.pingTimeout, b.PingTimeout)
		}
	}
}
|
package main
import "fmt"
// main prints the default types Go infers for a few literal values.
func main() {
	fmt.Printf("v is of type %T\n", 42)     // untyped int constant -> int
	fmt.Printf("x is of type %T\n", 42.111) // untyped float constant -> float64
	fmt.Printf("y is of type %T\n", -42)    // negative literal is still int
	fmt.Printf("z is of type %T\n", "42")   // string literal -> string
}
|
package time_machine
import (
"context"
"time"
"time_machine/dao"
"time_machine/model"
)
// Machine stores events in a pluggable storage backend and answers
// time-range queries over them.
type Machine struct {
	timeDuration time.Duration // how long data is stored; NOTE(review): never assigned by NewTimeMachine — confirm it is set elsewhere or intended
	storage dao.Storage // storage engine
}
// NewTimeMachine builds a Machine backed by Redis using the connection and
// TTL settings from conf.
func NewTimeMachine(conf *TimeMachineConf) *Machine {
	return &Machine{storage: dao.InitRedisStorage(conf.RedisConf, conf.TTL)}
}
// Put stores a single event in the backing storage.
func (r *Machine) Put(ctx context.Context, event *model.Event) error {
	return r.storage.PutEvent(ctx, event)
}
// ListByTime returns all events in the [start, end] time range.
// NOTE(review): interval inclusivity depends on the storage implementation — confirm.
func (r *Machine) ListByTime(ctx context.Context, start, end int64) ([]*model.Event, error) {
	return r.storage.ListEventsByTime(ctx, start, end)
}
// CountsByTime counts events in the given time range.
func (r *Machine) CountsByTime(ctx context.Context, start, end int64) (int64, error) {
	return r.storage.CountEventsByTime(ctx, start, end)
}
// ListTypeByTime returns events of the given type in the time range.
func (r *Machine) ListTypeByTime(ctx context.Context, eventType string, start, end int64) ([]*model.Event, error) {
	return r.storage.ListTypeEventsByTime(ctx, eventType, start, end)
}
// CountsTypeByTime counts events of the given type in the time range.
func (r *Machine) CountsTypeByTime(ctx context.Context, eventType string, start, end int64) (int64, error) {
	return r.storage.CountTypeEventsByTime(ctx, eventType, start, end)
}
|
package releaze
import (
"encoding/json"
"net/http"
)
// HttpHandler serves the release/build info as indented JSON. On a marshal
// failure it responds with a 500 carrying the error text.
func HttpHandler(resp http.ResponseWriter, req *http.Request) {
	info := Get()
	bytes, err := json.MarshalIndent(info, "", "  ")
	if err != nil {
		http.Error(resp, err.Error(), http.StatusInternalServerError)
		// Missing before: without this return the handler fell through and
		// wrote a nil body after the 500 had already been sent.
		return
	}
	resp.Write(bytes)
}
|
package signer
import (
"crypto"
"errors"
"fmt"
"io"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/utils"
)
// bccspCryptoSigner adapts a BCCSP key to the standard crypto.Signer
// interface.
type bccspCryptoSigner struct {
	csp bccsp.BCCSP // crypto service provider that performs the signing
	key bccsp.Key   // asymmetric private key handle
	pk  interface{} // parsed public key, returned by Public()
}
// New returns a crypto.Signer backed by the given BCCSP instance and key.
// The key must be a non-nil asymmetric key; its public half is extracted and
// parsed eagerly so Public() is a cheap accessor.
func New(csp bccsp.BCCSP, key bccsp.Key) (crypto.Signer, error) {
	// Validate the inputs up front.
	switch {
	case csp == nil:
		return nil, errors.New("bccsp instance must be different from nil.")
	case key == nil:
		return nil, errors.New("key must be different from nil.")
	case key.Symmetric():
		return nil, errors.New("key must be asymmetric.")
	}
	// Materialize the public key: BCCSP handle -> DER bytes -> parsed key.
	pub, err := key.PublicKey()
	if err != nil {
		return nil, fmt.Errorf("failed getting public key [%s]", err)
	}
	raw, err := pub.Bytes()
	if err != nil {
		return nil, fmt.Errorf("failed marshalling public key [%s]", err)
	}
	pk, err := utils.DERToPublicKey(raw)
	if err != nil {
		return nil, fmt.Errorf("failed marshalling der to public key [%s]", err)
	}
	return &bccspCryptoSigner{csp: csp, key: key, pk: pk}, nil
}
// Public returns the signer's public key, parsed once at construction time
// from the BCCSP key's DER encoding.
func (s *bccspCryptoSigner) Public() crypto.PublicKey {
	return s.pk
}
// Sign signs digest with the wrapped BCCSP key. The rand reader is ignored:
// any randomness required is sourced by the underlying BCCSP implementation.
// Per the crypto.Signer contract, digest must already be the hash of the
// message, with the hash function conveyed via opts.
func (s *bccspCryptoSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {
	return s.csp.Sign(s.key, digest, opts)
}
|
package elasticsearch
import (
"elktools/cmd/utils"
"encoding/json"
"fmt"
"github.com/desertbit/grumble"
)
// init registers the "route" command factory with the package registry so
// the command is attached to the app at startup.
func init() {
	Register("route", initRoot)
}
// initRoot attaches the "route" command (POST /_cluster/reroute) and its
// "retry" subcommand to the default grumble app.
func initRoot(name string) {
	root := &grumble.Command{
		Name:      name,
		Help:      "POST /_cluster/reroute",
		HelpGroup: defaultApp.App.Config().Name,
		Usage:     utils.Usage("elastic route"),
	}
	defaultApp.App.AddCommand(root)

	retry := &grumble.Command{
		Name:      "retry",
		Help:      "will attempt a single retry round for these shards",
		HelpGroup: root.Name,
		Usage:     utils.Usage("elastic route retry"),
		Flags:     initFlags,
		AllowArgs: false,
		Run:       defaultApp.InitRun(retryFailedRun),
	}
	root.AddCommand(retry)
}
// retryFailedRun issues the retry-failed reroute request and renders the
// acknowledged/status result; a positive status is reported as an error
// carrying the raw response body.
func retryFailedRun(c *grumble.Context) ([]byte, error) {
	data, err := defaultApp.search.RetryFailedRoute(c.Args)
	if err != nil {
		return nil, err
	}
	var result struct {
		Acknowledged bool        `json:"acknowledged"`
		Error        interface{} `json:"error"`
		Status       int         `json:"status"`
	}
	if err := json.Unmarshal(data, &result); err != nil {
		return nil, err
	}
	if result.Status > 0 {
		return nil, fmt.Errorf("%s\n", data)
	}
	return []byte(fmt.Sprintf("%+v\n", result)), nil
}
|
package main
import "fmt"
// main demonstrates slice expressions on a slice of greeting strings.
func main() {
	greeting := []string{
		"Good morning!",
		"Bonjour!",
		"Dias!",
		"Bongiorno",
		"Ohayo",
		"Selamat pagil!",
		"Gutten morgen",
	}
	// show prints a label followed by the sliced view.
	show := func(label string, s []string) {
		fmt.Print(label + " ")
		fmt.Println(s)
	}
	show("[1:2]", greeting[1:2]) // half-open: element 1 only
	show("[:2]", greeting[:2])   // from the start, 2 excluded
	show("[5:]", greeting[5:])   // from 5 to the end
	show("[:]", greeting[:])     // the whole slice
}
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package format
import (
"testing"
"gollum/core"
"github.com/trivago/tgo/ttesting"
)
// TestFormatterTrimToBounds verifies that a single leading and trailing
// bound character is stripped from the message payload.
func TestFormatterTrimToBounds(t *testing.T) {
	expect := ttesting.NewExpect(t)

	cfg := core.NewPluginConfig("", "format.TrimToBounds")
	cfg.Override("LeftBounds", "|")
	cfg.Override("RightBounds", "|")

	plugin, err := core.NewPluginWithConfig(cfg)
	expect.NoError(err)
	formatter, ok := plugin.(*TrimToBounds)
	expect.True(ok)

	msg := core.NewMessage(nil, []byte("|foo bar foobar|"), nil, core.InvalidStreamID)
	expect.NoError(formatter.ApplyFormatter(msg))
	expect.Equal("foo bar foobar", msg.String())
}
// TestFormatterTrimToBoundsOffset checks that LeftOffset/RightOffset strip
// extra characters beyond the bound markers themselves.
func TestFormatterTrimToBoundsOffset(t *testing.T) {
	expect := ttesting.NewExpect(t)

	cfg := core.NewPluginConfig("", "format.TrimToBounds")
	cfg.Override("LeftBounds", "|")
	cfg.Override("RightBounds", "|")
	cfg.Override("LeftOffset", "1")
	cfg.Override("RightOffset", "1")

	plugin, err := core.NewPluginWithConfig(cfg)
	expect.NoError(err)
	formatter, ok := plugin.(*TrimToBounds)
	expect.True(ok)

	msg := core.NewMessage(nil, []byte("||a||"), nil, core.InvalidStreamID)
	expect.NoError(formatter.ApplyFormatter(msg))
	expect.Equal("a", msg.String())
}
// TestFormatterTrimToBoundsEmpty verifies that a payload consisting only of
// the two bound characters trims down to the empty string.
func TestFormatterTrimToBoundsEmpty(t *testing.T) {
	expect := ttesting.NewExpect(t)

	cfg := core.NewPluginConfig("", "format.TrimToBounds")
	cfg.Override("LeftBounds", "|")
	cfg.Override("RightBounds", "|")

	plugin, err := core.NewPluginWithConfig(cfg)
	expect.NoError(err)
	formatter, ok := plugin.(*TrimToBounds)
	expect.True(ok)

	msg := core.NewMessage(nil, []byte("||"), nil, core.InvalidStreamID)
	expect.NoError(formatter.ApplyFormatter(msg))
	expect.Equal("", msg.String())
}
// TestFormatterTrimToBoundsOverlap checks the degenerate case where the
// offsets make the trimmed regions overlap: the result must be empty rather
// than a panic or negative slice.
func TestFormatterTrimToBoundsOverlap(t *testing.T) {
	expect := ttesting.NewExpect(t)

	cfg := core.NewPluginConfig("", "format.TrimToBounds")
	cfg.Override("LeftBounds", "|")
	cfg.Override("RightBounds", "|")
	cfg.Override("LeftOffset", "1")
	cfg.Override("RightOffset", "1")

	plugin, err := core.NewPluginWithConfig(cfg)
	expect.NoError(err)
	formatter, ok := plugin.(*TrimToBounds)
	expect.True(ok)

	msg := core.NewMessage(nil, []byte("|a|"), nil, core.InvalidStreamID)
	expect.NoError(formatter.ApplyFormatter(msg))
	expect.Equal("", msg.String())
}
// TestFormatterTrimToBoundsWithSpaces uses a space as the bound character and
// expects surrounding whitespace to be trimmed from the payload.
func TestFormatterTrimToBoundsWithSpaces(t *testing.T) {
	expect := ttesting.NewExpect(t)

	cfg := core.NewPluginConfig("", "format.TrimToBounds")
	cfg.Override("LeftBounds", " ")
	cfg.Override("RightBounds", " ")

	plugin, err := core.NewPluginWithConfig(cfg)
	expect.NoError(err)
	formatter, ok := plugin.(*TrimToBounds)
	expect.True(ok)

	msg := core.NewMessage(nil, []byte(" foo bar foobar "), nil, core.InvalidStreamID)
	expect.NoError(formatter.ApplyFormatter(msg))
	expect.Equal("foo bar foobar", msg.String())
}
// TestFormatterTrimToBoundsTarget applies the formatter to the metadata field
// "foo" (via ApplyTo) and checks that the payload itself is left untouched.
func TestFormatterTrimToBoundsTarget(t *testing.T) {
	expect := ttesting.NewExpect(t)

	cfg := core.NewPluginConfig("", "format.TrimToBounds")
	cfg.Override("LeftBounds", "|")
	cfg.Override("RightBounds", "|")
	cfg.Override("ApplyTo", "foo")

	plugin, err := core.NewPluginWithConfig(cfg)
	expect.NoError(err)
	formatter, ok := plugin.(*TrimToBounds)
	expect.True(ok)

	msg := core.NewMessage(nil, []byte("|foo bar foobar|"), nil, core.InvalidStreamID)
	msg.GetMetadata().Set("foo", []byte("|foo bar foobar|second foo bar|"))
	expect.NoError(formatter.ApplyFormatter(msg))

	foo, err := msg.GetMetadata().Bytes("foo")
	expect.NoError(err)
	// Payload untouched, metadata field trimmed.
	expect.Equal("|foo bar foobar|", msg.String())
	expect.Equal("foo bar foobar|second foo bar", string(foo))
}
|
package main
import (
"flag"
"github.com/DimkaTheGreat/sittme/models"
"github.com/DimkaTheGreat/sittme/routing"
)
// Command-line options, parsed in main before the server starts.
var (
	timeout = flag.Int("timeout", 20, "timeout between interrupted and finished state")
	port = flag.String("port", "8086", "server port")
)
// main parses flags, loads the in-memory test translations, and starts the
// HTTP server on the configured port with the configured state timeout.
func main() {
	flag.Parse()
	translations := models.Translations{}
	translations.LoadTestData()
	routing.Run(translations, *timeout, *port)
}
|
// Copyright 2019 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package scms
import (
"fmt"
)
type CmsVolume struct {
VolumeId string
VolumeName string
}
type CmsTask struct {
bandwidth int64
cdpFlag bool
taskVolumes map[string]string
volumeList map[string]CmsVolume
}
func NewCmsTask(bandwidth int64, cdpFlag bool) *CmsTask {
return &CmsTask{
bandwidth: bandwidth,
cdpFlag: cdpFlag,
taskVolumes: make(map[string]string),
volumeList: make(map[string]CmsVolume),
}
}
func checkVolume(c *CmsTask, volumeId string) bool {
_, find := c.volumeList[volumeId]
return find
}
func (t *CmsTask) AddVolume(source CmsVolume, target CmsVolume) error {
if findSource := checkVolume(t, source.VolumeId); findSource {
return fmt.Errorf("source volume[%s] already exists", source.VolumeId)
}
if findTarget := checkVolume(t, target.VolumeId); findTarget {
return fmt.Errorf("target volume[%s] already exists", target.VolumeId)
}
t.taskVolumes[source.VolumeId] = target.VolumeId
t.volumeList[source.VolumeId] = source
t.volumeList[target.VolumeId] = target
return nil
}
|
package backup
import (
"bytes"
"context"
"encoding/json"
"fmt"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"sync/atomic"
"time"
"github.com/AlexAkulov/clickhouse-backup/pkg/clickhouse"
"github.com/AlexAkulov/clickhouse-backup/pkg/metadata"
"github.com/AlexAkulov/clickhouse-backup/utils"
apexLog "github.com/apex/log"
"github.com/yargevad/filepathx"
)
// Upload pushes the named local backup to the configured remote storage:
// compressed table data (unless schemaOnly), per-table metadata, RBAC and
// config archives, and finally a backup-level metadata.json. tablePattern
// filters the uploaded tables; diffFrom names a previously uploaded backup
// whose matching parts are marked as duplicates so they are not re-uploaded.
// After a successful upload, remote backups beyond the retention limit are
// removed.
func (b *Backuper) Upload(backupName, tablePattern, diffFrom string, schemaOnly bool) error {
	if b.cfg.General.RemoteStorage == "none" {
		fmt.Println("Upload aborted: RemoteStorage set to \"none\"")
		return nil
	}
	if backupName == "" {
		_ = PrintLocalBackups(b.cfg, "all")
		return fmt.Errorf("select backup for upload")
	}
	if backupName == diffFrom {
		return fmt.Errorf("you cannot upload diff from the same backup")
	}
	log := apexLog.WithFields(apexLog.Fields{
		"backup": backupName,
		"operation": "upload",
	})
	startUpload := time.Now()
	if err := b.ch.Connect(); err != nil {
		return fmt.Errorf("can't connect to clickhouse: %v", err)
	}
	defer b.ch.Close()
	if err := b.init(); err != nil {
		return err
	}
	// The backup must exist locally and must not already exist remotely.
	if _, err := getLocalBackup(b.cfg, backupName); err != nil {
		return fmt.Errorf("can't upload: %v", err)
	}
	remoteBackups, err := b.dst.BackupList()
	if err != nil {
		return err
	}
	for i := range remoteBackups {
		if backupName == remoteBackups[i].BackupName {
			return fmt.Errorf("'%s' already exists on remote", backupName)
		}
	}
	backupMetadata, err := b.ReadBackupMetadata(backupName)
	if err != nil {
		return err
	}
	// Resolve the tables selected by tablePattern from the local metadata dir.
	var tablesForUpload RestoreTables
	if len(backupMetadata.Tables) != 0 {
		metadataPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata")
		tablesForUpload, err = parseSchemaPattern(metadataPath, tablePattern, false)
		if err != nil {
			return err
		}
	}
	// When diffFrom is set, index that backup's tables so matching parts can
	// be marked as duplicates below.
	var diffFromBackup *metadata.BackupMetadata
	tablesForUploadFromDiff := map[metadata.TableTitle]metadata.TableMetadata{}
	if diffFrom != "" {
		diffFromBackup, err = b.ReadBackupMetadata(diffFrom)
		if err != nil {
			return err
		}
		if len(diffFromBackup.Tables) != 0 {
			backupMetadata.RequiredBackup = diffFrom
			metadataPath := path.Join(b.DefaultDataPath, "backup", diffFrom, "metadata")
			diffTablesList, err := parseSchemaPattern(metadataPath, tablePattern, false)
			if err != nil {
				return err
			}
			for _, t := range diffTablesList {
				tablesForUploadFromDiff[metadata.TableTitle{
					Database: t.Database,
					Table: t.Table,
				}] = t
			}
		}
	}
	compressedDataSize := int64(0)
	metadataSize := int64(0)
	// Upload tables concurrently, bounded by UploadConcurrency.
	log.Debugf("prepare table concurrent semaphore with concurrency=%d len(tablesForUpload)=%d", b.cfg.General.UploadConcurrency, len(tablesForUpload))
	s := semaphore.NewWeighted(int64(b.cfg.General.UploadConcurrency))
	g, ctx := errgroup.WithContext(context.Background())
	for i, table := range tablesForUpload {
		if err := s.Acquire(ctx, 1); err != nil {
			log.Errorf("can't acquire semaphore during Upload: %v", err)
			break
		}
		start := time.Now()
		if !schemaOnly {
			if diffTable, ok := tablesForUploadFromDiff[metadata.TableTitle{
				Database: table.Database,
				Table: table.Table,
			}]; ok {
				b.markDuplicatedParts(backupMetadata, &diffTable, &table)
			}
		}
		idx := i // per-iteration copy for the closure below (pre-Go1.22 loop semantics)
		g.Go(func() error {
			defer s.Release(1)
			var uploadedBytes int64
			if !schemaOnly {
				var files map[string][]string
				var err error
				files, uploadedBytes, err = b.uploadTableData(backupName, tablesForUpload[idx])
				if err != nil {
					return err
				}
				atomic.AddInt64(&compressedDataSize, uploadedBytes)
				tablesForUpload[idx].Files = files
			}
			tableMetadataSize, err := b.uploadTableMetadata(backupName, tablesForUpload[idx])
			if err != nil {
				return err
			}
			atomic.AddInt64(&metadataSize, tableMetadataSize)
			log.
				WithField("table", fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table)).
				WithField("duration", utils.HumanizeDuration(time.Since(start))).
				WithField("size", utils.FormatBytes(uploadedBytes+tableMetadataSize)).
				Info("done")
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return fmt.Errorf("one of upload go-routine return error: %v", err)
	}
	// upload rbac for backup
	if backupMetadata.RBACSize, err = b.uploadRBACData(backupName); err != nil {
		return err
	}
	// upload configs for backup
	if backupMetadata.ConfigSize, err = b.uploadConfigData(backupName); err != nil {
		return err
	}
	// upload metadata for backup: record sizes, the final table list, and the
	// data format, then push metadata.json to the remote.
	backupMetadata.CompressedSize = compressedDataSize
	backupMetadata.MetadataSize = metadataSize
	tt := []metadata.TableTitle{}
	for i := range tablesForUpload {
		tt = append(tt, metadata.TableTitle{
			Database: tablesForUpload[i].Database,
			Table: tablesForUpload[i].Table,
		})
	}
	backupMetadata.Tables = tt
	if b.cfg.GetCompressionFormat() != "none" {
		backupMetadata.DataFormat = b.cfg.GetCompressionFormat()
	} else {
		backupMetadata.DataFormat = "directory"
	}
	newBackupMetadataBody, err := json.MarshalIndent(backupMetadata, "", "\t")
	if err != nil {
		return err
	}
	remoteBackupMetaFile := path.Join(backupName, "metadata.json")
	if err := b.dst.PutFile(remoteBackupMetaFile,
		ioutil.NopCloser(bytes.NewReader(newBackupMetadataBody))); err != nil {
		return fmt.Errorf("can't upload: %v", err)
	}
	log.
		WithField("duration", utils.HumanizeDuration(time.Since(startUpload))).
		WithField("size", utils.FormatBytes(compressedDataSize+metadataSize+int64(len(newBackupMetadataBody))+backupMetadata.RBACSize+backupMetadata.ConfigSize)).
		Info("done")
	// Clean: apply the remote retention policy now that this backup is stored.
	if err := b.dst.RemoveOldBackups(b.cfg.General.BackupsToKeepRemote); err != nil {
		return fmt.Errorf("can't remove old backups on remote storage: %v", err)
	}
	return nil
}
// uploadConfigData archives the backup's "configs" directory (all files,
// recursively) and uploads it as configs.<ext> under the backup's remote
// path. Returns the uploaded archive size in bytes, or 0 when the local
// directory does not exist.
func (b *Backuper) uploadConfigData(backupName string) (int64, error) {
	configBackupPath := path.Join(b.DefaultDataPath, "backup", backupName, "configs")
	configFilesGlobPattern := path.Join(configBackupPath, "**/*.*")
	remoteConfigsArchive := path.Join(backupName, fmt.Sprintf("configs.%s", b.cfg.GetArchiveExtension()))
	return b.uploadAndArchiveBackupRelatedDir(configBackupPath, configFilesGlobPattern, remoteConfigsArchive)
}
// uploadRBACData archives the backup's "access" directory (RBAC objects,
// top level only) and uploads it as access.<ext> under the backup's remote
// path. Returns the uploaded archive size in bytes, or 0 when the local
// directory does not exist.
func (b *Backuper) uploadRBACData(backupName string) (int64, error) {
	rbacBackupPath := path.Join(b.DefaultDataPath, "backup", backupName, "access")
	accessFilesGlobPattern := path.Join(rbacBackupPath, "*.*")
	remoteRBACArchive := path.Join(backupName, fmt.Sprintf("access.%s", b.cfg.GetArchiveExtension()))
	return b.uploadAndArchiveBackupRelatedDir(rbacBackupPath, accessFilesGlobPattern, remoteRBACArchive)
}
// uploadAndArchiveBackupRelatedDir compresses every file matching
// localFilesGlobPattern under localBackupRelatedDir and streams the archive
// to remoteFile on the remote storage. It returns the uploaded archive size
// in bytes, or 0 when the local directory does not exist (nothing to upload).
func (b *Backuper) uploadAndArchiveBackupRelatedDir(localBackupRelatedDir, localFilesGlobPattern, remoteFile string) (int64, error) {
	if _, err := os.Stat(localBackupRelatedDir); os.IsNotExist(err) {
		return 0, nil
	}
	// len(localFiles) == 0 also covers the nil-slice case, so the previous
	// explicit `localFiles == nil` check was redundant.
	localFiles, err := filepathx.Glob(localFilesGlobPattern)
	if err != nil || len(localFiles) == 0 {
		return 0, fmt.Errorf("list %s return list=%v with err=%v", localFilesGlobPattern, localFiles, err)
	}
	// Make paths relative to the archived directory root.
	for i := range localFiles {
		localFiles[i] = strings.Replace(localFiles[i], localBackupRelatedDir, "", 1)
	}
	if err := b.dst.CompressedStreamUpload(localBackupRelatedDir, localFiles, remoteFile); err != nil {
		// The old message said "can't RBAC upload" even when this helper was
		// archiving configs; report the actual remote target instead.
		return 0, fmt.Errorf("can't upload %s: %v", remoteFile, err)
	}
	remoteUploaded, err := b.dst.StatFile(remoteFile)
	if err != nil {
		return 0, fmt.Errorf("can't check uploaded %s file: %v", remoteFile, err)
	}
	return remoteUploaded.Size(), nil
}
// uploadTableData compresses and uploads every data part of the given table,
// one archive per (disk, size-limited part group), with concurrency bounded
// by UploadConcurrency. It returns the per-disk list of uploaded archive
// names (recorded in the table metadata) and the total compressed bytes
// uploaded.
func (b *Backuper) uploadTableData(backupName string, table metadata.TableMetadata) (map[string][]string, int64, error) {
	uuid := path.Join(clickhouse.TablePathEncode(table.Database), clickhouse.TablePathEncode(table.Table))
	metdataFiles := map[string][]string{}
	capacity := 0
	for disk := range table.Parts {
		capacity += len(table.Parts[disk])
	}
	apexLog.Debugf("start uploadTableData %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity)
	s := semaphore.NewWeighted(int64(b.cfg.General.UploadConcurrency))
	g, ctx := errgroup.WithContext(context.Background())
	var uploadedBytes int64
	for disk := range table.Parts {
		backupPath := path.Join(b.DiskMap[disk], "backup", backupName, "shadow", uuid, disk)
		// Split this disk's parts into groups no larger than MaxFileSize each.
		parts, err := separateParts(backupPath, table.Parts[disk], b.cfg.General.MaxFileSize)
		if err != nil {
			return nil, 0, err
		}
		for i, p := range parts {
			if err := s.Acquire(ctx, 1); err != nil {
				apexLog.Errorf("can't acquire semaphore during Upload: %v", err)
				break
			}
			remoteDataPath := path.Join(backupName, "shadow", clickhouse.TablePathEncode(table.Database), clickhouse.TablePathEncode(table.Table))
			// Disabled temporary
			// if b.cfg.GetCompressionFormat() == "none" {
			// err = b.dst.UploadPath(0, backupPath, p, path.Join(remoteDataPath, disk))
			// } else {
			fileName := fmt.Sprintf("%s_%d.%s", disk, i+1, b.cfg.GetArchiveExtension())
			metdataFiles[disk] = append(metdataFiles[disk], fileName)
			remoteDataFile := path.Join(remoteDataPath, fileName)
			localFiles := p // per-iteration copy captured by the closure below
			g.Go(func() error {
				apexLog.Debugf("start upload %d files to %s", len(localFiles), remoteDataFile)
				defer s.Release(1)
				if err := b.dst.CompressedStreamUpload(backupPath, localFiles, remoteDataFile); err != nil {
					apexLog.Errorf("CompressedStreamUpload return error: %v", err)
					return fmt.Errorf("can't upload: %v", err)
				}
				// Stat the remote object to account for the real compressed size.
				remoteFile, err := b.dst.StatFile(remoteDataFile)
				if err != nil {
					return fmt.Errorf("can't check uploaded file: %v", err)
				}
				atomic.AddInt64(&uploadedBytes, remoteFile.Size())
				apexLog.Debugf("finish upload to %s", remoteDataFile)
				return nil
			})
		}
	}
	if err := g.Wait(); err != nil {
		return nil, 0, fmt.Errorf("one of uploadTableData go-routine return error: %v", err)
	}
	apexLog.Debugf("finish uploadTableData %s.%s with concurrency=%d len(table.Parts[...])=%d metadataFiles=%v, uploadedBytes=%v", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity, metdataFiles, uploadedBytes)
	return metdataFiles, uploadedBytes, nil
}
// uploadTableMetadata serializes the table metadata to indented JSON and
// uploads it as <backup>/metadata/<db>/<table>.json, returning the number of
// bytes written.
func (b *Backuper) uploadTableMetadata(backupName string, table metadata.TableMetadata) (int64, error) {
	// Upload the metadata for the table.
	tableMetafile := table
	content, err := json.MarshalIndent(&tableMetafile, "", "\t")
	if err != nil {
		return 0, fmt.Errorf("can't marshal json: %v", err)
	}
	remoteTableMetaFile := path.Join(backupName, "metadata", clickhouse.TablePathEncode(table.Database), fmt.Sprintf("%s.%s", clickhouse.TablePathEncode(table.Table), "json"))
	if err := b.dst.PutFile(remoteTableMetaFile,
		ioutil.NopCloser(bytes.NewReader(content))); err != nil {
		return 0, fmt.Errorf("can't upload: %v", err)
	}
	return int64(len(content)), nil
}
// markDuplicatedParts flags parts of newTable as Required when a part with
// the same name already exists in existsTable (from the required backup) and
// both part directories hold identical files (same inodes, via hard links).
// Required parts are later skipped during upload.
func (b *Backuper) markDuplicatedParts(backup *metadata.BackupMetadata, existsTable *metadata.TableMetadata, newTable *metadata.TableMetadata) {
	for disk, newParts := range newTable.Parts {
		existsParts, found := existsTable.Parts[disk]
		if !found || len(existsParts) == 0 {
			continue
		}
		// Index existing part names for constant-time membership checks.
		existsNames := make(map[string]struct{}, len(existsParts))
		for _, part := range existsParts {
			existsNames[part.Name] = struct{}{}
		}
		for i := range newParts {
			if _, duplicated := existsNames[newParts[i].Name]; !duplicated {
				continue
			}
			uuid := path.Join(clickhouse.TablePathEncode(existsTable.Database), clickhouse.TablePathEncode(existsTable.Table))
			existsPath := path.Join(b.DiskMap[disk], "backup", backup.RequiredBackup, "shadow", uuid, disk, newParts[i].Name)
			newPath := path.Join(b.DiskMap[disk], "backup", backup.BackupName, "shadow", uuid, disk, newParts[i].Name)
			if err := isDuplicatedParts(existsPath, newPath); err != nil {
				apexLog.Debugf("part '%s' and '%s' must be the same: %v", existsPath, newPath, err)
				continue
			}
			newParts[i].Required = true
		}
	}
}
func isDuplicatedParts(part1, part2 string) error {
p1, err := os.Open(part1)
if err != nil {
return err
}
defer p1.Close()
p2, err := os.Open(part2)
if err != nil {
return err
}
defer p2.Close()
pf1, err := p1.Readdirnames(-1)
if err != nil {
return err
}
pf2, err := p2.Readdirnames(-1)
if err != nil {
return err
}
if len(pf1) != len(pf2) {
return fmt.Errorf("files count in parts is different")
}
for _, f := range pf1 {
part1File, err := os.Stat(path.Join(part1, f))
if err != nil {
return err
}
part2File, err := os.Stat(path.Join(part2, f))
if err != nil {
return err
}
if !os.SameFile(part1File, part2File) {
return fmt.Errorf("file '%s' is different", f)
}
}
return nil
}
// ReadBackupMetadata loads and parses metadata.json for the given local
// backup. A backup without tables is rejected unless empty backups are
// explicitly allowed by configuration.
func (b *Backuper) ReadBackupMetadata(backupName string) (*metadata.BackupMetadata, error) {
	metadataPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata.json")
	body, err := ioutil.ReadFile(metadataPath)
	if err != nil {
		return nil, err
	}
	var backupMetadata metadata.BackupMetadata
	if err := json.Unmarshal(body, &backupMetadata); err != nil {
		return nil, err
	}
	if len(backupMetadata.Tables) == 0 && !b.cfg.General.AllowEmptyBackups {
		return nil, fmt.Errorf("'%s' is empty backup", backupName)
	}
	return &backupMetadata, nil
}
// separateParts walks every non-required part of a table under basePath and
// groups the regular files it finds into batches whose cumulative size stays
// within maxSize (a single file larger than maxSize still forms a batch of
// its own). Returned file paths are relative to basePath.
func separateParts(basePath string, parts []metadata.Part, maxSize int64) ([][]string, error) {
	var size int64
	files := []string{}
	result := [][]string{}
	for i := range parts {
		// Required parts are deduplicated against a previous backup and must
		// not be uploaded again.
		if parts[i].Required {
			continue
		}
		partPath := path.Join(basePath, parts[i].Name)
		err := filepath.Walk(partPath, func(filePath string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.Mode().IsRegular() {
				return nil
			}
			if (size + info.Size()) > maxSize {
				// Flush the current batch before starting a new one, but never
				// emit an empty batch: previously, when the very first file of
				// a batch already exceeded maxSize, an empty []string group
				// was appended and produced an empty upload archive.
				if len(files) > 0 {
					result = append(result, files)
				}
				files = []string{}
				size = 0
			}
			relativePath := strings.TrimPrefix(filePath, basePath)
			files = append(files, relativePath)
			size += info.Size()
			return nil
		})
		if err != nil {
			// Best-effort: a failed walk of one part is logged, not fatal.
			apexLog.Warnf("filepath.Walk return error: %v", err)
		}
	}
	if len(files) > 0 {
		result = append(result, files)
	}
	return result, nil
}
|
package server
import (
"context"
"errors"
"fmt"
"github.com/RecleverLogger/customerrs"
"github.com/RecleverLogger/logger"
"github.com/gorilla/mux"
"net"
"net/http"
"time"
)
// Server bundles an http.Server with its router, TCP listener, lifecycle
// context, and logging.
type Server struct {
	ctx context.Context
	cancel context.CancelFunc
	config *Config
	errCh chan<- error // receives the error produced when Run recovers from a panic
	stopCh chan struct{} // signaled once Shutdown has finished
	listner net.Listener // NOTE(review): misspelling of "listener" kept; renaming would touch every method
	router *mux.Router
	httpServer *http.Server
	logger logger.Logger
}
// New constructs a Server from the given config: it validates mandatory
// settings, binds the TCP listener, and registers all configured handlers on
// a fresh router. A nil ctx falls back to context.Background(). The returned
// server is ready for Run.
func New(ctx context.Context, errCh chan<- error, config *Config, logger logger.Logger) (*Server, error) {
	var err error
	logger.Logf("Creating new server")
	if config == nil {
		return nil, customerrs.ServerConfigIsNilErr()
	}
	if config.Port == "" {
		return nil, customerrs.ServerHttpPortIsEmptyErr()
	}
	server := &Server{
		config: config,
		errCh:  errCh,
		stopCh: make(chan struct{}, 1),
		logger: logger,
	}
	if ctx == nil {
		server.ctx, server.cancel = context.WithCancel(context.Background())
	} else {
		server.ctx, server.cancel = context.WithCancel(ctx)
	}
	{
		server.httpServer = &http.Server{
			ReadTimeout:  time.Duration(config.ReadTimeout * int(time.Second)),
			WriteTimeout: time.Duration(config.WriteTimeout * int(time.Second)),
			// NOTE(review): IdleTimeout reuses WriteTimeout — confirm the
			// config is not meant to carry a dedicated idle timeout value.
			IdleTimeout: time.Duration(config.WriteTimeout * int(time.Second)),
		}
		// TLS would be nice here, but this will do for now.
	}
	{
		server.listner, err = net.Listen("tcp", config.Port)
		if err != nil {
			return nil, customerrs.ServerFailToListenPortErr(config.Port, err)
		}
		logger.Logf("Created new listener on port = %s", config.Port)
	}
	{
		server.router = mux.NewRouter()
		// Attach the router directly to this server's http.Server instead of
		// the previous http.Handle("/", ...) call: registering on the global
		// http.DefaultServeMux mutated process-wide state and panicked if
		// New was ever called twice. Serving behavior is unchanged.
		server.httpServer.Handler = server.router
		if config.Handlers == nil {
			return nil, customerrs.ServerHaveNoHandlersErr()
		}
		for _, h := range config.Handlers {
			server.router.HandleFunc(h.Path, h.HandleFunc).Methods(h.Method)
			logger.Logf("Register new endpoint path = %s, method = %s", h.Path, h.Method)
		}
	}
	logger.Logf("Http server is created")
	return server, nil
}
// Run starts serving HTTP on the server's listener and blocks until the
// server stops. A panic inside the HTTP stack is recovered, converted into
// an error, pushed to the error channel, and returned.
func (s *Server) Run() (e error) {
	defer func() {
		if r := recover(); r != nil {
			const msg = "Server recover from panic"
			// fmt.Errorf replaces the non-idiomatic errors.New(fmt.Sprintf(...))
			// (staticcheck S1028). The string and error cases formatted the
			// payload identically, so a single combined case covers both.
			switch r.(type) {
			case string, error:
				e = fmt.Errorf("Error: %s, trace: %s", msg, r)
			default:
				e = fmt.Errorf("Error: %s", msg)
			}
			s.errCh <- e
		}
	}()
	if s.config.UseTls {
		// Start a TLS server here.
	}
	s.logger.Logf("Starting http server")
	return s.httpServer.Serve(s.listner)
}
// Shutdown gracefully stops the HTTP server, allowing up to 30 seconds for
// in-flight requests to finish, then cancels the server context and signals
// stopCh so waiters can observe completion.
func (s *Server) Shutdown() (e error) {
	s.logger.Logf("Shutdown the server")
	defer func() { s.stopCh <- struct{}{} }()
	defer s.cancel()
	shutdownCtx, release := context.WithTimeout(s.ctx, 30*time.Second)
	defer release()
	if err := s.httpServer.Shutdown(shutdownCtx); err != nil {
		return customerrs.ServerFailedToShutdownErr()
	}
	s.logger.Logf("Server shutdown successfully")
	return nil
}
|
package main
import (
"fmt"
"time"
// "time"
)
// main runs an interactive menu that spawns one counting goroutine per
// "process", toggles printing of their counters, and removes a goroutine by
// id on request.
func main() {
	id := uint(0) // next process id to assign
	op := 1       // menu selection; 0 terminates the loop
	flag := false // when true, each goroutine prints its counter
	// NOTE(review): flag is read by the goroutines and written by main with
	// no synchronization — this is a data race; confirm it is acceptable
	// for this demo.
	c := make(chan uint)
	for op != 0 {
		fmt.Println("1) Agregar proceso")
		fmt.Println("2) Mostrar proceso")
		fmt.Println("3) Eliminar proceso")
		fmt.Println("0) Salir")
		fmt.Scanln(&op)
		switch op {
		case 1:
			go func(id_ext uint, ch chan uint) {
				id := id_ext
				i := 0
				for {
					select {
					case msg := <-ch:
						// Deletion request: exit if it is addressed to this
						// goroutine, otherwise forward it for another
						// goroutine to pick up.
						if msg == id {
							fmt.Println(id)
							return
						} else {
							ch <- msg
						}
					default:
						if flag {
							fmt.Println(id, ":", i)
						}
						i++
						time.Sleep(time.Millisecond * 500)
					}
				}
			}(id, c)
			id++
		case 2:
			flag = !flag
		case 3:
			var id_del uint
			fmt.Scanln(&id_del)
			// NOTE(review): if no live goroutine has this id, the value
			// ping-pongs between goroutines forever (or the send blocks when
			// none exist) — confirm this is the intended behavior.
			c <- id_del
		case 0:
			op = 0
		}
	}
}
|
/**
Copyright xuehuiit Corp. 2018 All Rights Reserved.
http://www.xuehuiit.com
QQ 411321681
*/
package main
import (
//"os"
//"path"
//"testing"
//"time"
//"github.com/hyperledger/fabric-sdk-go/api/apiconfig"
//ca "github.com/hyperledger/fabric-sdk-go/api/apifabca"
//fab "github.com/hyperledger/fabric-sdk-go/api/apifabclient"
//"github.com/hyperledger/fabric-sdk-go/api/apitxn"
//pb "github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/protos/peer"
fabricapi "github.com/hyperledger/fabric-sdk-go/def/fabapi"
//"github.com/hyperledger/fabric-sdk-go/pkg/config"
"github.com/hyperledger/fabric-sdk-go/pkg/errors"
//"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/events"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/orderer"
//admin "github.com/hyperledger/fabric-sdk-go/pkg/fabric-txn/admin"
//"github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/common/cauthdsl"
"fmt"
identityImpl "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/identity"
//hfc "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client"
//"github.com/cloudflare/cfssl/api/client
//config "github.com/hyperledger/fabric-sdk-go/api/apiconfig"
//fab "github.com/hyperledger/fabric-sdk-go/api/apifabclient"
"io/ioutil"
//"github.com/cloudflare/cfssl/api/client"
"encoding/hex"
afc "github.com/hyperledger/fabric-sdk-go/api/apifabclient"
"github.com/hyperledger/fabric-sdk-go/api/apitxn"
fab "github.com/hyperledger/fabric-sdk-go/api/apifabclient"
peerapi "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/peer"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/events"
//ca "github.com/hyperledger/fabric-sdk-go/api/apifabca"
/*fabricCAClient "github.com/hyperledger/fabric-sdk-go/pkg/fabric-ca-client"
"github.com/hyperledger/fabric-sdk-go/pkg/config"
bccspFactory "github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/bccsp/factory"
kvs "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/keyvaluestore"*/
)
/*type fbconfig struct {
tlsEnabled bool
errorCase bool
}*/
// main runs the channel-query smoke test against the configured Fabric
// network; the fuller fabric_local scenario is kept disabled.
func main() {
	//fabric_local()
	test()
}
/*
func fabric_ca(){
configImpl, err := config.InitConfig("./fabricsdk/config_test.yaml")
if err != nil {
check( err ,"finderror" )
}
caConfig, err := configImpl.CAConfig("Org1")
if err != nil {
check( err ,"finderror" )
}
caname :=caConfig.CAName
fmt.Println(" %s ", caname)
client := hfc.NewClient(configImpl)
err = bccspFactory.InitFactories(configImpl.CSPConfig())
if err != nil {
check( err ,"finderror" )
}
cryptoSuite := bccspFactory.GetDefault()
client.SetCryptoSuite(cryptoSuite)
stateStore, err := kvs.CreateNewFileKeyValueStore("/tmp/enroll_user")
client.SetStateStore(stateStore)
caClient, err := fabricCAClient.NewFabricCAClient(configImpl, "Org1")
if err != nil {
check( err ,"caClient error " )
}
adminUser, err := client.LoadUserFromStateStore("admin")
if err != nil {
check( err ,"finderror" )
}
if adminUser == nil {
key, cert, err := caClient.Enroll("admin", "adminpw")
if err != nil {
check( err ," Enroll return error: %v " )
}
if key == nil {
check( err ," private key return from Enroll is nil " )
}
if cert == nil {
check( err ," cert return from Enroll is nil " )
}
}
}
*/
// test connects to the Fabric network described by config_test.yaml and
// prints the current block height of channel "roberttestchannel12".
// NOTE(review): every error below is discarded, so any failure surfaces as a
// nil-pointer panic — confirm this is acceptable for a smoke test.
func test(){
	// Read the configuration file.
	sdkOptions := fabricapi.Options{
		ConfigFile: "./fabricsdk/config_test.yaml",
	}
	// Create the SDK facade.
	sdk, _ := fabricapi.NewSDK(sdkOptions)
	session, _ := sdk.NewPreEnrolledUserSession("org1", "Admin")
	// Create the Go fabric client facade.
	client, _ := sdk.NewSystemClient(session)
	// Create the channel proxy; channel name: roberttestchannel12.
	channel, _ := client.NewChannel("roberttestchannel12")
	// Create the Orderer node proxy.
	orderer, _ := orderer.NewOrderer("grpc://192.168.23.212:7050", "", "", client.Config())
	channel.AddOrderer(orderer)
	// Create the Peer node proxy.
	peer ,_ := fabricapi.NewPeer("grpc://192.168.23.212:7051","","",client.Config())
	channel.AddPeer(peer)
	// Query summary info for the current channel.
	blockchainInfo, _ := channel.QueryInfo()
	fmt.Println(" the peer block height %d",blockchainInfo.Height)
	// Query detailed info of the current channel.
	/*bciAfterTx, err4 := channel.QueryInfo()
	check(err4,"_6")
	fmt.Println(" the peer block height %d",bciAfterTx.Height)*/
}
// fabric_local connects to the locally configured Fabric network, attaches
// an orderer and two peers to channel "roberttestchannel12", and submits an
// invoke transaction through chaincode "cc_endfinlshed". The commented-out
// sections below are kept as reference snippets for other query APIs.
// NOTE(review): check() only logs, so execution continues after failures —
// confirm this best-effort behavior is intended.
func fabric_local(){
	sdkOptions := fabricapi.Options{
		ConfigFile: "./fabricsdk/config_test.yaml",
	}
	sdk, err := fabricapi.NewSDK(sdkOptions)
	if err != nil {
		check(err, "SDK init failed")
	}
	session, err := sdk.NewPreEnrolledUserSession("org1", "Admin")
	if err != nil {
		check(err, "failed getting admin user session for org")
	}
	sc, err := sdk.NewSystemClient(session)
	if err != nil {
		check( err ,"NewSystemClient failed")
	}
	client := sc
	channel, err := client.NewChannel("roberttestchannel12")
	if err != nil {
		check( err , "NewChannel failed")
	}
	orderer, err := orderer.NewOrderer("grpc://192.168.23.212:7050", "", "", client.Config())
	if err != nil {
		check(err, "NewOrderer failed")
	}
	channel.AddOrderer(orderer)
	peer ,err := fabricapi.NewPeer("grpc://192.168.23.212:7051","","",client.Config())
	if err != nil {
		check(err, "NewOrderer failed")
	}
	channel.AddPeer(peer)
	peer188 ,err := fabricapi.NewPeer("grpc://172.16.10.188:7051","","",client.Config())
	if err != nil {
		check(err, "NewPeer failed")
	}
	channel.AddPeer(peer188)
	// Query detailed info of the current channel.
	/*bciAfterTx, err4 := channel.QueryInfo()
	check(err4,"_6")
	fmt.Println(" the peer block height %d",bciAfterTx.Height)*/
	// Query all channels the peer has joined.
	/*channels,err := client.QueryChannels(peer)
	for _ , responsechannel := range channels.Channels{
		fmt.Println(" the channel info is : %s " , responsechannel.ChannelId )
	}*/
	// Fetch block info by block number.
	/*block, err := channel.QueryBlock(23)
	fmt.Println(" The block info : %s " , hex.EncodeToString(block.Header.PreviousHash))*/
	// Fetch detailed block info by block hash (JS reference snippet).
	/*let blockinfobyhash = yield channel.queryBlockByHash(new Buffer("ec298dc1cd1f0e0a3f6d6e25b5796e7b5e4d668aeb6ec3a90b4aa6bb1a7f0c17","hex"),peer)
	console.info( JSON.stringify(blockinfobyhash ) )*/
	//blockinfo , err := channel.QueryBlockByHash(block.Header.PreviousHash)
	/*blockhash , err := hex.DecodeString("7376086e18a8ddbc40d318557c39987fd538c64340aa0df191e1062b935e147e")
	blockinfo , err := channel.QueryBlockByHash( blockhash )
	fmt.Println(" The block info : %s " , blockinfo.String())*/
	// Query installed chaincodes.
	/*installchaincodes , err := client.QueryInstalledChaincodes(peer)
	for _ , responseinstall := range installchaincodes.Chaincodes{
		fmt.Println(" chaincode info is : %s %s " , responseinstall.Version , responseinstall.Path )
	}*/
	// Query instantiated chaincodes.
	/*channel.SetPrimaryPeer(peer)
	instantChaincodes, err := channel.QueryInstantiatedChaincodes();
	for _ , responseinstant := range instantChaincodes.Chaincodes{
		fmt.Println(" chaincode info is : %s %s " , responseinstant.Version , responseinstant.Path )
	}*/
	// Fetch transaction details by transaction id (JS reference snippet).
	/* let resulttxinfo = yield channel.queryTransaction("56f51f9a54fb4755fd68c6c24931234a59340f7c98308374e9991d276d7d4a96", peer);
	console.info( JSON.stringify( resulttxinfo ) )
	*/
	/*tran , err := channel.QueryTransaction("56f51f9a54fb4755fd68c6c24931234a59340f7c98308374e9991d276d7d4a96")
	fmt.Println(" transcaion info is : %s " , tran.String())
	*/
	// Submit a read-only (query) transaction.
	/*targets := peerapi.PeersToTxnProcessors(channel.Peers())
	client.SetUserContext(session.Identity())
	request := apitxn.ChaincodeInvokeRequest{
		Targets: targets,
		ChaincodeID: "cc_endfinlshed",
		Fcn: "query",
		Args: [][]byte{[]byte("a")},
	}
	queryResponses, err := channel.QueryByChaincode(request)
	if err != nil {
		check(err,"QueryByChaincode failed %s")
	}
	for _ , parmbytes := range queryResponses{
		fmt.Println(" chaincode query info is : %s " , string(parmbytes))
	}*/
	// Submit a transaction that writes to the ledger.
	targets := peerapi.PeersToTxnProcessors(channel.Peers())
	transientData := make(map[string][]byte)
	request := apitxn.ChaincodeInvokeRequest{
		Targets: targets,
		Fcn: "invoke",
		Args: [][]byte{[]byte("a"),[]byte("b"),[]byte("1")},
		TransientMap: transientData,
		ChaincodeID: "cc_endfinlshed",
	}
	transactionProposalResponses, txnID, err := channel.SendTransactionProposal(request)
	fmt.Println(" tx id : %s",txnID)
	if err != nil {
		check(err," send transtion error ")
	}
	for _, v := range transactionProposalResponses {
		if v.Err != nil {
			check(v.Err, "endorser %s failed")
		}
	}
	tx, err := channel.CreateTransaction(transactionProposalResponses)
	transactionResponse, err := channel.SendTransaction(tx)
	fmt.Println(" srarch result %s " , transactionResponse )
	//eventHub, err := events.NewEventHub(client)
}
// getEventHub initilizes the event hub
// getEventHub builds an EventHub for the client and points it at the first
// org1 peer that has a URL configured, honoring the TLS server-name override
// when one is set. It fails when no suitable peer configuration exists.
func getEventHub( client fab.FabricClient) (fab.EventHub, error) {
	eventHub, err := events.NewEventHub(client)
	if err != nil {
		return nil, errors.WithMessage(err, "NewEventHub failed")
	}
	peersConfig, err := client.Config().PeersConfig("org1")
	if err != nil {
		return nil, errors.WithMessage(err, "PeersConfig failed")
	}
	for _, peerConf := range peersConfig {
		if peerConf.URL == "" {
			continue
		}
		hostOverride := ""
		if override, ok := peerConf.GRPCOptions["ssl-target-name-override"].(string); ok {
			hostOverride = override
		}
		eventHub.SetPeerAddr(peerConf.EventURL, peerConf.TLSCACerts.Path, hostOverride)
		return eventHub, nil
	}
	return nil, errors.New("event hub configuration not found")
}
/**
创建交提案
*/
/**
CreateAndSendTransactionProposal builds a chaincode invoke proposal and sends
it to the given endorsers, returning the endorsement responses and the
transaction ID. Any endorser-level failure is wrapped and returned as an error.
*/
func CreateAndSendTransactionProposal(channel afc.Channel,
	chainCodeID string,
	fcn string,
	args [][]byte,
	targets []apitxn.ProposalProcessor,
	transientData map[string][]byte) ([]*apitxn.TransactionProposalResponse, apitxn.TransactionID, error) {
	request := apitxn.ChaincodeInvokeRequest{
		Targets:      targets,
		Fcn:          fcn,
		Args:         args,
		TransientMap: transientData,
		ChaincodeID:  chainCodeID,
	}
	responses, txnID, err := channel.SendTransactionProposal(request)
	if err != nil {
		return nil, txnID, err
	}
	for _, resp := range responses {
		if resp.Err != nil {
			return nil, txnID, errors.Wrapf(resp.Err, "endorser %s failed", resp.Endorser)
		}
	}
	return responses, txnID, nil
}
// temp demonstrates manual identity construction: it loads the Admin
// enrollment certificate and private key from disk, builds a user session by
// hand, then queries channel "roberttestchannel" for its block height.
// NOTE(review): errors from NewSDK/NewSession/NewSystemClient are only
// logged, so later calls may dereference nil values — confirm intended.
func temp(){
	//fmt.Print("ddd")
	sdkOptions := fabricapi.Options{
		ConfigFile: "./fabricsdk/config_test.yaml",
	}
	sdk, err := fabricapi.NewSDK(sdkOptions)
	if err != nil {
		fmt.Println( errors.WithMessage(err, "SDK init failed"))
	}
	//session, err := sdk.NewPreEnrolledUserSession("Org1MSP" ,"Admin")
	countext,err := sdk.NewContext("Org1MSP")
	user := identityImpl.NewUser("Admin","Org1MSP");
	// Load the enrollment certificate from the local MSP directory.
	cer, err := ioutil.ReadFile("/project/opt_fabric/fabricconfig/crypto-config/peerOrganizations/org1.robertfabrictest.com/users/Admin@org1.robertfabrictest.com/msp/signcerts/Admin@org1.robertfabrictest.com-cert.pem")
	check(err,"1")
	// Load the raw private key from the MSP keystore.
	privatekey , err1 := ioutil.ReadFile("/project/opt_fabric/fabricconfig/crypto-config/peerOrganizations/org1.robertfabrictest.com/users/Admin@org1.robertfabrictest.com/msp/keystore/b031338f76290f089d330b064d4534202a49ae8d65ca5d266c377bc46812a884_sk")
	check(err1,"2")
	pfstring := hex.EncodeToString(privatekey)
	fmt.Println("dddd + " + pfstring )
	// Import the key bytes through the SDK's crypto suite.
	pk,err2 := sdk.CryptoSuiteProvider().GetKey(privatekey)
	check(err2,"2.1")
	user.SetEnrollmentCertificate(cer)
	user.SetPrivateKey(pk)
	session, err := sdk.NewSession(countext,user)
	if err != nil {
		fmt.Println( errors.WithMessage(err, "failed getting admin user session for org"))
	}
	sc, err := sdk.NewSystemClient(session)
	if err != nil {
		fmt.Println( errors.WithMessage(err, "NewSystemClient failed") )
	}
	channel, err_1 := sc.NewChannel("roberttestchannel" )
	check(err_1,"_1")
	orderer, err_2 := orderer.NewOrderer("grpc://192.168.23.212:7050", "", "", sc.Config())
	check(err_2,"_2")
	newpeer, err_3 := fabricapi.NewPeer("grpc://192.168.23.212:7051", "", "", sc.Config())
	check(err_3,"_3")
	err_4 :=channel.AddOrderer(orderer)
	check(err_4,"_4")
	channel.AddPeer(newpeer)
	channel.SetPrimaryPeer(newpeer)
	primaryPeer := channel.PrimaryPeer()
	_, err_5 := sc.QueryChannels(primaryPeer)
	check(err_5,"_5")
	/*for _, responseChannel := range response.Channels {
		if responseChannel.ChannelId == channel.Name() {
		}
	}*/
	// Query summary info (block height) for the channel.
	bciAfterTx, err4 := channel.QueryInfo()
	check(err4,"_6")
	fmt.Println(" the peer block height %d",bciAfterTx.Height)
}
func check(e error , num string) {
if e != nil {
fmt.Println( errors.WithMessage(e, " find a error "+num))
}
} |
package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strconv"
"strings"
tks "github.com/birchwood-langham/go-toolkit/io/strings"
pd "github.com/birchwood-langham/portdb-ws/protocol"
bapi "github.com/birchwood-langham/web-service-bootstrap/api"
"github.com/birchwood-langham/web-service-bootstrap/config"
"github.com/birchwood-langham/web-service-bootstrap/service"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
// CLI metadata surfaced by the bootstrap service when describing this
// web service to users.
const (
	usage = `TODO`
	shortDescription = `TODO`
	longDescription = `TODO`
)
// PortsWs is the client API web service object
type PortsWs struct {
	conn *grpc.ClientConn // connection to the Ports Domain Service, established by Init
}
// NewPortsWs creates a new Ports Webservice instance; the gRPC connection is
// established later by Init.
func NewPortsWs() (ws *PortsWs) {
	return new(PortsWs)
}
// Init initializes the web service and ensures everything is ready to run by
// dialing the Ports Domain Service over an insecure gRPC channel.
func (ws *PortsWs) Init() (err error) {
	log.Info("Connecting to Ports Domain Service")
	conn, dialErr := grpc.Dial(getServerAddress(), grpc.WithInsecure())
	ws.conn = conn
	return dialErr
}
// getServerAddress resolves the Ports Domain Service host:port from
// configuration, defaulting to localhost:9999.
// TODO: Use service discovery service such as consul, but for now we will get the settings from the configuration file
func getServerAddress() string {
	host := config.Get("ports-domain-service", "host").String("localhost")
	port := config.Get("ports-domain-service", "port").Int(9999)
	return host + ":" + strconv.Itoa(port)
}
// InitializeRoutes creates the routes supported by the Client API.
func (ws *PortsWs) InitializeRoutes(server *bapi.Server) {
	router := server.Router
	router.HandleFunc("/portsdomain/load", ws.loadPorts).Methods(http.MethodGet)
	router.HandleFunc("/portsdomain/search", ws.search).Methods(http.MethodGet)
}
// Cleanup performs the teardown required when the web-service terminates:
// it closes the gRPC client connection when one was established. Previously
// the TODO was never implemented and the connection leaked.
func (ws *PortsWs) Cleanup() error {
	if ws.conn == nil {
		return nil
	}
	return ws.conn.Close()
}
// Properties exposes the usage, short and long descriptions the bootstrap
// service uses when presenting this service to users.
func (ws *PortsWs) Properties() service.Properties {
	props := service.NewProperties(usage, shortDescription, longDescription)
	return props
}
// loadPorts handles GET /portsdomain/load: it opens the JSON file named by
// the "portsFile" query parameter, streams each port record to the Port
// Domain Service, and reports how many records were imported.
func (ws *PortsWs) loadPorts(w http.ResponseWriter, r *http.Request) {
	log.Debugf("Received load ports request: %v", r)
	importFile, err := extractFilePath(r.URL.Query())
	if err != nil {
		bapi.RespondWithError(w, http.StatusBadRequest, err.Error())
		return
	}
	f, err := os.Open(importFile)
	if err != nil {
		bapi.RespondWithError(w, http.StatusBadRequest, err.Error())
		return
	}
	// The opened file was previously never closed (fd leak per request);
	// ensure it is released when the handler returns.
	defer f.Close()
	count, err := decodeJSONFile(r.Context(), f, ws.savePort)
	if err != nil {
		bapi.RespondWithError(w, http.StatusFailedDependency, err.Error())
		return
	}
	bapi.RespondWithJSON(w, http.StatusOK, map[string]string{"message": fmt.Sprintf("Loading File: %s, %d records imported", importFile, count)})
}
// search handles GET /portsdomain/search: an exact lookup when "symbol" is
// supplied, otherwise a field-by-field match over the remaining parameters.
func (ws *PortsWs) search(w http.ResponseWriter, r *http.Request) {
	log.Debugf("Receive search ports request: %v", r)
	symbol, portdata, err := extractQueryData(r.URL.Query())
	if err != nil {
		bapi.RespondWithError(w, http.StatusBadRequest, fmt.Sprintf("could not query data with the provided input: %v", err))
		return
	}
	client := pd.NewPortDomainServiceClient(ws.conn)
	if symbol == "" {
		getMatchingPorts(w, r, client, portdata)
		return
	}
	getExactPort(w, r, client, symbol)
}
// getExactPort fetches the single port identified by symbol from the domain
// service and writes it back as a one-element port list.
func getExactPort(w http.ResponseWriter, r *http.Request, c pd.PortDomainServiceClient, symbol string) {
	port, err := c.GetPort(r.Context(), &pd.GetPortRequest{Symbol: symbol})
	if err != nil {
		bapi.RespondWithError(w, http.StatusBadRequest, fmt.Sprintf("could not find port with the provided symbol: %v", err))
		return
	}
	processProtoMessage(w, &pd.Ports{Ports: []*pd.Port{port}})
}
// getMatchingPorts asks the domain service for all ports matching the given
// port data filter and writes the result to the client.
func getMatchingPorts(w http.ResponseWriter, r *http.Request, c pd.PortDomainServiceClient, portdata *pd.PortData) {
	matches, err := c.FindPort(r.Context(), portdata)
	if err != nil {
		bapi.RespondWithError(w, http.StatusBadRequest, fmt.Sprintf("could not find port with the provided port data: %v", err))
		return
	}
	processProtoMessage(w, matches)
}
// processProtoMessage serializes a protobuf message to its JSON form and
// writes it as a 200 response; a marshalling failure becomes a 500.
func processProtoMessage(w http.ResponseWriter, m proto.Message) {
	var buf bytes.Buffer
	if err := (&jsonpb.Marshaler{}).Marshal(&buf, m); err != nil {
		bapi.RespondWithError(w, http.StatusInternalServerError, fmt.Sprintf("could not parse server response, please contact support: %v", err))
		return
	}
	bapi.RespondWithJSON(w, http.StatusOK, buf.String())
}
// savePort persists a single port via the Port Domain Service. A response
// carrying Success=false is converted into an error that lists the server's
// failure messages.
func (ws *PortsWs) savePort(ctx context.Context, port pd.Port) (err error) {
	client := pd.NewPortDomainServiceClient(ws.conn)
	resp, saveErr := client.SavePort(ctx, &port)
	if saveErr != nil {
		return saveErr
	}
	if resp.Success {
		return nil
	}
	return errors.New(makeErrorStr(resp.Errors))
}
// makeErrorStr joins the given messages into one newline-terminated string,
// one message per line; an empty input yields an empty string.
func makeErrorStr(msg []string) string {
	var sb strings.Builder
	for _, line := range msg {
		sb.WriteString(line)
		sb.WriteByte('\n')
	}
	return sb.String()
}
// extractQueryData parses the search query parameters. When "symbol" is
// present it alone identifies the port and portdata is nil; otherwise the
// remaining fields are assembled into a PortData filter. It is an error for
// every parameter to be empty.
func extractQueryData(values url.Values) (symbol string, portdata *pd.PortData, err error) {
	symbol = values.Get("symbol")
	name := values.Get("name")
	city := values.Get("city")
	country := values.Get("country")
	alias := tks.SplitAndTrimSpace(values.Get("alias"), ",")
	regions := tks.SplitAndTrimSpace(values.Get("regions"), ",")
	coords := toFloatArray(tks.SplitAndTrimSpace(values.Get("coords"), ","))
	province := values.Get("province")
	timezone := values.Get("timezone")
	unlocs := tks.SplitAndTrimSpace(values.Get("unlocs"), ",")
	code := values.Get("code")
	if !isValid(symbol, name, city, country, alias, regions, coords, province, timezone, unlocs, code) {
		// Fix: the format string previously had only 10 verbs for 11
		// arguments, so "code" was rendered as a %!(EXTRA string=...)
		// artifact; a code='%s' verb has been added.
		return "", nil, fmt.Errorf("Query parameters are not valid, cannot execute query: symbol='%s', name=`%s`, city='%s', country='%s', alias='%v', regions='%v', coords='%v', province='%s', timezone='%s', unlocs='%v', code='%s'",
			symbol,
			name,
			city,
			country,
			alias,
			regions,
			coords,
			province,
			timezone,
			unlocs,
			code,
		)
	}
	if symbol != "" {
		return symbol, nil, nil
	}
	portdata = &pd.PortData{
		Name:        name,
		City:        city,
		Country:     country,
		Alias:       alias,
		Regions:     regions,
		Coordinates: coords,
		Province:    province,
		Timezone:    timezone,
		Unlocs:      unlocs,
		Code:        code,
	}
	return
}
func extractFilePath(values url.Values) (importFile string, err error) {
importFile = values.Get("portsFile")
if importFile == "" {
err = errors.New("Import File has not been provided")
}
return
}
// setPortData assigns value to the scalar PortData field selected by key;
// unknown keys are silently ignored.
func setPortData(port *pd.Port, key string, value string) {
	data := port.Portdata
	switch key {
	case "name":
		data.Name = value
	case "city":
		data.City = value
	case "country":
		data.Country = value
	case "province":
		data.Province = value
	case "timezone":
		data.Timezone = value
	case "code":
		data.Code = value
	}
}
// decodeJSONFile streams a ports JSON document token by token. The expected
// shape is an object keyed by port symbol, each value being a port object of
// scalar fields plus string lists (alias/regions/unlocs) and a float list
// (coordinates). Every completed port object is handed to saveFn; the
// returned count is the number of ports successfully saved.
// NOTE(review): a saveFn failure only skips counting that port and parsing
// continues — confirm this best-effort behavior is intended.
func decodeJSONFile(ctx context.Context, f *os.File, saveFn func(context.Context, pd.Port) error) (count int, err error) {
	dec := json.NewDecoder(f)
	var t json.Token
	// Consume the opening delimiter of the top-level object.
	_, err = dec.Token()
	if err != nil {
		return
	}
	symbol := ""
	key := ""
	value := ""
	var port pd.Port
	var stringSlice []string
	var floatSlice []float64
	var isPort bool
	for {
		t, err = dec.Token()
		if err == io.EOF {
			// Normal termination: the whole document was consumed.
			return count, nil
		}
		if err != nil {
			return
		}
		switch t.(type) {
		case string:
			// String tokens are, in order: the port symbol, then alternating
			// field keys and values inside the port object.
			if symbol == "" {
				symbol = fmt.Sprint(t)
				continue
			}
			if key == "" {
				key = fmt.Sprint(t)
				continue
			}
			value = fmt.Sprint(t)
			switch key {
			case "alias", "regions", "unlocs":
				// List element: key stays set until the closing ']' resets it.
				stringSlice = append(stringSlice, value)
			default:
				setPortData(&port, key, value)
				key = ""
				value = ""
			}
		case float64:
			// Numeric tokens only occur inside the coordinates list.
			value = fmt.Sprint(t)
			pos, err := strconv.ParseFloat(value, 64)
			if err != nil {
				log.Errorf("Could not convert %s to a float value, defaulting co-ordinate to 0.0 for now: %v", value, err)
			}
			floatSlice = append(floatSlice, pos)
		case json.Delim:
			switch t {
			case json.Delim('{'): // Got the start of a new port
				port = pd.Port{
					Symbol:   symbol,
					Portdata: &pd.PortData{},
				}
				key = ""
				value = ""
				isPort = true
			case json.Delim('}'): // Got the end of a new port
				log.Infof("Sending port to Port Domain Service for persistence: %v", port)
				// NOTE(review): this err shadows the outer one; a failed save
				// skips the state reset below except isPort — confirm.
				if err := saveFn(ctx, port); err != nil {
					isPort = false
					continue
				}
				if isPort {
					count++
				}
				symbol = ""
				key = ""
				value = ""
				isPort = false
			case json.Delim('['): // Start of new list
				switch key {
				case "alias", "regions", "unlocs":
					stringSlice = make([]string, 0)
				case "coordinates":
					floatSlice = make([]float64, 0, 2)
				}
			case json.Delim(']'): // End of the list
				switch key {
				case "coordinates":
					port.Portdata.Coordinates = floatSlice
				case "alias":
					port.Portdata.Alias = stringSlice
				case "regions":
					port.Portdata.Regions = stringSlice
				case "unlocs":
					port.Portdata.Unlocs = stringSlice
				}
				key = ""
			case json.Delim(','):
				// it's safe to ignore
			default: // something else
				log.Errorf("We can't identify the token: %v", t)
			}
		}
	}
}
// toFloatArray parses each string into a float64. Entries that fail to parse
// keep the zero value, so the output always has the same length as the input.
func toFloatArray(input []string) (output []float64) {
	output = make([]float64, len(input))
	for i, s := range input {
		if f, parseErr := strconv.ParseFloat(s, 64); parseErr == nil {
			output[i] = f
		}
	}
	return
}
// isValid reports whether at least one search criterion was supplied; a
// query with every parameter empty cannot be executed.
func isValid(symbol string, name string, city string, country string, alias []string, regions []string, coordinates []float64, province string, timezone string, unlocs []string, code string) bool {
	for _, s := range []string{symbol, name, city, country, province, timezone, code} {
		if s != "" {
			return true
		}
	}
	return len(alias) > 0 || len(regions) > 0 || len(coordinates) > 0 || len(unlocs) > 0
}
|
// Package migration provides an operatorkit resource that migrates awsconfig CRs
// to reference the default credential secret if they do not already.
// It can be safely removed once all awsconfig CRs reference a credential secret.
//
// Latest changes:
//
// * v24: Added migration code to fill spec.Cluster.Scaling.{Min,Max} values
// when they are zero. When all tenant clusters are migrated to at least v24,
// migrations in this file can be removed.
//
package migration
import (
"context"
"fmt"
"reflect"
"strings"
providerv1alpha1 "github.com/giantswarm/apiextensions/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/apiextensions/pkg/clientset/versioned"
"github.com/giantswarm/microerror"
"github.com/giantswarm/micrologger"
"github.com/giantswarm/operatorkit/controller/context/reconciliationcanceledcontext"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/giantswarm/aws-operator/service/controller/legacy/v26/key"
)
// name identifies this operatorkit resource; the credential constants name
// the default secret applied to AWSConfig CRs that reference none.
const (
	name = "migrationv26"
	awsConfigNamespace = "default"
	credentialSecretDefaultNamespace = "giantswarm"
	credentialSecretDefaultName = "credential-default"
)
// Config holds the dependencies required to construct a Resource.
type Config struct {
	G8sClient versioned.Interface
	Logger micrologger.Logger
}
// Resource migrates AWSConfig CRs to current schema defaults (credential
// secret, hosted zones, scaling bounds) during reconciliation.
type Resource struct {
	g8sClient versioned.Interface
	logger micrologger.Logger
}
// New validates the given configuration and returns a migration Resource.
func New(config Config) (*Resource, error) {
	if config.G8sClient == nil {
		return nil, microerror.Maskf(invalidConfigError, "%T.G8sClient must not be empty", config)
	}
	if config.Logger == nil {
		return nil, microerror.Maskf(invalidConfigError, "%T.Logger must not be empty", config)
	}
	return &Resource{
		g8sClient: config.G8sClient,
		logger:    config.Logger,
	}, nil
}
// Name returns the resource name used by the operatorkit controller.
func (r *Resource) Name() string {
	return name
}
// EnsureCreated fetches the latest version of the AWSConfig CR behind obj,
// applies the in-place spec migrations, and, when anything changed, updates
// the CR and cancels the current reconciliation so the next loop observes
// the migrated state.
func (r *Resource) EnsureCreated(ctx context.Context, obj interface{}) error {
	var customObject providerv1alpha1.AWSConfig
	var oldSpec providerv1alpha1.AWSConfigSpec
	{
		o, err := key.ToCustomObject(obj)
		if err != nil {
			return microerror.Mask(err)
		}
		// We have to always fetch the latest version of the resource in order to
		// update it below using the latest resource version.
		m, err := r.g8sClient.ProviderV1alpha1().AWSConfigs(o.GetNamespace()).Get(o.GetName(), metav1.GetOptions{})
		if err != nil {
			return microerror.Mask(err)
		}
		customObject = *m.DeepCopy()
		// Keep a pristine copy of the spec to detect whether migration
		// changed anything.
		oldSpec = *m.Spec.DeepCopy()
		err = r.migrateSpec(ctx, &customObject.Spec)
		if err != nil {
			return microerror.Mask(err)
		}
	}
	{
		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("updating CR with version %#q", customObject.ResourceVersion))
		if reflect.DeepEqual(customObject.Spec, oldSpec) {
			r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("CR with version %#q is already up to date", customObject.ResourceVersion))
		} else {
			_, err := r.g8sClient.ProviderV1alpha1().AWSConfigs(customObject.GetNamespace()).Update(&customObject)
			if err != nil {
				return microerror.Mask(err)
			}
			r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("updated CR with version %#q", customObject.ResourceVersion))
			r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
			// Cancel so downstream resources run against the updated CR on
			// the next reconciliation loop.
			reconciliationcanceledcontext.SetCanceled(ctx)
		}
	}
	return nil
}
// EnsureDeleted is intentionally a no-op: this resource only migrates the
// CR spec on create/update and has nothing to clean up on deletion.
func (r *Resource) EnsureDeleted(ctx context.Context, obj interface{}) error {
	return nil
}
// migrateSpec fills in fields that older AWSConfig CRs are missing, in
// place. Three independent defaults are applied:
//   - credential secret name/namespace when the name is empty,
//   - hosted zone names (API/Etcd/Ingress) derived from the API domain
//     when the HostedZones struct is entirely zero,
//   - cluster scaling min/max set to the current worker count when both
//     are zero.
func (r *Resource) migrateSpec(ctx context.Context, spec *providerv1alpha1.AWSConfigSpec) error {
	if spec.AWS.CredentialSecret.Name == "" {
		r.logger.LogCtx(ctx, "level", "debug", "message", "CR is missing credential secret")
		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("setting CR credential secret to %#q in namespace %#q", credentialSecretDefaultName, credentialSecretDefaultNamespace))
		spec.AWS.CredentialSecret.Namespace = credentialSecretDefaultNamespace
		spec.AWS.CredentialSecret.Name = credentialSecretDefaultName
	}
	// Zero-value comparison detects a CR that predates hosted zone support.
	if reflect.DeepEqual(providerv1alpha1.AWSConfigSpecAWSHostedZones{}, spec.AWS.HostedZones) {
		r.logger.LogCtx(ctx, "level", "debug", "message", "CR is missing hosted zone names")
		apiDomain := spec.Cluster.Kubernetes.API.Domain
		zone, err := zoneFromAPIDomain(apiDomain)
		if err != nil {
			return microerror.Mask(err)
		}
		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("setting CR hosted zone names to %#q", zone))
		// All three zones share the same derived name.
		spec.AWS.HostedZones.API.Name = zone
		spec.AWS.HostedZones.Etcd.Name = zone
		spec.AWS.HostedZones.Ingress.Name = zone
	}
	if spec.Cluster.Scaling.Min == 0 && spec.Cluster.Scaling.Max == 0 {
		r.logger.LogCtx(ctx, "level", "debug", "message", "CR is missing cluster scaling configuration")
		// Pin scaling to the currently configured worker count.
		nWorkers := len(spec.AWS.Workers)
		spec.Cluster.Scaling.Min = nWorkers
		spec.Cluster.Scaling.Max = nWorkers
	}
	return nil
}
// zoneFromAPIDomain derives a hosted zone name from a Kubernetes API
// domain by dropping its first three labels. The domain must consist of at
// least five dot-separated parts; otherwise a malformedDomainError is
// returned.
func zoneFromAPIDomain(apiDomain string) (string, error) {
	labels := strings.Split(apiDomain, ".")
	if len(labels) < 5 {
		return "", microerror.Maskf(malformedDomainError, "API domain must have at least 5 parts, got %d for domain %q", len(labels), apiDomain)
	}
	zone := strings.Join(labels[3:], ".")
	return zone, nil
}
|
package config
// Reader abstracts loading of a Config.
type Reader interface {
	// Read returns the Config identified by path, or an error if it cannot
	// be loaded or parsed.
	Read(path string) (*Config, error)
}
|
/*
* Copyright (C) 2017-Present Pivotal Software, Inc. All rights reserved.
*
* This program and the accompanying materials are made available under
 * the terms of the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package instance_test
import (
"code.cloudfoundry.org/cli/plugin/pluginfakes"
"errors"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/instance"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/instance/instancefakes"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/serviceutil"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/serviceutil/serviceutilfakes"
)
// OperationRunner spec: NewAuthenticatedOperationRunner must obtain an
// access token from the CLI connection, resolve the service instance's
// management parameters, and run the supplied operation with both.
var _ = Describe("OperationRunner", func() {
	const testAccessToken = "someaccesstoken"
	var (
		operationRunner             instance.OperationRunner
		fakeCliConnection           *pluginfakes.FakeCliConnection
		fakeOperation               *instancefakes.FakeOperation
		output                      string
		fakeServiceInstanceResolver *serviceutilfakes.FakeServiceInstanceResolver
		errMessage                  string
		testError                   error
		serviceInstanceName         string
		err                         error
	)
	BeforeEach(func() {
		// Fresh fakes for every spec: the resolver answers with a fixed
		// management URL and the operation identifies itself as a service
		// broker operation.
		fakeCliConnection = &pluginfakes.FakeCliConnection{}
		fakeOperation = &instancefakes.FakeOperation{}
		fakeOperation.IsServiceBrokerOperationReturns(true)
		fakeServiceInstanceResolver = &serviceutilfakes.FakeServiceInstanceResolver{}
		fakeServiceInstanceResolver.GetManagementParametersReturns(serviceutil.ManagementParameters{
			Url: "https://spring-cloud-broker.some.host.name/cli/instances/guid",
		}, nil)
		errMessage = "failure is not an option"
		testError = errors.New(errMessage)
		serviceInstanceName = "serviceinstance"
	})
	JustBeforeEach(func() {
		// Construct the runner after all BeforeEach overrides, then invoke
		// the operation under test and capture its result.
		operationRunner = instance.NewAuthenticatedOperationRunner(fakeCliConnection, fakeServiceInstanceResolver)
		output, err = operationRunner.RunOperation(
			serviceInstanceName,
			fakeOperation)
	})
	It("should attempt to obtain an access token", func() {
		Expect(fakeCliConnection.AccessTokenCallCount()).To(Equal(1))
	})
	Context("when the access token is not available", func() {
		BeforeEach(func() {
			fakeCliConnection.AccessTokenReturns("", errors.New("some access token error"))
		})
		It("should return a suitable error", func() {
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError("Access token not available: some access token error"))
		})
	})
	Context("when the access token is available", func() {
		BeforeEach(func() {
			// The "bearer " prefix must be stripped before the token is
			// handed to the operation (asserted further down).
			fakeCliConnection.AccessTokenReturns("bearer "+testAccessToken, nil)
		})
		Context("when the admin parameters are not retrieved correctly", func() {
			BeforeEach(func() {
				fakeServiceInstanceResolver.GetManagementParametersReturns(serviceutil.ManagementParameters{}, errors.New("some error retrieving the admin parameters"))
			})
			It("should return a suitable error", func() {
				Expect(err).To(HaveOccurred())
				Expect(err).To(MatchError("some error retrieving the admin parameters"))
			})
		})
		Context("when the admin parameters are retrieved correctly", func() {
			It("invoke the operation with the correct parameters", func() {
				Expect(fakeOperation.RunCallCount()).To(Equal(1))
				serviceInstanceAdminParameters, accessToken := fakeOperation.RunArgsForCall(0)
				Expect(serviceInstanceAdminParameters).To(Equal(
					serviceutil.ManagementParameters{
						Url: "https://spring-cloud-broker.some.host.name/cli/instances/guid",
					}))
				Expect(accessToken).To(Equal(testAccessToken))
			})
			Context("when the operation returns some output", func() {
				BeforeEach(func() {
					fakeOperation.RunReturns("some output", nil)
				})
				It("should return the output", func() {
					Expect(output).To(Equal("some output"))
					Expect(err).NotTo(HaveOccurred())
				})
			})
			Context("when the operation returns an error", func() {
				BeforeEach(func() {
					fakeOperation.RunReturns("", testError)
				})
				It("should return the error", func() {
					Expect(err).To(Equal(testError))
				})
			})
		})
	})
})
|
package command
import (
"errors"
"fmt"
"strings"
"sync"
"awesome-dragon.science/go/goGoGameBot/pkg/log"
)
const noAdmin = 0
type prefixFunc func(string) (string, bool)
// NewManager creates a Manager with the provided logger and messager. The prefixes vararg sets the prefixes for the
// commands. Note that the prefix is matched EXACTLY. Meaning that a trailing space is required for any "normal" prefix.
// A built-in "help" command is registered immediately; because the Manager was just created this can only fail on a
// programming error, hence the panic.
func NewManager(logger *log.Logger, pFunc prefixFunc, prefixes ...string) *Manager {
	m := &Manager{Logger: logger, commands: make(map[string]Command), commandPrefixes: prefixes, prefixFunc: pFunc}
	if err := m.AddCommand("help", 0, m.helpImpl, "prints command help"); err != nil {
		panic(err)
	}
	return m
}
// Manager is a frontend that manages commands and the firing thereof. It is intended to be a completely self contained
// system for managing commands on arbitrary lines
type Manager struct {
	cmdMutex        sync.RWMutex       // guards commands
	commands        map[string]Command // keyed by lowercased command name (see internalAddCommand)
	commandPrefixes []string           // prefixes checked by stripPrefix; not mutex-guarded
	prefixFunc      prefixFunc         // optional custom prefix matcher, consulted before commandPrefixes
	Logger          *log.Logger
}
// AddPrefix adds a prefix to the command manager. It is not safe for concurrent use
func (m *Manager) AddPrefix(name string) {
	m.commandPrefixes = append(m.commandPrefixes, name)
}
// RemovePrefix removes the first occurrence of the given prefix from the
// Manager. It is not safe for concurrent use. If the prefix does not exist
// the method is a no-op.
func (m *Manager) RemovePrefix(name string) {
	for i, pfx := range m.commandPrefixes {
		if pfx != name {
			continue
		}
		m.commandPrefixes = append(m.commandPrefixes[:i], m.commandPrefixes[i+1:]...)
		return
	}
}
// SetPrefixes sets the prefixes the Manager will respond to to the given slice, all other prefixes are removed.
// Not safe for concurrent use (the prefix slice is not mutex-guarded).
func (m *Manager) SetPrefixes(prefixes []string) { m.commandPrefixes = prefixes }
// AddCommand adds the callback as a simple (SingleCommand) to the Manager. It is safe for concurrent use. It returns
// various errors (duplicate name, name containing spaces -- see internalAddCommand).
func (m *Manager) AddCommand(name string, requiresAdmin int, callback Callback, help string) error {
	return m.internalAddCommand(&SingleCommand{
		adminRequired: requiresAdmin,
		callback:      callback,
		help:          help,
		// Name is lowercased up front; internalAddCommand also lowercases
		// the map key, keeping all lookups case-insensitive.
		name: strings.ToLower(name),
	})
}
// RemoveCommand removes the command referenced by the given string. If the command does not exist, RemoveCommand
// returns an error. The lookup is case-insensitive, matching how commands are stored.
func (m *Manager) RemoveCommand(name string) error {
	if m.getCommandByName(name) == nil {
		return fmt.Errorf("command %q does not exist on %v", name, m)
	}
	m.Logger.Debugf("removing command %s", name)
	m.cmdMutex.Lock()
	defer m.cmdMutex.Unlock()
	// The commands map is keyed by the lowercased name (see
	// internalAddCommand), so the key must be lowercased here too --
	// previously RemoveCommand("Help") passed the existence check above but
	// silently failed to delete the entry.
	delete(m.commands, strings.ToLower(name))
	return nil
}
// internalAddCommand adds the actual Command to the manager,
// it is used by both of the exported command addition methods.
// It rejects names containing spaces (they could never be matched by
// ParseLine's space-split) and duplicate names; the map key is lowercased
// so lookups are case-insensitive.
func (m *Manager) internalAddCommand(cmd Command) error {
	if strings.Contains(cmd.Name(), " ") {
		return errors.New("commands cannot contain spaces")
	}
	if m.getCommandByName(cmd.Name()) != nil {
		return fmt.Errorf("command %q already exists on %v", cmd.Name(), m)
	}
	m.Logger.Debugf("adding command %s", cmd)
	m.cmdMutex.Lock()
	m.commands[strings.ToLower(cmd.Name())] = cmd
	m.cmdMutex.Unlock()
	return nil
}
// getCommandByName returns the Command registered under the given name
// (case-insensitively), or nil when no such command exists. Safe for
// concurrent use.
func (m *Manager) getCommandByName(name string) Command {
	m.cmdMutex.RLock()
	defer m.cmdMutex.RUnlock()
	// A missing key yields the interface zero value, i.e. nil.
	return m.commands[strings.ToLower(name)]
}
// AddSubCommand adds the given callback as a subcommand to the given root name. If the root name does not exist on
// the Manager, it is automatically added. Otherwise, if it DOES exist but is of the wrong type, AddSubCommand returns
// an error
func (m *Manager) AddSubCommand(rootName, name string, requiresAdmin int, callback Callback, help string) error {
	if m.getCommandByName(rootName) == nil {
		// Auto-create the root as an empty SubCommandList; it has no
		// callback or help of its own and requires no admin level.
		err := m.internalAddCommand(&SubCommandList{
			SingleCommand: SingleCommand{adminRequired: noAdmin, callback: nil, help: "", name: strings.ToLower(rootName)},
			subCommands:   make(map[string]Command),
		})
		if err != nil {
			return err
		}
	}
	var (
		cmd *SubCommandList
		ok  bool
	)
	// The root may pre-exist as a plain SingleCommand, in which case it
	// cannot hold subcommands.
	if cmd, ok = m.getCommandByName(rootName).(*SubCommandList); !ok {
		return fmt.Errorf("command %s is not a command that can have subcommands", rootName)
	}
	return cmd.addSubcommand(&SingleCommand{name: name, adminRequired: requiresAdmin, callback: callback, help: help})
}
// RemoveSubCommand removes the command referenced by name on rootName. If
// rootName is not a command with sub commands, or name does not exist on
// rootName, RemoveSubCommand errors.
func (m *Manager) RemoveSubCommand(rootName, name string) error {
	cmd := m.getCommandByName(rootName)
	if cmd == nil {
		return fmt.Errorf("command %q does not exist on %v", rootName, m)
	}
	realCmd, ok := cmd.(*SubCommandList)
	if !ok {
		return fmt.Errorf("command %q is not a command that has subcommands", rootName)
	}
	return realCmd.removeSubcmd(name)
}
// stripPrefix removes a recognised command prefix from line, returning the
// remainder and whether any prefix matched. The custom prefixFunc, if set,
// is consulted first; otherwise the configured prefixes are matched
// case-insensitively.
//
// NOTE(review): the slice below uses len(pfx) as a byte offset on the
// original line after comparing ToUpper'd copies -- correct for ASCII
// prefixes, but ToUpper can change byte length for some Unicode input;
// confirm prefixes are expected to be ASCII.
func (m *Manager) stripPrefix(line string) (string, bool) {
	if m.prefixFunc != nil {
		if res, ok := m.prefixFunc(line); ok {
			return res, ok
		}
	}
	var (
		hasPrefix bool
		out       string
	)
	for _, pfx := range m.commandPrefixes {
		if strings.HasPrefix(strings.ToUpper(line), strings.ToUpper(pfx)) {
			hasPrefix = true
			out = line[len(pfx):]
			break
		}
	}
	return out, hasPrefix
}
// ParseLine checks the given string for a valid command. If it finds one, it fires that command.
// Lines from the terminal skip prefix stripping; other lines must carry a
// recognised prefix or they are silently ignored. Unknown command names are
// only reported when typed at the terminal.
func (m *Manager) ParseLine(line string, fromTerminal bool, source, target string, util DataUtil) {
	if line == "" {
		return
	}
	if !fromTerminal {
		var ok bool
		if line, ok = m.stripPrefix(line); !ok {
			return
		}
	}
	lineSplit := strings.Split(line, " ")
	if len(lineSplit) < 1 {
		return
	}
	// First token selects the command; the rest become its arguments.
	cmdName := lineSplit[0]
	cmd := m.getCommandByName(cmdName)
	if cmd == nil {
		if fromTerminal {
			m.Logger.Infof("unknown command %q", cmdName)
		}
		return
	}
	data := &Data{
		FromTerminal: fromTerminal,
		Args:         lineSplit[1:],
		// OriginalArgs keeps the line as received (after prefix stripping).
		OriginalArgs: line,
		Source:       source,
		Target:       target,
		Manager:      m,
		util:         util,
	}
	cmd.Fire(data)
}
// String implements the stringer interface, listing the names of all
// registered commands.
func (m *Manager) String() string {
	m.cmdMutex.RLock()
	names := make([]string, 0, len(m.commands))
	for name := range m.commands {
		names = append(names, name)
	}
	m.cmdMutex.RUnlock()
	return fmt.Sprintf("command.Manager containing commands: %s", strings.Join(names, ", "))
}
// helpImpl backs the built-in "help" command. With no arguments it lists
// all registered commands; with one argument it prints that command's help;
// with two it prints the help of the named subcommand, falling back to the
// root command's help when the subcommand does not exist. Output goes to
// the logger for terminal input and to a source notice otherwise.
func (m *Manager) helpImpl(data *Data) {
	var toSend string
	if len(data.Args) == 0 {
		// just dump the available commands
		var commandNames []string
		m.cmdMutex.RLock()
		for _, c := range m.commands {
			commandNames = append(commandNames, c.Name())
		}
		m.cmdMutex.RUnlock()
		toSend = fmt.Sprintf("Available commands are %s", strings.Join(commandNames, ", "))
	} else {
		// specific help on a command requested
		var cmd Command
		if cmd = m.getCommandByName(data.Args[0]); cmd == nil {
			// Unknown command: say nothing.
			return
		}
		if realCmd, ok := cmd.(*SubCommandList); ok && len(data.Args) > 1 && realCmd.findSubcommand(data.Args[1]) != nil {
			subCmd := realCmd.findSubcommand(data.Args[1])
			toSend = fmt.Sprintf("%s: %s", strings.Join(data.Args[:2], " "), subCmd.Help())
		} else {
			toSend = fmt.Sprintf("%s: %s", data.Args[0], cmd.Help())
		}
	}
	if data.FromTerminal {
		m.Logger.Info(toSend)
	} else {
		data.SendSourceNotice(toSend)
	}
}
|
package main
import (
"google.golang.org/api/calendar/v3"
"time"
)
// Cal bundles a Google Calendar service client with the ID of the calendar
// it operates on.
type Cal struct {
	Srv *calendar.Service // authenticated Calendar API client
	Id  string            // target calendar ID
}
// GetEvents lists upcoming events on the calendar: deleted events are
// excluded, recurring events are expanded into single instances, results
// are ordered by start time, and only events starting from now onward are
// returned. Errors are logged and passed back to the caller.
func (cal *Cal) GetEvents() (*calendar.Events, error) {
	events, err := cal.Srv.Events.
		List(cal.Id).
		ShowDeleted(false).
		SingleEvents(true).
		OrderBy("startTime").
		TimeMin(time.Now().Format(time.RFC3339)).
		Do()
	if err != nil {
		logger.Printf("Runtime Error => unable to get events: %v\n", err)
		return nil, err
	}
	return events, nil
}
// InsertEvents creates a new event on the calendar from the given summary,
// time range and description (see GenerateEvent for the event layout).
// Errors are logged and returned.
func (cal *Cal) InsertEvents(
	summary string,
	startTime time.Time,
	endTime time.Time,
	description string,
) error {
	event := GenerateEvent(summary, startTime, endTime, description)
	_, err := cal.Srv.Events.Insert(cal.Id, event).Do()
	if err != nil {
		logger.Printf("Unable to insert event into calendar. %v\n", err)
		return err
	}
	return nil
}
// UpdateEvents replaces the event identified by eventId with a freshly
// generated event built from the given summary, time range and description.
// Errors are logged and returned.
func (cal *Cal) UpdateEvents(
	eventId string,
	summary string,
	startTime time.Time,
	endTime time.Time,
	description string,
) error {
	event := GenerateEvent(summary, startTime, endTime, description)
	_, err := cal.Srv.Events.Update(cal.Id, eventId, event).Do()
	if err != nil {
		logger.Printf("Unable to update event. %v\n", err)
		return err
	}
	return nil
}
// GenerateEvent builds a *calendar.Event from the given summary, time range
// and description. An "[Updated at ...]" stamp holding the current local
// time is appended to the description, and both times are rendered as
// RFC 3339 strings as the Calendar API expects.
func GenerateEvent(summary string,
	startTime time.Time,
	endTime time.Time,
	description string,
) *calendar.Event {
	stamp := time.Now().Format("2006-01-02 15:04")
	return &calendar.Event{
		Summary: summary,
		Start: &calendar.EventDateTime{
			DateTime: startTime.Format(time.RFC3339),
		},
		End: &calendar.EventDateTime{
			DateTime: endTime.Format(time.RFC3339),
		},
		Description: description + "\n" + "[Updated at " + stamp + "]",
	}
}
// ParseEventToMap indexes the given events by summary: the key is the event
// summary and the value is the event ID (used by UpdateCalendar to decide
// between update and insert). Events sharing a summary overwrite each
// other, keeping only the last ID.
func ParseEventToMap(events *calendar.Events) map[string]string {
	mp := make(map[string]string)
	for _, item := range events.Items {
		mp[item.Summary] = item.Id
	}
	return mp
}
// UpdateCalendar syncs the given contests onto the calendar: a contest
// whose title matches an existing event summary updates that event, any
// other contest is inserted as a new event. The package-level counters
// updateContestState / insertContestState track how many of each occurred.
// The first error aborts the sync and is returned.
func (cal *Cal) UpdateCalendar(contests []Contest) error {
	existEvents, err := cal.GetEvents()
	if err != nil {
		logger.Printf("GetEvents: %v\n", err)
		return err
	}
	// summary -> event ID for all events currently on the calendar.
	eventMap := ParseEventToMap(existEvents)
	for _, c := range contests {
		value, ok := eventMap[c.Title]
		if ok {
			// Known contest: update the existing event by its ID.
			err = cal.UpdateEvents(value, c.Title, c.StartTime, c.EndTime, c.Description)
			if err != nil {
				logger.Printf("UpdateEvents: %v\n", err)
				return err
			}
			updateContestState++
			continue
		}
		// New contest: create a fresh event.
		err := cal.InsertEvents(c.Title, c.StartTime, c.EndTime, c.Description)
		if err != nil {
			logger.Printf("InsertEvents: %v\n", err)
			return err
		}
		insertContestState++
	}
	return nil
}
|
package morse
import (
"bytes"
"fmt"
"strings"
"unicode"
)
// Morse translates text to and from International Morse code. charMap maps
// each supported character to its dot/dash sequence and codeMap is the
// exact inverse mapping.
type Morse struct {
	charMap map[rune]string
	codeMap map[string]rune
}

// New builds a Morse translator covering A-Z, digits, space and a handful
// of punctuation characters. It returns an error only when the internal
// letter and code tables have fallen out of sync (a programming error).
func New() (*Morse, error) {
	letters := "ABCDEFGHIJKLMNOPQRSTUVWXYZ .,?/@1234567890"
	codes := []string{".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....",
		"..", ".---", "-.-", ".-..", "--", "-.", "---", ".--.",
		"--.-", ".-.", "...", "-", "..-", "...-", ".--", "-..-",
		"-.--", "--..", " ", ".-.-.-", "--..--", "..--..", "-..-.", ".--.-.",
		".----", "..---", "...--", "....-", ".....", "-....", "--...", "---..", "----.", "-----",
	}
	// letters is pure ASCII, so len(letters) (bytes) equals its rune count
	// and can be compared directly with the number of codes.
	if len(letters) != len(codes) {
		return nil, fmt.Errorf("len(letters) != len(codes): %d, %d", len(letters), len(codes))
	}
	// Build both directions in one pass (the original used two identical
	// loops over the same data).
	charMap := make(map[rune]string, len(letters))
	codeMap := make(map[string]rune, len(codes))
	for i, r := range letters {
		charMap[r] = codes[i]
		codeMap[codes[i]] = r
	}
	return &Morse{charMap: charMap, codeMap: codeMap}, nil
}
// Encode converts src to Morse code. The input is upper-cased first; each
// character's code is looked up in charMap and codes are separated by a
// single space. Characters without a mapping contribute an empty code.
func (m *Morse) Encode(src string) string {
	var b strings.Builder
	for i, r := range strings.ToUpper(src) {
		if i > 0 {
			b.WriteRune(' ')
		}
		b.WriteString(m.charMap[r])
	}
	return b.String()
}
// Decode converts Morse code back to text. Codes are expected to be
// separated by single spaces; two consecutive spaces decode to a literal
// space in the output. Unknown codes decode to the zero rune.
//
// NOTE(review): the end-of-input branch compares the byte index i with
// len(src)-1, which only matches when the final rune is one byte wide --
// fine for the ASCII dot/dash/space alphabet this expects.
func (m *Morse) Decode(src string) string {
	var res []rune
	seq := bytes.Buffer{}
	// prevSpace starts true so the first non-space rune takes the
	// "start of a new code" branch.
	prevSpace := true
	for i, r := range src {
		isSpace := unicode.IsSpace(r)
		if isSpace && prevSpace {
			// Second space in a row: an encoded literal space.
			res = append(res, ' ')
		} else if isSpace {
			// End of a code sequence: translate and reset.
			code := string(seq.Bytes())
			res = append(res, m.codeMap[code])
			seq.Reset()
		} else if i == len(src)-1 {
			// Last byte of the input: flush the final code.
			seq.WriteRune(r)
			code := string(seq.Bytes())
			res = append(res, m.codeMap[code])
			seq.Reset()
		} else if prevSpace {
			// First symbol of a new code.
			seq.Reset()
			seq.WriteRune(r)
		} else {
			seq.WriteRune(r)
		}
		prevSpace = isSpace
	}
	return string(res)
}
|
package main
import "fmt"
// main demonstrates how fmt prints a slice of int values versus a slice of
// *int pointers (the latter shows addresses).
func main() {
	fmt.Println("Hello world!")

	values := []int{1, 2, 3}
	fmt.Println("asdsda ", values)

	a, b, c := 1, 2, 3
	pointers := []*int{&a, &b, &c}
	fmt.Println("asad", pointers)
}
|
package mempid
import (
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"os"
"strconv"
"syscall"
"unsafe"
)
// ProgMutex implements a single-instance guard on Windows by storing the
// process ID in a named shared-memory section derived from AppName.
type ProgMutex struct {
	AppName string          // application name the share name is derived from (via genKey)
	key     *uint16         // UTF-16 name of the file-mapping object
	handle  syscall.Handle  // open mapping handle while locked; 0/InvalidHandle otherwise
}
const (
	// NOTE(review): PAGE_READWRITE, FILE_MAP_READ and FILE_MAP_WRITE are
	// not referenced in this file -- the code below uses the syscall
	// package's own constants instead; consider removing these.
	PAGE_READWRITE = 0x0004
	FILE_MAP_READ  = 0x0004
	FILE_MAP_WRITE = 0x0008
	// int_size is the size in bits of int on this platform, used as the
	// byte length of the mapping (large enough to hold a PID either way).
	int_size = strconv.IntSize
)
// genKey derives the shared-memory object name from the application name:
// the hex-encoded SHA-1 of name, converted to a UTF-16 pointer for the
// Windows API. The conversion error is ignored because a hex string can
// never contain a NUL byte.
func genKey(name string) *uint16 {
	sum := sha1.Sum([]byte(name))
	// NOTE(review): leftover debug print -- should be removed together with
	// the fmt import (fmt is not used anywhere else in this file).
	fmt.Println(name)
	share, _ := syscall.UTF16PtrFromString(hex.EncodeToString(sum[:])) // name of the share memory
	return share
}
// GetPid reads the process ID published in the named shared-memory section
// by a running instance. It opens its own read-only mapping, reads the PID,
// and returns 0 when no process with that PID can be found. The mapping
// view is unmapped before returning; its handle is closed via defer.
func (pm *ProgMutex) GetPid() (int, error) {
	handle, err := syscall.CreateFileMapping(syscall.InvalidHandle, nil, syscall.PAGE_READONLY, 0, int_size, pm.key)
	if err != nil {
		return 0, err
	}
	defer syscall.CloseHandle(handle)
	ptr, err := syscall.MapViewOfFile(handle, syscall.FILE_MAP_READ, 0, 0, 0)
	if err != nil {
		return 0, err
	}
	// The mapped memory holds a single int: the publisher's PID.
	pid := *(*int)(unsafe.Pointer(ptr))
	if _, err := os.FindProcess(pid); err != nil {
		// Stale entry: the process is gone.
		pid = 0
	}
	return pid, syscall.UnmapViewOfFile(ptr)
}
// LockProg publishes the current process ID into the named shared-memory
// section so other instances can detect this one via GetPid. It fails if
// this ProgMutex already holds a lock.
//
// The mapping handle is only stored on pm once the view has been mapped
// successfully; previously a MapViewOfFile failure left pm.handle set,
// permanently wedging subsequent LockProg calls behind the "unlock first"
// guard and leaking the handle.
func (pm *ProgMutex) LockProg() error {
	if pm.handle != 0 && pm.handle != syscall.InvalidHandle {
		return errors.New("you should unlock first")
	}
	handle, err := syscall.CreateFileMapping(syscall.InvalidHandle, nil, syscall.PAGE_READWRITE, 0, int_size, pm.key)
	if err != nil {
		return err
	}
	ptr, err := syscall.MapViewOfFile(handle, syscall.FILE_MAP_WRITE, 0, 0, 0)
	if err != nil {
		// Release the mapping so a later LockProg can retry.
		syscall.CloseHandle(handle)
		return err
	}
	pm.handle = handle
	// Write our PID into the shared memory for GetPid to read.
	*(*int)(unsafe.Pointer(ptr)) = os.Getpid()
	return syscall.UnmapViewOfFile(ptr)
}
// UnLockProg releases the shared-memory handle acquired by LockProg.
// Calling it without a held lock is a harmless no-op.
func (pm *ProgMutex) UnLockProg() {
	if pm.handle == 0 || pm.handle == syscall.InvalidHandle {
		return
	}
	syscall.CloseHandle(pm.handle)
	pm.handle = 0
}
|
package schema_test
import (
"io/ioutil"
"os"
"path"
"testing"
"github.com/syncromatics/kafmesh/internal/schema"
"github.com/stretchr/testify/assert"
)
// Test_ProtobufDescribeSchema verifies that DescribeProtobufSchema walks a
// directory tree of .proto files and returns the set of fully-qualified,
// lowercased message names it finds.
func Test_ProtobufDescribeSchema(t *testing.T) {
	baseDir, err := ioutil.TempDir("", "Test_ProtobufDescribeSchema")
	if err != nil {
		t.Fatal(err)
	}
	// Remove the whole temporary tree when the test finishes; previously it
	// was leaked on disk.
	defer os.RemoveAll(baseDir)

	tmpDir := path.Join(baseDir, "protos")
	err = os.MkdirAll(tmpDir, os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	package1 := path.Join(tmpDir, "package1")
	err = os.MkdirAll(package1, os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	// Two proto files in the same package: one plain message and one that
	// imports a well-known type.
	err = ioutil.WriteFile(path.Join(package1, "test.proto"), []byte(`syntax ="proto3";
package package1.sub1;
message Test {
string name = 1;
}`), os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	err = ioutil.WriteFile(path.Join(tmpDir, "package1", "test2.proto"), []byte(`syntax ="proto3";
package package1.sub1;
import "google/protobuf/timestamp.proto";
message Test2 {
string serial = 1;
google.protobuf.Timestamp time = 2;
}`), os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	messages, err := schema.DescribeProtobufSchema(tmpDir)
	if err != nil {
		t.Fatal(err)
	}
	// Message names come back lowercased and fully qualified.
	assert.Equal(t, map[string]struct{}{
		"package1.sub1.test":  struct{}{},
		"package1.sub1.test2": struct{}{},
	}, messages)
}
|
package qiwi
import (
"fmt"
"time"
)
// PaymentType identifies the payment method variant carried in
// PaymentMethod.Type.
type PaymentType string

// Payment method type values accepted by the QIWI API.
//
// NOTE(review): these constants are untyped strings; declaring them as
// PaymentType (e.g. `CardPayment PaymentType = "CARD"`) would let the
// compiler check assignments -- confirm no caller relies on them being
// plain strings before changing.
const (
	CardPayment      = "CARD"
	TokenPayment     = "TOKEN"
	ApplePayPayment  = "APPLE_PAY_TOKEN"
	GooglePayPayment = "GOOGLE_PAY_TOKEN"
)
// Payment is a payment session against the QIWI partner API. It carries
// both request fields (amount, method, customer) and response fields (the
// embedded Reply status and QIWIError). Create one with New.
type Payment struct {
	token   string `json:"-"` // Authtorisation token
	apiLink string `json:"-"` // APILink sets payment gateway domain, no trailing slash

	PublicKey string `json:"publicKey,omitempty"` // Merchant identification key String +
	SiteID    string `json:"siteId,omitempty"`
	BillID    string `json:"billId,omitempty"`    // Unique invoice identifier in merchant's system. It must be generated on your side with any means. It could be any sequence of digits and letters. Also you might use underscore _ and dash -. If not used, for each URL opening a new invoice is created. String(200) -
	PaymentID string `json:"paymentId,omitempty"` // Payment operation unique identifier in RSP's system
	// NOTE(review): "CamptureID" looks like a typo for "CaptureID"; the
	// json tag is correct, but renaming the field would touch all
	// in-package references.
	CamptureID string `json:"captureId,omitempty"` // Capture operation unique identifier in RSP's system
	RefundID   string `json:"refundId,omitempty"`  // Refund operation unique identifier in RSP's system

	Amount        Amount        `json:"amount,omitempty"`        // Amount of customer order rounded down to 2 digits (always in rubles)
	PaymentMethod PaymentMethod `json:"paymentMethod,omitempty"` // Payment method
	Customer      Customer      `json:"customer,omitempty"`      // Information about the customer
	Creation      QIWITime      `json:"creationDateTime,omitempty"`
	NotifyDate    QIWITime      `json:"createddatetime,omitempty"` // Time used in Notify
	Expiration    QIWITime      `json:"expirationDateTime,omitempty"`
	Comment       string        `json:"comment,omitempty"`    // Comment to the invoice
	SuccessURL    string        `json:"successUrl,omitempty"` // URL for redirect from the QIWI form in case of successful payment. URL should be within the merchant's site.
	PayURL        string        `json:"payUrl,omitempty"`     // Payment page on QIWI site
	// extras[cf1] Extra field to add any information to invoice data URL-encoded string
	// extras[cf2] Extra field to add any information to invoice data URL-encoded string
	// extras[cf3] Extra field to add any information to invoice data URL-encoded string
	// extras[cf4] Extra field to add any information to invoice data URL-encoded string
	// extras[cf5] Extra field to add any information to invoice data URL-encoded string

	// Embedded response carriers: Reply holds the operation status and
	// QIWIError any error payload (see checkErrors).
	Reply
	QIWIError
}
// type PaymentMethod struct {
// //CardMethod
// ApplePayMethod
// //GooglePayMethod
// }
// PaymentMethod describes how an invoice is paid. Which fields are relevant
// depends on Type; unused fields are omitted from the JSON payload.
type PaymentMethod struct {
	Type PaymentType `json:"type"` // Payment method type
	// "CARD" — payment card
	// "TOKEN" — card payment token
	// "APPLE_PAY_TOKEN" — encrypted Apple Pay payment token
	// "GOOGLE_PAY_TOKEN" — encrypted Google Pay payment token

	PAN string `json:"pan,omitempty"` // optional string(19) Card number. For type=CARD only
	ExpiryDate string `json:"expiryDate,omitempty"`
	// optional string(5)
	// Card expiry date (MM/YY). For type=CARD only
	CVV string `json:"cvv2,omitempty"`
	// optional string(4)
	// Card CVV2/CVC2. For type=CARD only
	Name string `json:"holderName,omitempty"`
	// optional string(26)
	// Customer card holder (Latin letters). For type=CARD only
	Token ApplePayToken `json:"paymentData,omitempty"` // TODO OR paymentData
	// optional
	// Payment token. For type=TOKEN, APPLE_PAY_TOKEN, GOOGLE_PAY_TOKEN only
	GooglePaymentToken string `json:"paymentToken,omitempty"`
	T3DS               T3DS   `json:"external3dSec,omitempty"`
	// optional object
	// Payment data from Apple Pay or Google Pay.
}
// T3DS carries external 3-D Secure data extracted from a decrypted
// Apple Pay or Google Pay token.
type T3DS struct {
	Type string `json:"type"`
	// required string
	// Payment data type: APPLE_PAY or GOOGLE_PAY.
	OnlinePaymentCrypto string `json:"onlinePaymentCryptogram,omitempty"`
	// optional string
	// Contents of "onlinePaymentCryptogram" field from decrypted Apple payment token. For type=APPLE_PAY only.
	Cryptogram string `json:"cryptogram,omitempty"`
	// optional string
	// Contents of "cryptogram" from decrypted Google payment token. For type=GOOGLE_PAY only.
	ECIIndicator string `json:"eciIndicator,omitempty"`
	// optional string(2)
	// ECI indicator. It should be sent if it is received in Apple (Google) payment token. Otherwise, do not send this parameter.
}
// Customer holds optional identification details of the paying customer.
type Customer struct {
	Account string `json:"account,omitempty"` // customer ID in the merchant's system
	Email   string `json:"email,omitempty"`
	Phone   string `json:"phone,omitempty"`
}
// Reply is the common response envelope embedded in Payment; it carries
// the operation status.
type Reply struct {
	Status Status `json:"status,omitempty"`
}

// Status describes the state of a payment operation as reported by QIWI.
type Status struct {
	Value  string `json:"value,omitempty"`           // status code
	Date   string `json:"changedDateTime,omitempty"` // when the status last changed
	Reason string `json:"reason,omitempty"`          // explanation for the current status
}
// QIWIError holds error reply from a carrier. A non-empty ErrCode marks a
// failed request (see checkErrors).
type QIWIError struct {
	Service     string    `json:"serviceName"` // Service name produced the error
	ErrCode     string    `json:"errorCode"`   // Error code
	Description string    `json:"description"` // Error description for RSP
	ErrMessage  string    `json:"userMessage"` // Error description for Customer
	ErrDate     time.Time `json:"dateTime"`    // Error date and time
	TraceID     string    `json:"traceId"`     // Error Log unique ID
}
// New creates a card payment session bound to the given invoice (billID)
// and merchant site (siteID). token is the authorization token; apiLink is
// the payment gateway base URL without a trailing slash -- when empty, the
// production QIWI endpoint is used.
//
// Parameter names follow Go initialism style (billID/siteID); the rename is
// caller-safe since Go arguments are positional.
func New(billID, siteID, token, apiLink string) *Payment {
	if apiLink == "" {
		apiLink = "https://api.qiwi.com/partner/v1/sites" // no trailing slash
	}
	return &Payment{SiteID: siteID, BillID: billID, apiLink: apiLink, token: token}
}
// checkErrors maps an API-level error payload onto a Go error: if the
// transport succeeded (err == nil) but the reply carries an error code, a
// wrapped ErrReplyWithError is returned instead. A transport error is
// passed through unchanged.
func (p *Payment) checkErrors(err error) error {
	if err != nil {
		return err
	}
	if p.ErrCode == "" {
		return nil
	}
	return fmt.Errorf("[QIWI] RSP Response %w: %s (%s)", ErrReplyWithError, p.Description, p.ErrCode)
}
|
package issuer_test
import (
"context"
"time"
"github.com/golang-jwt/jwt/v4"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"google.golang.org/protobuf/types/known/wrapperspb"
system_proto "github.com/kumahq/kuma/api/system/v1alpha1"
"github.com/kumahq/kuma/pkg/core"
"github.com/kumahq/kuma/pkg/core/resources/apis/system"
core_store "github.com/kumahq/kuma/pkg/core/resources/store"
"github.com/kumahq/kuma/pkg/core/secrets/cipher"
secret_manager "github.com/kumahq/kuma/pkg/core/secrets/manager"
secret_store "github.com/kumahq/kuma/pkg/core/secrets/store"
"github.com/kumahq/kuma/pkg/core/user"
. "github.com/kumahq/kuma/pkg/plugins/authn/api-server/tokens/issuer"
"github.com/kumahq/kuma/pkg/plugins/resources/memory"
)
// User token issuer spec: covers signing-key rotation, expiry checking and
// revocation of user tokens. core.Now and jwt.TimeFunc are frozen per spec
// so expiry can be simulated deterministically.
var _ = Describe("User token issuer", func() {
	var issuer UserTokenIssuer
	var validator UserTokenValidator
	var store core_store.ResourceStore
	var signingKeyManager SigningKeyManager
	now := time.Now()
	BeforeEach(func() {
		// In-memory store + no-op cipher keep the whole setup hermetic.
		store = memory.NewStore()
		secretManager := secret_manager.NewGlobalSecretManager(secret_store.NewSecretStore(store), cipher.None())
		signingKeyManager = NewSigningKeyManager(secretManager)
		issuer = NewUserTokenIssuer(signingKeyManager)
		validator = NewUserTokenValidator(NewSigningKeyAccessor(secretManager), NewTokenRevocations(secretManager))
		Expect(signingKeyManager.CreateDefaultSigningKey()).To(Succeed())
		// Freeze time for both the issuer and the jwt library.
		core.Now = func() time.Time {
			return now
		}
		jwt.TimeFunc = func() time.Time {
			return now
		}
	})
	AfterEach(func() {
		// Restore real clocks so other suites are unaffected.
		core.Now = time.Now
		jwt.TimeFunc = time.Now
	})
	It("should support rotation", func() {
		// given
		id := user.User{
			Name:   "john.doe@example.com",
			Groups: []string{"users"},
		}
		// when
		token1, err := issuer.Generate(id, time.Second*60)
		Expect(err).ToNot(HaveOccurred())
		// then
		_, err = validator.Validate(token1)
		Expect(err).ToNot(HaveOccurred())
		// when new signing key with higher serial number is created
		err = signingKeyManager.CreateSigningKey(2)
		Expect(err).ToNot(HaveOccurred())
		// and a new token is generated
		token2, err := issuer.Generate(id, time.Second*60)
		Expect(err).ToNot(HaveOccurred())
		// then all tokens are valid because 2 signing keys are present in the system
		_, err = validator.Validate(token1)
		Expect(err).ToNot(HaveOccurred())
		_, err = validator.Validate(token2)
		Expect(err).ToNot(HaveOccurred())
		// when first signing key is deleted
		err = store.Delete(context.Background(), system.NewGlobalSecretResource(), core_store.DeleteBy(SigningKeyResourceKey(DefaultSerialNumber)))
		Expect(err).ToNot(HaveOccurred())
		// then old tokens are no longer valid
		_, err = validator.Validate(token1)
		Expect(err).To(MatchError(`there is no signing key with serial number 1. GlobalSecret of name "user-token-signing-key-1" is not found. If signing key was rotated, regenerate the token`))
		// and new token is valid because new signing key is present
		_, err = validator.Validate(token2)
		Expect(err).ToNot(HaveOccurred())
	})
	It("should validate out expired tokens", func() {
		// given
		id := user.User{
			Name:   "john.doe@example.com",
			Groups: []string{"users"},
		}
		token, err := issuer.Generate(id, 60*time.Second)
		Expect(err).ToNot(HaveOccurred())
		// when: advance the frozen clock just past the token's TTL
		now = now.Add(60*time.Second + 1*time.Second)
		_, err = validator.Validate(token)
		// then
		Expect(err.Error()).To(ContainSubstring("could not parse token: token is expired"))
	})
	It("should revoke token", func() {
		// given valid token
		id := user.User{
			Name:   "john.doe@example.com",
			Groups: []string{"users"},
		}
		token, err := issuer.Generate(id, 60*time.Second)
		Expect(err).ToNot(HaveOccurred())
		_, err = validator.Validate(token)
		Expect(err).ToNot(HaveOccurred())
		// when id of the token is added to revocation list
		// (the claim ID is extracted without signature verification first)
		c := &jwt.RegisteredClaims{}
		_, _, err = new(jwt.Parser).ParseUnverified(token, c)
		Expect(err).ToNot(HaveOccurred())
		sec := &system.GlobalSecretResource{
			Spec: &system_proto.Secret{
				Data: &wrapperspb.BytesValue{
					Value: []byte(c.ID),
				},
			},
		}
		err = store.Create(context.Background(), sec, core_store.CreateBy(RevocationsSecretKey))
		Expect(err).ToNot(HaveOccurred())
		// then
		_, err = validator.Validate(token)
		Expect(err).To(MatchError("token is revoked"))
	})
})
|
package deployment_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/cloudfoundry/bosh-micro-cli/deployment"
)
// Deployment spec: NetworksSpec must render a job's networks as a
// name-keyed map of {type, ip, cloud_properties}, pairing each JobNetwork's
// static IP with the matching deployment Network's type.
var _ = Describe("Deployment", func() {
	var (
		deployment Deployment
	)
	Describe("NetworksSpec", func() {
		Context("when the deployment has networks", func() {
			BeforeEach(func() {
				// Four deployment networks, of which the "bosh" job
				// references three; the unreferenced "fake" network must not
				// appear in the spec.
				deployment = Deployment{
					Networks: []Network{
						{
							Name: "fake-network-name",
							Type: "dynamic",
						},
						{
							Name: "fake-manual-network-name",
							Type: "manual",
						},
						{
							Name: "vip",
							Type: "vip",
						},
						{
							Name: "fake",
							Type: "dynamic",
						},
					},
					Jobs: []Job{
						{
							Name: "bosh",
							Networks: []JobNetwork{
								{
									Name:      "vip",
									StaticIPs: []string{"1.2.3.4"},
								},
								{
									Name:      "fake-network-name",
									StaticIPs: []string{"5.6.7.8"},
								},
								{
									Name:      "fake-manual-network-name",
									StaticIPs: []string{"5.6.7.9"},
								},
							},
						},
					},
				}
			})
			It("is a map of the networks in spec form", func() {
				Expect(deployment.NetworksSpec("bosh")).To(Equal(map[string]interface{}{
					"fake-network-name": map[string]interface{}{
						"type":             "dynamic",
						"ip":               "5.6.7.8",
						"cloud_properties": map[string]interface{}{},
					},
					"fake-manual-network-name": map[string]interface{}{
						"type":             "manual",
						"ip":               "5.6.7.9",
						"cloud_properties": map[string]interface{}{},
					},
					"vip": map[string]interface{}{
						"type":             "vip",
						"ip":               "1.2.3.4",
						"cloud_properties": map[string]interface{}{},
					},
				}))
			})
		})
		Context("when the deployment does not have networks", func() {
			BeforeEach(func() {
				deployment = Deployment{
					Jobs: []Job{
						{
							Name: "bosh",
						},
					},
					Networks: []Network{},
				}
			})
			It("is an empty map", func() {
				Expect(deployment.NetworksSpec("bosh")).To(Equal(map[string]interface{}{}))
			})
		})
		Context("when the deployment does not have a job with requested name", func() {
			BeforeEach(func() {
				deployment = Deployment{}
			})
			It("returns an error", func() {
				networksSpec, err := deployment.NetworksSpec("bosh")
				Expect(networksSpec).To(Equal(map[string]interface{}{}))
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("Could not find job with name: bosh"))
			})
		})
	})
})
|
package pgsql
// PostgreSQL `timestamp` read/write natively supported with:
// `time.Time`
// `string`
// `[]byte`
//
// The blank declaration below only asserts at compile time that the
// `native` type exists in this package; it introduces no usable name.
type _ native
|
package g2db
import (
"context"
"fmt"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/spf13/cast"
"github.com/atcharles/gof/v2/g2util"
"github.com/atcharles/gof/v2/json"
)
// Pub/sub message and channel name fragments; each is combined with the
// configured application name via formatWithAppName before use.
const (
	redisSubChannel     = "Sub"         // suffix of the pub/sub channel name
	redisSubDelMemCache = "DelMemCache" // message name: delete specific mem-cache keys
	redisSubDelMemAll   = "DelMemAll"   // message name: flush the whole mem cache
)
type (
	//RedisSubHandlerFunc handles one pub/sub message; payload is the raw JSON
	//carried in the redisSubPayload envelope (never nil, possibly empty).
	RedisSubHandlerFunc func(payload []byte)
	// redisObj provides memoized redis clients plus an app-namespaced pub/sub
	// layer used for cross-instance cache invalidation. Tagged fields are
	// populated by dependency injection.
	redisObj struct {
		Logger g2util.LevelLogger `inject:""`
		Config *g2util.Config     `inject:""`
		//Go *g2util.GoPool `inject:""`
		Cache *cacheMem `inject:""`
		// mu guards isClose and client creation; mp caches clients keyed by db index.
		mu          sync.RWMutex
		mp          sync.Map
		closeSub    chan struct{} // closed exactly once to stop subAction
		isClose     bool
		subHandlers map[string]RedisSubHandlerFunc
	}
	//redisSubPayload is the JSON envelope published on the sub channel;
	//Name selects the handler, Data is the opaque handler payload.
	redisSubPayload struct {
		Name string           `json:"name,omitempty"`
		Data *json.RawMessage `json:"data,omitempty"`
	}
)
// AfterShutdown is the container's shutdown hook; it stops the redis
// subscription loop.
func (r *redisObj) AfterShutdown() {
	r.closeRedisSub()
}

// closeRedisSub closes the subscription stop channel exactly once;
// subsequent calls are no-ops guarded by isClose under the mutex.
func (r *redisObj) closeRedisSub() {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.isClose {
		return
	}
	r.isClose = true
	close(r.closeSub)
}

// PubDelCache broadcasts a request to delete the given mem-cache keys on all
// subscribed instances.
func (r *redisObj) PubDelCache(keys []string) error { return r.Pub(r.pubDelMemName(), keys) }

// PubDelMemAll broadcasts a request to flush the entire mem cache on all
// subscribed instances.
func (r *redisObj) PubDelMemAll() error { return r.Pub(r.formatWithAppName(redisSubDelMemAll), nil) }
// Pub serializes data, wraps it in a redisSubPayload envelope keyed by name,
// and publishes it on the application's subscription channel.
func (r *redisObj) Pub(name string, data interface{}) error {
	encoded, err := json.Marshal(data)
	if err != nil {
		return err
	}
	raw := json.RawMessage(encoded)
	msg, err := json.Marshal(&redisSubPayload{Name: name, Data: &raw})
	if err != nil {
		return err
	}
	return r.client().Publish(context.Background(), r.subChannel(), msg).Err()
}
// SubHandle registers handler for messages published under name.
// NOTE(review): the map is written without holding r.mu — safe only while
// registration happens before/inside Subscribe, as it does here; confirm no
// concurrent callers exist.
func (r *redisObj) SubHandle(name string, handler RedisSubHandlerFunc) { r.subHandlers[name] = handler }

// Subscribe resets handler registration, wires the built-in cache
// invalidation handlers, and starts the redis subscription; fatal on failure.
func (r *redisObj) Subscribe() {
	r.subHandlers = make(map[string]RedisSubHandlerFunc)
	r.closeSub = make(chan struct{})
	// Built-in handlers: targeted cache-key deletion and full flush.
	r.SubHandle(r.pubDelMemName(), r.Cache.RedisSubDelCache())
	r.SubHandle(r.formatWithAppName(redisSubDelMemAll), r.Cache.RedisSubDelMemAll())
	if e := r.subscribe(); e != nil {
		r.Logger.Fatalln(e)
	}
}
// subAction consumes messages from sub until closeSub is closed. Each message
// is decoded as a redisSubPayload and dispatched to the handler registered
// under its Name; unknown names are silently ignored.
func (r *redisObj) subAction(sub *redis.PubSub) {
	for {
		select {
		case <-r.closeSub:
			// Shutdown: close the subscription and every cached client.
			_ = sub.Close()
			r.Logger.Debugf("[SUB] Redis关闭订阅")
			r.mp.Range(func(_, value interface{}) bool { _ = value.(*redis.Client).Close(); return true })
			return
		case msg, ok := <-sub.Channel():
			if !ok || msg == nil {
				// Channel drained/closed by the client; stop consuming.
				r.Logger.Warnf("[SUB] 接收订阅消息失败")
				return
			}
			// A subscription message arrived: decode the envelope, then dispatch.
			r.Logger.Debugf("[SUB] 接收到订阅消息: %s\n", msg.Payload)
			payload := new(redisSubPayload)
			if e := json.Unmarshal([]byte(msg.Payload), payload); e != nil {
				r.Logger.Errorf("[SUB] 无效的订阅内容: %s\n", msg.Payload)
				continue
			}
			// Substitute an empty RawMessage for a missing Data so handlers
			// always receive a non-nil slice.
			payloadData := payload.Data
			if payloadData == nil {
				_d1 := make(json.RawMessage, 0)
				payloadData = &_d1
			}
			if handler, _ok := r.subHandlers[payload.Name]; _ok {
				handler(*payloadData)
			}
		}
	}
}
// subscribe opens the pub/sub channel and verifies the subscription with a 3s
// receive timeout before spawning the consumer goroutine.
func (r *redisObj) subscribe() (err error) {
	ctx := context.Background()
	channelName := r.subChannel()
	sub := r.client().Subscribe(ctx, channelName)
	// Wait for the subscription confirmation (or fail fast).
	_, err = sub.ReceiveTimeout(ctx, time.Second*3)
	if err != nil {
		err = fmt.Errorf("订阅Redis失败: %s", err.Error())
		return
	}
	r.Logger.Debugf("[SUB] Redis订阅成功: %s", channelName)
	go r.subAction(sub)
	return
}
// Client returns a cached redis client for the given db index (default 0).
func (r *redisObj) Client(db ...int) *redis.Client { return r.client(db...) }

// client returns the memoized *redis.Client for db, dialing and pinging it on
// first use. Panics if the initial ping fails.
// NOTE(review): all access is serialized by r.mu, so the extra sync.Map is
// redundant — a plain map would suffice.
func (r *redisObj) client(dbs ...int) *redis.Client {
	db := 0
	if len(dbs) > 0 {
		db = dbs[0]
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	if v, ok := r.mp.Load(db); ok {
		return v.(*redis.Client)
	}
	cl := redis.NewClient(r.newOption(db))
	if e := cl.Ping(context.Background()).Err(); e != nil {
		panic(e)
	}
	r.mp.Store(db, cl)
	return cl
}
// newOption builds redis.Options for db from the "redis" config section;
// a "{host}" placeholder in the configured host is replaced by global.host.
func (r *redisObj) newOption(db int) *redis.Options {
	cfg := r.Config.Viper().GetStringMapString("redis")
	svAddr := r.Config.Viper().GetString("global.host")
	return &redis.Options{
		Addr:         strings.Replace(cfg["host"], "{host}", svAddr, -1),
		Password:     cfg["pwd"],
		DB:           db,
		MaxRetries:   cast.ToInt(cfg["max_retries"]),
		MinIdleConns: cast.ToInt(cfg["min_idle_connections"]),
		MaxConnAge:   cast.ToDuration(cfg["max_conn_age_seconds"]) * time.Second,
	}
}
// subChannel is the pub/sub channel name, namespaced by application name.
// Delegates to formatWithAppName instead of duplicating the "%s_%s" format.
func (r *redisObj) subChannel() string {
	return r.formatWithAppName(redisSubChannel)
}
// pubDelMemName is the message name for targeted cache-key deletion,
// namespaced by application name. Delegates to formatWithAppName instead of
// duplicating the "%s_%s" format.
func (r *redisObj) pubDelMemName() string {
	return r.formatWithAppName(redisSubDelMemCache)
}
// formatWithAppName prefixes s with the configured application name,
// separated by an underscore.
func (r *redisObj) formatWithAppName(s string) string {
	return r.Config.Viper().GetString("name") + "_" + s
}
|
package main
import (
"context"
"log"
"os/exec"
)
// main runs `dpkg -l hoge` and logs its output; if the command exits
// non-zero, the command's captured stderr is logged instead.
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c := exec.CommandContext(ctx, "dpkg", "-l", "hoge")
	out, err := c.Output()
	if exitErr, ok := err.(*exec.ExitError); ok {
		log.Println(string(exitErr.Stderr))
		return
	}
	// Fix: out is []byte — convert to string so the output is printed as
	// text rather than a slice of byte values.
	log.Println(string(out))
	log.Println(err)
}
|
package stores
import (
"github.com/jakewitcher/pos-server/graph/model"
"strconv"
)
// StoreLocationEntity is the database row shape for a store's address.
type StoreLocationEntity struct {
	Id      int64  `json:"id"`
	Street  string `json:"street"`
	City    string `json:"city"`
	State   string `json:"state"`
	ZipCode string `json:"zip_code"`
}

// StoreEntity is the database row shape for a store; LocationId references
// a StoreLocationEntity row.
type StoreEntity struct {
	Id         int64  `json:"id"`
	Name       string `json:"name"`
	LocationId int64  `json:"location_id"`
}
// ToDTO maps the location row to the GraphQL model.
// NOTE(review): Id is not carried over — confirm model.StoreLocation has no
// ID field or that dropping it is intentional.
func (l *StoreLocationEntity) ToDTO() *model.StoreLocation {
	return &model.StoreLocation{
		Street:  l.Street,
		City:    l.City,
		State:   l.State,
		ZipCode: l.ZipCode,
	}
}
// ToDTO maps the store row plus its already-loaded location row into the
// GraphQL model; the numeric id is rendered as a decimal string.
func (s *StoreEntity) ToDTO(location *StoreLocationEntity) *model.Store {
	id := strconv.FormatInt(s.Id, 10)
	return &model.Store{
		ID:       id,
		Name:     s.Name,
		Location: location.ToDTO(),
	}
}
|
package employees
import (
"encoding/json"
"fmt"
"net/http"
"github.com/akshayvinodpunnath/webserver/db"
"github.com/akshayvinodpunnath/webserver/models"
)
// GetEmployees loads all employees ordered by employeeNumber descending.
// Fixes over the original: the query error, each row's Scan error and the
// final rows.Err() are now checked instead of silently ignored, and the
// local no longer shadows the imported db package. Errors are reported via
// fmt (this package's existing diagnostic style) and yield a nil or partial
// slice rather than garbage rows.
func GetEmployees() []dbModels.Employee {
	var employees []dbModels.Employee
	conn := db.DbConn()
	defer conn.Close()
	rows, err := conn.Query("SELECT * FROM employees ORDER BY employeeNumber DESC")
	if err != nil {
		fmt.Println("employees query failed:", err)
		return employees
	}
	defer rows.Close()
	for rows.Next() {
		e := new(dbModels.Employee)
		// Skip rows that fail to scan instead of appending zero values.
		if err := rows.Scan(&e.EmployeeNumber, &e.LastName, &e.FirstName, &e.Extension, &e.Email, &e.OfficeCode, &e.ReportsTo, &e.JobTitle); err != nil {
			fmt.Println("employees scan failed:", err)
			continue
		}
		employees = append(employees, *e)
	}
	if err := rows.Err(); err != nil {
		fmt.Println("employees iteration failed:", err)
	}
	return employees
}
func EmployeeMethods(w http.ResponseWriter, r *http.Request) {
/*
switch r.Method {
case "GET":
*/
fmt.Println("Test")
var employee []dbModels.Employee
employee = GetEmployees()
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(employee)
/*
default:
w.WriteHeader(http.StatusPreconditionFailed)
fmt.Fprintf(w, "Sorry, only GET and POST methods are supported.")
}
*/
}
|
package manifest_test
import (
. "github.com/onsi/ginkgo"
"github.com/spf13/afero"
. "github.com/onsi/gomega"
"github.com/simonjohansson/go-linter/manifest"
"github.com/simonjohansson/go-linter/model"
)
// Tests for manifest.NewManifestReader/ParseManifest: .halfpipe.io is read
// from an afero filesystem and parsed into a model.Manifest.
// NOTE(review): errors from afero.WriteFile are ignored in the fixtures
// (harmless with MemMapFs but worth asserting), and the raw-string YAML
// indentation below looks flattened by extraction — verify nested keys
// (repo.uri etc.) are properly indented in the real file.
var _ = Describe("RequiredFiles", func() {
	var (
		fs afero.Fs
	)
	BeforeEach(func() {
		// Fresh in-memory filesystem per spec; nothing touches disk.
		fs = afero.NewMemMapFs()
	})
	It("Returns error if .halfpipe.io is missing", func() {
		manifestReader := manifest.NewManifestReader(fs)
		_, err := manifestReader.ParseManifest("/path/to/repo")
		Expect(err).To(HaveOccurred())
	})
	It("Returns empty manifest is .halfpipe.io is empty", func() {
		manifestReader := manifest.NewManifestReader(fs)
		afero.WriteFile(fs, "/path/to/repo/.halfpipe.io", []byte(""), 0644)
		manifest, err := manifestReader.ParseManifest("/path/to/repo")
		Expect(err).To(Not(HaveOccurred()))
		Expect(manifest).To(Equal(model.Manifest{}))
	})
	It("Parses empty .halfpipe.io to empty manifest", func() {
		manifestReader := manifest.NewManifestReader(fs)
		content := ``
		afero.WriteFile(fs, "/path/to/repo/.halfpipe.io", []byte(content), 0644)
		manifest, err := manifestReader.ParseManifest("/path/to/repo")
		Expect(err).To(Not(HaveOccurred()))
		Expect(manifest).To(Equal(model.Manifest{}))
	})
	It("Parses minimal .halfpipe.io to minimal manifest", func() {
		manifestReader := manifest.NewManifestReader(fs)
		content := `
team: engineering-enablement
`
		afero.WriteFile(fs, "/path/to/repo/.halfpipe.io", []byte(content), 0644)
		manifest, err := manifestReader.ParseManifest("/path/to/repo")
		Expect(err).To(Not(HaveOccurred()))
		Expect(manifest).To(Equal(model.Manifest{
			Team: "engineering-enablement",
		}))
	})
	It("Parses .halfpipe.io to manifest", func() {
		manifestReader := manifest.NewManifestReader(fs)
		content := `
team: engineering-enablement
repo:
uri: https://....
private_key: asdf
tasks:
- task: run
script: ./test.sh
image: openjdk:8-slim
- task: docker
username: ((docker.username))
password: ((docker.password))
repository: simonjohansson/half-pipe-linter
- task: deploy
space: test
api: https://api.europe-west1.cf.gcp.springernature.io
- task: run
script: ./asdf.sh
image: openjdk:8-slim
vars:
A: asdf
B: 1234
- task: deploy
space: test
api: https://api.europe-west1.cf.gcp.springernature.io
vars:
VAR1: asdf1234
VAR2: 9876
`
		afero.WriteFile(fs, "/path/to/repo/.halfpipe.io", []byte(content), 0644)
		manifest, err := manifestReader.ParseManifest("/path/to/repo")
		Expect(err).To(Not(HaveOccurred()))
		// Deploy tasks are expected to be filled with default credentials,
		// org and manifest path by the parser.
		Expect(manifest).To(Equal(model.Manifest{
			Team: "engineering-enablement",
			Repo: model.Repo{
				Uri:        "https://....",
				PrivateKey: "asdf",
			},
			Tasks: []model.Task{
				model.RunTask{
					Script: "./test.sh",
					Image:  "openjdk:8-slim",
					Vars:   make(map[string]string),
				},
				model.DockerTask{
					Username:   "((docker.username))",
					Password:   "((docker.password))",
					Repository: "simonjohansson/half-pipe-linter",
				},
				model.DeployTask{
					Username: "((cf-credentials.username))",
					Password: "((cf-credentials.password))",
					Api:      "https://api.europe-west1.cf.gcp.springernature.io",
					Org:      "engineering-enablement",
					Space:    "test",
					Manifest: "manifest.yml",
					Vars:     make(map[string]string),
				},
				model.RunTask{
					Script: "./asdf.sh",
					Image:  "openjdk:8-slim",
					Vars: map[string]string{
						"A": "asdf",
						"B": "1234",
					},
				},
				model.DeployTask{
					Username: "((cf-credentials.username))",
					Password: "((cf-credentials.password))",
					Api:      "https://api.europe-west1.cf.gcp.springernature.io",
					Org:      "engineering-enablement",
					Space:    "test",
					Manifest: "manifest.yml",
					Vars: map[string]string{
						"VAR1": "asdf1234",
						"VAR2": "9876",
					},
				},
			},
		}))
	})
})
|
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"regexp"
"strconv"
"sync"
"github.com/algolia/algoliasearch-client-go/algoliasearch"
"github.com/cheggaaa/pb"
"github.com/go-resty/resty"
"github.com/gocolly/colly"
jsoniter "github.com/json-iterator/go"
"go.uber.org/ratelimit"
"golang.org/x/oauth2"
)
// statusMap translates anime-planet status CSS classes (statusN) to Kitsu
// library-entry status strings; unmatched classes fall back to "planned".
var statusMap = map[string]string{
	"status1": "completed",
	"status2": "current",
	"status3": "dropped",
	"status4": "planned",
	"status5": "on_hold",
	"status6": "dropped",
}

// index is the Algolia media index used to resolve titles to Kitsu ids.
var index algoliasearch.Index

// f receives log output (dump.log) so progress bars own the terminal.
var f *os.File
// main syncs an anime-planet user's anime/manga lists into a Kitsu account:
// scrape anime-planet, resolve each title to a Kitsu id via Algolia, then
// POST library-entries to Kitsu.
// Args: <anime-planet username> <kitsu username> <kitsu password>.
func main() {
	flag.Parse()
	if flag.NArg() < 3 {
		log.Print("Gib anime-planet username, kitsu username, kitsu password.")
		return
	}
	var err error
	// All logging goes to dump.log so the progress bars own the terminal.
	f, err = os.OpenFile("dump.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	log.SetOutput(f)
	var (
		animePlanetUsername = flag.Arg(0)
		kitsuUsername       = flag.Arg(1)
		kitsuPassword       = flag.Arg(2)
	)
	// Password-grant OAuth against Kitsu (no client id/secret required).
	oauthConfig := oauth2.Config{
		ClientID:     "",
		ClientSecret: "",
		Endpoint: oauth2.Endpoint{
			TokenURL: "https://kitsu.io/api/oauth/token",
		},
	}
	tokens, err := oauthConfig.PasswordCredentialsToken(context.TODO(), kitsuUsername, kitsuPassword)
	if err != nil {
		panic(err)
	}
	if tokens.AccessToken == "" {
		panic("failed to obtain access token")
	}
	c := resty.New().SetAuthToken(tokens.AccessToken).SetHeaders(map[string]string{
		"Accept":       "application/vnd.api+json",
		"Content-Type": "application/vnd.api+json",
		"User-Agent":   "anime-planet-sync/1.0.0 (github.com/kitsu-space/anime-planet-sync)",
	})
	u := getUserID(c)
	baseURL := fmt.Sprintf("https://www.anime-planet.com/users/%s/%%s?sort=title&page=%%d&per_page=240&mylist_view=list", animePlanetUsername)
	// NOTE(review): hard-coded Algolia app id/key — presumably a public
	// search-only key (it encodes index/filter restrictions); confirm.
	algoClient := algoliasearch.NewClient("AWQO5J657S", "MTc1MWYzYzNiMjVjZjM5OWFiMDc1YWE4NTNkYWMyZjE4NTk2YjkyNjM1YWJkOWIwZTEwM2U1YmUyMjgyODIwY3Jlc3RyaWN0SW5kaWNlcz1wcm9kdWN0aW9uX21lZGlhJmZpbHRlcnM9")
	index = algoClient.InitIndex("production_media")
	kinds := [...]string{"anime", "manga"}
	pbs := make([]*pb.ProgressBar, len(kinds))
	chans := make([]<-chan Media, len(kinds))
	for idx, kind := range kinds {
		chans[idx], pbs[idx] = fetch(baseURL, kind)
	}
	_, err = pb.StartPool(pbs...)
	if err != nil {
		panic(err)
	}
	// Drain both scrapers and push each entry to Kitsu; failures are logged
	// and skipped rather than aborting the whole sync.
	for m := range merge(chans...) {
		if err := commit(c, pkg(m, u)); err != nil {
			log.Print(err)
		}
	}
}
// pkg wraps a scraped Media entry into the JSON:API parcel that Kitsu's
// library-entries endpoint expects, linking it to the given user id.
func pkg(m Media, uid string) Parcel {
	rels := map[string]LinkWrapper{
		m.Type: {m.Link},
		"user": {Link{ID: uid, Type: "users"}},
	}
	return Parcel{
		Data: ParcelData{
			Attributes:    m.Attributes,
			Relationships: rels,
			Type:          "library-entries",
		},
	}
}
type (
	// Media is one scraped list entry: the library attributes plus the link
	// to the Kitsu media it resolved to.
	Media struct {
		Attributes
		Link
	}
	// Parcel is the JSON:API request body for POST /library-entries.
	Parcel struct {
		Data ParcelData `json:"data"`
	}
	// Attributes are the library-entry fields sent to Kitsu.
	Attributes struct {
		Status   string `json:"status"`
		Progress int    `json:"progress,omitempty"`
		Rating   int    `json:"ratingTwenty,omitempty"`
	}
	// Link identifies a JSON:API resource by type and id.
	Link struct {
		Type string `json:"type"`
		ID   string `json:"id"`
	}
	// LinkWrapper nests a Link under "data", per JSON:API.
	LinkWrapper struct {
		Data Link `json:"data"`
	}
	// ParcelData is the resource object inside a Parcel.
	ParcelData struct {
		Attributes    Attributes             `json:"attributes"`
		Relationships map[string]LinkWrapper `json:"relationships"`
		Type          string                 `json:"type"`
	}
)
// getUserID returns the id of the authenticated Kitsu user.
// NOTE(review): the request error is discarded; a failed call would yield an
// empty id (or panic on a nil response) — confirm callers tolerate that.
func getUserID(c *resty.Client) string {
	r, _ := c.R().SetQueryParam("filter[self]", "true").Get("https://kitsu.io/api/edge/users")
	return jsoniter.Get(r.Body(), "data", 0, "id").ToString()
}
// commit POSTs one library entry to Kitsu and logs the payload on success.
// NOTE(review): only marshal/transport errors are surfaced; non-2xx HTTP
// responses are not checked — confirm that is intended.
func commit(c *resty.Client, p Parcel) error {
	data, err := jsoniter.MarshalToString(p)
	if err != nil {
		return err
	}
	_, err = c.R().SetBody(data).Post("https://kitsu.io/api/edge/library-entries")
	if err != nil {
		return err
	}
	log.Println(data)
	return err
}
// fetch scrapes one anime-planet list ("anime" or "manga") page by page and
// emits a Media on the returned channel per title matched in Algolia. It also
// returns a progress bar sized from the list's total count; fetch blocks
// until that count has been scraped from the first page.
// NOTE(review): if the first "p b" element's text doesn't parse as an int, p
// stays nil and a later match would call wg.Done a second time (panic), and
// p.Increment would dereference nil — confirm the selector is stable.
func fetch(baseURL, kind string) (results chan Media, p *pb.ProgressBar) {
	c := colly.NewCollector()
	// Algolia is queried at most twice per second.
	rl := ratelimit.New(2)
	qk := fmt.Sprintf("kind:%s", kind)
	results = make(chan Media)
	wg := sync.WaitGroup{}
	wg.Add(1)
	// Total entry count -> progress bar; only the first match counts.
	c.OnHTML("p b", func(e *colly.HTMLElement) {
		if p != nil {
			return
		}
		u, err := strconv.ParseInt(e.Text, 10, 64)
		if err == nil {
			p = pb.New64(u).Prefix(kind)
		}
		wg.Done()
	})
	// One table row per list entry: resolve the title against Algolia and
	// translate status/progress/rating into Kitsu attributes.
	c.OnHTML(".personalList tbody tr", func(e *colly.HTMLElement) {
		rl.Take()
		res, err := index.Search(e.ChildText("td.tableTitle"), algoliasearch.Map{
			"facetFilters":         []string{qk},
			"hitsPerPage":          1,
			"attributesToRetrieve": []string{"id"},
		})
		if err != nil {
			log.Print(err)
		} else if len(res.Hits) > 0 {
			hit := res.Hits[0]
			status, ok := statusMap[e.ChildAttr("td.tableStatus span", "class")]
			if !ok {
				status = "planned"
			}
			// Star rating scaled x4 into Kitsu's ratingTwenty field
			// (presumably a 0-5 star value; confirm the source scale).
			parsedRating, _ := strconv.ParseFloat(e.ChildAttr("td.tableRating .starrating div", "name"), 64)
			m := Media{
				Attributes: Attributes{
					Status: status,
				},
				Link: Link{
					Type: kind,
					ID:   strconv.FormatFloat(hit["id"].(float64), 'f', 0, 64),
				},
			}
			if status != "completed" {
				m.Progress, _ = strconv.Atoi(e.ChildText("td.tableEps"))
			}
			if parsedRating != 0 {
				m.Rating = int(parsedRating * 4)
			}
			results <- m
		}
		p.Increment()
	})
	re := regexp.MustCompile("[0-9]+")
	// Follow pagination links until there is no ".next a".
	c.OnHTML(".next a", func(e *colly.HTMLElement) {
		u, err := strconv.ParseUint(re.FindAllString(e.Attr("href"), -1)[0], 10, 64)
		if err == nil {
			c.Visit(fmt.Sprintf(baseURL, kind, u))
		}
	})
	// Before making a request print "Visiting ..."
	c.OnRequest(func(r *colly.Request) {
		log.Println("Visiting", r.URL.String())
	})
	// Set error handler
	c.OnError(func(r *colly.Response, err error) {
		log.Println("Request URL:", r.Request.URL, "failed with response:", r, "\nError:", err)
	})
	// Visit is synchronous, so results can be closed once it returns.
	go func() {
		c.Visit(fmt.Sprintf(baseURL, kind, 1))
		close(results)
	}()
	wg.Wait()
	return
}
// merge fans multiple Media channels into a single channel; the returned
// channel is closed once every input channel has been drained.
func merge(cs ...<-chan Media) <-chan Media {
	out := make(chan Media)
	var wg sync.WaitGroup
	wg.Add(len(cs))
	for _, c := range cs {
		// Pass the channel explicitly so each goroutine forwards its own.
		go func(ch <-chan Media) {
			defer wg.Done()
			for m := range ch {
				out <- m
			}
		}(c)
	}
	go func() {
		wg.Wait()
		close(out)
	}()
	return out
}
|
package utils
import (
"fmt"
"github.com/pkg/errors"
)
// ReaderError represents an error that occurred while reading, annotated with
// the reader it came from and the position at which it happened.
type ReaderError struct {
	error                   // underlying cause (embedded)
	Reader   *RuneReader    // reader the error originated from
	Location ReaderPosition // input position captured at error time
}
// Cause returns the underlying cause of this error, supporting
// github.com/pkg/errors-style cause unwrapping.
func (r ReaderError) Cause() error {
	return r.error
}
// Error returns the wrapped error's message annotated with the position.
// Changed to a value receiver: previously only *ReaderError got this method,
// so a ReaderError value fell back to the embedded error's Error() and
// silently dropped the position. With a value receiver both ReaderError and
// *ReaderError produce the annotated message, matching Cause's receiver.
func (r ReaderError) Error() string {
	return fmt.Sprintf("%s near %s", r.error.Error(), r.Location)
}
// NewErrorF returns a new error for the given reader, formatting the message
// from format/args and capturing the reader's current position.
func NewErrorF(reader *RuneReader, format string, args ...interface{}) *ReaderError {
	e := ReaderError{
		error:    fmt.Errorf(format, args...),
		Reader:   reader,
		Location: reader.Position(),
	}
	return &e
}
// WrapErrorF returns a new error for the given reader that wraps err with a
// formatted message and captures the reader's current position.
func WrapErrorF(reader *RuneReader, err error, format string, args ...interface{}) *ReaderError {
	wrapped := errors.Wrapf(err, format, args...)
	return &ReaderError{
		error:    wrapped,
		Reader:   reader,
		Location: reader.Position(),
	}
}
|
package base
import (
"bytes"
"context"
"sync"
mnet "github.com/ka2n/masminer/net"
"golang.org/x/crypto/ssh"
)
// Client bundles an SSH connection with a lock guarding its replacement.
// NOTE(review): embeds a mutex, so Client values must not be copied — always
// pass *Client.
type Client struct {
	SSH *ssh.Client
	MU  sync.RWMutex
}

// SetSSH swaps the underlying SSH connection under the write lock.
func (c *Client) SetSSH(client *ssh.Client) {
	c.MU.Lock()
	defer c.MU.Unlock()
	c.SSH = client
}

// Setup performs miner-specific initialisation; the base implementation has
// nothing to do.
func (c *Client) Setup(ctx context.Context) error {
	return nil
}

// Close closes the SSH connection.
// NOTE(review): panics if SSH was never set — confirm callers always SetSSH
// before Close.
func (c *Client) Close() error {
	return c.SSH.Close()
}
// outputTrimmed runs cmd on the remote host via OutputRemoteShell and returns
// its stdout with surrounding whitespace removed. Extracted because every
// getter below repeated the same run-and-trim pattern.
func outputTrimmed(ctx context.Context, client *ssh.Client, cmd string) (string, error) {
	ret, err := OutputRemoteShell(ctx, client, cmd)
	return string(bytes.TrimSpace(ret)), err
}

// GetMacAddr returns eth0's MAC address.
func GetMacAddr(ctx context.Context, client *ssh.Client) (string, error) {
	return outputTrimmed(ctx, client, `ip link show eth0 | grep -o 'link/.*' | cut -d' ' -f2`)
}

// GetHostname returns the remote hostname.
func GetHostname(ctx context.Context, client *ssh.Client) (string, error) {
	return outputTrimmed(ctx, client, `hostname`)
}

// GetKernelVersion returns `uname -srv` from the remote host.
func GetKernelVersion(ctx context.Context, client *ssh.Client) (string, error) {
	return outputTrimmed(ctx, client, `uname -srv`)
}

// GetFileSystemVersion returns `uname -v` from the remote host.
func GetFileSystemVersion(ctx context.Context, client *ssh.Client) (string, error) {
	return outputTrimmed(ctx, client, `uname -v`)
}

// GetUptimeSeconds returns whole-second uptime read from /proc/uptime.
func GetUptimeSeconds(ctx context.Context, client *ssh.Client) (string, error) {
	return outputTrimmed(ctx, client, "cut -d \".\" -f 1 /proc/uptime")
}

// GetCPUTemp returns the average of all thermal-zone temperature readings.
func GetCPUTemp(ctx context.Context, client *ssh.Client) (string, error) {
	return outputTrimmed(ctx, client, `cat /sys/class/thermal/thermal_zone*/temp | awk '{sum+=$1} END {print sum/NR}'`)
}

// GetIPAddr returns eth0's IPv4 address parsed via mnet.ParseIPAddr.
// Kept on the raw OutputRemoteShell call so the untrimmed output is still
// returned alongside the error, exactly as before.
func GetIPAddr(ctx context.Context, client *ssh.Client) (string, error) {
	ret, err := OutputRemoteShell(ctx, client, `ip a show eth0 | grep -o 'inet\s.*' | cut -d' ' -f2`)
	if err != nil {
		return string(ret), err
	}
	return mnet.ParseIPAddr(string(bytes.TrimSpace(ret)))
}
|
//go:build windows
// +build windows

package main
import (
"os"
)
// IsTerminal reports whether f is attached to a terminal. This Windows build
// has no termios support, so it always answers false.
func IsTerminal(f *os.File) bool {
	return false
}

// MakeRaw would put the terminal into raw mode; it is a no-op on windows and
// always returns nil.
func MakeRaw(f *os.File) error {
	return nil
}

// RestoreTerm would restore the terminal state saved by MakeRaw; it is a
// no-op on windows and always returns nil.
func RestoreTerm(f *os.File) error {
	return nil
}
|
package core
import (
"fmt"
"os"
"path"
"regexp"
"strings"
"github.com/jessevdk/go-flags"
"gopkg.in/op/go-logging.v1"
)
var log = logging.MustGetLogger("core")

// A BuildLabel is a representation of an identifier of a build target, e.g. //spam/eggs:ham
// corresponds to BuildLabel{PackageName: spam/eggs name: ham}
// BuildLabels are always absolute, so relative identifiers
// like :ham are always parsed into an absolute form.
// There is also implicit expansion of the final element of a target (ala Blaze)
// so //spam/eggs is equivalent to //spam/eggs:eggs
type BuildLabel struct {
	PackageName string // slash-separated package path, without the leading //
	Name        string // target name within the package (after the colon)
}
// WholeGraph represents parsing the entire graph (i.e. //...).
// We use this specially in one or two places.
var WholeGraph = []BuildLabel{{PackageName: "", Name: "..."}}

// BuildLabelStdin is used to indicate that we're going to consume build labels from stdin.
var BuildLabelStdin = BuildLabel{PackageName: "", Name: "_STDIN"}

// OriginalTarget is used to indicate one of the originally requested targets on the command line.
var OriginalTarget = BuildLabel{PackageName: "", Name: "_ORIGINAL"}

// Character classes used to assemble the label regexes below: Unicode
// letters, numbers and marks plus a few allowed punctuation characters.
const validChars = `\pL\pN\pM!@`
const packagePart = "[" + validChars + `\._\+-]+`
const packageName = "(" + packagePart + "(?:/" + packagePart + ")*)"
const targetName = `([` + validChars + `_\+-][` + validChars + `\._\+-]*(?:#[` + validChars + `_\+-]+)*)`
// Regexes for matching the various ways of writing a build label.
// Fully specified labels, e.g. //src/core:core
var absoluteTarget = regexp.MustCompile(fmt.Sprintf("^//(?:%s)?:%s$", packageName, targetName))

// Targets in local package, e.g. :core
var localTarget = regexp.MustCompile(fmt.Sprintf("^:%s$", targetName))

// Targets with an implicit target name, e.g. //src/core (expands to //src/core:core)
var implicitTarget = regexp.MustCompile(fmt.Sprintf("^//(?:%s/)?(%s)$", packageName, packagePart))

// All targets underneath a package, e.g. //src/core/...
var subTargets = regexp.MustCompile(fmt.Sprintf("^//%s/(\\.\\.\\.)$", packageName))

// Sub targets immediately underneath the root; //...
// Fix: the pattern is constant, so the original no-argument fmt.Sprintf
// wrapper (flagged by go vet / staticcheck S1039) is dropped.
var rootSubTargets = regexp.MustCompile("^(//)(\\.\\.\\.)$")

// The following cases only apply on the command line and can't be used in BUILD files.
// A relative target, e.g. core:core (expands to //src/core:core if already in src)
var relativeTarget = regexp.MustCompile(fmt.Sprintf("^%s:%s$", packageName, targetName))

// A relative target with implicitly specified target name, e.g. src/core (expands to //src/core:core)
var relativeImplicitTarget = regexp.MustCompile(fmt.Sprintf("^(?:%s/)?(%s)$", packageName, packagePart))

// All targets underneath a relative package, e.g. src/core/...
var relativeSubTargets = regexp.MustCompile(fmt.Sprintf("^(?:%s/)?(\\.\\.\\.)$", packageName))

// Package and target names only, used for validation.
var packageNameOnly = regexp.MustCompile(fmt.Sprintf("^%s?$", packageName))
var targetNameOnly = regexp.MustCompile(fmt.Sprintf("^%s$", targetName))
// String returns the canonical //package:name form of this label; the :name
// part is omitted when the label has no target name.
func (label BuildLabel) String() string {
	s := "//" + label.PackageName
	if label.Name != "" {
		s += ":" + label.Name
	}
	return s
}
// NewBuildLabel constructs a new build label from the given components. Panics on failure.
// Prefer TryNewBuildLabel when the input is untrusted.
func NewBuildLabel(pkgName, name string) BuildLabel {
	label, err := TryNewBuildLabel(pkgName, name)
	if err != nil {
		panic(err)
	}
	return label
}
// TryNewBuildLabel constructs a new build label from the given components,
// returning an error if either name fails validation.
func TryNewBuildLabel(pkgName, name string) (BuildLabel, error) {
	if err := validateNames(pkgName, name); err != nil {
		return BuildLabel{}, err
	}
	return BuildLabel{PackageName: pkgName, Name: name}, nil
}
// validateNames returns an error if the package name or target name isn't
// accepted, checking the character-class regexes then the reserved suffixes.
func validateNames(pkgName, name string) error {
	if !packageNameOnly.MatchString(pkgName) {
		return fmt.Errorf("Invalid package name: %s", pkgName)
	}
	if !targetNameOnly.MatchString(name) {
		return fmt.Errorf("Invalid target name: %s", name)
	}
	return validateSuffixes(pkgName, name)
}
// validateSuffixes checks that there are no invalid suffixes on the target name.
// buildDirSuffix / testDirSuffix (declared elsewhere in this package) are
// reserved for the build system's own temporary directories.
func validateSuffixes(pkgName, name string) error {
	if strings.HasSuffix(name, buildDirSuffix) ||
		strings.HasSuffix(name, testDirSuffix) ||
		strings.HasSuffix(pkgName, buildDirSuffix) ||
		strings.HasSuffix(pkgName, testDirSuffix) {
		return fmt.Errorf("._build and ._test are reserved suffixes")
	}
	return nil
}
// ParseBuildLabel parses a single build label from a string. Panics on failure.
// Prefer TryParseBuildLabel to receive an error instead.
func ParseBuildLabel(target, currentPath string) BuildLabel {
	label, err := TryParseBuildLabel(target, currentPath)
	if err != nil {
		panic(err)
	}
	return label
}
// newBuildLabel creates a new build label after parsing.
// It is only used internally because it skips some checks; we know those are valid because
// they already run implicitly as part of us parsing the label.
// Only the reserved-suffix check still applies, since the regexes can't express it.
func newBuildLabel(pkgName, name string) (BuildLabel, error) {
	return BuildLabel{pkgName, name}, validateSuffixes(pkgName, name)
}
// TryParseBuildLabel attempts to parse a single build label from a string,
// trying each accepted absolute form in turn: //pkg:name, :name, //pkg/...,
// //... and finally //pkg (implicit target name). Returns an error if none
// match.
func TryParseBuildLabel(target string, currentPath string) (BuildLabel, error) {
	if m := absoluteTarget.FindStringSubmatch(target); m != nil {
		return newBuildLabel(m[1], m[2])
	}
	if m := localTarget.FindStringSubmatch(target); m != nil {
		return newBuildLabel(currentPath, m[1])
	}
	if m := subTargets.FindStringSubmatch(target); m != nil {
		return newBuildLabel(m[1], m[2])
	}
	if m := rootSubTargets.FindStringSubmatch(target); m != nil {
		return newBuildLabel("", m[2])
	}
	if m := implicitTarget.FindStringSubmatch(target); m != nil {
		// //pkg/name expands to //pkg/name:name.
		if m[1] != "" {
			return newBuildLabel(m[1]+"/"+m[2], m[2])
		}
		return newBuildLabel(m[2], m[2])
	}
	return BuildLabel{}, fmt.Errorf("Invalid build label: %s", target)
}
// As above, but allows parsing of relative labels (eg. src/parse/rules:python_rules)
// which is convenient at the shell prompt
func parseMaybeRelativeBuildLabel(target, subdir string) (BuildLabel, error) {
	// Try the ones that don't need locating the repo root first.
	if !strings.HasPrefix(target, ":") {
		if label, err := TryParseBuildLabel(target, ""); err == nil {
			return label, nil
		}
	}
	// Now we need to locate the repo root and initial package.
	// Deliberately leave this till after the above to facilitate the --repo_root flag.
	if subdir == "" {
		// MustFindRepoRoot dies on failure; initialPackage is presumably set
		// as a side effect — confirm in its definition.
		MustFindRepoRoot()
		subdir = initialPackage
	}
	// Relative forms, resolved against subdir: pkg:name ...
	matches := relativeTarget.FindStringSubmatch(target)
	if matches != nil {
		return newBuildLabel(path.Join(subdir, matches[1]), matches[2])
	}
	// ... pkg/... (all subpackages) ...
	matches = relativeSubTargets.FindStringSubmatch(target)
	if matches != nil {
		return newBuildLabel(path.Join(subdir, matches[1]), matches[2])
	}
	// ... pkg (implicit target name) ...
	matches = relativeImplicitTarget.FindStringSubmatch(target)
	if matches != nil {
		if matches[1] != "" {
			return newBuildLabel(path.Join(subdir, matches[1], matches[2]), matches[2])
		}
		return newBuildLabel(path.Join(subdir, matches[2]), matches[2])
	}
	// ... and :name within the current package.
	matches = localTarget.FindStringSubmatch(target)
	if matches != nil {
		return newBuildLabel(subdir, matches[1])
	}
	return BuildLabel{}, fmt.Errorf("Invalid build target label: %s", target)
}
// ParseBuildLabels parses a bunch of build labels from strings. It dies on failure.
// Relative labels are allowed since this is generally used at initialisation.
func ParseBuildLabels(targets []string) []BuildLabel {
	ret := make([]BuildLabel, len(targets))
	for i, target := range targets {
		label, err := parseMaybeRelativeBuildLabel(target, "")
		if err != nil {
			log.Fatalf("%s", err)
		}
		ret[i] = label
	}
	return ret
}
// IsAllSubpackages returns true if the label ends in ..., ie. it includes all subpackages.
func (label BuildLabel) IsAllSubpackages() bool {
	return label.Name == "..."
}

// IsAllTargets returns true if the label is the pseudo-label referring to all targets in this package.
// ie. //pkg:all
func (label BuildLabel) IsAllTargets() bool {
	return label.Name == "all"
}
// Includes returns true if label includes the other label (//pkg:target1 is covered by //pkg:all etc).
// label here acts as the covering (e.g. visibility) specification.
func (label BuildLabel) Includes(that BuildLabel) bool {
	if (label.PackageName == "" && label.IsAllSubpackages()) ||
		that.PackageName == label.PackageName ||
		strings.HasPrefix(that.PackageName, label.PackageName+"/") {
		// We're in the same package or a subpackage of this visibility spec
		if label.IsAllSubpackages() {
			return true
		} else if label.PackageName == that.PackageName {
			// Same package: match either the exact name or :all.
			if label.Name == that.Name || label.IsAllTargets() {
				return true
			}
		}
	}
	return false
}
// Less returns true if this build label would sort less than another one,
// ordering by package name first, then target name.
func (label BuildLabel) Less(other BuildLabel) bool {
	if label.PackageName != other.PackageName {
		return label.PackageName < other.PackageName
	}
	return label.Name < other.Name
}
// Paths is an implementation of BuildInput interface; we use build labels directly as inputs.
// Returns the target's outputs prefixed with its package path; dies if the
// label isn't in the graph (TargetOrDie).
func (label BuildLabel) Paths(graph *BuildGraph) []string {
	return addPathPrefix(graph.TargetOrDie(label).Outputs(), label.PackageName)
}

// FullPaths is an implementation of BuildInput interface.
// As Paths, but prefixed with the target's output directory instead.
func (label BuildLabel) FullPaths(graph *BuildGraph) []string {
	target := graph.TargetOrDie(label)
	return addPathPrefix(target.Outputs(), target.OutDir())
}
// addPathPrefix joins prefix onto every entry of paths, returning a new slice.
func addPathPrefix(paths []string, prefix string) []string {
	prefixed := make([]string, len(paths))
	for i := range paths {
		prefixed[i] = path.Join(prefix, paths[i])
	}
	return prefixed
}
// LocalPaths is an implementation of BuildInput interface.
// Returns the target's outputs with no directory prefix; dies if the label
// isn't in the graph (TargetOrDie).
func (label BuildLabel) LocalPaths(graph *BuildGraph) []string {
	return graph.TargetOrDie(label).Outputs()
}

// Label is an implementation of BuildInput interface. It always returns this label.
func (label BuildLabel) Label() *BuildLabel {
	return &label
}

// nonOutputLabel is part of the BuildInput interface; for a plain BuildLabel
// it also returns the label itself.
func (label BuildLabel) nonOutputLabel() *BuildLabel {
	return &label
}
// UnmarshalFlag unmarshals a build label from a command line flag. Implementation of flags.Unmarshaler interface.
// Parse failures are fatal, except during shell completion where the label is
// simply left unset.
func (label *BuildLabel) UnmarshalFlag(value string) error {
	// This is only allowable here, not in any other usage of build labels.
	if value == "-" {
		*label = BuildLabelStdin
		return nil
	} else if l, err := parseMaybeRelativeBuildLabel(value, ""); err != nil {
		// This has to be fatal because of the way we're using the flags package;
		// we lose incoming flags if we return errors.
		// But don't die in completion mode.
		if os.Getenv("PLZ_COMPLETE") == "" {
			log.Fatalf("%s", err)
		}
	} else {
		*label = l
	}
	return nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// This is used by gcfg to unmarshal the config files.
// Note the label is assigned even on error (it will be the zero value then).
func (label *BuildLabel) UnmarshalText(text []byte) error {
	l, err := TryParseBuildLabel(string(text), "")
	*label = l
	return err
}
// Parent returns what would be the parent of a build label, or the label itself if it's parentless.
// Note that there is not a concrete guarantee that the returned label exists in the build graph,
// and that the label returned is the ultimate ancestor (ie. not necessarily immediate parent).
func (label BuildLabel) Parent() BuildLabel {
	// Internal sub-targets look like _name#tag; anything else is its own parent.
	index := strings.IndexRune(label.Name, '#')
	if index == -1 || !strings.HasPrefix(label.Name, "_") {
		return label
	}
	// Strip the #tag suffix and all leading underscores.
	label.Name = strings.TrimLeft(label.Name[:index], "_")
	return label
}
// HasParent returns true if the build label has a parent that's not itself.
func (label BuildLabel) HasParent() bool {
	return label.Parent() != label
}

// IsEmpty returns true if this is an empty build label, i.e. nothing's populated it yet.
func (label BuildLabel) IsEmpty() bool {
	return label.PackageName == "" && label.Name == ""
}
// PackageDir returns a path to the directory this target is in.
// This is equivalent to PackageName in all cases except when at the repo
// root, when this will return . instead. This is often easier to use in
// build rules.
func (label BuildLabel) PackageDir() string {
	if label.PackageName != "" {
		return label.PackageName
	}
	return "."
}
// Complete implements the flags.Completer interface, which is used for shell completion.
// Unfortunately it's rather awkward to handle here; we need to do a proper parse in order
// to find out what the possible build labels are, and we're not ready for that yet.
// Returning to main is also awkward since the flags haven't parsed properly; all in all
// it seems an easier (albeit inelegant) solution to start things over by re-execing ourselves.
func (label BuildLabel) Complete(match string) []flags.Completion {
	if match == "" {
		os.Exit(0)
	}
	// Signal the re-execed child that it is completing `match`, then collect
	// one completion per line of its output.
	os.Setenv("PLZ_COMPLETE", match)
	os.Unsetenv("GO_FLAGS_COMPLETION")
	exec, _ := os.Executable()
	out, _, err := ExecWithTimeout(nil, "", os.Environ(), 0, 0, false, append([]string{exec}, os.Args[1:]...))
	if err != nil {
		return nil
	}
	ret := []flags.Completion{}
	for _, line := range strings.Split(string(out), "\n") {
		if line != "" {
			ret = append(ret, flags.Completion{Item: line})
		}
	}
	return ret
}
// LooksLikeABuildLabel returns true if the string appears to be a build label, false if not.
// Useful for cases like rule sources where sources can be a filename or a label.
func LooksLikeABuildLabel(str string) bool {
	switch {
	case strings.HasPrefix(str, "//"):
		return true
	case strings.HasPrefix(str, ":"):
		return true
	default:
		return false
	}
}
// BuildLabels makes slices of build labels sortable.
type BuildLabels []BuildLabel

// Len returns the number of labels in the slice.
func (s BuildLabels) Len() int { return len(s) }

// Less reports whether the label at i sorts before the label at j.
func (s BuildLabels) Less(i, j int) bool { return s[i].Less(s[j]) }

// Swap exchanges the labels at positions i and j.
func (s BuildLabels) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
package _105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node holding an int value and child pointers.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// buildTree reconstructs a binary tree from its preorder and inorder
// traversals (assumed to describe the same tree with distinct values).
func buildTree(preorder []int, inorder []int) *TreeNode {
	return buildTreeRecursion(preorder, 0, len(preorder)-1, inorder, 0, len(inorder)-1)
}
// buildTreeRecursion builds the subtree whose preorder slice is
// preorder[preStart..preEnd] and whose inorder slice is inorder[inStart..inEnd].
func buildTreeRecursion(preorder []int, preStart int, preEnd int, inorder []int, inStart int, inEnd int) *TreeNode {
	// Empty range: no subtree here.
	if preStart > preEnd || inStart > inEnd {
		return nil
	}
	// The first preorder element is the subtree root.
	root := &TreeNode{Val: preorder[preStart]}
	// Find the root inside the inorder range; everything to its left belongs
	// to the left subtree. (Defaults to 0 if absent, matching the original.)
	rootIndex := 0
	for i := inStart; i <= inEnd; i++ {
		if inorder[i] == root.Val {
			rootIndex = i
			break
		}
	}
	leftSize := rootIndex - inStart
	root.Left = buildTreeRecursion(preorder, preStart+1, preStart+leftSize, inorder, inStart, rootIndex-1)
	root.Right = buildTreeRecursion(preorder, preStart+leftSize+1, preEnd, inorder, rootIndex+1, inEnd)
	return root
}
|
package log
import "log"
// Implements standard functions of go's log
// Printf forwards its arguments unchanged to the standard library's log.Printf.
func Printf(format string, args ...interface{}) {
	log.Printf(format, args...)
}
// Println forwards its arguments unchanged to the standard library's log.Println.
func Println(args ...interface{}) {
	log.Println(args...)
}
// Add more when needed
|
package main
import (
"bytes"
"fmt"
"net/url"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// App contains dependencies for the functions; interfaces are used so that
// the AWS clients can be faked in tests.
type App struct {
	S3Service s3iface.S3API
	DynamoService dynamodbiface.DynamoDBAPI
	// ImportFn performs the actual import of a downloaded file.
	ImportFn func(request Request) error
}
// downloadFile fetches the object bucket/key from S3 and returns its complete
// contents. It returns any error from the GetObject call or from reading the
// object's body stream.
func downloadFile(svc s3iface.S3API, bucket string, key string) ([]byte, error) {
	s3Output, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, err
	}
	// The body is a stream; close it so the underlying connection is released.
	// (The original leaked it.)
	defer s3Output.Body.Close()
	// Turn the GetObjectOutput stream into []byte, propagating read errors
	// instead of silently returning a truncated buffer.
	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(s3Output.Body); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// handler is the Lambda entry point for S3 events: it downloads the object
// named in the event's first record and hands its contents to the configured
// import function. Errors are logged and returned to the Lambda runtime.
func (app App) handler(event events.S3Event) error {
	// BUG FIX: the original compared len(event.Records) < 0, which is never
	// true, so an empty Records slice would have panicked on the index below.
	if len(event.Records) == 0 {
		fmt.Println("no records")
		return nil
	}
	// Only the first record of the event is processed.
	record := event.Records[0]
	// Object keys arrive URL-encoded in S3 notifications.
	key, err := url.QueryUnescape(record.S3.Object.Key)
	if err != nil {
		return err
	}
	// Download the content of the file that triggered the event.
	file, err := downloadFile(app.S3Service, record.S3.Bucket.Name, key)
	if err != nil {
		return err
	}
	// Perform the import.
	err = app.ImportFn(Request{
		App:     app,
		Bucket:  record.S3.Bucket.Name,
		Content: file,
		Key:     key,
	})
	if err != nil {
		fmt.Println(err) // log the error before returning it to the runtime
	}
	return err
}
// main wires the real AWS clients into an App and hands its handler to the
// Lambda runtime.
func main() {
	sess := session.New()
	lambda.Start(App{
		S3Service:     s3.New(sess),
		DynamoService: dynamodb.New(sess),
		ImportFn:      Import,
	}.handler)
}
|
package main
import (
"fmt"
"github.com/awslabs/aws-sdk-go/aws"
"github.com/awslabs/aws-sdk-go/gen/route53"
"log"
"os"
)
// region is the AWS region all calls in this example are made against.
var region = "us-west-1"

// Connect creates a Route 53 client using static credentials read from the
// AWS_ACCESS_KEY / AWS_SECRET_KEY environment variables.
// (The original comment said "ec2 client", which was wrong.)
func Connect() *route53.Route53 {
	creds := aws.Creds(os.Getenv("AWS_ACCESS_KEY"), os.Getenv("AWS_SECRET_KEY"), "") // HL
	return route53.New(creds, region, nil)
}
// ListZones prints out the hosted zones, exiting the process on any API error.
func ListZones(r *route53.Route53) {
	req := &route53.ListHostedZonesRequest{}
	resp, err := r.ListHostedZones(req)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(*resp)
}
// GetStatus prints the status of the health check with ID "gomeetup_check",
// exiting the process on any API error. ("// HL" markers are slide highlights.)
func GetStatus(r *route53.Route53) {
	resp, err := r.GetHealthCheckStatus( // HL
		&route53.GetHealthCheckStatusRequest{ // HL
			HealthCheckID: aws.String("gomeetup_check"), // HL
		}, // HL
	) // HL
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(*resp)
}
// main connects to Route 53 and prints the example health check's status.
func main() {
	r := Connect()
	//ListZones(r)
	GetStatus(r)
}
|
package ovirt
import "github.com/openshift/installer/pkg/destroy/providers"
// init registers this package's destroyer constructor under the "ovirt"
// platform name so the installer can look it up in the providers registry.
func init() {
	providers.Registry["ovirt"] = New
}
|
package main
import (
"context"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
. "github.com/pobo380/network-games/card-game/server/websocket/handler"
"github.com/pobo380/network-games/card-game/server/websocket/handler/table"
"net/http"
)
// OnConnect handles the API Gateway websocket $connect route: it records the
// caller's player id (taken from a custom request header) together with the
// websocket connection id in the connections DynamoDB table. Any marshalling
// or persistence failure is returned as a 500 response.
func OnConnect(ctx context.Context, request events.APIGatewayWebsocketProxyRequest) (events.APIGatewayProxyResponse, error) {
	playerId := http.Header(request.MultiValueHeaders).Get(CustomHeaderPlayerId)
	conn := &table.PlayerConnection{
		PlayerId:     playerId,
		ConnectionId: request.RequestContext.ConnectionID,
	}
	item, err := dynamodbattribute.MarshalMap(conn)
	if err != nil {
		return events.APIGatewayProxyResponse{Body: request.Body, StatusCode: 500}, err
	}
	_, err = Dynamo.PutItem(&dynamodb.PutItemInput{
		TableName: &DynamoDbTableConnections,
		Item:      item,
	})
	if err != nil {
		return events.APIGatewayProxyResponse{Body: request.Body, StatusCode: 500}, err
	}
	return events.APIGatewayProxyResponse{Body: request.Body, StatusCode: 200}, nil
}
// main registers OnConnect as the Lambda entry point.
func main() {
	lambda.Start(OnConnect)
}
|
package station
// Station holds the data for a single station; currently just an unexported
// name, so it can only be populated from within this package.
type Station struct {
	name string
}
|
// Copyright (c) WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
//
// WSO2 Inc. licenses this file to you under the Apache License,
// Version 2.0 (the "License"); you may not use this file except
// in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package apim
const (
	// Names of the Kubernetes resources holding APIM configuration and
	// client-registration credentials.
	apimConfName = "apim-config"
	clientRegistrationSecret = "ckcs-secret"
	// Keys looked up inside those config/secret resources.
	clientIdConst = "clientId"
	clientSecretConst = "clientSecret"
	apimRegistrationEndpointConst = "apimKeymanagerEndpoint"
	apimPublisherEndpointConst = "apimPublisherEndpoint"
	apimTokenEndpointConst = "apimTokenEndpoint"
	apimCredentialsConst = "apimCredentialsSecret"
	skipVerifyConst = "insecureSkipVerify"
	// Standard HTTP header names/values used on APIM REST calls.
	HeaderAuthorization = "Authorization"
	HeaderAccept = "Accept"
	HeaderContentType = "Content-Type"
	HeaderConnection = "Connection"
	HeaderValueApplicationJSON = "application/json"
	HeaderValueAuthBasicPrefix = "Basic"
	HeaderValueAuthBearerPrefix = "Bearer"
	HeaderValueKeepAlive = "keep-alive"
	HeaderValueXWWWFormUrlEncoded = "application/x-www-form-urlencoded"
	// NOTE(review): presumably milliseconds given the magnitude — confirm
	// against the HTTP client that consumes it.
	DefaultHttpRequestTimeout = 10000
	// Relative REST endpoint paths on the APIM server.
	publisherAPIImportEndpoint = "api/am/publisher/v1/apis/import?overwrite=true"
	defaultClientRegistrationEndpointSuffix = "client-registration/v0.17/register"
	defaultApiListEndpointSuffix = "api/am/publisher/v1/apis"
	defaultTokenEndpoint = "oauth2/token"
	importAPIFromSwaggerEndpoint = "api/am/publisher/v1/apis/import-openapi"
)
// APIDefinitionFile is the envelope of an exported API artifact in APIM:
// an artifact type, the APIM version, and the APIDTODefinition payload.
// (The original comment named the wrong type.)
type APIDefinitionFile struct {
	Type string `json:"type,omitempty" yaml:"type,omitempty"`
	ApimVersion string `json:"version,omitempty" yaml:"version,omitempty"`
	Data APIDTODefinition `json:"data,omitempty" yaml:"data,omitempty"`
}
// APIDTODefinition represents an APIDTO artifact in APIM (the v1 publisher
// REST API's representation of an API). Fields mirror the APIM JSON/YAML
// schema; interface{} is used where the schema is not modelled here.
type APIDTODefinition struct {
	ID string `json:"id,omitempty" yaml:"id,omitempty"`
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
	Context string `json:"context,omitempty" yaml:"context,omitempty"`
	Version string `json:"version,omitempty" yaml:"version,omitempty"`
	Provider string `json:"provider,omitempty" yaml:"provider,omitempty"`
	LifeCycleStatus string `json:"lifeCycleStatus,omitempty" yaml:"lifeCycleStatus,omitempty"`
	WsdlInfo interface{} `json:"wsdlInfo,omitempty" yaml:"wsdlInfo,omitempty"`
	WsdlURL string `json:"wsdlUrl,omitempty" yaml:"wsdlUrl,omitempty"`
	TestKey string `json:"testKey,omitempty" yaml:"testKey,omitempty"`
	ResponseCachingEnabledKey bool `json:"responseCachingEnabled,omitempty" yaml:"responseCachingEnabled,omitempty"`
	CacheTimeout int `json:"cacheTimeout,omitempty" yaml:"cacheTimeout,omitempty"`
	DestinationStatsEnabled string `json:"destinationStatsEnabled,omitempty" yaml:"destinationStatsEnabled,omitempty"`
	HasThumbnail bool `json:"hasThumbnail,omitempty" yaml:"hasThumbnail,omitempty"`
	IsDefaultVersion bool `json:"isDefaultVersion,omitempty" yaml:"isDefaultVersion,omitempty"`
	EnableSchemaValidation bool `json:"enableSchemaValidation,omitempty" yaml:"enableSchemaValidation,omitempty"`
	EnableStore bool `json:"enableStore,omitempty" yaml:"enableStore,omitempty"`
	Type string `json:"type,omitempty" yaml:"type,omitempty"`
	Transport []string `json:"transport,omitempty" yaml:"transport,omitempty"`
	Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
	Policies []string `json:"policies,omitempty" yaml:"policies,omitempty"`
	APIThrottlingPolicy string `json:"apiThrottlingPolicy,omitempty" yaml:"apiThrottlingPolicy,omitempty"`
	AuthorizationHeader string `json:"authorizationHeader,omitempty" yaml:"authorizationHeader,omitempty"`
	SecurityScheme []string `json:"securityScheme,omitempty" yaml:"securityScheme,omitempty"`
	MaxTPS interface{} `json:"maxTps,omitempty" yaml:"maxTps,omitempty"`
	Visibility string `json:"visibility,omitempty" yaml:"visibility,omitempty"`
	VisibleRoles []string `json:"visibleRoles,omitempty" yaml:"visibleRoles,omitempty"`
	VisibleTenants []string `json:"visibleTenants,omitempty" yaml:"visibleTenants,omitempty"`
	EndpointSecurity interface{} `json:"endpointSecurity,omitempty" yaml:"endpointSecurity,omitempty"`
	GatewayEnvironments []string `json:"gatewayEnvironments,omitempty" yaml:"gatewayEnvironments,omitempty"`
	DeploymentEnvironments []interface{} `json:"deploymentEnvironments,omitempty" yaml:"deploymentEnvironments,omitempty"`
	Labels []string `json:"labels,omitempty" yaml:"labels,omitempty"`
	MediationPolicies []interface{} `json:"mediationPolicies,omitempty" yaml:"mediationPolicies,omitempty"`
	SubscriptionAvailability string `json:"subscriptionAvailability,omitempty" yaml:"subscriptionAvailability,omitempty"`
	SubscriptionAvailableTenants []string `json:"subscriptionAvailableTenants,omitempty" yaml:"subscriptionAvailableTenants,omitempty"`
	AdditionalProperties map[string]string `json:"additionalProperties,omitempty" yaml:"additionalProperties,omitempty"`
	Monetization interface{} `json:"monetization,omitempty" yaml:"monetization,omitempty"`
	AccessControl string `json:"accessControl,omitempty" yaml:"accessControl,omitempty"`
	// NOTE(review): Go field name is misspelled ("Acess"); the JSON/YAML tags
	// are correct, and renaming the field would break in-package callers.
	AcessControlRoles []string `json:"accessControlRoles,omitempty" yaml:"accessControlRoles,omitempty"`
	BusinessInformation interface{} `json:"businessInformation,omitempty" yaml:"businessInformation,omitempty"`
	CorsConfiguration interface{} `json:"corsConfiguration,omitempty" yaml:"corsConfiguration,omitempty"`
	WorkflowStatus []string `json:"workflowStatus,omitempty" yaml:"workflowStatus,omitempty"`
	CreatedTime string `json:"createdTime,omitempty" yaml:"createdTime,omitempty"`
	LastUpdatedTime string `json:"lastUpdatedTime,omitempty" yaml:"lastUpdatedTime,omitempty"`
	EndpointConfig interface{} `json:"endpointConfig,omitempty" yaml:"endpointConfig,omitempty"`
	EndpointImplementationType string `json:"endpointImplementationType,omitempty" yaml:"endpointImplementationType,omitempty"`
	Scopes []interface{} `json:"scopes,omitempty" yaml:"scopes,omitempty"`
	Operations []interface{} `json:"operations,omitempty" yaml:"operations,omitempty"`
	ThreatProtectionPolicies interface{} `json:"threatProtectionPolicies,omitempty" yaml:"threatProtectionPolicies,omitempty"`
	Categories []string `json:"categories,omitempty" yaml:"categories,omitempty"`
	KeyManagers []string `json:"keyManagers,omitempty" yaml:"keyManagers,omitempty"`
}
// APIDefinition represents an API artifact in APIM (the older, non-DTO
// artifact format). Fields mirror the APIM JSON/YAML schema.
type APIDefinition struct {
	ID ID `json:"id,omitempty" yaml:"id,omitempty"`
	UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
	Type string `json:"type,omitempty" yaml:"type,omitempty"`
	Context string `json:"context" yaml:"context"`
	ContextTemplate string `json:"contextTemplate,omitempty" yaml:"contextTemplate,omitempty"`
	Tags []string `json:"tags" yaml:"tags,omitempty"`
	Documents []interface{} `json:"documents,omitempty" yaml:"documents,omitempty"`
	LastUpdated string `json:"lastUpdated,omitempty" yaml:"lastUpdated,omitempty"`
	AvailableTiers []AvailableTiers `json:"availableTiers,omitempty" yaml:"availableTiers,omitempty"`
	AvailableSubscriptionLevelPolicies []interface{} `json:"availableSubscriptionLevelPolicies,omitempty" yaml:"availableSubscriptionLevelPolicies,omitempty"`
	URITemplates []URITemplates `json:"uriTemplates" yaml:"uriTemplates,omitempty"`
	APIHeaderChanged bool `json:"apiHeaderChanged,omitempty" yaml:"apiHeaderChanged,omitempty"`
	APIResourcePatternsChanged bool `json:"apiResourcePatternsChanged,omitempty" yaml:"apiResourcePatternsChanged,omitempty"`
	Status string `json:"status,omitempty" yaml:"status,omitempty"`
	TechnicalOwner string `json:"technicalOwner,omitempty" yaml:"technicalOwner,omitempty"`
	TechnicalOwnerEmail string `json:"technicalOwnerEmail,omitempty" yaml:"technicalOwnerEmail,omitempty"`
	BusinessOwner string `json:"businessOwner,omitempty" yaml:"businessOwner,omitempty"`
	BusinessOwnerEmail string `json:"businessOwnerEmail,omitempty" yaml:"businessOwnerEmail,omitempty"`
	Visibility string `json:"visibility,omitempty" yaml:"visibility,omitempty"`
	EndpointSecured bool `json:"endpointSecured,omitempty" yaml:"endpointSecured,omitempty"`
	EndpointAuthDigest bool `json:"endpointAuthDigest,omitempty" yaml:"endpointAuthDigest,omitempty"`
	EndpointUTUsername string `json:"endpointUTUsername,omitempty" yaml:"endpointUTUsername,omitempty"`
	Transports string `json:"transports,omitempty" yaml:"transports,omitempty"`
	InSequence string `json:"inSequence,omitempty" yaml:"inSequence,omitempty"`
	OutSequence string `json:"outSequence,omitempty" yaml:"outSequence,omitempty"`
	FaultSequence string `json:"faultSequence,omitempty" yaml:"faultSequence,omitempty"`
	AdvertiseOnly bool `json:"advertiseOnly,omitempty" yaml:"advertiseOnly,omitempty"`
	CorsConfiguration *CorsConfiguration `json:"corsConfiguration,omitempty" yaml:"corsConfiguration,omitempty"`
	ProductionUrl string `json:"productionUrl,omitempty" yaml:"productionUrl,omitempty"`
	SandboxUrl string `json:"sandboxUrl,omitempty" yaml:"sandboxUrl,omitempty"`
	EndpointConfig *string `json:"endpointConfig,omitempty" yaml:"endpointConfig,omitempty"`
	ResponseCache string `json:"responseCache,omitempty" yaml:"responseCache,omitempty"`
	CacheTimeout int `json:"cacheTimeout,omitempty" yaml:"cacheTimeout,omitempty"`
	Implementation string `json:"implementation,omitempty" yaml:"implementation,omitempty"`
	AuthorizationHeader string `json:"authorizationHeader,omitempty" yaml:"authorizationHeader,omitempty"`
	Scopes []interface{} `json:"scopes,omitempty" yaml:"scopes,omitempty"`
	IsDefaultVersion bool `json:"isDefaultVersion,omitempty" yaml:"isDefaultVersion,omitempty"`
	IsPublishedDefaultVersion bool `json:"isPublishedDefaultVersion,omitempty" yaml:"isPublishedDefaultVersion,omitempty"`
	Environments []string `json:"environments,omitempty" yaml:"environments,omitempty"`
	CreatedTime string `json:"createdTime,omitempty" yaml:"createdTime,omitempty"`
	AdditionalProperties map[string]string `json:"additionalProperties,omitempty" yaml:"additionalProperties,omitempty"`
	EnvironmentList []string `json:"environmentList,omitempty" yaml:"environmentList,omitempty"`
	APISecurity string `json:"apiSecurity,omitempty" yaml:"apiSecurity,omitempty"`
	AccessControl string `json:"accessControl,omitempty" yaml:"accessControl,omitempty"`
	Rating float64 `json:"rating,omitempty" yaml:"rating,omitempty"`
	IsLatest bool `json:"isLatest,omitempty" yaml:"isLatest,omitempty"`
	EnableStore bool `json:"enableStore,omitempty" yaml:"enableStore,omitempty"`
	KeyManagers []string `json:"keyManagers,omitempty" yaml:"keyManagers,omitempty"`
}
// ID is the composite identifier of an API: provider, name and version.
type ID struct {
	ProviderName string `json:"providerName" yaml:"providerName"`
	APIName string `json:"apiName" yaml:"apiName"`
	Version string `json:"version" yaml:"version"`
}
// AvailableTiers describes one throttling tier available to an API.
type AvailableTiers struct {
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	DisplayName string `json:"displayName,omitempty" yaml:"displayName,omitempty"`
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
	RequestsPerMin int `json:"requestsPerMin,omitempty" yaml:"requestsPerMin,omitempty"`
	RequestCount int `json:"requestCount,omitempty" yaml:"requestCount,omitempty"`
	UnitTime int `json:"unitTime,omitempty" yaml:"unitTime,omitempty"`
	TimeUnit string `json:"timeUnit,omitempty" yaml:"timeUnit,omitempty"`
	TierPlan string `json:"tierPlan,omitempty" yaml:"tierPlan,omitempty"`
	StopOnQuotaReached bool `json:"stopOnQuotaReached,omitempty" yaml:"stopOnQuotaReached,omitempty"`
}
// Scopes describes an OAuth scope attached to an API resource.
type Scopes struct {
	Key string `json:"key,omitempty" yaml:"key,omitempty"`
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	Roles string `json:"roles,omitempty" yaml:"roles,omitempty"`
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
	ID int `json:"id,omitempty" yaml:"id,omitempty"`
}
// MediationScripts is an intentionally empty placeholder matching the
// mediationScripts object in the APIM artifact schema.
type MediationScripts struct {
}
// URITemplates describes one resource (URI template) of an API, including its
// HTTP verbs, auth types, throttling configuration and attached scopes.
type URITemplates struct {
	URITemplate string `json:"uriTemplate,omitempty" yaml:"uriTemplate,omitempty"`
	HTTPVerb string `json:"httpVerb,omitempty" yaml:"httpVerb,omitempty"`
	AuthType string `json:"authType,omitempty" yaml:"authType,omitempty"`
	HTTPVerbs []string `json:"httpVerbs,omitempty" yaml:"httpVerbs,omitempty"`
	AuthTypes []string `json:"authTypes,omitempty" yaml:"authTypes,omitempty"`
	ThrottlingConditions []interface{} `json:"throttlingConditions,omitempty" yaml:"throttlingConditions,omitempty"`
	ThrottlingTier string `json:"throttlingTier,omitempty" yaml:"throttlingTier,omitempty"`
	ThrottlingTiers []string `json:"throttlingTiers,omitempty" yaml:"throttlingTiers,omitempty"`
	MediationScript string `json:"mediationScript,omitempty" yaml:"mediationScript,omitempty"`
	Scopes []*Scopes `json:"scopes,omitempty" yaml:"scopes,omitempty"`
	MediationScripts *MediationScripts `json:"mediationScripts,omitempty" yaml:"mediationScripts,omitempty"`
}
// CorsConfiguration holds the CORS settings of an API.
type CorsConfiguration struct {
	CorsConfigurationEnabled bool `json:"corsConfigurationEnabled,omitempty" yaml:"corsConfigurationEnabled,omitempty"`
	AccessControlAllowOrigins []string `json:"accessControlAllowOrigins,omitempty" yaml:"accessControlAllowOrigins,omitempty"`
	AccessControlAllowCredentials bool `json:"accessControlAllowCredentials,omitempty" yaml:"accessControlAllowCredentials,omitempty"`
	AccessControlAllowHeaders []string `json:"accessControlAllowHeaders,omitempty" yaml:"accessControlAllowHeaders,omitempty"`
	AccessControlAllowMethods []string `json:"accessControlAllowMethods,omitempty" yaml:"accessControlAllowMethods,omitempty"`
}
// API is the summary form of an API as returned by the publisher list endpoint.
type API struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Context string `json:"context"`
	Version string `json:"version"`
	Provider string `json:"provider"`
	LifeCycleStatus string `json:"lifeCycleStatus"`
}
// APIListResponse is the paginated response of the publisher API list endpoint.
type APIListResponse struct {
	Count int32 `json:"count"`
	List []API `json:"list"`
}
// RESTConfig collects the endpoints and credential settings needed to talk to
// the APIM REST APIs.
type RESTConfig struct {
	KeyManagerEndpoint string
	PublisherEndpoint string
	TokenEndpoint string
	CredentialsSecretName string
	// SkipVerification disables TLS certificate verification when true.
	SkipVerification bool
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.