text stringlengths 11 4.05M |
|---|
package viper
import(
"github.com/spf13/viper"
)
// defaultViperVal registers fallback configuration values used whenever the
// loaded config file does not provide them.
func defaultViperVal() {
	defaults := map[string]interface{}{
		"serverPort":          ":10000",
		"firestoreAccountKey": "configs/serviceAccountKey.json",
		"host":                "localhost",
		"dbPort":              5432,
		"user":                "postgres",
		"dbname":              "simple-http-server",
	}
	for key, value := range defaults {
		viper.SetDefault(key, value)
	}
}
func ReadConfig(filename string, configPath string) error {
defaultViperVal()
//viper.AutomaticEnv()
viper.AddConfigPath(configPath)
viper.SetConfigName(filename)
err := viper.ReadInConfig() // Find and read the config file
if err != nil { // Handle errors reading the config file
return err
}
return nil
} |
package api
import "github.com/gin-gonic/gin"
// CreateGoods handles requests to create a goods record.
// TODO: not yet implemented — the handler currently does nothing.
func CreateGoods(ctx *gin.Context) {
}

// DeleteGoods handles requests to delete a goods record.
// TODO: not yet implemented.
func DeleteGoods(ctx *gin.Context) {
}

// UpdateGoods handles requests to update a goods record.
// TODO: not yet implemented.
func UpdateGoods(ctx *gin.Context) {
}

// GetGoods handles requests to fetch a single goods record.
// TODO: not yet implemented.
func GetGoods(ctx *gin.Context) {
}

// GetAllGoods handles requests to list all goods records.
// TODO: not yet implemented.
func GetAllGoods(ctx *gin.Context) {
}

// SearchGoods handles requests to search goods records.
// TODO: not yet implemented.
func SearchGoods(ctx *gin.Context) {
}
package dice
import (
"math/rand"
"time"
)
// Roll returns a pseudo-random die roll as a value in [0, n-1].
//
// For n <= 1 it returns 0: a one-sided die has a single outcome (matching
// the previous special case), and non-positive sizes are now treated as
// degenerate instead of panicking — rand.Intn panics when its argument is
// not positive.
func Roll(n int) int {
	if n <= 1 {
		return 0
	}
	return rand.Intn(n)
}
// init seeds the shared math/rand source once at program start.
// UnixNano is used instead of Unix so that two processes started within the
// same second do not produce identical roll sequences (Unix has only
// one-second resolution).
func init() {
	rand.Seed(time.Now().UnixNano())
}
|
package main
// TreeNode is a node in a binary tree.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// pair couples a node with the depth at which it appears during an
// explicit-stack traversal.
type pair struct {
	root  *TreeNode
	depth int
}

// maxDepth returns the number of nodes on the longest root-to-leaf path.
// A nil tree has depth 0.
func maxDepth(root *TreeNode) int {
	if root == nil {
		return 0
	}
	deepest := 0
	// Depth-first traversal with an explicit worklist instead of recursion.
	work := []pair{{root, 1}}
	for len(work) > 0 {
		top := work[len(work)-1]
		work = work[:len(work)-1]
		node, depth := top.root, top.depth
		// Leaves are the only places a root-to-leaf path can end.
		if node.Left == nil && node.Right == nil {
			deepest = max(deepest, depth)
			continue
		}
		if node.Left != nil {
			work = append(work, pair{node.Left, depth + 1})
		}
		if node.Right != nil {
			work = append(work, pair{node.Right, depth + 1})
		}
	}
	return deepest
}

// max returns the larger of x and y.
func max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
|
// flarmport a library for connecting and reading from FLARM serial port.
//
// According to the flarm specification, available on:
// http://www.ediatec.ch/pdf/FLARM%20Data%20Port%20Specification%20v7.00.pdf
//
// A usage example:
//
// flarm, err := flarmport.Open("/dev/ttyS0")
// if err != nil {
// log.Fatal(err)
// }
// defer flarm.Close()
// for flarm.Next() {
// if err := flarm.Err(); err != nil {
// log.Printf("Unknown format: %v", err)
// }
// entry := flarm.Value()
// if entry != nil {
// fmt.Printf("%+v", entry)
// }
// }
package flarmport
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"log"
"math"
"time"
"github.com/adrianmo/go-nmea"
"github.com/jacobsa/go-serial/serial"
)
// defaultTimezone is applied when StationInfo.TimeZone is left nil.
var defaultTimezone = time.UTC

// StationInfo is information about the station where the flarm is located.
type StationInfo struct {
	// Latitude and longitude coordinates of the station.
	Lat, Long float64
	// Altitude of the station, in meters.
	Alt float64
	// Time zone of the station. Default is set to UTC.
	TimeZone *time.Location
	// Mapping of flarm ID to plane sign.
	IDMap map[string]string
}

// MapID translates a flarm ID to its configured plane sign. IDs with no
// mapping, or mapped to the empty string, are returned unchanged.
func (si StationInfo) MapID(id string) string {
	mapped, ok := si.IDMap[id]
	if !ok || mapped == "" {
		return id
	}
	return mapped
}
// Port is a connection to a FLARM serial port. Closing the Port closes the
// underlying serial device (via the embedded io.Closer).
type Port struct {
	scanner *bufio.Scanner // yields CR-delimited records from the device
	io.Closer
	station StationInfo
}
// Open opens a serial connection to a given FLARM port.
//
// The line settings follow the FLARM data-port specification (8 data bits,
// 1 stop bit, no parity); only the baud rate is caller-supplied. The
// returned Port scans the stream in CR-delimited records. If
// station.TimeZone is nil it is defaulted to UTC.
func Open(port string, baudRate uint, station StationInfo) (*Port, error) {
	serial, err := serial.Open(serial.OpenOptions{
		PortName: port,
		// Baud rate from spec: "The baud rate can be configured by commands described in FLARM
		// configuration specification"
		BaudRate: baudRate,
		MinimumReadSize: 1,
		StopBits: 1, // From spec: "1 stop bit".
		DataBits: 8, // From spec: "All ports use 8 data bits".
		ParityMode: serial.PARITY_NONE, // From spec: "no parity".
	})
	if err != nil {
		return nil, fmt.Errorf("failed open serial port: %v", err)
	}
	// Create a scanner that splits on CR.
	s := bufio.NewScanner(serial)
	s.Split(splitCR)
	if station.TimeZone == nil {
		station.TimeZone = defaultTimezone
	}
	return &Port{
		scanner: s,
		Closer:  serial,
		station: station,
	}, nil
}
// Range iterates and parses data from the serial connection, invoking f for
// each successfully parsed entry. It exits when the port is closed (returning
// nil) or when ctx is cancelled (returning ctx.Err()).
func (p *Port) Range(ctx context.Context, f func(Data)) error {
	for ctx.Err() == nil {
		value, ok := p.next()
		if !ok {
			// Scanner exhausted: the port was closed.
			return nil
		}
		// Skip records that did not parse to a Data value, and avoid
		// invoking f after cancellation.
		if value != nil && ctx.Err() == nil {
			f(*value)
		}
	}
	return ctx.Err()
}
// next reads and parses one record from the scanner. It is used by Range and
// exists separately for testing purposes. The boolean result is false once
// the scanner is exhausted (port closed). Lines that fail NMEA parsing, and
// sentence types other than PFLAA, yield (nil, true) and are skipped.
func (p *Port) next() (*Data, bool) {
	if !p.scanner.Scan() {
		// Stop scanning.
		return nil, false
	}
	line := p.scanner.Text()
	value, err := nmea.Parse(line)
	if err != nil {
		// Unknown NMEA, ignore...
		return nil, true
	}
	switch e := value.(type) {
	case TypePFLAA:
		return p.station.processPFLAA(e), true
	}
	return nil, true
}
// splitCR is a bufio.SplitFunc that tokenizes the input on carriage returns
// ('\r'). A final unterminated token is emitted at EOF; an empty buffer at
// EOF ends the scan.
func splitCR(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		// Nothing buffered and no more input: stop scanning.
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, '\r'); i != -1 {
		// A complete CR-terminated record; consume the CR as well.
		return i + 1, data[:i], nil
	}
	if atEOF {
		// Final record with no terminator: return it as-is.
		return len(data), data, nil
	}
	// Request more data.
	return 0, nil, nil
}
// Data is a single parsed FLARM traffic record, shaped for persistence with
// gorm (stored in the "logs" table, see TableName).
type Data struct {
	Name string
	// NOTE(review): gorm struct tags conventionally use "type:float;precision:2"
	// (colon/semicolon form) — confirm this comma/equals tag is parsed as intended.
	Lat, Long float64 `gorm:"type=float,precision=2"`
	// Direction of airplane (In degrees relative to N)
	Dir int
	// Altitude in m
	Alt float64
	// Ground speed in m/s
	GroundSpeed int64
	// Climb rate in m/s
	Climb float64
	// Change to direction in deg/s
	TurnRate float64
	Type string
	Time time.Time
	AlarmLevel int
}

// TableName tells gorm which table Data rows are stored in.
func (o *Data) TableName() string { return "logs" }
// processPFLAA converts a PFLAA traffic sentence into a Data record. The
// flarm ID is translated via MapID; entries whose ID resolves to the empty
// string are dropped (nil is returned). Relative north/east offsets are
// converted to absolute coordinates from the station position, and relative
// vertical to absolute altitude. The record's Time is the current wall-clock
// time in the station's time zone, not a timestamp from the sentence itself.
func (s StationInfo) processPFLAA(e TypePFLAA) *Data {
	id := s.MapID(e.ID)
	if id == "" {
		log.Println("Ignoring empty ID entry.")
		return nil
	}
	lat, long := add(s.Lat, s.Long, float64(e.RelativeNorth), float64(e.RelativeEast))
	return &Data{
		Name: id,
		Lat: lat,
		Long: long,
		Dir: int(e.Track),
		Alt: s.Alt + float64(e.RelativeVertical),
		Climb: e.ClimbRate,
		GroundSpeed: e.GroundSpeed,
		Type: e.AircraftType,
		AlarmLevel: int(e.AlarmLevel),
		Time: time.Now().In(s.TimeZone),
	}
}
func add(lat, lon float64, relN, relE float64) (float64, float64) {
const earthRadius = 6378137
//Coordinate offsets in radians
dLat := relN / earthRadius
dLon := relE / (earthRadius * math.Cos(math.Pi*lat/180.0))
//OffsetPosition, decimal degrees
return lat + dLat*180.0/math.Pi,
lon + dLon*180.0/math.Pi
}
|
package autoscaler
import (
"encoding/base64"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"log"
"math"
"strconv"
"strings"
"time"
)
// EC2ClientIface abstracts the EC2 operations the autoscaler needs so the
// concrete client can be substituted in tests. "SIR" abbreviates
// spot instance request throughout.
type EC2ClientIface interface {
	TerminateInstancesByCount(instances Instances, v InstanceVariety, count int64) error
	TerminateInstances(instances Instances) error
	LaunchSpotInstances(v InstanceVariety, c int64, ami string) error
	ChangeInstances(change map[InstanceVariety]int64, ami string, terminationTarget Instances) error
	DescribeWorkingInstances() (Instances, error)
	DescribePendingAndActiveSIRs() ([]*ec2.SpotInstanceRequest, error)
	PropagateTagsFromSIRsToInstances(reqs []*ec2.SpotInstanceRequest) error
	CreateStatusTagsOfSIRs(reqs []*ec2.SpotInstanceRequest, status string) error
	DescribeSpotPrices(vs []InstanceVariety) (map[InstanceVariety]float64, error)
	DescribeDeadSIRs() ([]*ec2.SpotInstanceRequest, error)
	CancelOpenSIRs(reqs []*ec2.SpotInstanceRequest) error
}
// EC2Client implements EC2ClientIface on top of the AWS SDK EC2 API,
// parameterized by the autoscaler configuration (dry-run flag, tags,
// launch settings).
type EC2Client struct {
	ec2 ec2iface.EC2API
	config *Config
}

// NewEC2Client returns an EC2Client using the given EC2 API and config.
func NewEC2Client(ec2 ec2iface.EC2API, config *Config) *EC2Client {
	return &EC2Client{
		ec2: ec2,
		config: config,
	}
}
// TerminateInstancesByCount terminates up to count instances of variety v,
// chosen in the order they appear in instances. Counts of zero or less
// terminate nothing.
func (c *EC2Client) TerminateInstancesByCount(instances Instances, v InstanceVariety, count int64) error {
	chosen := Instances{}
	for _, inst := range instances {
		if int64(len(chosen)) >= count {
			break
		}
		if inst.Variety() == v {
			chosen = append(chosen, inst)
		}
	}
	return c.TerminateInstances(chosen)
}
// TerminateInstances marks the given instances for termination by applying
// the configured TerminateTags to them via CreateTags; despite the name no
// TerminateInstances API call is made here. NOTE(review): presumably an
// external process reaps instances carrying these tags — confirm against the
// deployment tooling. Honors config.DryRun.
func (c *EC2Client) TerminateInstances(instances Instances) error {
	ids := []*string{}
	for _, i := range instances {
		ids = append(ids, i.InstanceId)
	}
	params := &ec2.CreateTagsInput{
		DryRun: aws.Bool(c.config.DryRun),
		Resources: ids,
		Tags: c.config.TerminateTags.SDK(),
	}
	log.Printf("[DEBUG] terminating: %s", params)
	_, err := c.ec2.CreateTags(params)
	if err != nil {
		return err
	}
	return nil
}
// LaunchSpotInstances requests count spot instances of variety v from the
// given AMI, bidding the configured per-instance-type price, then tags the
// resulting spot instance requests with ownership, pending-status, capacity
// and propagate:* tags (the latter are later copied onto the launched
// instances by PropagateTagsFromSIRsToInstances). Returns an error when no
// bidding price is configured for the instance type. CreateTags is retried
// up to 4 times with exponential backoff (1s, 2s, 4s) — presumably because
// freshly created SIR IDs are not immediately visible to the tagging API;
// confirm. Honors config.DryRun.
func (c *EC2Client) LaunchSpotInstances(v InstanceVariety, count int64, ami string) error {
	securityGroupIds := []*string{}
	for _, i := range c.config.LaunchConfiguration.SecurityGroupIDs {
		securityGroupIds = append(securityGroupIds, aws.String(i))
	}
	biddingPrice, ok := c.config.BiddingPriceByType[v.InstanceType]
	if !ok {
		return fmt.Errorf("Bidding price for %s is unknown", v.InstanceType)
	}
	// User data must be base64-encoded for the EC2 API.
	userData := base64.StdEncoding.EncodeToString([]byte(c.config.LaunchConfiguration.UserData))
	requestSpotInstancesParams := &ec2.RequestSpotInstancesInput{
		DryRun: aws.Bool(c.config.DryRun),
		SpotPrice: aws.String(fmt.Sprintf("%f", biddingPrice)),
		InstanceCount: aws.Int64(count),
		LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
			ImageId: aws.String(ami),
			InstanceType: aws.String(v.InstanceType),
			KeyName: aws.String(c.config.LaunchConfiguration.KeyName),
			SecurityGroupIds: securityGroupIds,
			SubnetId: aws.String(v.Subnet.SubnetID),
			UserData: aws.String(userData),
			IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
				Name: aws.String(c.config.LaunchConfiguration.IAMInstanceProfileName),
			},
			BlockDeviceMappings: c.config.LaunchConfiguration.SDKBlockDeviceMappings(),
		},
	}
	log.Printf("[INFO] requesting spot instances: %s", requestSpotInstancesParams)
	resp, err := c.ec2.RequestSpotInstances(requestSpotInstancesParams)
	if err != nil {
		return err
	}
	ids := []*string{}
	for _, req := range resp.SpotInstanceRequests {
		ids = append(ids, req.SpotInstanceRequestId)
	}
	capacity, err := v.Capacity()
	if err != nil {
		return err
	}
	tags := []*ec2.Tag{
		{Key: aws.String("RequestedBy"), Value: aws.String(c.config.FullAutoscalerID())},
		{Key: aws.String("spotscaler:Status"), Value: aws.String("pending")},
		{Key: aws.String(fmt.Sprintf("propagate:%s", c.config.CapacityTagKey)), Value: aws.String(fmt.Sprint(capacity))},
		{Key: aws.String("propagate:ManagedBy"), Value: aws.String(c.config.FullAutoscalerID())},
	}
	for _, t := range c.config.InstanceTags {
		tags = append(tags, &ec2.Tag{Key: aws.String(fmt.Sprintf("propagate:%s", t.Key)), Value: aws.String(t.Value)})
	}
	createTagsParams := &ec2.CreateTagsInput{
		DryRun: aws.Bool(c.config.DryRun),
		Resources: ids,
		Tags: tags,
	}
	retry := 4
	for i := 0; i < retry; i++ {
		_, err = c.ec2.CreateTags(createTagsParams)
		if err == nil {
			break
		}
		if i < retry-1 {
			// Exponential backoff: 1, 2, 4 seconds between attempts.
			sleepSec := int(math.Pow(2, float64(i)))
			log.Printf("[INFO] CreateTags failed, will retry after %d sec: %s", sleepSec, err)
			<-time.After(time.Duration(sleepSec) * time.Second)
		} else {
			// Last attempt failed: surface the error.
			return err
		}
	}
	return nil
}
// ChangeInstances applies a per-variety capacity delta: a positive count
// launches that many spot instances from the given AMI, a negative count
// terminates that many instances chosen from terminationTarget. Processing
// stops at the first error.
func (c *EC2Client) ChangeInstances(change map[InstanceVariety]int64, ami string, terminationTarget Instances) error {
	for variety, delta := range change {
		switch {
		case delta > 0:
			if err := c.LaunchSpotInstances(variety, delta, ami); err != nil {
				return err
			}
		case delta < 0:
			if err := c.TerminateInstancesByCount(terminationTarget, variety, -delta); err != nil {
				return err
			}
		}
	}
	return nil
}
// DescribeWorkingInstances lists all instances in the "running" state that
// match the configured working-instance filters, paging through
// DescribeInstances and flattening reservations into a single Instances
// collection.
func (c *EC2Client) DescribeWorkingInstances() (Instances, error) {
	filters := append(
		c.config.WorkingInstanceFilters.SDK(),
		&ec2.Filter{Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
	)
	params := &ec2.DescribeInstancesInput{
		Filters: filters,
	}
	instances := []*ec2.Instance{}
	err := c.ec2.DescribeInstancesPages(
		params,
		func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
			for _, res := range page.Reservations {
				instances = append(instances, res.Instances...)
			}
			// Always continue to the next page.
			return true
		})
	if err != nil {
		return nil, err
	}
	ret := Instances{}
	for _, i := range instances {
		ret = append(ret, NewInstanceFromSDK(i))
	}
	return ret, nil
}
// DescribePendingAndActiveSIRs returns spot instance requests that are in
// the "active" state, were created by this autoscaler (tag RequestedBy), and
// still carry the "pending" spotscaler:Status tag — i.e. requests whose
// instance tags have not yet been propagated/finalized.
func (c *EC2Client) DescribePendingAndActiveSIRs() ([]*ec2.SpotInstanceRequest, error) {
	params := &ec2.DescribeSpotInstanceRequestsInput{
		Filters: []*ec2.Filter{
			{
				Name: aws.String("state"),
				Values: []*string{aws.String("active")},
			}, {
				Name: aws.String("tag:RequestedBy"),
				Values: []*string{aws.String(c.config.FullAutoscalerID())},
			}, {
				Name: aws.String("tag:spotscaler:Status"),
				Values: []*string{aws.String("pending")},
			},
		},
	}
	resp, err := c.ec2.DescribeSpotInstanceRequests(params)
	if err != nil {
		return nil, err
	}
	return resp.SpotInstanceRequests, nil
}
// PropagateTagsFromSIRsToInstances copies every "propagate:"-prefixed tag on
// each spot instance request onto the instance it launched, with the prefix
// stripped. Honors config.DryRun; stops at the first CreateTags error.
func (c *EC2Client) PropagateTagsFromSIRsToInstances(reqs []*ec2.SpotInstanceRequest) error {
	for _, req := range reqs {
		tags := []*ec2.Tag{}
		for _, t := range req.Tags {
			if strings.HasPrefix(*t.Key, "propagate:") {
				// key is a fresh variable each iteration, so taking its
				// address below is safe.
				key := strings.TrimPrefix(*t.Key, "propagate:")
				tags = append(tags, &ec2.Tag{Key: &key, Value: t.Value})
			}
		}
		createTagsParams := &ec2.CreateTagsInput{
			DryRun: aws.Bool(c.config.DryRun),
			Resources: []*string{req.InstanceId},
			Tags: tags,
		}
		log.Printf("[DEBUG] CreateTags: %s", createTagsParams)
		_, err := c.ec2.CreateTags(createTagsParams)
		if err != nil {
			return err
		}
	}
	return nil
}
// CreateStatusTagsOfSIRs sets the "spotscaler:Status" tag on all given spot
// instance requests to the provided status value in one CreateTags call.
// Honors config.DryRun.
func (c *EC2Client) CreateStatusTagsOfSIRs(reqs []*ec2.SpotInstanceRequest, status string) error {
	ids := []*string{}
	for _, req := range reqs {
		ids = append(ids, req.SpotInstanceRequestId)
	}
	createTagsParams := &ec2.CreateTagsInput{
		DryRun: aws.Bool(c.config.DryRun),
		Resources: ids,
		Tags: []*ec2.Tag{
			{Key: aws.String("spotscaler:Status"), Value: aws.String(status)},
		},
	}
	log.Printf("[DEBUG] CreateTags: %s", createTagsParams)
	_, err := c.ec2.CreateTags(createTagsParams)
	if err != nil {
		return err
	}
	return nil
}
// DescribeSpotPrices returns the latest known spot price for each requested
// variety. Queries are grouped by availability zone so one price-history
// request covers all instance types in that zone; paging stops early once
// every variety in the zone has a price.
//
// NOTE(review): latestTimestamp/latestPrice are reset on every page, and a
// variety is marked "found" as soon as any page yields a non-zero price for
// it — so "latest" is only the latest entry within the first page mentioning
// the variety. Confirm that the API's newest-first ordering makes this
// equivalent to a true latest-across-pages search.
func (c *EC2Client) DescribeSpotPrices(vs []InstanceVariety) (map[InstanceVariety]float64, error) {
	res := map[InstanceVariety]float64{}
	varietiesByAZ := map[string][]InstanceVariety{}
	for _, v := range vs {
		varietiesByAZ[v.Subnet.AvailabilityZone] = append(varietiesByAZ[v.Subnet.AvailabilityZone], v)
	}
	for az, vs := range varietiesByAZ {
		instanceTypes := []*string{}
		for _, v := range vs {
			instanceTypes = append(instanceTypes, aws.String(v.InstanceType))
		}
		input := &ec2.DescribeSpotPriceHistoryInput{
			AvailabilityZone: aws.String(az),
			InstanceTypes: instanceTypes,
			ProductDescriptions: []*string{aws.String("Linux/UNIX (Amazon VPC)")}, // TODO: make configurable
		}
		found := map[InstanceVariety]bool{}
		var errInside error
		pageIndex := 1
		err := c.ec2.DescribeSpotPriceHistoryPages(input, func(page *ec2.DescribeSpotPriceHistoryOutput, lastPage bool) bool {
			log.Printf("[TRACE] DescribeSpotPriceHistory page %d", pageIndex)
			for _, v := range vs {
				if f := found[v]; f {
					// already found
					continue
				}
				latestTimestamp := time.Time{}
				latestPrice := 0.0
				for _, p := range page.SpotPriceHistory {
					// Keep the newest entry on this page matching the variety.
					if latestTimestamp.Before(*p.Timestamp) && *p.InstanceType == v.InstanceType && *p.AvailabilityZone == v.Subnet.AvailabilityZone {
						latestTimestamp = *p.Timestamp
						f, err := strconv.ParseFloat(*p.SpotPrice, 64)
						if err != nil {
							// Abort paging; the error is surfaced after the loop.
							errInside = err
							return false
						}
						latestPrice = f
					}
				}
				if latestPrice != 0.0 {
					// found
					res[v] = latestPrice
					found[v] = true
				}
			}
			pageIndex++
			// Keep paging only while some variety still lacks a price.
			return len(found) < len(vs)
		})
		if errInside != nil {
			return nil, errInside
		}
		if err != nil {
			return nil, err
		}
	}
	return res, nil
}
// DescribeDeadSIRs returns this autoscaler's spot instance requests that are
// still in the "open" state more than 5 minutes after their creation time —
// candidates for cancellation via CancelOpenSIRs.
func (c *EC2Client) DescribeDeadSIRs() ([]*ec2.SpotInstanceRequest, error) {
	params := &ec2.DescribeSpotInstanceRequestsInput{
		Filters: []*ec2.Filter{
			{
				Name: aws.String("tag:RequestedBy"),
				Values: []*string{aws.String(c.config.FullAutoscalerID())},
			}, {
				Name: aws.String("state"),
				Values: []*string{aws.String("open")},
			},
		},
	}
	resp, err := c.ec2.DescribeSpotInstanceRequests(params)
	if err != nil {
		return nil, err
	}
	deadSIRs := []*ec2.SpotInstanceRequest{}
	for _, req := range resp.SpotInstanceRequests {
		// Older than 5 minutes and still open.
		if time.Now().Add(-5 * time.Minute).After(*req.CreateTime) {
			deadSIRs = append(deadSIRs, req)
		}
	}
	return deadSIRs, nil
}
// CancelOpenSIRs cancels the subset of reqs that are still in the "open"
// state; it is a no-op when none are open. Honors config.DryRun.
func (c *EC2Client) CancelOpenSIRs(reqs []*ec2.SpotInstanceRequest) error {
	ids := []*string{}
	for _, req := range reqs {
		if *req.State == "open" {
			ids = append(ids, req.SpotInstanceRequestId)
		}
	}
	if len(ids) == 0 {
		// Nothing to cancel.
		return nil
	}
	cancelParams := &ec2.CancelSpotInstanceRequestsInput{
		DryRun: aws.Bool(c.config.DryRun),
		SpotInstanceRequestIds: ids,
	}
	log.Printf("[DEBUG] CancelSpotInstanceRequests: %s", cancelParams)
	_, err := c.ec2.CancelSpotInstanceRequests(cancelParams)
	if err != nil {
		return err
	}
	return nil
}
|
package main
import (
"ims_api_connector"
"fmt"
)
// main demonstrates the IMS API connector: it authenticates against a
// hard-coded server and prints the retrieved asset list.
func main() {
	connector := ims_api_connector.New("some_username", "some_password", "some.server.com:8000", 5)
	// NOTE(review): any result of Authenticate is ignored — confirm whether it
	// can fail and whether failure should abort the program.
	connector.Authenticate()
	// NOTE(review): the error from GetAssets is discarded; assets may be the
	// zero value when the call fails.
	assets, _ := connector.GetAssets()
	fmt.Println(assets)
}
|
package ghclient
import (
"context"
"os"
"path/filepath"
"testing"
"time"
"gopkg.in/src-d/go-billy.v4/osfs"
git "gopkg.in/src-d/go-git.v4"
"github.com/dollarshaveclub/acyl/pkg/memfs"
billy "gopkg.in/src-d/go-billy.v4"
gitplumb "gopkg.in/src-d/go-git.v4/plumbing"
gitcache "gopkg.in/src-d/go-git.v4/plumbing/cache"
gitobj "gopkg.in/src-d/go-git.v4/plumbing/object"
gitfs "gopkg.in/src-d/go-git.v4/storage/filesystem"
)
// localTestRepo creates a new repo in an in-memory filesystem (under "repo")
// with the provided branches and returns the filesystem plus the commit
// hashes it created: the initial commit, followed by one extra commit made on
// the first listed branch. An uncommitted file (something/bar.txt) is left in
// the working tree so callers can exercise working-tree reads.
func localTestRepo(t *testing.T, branches []string) (billy.Filesystem, []string) {
	fs := memfs.New()
	if err := fs.MkdirAll("repo", os.ModeDir|os.ModePerm); err != nil {
		t.Fatalf("error in mkdir repo: %v", err)
	}
	fs2, err := fs.Chroot("repo")
	if err != nil {
		t.Fatalf("error in chroot: %v", err)
	}
	if err := fs2.MkdirAll(".git", os.ModeDir|os.ModePerm); err != nil {
		t.Fatalf("error in mkdir .git: %v", err)
	}
	dot, _ := fs2.Chroot(".git")
	repo, err := git.Init(gitfs.NewStorage(dot, gitcache.NewObjectLRUDefault()), fs2)
	if err != nil {
		t.Fatalf("error initializing repo: %v", err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		t.Fatalf("error getting working tree: %v", err)
	}
	// First commit: something/foo.txt with known contents.
	fs2.MkdirAll("something", os.ModeDir|os.ModePerm)
	f, err := fs2.Create("something/foo.txt")
	if err != nil {
		t.Fatalf("error creating file 1: %v", err)
	}
	f.Write([]byte(`omg12345`))
	f.Close()
	if _, err := wt.Add("something/foo.txt"); err != nil {
		t.Fatalf("error adding changed file: %v", err)
	}
	h1, err := wt.Commit("first commit", &git.CommitOptions{Author: &gitobj.Signature{Name: "someguy", Email: "asdf@asdf.com", When: time.Now().UTC()}})
	if err != nil {
		t.Fatalf("error commiting 1: %v", err)
	}
	out := []string{h1.String()}
	for i, b := range branches {
		co := &git.CheckoutOptions{Branch: gitplumb.NewBranchReferenceName(b), Create: true}
		if err := wt.Checkout(co); err != nil {
			t.Fatalf("error checking out branch: %v: %v", b, err)
		}
		// Only the first branch receives an additional commit (two files
		// under somethingelse/); the rest just exist as refs.
		if i == 0 {
			fs2.MkdirAll("somethingelse", os.ModeDir|os.ModePerm)
			f, err := fs2.Create("somethingelse/bar.txt")
			if err != nil {
				t.Fatalf("error creating file 2: %v", err)
			}
			f.Write([]byte(`qwerty9999`))
			f.Close()
			f, err = fs2.Create("somethingelse/asdf.txt")
			if err != nil {
				t.Fatalf("error creating file 3: %v", err)
			}
			f.Write([]byte(`00000000`))
			f.Close()
			if _, err := wt.Add("somethingelse/"); err != nil {
				t.Fatalf("error adding changed files 2: %v", err)
			}
			h2, err := wt.Commit("another commit", &git.CommitOptions{Author: &gitobj.Signature{Name: "someguy", Email: "asdf@asdf.com", When: time.Now().UTC()}})
			if err != nil {
				t.Fatalf("error commiting 2: %v", err)
			}
			out = append(out, h2.String())
		}
	}
	// add a file but don't commit it
	f, err = fs2.Create("something/bar.txt")
	if err != nil {
		t.Fatalf("error creating extra file: %v", err)
	}
	f.Write([]byte(`asdf`))
	f.Close()
	return fs, out
}
// TestLocalWrapperGetBranches verifies that GetBranches reads branches from
// the local repo for mapped repo paths (initial branch + "foo" + "bar" = 3)
// and falls back to the backend client for unmapped repos.
func TestLocalWrapperGetBranches(t *testing.T) {
	fs, _ := localTestRepo(t, []string{"foo", "bar"})
	var backendExecuted bool
	lw := &LocalWrapper{
		FSFunc: func(path string) billy.Filesystem {
			rfs, _ := fs.Chroot(path)
			return rfs
		},
		RepoPathMap: map[string]string{"some/repo": "repo"},
		Backend: &FakeRepoClient{
			GetBranchesFunc: func(context.Context, string) ([]BranchInfo, error) {
				backendExecuted = true
				return []BranchInfo{}, nil
			},
		},
	}
	bl, err := lw.GetBranches(context.Background(), "some/repo")
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if len(bl) != 3 {
		t.Fatalf("bad count: %v", len(bl))
	}
	// Unmapped repo must route to the backend.
	_, err = lw.GetBranches(context.Background(), "some/other-repo")
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if !backendExecuted {
		t.Fatalf("backend should have been executed")
	}
}
// TestLocalWrapperGetBranch verifies that GetBranch resolves a named branch
// from the local repo for mapped repo paths and falls back to the backend
// client for unmapped repos.
func TestLocalWrapperGetBranch(t *testing.T) {
	fs, _ := localTestRepo(t, []string{"foo", "bar"})
	var backendExecuted bool
	lw := &LocalWrapper{
		FSFunc: func(path string) billy.Filesystem {
			rfs, _ := fs.Chroot(path)
			return rfs
		},
		RepoPathMap: map[string]string{"some/repo": "repo"},
		Backend: &FakeRepoClient{
			GetBranchFunc: func(context.Context, string, string) (BranchInfo, error) {
				backendExecuted = true
				return BranchInfo{}, nil
			},
		},
	}
	bi, err := lw.GetBranch(context.Background(), "some/repo", "foo")
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if bi.Name != "foo" {
		t.Errorf("bad branch name: %v", bi.Name)
	}
	// Unmapped repo must route to the backend.
	_, err = lw.GetBranch(context.Background(), "some/other-repo", "foo")
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if !backendExecuted {
		t.Fatalf("backend should have been executed")
	}
}
// TestLocalWrapperGetCommitMessage verifies that GetCommitMessage reads the
// message of a known local commit for mapped repo paths and falls back to
// the backend client for unmapped repos.
func TestLocalWrapperGetCommitMessage(t *testing.T) {
	fs, commits := localTestRepo(t, []string{"foo", "bar"})
	var backendExecuted bool
	lw := &LocalWrapper{
		FSFunc: func(path string) billy.Filesystem {
			rfs, _ := fs.Chroot(path)
			return rfs
		},
		RepoPathMap: map[string]string{"some/repo": "repo"},
		Backend: &FakeRepoClient{
			GetCommitMessageFunc: func(context.Context, string, string) (string, error) {
				backendExecuted = true
				return "", nil
			},
		},
	}
	// commits[0] is the initial commit created by localTestRepo.
	msg, err := lw.GetCommitMessage(context.Background(), "some/repo", commits[0])
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if msg != "first commit" {
		t.Errorf("bad commit msg: %v", msg)
	}
	// Unmapped repo must route to the backend.
	_, err = lw.GetCommitMessage(context.Background(), "some/other-repo", "asdf")
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if !backendExecuted {
		t.Fatalf("backend should have been executed")
	}
}
// TestLocalWrapperGetFileContents verifies that GetFileContents reads a
// committed file's contents from the local repo for mapped repo paths and
// falls back to the backend client for unmapped repos.
func TestLocalWrapperGetFileContents(t *testing.T) {
	fs, commits := localTestRepo(t, []string{"foo", "bar"})
	var backendExecuted bool
	lw := &LocalWrapper{
		FSFunc: func(path string) billy.Filesystem {
			rfs, _ := fs.Chroot(path)
			return rfs
		},
		RepoPathMap: map[string]string{"some/repo": "repo"},
		Backend: &FakeRepoClient{
			GetFileContentsFunc: func(context.Context, string, string, string) ([]byte, error) {
				backendExecuted = true
				return nil, nil
			},
		},
	}
	contents, err := lw.GetFileContents(context.Background(), "some/repo", "something/foo.txt", commits[0])
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	// "omg12345" is what localTestRepo wrote into something/foo.txt.
	if string(contents) != "omg12345" {
		t.Errorf("bad contents: %v", string(contents))
	}
	// Unmapped repo must route to the backend.
	_, err = lw.GetFileContents(context.Background(), "some/other-repo", "something/foo.txt", commits[0])
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if !backendExecuted {
		t.Fatalf("backend should have been executed")
	}
}
// TestLocalWrapperGetFileContentsTriggeringRepo verifies that when a repo is
// listed in WorkingTreeRepos, GetFileContents reads from the working tree —
// including uncommitted files — and never consults the backend.
func TestLocalWrapperGetFileContentsTriggeringRepo(t *testing.T) {
	fs, commits := localTestRepo(t, []string{"foo", "bar"})
	var backendExecuted bool
	lw := &LocalWrapper{
		WorkingTreeRepos: []string{"some/repo"},
		FSFunc: func(path string) billy.Filesystem {
			rfs, _ := fs.Chroot(path)
			return rfs
		},
		RepoPathMap: map[string]string{"some/repo": "repo"},
		Backend: &FakeRepoClient{
			GetFileContentsFunc: func(context.Context, string, string, string) ([]byte, error) {
				backendExecuted = true
				return nil, nil
			},
		},
	}
	// we should be able to read the uncommitted file
	contents, err := lw.GetFileContents(context.Background(), "some/repo", "something/bar.txt", commits[0])
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if string(contents) != "asdf" {
		t.Errorf("bad contents: %v", string(contents))
	}
	if backendExecuted {
		t.Fatalf("backend should not have been executed")
	}
}
// TestLocalWrapperGetDirectoryContents verifies that GetDirectoryContents
// lists the files committed on branch "foo" (somethingelse/bar.txt and
// somethingelse/asdf.txt) for mapped repo paths, and falls back to the
// backend client for unmapped repos.
func TestLocalWrapperGetDirectoryContents(t *testing.T) {
	fs, _ := localTestRepo(t, []string{"foo", "bar"})
	var backendExecuted bool
	lw := &LocalWrapper{
		FSFunc: func(path string) billy.Filesystem {
			rfs, _ := fs.Chroot(path)
			return rfs
		},
		RepoPathMap: map[string]string{"some/repo": "repo"},
		Backend: &FakeRepoClient{
			GetDirectoryContentsFunc: func(context.Context, string, string, string) (map[string]FileContents, error) {
				backendExecuted = true
				return nil, nil
			},
		},
	}
	dc, err := lw.GetDirectoryContents(context.Background(), "some/repo", "somethingelse", "foo")
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if len(dc) != 2 {
		t.Fatalf("bad length: %v", len(dc))
	}
	bar, ok := dc["bar.txt"]
	if !ok {
		t.Fatalf("bar.txt not found")
	}
	if string(bar.Contents) != "qwerty9999" {
		t.Fatalf("bad contents for bar: %v", string(bar.Contents))
	}
	asdf, ok := dc["asdf.txt"]
	if !ok {
		t.Fatalf("asdf.txt not found")
	}
	if string(asdf.Contents) != "00000000" {
		t.Fatalf("bad contents for asdf: %v", string(asdf.Contents))
	}
	// Unmapped repo must route to the backend.
	_, err = lw.GetDirectoryContents(context.Background(), "some/other-repo", "somethingelse", "foo")
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if !backendExecuted {
		t.Fatalf("backend should have been executed")
	}
}
// TestLocalWrapperGetDirectoryContentsTriggeringRepo verifies that when a
// repo is listed in WorkingTreeRepos, GetDirectoryContents reads the working
// tree — including the uncommitted something/bar.txt — and never consults
// the backend.
func TestLocalWrapperGetDirectoryContentsTriggeringRepo(t *testing.T) {
	fs, commits := localTestRepo(t, []string{"foo", "bar"})
	var backendExecuted bool
	lw := &LocalWrapper{
		WorkingTreeRepos: []string{"some/repo"},
		FSFunc: func(path string) billy.Filesystem {
			rfs, _ := fs.Chroot(path)
			return rfs
		},
		RepoPathMap: map[string]string{"some/repo": "repo"},
		Backend: &FakeRepoClient{
			GetFileContentsFunc: func(context.Context, string, string, string) ([]byte, error) {
				backendExecuted = true
				return nil, nil
			},
		},
	}
	// we should be able to read the uncommitted files
	dc, err := lw.GetDirectoryContents(context.Background(), "some/repo", "something", commits[0])
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if len(dc) != 2 {
		t.Fatalf("bad length: %v", len(dc))
	}
	foo, ok := dc["foo.txt"]
	if !ok {
		t.Fatalf("foo.txt not found")
	}
	if string(foo.Contents) != "omg12345" {
		t.Fatalf("bad contents for foo: %v", string(foo.Contents))
	}
	bar, ok := dc["bar.txt"]
	if !ok {
		t.Fatalf("bar.txt not found")
	}
	if string(bar.Contents) != "asdf" {
		t.Fatalf("bad contents for bar: %v", string(bar.Contents))
	}
	if backendExecuted {
		t.Fatalf("backend should not have been executed")
	}
}
// TestLocalWrapperThisAcylRepo runs the LocalWrapper against the actual acyl
// checkout on disk (two directories up). It is opt-in: skipped unless the
// TEST_ACYL_REPO environment variable is set, because it depends on a
// specific commit (516d472…) existing in the local history.
func TestLocalWrapperThisAcylRepo(t *testing.T) {
	if os.Getenv("TEST_ACYL_REPO") == "" {
		t.SkipNow()
	}
	p, err := filepath.Abs("../..")
	if err != nil {
		t.Fatalf("error making path absolute: %v", err)
	}
	lw := &LocalWrapper{
		FSFunc: func(path string) billy.Filesystem { return osfs.New(path) },
		RepoPathMap: map[string]string{"dollarshaveclub/acyl": p},
	}
	bl, err := lw.GetBranches(context.Background(), "dollarshaveclub/acyl")
	if err != nil {
		t.Fatalf("branches should have succeeded: %v", err)
	}
	t.Logf("# branches: %v\n", len(bl))
	// arbitrary commit SHA
	msg, err := lw.GetCommitMessage(context.Background(), "dollarshaveclub/acyl", "516d472b0ae6292fcd6b07350734ca0268747659")
	if err != nil {
		t.Fatalf("commit msg should have succeeded: %v", err)
	}
	if msg != "only load db secrets, fix error msg\n" {
		t.Fatalf("bad commit msg: %v", msg)
	}
	lw.WorkingTreeRepos = []string{"dollarshaveclub/acyl"}
	contents, err := lw.GetDirectoryContents(context.Background(), "dollarshaveclub/acyl", "", "516d472b0ae6292fcd6b07350734ca0268747659")
	if err != nil {
		t.Fatalf("get dir contents should have succeeded: %v", err)
	}
	t.Logf("contents file count: %v", len(contents))
}
|
package main
import (
"context"
"log"
"time"
"github.com/go-redis/redis/v8"
)
// testGetAndSetInteger round-trips an integer through Redis SET/GET under
// the key "dicky" with a 10-minute TTL, logging the value and its Go type
// (GET returns a string). The process exits if SET fails; a GET failure is
// only logged.
func testGetAndSetInteger(redisClient *redis.Client) {
	_, err := redisClient.Set(context.Background(), "dicky", 10, 10*time.Minute).Result()
	if err != nil {
		log.Fatalf("error set in cache, err: %v", err)
	}
	num, err := redisClient.Get(context.Background(), "dicky").Result()
	log.Printf("num: %v type: %T, err: %v", num, num, err)
}
// main demonstrates go-redis usage against a local server on :6379: hash
// operations (HSET/HGET with Int64 conversion), a transactional pipeline
// running INCR + EXPIRE together, and the SET/GET helper above.
func main() {
	option := &redis.Options{
		Addr: ":6379",
	}
	rd := redis.NewClient(option)
	// Set two hash fields at once; the result is logged, not checked.
	resSet := rd.HSet(context.Background(), "keyhash", "hello", "a123", "hello1", "233")
	log.Println("resSet:", resSet)
	res := rd.HGet(context.Background(), "keyhash", "hello")
	//res1 := rd.HGet(context.Background(), "keyhash", "hello1")
	// "a123" is not numeric, so Int64 is expected to return an error here.
	resInt, err := res.Int64()
	log.Printf("res: %v %v", resInt, err)
	// redis transactions
	pipe := rd.TxPipeline()
	incr := pipe.Incr(context.Background(), "counter")
	pipe.Expire(context.Background(), "counter", 5*time.Second) //it will expire after 5 seconds this counter var is not used anymore
	// Exec sends both queued commands atomically; incr.Val() is populated
	// only after Exec returns.
	_, err = pipe.Exec(context.Background())
	//time.Sleep(2 * time.Second)
	log.Printf("err: %v incr val: %v", err, incr.Val())
	testGetAndSetInteger(rd)
}
|
package routes
import (
"fmt"
"net/http"
"github.com/davelaursen/idealogue-go/Godeps/_workspace/src/github.com/gorilla/mux"
"github.com/davelaursen/idealogue-go/services"
)
// RegisterTagRoutes registers the /api/tags endpoints (GET list, PUT upsert,
// DELETE) with the router. Every handler is gated on u.checkAccess: the
// wrapped handler runs only when the request is admitted.
func RegisterTagRoutes(r *mux.Router, enc Encoder, tagSvc services.TagSvc) {
	u := util{}
	r.HandleFunc("/api/tags", func(w http.ResponseWriter, r *http.Request) {
		if u.checkAccess(w, r) {
			GetTags(w, r, enc, tagSvc)
		}
	}).Methods("GET")
	r.HandleFunc("/api/tags/{tag}", func(w http.ResponseWriter, r *http.Request) {
		if u.checkAccess(w, r) {
			PutTag(w, r, enc, tagSvc, mux.Vars(r))
		}
	}).Methods("PUT")
	r.HandleFunc("/api/tags/{tag}", func(w http.ResponseWriter, r *http.Request) {
		if u.checkAccess(w, r) {
			DeleteTag(w, enc, tagSvc, mux.Vars(r))
		}
	}).Methods("DELETE")
}
// GetTags returns a list of tags, written as an encoded multi-string with a
// 200 status. A service failure panics — NOTE(review): presumably recovered
// by middleware upstream; confirm. r is unused but kept for
// handler-signature symmetry with the other routes.
func GetTags(w http.ResponseWriter, r *http.Request, enc Encoder, svc services.TagSvc) {
	tags, err := svc.GetAll()
	if err != nil {
		panic(err)
	}
	util{}.writeResponse(w, http.StatusOK, enc.EncodeMultiString(tags...))
}
// PutTag saves the tag named in the URL params. A services.ErrBadData error
// maps to a 400 response; any other service error panics. On success the
// encoded tag is written with a 200 status.
func PutTag(w http.ResponseWriter, r *http.Request, enc Encoder, svc services.TagSvc, params Params) {
	tag := params["tag"]
	err := svc.Save(tag)
	if err != nil {
		switch err.Type {
		case services.ErrBadData:
			util{}.badRequest(w, enc, err.Error())
			return
		default:
			panic(err)
		}
	}
	util{}.writeResponse(w, http.StatusOK, enc.Encode(tag))
}
// DeleteTag removes the tag named in the URL params. A services.ErrNotFound
// error maps to a 404 response; any other service error panics. On success
// an empty 204 response is written.
func DeleteTag(w http.ResponseWriter, enc Encoder, svc services.TagSvc, params Params) {
	tag := params["tag"]
	err := svc.Delete(tag)
	if err != nil {
		switch err.Type {
		case services.ErrNotFound:
			util{}.notFound(w, enc, fmt.Sprintf("the tag %s does not exist", tag))
			return
		default:
			panic(err)
		}
	}
	util{}.writeResponse(w, http.StatusNoContent, "")
}
|
package web
import (
"flag"
"fmt"
"github.com/oceango/di"
"github.com/oceango/router"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"io/ioutil"
"log"
"net/http"
"os"
)
// Application bundles the HTTP listen address (":<port>" form), the working
// directory used for config/banner lookup, and the request router.
type Application struct {
	port string
	workDir string
	router *router.Router
}
// NewApplication builds an Application bound to the viper-configured
// "server.port", using the process working directory as the work dir.
func NewApplication(router *router.Router) *Application {
	workDir, _ := os.Getwd()
	port := viper.GetString("server.port")
	return &Application{
		port:    ":" + port,
		workDir: workDir,
		router:  router,
	}
}
// Run prints the startup banner, registers the application in the DI
// container, then blocks serving HTTP on the configured port. It panics if
// the listener fails.
func (a *Application) Run() {
	printBanner(a.workDir)
	log.Print("application starting...")
	initContainer(a)
	log.Println("application started and listen on port:" + a.port)
	panic(http.ListenAndServe(a.port, a.router))
}
// BuildConfiguration wires command-line flags (-mode, -port) into viper and
// loads the environment-specific YAML config file
// ("application-<mode>.yml", mode defaulting to "dev") from ./config under
// the working directory. It panics if the file cannot be read.
func BuildConfiguration() {
	workDir, _ := os.Getwd()
	mode := flag.String("mode", "dev", "application environment mode")
	flag.Int("port", 1016, "application port")
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()
	// NOTE(review): BindPFlags can return an error; it is ignored here.
	viper.BindPFlags(pflag.CommandLine)
	// flag.String is documented to always return a non-nil pointer, so the
	// former `if mode != nil` guard was always true and the unconditional
	// SetConfigName("application") preceding it was dead code — the
	// mode-qualified name always won. Set it directly.
	viper.SetConfigName("application-" + *mode) // name of config file (without extension)
	viper.SetConfigType("yml") // REQUIRED if the config file does not have the extension in the name
	viper.AddConfigPath(workDir + "/config") // optionally look for config in the working directory
	if err := viper.ReadInConfig(); err != nil {
		panic(err)
	}
}
// initContainer registers application-wide singletons in the DI container.
// Only the Application itself is registered today; configuration and DB
// injection are sketched in the commented-out blocks below.
func initContainer(a *Application) {
	// inject configuration
	//di.Singleton(func() {
	//
	//	return
	//})
	// inject application
	di.Singleton(func() *Application{
		return a
	})
	// inject db
	//di.Singleton(func() *gorm.DB {
	//	return db.GetDb()
	//})
}
// printBanner prints the ASCII startup banner to stdout. If a readable
// config/banner.txt exists under workDir, its contents replace the built-in
// default banner; read errors silently fall back to the default.
func printBanner(workDir string) {
	banner := `
 ___ ___ ___ ___ ___ ___ ___
 /\ \ /\ \ /\ \ /\ \ /\__\ /\ \ /\ \
 /::\ \ /::\ \ /::\ \ /::\ \ /:| _|_ /::\ \ /::\ \
 /:/\:\__/:/\:\__/::\:\__/::\:\__/::|/\__/:/\:\__/:/\:\__\
 \:\/:/ \:\ \/__\:\:\/ \/\::/ \/|::/ \:\:\/__\:\/:/ / http://www.oceango.tech
 \::/ / \:\__\ \:\/ / /:/ / |:/ / \::/ / \::/ /
 \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ \/__/
 `
	filename := workDir + "/config/banner.txt"
	if fileExists(filename) {
		file, err := ioutil.ReadFile(filename)
		if err == nil {
			banner = string(file)
		}
	}
	fmt.Println(banner)
}
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
|
package main
import (
"context"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/lemon-cloud-service/lemon-cloud-user/lemon-cloud-user-common/dto"
lemon_cloud_user_sdk "github.com/lemon-cloud-service/lemon-cloud-user/lemon-cloud-user-sdk"
client "github.com/lemon-cloud-service/lemon-cloud-user/lemon-cloud-user-sdk/client"
"google.golang.org/grpc"
)
// main exercises the lemon-cloud-user SDK login flow and then attempts the
// same RPC over a raw gRPC connection.
//
// NOTE(review): this function does not compile as written — `in`, `out`
// and `opts` on the Invoke call are undeclared, and proto.Marshal() is
// called without arguments. It reads like scratch/experiment code; confirm
// before relying on it.
func main() {
	lemon_cloud_user_sdk.InitSdk("localhost:33385")
	rsp, err := client.GetUserLoginServiceClient().LoginByNumber(context.Background(), &dto.LoginByNumberReq{
		ClientInfo: nil,
		Number:     "123",
		Password:   "234",
	})
	if err != nil {
		fmt.Println("error")
	}
	// NOTE(review): rsp may be nil when err != nil; this dereference can panic.
	fmt.Println(rsp.Token)
	// Raw-protocol test (original comment: "从原理测试" — test at the wire level)
	con, err := grpc.Dial("localhost:33385", grpc.WithInsecure())
	if err != nil {
		fmt.Println("初始化失败")
	}
	con.Invoke(context.Background(), "/service.UserLoginService/LoginByNumber", in, out, opts...)
	proto.Marshal()
}
|
package main
import (
"fmt"
)
// main classifies a fixed value into one of three ranges and prints a
// Portuguese description of the result.
func main() {
	x := 500
	switch {
	case x > 100:
		fmt.Println("chis é maior que cem")
	case x < 10:
		fmt.Println("chis é menor que déis")
	default:
		fmt.Println("chis não é menor que déis nem maior que cem")
	}
}
|
package main
import (
"database/sql"
"errors"
"fmt"
"github.com/go-redis/redis/v8"
)
var Db *sql.DB
var redisClient *redis.Client
// Exists reports whether a user with the given roll number is present in
// the User table.
//
// Bug fix: the original scanned COUNT(*) directly into a bool. That
// conversion is driver-dependent and fails outright when the count exceeds
// one; scanning into an int and comparing is always well-defined.
func Exists(rollNo string) (bool, error) {
	var count int
	err := Db.QueryRow("SELECT COUNT(*) FROM User WHERE rollno = ?", rollNo).Scan(&count)
	if err != nil {
		return false, errors.New("internal error")
	}
	return count > 0, nil
}
// AdminFlag returns the isAdmin flag for the user with the given roll
// number, or an error if the user does not exist or the query fails.
//
// Uses errors.Is instead of `==` so a wrapped sql.ErrNoRows (possible with
// some drivers/middleware) is still recognized.
func AdminFlag(rollNo string) (bool, error) {
	var isAdmin bool
	err := Db.QueryRow("SELECT isAdmin FROM User WHERE rollno = ?", rollNo).Scan(&isAdmin)
	if errors.Is(err, sql.ErrNoRows) {
		return false, errors.New("user with given rollno does not exist")
	}
	if err != nil {
		return false, errors.New("internal error")
	}
	return isAdmin, nil
}
// getBatch extracts the batch (first two characters, e.g. "21" for a 2021
// roll number) from a roll number.
//
// Bug fix: the original sliced rollNo[:2] unconditionally and panicked on
// inputs shorter than two characters; such inputs now return an error,
// which the existing (string, error) signature already supports.
func getBatch(rollNo string) (string, error) {
	if len(rollNo) < 2 {
		return "", errors.New("roll number too short to contain a batch")
	}
	return rollNo[:2], nil
}
// addUser inserts usr into the User table unless a user with the same roll
// number already exists. The roll number "000000" is granted admin rights.
//
// Restructured with guard clauses (no else-after-return) and fixed the
// "succesfully" typo in the success message.
func addUser(usr *userDetails) error {
	has, err := Exists(usr.Rollno)
	if err != nil {
		return err
	}
	if has {
		return fmt.Errorf("user with roll number %v already present", usr.Rollno)
	}
	_, err = Db.Exec("INSERT INTO User (rollno, name, password, coins, isAdmin, email) VALUES (?, ?, ?, ?, ?, ?)", usr.Rollno, usr.Name, usr.Password, 0, usr.Rollno == "000000", usr.Email)
	if err != nil {
		return errors.New("could not add user: something went wrong")
	}
	fmt.Printf("User [%s: %s] successfully added.\n", usr.Rollno, usr.Name)
	return nil
}
// createRedeemRequest records a new redeem request (item, coin cost and
// requesting roll number) in the RedeemRequests table.
func createRedeemRequest(red *redeem) error {
	if _, err := Db.Exec("INSERT INTO RedeemRequests (itemName, coins, madeBy) VALUES (?, ?, ?)", red.ItemName, red.Coins, red.Rollno); err != nil {
		return errors.New("internal error: something went wrong")
	}
	return nil
}
|
package client
import (
"fmt"
"io"
"net"
"os"
"os/exec"
"github.com/Cloud-Foundations/Dominator/lib/bufwriter"
"github.com/Cloud-Foundations/Dominator/lib/errors"
"github.com/Cloud-Foundations/Dominator/lib/filesystem"
"github.com/Cloud-Foundations/Dominator/lib/filter"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
// acknowledgeVm tells the hypervisor the caller has seen the VM at
// ipAddress (e.g. after a create/import completed).
//
// The composite literal is now keyed (the unkeyed form is flagged by
// `go vet` and breaks if fields are ever reordered); every sibling request
// in this file uses the IpAddress field name.
func acknowledgeVm(client *srpc.Client, ipAddress net.IP) error {
	request := proto.AcknowledgeVmRequest{IpAddress: ipAddress}
	var reply proto.AcknowledgeVmResponse
	return client.RequestReply("Hypervisor.AcknowledgeVm", request, &reply)
}
// addVmVolumes asks the hypervisor to attach additional volumes of the
// given sizes to the VM at ipAddress.
func addVmVolumes(client *srpc.Client, ipAddress net.IP, sizes []uint64) error {
	req := proto.AddVmVolumesRequest{
		IpAddress:   ipAddress,
		VolumeSizes: sizes,
	}
	var resp proto.AddVmVolumesResponse
	if err := client.RequestReply("Hypervisor.AddVmVolumes", req, &resp); err != nil {
		return err
	}
	// The project errors package turns an empty string into nil, which all
	// sibling wrappers rely on for the success path.
	return errors.New(resp.Error)
}
// changeVmSize asks the hypervisor to resize the VM described by request.
func changeVmSize(client *srpc.Client,
	request proto.ChangeVmSizeRequest) error {
	var resp proto.ChangeVmSizeResponse
	if err := client.RequestReply("Hypervisor.ChangeVmSize", request, &resp); err != nil {
		return err
	}
	return errors.New(resp.Error)
}
// changeVmVolumeSize asks the hypervisor to resize one volume (selected by
// index) of the VM at ipAddress to the given size.
func changeVmVolumeSize(client *srpc.Client, ipAddress net.IP, index uint,
	size uint64) error {
	req := proto.ChangeVmVolumeSizeRequest{
		IpAddress:   ipAddress,
		VolumeIndex: index,
		VolumeSize:  size,
	}
	var resp proto.ChangeVmVolumeSizeResponse
	if err := client.RequestReply("Hypervisor.ChangeVmVolumeSize", req, &resp); err != nil {
		return err
	}
	return errors.New(resp.Error)
}
// connectToVmConsole bridges a local TCP socket (and, optionally, a spawned
// VNC viewer process) to the VM's console, proxied over the SRPC call.
// It listens on an ephemeral localhost port, accepts exactly one client,
// then pumps bytes in both directions until either side closes.
//
// Bug fix: the original communicated the reader goroutine's completion and
// error through unsynchronized variables (readFinished/readErr) — a data
// race under the Go memory model. A buffered channel now provides the
// required happens-before edge while preserving the original semantics:
// if the server->client copy has already finished, its error is returned,
// otherwise the client->server copy's error is.
func connectToVmConsole(client *srpc.Client, ipAddr net.IP,
	vncViewerCommand string, logger log.DebugLogger) error {
	serverConn, err := client.Call("Hypervisor.ConnectToVmConsole")
	if err != nil {
		return err
	}
	defer serverConn.Close()
	request := proto.ConnectToVmConsoleRequest{IpAddress: ipAddr}
	if err := serverConn.Encode(request); err != nil {
		return err
	}
	if err := serverConn.Flush(); err != nil {
		return err
	}
	var response proto.ConnectToVmConsoleResponse
	if err := serverConn.Decode(&response); err != nil {
		return err
	}
	if err := errors.New(response.Error); err != nil {
		return err
	}
	// Ephemeral port on loopback for the local VNC client to connect to.
	listener, err := net.Listen("tcp", "localhost:")
	if err != nil {
		return err
	}
	defer listener.Close()
	_, port, err := net.SplitHostPort(listener.Addr().String())
	if err != nil {
		return err
	}
	if vncViewerCommand == "" {
		logger.Printf("listening on port %s for VNC connection\n", port)
	} else {
		cmd := exec.Command(vncViewerCommand, "::"+port)
		cmd.Stderr = os.Stderr
		if err := cmd.Start(); err != nil {
			return err
		}
	}
	clientConn, err := listener.Accept()
	if err != nil {
		return err
	}
	defer clientConn.Close()
	listener.Close() // only one client is served
	readResult := make(chan error, 1) // buffered: the goroutine never blocks
	go func() {                       // Copy from server to client.
		_, copyErr := io.Copy(clientConn, serverConn)
		readResult <- copyErr
	}()
	// Copy from client to server.
	_, writeErr := io.Copy(bufwriter.NewAutoFlushWriter(serverConn), clientConn)
	select {
	case readErr := <-readResult:
		// Reader already finished: prefer its error, as the original did.
		return readErr
	default:
	}
	return writeErr
}
// createVm issues a streaming CreateVm call: it sends the request, then
// reads progress responses (logging each ProgressMessage) until the server
// reports an error or a final response, which is copied into reply.
//
// Restructured with an early return instead of wrapping the whole body in
// an else-branch, keeping the happy path left-aligned.
func createVm(client *srpc.Client, request proto.CreateVmRequest,
	reply *proto.CreateVmResponse, logger log.DebugLogger) error {
	conn, err := client.Call("Hypervisor.CreateVm")
	if err != nil {
		return err
	}
	defer conn.Close()
	if err := conn.Encode(request); err != nil {
		return err
	}
	if err := conn.Flush(); err != nil {
		return err
	}
	for {
		var response proto.CreateVmResponse
		if err := conn.Decode(&response); err != nil {
			return fmt.Errorf("error decoding: %s", err)
		}
		if response.Error != "" {
			return errors.New(response.Error)
		}
		if response.ProgressMessage != "" {
			logger.Debugln(0, response.ProgressMessage)
		}
		if response.Final {
			*reply = response
			return nil
		}
	}
}
// deleteVmVolume asks the hypervisor to remove one volume (selected by
// index) from the VM at ipAddr, authorizing with accessToken.
func deleteVmVolume(client *srpc.Client, ipAddr net.IP, accessToken []byte,
	volumeIndex uint) error {
	req := proto.DeleteVmVolumeRequest{
		AccessToken: accessToken,
		IpAddress:   ipAddr,
		VolumeIndex: volumeIndex,
	}
	var resp proto.DeleteVmVolumeResponse
	if err := client.RequestReply("Hypervisor.DeleteVmVolume", req, &resp); err != nil {
		return err
	}
	return errors.New(resp.Error)
}
// destroyVm asks the hypervisor to destroy the VM at ipAddr, authorizing
// with accessToken.
func destroyVm(client *srpc.Client, ipAddr net.IP, accessToken []byte) error {
	req := proto.DestroyVmRequest{
		AccessToken: accessToken,
		IpAddress:   ipAddr,
	}
	var resp proto.DestroyVmResponse
	if err := client.RequestReply("Hypervisor.DestroyVm", req, &resp); err != nil {
		return err
	}
	return errors.New(resp.Error)
}
// exportLocalVm requests export metadata for the local VM at ipAddr; the
// verificationCookie proves the caller is entitled to export it.
func exportLocalVm(client *srpc.Client, ipAddr net.IP,
	verificationCookie []byte) (proto.ExportLocalVmInfo, error) {
	req := proto.ExportLocalVmRequest{
		IpAddress:          ipAddr,
		VerificationCookie: verificationCookie,
	}
	var resp proto.ExportLocalVmResponse
	if err := client.RequestReply("Hypervisor.ExportLocalVm", req, &resp); err != nil {
		return proto.ExportLocalVmInfo{}, err
	}
	if err := errors.New(resp.Error); err != nil {
		return proto.ExportLocalVmInfo{}, err
	}
	return resp.VmInfo, nil
}
// getRootCookiePath returns the hypervisor's root cookie path.
func getRootCookiePath(client *srpc.Client) (string, error) {
	var resp proto.GetRootCookiePathResponse
	if err := client.RequestReply("Hypervisor.GetRootCookiePath",
		proto.GetRootCookiePathRequest{}, &resp); err != nil {
		return "", err
	}
	if err := errors.New(resp.Error); err != nil {
		return "", err
	}
	return resp.Path, nil
}
// getVmInfo fetches the hypervisor's information record for the VM at
// ipAddr.
func getVmInfo(client *srpc.Client, ipAddr net.IP) (proto.VmInfo, error) {
	req := proto.GetVmInfoRequest{IpAddress: ipAddr}
	var resp proto.GetVmInfoResponse
	if err := client.RequestReply("Hypervisor.GetVmInfo", req, &resp); err != nil {
		return proto.VmInfo{}, err
	}
	if err := errors.New(resp.Error); err != nil {
		return proto.VmInfo{}, err
	}
	return resp.VmInfo, nil
}
// listSubnets fetches the hypervisor's subnet list, optionally sorted
// server-side.
func listSubnets(client *srpc.Client, doSort bool) ([]proto.Subnet, error) {
	req := proto.ListSubnetsRequest{Sort: doSort}
	var resp proto.ListSubnetsResponse
	if err := client.RequestReply("Hypervisor.ListSubnets", req, &resp); err != nil {
		return nil, err
	}
	if err := errors.New(resp.Error); err != nil {
		return nil, err
	}
	return resp.Subnets, nil
}
// powerOff asks the hypervisor to power off the machine, optionally
// stopping running VMs first.
func powerOff(client *srpc.Client, stopVMs bool) error {
	req := proto.PowerOffRequest{StopVMs: stopVMs}
	var resp proto.PowerOffResponse
	if err := client.RequestReply("Hypervisor.PowerOff", req, &resp); err != nil {
		return err
	}
	return errors.New(resp.Error)
}
// prepareVmForMigration toggles the migration-preparation state of the VM
// at ipAddr, authorizing with accessToken.
func prepareVmForMigration(client *srpc.Client, ipAddr net.IP,
	accessToken []byte, enable bool) error {
	req := proto.PrepareVmForMigrationRequest{
		AccessToken: accessToken,
		Enable:      enable,
		IpAddress:   ipAddr,
	}
	var resp proto.PrepareVmForMigrationResponse
	if err := client.RequestReply("Hypervisor.PrepareVmForMigration",
		req, &resp); err != nil {
		return err
	}
	return errors.New(resp.Error)
}
// registerExternalLeases registers externally-managed address leases (and
// their hostnames) with the hypervisor.
func registerExternalLeases(client *srpc.Client, addressList proto.AddressList,
	hostnames []string) error {
	req := proto.RegisterExternalLeasesRequest{
		Addresses: addressList,
		Hostnames: hostnames,
	}
	var resp proto.RegisterExternalLeasesResponse
	if err := client.RequestReply("Hypervisor.RegisterExternalLeases",
		req, &resp); err != nil {
		return err
	}
	return errors.New(resp.Error)
}
// reorderVmVolumes asks the hypervisor to reorder the volumes of the VM at
// ipAddr according to volumeIndices.
//
// NOTE(review): the accessToken parameter is accepted but never placed in
// the request, unlike the sibling wrappers (deleteVmVolume, startVm,
// stopVm) which all set AccessToken. Confirm whether
// ReorderVmVolumesRequest carries an access-token field and whether this
// omission is intentional.
func reorderVmVolumes(client *srpc.Client, ipAddr net.IP, accessToken []byte,
	volumeIndices []uint) error {
	request := proto.ReorderVmVolumesRequest{
		IpAddress:     ipAddr,
		VolumeIndices: volumeIndices,
	}
	var reply proto.ReorderVmVolumesResponse
	err := client.RequestReply("Hypervisor.ReorderVmVolumes", request, &reply)
	if err != nil {
		return err
	}
	return errors.New(reply.Error)
}
// scanVmRoot asks the hypervisor to scan the root filesystem of the VM at
// ipAddr, applying scanFilter, and returns the resulting filesystem tree.
func scanVmRoot(client *srpc.Client, ipAddr net.IP,
	scanFilter *filter.Filter) (*filesystem.FileSystem, error) {
	req := proto.ScanVmRootRequest{IpAddress: ipAddr, Filter: scanFilter}
	var resp proto.ScanVmRootResponse
	if err := client.RequestReply("Hypervisor.ScanVmRoot", req, &resp); err != nil {
		return nil, err
	}
	return resp.FileSystem, errors.New(resp.Error)
}
// startVm asks the hypervisor to start the VM at ipAddr, authorizing with
// accessToken.
func startVm(client *srpc.Client, ipAddr net.IP, accessToken []byte) error {
	req := proto.StartVmRequest{
		AccessToken: accessToken,
		IpAddress:   ipAddr,
	}
	var resp proto.StartVmResponse
	if err := client.RequestReply("Hypervisor.StartVm", req, &resp); err != nil {
		return err
	}
	return errors.New(resp.Error)
}
// stopVm asks the hypervisor to stop the VM at ipAddr, authorizing with
// accessToken.
func stopVm(client *srpc.Client, ipAddr net.IP, accessToken []byte) error {
	req := proto.StopVmRequest{
		AccessToken: accessToken,
		IpAddress:   ipAddr,
	}
	var resp proto.StopVmResponse
	if err := client.RequestReply("Hypervisor.StopVm", req, &resp); err != nil {
		return err
	}
	return errors.New(resp.Error)
}
|
package compose
import (
"fmt"
"strconv"
"github.com/kudrykv/latex-yearly-planner/app/components/calendar"
"github.com/kudrykv/latex-yearly-planner/app/components/header"
"github.com/kudrykv/latex-yearly-planner/app/components/page"
"github.com/kudrykv/latex-yearly-planner/app/config"
)
// HeaderWeekly builds one page module per week of the year, each carrying
// the year / quarter / month / current-week navigation header plus links
// to the previous and next weeks. Exactly one template name is expected.
//
// Fixes the "exppected" typo in the error message.
func HeaderWeekly(cfg config.Config, tpls []string) (page.Modules, error) {
	if len(tpls) != 1 {
		return nil, fmt.Errorf("expected one tpl, got %d %v", len(tpls), tpls)
	}
	modules := make(page.Modules, 0, 53)
	sow := pickUpStartWeekForTheYear(cfg.Year, cfg.WeekStart)
	yearInWeeks := calendar.FillWeekly(sow).FillYear()
	for i, weekly := range yearInWeeks {
		// Previous- and next-week links; the very first week is referenced
		// with the "fw" prefix.
		right := header.Items{}
		if i > 0 {
			item := header.NewTextItem("Week " + strconv.Itoa(yearInWeeks[i-1].WeekNumber()))
			if i-1 == 0 {
				item = item.RefPrefix("fw")
			}
			right = append(right, item)
		}
		if i+1 < len(yearInWeeks) {
			right = append(right, header.NewTextItem("Week "+strconv.Itoa(yearInWeeks[i+1].WeekNumber())))
		}
		// A week can straddle at most two quarters and two months.
		qrtrItems := make([]header.Item, 0, 2)
		for _, qrtr := range weekly.Quarters(cfg.Year) {
			qrtrItems = append(qrtrItems, header.NewTextItem("Q"+strconv.Itoa(qrtr)))
		}
		monthItems := make([]header.Item, 0, 2)
		for _, month := range weekly.Months(cfg.Year) {
			monthItems = append(monthItems, header.NewMonthItem(month))
		}
		curr := header.NewTextItem("Week " + strconv.Itoa(weekly.WeekNumber())).Ref(true)
		if i == 0 {
			curr = curr.RefPrefix("fw")
		}
		modules = append(modules, page.Module{
			Cfg: cfg,
			Tpl: tpls[0],
			Body: header.Header{
				Right: right,
				Left: header.Items{
					header.NewIntItem(cfg.Year),
					header.NewItemsGroup(qrtrItems...).Delim(" / "),
					header.NewItemsGroup(monthItems...).Delim(" / "),
					curr,
				},
			},
		})
	}
	return modules, nil
}
// HeaderWeekly2 builds one page module per week using a map body (week,
// ref prefix, a representative day within cfg.Year, and cell items for the
// calendar/todo/notes tabs plus month/quarter navigation).
// Exactly one template name is expected.
//
// Fixes the "exppected" typo in the error message.
func HeaderWeekly2(cfg config.Config, tpls []string) (page.Modules, error) {
	if len(tpls) != 1 {
		return nil, fmt.Errorf("expected one tpl, got %d %v", len(tpls), tpls)
	}
	modules := make(page.Modules, 0, 53)
	sow := pickUpStartWeekForTheYear(cfg.Year, cfg.WeekStart)
	yearInWeeks := calendar.FillWeekly(sow).FillYear()
	for i, week := range yearInWeeks {
		var day calendar.DayTime
		var weekPrefix string
		// Pick the first day of the week that actually falls in cfg.Year
		// (edge weeks can spill into the previous/next year).
		for _, moment := range week {
			if moment.Year() == cfg.Year {
				day = moment
				break
			}
		}
		if i == 0 {
			weekPrefix = "fw"
		}
		modules = append(modules, page.Module{
			Cfg: cfg,
			Tpl: tpls[0],
			Body: map[string]interface{}{
				"Week":       week,
				"WeekPrefix": weekPrefix,
				"Date":       day,
				"Cells": header.Items{
					header.NewCellItem("Calendar"),
					header.NewCellItem("To Do").Refer("Todos Index"),
					header.NewCellItem("Notes").Refer("Notes Index"),
				},
				"Months":   MonthsToCellItems(cfg.WeekStart, calendar.NewYearInMonths(cfg.Year).Selected(day).Reverse()),
				"Quarters": QuartersToCellItems(calendar.NewYearInQuarters(cfg.Year).Reverse()),
			},
		})
	}
	return modules, nil
}
|
package compute
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
// Get IP address list by Id (successful).
// Spins up a stub HTTP server that always returns the canned JSON body,
// then verifies the client decodes it correctly.
func TestClient_GetIPAddressList_ById_Success(test *testing.T) {
	stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, getIPAddressListTestResponse)
	}))
	defer stub.Close()
	client := NewClientWithBaseAddress(stub.URL, "user1", "password")
	client.setAccount(&Account{
		OrganizationID: "dummy-organization-id",
	})
	server, err := client.GetIPAddressList("5a32d6e4-9707-4813-a269-56ab4d989f4d")
	if err != nil {
		test.Fatal("Unable to retrieve IP address list: ", err)
	}
	verifyGetIPAddressListTestResponse(test, server)
}
/*
 * Test responses.
 */
// getIPAddressListTestResponse is the canned JSON body served for a
// single IP address list lookup: three address entries (range, single
// address, CIDR prefix) and two child lists.
const getIPAddressListTestResponse = `
{
"id": "c8c92ea3-2da8-4d51-8153-f39bec794d69",
"name": "ProductionIPAddressList",
"description": "For our production web servers",
"ipVersion": "IPV4",
"ipAddress": [
{
"begin": "1.1.1.1",
"end": "2.2.2.2"
},
{
"begin": "192.168.1.1"
},
{
"begin": "192.168.1.1",
"prefixSize": 24
}
],
"childIpAddressList": [
{
"id": "c8c92ea3-2da8-4d51-8153-f39bec794d68",
"name": "tomcatIpAddresses"
},
{
"id": "c8c92ea3-2da8-4d51-8153-f39bec794d67",
"name": "mySqlIpAddresses"
}
],
"state": "NORMAL",
"createTime": "2015-09-29T02:49:45"
}
`
// verifyGetIPAddressListTestResponse asserts that addressList matches the
// fields of getIPAddressListTestResponse field-by-field.
func verifyGetIPAddressListTestResponse(test *testing.T, addressList *IPAddressList) {
	expect := expect(test)
	expect.NotNil("IPAddressList", addressList)
	expect.EqualsString("IPAddressList.Name", "ProductionIPAddressList", addressList.Name)
	expect.EqualsString("IPAddressList.Description", "For our production web servers", addressList.Description)
	expect.EqualsString("IPAddressList.IPVersion", "IPV4", addressList.IPVersion)
	expect.EqualsString("IPAddressList.State", ResourceStatusNormal, addressList.State)
	expect.EqualsString("IPAddressList.CreateTime", "2015-09-29T02:49:45", addressList.CreateTime)
	expect.EqualsInt("IPAddressList.Addresses.Length", 3, len(addressList.Addresses))
	// Address 1: explicit begin/end range.
	address1 := addressList.Addresses[0]
	expect.EqualsString("IPAddressList.Addresses[0].Begin", "1.1.1.1", address1.Begin)
	expect.NotNil("IPAddressList.Addresses[0].End", address1.End)
	expect.EqualsString("IPAddressList.Addresses[0].End", "2.2.2.2", *address1.End)
	expect.IsNil("IPAddressList.Addresses[0].PrefixSize", address1.PrefixSize)
	// Address 2: single address (no end, no prefix).
	address2 := addressList.Addresses[1]
	expect.EqualsString("IPAddressList.Addresses[1].Begin", "192.168.1.1", address2.Begin)
	expect.IsNil("IPAddressList.Addresses[1].End", address2.End)
	expect.IsNil("IPAddressList.Addresses[1].PrefixSize", address2.PrefixSize)
	// Address 3: CIDR-style begin + prefix size.
	address3 := addressList.Addresses[2]
	expect.EqualsString("IPAddressList.Addresses[2].Begin", "192.168.1.1", address3.Begin)
	expect.IsNil("IPAddressList.Addresses[2].End", address3.End)
	expect.NotNil("IPAddressList.Addresses[2].PrefixSize", address3.PrefixSize)
	expect.EqualsInt("IPAddressList.Addresses[2].PrefixSize", 24, *address3.PrefixSize)
	// Child lists.
	expect.EqualsInt("IPAddressList.ChildLists.Length", 2, len(addressList.ChildLists))
	childList1 := addressList.ChildLists[0]
	expect.EqualsString("IPAddressList.ChildLists[0].ID", "c8c92ea3-2da8-4d51-8153-f39bec794d68", childList1.ID)
	expect.EqualsString("IPAddressList.ChildLists[0].Name", "tomcatIpAddresses", childList1.Name)
	childList2 := addressList.ChildLists[1]
	expect.EqualsString("IPAddressList.ChildLists[1].ID", "c8c92ea3-2da8-4d51-8153-f39bec794d67", childList2.ID)
	expect.EqualsString("IPAddressList.ChildLists[1].Name", "mySqlIpAddresses", childList2.Name)
}
// listIPAddressListsResponse is the canned paginated JSON body for listing
// IP address lists: one list entry plus paging metadata.
const listIPAddressListsResponse = `
{
"ipAddressList": [
{
"id": "c8c92ea3-2da8-4d51-8153-f39bec794d69",
"name": "ProductionIPAddressList",
"description": "For our production web servers", "ipVersion": "IPV4",
"ipAddress": [
{
"begin": "1.1.1.1",
"end": "2.2.2.2"
},
{
"begin": "192.168.1.1"
},
{
"begin": "192.168.1.1",
"prefixSize": 24
}
],
"childIpAddressList": [
{
"id": "c8c92ea3-2da8-4d51-8153-f39bec794d68",
"name": "tomcatIpAddresses"
},
{
"id": "c8c92ea3-2da8-4d51-8153-f39bec794d67",
"name": "mySqlIpAddresses"
}
],
"state": "NORMAL",
"createTime": "2015-09-29T02:49:45"
}
],
"pageNumber": 1,
"pageCount": 1,
"totalCount": 3,
"pageSize": 3
}
`
// verifyListIPAddressListsResponse asserts that addressLists matches
// listIPAddressListsResponse field-by-field.
//
// The Name assertion appeared three times in the original (copy-paste
// duplication); the redundant copies are removed.
func verifyListIPAddressListsResponse(test *testing.T, addressLists *IPAddressLists) {
	expect := expect(test)
	expect.NotNil("IPAddressLists", addressLists)
	expect.EqualsInt("IPAddressLists.PageCount", 1, addressLists.PageCount)
	expect.EqualsInt("IPAddressLists.AddressLists.Length", 1, len(addressLists.AddressLists))
	addressList1 := addressLists.AddressLists[0]
	expect.EqualsString("IPAddressLists.AddressLists[0].Name", "ProductionIPAddressList", addressList1.Name)
	expect.EqualsString("IPAddressLists.AddressLists[0].Description", "For our production web servers", addressList1.Description)
	expect.EqualsString("IPAddressLists.AddressLists[0].IPVersion", "IPV4", addressList1.IPVersion)
	expect.EqualsString("IPAddressLists.AddressLists[0].State", ResourceStatusNormal, addressList1.State)
	expect.EqualsString("IPAddressLists.AddressLists[0].CreateTime", "2015-09-29T02:49:45", addressList1.CreateTime)
	expect.EqualsInt("IPAddressLists.AddressLists[0].Addresses.Length", 3, len(addressList1.Addresses))
	// Address 1: explicit begin/end range.
	address1 := addressList1.Addresses[0]
	expect.EqualsString("IPAddressLists.AddressLists[0].Addresses[0].Begin", "1.1.1.1", address1.Begin)
	expect.NotNil("IPAddressLists.AddressLists[0].Addresses[0].End", address1.End)
	expect.EqualsString("IPAddressLists.AddressLists[0].Addresses[0].End", "2.2.2.2", *address1.End)
	expect.IsNil("IPAddressLists.AddressLists[0].Addresses[0].PrefixSize", address1.PrefixSize)
	// Address 2: single address.
	address2 := addressList1.Addresses[1]
	expect.EqualsString("IPAddressLists.AddressLists[0].Addresses[1].Begin", "192.168.1.1", address2.Begin)
	expect.IsNil("IPAddressLists.AddressLists[0].Addresses[1].End", address2.End)
	expect.IsNil("IPAddressLists.AddressLists[0].Addresses[1].PrefixSize", address2.PrefixSize)
	// Address 3: CIDR-style begin + prefix size.
	address3 := addressList1.Addresses[2]
	expect.EqualsString("IPAddressLists.AddressLists[0].Addresses[2].Begin", "192.168.1.1", address3.Begin)
	expect.IsNil("IPAddressLists.AddressLists[0].Addresses[2].End", address3.End)
	expect.NotNil("IPAddressLists.AddressLists[0].Addresses[2].PrefixSize", address3.PrefixSize)
	expect.EqualsInt("IPAddressLists.AddressLists[0].Addresses[2].PrefixSize", 24, *address3.PrefixSize)
	// Child lists.
	expect.EqualsInt("IPAddressLists.AddressLists[0].ChildLists.Length", 2, len(addressList1.ChildLists))
	childList1 := addressList1.ChildLists[0]
	expect.EqualsString("IPAddressLists.AddressLists[0].ChildLists[0].ID", "c8c92ea3-2da8-4d51-8153-f39bec794d68", childList1.ID)
	expect.EqualsString("IPAddressLists.AddressLists[0].ChildLists[0].Name", "tomcatIpAddresses", childList1.Name)
	childList2 := addressList1.ChildLists[1]
	expect.EqualsString("IPAddressLists.AddressLists[0].ChildLists[1].ID", "c8c92ea3-2da8-4d51-8153-f39bec794d67", childList2.ID)
	expect.EqualsString("IPAddressLists.AddressLists[0].ChildLists[1].Name", "mySqlIpAddresses", childList2.Name)
}
|
package hive
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
)
// ActiveMode defines the active heating/cooling mode
type ActiveMode int

// ActiveMode values
const (
	ActiveModeOff     ActiveMode = iota // neither heating nor cooling
	ActiveModeHeating                   // actively heating
	ActiveModeCooling                   // actively cooling
)

const (
	// ThermostatDefaultMinimum is the default minimum heating temperature
	// (used when the node does not report minHeatTemperature).
	ThermostatDefaultMinimum = 5.0
	// ThermostatDefaultMaximum is the default maximum heating temperature
	// (used when the node does not report maxHeatTemperature).
	ThermostatDefaultMaximum = 35.0
)
// Thermostat is a Hive managed Thermostat
type Thermostat struct {
	home *Home // owning Home, used for API requests
	node *node // latest node snapshot backing the attribute reads
	ID   string
	Name string
	Href string // API resource URL for this thermostat's node
}
// ActiveMode returns the current active heating/cooling mode.
// Unknown values (anything other than "HEAT"/"COOL") map to ActiveModeOff.
//
// Bug fix: the error's Op said "thermostat: temperature" (copy-pasted from
// Temperature); it now correctly identifies this operation.
func (t *Thermostat) ActiveMode() (ActiveMode, error) {
	v, ok := t.node.attr("activeHeatCoolMode").ReportedValueString()
	if !ok {
		return ActiveModeOff, &Error{
			Op:      "thermostat: active mode",
			Code:    ErrInvalidDataType,
			Message: "invalid data type",
		}
	}
	switch v {
	case "HEAT":
		return ActiveModeHeating, nil
	case "COOL":
		return ActiveModeCooling, nil
	default:
		return ActiveModeOff, nil
	}
}
// Temperature returns the current measured temperature, clamped into the
// [Minimum, Maximum] range. On an unreadable attribute it returns the
// minimum together with an error.
func (t *Thermostat) Temperature() (float64, error) {
	value, ok := t.node.attr("temperature").ReportedValueFloat()
	if !ok {
		return t.Minimum(), &Error{
			Op:      "thermostat: temperature",
			Code:    ErrInvalidDataType,
			Message: "invalid data type",
		}
	}
	switch {
	case value < t.Minimum():
		return t.Minimum(), nil
	case value > t.Maximum():
		return t.Maximum(), nil
	default:
		return value, nil
	}
}
// Target returns the target temperature setting, clamped into the
// [Minimum, Maximum] range. On an unreadable attribute it returns the
// minimum together with an error.
func (t *Thermostat) Target() (float64, error) {
	value, ok := t.node.attr("targetHeatTemperature").ReportedValueFloat()
	if !ok {
		return t.Minimum(), &Error{
			Op:      "thermostat: target temperature",
			Code:    ErrInvalidDataType,
			Message: "invalid data type",
		}
	}
	switch {
	case value < t.Minimum():
		return t.Minimum(), nil
	case value > t.Maximum():
		return t.Maximum(), nil
	default:
		return value, nil
	}
}
// Minimum returns the minimum valid temperature, falling back to
// ThermostatDefaultMinimum when the node does not report one.
func (t *Thermostat) Minimum() float64 {
	if v, ok := t.node.attr("minHeatTemperature").ReportedValueFloat(); ok {
		return v
	}
	return ThermostatDefaultMinimum
}
// Maximum returns the maximum valid temperature, falling back to
// ThermostatDefaultMaximum when the node does not report one.
func (t *Thermostat) Maximum() float64 {
	if v, ok := t.node.attr("maxHeatTemperature").ReportedValueFloat(); ok {
		return v
	}
	return ThermostatDefaultMaximum
}
// Update fetches the latest information about the Thermostat from the API
// and swaps in the fresh node snapshot, rejecting a response whose ID does
// not match this thermostat.
func (t *Thermostat) Update() error {
	latest, err := t.home.node(t.Href)
	if err != nil {
		return &Error{Op: "thermostat: update", Err: err}
	}
	if latest.ID != t.ID {
		return &Error{Op: "thermostat: update", Code: ErrInvalidUpdate, Message: "update failed, ID mismatch"}
	}
	t.node = latest
	return nil
}
// Thermostats returns the list of thermostats in the Home: nodes of the
// thermostat class that report a "temperature" attribute.
func (home *Home) Thermostats() ([]*Thermostat, error) {
	const nodeTypeThermostat = "http://alertme.com/schema/json/node.class.thermostat.json#"
	nodes, err := home.nodes()
	if err != nil {
		return nil, err
	}
	var thermostats []*Thermostat
	for _, candidate := range nodes {
		nt, err := candidate.NodeType()
		if err != nil || nt != nodeTypeThermostat {
			continue
		}
		if _, ok := candidate.Attributes["temperature"]; !ok {
			continue
		}
		candidate := candidate // per-iteration copy, as in the original
		thermostats = append(thermostats, &Thermostat{
			ID:   candidate.ID,
			Name: candidate.Name,
			Href: candidate.Href,
			home: home,
			node: candidate,
		})
	}
	return thermostats, nil
}
// SetTarget sets the target temperature of the Thermostat and stores the
// node snapshot returned by the API.
func (t *Thermostat) SetTarget(temp float64) error {
	updated, err := t.home.setThermostat(t, temp)
	if err != nil {
		return err
	}
	t.node = updated
	return nil
}
// setThermostat sets the target temperature of the Thermostat via a PUT to
// the thermostat's node URL and returns the updated node from the response.
//
// Bug fix: the url.Parse failure path produced an error with an empty Op,
// which would render as a context-free message; it now names the operation.
func (home *Home) setThermostat(t *Thermostat, targetTemp float64) (*node, error) {
	body := &nodesResponse{
		Nodes: []*node{{
			Attributes: nodeAttributes{
				"targetHeatTemperature": {
					TargetValue: targetTemp,
				},
			},
		}},
	}
	uri, err := url.Parse(t.Href)
	if err != nil {
		return nil, &Error{Op: "thermostat: set temperature: parse href", Err: err}
	}
	buf := &bytes.Buffer{}
	if err := json.NewEncoder(buf).Encode(body); err != nil {
		return nil, &Error{
			Op:  "thermostat: set temperature: encode json",
			Err: err,
		}
	}
	rs := bytes.NewReader(buf.Bytes()) // convert JSON bytes into bytes.Reader to support io.ReadSeeker
	resp, err := home.httpRequestWithSession(http.MethodPut, uri.RequestURI(), rs)
	if err != nil {
		return nil, &Error{
			Op:  "thermostat: set temperature: request",
			Err: err,
		}
	}
	defer resp.Body.Close()
	var response nodesResponse
	if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
		return nil, &Error{
			Op:   "thermostat: set temperature: read body",
			Code: ErrInvalidJSON,
			Err:  err,
		}
	}
	if len(response.Nodes) != 1 {
		return nil, &Error{
			Op:      "thermostat: set temperature",
			Code:    ErrNodeNotFound,
			Message: "incorrect number of nodes returned",
		}
	}
	return response.Nodes[0], nil
}
|
package main
import (
"log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/andywow/golang-lessons/lesson-calendar/cmd/client/command"
"github.com/andywow/golang-lessons/lesson-calendar/internal/client/config"
)
var (
	// rootCmd is the top-level cobra command; subcommands and persistent
	// flags are attached in init.
	rootCmd = &cobra.Command{
		Use:   "client",
		Short: "client for grpc api server",
		Long:  "client for testing grpc api server",
	}
)
// init declares the shared --host/--port flags, binds them into viper and
// registers all subcommands, each sharing the same options struct.
func init() {
	options := config.ClientOptions{}
	rootCmd.PersistentFlags().StringVar(&options.GRPCHost, "host", "127.0.0.1", "host of grpc server")
	rootCmd.PersistentFlags().Int64Var(&options.GRPCPort, "port", 9090, "port of grpc server")
	if err := viper.BindPFlag("host", rootCmd.PersistentFlags().Lookup("host")); err != nil {
		log.Fatal(err)
	}
	if err := viper.BindPFlag("port", rootCmd.PersistentFlags().Lookup("port")); err != nil {
		log.Fatal(err)
	}
	rootCmd.AddCommand(command.CreateCmd(&options))
	rootCmd.AddCommand(command.UpdateCmd(&options))
	rootCmd.AddCommand(command.DeleteCmd(&options))
	rootCmd.AddCommand(command.ListDateCmd(&options))
	rootCmd.AddCommand(command.ListWeekCmd(&options))
	rootCmd.AddCommand(command.ListMonthCmd(&options))
}
// main runs the root command and exits with a fatal log on failure.
func main() {
	err := rootCmd.Execute()
	if err != nil {
		log.Fatalf("Command failed: %v", err)
	}
}
|
package main
import "fmt"
// main reads ten integers from standard input and prints their integer
// average (truncating division, as before).
//
// The divisor is now len(nums) instead of a second hard-coded 10, so the
// slice size is the single source of truth; also gofmt'd (no semicolons,
// `range` without the redundant blank identifier).
func main() {
	nums := make([]int, 10)
	for i := range nums {
		fmt.Scan(&nums[i])
	}
	sum := calc_sum(nums)
	avg := sum / len(nums)
	fmt.Println(avg)
}
// calc_sum returns the sum of all elements of array (0 for an empty or
// nil slice).
func calc_sum(array []int) int {
	total := 0
	for i := range array {
		total += array[i]
	}
	return total
}
|
package Solution
import "math"
// TreeNode is a binary tree node (LeetCode-style).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// 这是一道树结构的动态规划题,每个节点可以为三种状态 0 无摄像机 被子节点监控 1有摄像机 2 无摄像机被父节点监控
// 即用dp[root][0] dp[root][1] dp[root][2] 表示三个状态下各自的最优解 为什么不用dp[root] 直接表示 父节点的最优解 因为dp[root]
// 的最优解是会被子节点不同状态所影响的。
// 思路采取自下向上的解法 用后续遍历 依次由子节点返回给父节点
// 状态转移方程为
// dp[root][0] = dp[root.left][1]+dp[root.right][1] -- 当前节点无摄像机 为了保证每个都被监控 所以各自的子节点必须要有摄像机 也就是dp[child][1]
// dp[root][1] = 1 + min(dp[root.left][0],[1],[2]) + min(dp[root.right][0],[1],[2])
// dp[root][2] = dp[root.left][1]+dp[root.right][1]
// 定义dp初始状态 dp[leaf][0] = int_max dp[leaf][1] = 1 dp[leaf][2] = 0
// dp[null] = 0
// min returns the smaller of a and b.
func min(a int, b int) int {
	if b < a {
		return b
	}
	return a
}
// _min returns the smallest of a, b and c.
func _min(a int, b int, c int) int {
	return min(a, min(b, c))
}
// minCameraCover returns the minimum number of cameras needed to monitor
// every node of the tree: the best of the three root states computed by
// _minCameraCover.
func minCameraCover(root *TreeNode) int {
	states := _minCameraCover(root)
	best := states[0]
	if states[1] < best {
		best = states[1]
	}
	if states[2] < best {
		best = states[2]
	}
	return best
}
// _minCameraCover post-order computes, for the subtree rooted at root, the
// minimum camera counts under three states:
//
//	dp[0]: root has no camera and is monitored by a child's camera
//	dp[1]: root has a camera
//	dp[2]: root has no camera and relies on its parent's camera
func _minCameraCover(root *TreeNode) []int {
	var dp = make([]int, 3)
	if root == nil {
		// An absent subtree contributes no cameras in any state.
		dp[0] = 0
		dp[1] = 0
		dp[2] = 0
		return dp
	}
	if root.Left == nil && root.Right == nil {
		// Leaf: state 0 is impossible (no child can monitor it), so it is
		// priced out with MaxInt32.
		dp[0] = math.MaxInt32
		dp[1] = 1
		dp[2] = 0
		return dp
	}
	l := _minCameraCover(root.Left)
	r := _minCameraCover(root.Right)
	// Combine children post-order.
	// NOTE(review): the file's header comment states the recurrence
	// dp[root][0] = dp[left][1] + dp[right][1], but this line computes
	// min(r[0]+l[1], r[1]+l[0]) instead — confirm which is intended.
	dp[0] = min(r[0]+l[1], r[1]+l[0])
	dp[1] = 1 + _min(l[0], l[1], l[2]) + _min(r[0], r[1], r[2])
	dp[2] = min(l[0], l[1]) + min(r[0], r[1])
	return dp
}
|
package main
import (
"html/template"
"log"
"os"
)
// main demonstrates html/template parsing at two levels: the package-level
// ParseFiles creates the initial template set, then the method-level
// ParseFiles adds more files to the same set, after which any member can be
// executed by name. Execution order here is vespa, two, one.
func main() {
	// here we call the ParseFiles of the package level
	tpl, err := template.ParseFiles("one.gmao")
	if err != nil {
		log.Fatal(err)
	}
	// err = tpl.Execute(os.Stdout, nil)
	// if err != nil {
	// 	log.Fatal(err)
	// }
	// here from the type level
	tpl, err = tpl.ParseFiles("two.gmao", "vespa.gmao") // we had those to the "templates bucket"
	if err != nil {
		log.Fatal(err)
	}
	// Now we can execute anything from the templates bucket
	err = tpl.ExecuteTemplate(os.Stdout, "vespa.gmao", nil)
	if err != nil {
		log.Fatal(err)
	}
	err = tpl.ExecuteTemplate(os.Stdout, "two.gmao", nil)
	if err != nil {
		log.Fatal(err)
	}
	err = tpl.ExecuteTemplate(os.Stdout, "one.gmao", nil)
	if err != nil {
		log.Fatal(err)
	}
}
|
package setting
import (
"github.com/phjt-go/logger"
"github.com/spf13/viper"
)
// init loads the configuration at package import time; a load failure is
// logged but does not abort the process.
func init() {
	// Initialise the configuration file.
	if err := Config(); err != nil {
		logger.Error("Load configuration failed, ", err)
	}
	// Watch the config file for changes and hot-reload (currently disabled).
	//watchConfig()
}
// GetString returns the string-typed config value for params.
func GetString(params string) string {
	return viper.GetString(params)
}
// GetInt returns the int-typed config value for params.
func GetInt(params string) int {
	return viper.GetInt(params)
}
// GetBool returns the bool-typed config value for params.
func GetBool(params string) bool {
	return viper.GetBool(params)
}
// Config parses the "jenkins" config file from the config directory via
// viper and returns any read error.
func Config() error {
	viper.AddConfigPath("config")
	viper.SetConfigName("jenkins")
	return viper.ReadInConfig()
}
|
package main
import (
"fmt"
"time"
)
// main prints the current time fifty times in compact yyyymmddhhmmss form.
func main() {
	// Earlier Unix-timestamp experiments are preserved below, disabled:
	// t1 := time.Now().UnixNano()
	// t2 := time.Now().Local().Unix()
	// fmt.Println(strconv.Itoa(int(t1)))
	// fmt.Println(t2)
	for count := 0; count < 50; count++ {
		fmt.Println(time.Now().Format("20060102150405"))
	}
}
|
package envdir
import (
"fmt"
"github.com/imdario/mergo"
"github.com/joho/godotenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
)
// according exit codes https://www.unix.com/man-page/debian/8/envdir/
// (envdir exits with 111 on failure; RunCmd mirrors that convention)
const exitCode = 111
// ReadDir scans the specified directory and returns all environment
// variables defined in it. Each regular file is parsed in dotenv format
// and merged into one map; with mergo's default merge, the first file (in
// directory order) to define a key wins.
//
// Cleaned up: dropped the pointless size hint on make and the intermediate
// filesPath slice — files are parsed in the same single pass that filters
// out directories, preserving iteration order.
func ReadDir(dir string) (map[string]string, error) {
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	environments := make(map[string]string)
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		env, err := godotenv.Read(filepath.Join(dir, file.Name()))
		if err != nil {
			return nil, err
		}
		if err := mergo.Merge(&environments, env); err != nil {
			return nil, err
		}
	}
	return environments, nil
}
// RunCmd starts a program with arguments (cmd) with an overridden
// environment (env entries are appended after os.Environ, so they win) and
// the parent's stdio attached. It returns the process's conventional
// envdir failure code (111) if the command cannot be started or exits
// non-zero, and 0 on success.
//
// Fixes: an empty cmd slice previously panicked on cmd[0] — it now returns
// exitCode; and the len==1 special case is gone, since passing an empty
// cmd[1:] to exec.Command is equivalent.
func RunCmd(cmd []string, env map[string]string) int {
	if len(cmd) == 0 {
		return exitCode
	}
	command := exec.Command(cmd[0], cmd[1:]...)
	sliceEnv := make([]string, 0, len(env))
	for name, value := range env {
		sliceEnv = append(sliceEnv, fmt.Sprintf("%s=%s", name, value))
	}
	command.Env = append(os.Environ(), sliceEnv...)
	command.Stdin = os.Stdin
	command.Stdout = os.Stdout
	command.Stderr = os.Stderr
	if err := command.Run(); err != nil {
		return exitCode
	}
	return 0
}
|
package signals
// Signal indicates that a system should shut down.
type Signal chan error
|
package main
import (
"fmt"
"time"
)
// A goroutine is a lightweight thread of execution.
// f prints three labelled counter lines, tagging each with `from`.
func f(from string) {
	for count := 0; count < 3; count++ {
		fmt.Println(from, ":", count)
	}
}
// main contrasts a synchronous call with two goroutine launches, then
// sleeps briefly so the goroutines get a chance to finish.
func main() {
	// Plain function call: runs to completion before main continues.
	f("direct")
	// The same function launched concurrently.
	go f("goroutine")
	// Anonymous functions can be launched as goroutines too.
	go func(msg string) {
		fmt.Println(msg)
	}("going with new goroutine")
	// Crude synchronization for a demo; real code would use sync.WaitGroup.
	time.Sleep(time.Second)
	fmt.Println("done!")
}
|
package main
import (
"fmt"
"strconv"
)
/*
The obligatory Hello World example.
run this with
> go run 01_helloworld.go
or, compile it and run the binary:
> go build 01_helloworld.go
> 01_helloworld
*/
// main feeds a list of candidate strings through strconv.ParseUint and
// prints each candidate followed by the parse error (if any), showing
// which forms are rejected as base-10 unsigned integers.
func main() {
	candidates := []string{"31313", "-1", "0.5", ".2", "-0.5", "t", "2"}
	for _, candidate := range candidates {
		fmt.Println(candidate)
		if _, err := strconv.ParseUint(candidate, 10, 64); err != nil {
			fmt.Println(err)
		}
	}
}
/*
func isPositiveInteger(val string) (int64, error) {
	value, err := strconv.ParseUint(val, 10, 64)
	if err != nil {
		return 0, err
	}
	return value, nil
}
/*
the package main is special. It defines a standalone executable program, rather than a library.
the function main() is also special - it's where the execution of the program begins
*/
|
package git
/*
#include <git2.h>
extern void _go_git_populate_checkout_callbacks(git_checkout_options *opts);
*/
import "C"
import (
"os"
"runtime"
"unsafe"
)
// CheckoutNotifyType is a bitmask selecting which checkout events are
// reported to the notify callback.
type CheckoutNotifyType uint

// CheckoutStrategy is a bitmask controlling how checkout updates the
// working tree and index. Values mirror libgit2's GIT_CHECKOUT_* flags.
type CheckoutStrategy uint

const (
	CheckoutNotifyNone      CheckoutNotifyType = C.GIT_CHECKOUT_NOTIFY_NONE
	CheckoutNotifyConflict  CheckoutNotifyType = C.GIT_CHECKOUT_NOTIFY_CONFLICT
	CheckoutNotifyDirty     CheckoutNotifyType = C.GIT_CHECKOUT_NOTIFY_DIRTY
	CheckoutNotifyUpdated   CheckoutNotifyType = C.GIT_CHECKOUT_NOTIFY_UPDATED
	CheckoutNotifyUntracked CheckoutNotifyType = C.GIT_CHECKOUT_NOTIFY_UNTRACKED
	CheckoutNotifyIgnored   CheckoutNotifyType = C.GIT_CHECKOUT_NOTIFY_IGNORED
	CheckoutNotifyAll       CheckoutNotifyType = C.GIT_CHECKOUT_NOTIFY_ALL

	CheckoutNone                      CheckoutStrategy = C.GIT_CHECKOUT_NONE                         // Dry run, no actual updates
	CheckoutSafe                      CheckoutStrategy = C.GIT_CHECKOUT_SAFE                         // Allow safe updates that cannot overwrite uncommitted data
	CheckoutForce                     CheckoutStrategy = C.GIT_CHECKOUT_FORCE                        // Allow all updates to force working directory to look like index
	CheckoutRecreateMissing           CheckoutStrategy = C.GIT_CHECKOUT_RECREATE_MISSING             // Allow checkout to recreate missing files
	CheckoutAllowConflicts            CheckoutStrategy = C.GIT_CHECKOUT_ALLOW_CONFLICTS              // Allow checkout to make safe updates even if conflicts are found
	CheckoutRemoveUntracked           CheckoutStrategy = C.GIT_CHECKOUT_REMOVE_UNTRACKED             // Remove untracked files not in index (that are not ignored)
	CheckoutRemoveIgnored             CheckoutStrategy = C.GIT_CHECKOUT_REMOVE_IGNORED               // Remove ignored files not in index
	CheckoutUpdateOnly                CheckoutStrategy = C.GIT_CHECKOUT_UPDATE_ONLY                  // Only update existing files, don't create new ones
	CheckoutDontUpdateIndex           CheckoutStrategy = C.GIT_CHECKOUT_DONT_UPDATE_INDEX            // Normally checkout updates index entries as it goes; this stops that
	CheckoutNoRefresh                 CheckoutStrategy = C.GIT_CHECKOUT_NO_REFRESH                   // Don't refresh index/config/etc before doing checkout
	CheckoutSkipUnmerged              CheckoutStrategy = C.GIT_CHECKOUT_SKIP_UNMERGED                // Allow checkout to skip unmerged files
	CheckoutUseOurs                   CheckoutStrategy = C.GIT_CHECKOUT_USE_OURS                     // For unmerged files, checkout stage 2 from index
	CheckoutUseTheirs                 CheckoutStrategy = C.GIT_CHECKOUT_USE_THEIRS                   // For unmerged files, checkout stage 3 from index
	CheckoutDisablePathspecMatch      CheckoutStrategy = C.GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH       // Treat pathspec as simple list of exact match file paths
	CheckoutSkipLockedDirectories     CheckoutStrategy = C.GIT_CHECKOUT_SKIP_LOCKED_DIRECTORIES      // Ignore directories in use, they will be left empty
	CheckoutDontOverwriteIgnored      CheckoutStrategy = C.GIT_CHECKOUT_DONT_OVERWRITE_IGNORED       // Don't overwrite ignored files that exist in the checkout target
	CheckoutConflictStyleMerge        CheckoutStrategy = C.GIT_CHECKOUT_CONFLICT_STYLE_MERGE         // Write normal merge files for conflicts
	CheckoutConflictStyleDiff3        CheckoutStrategy = C.GIT_CHECKOUT_CONFLICT_STYLE_DIFF3         // Include common ancestor data in diff3 format files for conflicts
	CheckoutDontRemoveExisting        CheckoutStrategy = C.GIT_CHECKOUT_DONT_REMOVE_EXISTING         // Don't overwrite existing files or folders
	CheckoutDontWriteIndex            CheckoutStrategy = C.GIT_CHECKOUT_DONT_WRITE_INDEX             // Normally checkout writes the index upon completion; this prevents that
	CheckoutUpdateSubmodules          CheckoutStrategy = C.GIT_CHECKOUT_UPDATE_SUBMODULES            // Recursively checkout submodules with same options (NOT IMPLEMENTED)
	CheckoutUpdateSubmodulesIfChanged CheckoutStrategy = C.GIT_CHECKOUT_UPDATE_SUBMODULES_IF_CHANGED // Recursively checkout submodules if HEAD moved in super repo (NOT IMPLEMENTED)
)

// CheckoutNotifyCallback is invoked for checkout events selected by
// NotifyFlags; returning a non-nil error aborts the checkout.
type CheckoutNotifyCallback func(why CheckoutNotifyType, path string, baseline, target, workdir DiffFile) error

// CheckoutProgressCallback reports per-file checkout progress.
type CheckoutProgressCallback func(path string, completed, total uint)

// CheckoutOptions is the Go-side mirror of libgit2's git_checkout_options.
type CheckoutOptions struct {
	Strategy         CheckoutStrategy   // Default will be a dry run
	DisableFilters   bool               // Don't apply filters like CRLF conversion
	DirMode          os.FileMode        // Default is 0755
	FileMode         os.FileMode        // Default is 0644 or 0755 as dictated by blob
	FileOpenFlags    int                // Default is O_CREAT | O_TRUNC | O_WRONLY
	NotifyFlags      CheckoutNotifyType // Default will be none
	NotifyCallback   CheckoutNotifyCallback
	ProgressCallback CheckoutProgressCallback
	TargetDirectory  string // Alternative checkout path to workdir
	Paths            []string
	Baseline         *Tree
}
// checkoutOptionsFromC converts a C git_checkout_options back into the
// Go CheckoutOptions it was built from. Callback functions are recovered
// from the tracked payload handles stored on the C struct.
func checkoutOptionsFromC(c *C.git_checkout_options) CheckoutOptions {
	opts := CheckoutOptions{
		Strategy:       CheckoutStrategy(c.checkout_strategy),
		DisableFilters: c.disable_filters != 0,
		DirMode:        os.FileMode(c.dir_mode),
		FileMode:       os.FileMode(c.file_mode),
		FileOpenFlags:  int(c.file_open_flags),
		NotifyFlags:    CheckoutNotifyType(c.notify_flags),
	}
	// The payloads point at the checkoutCallbackData tracked by
	// populateCheckoutOptions; pull the original Go callbacks out of it.
	if c.notify_payload != nil {
		opts.NotifyCallback = pointerHandles.Get(c.notify_payload).(*checkoutCallbackData).options.NotifyCallback
	}
	if c.progress_payload != nil {
		opts.ProgressCallback = pointerHandles.Get(c.progress_payload).(*checkoutCallbackData).options.ProgressCallback
	}
	if c.target_directory != nil {
		opts.TargetDirectory = C.GoString(c.target_directory)
	}
	return opts
}
// checkoutCallbackData is the payload tracked in pointerHandles and
// passed through C to the exported callback shims below. errorTarget
// lets a callback error surface to the Go caller of the checkout.
type checkoutCallbackData struct {
	options     *CheckoutOptions
	errorTarget *error
}
// checkoutNotifyCallback is the Go shim libgit2 invokes for each checkout
// event selected by NotifyFlags; it forwards to the user's NotifyCallback.
//
//export checkoutNotifyCallback
func checkoutNotifyCallback(
	why C.git_checkout_notify_t,
	cpath *C.char,
	cbaseline, ctarget, cworkdir, handle unsafe.Pointer,
) C.int {
	// No tracked payload means no Go callback was registered.
	if handle == nil {
		return C.int(ErrorCodeOK)
	}
	path := C.GoString(cpath)
	// Any of the three file descriptions may be NULL; leave the zero
	// DiffFile for absent ones.
	var baseline, target, workdir DiffFile
	if cbaseline != nil {
		baseline = diffFileFromC((*C.git_diff_file)(cbaseline))
	}
	if ctarget != nil {
		target = diffFileFromC((*C.git_diff_file)(ctarget))
	}
	if cworkdir != nil {
		workdir = diffFileFromC((*C.git_diff_file)(cworkdir))
	}
	data := pointerHandles.Get(handle).(*checkoutCallbackData)
	if data.options.NotifyCallback == nil {
		return C.int(ErrorCodeOK)
	}
	err := data.options.NotifyCallback(CheckoutNotifyType(why), path, baseline, target, workdir)
	if err != nil {
		// Stash the Go error where the checkout entry point can find it
		// and tell libgit2 to abort with the user error code.
		*data.errorTarget = err
		return C.int(ErrorCodeUser)
	}
	return C.int(ErrorCodeOK)
}
// checkoutProgressCallback is the Go shim libgit2 invokes to report
// per-file checkout progress; it forwards to the user's ProgressCallback.
//
//export checkoutProgressCallback
func checkoutProgressCallback(
	path *C.char,
	completed_steps, total_steps C.size_t,
	handle unsafe.Pointer,
) {
	data := pointerHandles.Get(handle).(*checkoutCallbackData)
	if data.options.ProgressCallback == nil {
		return
	}
	data.options.ProgressCallback(C.GoString(path), uint(completed_steps), uint(total_steps))
}
// populateCheckoutOptions populates the provided C-struct with the contents of
// the provided CheckoutOptions struct. Returns copts, or nil if opts is nil,
// in order to help with what to pass. Any resources it allocates
// (payload handle, C strings) are released by freeCheckoutOptions.
func populateCheckoutOptions(copts *C.git_checkout_options, opts *CheckoutOptions, errorTarget *error) *C.git_checkout_options {
	C.git_checkout_options_init(copts, C.GIT_CHECKOUT_OPTIONS_VERSION)
	if opts == nil {
		return nil
	}
	copts.checkout_strategy = C.uint(opts.Strategy)
	copts.disable_filters = cbool(opts.DisableFilters)
	// Only the permission bits of the file modes are forwarded.
	copts.dir_mode = C.uint(opts.DirMode.Perm())
	copts.file_mode = C.uint(opts.FileMode.Perm())
	copts.notify_flags = C.uint(opts.NotifyFlags)
	if opts.NotifyCallback != nil || opts.ProgressCallback != nil {
		// Install the C-side callback shims that route back into Go.
		C._go_git_populate_checkout_callbacks(copts)
		data := &checkoutCallbackData{
			options:     opts,
			errorTarget: errorTarget,
		}
		// A single tracked handle serves both callbacks; freeCheckoutOptions
		// must untrack it exactly once.
		payload := pointerHandles.Track(data)
		if opts.NotifyCallback != nil {
			copts.notify_payload = payload
		}
		if opts.ProgressCallback != nil {
			copts.progress_payload = payload
		}
	}
	if opts.TargetDirectory != "" {
		// C string freed by freeCheckoutOptions.
		copts.target_directory = C.CString(opts.TargetDirectory)
	}
	if len(opts.Paths) > 0 {
		copts.paths.strings = makeCStringsFromStrings(opts.Paths)
		copts.paths.count = C.size_t(len(opts.Paths))
	}
	if opts.Baseline != nil {
		copts.baseline = opts.Baseline.cast_ptr
	}
	return copts
}
// freeCheckoutOptions releases the resources allocated by
// populateCheckoutOptions: the target-directory C string, the paths
// strarray, and the tracked callback payload. Safe to call with nil.
func freeCheckoutOptions(copts *C.git_checkout_options) {
	if copts == nil {
		return
	}
	C.free(unsafe.Pointer(copts.target_directory))
	if copts.paths.count > 0 {
		freeStrarray(&copts.paths)
	}
	// Both payload fields hold the same tracked handle, so untrack it only
	// once — hence the else-if rather than two independent untracks.
	if copts.notify_payload != nil {
		pointerHandles.Untrack(copts.notify_payload)
	} else if copts.progress_payload != nil {
		pointerHandles.Untrack(copts.progress_payload)
	}
}
// Updates files in the index and the working tree to match the content of
// the commit pointed at by HEAD. opts may be nil.
func (v *Repository) CheckoutHead(opts *CheckoutOptions) error {
	// Pin the goroutine to one OS thread for the duration of the C call
	// so callback re-entry happens on the same thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	var err error
	cOpts := populateCheckoutOptions(&C.git_checkout_options{}, opts, &err)
	defer freeCheckoutOptions(cOpts)
	ret := C.git_checkout_head(v.ptr, cOpts)
	runtime.KeepAlive(v)
	// ErrorCodeUser signals that a Go callback aborted the checkout;
	// surface the stashed Go error instead of a generic git error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// Updates files in the working tree to match the content of the given
// index. If index is nil, the repository's index will be used. opts
// may be nil.
func (v *Repository) CheckoutIndex(index *Index, opts *CheckoutOptions) error {
	// A nil iptr tells libgit2 to use the repository's own index.
	var iptr *C.git_index = nil
	if index != nil {
		iptr = index.ptr
	}
	// Pin to one OS thread so callback re-entry stays on the same thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	var err error
	cOpts := populateCheckoutOptions(&C.git_checkout_options{}, opts, &err)
	defer freeCheckoutOptions(cOpts)
	ret := C.git_checkout_index(v.ptr, iptr, cOpts)
	runtime.KeepAlive(v)
	// ErrorCodeUser means a Go callback aborted the checkout.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// CheckoutTree updates files in the working tree to match the content of
// the given tree. opts may be nil; tree must be non-nil.
func (v *Repository) CheckoutTree(tree *Tree, opts *CheckoutOptions) error {
	// Pin to one OS thread so callback re-entry stays on the same thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	var err error
	cOpts := populateCheckoutOptions(&C.git_checkout_options{}, opts, &err)
	defer freeCheckoutOptions(cOpts)
	ret := C.git_checkout_tree(v.ptr, tree.ptr, cOpts)
	runtime.KeepAlive(v)
	runtime.KeepAlive(tree)
	// ErrorCodeUser means a Go callback aborted the checkout.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
|
package middlewares
import (
"context"
"sync"
)
var (
	// logNo is the next trace ID to hand out; guarded by mu.
	logNo int = 1
	mu    sync.Mutex
)

// newTraceID returns a process-unique, monotonically increasing trace ID
// starting at 1. It is safe for concurrent use.
func newTraceID() int {
	mu.Lock()
	// defer keeps the unlock paired with the lock even if the body grows.
	defer mu.Unlock()
	no := logNo
	logNo++
	return no
}
type traceIDKey struct{}
func SetTraceID(ctx context.Context, traceID int) context.Context {
return context.WithValue(ctx, traceIDKey{}, traceID)
}
func GetTraceID(ctx context.Context) int {
id := ctx.Value(traceIDKey{})
if idInt, ok := id.(int); ok {
return idInt
}
return 0
}
|
package main
import (
"flag"
"sync"
"net"
"io"
"log"
"fmt"
"strings"
"github.com/cheikhshift/gos/core"
)
// Data structures to manage web server instances.

// StaticHost tracks, per backend address, how many connections are
// currently assigned to it. Lock guards Cache.
type StaticHost struct {
	Lock  *sync.RWMutex
	Cache map[string]int
}

// NewCache returns an empty, ready-to-use StaticHost.
func NewCache() StaticHost {
	return StaticHost{
		Lock:  new(sync.RWMutex),
		Cache: map[string]int{},
	}
}
var (
	// PortApp is the port backend instances listen on; App is the shell
	// command run to spawn a new instance (empty disables spawning).
	PortApp,App string
	// Limit caps connections per instance; IpInc is the per-assignment
	// increment applied to an instance's connection count.
	Limit,IpInc int
	// Count is the next last-octet value used when registering a new
	// instance address (see GetServerAvailable); overwritten by -start.
	Count int = 2
	// LANnet holds the router subnet split into dot-separated octets.
	LANnet = []string{}
	// Host tracks per-instance in-flight connection counts.
	Host StaticHost
	// bspath points at a launcher script.
	// NOTE(review): bspath is never referenced in this file — confirm use.
	bspath = "./launcher.sh"
)
// GetServerAvailable returns the host:port of a backend instance with
// spare capacity, incrementing its connection count by IpInc. When every
// known instance is at Limit, it optionally spawns a new one (App) and
// registers a fresh address derived from LANnet and Count.
func GetServerAvailable() string {
	var index string
	Host.Lock.Lock()
	defer Host.Lock.Unlock()
	// Reuse any instance still under its limit. Map iteration order is
	// random, which spreads load across instances arbitrarily.
	for index,concount := range Host.Cache {
		if concount < Limit {
			Host.Cache[index] += IpInc
			return index
		}
	}
	// All instances full: launch another one if a spawn command was given.
	if App != "" {
		core.RunCmd(App)
	}
	// Overwrite an octet slot with "<Count>:<port>" so joining the octets
	// with "." yields "a.b.c.<Count>:<port>".
	// NOTE(review): lsize = len(LANnet) - IpInc only targets the last
	// octet when IpInc == 1 — confirm intent for other increments.
	lsize := len(LANnet) - IpInc
	LANnet[lsize] = fmt.Sprintf("%v:%s",Count, PortApp )
	Count++
	index = strings.Join(LANnet,".")
	// Register the new address with IpInc connections already assigned
	// (the one being handed to the caller).
	Host.Cache[index] = IpInc
	return index
}
// main parses the proxy's flags, initializes the shared backend state,
// then accepts TCP connections forever, proxying each to a backend
// instance chosen by GetServerAvailable.
func main() {
	lnch := flag.String("app", "", "Run specified terminal command each time a new instance is needed.")
	dhcpstr := flag.String("net", "192.168.0.1", "Router LAN IP address (DHCP subnet).")
	maxcon := flag.Int("max", 100, "Maximum number of connections per instance. clm-static will divide tasks.")
	addby := flag.Int("incby", 1, "Increase last octet of IP when picking the next server.")
	dhcpstart := flag.Int("start", 1, "First DHCP assigned IP of your instances (Initial value of last octet in IPv4 address). Example : with value 21, this tool will assume your first instance's ip will be 192.168.0.21")
	apport := flag.String("appPort", "8080", "Port your instances will listen on.")
	port := flag.String("port", "9000", "Port clm-static should listen on.")
	flag.Parse()

	// Publish parsed flags into the package-level state used elsewhere.
	App = *lnch
	Limit = *maxcon
	IpInc = *addby
	LANnet = strings.Split(*dhcpstr, ".")
	Count = *dhcpstart
	Host = NewCache()
	PortApp = *apport

	ln, err := net.Listen("tcp", fmt.Sprintf(":%s", *port))
	if err != nil {
		panic(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			// A failed Accept leaves conn nil; skip it instead of
			// proxying a nil connection (the old code fell through
			// and would have handed nil to handleRequest).
			log.Println(err)
			continue
		}
		go handleRequest(conn)
	}
}
// handleRequest proxies a client connection to an available backend.
// If dialing the backend fails, that backend is dropped from the cache
// and the request is retried against another instance via the deferred
// recursive call (defers run LIFO, so the unlock happens first).
// NOTE(review): the retry recursion is unbounded — a fully unreachable
// backend pool would recurse until new addresses stop failing.
func handleRequest(conn net.Conn) {
	ipaddr := GetServerAvailable()
	proxy, err := net.Dial("tcp", ipaddr )
	if err != nil {
		defer handleRequest(conn)
		Host.Lock.Lock()
		defer Host.Lock.Unlock()
		delete(Host.Cache, ipaddr)
		return
	}
	// Pump bytes in both directions; only the backend-tagged direction
	// decrements the backend's connection count when it finishes.
	go copyIO(conn, proxy,"")
	go copyIO(proxy, conn,ipaddr)
}
// copyIO streams bytes from dest into src until EOF or error, then
// closes both connections. When index names a backend cache entry, that
// backend's in-flight connection count is decremented on completion.
func copyIO(src, dest net.Conn, index string) {
	defer src.Close()
	defer dest.Close()
	io.Copy(src, dest)
	if index == "" {
		return
	}
	Host.Lock.Lock()
	Host.Cache[index] -= IpInc
	Host.Lock.Unlock()
}
|
package session
import "github.com/ipastushenko/simple-chat/server/services/auth"
// ISessionService abstracts session lifecycle management for the chat server.
type ISessionService interface {
	// SignIn authenticates the given credentials, returning an opaque
	// session value and a success flag.
	// NOTE(review): confirm the interface{} value's concrete type against
	// implementations — it is treated as an opaque session token here.
	SignIn(auth.IUserCredentials) (interface{}, bool)
	// SignOut terminates the session identified by the opaque value.
	SignOut(interface{}) error
}
|
package leetcode
// IsValidSudoku reports whether the filled cells of a 9x9 board contain
// no duplicate digit in any row, column, or 3x3 sub-box. Empty cells are
// marked '.'; the board need not be completely filled.
func IsValidSudoku(board [][]byte) bool {
	// Record the flattened position (row*9+col) of every filled cell,
	// grouped by digit.
	positions := map[byte][]int{}
	for r := 0; r < 9; r++ {
		for c := 0; c < 9; c++ {
			if cell := board[r][c]; cell != '.' {
				positions[cell] = append(positions[cell], r*9+c)
			}
		}
	}
	// Two occurrences of the same digit must disagree in column, row,
	// and sub-box; positions within each digit are in ascending order.
	for _, cells := range positions {
		for a := 0; a < len(cells); a++ {
			for b := a + 1; b < len(cells); b++ {
				p, q := cells[a], cells[b]
				sameColumn := (q-p)%9 == 0
				sameRow := q/9 == p/9
				sameBox := q%9/3 == p%9/3 && q/9/3 == p/9/3
				if sameColumn || sameRow || sameBox {
					return false
				}
			}
		}
	}
	return true
}
// isValidSudoku validates a partially filled 9x9 board by re-checking
// every non-empty cell against its row, column, and 3x3 sub-box.
func isValidSudoku(board [][]byte) bool {
	for r := 0; r < 9; r++ {
		for c := 0; c < 9; c++ {
			if board[r][c] == '.' {
				continue
			}
			if !checkValid(board[r][c], []int{r, c}, board) {
				return false
			}
		}
	}
	return true
}

// checkValid reports whether num at pos (row, column) conflicts with any
// other cell in its row, column, or enclosing 3x3 sub-box. A same-box
// cell sharing the row or column with pos is deliberately skipped here;
// the dedicated row/column scans catch that case.
func checkValid(num byte, pos []int, board [][]byte) bool {
	row, column := pos[0], pos[1]
	boxRow, boxCol := row/3*3, column/3*3
	for k := 0; k < 9; k++ {
		switch {
		case board[row][k] == num && k != column:
			return false
		case board[k][column] == num && k != row:
			return false
		case board[boxRow+k/3][boxCol+k%3] == num && boxRow+k/3 != row && boxCol+k%3 != column:
			return false
		}
	}
	return true
}
|
package middlewares
import (
"github.com/authelia/authelia/v4/internal/authentication"
)
// Require1FA check if user has enough permissions to execute the next handler.
// The wrapped handler runs only when the session loads successfully AND the
// user has reached at least one-factor authentication; otherwise the
// request is rejected with 403 Forbidden.
func Require1FA(next RequestHandler) RequestHandler {
	return func(ctx *AutheliaCtx) {
		s, err := ctx.GetSession()
		if err == nil && s.AuthenticationLevel >= authentication.OneFactor {
			next(ctx)
			return
		}
		ctx.ReplyForbidden()
	}
}
|
package main
import (
"flag"
"fmt"
"image"
"os/signal"
"syscall"
"time"
)
// 256x256 is written to in total but only 160x144 is visible.
const (
	screenWidth = 256 // full background width in pixels
	screenHeight = 256 // full background height in pixels
	visibleWidth = 160 // visible LCD width in pixels
	visibleHeight = 144 // visible LCD height in pixels
)
// global emulation state
// Gb is the singleton GameBoy instance shared by main, the LCD/TSC
// loops, and the debugger.
var Gb *GameBoy
// main constructs the global GameBoy, optionally loads a ROM given by
// -rom, seeds joypad and PC state, and starts the LCD, TSC, and
// debugger loops (debugLoop blocks as the main loop).
func main() {
	// init gameboy
	Gb = &GameBoy{
		Register:         &Register{},
		mainMemory:       &GBMem{cartridge: &GBROM{}},
		interruptEnabled: true,
		image:            image.NewRGBA(image.Rect(0, 0, screenWidth, screenHeight)),
		LCDClock:         time.NewTicker(108 * time.Microsecond),
		CPUClock:         time.NewTicker(GBClockPeriod),
		TSC:              0,
		TSCStart:         0,
		Paused:           true, // start paused; the debugger drives execution
	}
	// load rom from file
	rom_path := flag.String("rom", "", "rom image to load")
	flag.Parse()
	if *rom_path != "" {
		Gb.mainMemory.cartridge.loadROMFromFile(*rom_path)
		fmt.Printf("Loaded %s\n", *rom_path)
	}
	// Initialize joypad values.
	// NOTE(review): 0xff presumably means "no buttons pressed" — confirm
	// against the joypad register's conventions.
	Gb.mainMemory.ioregs[0] = 0xff
	/* Initialize PC to 0x100 */
	Gb.set16Reg(PC, 0x100)
	d := NewDebugger(Gb)
	/* Initialize SIGINT handler */
	// NOTE(review): sig_chan is declared elsewhere in this package.
	go d.SIGINTHandler()
	signal.Notify(sig_chan, syscall.SIGINT)
	go Gb.LCDLoop()
	go Gb.TSCLoop()
	debugLoop(d)
}
|
package utorrent
import (
"bytes"
"fmt"
"net/http"
)
// url builds the absolute request URL for path: it prefixes the API
// base, guarantees a leading slash, and appends the session token when
// one has been acquired.
func (c *Client) url(path string) string {
	if len(path) == 0 || path[0] != '/' {
		path = "/" + path
	}
	full := c.API + path
	if c.token != "" {
		full += "&token=" + c.token
	}
	return full
}
// request performs an HTTP request against the client's API with the
// given method, path, body payload, and optional extra headers. The
// caller is responsible for closing the returned response body.
func (c *Client) request(method, path string, payload []byte, headers *http.Header) (*http.Response, error) {
	if c == nil {
		// Error strings are lowercase per Go convention.
		return nil, fmt.Errorf("cannot make a request with a nil client")
	}
	req, err := http.NewRequest(method, c.url(path), bytes.NewBuffer(payload))
	if err != nil {
		return nil, err
	}
	// Every API call authenticates with HTTP basic auth.
	req.SetBasicAuth(c.Username, c.Password)
	if headers != nil {
		for header, values := range *headers {
			for _, value := range values {
				req.Header.Add(header, value)
			}
		}
	}
	// Do already returns (resp, err); no need to unpack and repack.
	return c.user_agent.Do(req)
}
// post issues a POST request to path with the given body and headers.
func (c *Client) post(path string, payload []byte, headers *http.Header) (*http.Response, error) {
	return c.request("POST", path, payload, headers)
}

// put issues a PUT request to path with the given body and headers.
func (c *Client) put(path string, payload []byte, headers *http.Header) (*http.Response, error) {
	return c.request("PUT", path, payload, headers)
}

// get issues a bodyless GET request to path with the given headers.
func (c *Client) get(path string, headers *http.Header) (*http.Response, error) {
	return c.request("GET", path, nil, headers)
}

// delete issues a bodyless DELETE request to path with the given headers.
func (c *Client) delete(path string, headers *http.Header) (*http.Response, error) {
	return c.request("DELETE", path, nil, headers)
}
// action invokes a torrent action endpoint (start, stop, …) for the
// torrent identified by hash. It returns an error when the request
// fails or the server responds with a non-200 status.
func (c *Client) action(action string, hash string, headers *http.Header) error {
	res, err := c.get(fmt.Sprintf("/?action=%s&hash=%s", action, hash), headers)
	if err != nil {
		return err
	}
	// Always close the body so the underlying connection can be reused;
	// the original leaked it on every call.
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("error status: %s", res.Status)
	}
	return nil
}
|
package main
import (
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
"log"
"github.com/jinzhu/gorm"
"fmt"
"time"
)
// Owner is a book owner; one owner has many books (has-many via OwnerID).
type Owner struct{
	gorm.Model
	FirstName string
	LastName string
	Books []Book
}

// Book belongs to an Owner and is linked to its authors through the
// books_authors join table.
type Book struct{
	gorm.Model
	Name string
	PublishDate time.Time
	OwnerID uint `sql:"index"`
	Authors []Author `gorm:"many2many:books_authors"`
}

// Author is a book author.
// NOTE(review): Firstname breaks the FirstName casing used by Owner —
// renaming would change the generated column name, so it is left as-is.
type Author struct{
	gorm.Model
	Firstname string
	LastName string
}
/*var db *gorm.DB
var err error*/
// main demonstrates basic gorm usage: connect to a local MySQL database,
// recreate the Owner/Book/Author schema, insert two owners, and query
// one back through a scope.
// NOTE(review): credentials are hard-coded ("root:password") — move to
// configuration before real use. Schema recreation is destructive.
func main() {
	db,err:=gorm.Open("mysql","root:password@/gorm_db?charset=utf8&parseTime=True&loc=Local")
	//db,err:=gorm.Open("postgres","user=aman password=password dbname=test1 sslmode=disable")
	if err!=nil{
		log.Fatal(err)
	}
	defer db.Close()
	// Verify the connection is actually usable before proceeding.
	err = db.DB().Ping()
	if err != nil {
		log.Fatal(err)
	}else{
		fmt.Println("connected")
	}
	// Use singular table names (owner, book, author).
	db.SingularTable(true)
	// Drop and recreate all three tables on every run.
	db.DropTableIfExists(&Owner{},&Book{},&Author{})
	db.CreateTable(&Owner{},&Book{},&Author{})
	owner:=Owner{FirstName:"Aman",LastName:"Patel"}
	db.Create(&owner)
	owner1:=Owner{FirstName:"Shubham",LastName:"Garg"}
	db.Create(&owner1)
	//db.Debug().Model(&Owner{}).Where("id = ?",1).Update("last_name","Patel") //updating with callbacks
	//db.Model(&Owner{}).Where("id = ?",1).Updates(map[string]interface{}{"first_name": "hello","last_name":"newname"})
	//db.Debug().Model(&Owner{}).Where("id = ?",1).UpdateColumn("first_name", "hello") //without callbaks
	//db.Debug().Model(&Owner{}).Where("id = ?",2).Delete(&Owner{}) //deleting with callback
	var owner3 []Owner
	db.Scopes(Search).Find(&owner3) //Scope is used here to apply additional conditions
	fmt.Println(owner3)
}
// Search is a gorm scope that restricts any query it is applied to
// to the record with id = 1.
func Search(db *gorm.DB) *gorm.DB {
	const pinnedID = 1
	return db.Where("id = ?", pinnedID)
}
|
package main
import (
"github.com/freignat91/mlearning/api"
"github.com/spf13/cobra"
)
// ServerLogsCmd is the "logs" subcommand; it fetches and displays the
// server's log lines, exiting fatally on error.
// NOTE(review): "toogles" in Short is a user-facing typo for "toggles";
// left unchanged here since it is a runtime string.
var ServerLogsCmd = &cobra.Command{
	Use:   "logs",
	Short: "server logs toogles",
	Run: func(cmd *cobra.Command, args []string) {
		if err := mlCli.serverLogs(cmd, args); err != nil {
			mlCli.Fatal("Error: %v\n", err)
		}
	},
}
// init registers the logs subcommand under the parent server command.
func init() {
	ServerCmd.AddCommand(ServerLogsCmd)
}
// serverLogs fetches the server's log lines through the mlearning API
// client and prints them with displayList. cmd and args are unused.
func (m *mlCLI) serverLogs(cmd *cobra.Command, args []string) error {
	api := mlapi.New(m.server)
	lines, err := api.ServerLogs()
	if err != nil {
		return err
	}
	displayList(lines)
	return nil
}
|
package iteration
import "testing"
// TestRepeat verifies Repeat produces the requested character repeated
// the requested number of times.
func TestRepeat(t *testing.T) {
	cases := []struct {
		name     string
		char     string
		count    int
		expected string
	}{
		{"Repeat a 5 times", "a", 5, "aaaaa"},
		{"Repeat b 5 times", "b", 5, "bbbbb"},
		{"Repeat a 6 times", "a", 6, "aaaaaa"},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			if got := Repeat(c.char, c.count); got != c.expected {
				t.Errorf("expected %q but got %q", c.expected, got)
			}
		})
	}
}
// BenchmarkRepeat measures the cost of repeating a single character
// five times.
func BenchmarkRepeat(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Repeat("a", 5)
	}
}
|
package main
import (
"flag"
)
// main parses the output-path and GitHub-token flags, initializes the
// database, starts the README parsing job, and then blocks forever so
// background work keeps running.
func main(){
	var path, token string
	// Flag help texts are user-facing runtime strings (Chinese: "path of
	// the generated README.md file") and are intentionally left as-is.
	flag.StringVar(&path, "p", ".", "生成的README.md文件路径")
	flag.StringVar(&token, "t", "xxx", "GitHub API access_token")
	flag.Parse()
	InitDB()
	// Start the README.md parsing job.
	StartReadmeParseJob(path, token)
	// Receiving on a channel nobody sends to parks main forever.
	signal := make(chan int)
	<-signal
}
package logger
import (
"os"
"github.com/Sirupsen/logrus"
)
// log is the package-wide logrus instance; set by Init and used by all
// wrapper functions below (which panic on nil if Init was never called).
var log *logrus.Logger

// Init configures the package logger with a text formatter and a level
// derived from the MODE environment variable, then logs a startup line.
func Init() {
	log = logrus.New()
	log.Formatter = new(logrus.TextFormatter)
	switch os.Getenv("MODE") {
	case "debug":
		log.Level = logrus.DebugLevel
	default:
		// NOTE(review): the default branch also selects DebugLevel, so MODE
		// currently has no effect — confirm whether this should be a
		// quieter level in non-debug mode.
		log.Level = logrus.DebugLevel
	}
	Info("Logger Successfully Initialize")
}
// Debug logs at debug level.
func Debug(args ...interface{}) {
	log.Debug(args...)
}

// Debugf logs a formatted message at debug level.
func Debugf(format string, args ...interface{}) {
	log.Debugf(format, args...)
}

// Info logs at info level.
func Info(args ...interface{}) {
	log.Info(args...)
}

// Infof logs a formatted message at info level.
func Infof(format string, args ...interface{}) {
	log.Infof(format, args...)
}

// Warn logs at warning level.
func Warn(args ...interface{}) {
	log.Warn(args...)
}

// Warnf logs a formatted message at warning level.
func Warnf(format string, args ...interface{}) {
	log.Warnf(format, args...)
}

// Err logs at error level.
func Err(args ...interface{}) {
	log.Error(args...)
}

// Errf logs a formatted message at error level.
func Errf(format string, args ...interface{}) {
	log.Errorf(format, args...)
}

// Fatal logs at fatal level and then exits the process (logrus behavior).
func Fatal(args ...interface{}) {
	log.Fatal(args...)
}

// Fatalf logs a formatted message at fatal level and exits the process.
func Fatalf(format string, args ...interface{}) {
	log.Fatalf(format, args...)
}

// Panic logs at panic level and then panics (logrus behavior).
func Panic(args ...interface{}) {
	log.Panic(args...)
}

// Panicf logs a formatted message at panic level and then panics.
func Panicf(format string, args ...interface{}) {
	log.Panicf(format, args...)
}
// PanicfIfError panics with the formatted message when err is non-nil;
// it is a no-op for a nil error.
// NOTE(review): err itself is not included in the panic output — callers
// must embed it in args if they want the cause recorded.
func PanicfIfError(err error, format string, args ...interface{}) {
	if err != nil {
		Panicf(format, args...)
	}
}
|
package utils
import (
"html/template"
"testing"
"github.com/stretchr/testify/assert"
)
// TestCompressedContent runs CompressedContent over a small HTML
// fragment and logs whether the output shrank.
// NOTE(review): this test only t.Log's the comparison and can never
// fail — it asserts nothing about CompressedContent's behavior.
func TestCompressedContent(t *testing.T) {
	htmlContent1 := template.HTML(`
<html>
<body>
<h1>Test</h1>
<p>CompressedContent</p>
</body>
</html>
`)
	htmlContent2 := htmlContent1
	CompressedContent(&htmlContent2)
	t.Log(len(htmlContent1) > len(htmlContent2))
}
// TestCompareVersion exercises CompareVersion with plain versions and
// with <, <=, >, >=, = constraint prefixes.
// From the cases below: with no prefix, CompareVersion(a, b) appears to
// report whether b is strictly greater than a; with a prefix, whether b
// satisfies "<prefix> a" — confirm against CompareVersion's definition.
func TestCompareVersion(t *testing.T) {
	assert.Equal(t, true, CompareVersion("v1.2.4", "v1.2.5"))
	assert.Equal(t, false, CompareVersion("v1.2.4", "v1.2.4"))
	assert.Equal(t, false, CompareVersion("v1.2.4", "v1.2.3"))
	assert.Equal(t, false, CompareVersion("v1.2.4", "v1.1.3"))
	assert.Equal(t, true, CompareVersion("v1.2.4", "v1.3.3"))
	assert.Equal(t, false, CompareVersion("v1.2.4", "v0.3.3"))
	assert.Equal(t, true, CompareVersion("<v1.2.4", "v0.3.3"))
	assert.Equal(t, false, CompareVersion("<v1.2.4", "v1.2.5"))
	assert.Equal(t, true, CompareVersion("<=v1.2.4", "v1.2.4"))
	assert.Equal(t, true, CompareVersion("<=v1.2.4", "v1.2.3"))
	assert.Equal(t, false, CompareVersion("<=v1.2.4", "v1.2.5"))
	assert.Equal(t, true, CompareVersion(">v1.2.4", "v1.2.5"))
	assert.Equal(t, false, CompareVersion(">v1.2.4", "v1.2.4"))
	assert.Equal(t, true, CompareVersion(">=v1.2.4", "v1.2.4"))
	assert.Equal(t, true, CompareVersion(">=v1.2.4", "v1.2.5"))
	assert.Equal(t, false, CompareVersion(">=v1.2.4", "v1.2.3"))
	assert.Equal(t, false, CompareVersion("=v1.2.4", "v1.2.3"))
	assert.Equal(t, true, CompareVersion("=v1.2.4", "v1.2.4"))
	// A space after the operator is tolerated.
	assert.Equal(t, true, CompareVersion("= v1.2.4", "v1.2.4"))
}
|
package lintcode
/**
 * Brute Force
 * @param k: An integer
 * @param n: An integer
 * @return: An integer denote the count of digit k in 1..n
 */
func digitCounts(k int, n int) int {
	total := 0
	// Counting digit 0 includes one occurrence for the number 0 itself.
	if k == 0 {
		total++
	}
	// Examine every number from 1 to n, digit by digit.
	for num := 1; num <= n; num++ {
		for rest := num; rest != 0; rest /= 10 {
			if rest%10 == k {
				total++
			}
		}
	}
	return total
}
|
package monster
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"os"
)
// Monster is a small demo record serialized to/from JSON by Store and
// ReStore below.
type Monster struct {
	Name string
	Age int
	Skill string
}
// Store serializes the monster to JSON and writes it to a fixed file,
// returning true on success and false (after printing the error) on any
// failure.
func (m *Monster) Store() bool {
	str, err := json.Marshal(m)
	if err != nil {
		fmt.Println("json.Marshal error=", err)
		return false
	}
	// O_TRUNC discards any previous (possibly longer) contents; without
	// it a shorter write would leave stale bytes at the end of the file,
	// corrupting the JSON that ReStore reads back.
	file, err := os.OpenFile("F:/go/src/golangStudy/testing/demo1/monster/my.txt", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		fmt.Println("os.OpenFile error=", err)
		return false
	}
	defer file.Close()
	writer := bufio.NewWriter(file)
	if _, err = writer.WriteString(string(str)); err != nil {
		fmt.Println("writer.WriteStringerror=", err)
		return false
	}
	// Flush the buffered writer so the bytes actually reach the file.
	if err = writer.Flush(); err != nil {
		fmt.Println("writer.Flusherror=", err)
		return false
	}
	return true
}
// ReStore loads the monster's fields from the JSON file written by
// Store, printing the decoded value and reporting success.
func (m *Monster) ReStore() bool {
	data, err := ioutil.ReadFile("F:/go/src/golangStudy/testing/demo1/monster/my.txt")
	if err != nil {
		fmt.Println("ioutil.ReadFile error=", err)
		return false
	}
	if err := json.Unmarshal(data, m); err != nil {
		fmt.Println("json.Unmarshal error=", err)
		return false
	}
	fmt.Println(m)
	return true
}
|
package menu
import (
"bufio"
"errors"
"fmt"
"log"
"os"
"strconv"
"strings"
"time"
ui "github.com/gizak/termui/v3"
"github.com/gizak/termui/v3/widgets"
"github.com/nsf/termbox-go"
)
// resultHeight and resultWidth bound the DisplayResult message window
// (in text rows/columns).
const resultHeight = 20
const resultWidth = 70

// validCheck validates raw user input, returning the accepted value,
// a warning message for rejected input, and whether the input is valid.
type validCheck func(string) (string, string, bool)

// Entry contains all the information needed for a boot entry.
type Entry interface {
	// Label returns the string will show in menu.
	Label() string
}
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// countNewlines returns the number of newline characters in str.
func countNewlines(str string) int {
	return strings.Count(str, "\n")
}
// Init initializes the termui backend; it must succeed before any other
// function in this package renders.
func Init() error {
	return ui.Init()
}

// Close shuts the termui backend down, restoring the terminal.
func Close() {
	ui.Close()
}
// BackRequest is returned when the user asks to return to the previous menu.
// Error strings are lowercase without trailing punctuation, per Go convention.
var BackRequest = errors.New("user requested to return to a previous menu")

// ExitRequest is returned when the user asks to exit the program.
var ExitRequest = errors.New("user requested to exit the program")
// AlwaysValid is a special isValid function that accepts any input
// unchanged and produces no warning.
func AlwaysValid(input string) (string, string, bool) {
	const noWarning = ""
	return input, noWarning, true
}
// newParagraph returns a widgets.Paragraph struct with given initial text.
// The widget spans rows [location, location+ht) and columns [0, wid),
// with a white foreground and an optional border.
func newParagraph(initText string, border bool, location int, wid int, ht int) *widgets.Paragraph {
	p := widgets.NewParagraph()
	p.Text = initText
	p.Border = border
	p.SetRect(0, location, wid, location+ht)
	p.TextStyle.Fg = ui.ColorWhite
	return p
}
// readKey blocks until a keyboard or mouse event arrives on uiEvents
// and returns its identifier; all other event types are ignored.
func readKey(uiEvents <-chan ui.Event) string {
	for {
		event := <-uiEvents
		switch event.Type {
		case ui.KeyboardEvent, ui.MouseEvent:
			return event.ID
		}
	}
}
// processInput presents an input box to user and returns the user's input.
// processInput will check validation of input using isValid function.
// It returns (validated input, input-box warning text, error), where the
// error is ExitRequest on Ctrl+D and BackRequest on Escape.
// NOTE(review): on success the second return is warning.Text (the box's
// current contents), not the validator's warning — confirm callers rely
// on this.
func processInput(introwords string, location int, wid int, ht int, isValid validCheck, uiEvents <-chan ui.Event) (string, string, error) {
	intro := newParagraph(introwords, false, location, len(introwords)+4, 3)
	location += 2
	input := newParagraph("", true, location, wid, ht+2)
	location += ht + 2
	warning := newParagraph("<Esc> to go back, <Ctrl+d> to exit", false, location, wid, 15)
	ui.Render(intro)
	ui.Render(input)
	ui.Render(warning)
	// The input box is wid characters wide
	// - 2 chars are reserved for the left and right borders
	// - 1 char is left empty at the end of input to visually
	//   signify that the text box is still accepting input
	// The user might want to input a string longer than wid-3
	// characters, so we store the full typed input in fullText
	// and display a substring of the full text to the user
	var fullText string
	for {
		k := readKey(uiEvents)
		switch k {
		case "<C-d>":
			return "", "", ExitRequest
		case "<Escape>":
			return "", "", BackRequest
		case "<Enter>":
			// Validate the full (not just displayed) text.
			inputString, warningString, ok := isValid(fullText)
			if ok {
				return inputString, warning.Text, nil
			}
			// Rejected: clear the box and surface the validator's warning.
			fullText = ""
			input.Text = ""
			warning.Text = warningString
			ui.Render(input)
			ui.Render(warning)
		case "<Backspace>":
			if len(input.Text) > 0 {
				// Trim the full text, then redisplay its visible tail.
				fullText = fullText[:len(fullText)-1]
				start := max(0, len(fullText)-wid+3)
				input.Text = fullText[start:]
				ui.Render(input)
			}
		case "<Space>":
			// termui reports space as "<Space>", so it needs its own case.
			fullText += " "
			start := max(0, len(fullText)-wid+3)
			input.Text = fullText[start:]
			ui.Render(input)
		default:
			// the termui use a string begin at '<' to represent some special keys
			// for example the 'F1' key will be parsed to "<F1>" string .
			// we should do nothing when meet these special keys, we only care about alphabets and digits.
			if k[0:1] != "<" {
				fullText += k
				start := max(0, len(fullText)-wid+3)
				input.Text = fullText[start:]
				ui.Render(input)
			}
		}
	}
}
// PromptTextInput opens a new input window with fixed width=80, height=1,
// announces the prompt on menus, and returns the user's validated input.
func PromptTextInput(introwords string, isValid validCheck, uiEvents <-chan ui.Event, menus chan<- string) (string, error) {
	menus <- introwords
	defer ui.Clear()
	input, _, err := processInput(introwords, 0, 80, 1, isValid, uiEvents)
	return input, err
}
// DisplayResult opens a new window and displays a message.
// each item in the message array will be displayed on a single line.
// Long lines are wrapped to the window width, and the user can scroll
// with arrow keys, mouse wheel, and page keys. Any other key dismisses
// the window; Ctrl+D and Escape return ExitRequest/BackRequest along
// with the currently displayed text.
func DisplayResult(message []string, uiEvents <-chan ui.Event, menus chan<- string) (string, error) {
	menus <- message[0]
	defer ui.Clear()
	// if a message is longer then width of the window, split it to shorter lines
	var wid int = resultWidth
	text := []string{}
	for _, m := range message {
		for len(m) > wid {
			text = append(text, m[0:wid])
			m = m[wid:]
		}
		text = append(text, m)
	}
	p := widgets.NewParagraph()
	p.Border = true
	p.SetRect(0, 0, resultWidth+2, resultHeight+4)
	p.TextStyle.Fg = ui.ColorWhite
	// first/last delimit the window of text rows currently shown.
	msgLength := len(text)
	first := 0
	last := min(resultHeight, msgLength)
	controlText := "<Page Up>, <Page Down> to scroll\n\nPress any other key to continue."
	controls := newParagraph(controlText, false, resultHeight+4, wid+2, 5)
	ui.Render(controls)
	for {
		p.Title = fmt.Sprintf("Message---%v/%v", first, msgLength)
		displayText := strings.Join(text[first:last], "\n")
		// Indicate whether user is at the
		// end of text for long messages
		if msgLength > resultHeight {
			if last < msgLength {
				displayText += "\n\n(More)"
			} else if last == msgLength {
				displayText += "\n\n(End of message)"
			}
		}
		p.Text = displayText
		ui.Render(p)
		k := readKey(uiEvents)
		switch k {
		case "<Up>", "<MouseWheelUp>":
			// Scroll one line up.
			first = max(0, first-1)
			last = min(first+resultHeight, len(text))
		case "<Down>", "<MouseWheelDown>":
			// Scroll one line down.
			last = min(last+1, len(text))
			first = max(0, last-resultHeight)
		case "<Left>", "<PageUp>":
			// Scroll one page up.
			first = max(0, first-resultHeight)
			last = min(first+resultHeight, len(text))
		case "<Right>", "<PageDown>":
			// Scroll one page down.
			last = min(last+resultHeight, len(text))
			first = max(0, last-resultHeight)
		case "<C-d>":
			return p.Text, ExitRequest
		case "<Escape>":
			return p.Text, BackRequest
		default:
			// Any other key dismisses the window.
			return p.Text, nil
		}
	}
}
// parsingMenuOption runs the input loop for one menu page and returns the
// index (into labels) of the entry the user selected.
//
// labels holds the display strings for all entries; menu shows a sliding
// 10-entry window of them. input echoes the digits the user types, logBox
// shows log lines (scrolled with PageUp/PageDown), and warning displays
// validation messages. customWarning, when provided, maps entry index to a
// warning text; choosing such an entry shows the warning instead of
// returning.
//
// Returns (-1, ExitRequest) on Ctrl+D and (-1, BackRequest) on Escape.
// Returns (0, error) when labels is empty.
func parsingMenuOption(labels []string, menu *widgets.List, input *widgets.Paragraph, logBox *widgets.List, warning *widgets.Paragraph, uiEvents <-chan ui.Event, customWarning ...string) (int, error) {
	if len(labels) == 0 {
		return 0, fmt.Errorf("No Entry in the menu")
	}
	// Title template: "<original title>---<first visible index>/<total>".
	menuTitle := menu.Title + "---%v/%v"
	// first, last always point to the first and last entry in current menu page
	first := 0
	last := min(10, len(labels))
	listData := labels[first:last]
	menu.Rows = listData
	menu.Title = fmt.Sprintf(menuTitle, first, len(labels))
	ui.Render(menu)
	// keep tracking all input from user
	for {
		k := readKey(uiEvents)
		switch k {
		case "<C-d>":
			return -1, ExitRequest
		case "<Escape>":
			return -1, BackRequest
		case "<Enter>":
			choose := input.Text
			input.Text = ""
			ui.Render(input)
			c, err := strconv.Atoi(choose)
			// Input is valid if the selected index
			// is between 0 <= input < len(labels)
			if err == nil && c >= 0 && c < len(labels) {
				// if there is not specific warning for this entry, return it
				// elsewise show the warning and continue
				if len(customWarning) > c && customWarning[c] != "" {
					warning.Text = customWarning[c]
					ui.Render(warning)
					continue
				}
				return c, nil
			}
			warning.Text = "Please enter a valid entry number."
			ui.Render(warning)
		case "<Backspace>":
			// Remove the last typed character.
			if len(input.Text) > 0 {
				input.Text = input.Text[:len(input.Text)-1]
				ui.Render(input)
			}
		case "<Left>":
			// Previous page (10 entries back).
			first = max(0, first-10)
			last = min(first+10, len(labels))
			listData := labels[first:last]
			menu.Rows = listData
			menu.Title = fmt.Sprintf(menuTitle, first, len(labels))
			ui.Render(menu)
		case "<Right>":
			// Next page; ignore when already on the last page.
			if first+10 >= len(labels) {
				continue
			}
			first = first + 10
			last = min(first+10, len(labels))
			listData := labels[first:last]
			menu.Rows = listData
			menu.Title = fmt.Sprintf(menuTitle, first, len(labels))
			ui.Render(menu)
		case "<PageUp>":
			// scroll up in the log box
			logBox.ScrollHalfPageUp()
			ui.Render(logBox)
		case "<PageDown>":
			// scroll down in the log box
			logBox.ScrollHalfPageDown()
			ui.Render(logBox)
		case "<Up>", "<MouseWheelUp>":
			// move one line up
			first = max(0, first-1)
			last = min(first+10, len(labels))
			listData := labels[first:last]
			menu.Rows = listData
			menu.Title = fmt.Sprintf(menuTitle, first, len(labels))
			ui.Render(menu)
		case "<Down>", "<MouseWheelDown>":
			// move one line down
			last = min(last+1, len(labels))
			first = max(0, last-10)
			listData := labels[first:last]
			menu.Rows = listData
			menu.Title = fmt.Sprintf(menuTitle, first, len(labels))
			ui.Render(menu)
		case "<Home>":
			// first page
			first = 0
			last = min(first+10, len(labels))
			listData := labels[first:last]
			menu.Rows = listData
			menu.Title = fmt.Sprintf(menuTitle, first, len(labels))
			ui.Render(menu)
		case "<End>":
			// last page
			last = len(labels)
			first = max(0, last-10)
			listData := labels[first:last]
			menu.Rows = listData
			menu.Title = fmt.Sprintf(menuTitle, first, len(labels))
			ui.Render(menu)
		case "<Space>":
			input.Text += " "
			ui.Render(input)
		default:
			// the termui use a string begin at '<' to represent some special keys
			// for example the 'F1' key will be parsed to "<F1>" string .
			// we should do nothing when meet these special keys, we only care about alphabets and digits.
			// NOTE(review): assumes readKey never returns an empty string —
			// k[0:1] would panic otherwise; confirm against readKey.
			if k[0:1] != "<" {
				input.Text += k
				ui.Render(input)
			}
		}
	}
}
// PromptMenuEntry presents all entries as a numbered menu and blocks until
// the user selects one by typing its number, returning the chosen Entry.
//
// The screen is split into five stacked boxes: the menu itself, the intro
// text, the input echo box, a log box (filled from logOutput.txt), and a
// warning/hint box. customWarning allows per-entry warnings — e.g. the wifi
// menu shows a specific warning when the user picks an unsupported network
// type.
//
// The selected menu title is also sent on the menus channel. Errors from
// parsingMenuOption (ExitRequest/BackRequest) are passed through.
func PromptMenuEntry(menuTitle string, introwords string, entries []Entry, uiEvents <-chan ui.Event, menus chan<- string, customWarning ...string) (Entry, error) {
	menus <- menuTitle
	defer ui.Clear()
	// listData contains all choice's labels, rendered as "[i] label".
	listData := []string{}
	for i, e := range entries {
		listData = append(listData, fmt.Sprintf("[%d] %s", i, e.Label()))
	}
	windowWidth, windowHeight := termbox.Size()
	// location will serve as the y1 coordinate in this function.
	location := 0
	menu := widgets.NewList()
	menu.Title = menuTitle
	// windowHeight is divided by 5 to make room for the five boxes that will be on the screen.
	height := windowHeight / 5
	// menu is the box with the options. It will be at the top of the screen.
	menu.SetRect(0, location, windowWidth, height)
	menu.TextStyle.Fg = ui.ColorWhite
	location += height
	// A variable to help get rid of the gap between "Choose an option:" and its
	// corresponding box.
	alternateHeight := 2
	intro := newParagraph(introwords, false, location, windowWidth, height)
	location += alternateHeight
	input := newParagraph("", true, location, windowWidth, height)
	location += height
	logBox := widgets.NewList()
	logBox.Title = "Logs:"
	logBox.WrapText = false
	logBox.SetRect(0, location, windowWidth, location+height)
	location += height
	warning := newParagraph("<Esc> to go back, <Ctrl+d> to exit", false, location, windowWidth, height)
	// Write the contents of the log output text file to the log box.
	// NOTE(review): log.Fatal terminates the whole program if the log file
	// cannot be opened or read — confirm this is intended for a UI prompt.
	var file, err = os.OpenFile("logOutput.txt", os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644)
	if err != nil {
		log.Fatal(err)
	}
	scanner := bufio.NewScanner(file)
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		logBox.Rows = append(logBox.Rows, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	ui.Render(intro)
	ui.Render(input)
	ui.Render(warning)
	ui.Render(logBox)
	chooseIndex, err := parsingMenuOption(listData, menu, input, logBox, warning, uiEvents, customWarning...)
	if err != nil {
		return nil, err
	}
	return entries[chooseIndex], nil
}
// PromptConfirmation displays message together with a Yes/No choice and
// blocks until the user answers. It returns true for Yes ("0") and false
// for No ("1"); Escape yields BackRequest and Ctrl+D yields ExitRequest.
// The message title is also sent on the menus channel.
func PromptConfirmation(message string, uiEvents <-chan ui.Event, menus chan<- string) (bool, error) {
	defer ui.Clear()
	menus <- message
	wid := resultWidth
	// Wrap the message into lines no wider than the box.
	var b strings.Builder
	for len(message) > wid {
		b.WriteString(message[:wid])
		b.WriteString("\n")
		message = message[wid:]
	}
	b.WriteString(message)
	b.WriteString("\n")
	b.WriteString("\n[0] Yes\n[1] No\n")
	text := b.String()
	position := 1 + countNewlines(text) + 2
	wid += 2 // 2 borders
	ui.Render(newParagraph(text, true, 0, wid, position))
	ui.Render(newParagraph("Choose an option:", false, position+1, wid, 1))
	entry := newParagraph("", true, position+2, wid, 3)
	ui.Render(entry)
	ui.Render(newParagraph("<Esc> to go back, <Ctrl+d> to exit", false, position+6, wid, 1))
	for {
		switch readKey(uiEvents) {
		case "<Escape>":
			return false, BackRequest
		case "<C-d>":
			return false, ExitRequest
		case "<Enter>":
			if entry.Text == "0" {
				return true, nil
			}
			if entry.Text == "1" {
				return false, nil
			}
		case "0":
			entry.Text = "0"
			ui.Render(entry)
		case "1":
			entry.Text = "1"
			ui.Render(entry)
		case "<Backspace>":
			entry.Text = ""
			ui.Render(entry)
		}
	}
}
// Progress is a transient "Operation Running" box shown while work is in
// flight. When animated, a background goroutine appends cycling dots to the
// text; sigTerm/ackTerm implement the stop handshake used by Close.
type Progress struct {
	paragraph *widgets.Paragraph // the rendered box
	animated bool // whether the dot-animation goroutine is running
	sigTerm chan bool // Close -> animate: stop now
	ackTerm chan bool // animate -> Close: stopped
}
// NewProgress creates and immediately renders a progress box showing text.
// When animated is true, a background goroutine periodically appends
// trailing dots to the text until Close is called.
func NewProgress(text string, animated bool) Progress {
	box := widgets.NewParagraph()
	box.Border = true
	box.SetRect(0, 0, resultWidth, 10)
	box.TextStyle.Fg = ui.ColorWhite
	box.Title = "Operation Running"
	box.Text = text
	ui.Render(box)
	progress := Progress{
		paragraph: box,
		animated:  animated,
		sigTerm:   make(chan bool),
		ackTerm:   make(chan bool),
	}
	if animated {
		go progress.animate()
	}
	return progress
}
// Update replaces the progress text and re-renders the box.
func (p *Progress) Update(text string) {
	p.paragraph.Text = text
	ui.Render(p.paragraph)
}
// animate renders the progress text with zero to three trailing dots, once
// per second, until a stop signal arrives on sigTerm; it then acknowledges
// on ackTerm and exits.
//
// The original slept with time.Sleep inside the select's default branch,
// which blocked the loop for up to a second before it could observe
// sigTerm, making Close stall. Selecting on a timer keeps the loop
// responsive to the stop signal.
func (p *Progress) animate() {
	counter := 0
	for {
		select {
		case <-p.sigTerm:
			p.ackTerm <- true
			return
		case <-time.After(time.Second):
			base := p.paragraph.Text
			p.Update(base + strings.Repeat(".", counter%4))
			// Restore the un-dotted text so dots never accumulate.
			p.paragraph.Text = base
			counter++
		}
	}
}
// Close stops the animation goroutine (if one was started) and clears the
// screen.
func (p *Progress) Close() {
	if !p.animated {
		ui.Clear()
		return
	}
	p.sigTerm <- true // ask animate() to stop
	<-p.ackTerm       // wait until it has exited
	ui.Clear()
}
|
package main
import "ms/sun/servises/event_service"
// listernAndSaverActions subscribes to all post/user events and dispatches
// each received event to its handler. It never returns.
//
// NOTE(review): Added_Post_Event is subscribed to but has no case in the
// dispatch select below — confirm whether added-post events should be
// handled or the subscription dropped.
func listernAndSaverActions() {
	subParam := event_service.SubParam{
		Added_Post_Event:       true,
		Deleted_Post_Event:     true,
		Liked_Post_Event:       true,
		UnLiked_Post_Event:     true,
		Commented_Post_Event:   true,
		UnCommented_Post_Event: true,
		Followed_User_Event:    true,
		UnFollwed_User_Event:   true,
		Blocked_User_Event:     true,
		UnBlocked_User_Event:   true,
	}
	sub := event_service.NewSub(subParam)
	// The dispatch loop never exits, so the original outer re-subscription
	// loop could only ever run once; it was dead code and has been removed.
	for {
		select {
		case e := <-sub.Deleted_Post_Event:
			on_Deleted_Post(e)
		case e := <-sub.Liked_Post_Event:
			on_Liked_Post(e)
		case e := <-sub.UnLiked_Post_Event:
			on_UnLiked_Post(e)
		case e := <-sub.Commented_Post_Event:
			on_Commented_Post(e)
		case e := <-sub.UnCommented_Post_Event:
			on_UnCommented(e)
		case e := <-sub.Followed_User_Event:
			on_Followed(e)
		case e := <-sub.UnFollwed_User_Event:
			on_UnFollwed(e)
		case e := <-sub.Blocked_User_Event:
			on_Blocked(e)
		case e := <-sub.UnBlocked_User_Event:
			on_UnBlocked(e)
		}
	}
}
// on_Deleted_Post handles a deleted-post event. Stub: not yet implemented.
func on_Deleted_Post(event event_service.GeneralEvent) {
}

// on_Liked_Post handles a liked-post event. Stub: not yet implemented.
func on_Liked_Post(event event_service.GeneralEvent) {
}

// on_UnLiked_Post handles an unliked-post event. Stub: not yet implemented.
func on_UnLiked_Post(event event_service.GeneralEvent) {
}

// on_Commented_Post handles a new-comment event. Stub: not yet implemented.
func on_Commented_Post(event event_service.GeneralEvent) {
}

// on_UnCommented handles a comment-removed event. Stub: not yet implemented.
func on_UnCommented(event event_service.GeneralEvent) {
}

// on_Followed handles a user-followed event. Stub: not yet implemented.
func on_Followed(event event_service.GeneralEvent) {
}

// on_UnFollwed handles a user-unfollowed event. Stub: not yet implemented.
func on_UnFollwed(event event_service.GeneralEvent) {
}

// on_Blocked handles a user-blocked event. Stub: not yet implemented.
func on_Blocked(event event_service.GeneralEvent) {
}

// on_UnBlocked handles a user-unblocked event. Stub: not yet implemented.
func on_UnBlocked(event event_service.GeneralEvent) {
}
/*func on_Deleted_Post(event event_service.GeneralEvent) {
}
*/
|
package main
import (
"encoding/json"
"fmt"
)
// main demonstrates decoding a (Xunlei filemail) upload response; only the
// fields declared on UploadRes are extracted, the rest of the JSON payload
// is ignored by encoding/json.
func main() {
	// var jsonBlob = []byte(`{"result":0,"dcdn_progress":0,"message":"","root_url":"http://up057.tw11a.filemail.xunlei.com","uri":"request_upload","query_str":"g=22596363b3de40b06f981fb85d82312e8c0ed511&s=12&t=1525689963&ver=1&tid=c28ebf311c2bbe6878c07f98edf253ea&ui=150007900&e=1526294763&ms=10485760&ak=0:0:0:0&pk=filemail&aid=30d30bbe013e1caecc375328fbe2e238","block_size":0}`)
	var jsonBlob = []byte(`{"result":5,"dcdn_progress":2}`)
	// UploadRes holds the subset of the response we care about. The explicit
	// json tag replaces the original Dcdn_Progress field name, which relied
	// on encoding/json's case-insensitive field matching and violated the Go
	// MixedCaps naming convention.
	type UploadRes struct {
		Result       int
		DcdnProgress int `json:"dcdn_progress"`
		// Message string
		// RootUrl string
		// Uri string
		// QueryStr string
		// BlockSize int
	}
	var res UploadRes
	if err := json.Unmarshal(jsonBlob, &res); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Printf("%+v\n", res)
}
|
package main
import "fmt"
// main demonstrates using a uint8 as a compact bit set over {0..7}:
// bit i set means i is a member of the set.
func main() {
	var x uint8 = 1<<1 | 1<<4 | 1<<7
	var y uint8 = 1<<4 | 1<<6
	fmt.Printf("%08b\n", x) // 10010010, the set {1, 4, 7}
	fmt.Printf("%08b\n", y) // 01010000, the set {4, 6}
	fmt.Printf("%08b\n", x&y) // 00010000, the intersection {4}
	fmt.Printf("%08b\n", x|y) // 11010010, the union {1, 4, 6, 7}
	fmt.Printf("%08b\n", x^y) // 11000010, the symmetric difference {1, 6, 7}
	fmt.Printf("%08b\n", x&^y) // 10000010, the difference {1, 7}
	// Printing the members of set x outputs 1 4 7.
	for i := uint(0); i < 8; i++ {
		if x&(1<<i) != 0 {
			fmt.Println(i)
		}
	}
	// Printing the members of set y outputs 4 6.
	for i := uint(0); i < 8; i++ {
		if y&(1<<i) != 0 {
			fmt.Println(i)
		}
	}
}
|
package tree
// sumOfLeftLeaves returns the sum of the values of all left leaves in the
// tree rooted at root. A left leaf is a node with no children that is the
// Left child of its parent. A nil or single-node tree contributes 0.
func sumOfLeftLeaves(root *TreeNode) int {
	if root == nil {
		return 0
	}
	total := 0
	if left := root.Left; left != nil {
		if left.Left == nil && left.Right == nil {
			// Direct left leaf: count its value.
			total += left.Val
		} else {
			// Internal left child: recurse into its subtree.
			total += sumOfLeftLeaves(left)
		}
	}
	// Right subtrees can still contain left leaves deeper down.
	total += sumOfLeftLeaves(root.Right)
	return total
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package minibroker
import (
"fmt"
"net/url"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
)
const (
	// mysqlProtocolName is the protocol identifier and URI scheme for MySQL.
	mysqlProtocolName = "mysql"
	// rootMysqlUsername is the MySQL superuser account name.
	rootMysqlUsername = "root"
)
// MySQLProvider assembles binding credentials for MySQL service instances.
// It embeds hostBuilder to derive the service host name.
type MySQLProvider struct {
	hostBuilder
}
// Bind builds the credentials object for a MySQL binding from the first
// provisioned service: protocol, host, port, username, password, database
// and a composed mysql:// URI.
//
// The password is read from the chart secrets under "mysql-root-password"
// for the root user and "mysql-password" otherwise.
func (p MySQLProvider) Bind(
	services []corev1.Service,
	_ *BindParams,
	provisionParams *ProvisionParams,
	chartSecrets Object,
) (Object, error) {
	// Guard the slice access: the original indexed services[0]
	// unconditionally and would panic on an empty service list.
	if len(services) == 0 {
		return nil, errors.Errorf("no services found")
	}
	service := services[0]
	if len(service.Spec.Ports) == 0 {
		return nil, errors.Errorf("no ports found")
	}
	svcPort := service.Spec.Ports[0]
	host := p.hostFromService(&service)
	database, err := provisionParams.DigStringOr("mysqlDatabase", "")
	if err != nil {
		return nil, fmt.Errorf("failed to get database name: %w", err)
	}
	user, err := provisionParams.DigStringOr("mysqlUser", rootMysqlUsername)
	if err != nil {
		return nil, fmt.Errorf("failed to get username: %w", err)
	}
	// The chart stores the root password under a different secret key.
	var passwordKey string
	if user == rootMysqlUsername {
		passwordKey = "mysql-root-password"
	} else {
		passwordKey = "mysql-password"
	}
	password, err := chartSecrets.DigString(passwordKey)
	if err != nil {
		return nil, fmt.Errorf("failed to get password: %w", err)
	}
	creds := Object{
		"protocol": mysqlProtocolName,
		"port":     svcPort.Port,
		"host":     host,
		"username": user,
		"password": password,
		"database": database,
		"uri": (&url.URL{
			Scheme: mysqlProtocolName,
			User:   url.UserPassword(user, password),
			Host:   fmt.Sprintf("%s:%d", host, svcPort.Port),
			Path:   database,
		}).String(),
	}
	return creds, nil
}
|
package main
//@todo: implement a database
// as you guessed it was hell writing this code
// Person is the top-level CV/resume record rendered by this program.
type Person struct {
	Name string // full display name
	Job string // current job title
	Summary string // free-text professional summary
	PersonalInfo []*PersonalInfo // contact and profile links
	Skills []string // skill bullet points
	Experience []string // experience bullet points
	Education []string // education history entries
	Posts []*Employment // employment history
}
// PersonalInfo is one labelled contact/profile entry, e.g. "Email" -> address.
type PersonalInfo struct {
	Title string // label shown to the reader
	Info string // the corresponding value
}
// Employment describes one position in the employment history.
type Employment struct {
	Company string // employer name and location
	Period string // employment period, free-form text
	Title string // job title held
	Duties []string // responsibility bullet points
}
// getData returns a hard-coded sample Person record used as CV data.
//
// The personal-info entries are declared as an ordered slice literal; the
// original built them by ranging over a map, which made their display order
// random on every run (Go map iteration order is unspecified).
func getData() Person {
	pInfo := []*PersonalInfo{
		{Title: "Address", Info: "4454 Silicon savannah, Kenya"},
		{Title: "Phone", Info: "+24700001234"},
		{Title: "Email", Info: "jackogina@coder.com"},
		{Title: "Website", Info: "jack.solutions.com"},
		{Title: "Stackoverflow", Info: "stackoverflow/users/12345/jakogina"},
		{Title: "Github", Info: "github.com/jakhax"},
		{Title: "Linked", Info: "linked.com/users/jakogina"},
	}
	edu := []string{"Kapsabet High School, K.C.S.E",
		"Moringa School, Full Stack Developer",
		"Some University, Mathematics & Computer Science"}
	experince := []string{"Engineering web development, all layers, from database to services to user interfaces",
		"Supporting legacy systems with backups of all cases to/from parallel systems",
		"Analysis and design of databases and user interfaces",
		"Managing requirements",
		"Implementing software development lifecycle policies and procedures",
		"Managing and supporting multiple projects",
		"Highly adaptable in quickly changing technical environments with very strong organizational and analytical skills"}
	summary := "A results-driven, customer-focused, articulate and analytical Senior Software Engineer who can think “out of the box.” Strong in design and integration problem-solving skills. Expert in Java, C#, .NET, and T-SQL with database analysis and design."
	skills := []string{"Databases: MySQL, Oracle, Access, SAP",
		"Software: Microsoft Office, Remedy, Microsoft SQL Server, DB Artisan, Eclipse, Visual Studio.NET, FrontPage",
		"Languages: C#, Java, Visual Basic, ASP, XML, XSL, JWS, SQL, and Python"}
	e1 := Employment{Company: "E*Trade Financial, Silicon Valley.",
		Period: "CA July 2012 – Present",
		Title:  "Software Engineer (Customer Service Systems)",
		Duties: []string{"Re-engineered customer account software systems used by brokerage teams.",
			"Web developer for user interfaces to trading inquiries, support parallel systems.",
			"Developed and implemented new feedback system for users concerns, bugs, and defect tracking regarding use and functionality of new interfaces.",
			"Coded web designed interfaces using Java, XML, XSL, AJAX, and JWS.",
			"Support system for existing intranet for employees, including designing and developing the Advantage@Work system company-wide.",
			"Code and support provided through ASP.NET, T-SQL, Microsoft SQL Server, and Oracle 9i.",
			"Supported existing legacy system to provide newly created cases and ensured they were available in the systems in parallel until legacy systems were retired.",
		}}
	e2 := Employment{Company: "Intel Corporation, Silicon savannah",
		Period: "Jan 2005 – Jul 2012",
		Title:  "Systems Programmer (Remote Servers and SSL Product Analyst)",
		Duties: []string{"Deployed and tested Remote Installation Services(RIS)-Server Installs on Windows XP.",
			"Focused deployment of Server builds and handled some client builds.",
			"Modified Visual Basic applications for use in post-server builds for customizing builds.",
			"Researched RIS and Active Directory for future deployment worldwide.",
			"Wrote bi-monthly progress reports, participated in weekly staff meetings and JDP team meetings designed to develop white paper processing.",
			"Provide technical support to the SSL team, managing inventory."}}
	employment := []*Employment{&e1, &e2}
	user := Person{
		Name:         "JakHax",
		Job:          "Software Developer",
		Summary:      summary,
		PersonalInfo: pInfo,
		Skills:       skills,
		Posts:        employment,
		Experience:   experince,
		Education:    edu,
	}
	return user
}
|
package parser
import "fmt"
// Err represents a generic parser error: an underlying error annotated with
// the cursor position at which it occurred.
type Err struct {
	Err error
	At Cursor
}

// Error implements the error interface, formatting as "<err> at <cursor>".
func (err *Err) Error() string {
	return fmt.Sprintf("%s at %s", err.Err, err.At.String())
}
// ErrUnexpectedToken is a parser error reporting a token that did not match
// the pattern the parser was expecting at the given cursor position.
// Expected may be nil when no particular pattern was anticipated.
type ErrUnexpectedToken struct {
	At Cursor
	Expected Pattern
}

// Error implements the error interface. When a pattern was expected its
// designation is included in the message.
func (err *ErrUnexpectedToken) Error() string {
	if err.Expected != nil {
		return fmt.Sprintf(
			"unexpected token, expected {%s} at %s",
			err.Expected.Desig(),
			err.At,
		)
	}
	return fmt.Sprintf(
		"unexpected token at %s",
		err.At,
	)
}
// errEOF is an internal sentinel error reported when input ends unexpectedly.
type errEOF struct{}

// Error implements the error interface.
func (err errEOF) Error() string { return "eof" }
|
/*
Package trie implements a trie data-structure similar to the one described by
Donald E Knuth in “Programming Pearls”. (Communications of the ACM,
Vol. 29, No. 6, June 1986,
https://cecs.wright.edu/people/faculty/pmateti/Courses/7140/PDF/cwp-knuth-cacm-1986.pdf).
The trie is suitable for write-once-read-many-times situations. The idea is to
spend some effort to create a compact but efficient dictionary for categorical data.
License
Governed by a 3-Clause BSD license. License file may be found in the root
folder of this module.
Copyright © 2021 Norbert Pillmayer <norbert@pillmayer.com>
*/
package trie
|
/*
Copyright 2021 RadonDB.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sidecar
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path"
"strconv"
"github.com/spf13/cobra"
)
// NewInitCommand return a pointer to cobra.Command.
func NewInitCommand(cfg *Config) *cobra.Command {
cmd := &cobra.Command{
Use: "init",
Short: "do some initialization operations.",
Run: func(cmd *cobra.Command, args []string) {
if err := runInitCommand(cfg); err != nil {
log.Error(err, "init command failed")
os.Exit(1)
}
},
}
return cmd
}
// runInitCommand performs the sidecar's initialization steps in order:
// fixing data-directory ownership, laying down MySQL configuration files
// (my.cnf, client.conf, extra.cnf, init.sql, reset.sql), generating the
// lifecycle shell scripts (post-start, pre-stop, leader-start, leader-stop),
// optionally disabling transparent hugepages for TokuDB, and writing
// xenon.json. Each failure is wrapped with a step-specific message.
func runInitCommand(cfg *Config) error {
	var err error
	// NOTE(review): the error from checkIfPathExists is discarded — an
	// actual stat failure is treated the same as "does not exist"; confirm
	// that is acceptable.
	if exists, _ := checkIfPathExists(dataPath); exists {
		// remove lost+found.
		if err := os.RemoveAll(dataPath + "/lost+found"); err != nil {
			return fmt.Errorf("removing lost+found: %s", err)
		}
		// Get the mysql user. (This local shadows the "os/user" package
		// name for the rest of this if-block.)
		user, err := user.Lookup("mysql")
		if err != nil {
			return fmt.Errorf("failed to get mysql user: %s", err)
		}
		uid, err := strconv.Atoi(user.Uid)
		if err != nil {
			return fmt.Errorf("failed to get mysql user uid: %s", err)
		}
		gid, err := strconv.Atoi(user.Gid)
		if err != nil {
			return fmt.Errorf("failed to get mysql user gid: %s", err)
		}
		// chown -R mysql:mysql /var/lib/mysql.
		if err = os.Chown(dataPath, uid, gid); err != nil {
			return fmt.Errorf("failed to chown %s: %s", dataPath, err)
		}
	}
	// copy appropriate my.cnf from config-map to config mount.
	if err = copyFile(path.Join(configMapPath, "my.cnf"), path.Join(configPath, "my.cnf")); err != nil {
		return fmt.Errorf("failed to copy my.cnf: %s", err)
	}
	// build client.conf.
	clientConfig, err := cfg.buildClientConfig()
	if err != nil {
		return fmt.Errorf("failed to build client.conf: %s", err)
	}
	// save client.conf to /etc/mysql.
	if err := clientConfig.SaveTo(path.Join(clientConfPath)); err != nil {
		return fmt.Errorf("failed to save client.conf: %s", err)
	}
	// Ensure the extra-config directory exists; an already-existing
	// directory is fine.
	if err = os.Mkdir(extraConfPath, os.FileMode(0755)); err != nil {
		if !os.IsExist(err) {
			return fmt.Errorf("error mkdir %s: %s", extraConfPath, err)
		}
	}
	// Run reset master in init-mysql container.
	if err = ioutil.WriteFile(initFilePath+"/reset.sql", []byte("reset master;"), 0644); err != nil {
		return fmt.Errorf("failed to write reset.sql: %s", err)
	}
	// build init.sql.
	initSqlPath := path.Join(extraConfPath, "init.sql")
	if err = ioutil.WriteFile(initSqlPath, cfg.buildInitSql(), 0644); err != nil {
		return fmt.Errorf("failed to write init.sql: %s", err)
	}
	// build extra.cnf.
	extraConfig, err := cfg.buildExtraConfig(initSqlPath)
	if err != nil {
		return fmt.Errorf("failed to build extra.cnf: %s", err)
	}
	// save extra.cnf to conf.d.
	if err := extraConfig.SaveTo(path.Join(extraConfPath, "extra.cnf")); err != nil {
		return fmt.Errorf("failed to save extra.cnf: %s", err)
	}
	// build post-start.sh.
	bashPostStartPath := path.Join(scriptsPath, "post-start.sh")
	bashPostStart, err := cfg.buildPostStart()
	if err != nil {
		return fmt.Errorf("failed to build post-start.sh: %s", err)
	}
	if err = ioutil.WriteFile(bashPostStartPath, bashPostStart, os.FileMode(0755)); err != nil {
		return fmt.Errorf("failed to write post-start.sh: %s", err)
	}
	// build pre-stop.sh.
	bashPreStopPath := path.Join(scriptsPath, "pre-stop.sh")
	bashPreStop := cfg.buildPreStop()
	if err = ioutil.WriteFile(bashPreStopPath, bashPreStop, os.FileMode(0755)); err != nil {
		return fmt.Errorf("failed to write pre-stop.sh: %s", err)
	}
	// build leader-start.sh.
	bashLeaderStart := cfg.buildLeaderStart()
	leaderStartPath := path.Join(scriptsPath, "leader-start.sh")
	if err = ioutil.WriteFile(leaderStartPath, bashLeaderStart, os.FileMode(0755)); err != nil {
		return fmt.Errorf("failed to write leader-start.sh: %s", err)
	}
	// build leader-stop.sh.
	bashLeaderStop := cfg.buildLeaderStop()
	leaderStopPath := path.Join(scriptsPath, "leader-stop.sh")
	if err = ioutil.WriteFile(leaderStopPath, bashLeaderStop, os.FileMode(0755)); err != nil {
		return fmt.Errorf("failed to write leader-stop.sh: %s", err)
	}
	// for install tokudb: TokuDB requires transparent hugepages disabled.
	if cfg.InitTokuDB {
		arg := fmt.Sprintf("echo never > %s/enabled", sysPath)
		cmd := exec.Command("sh", "-c", arg)
		cmd.Stderr = os.Stderr
		if err = cmd.Run(); err != nil {
			return fmt.Errorf("failed to disable the transparent_hugepage: %s", err)
		}
	}
	// build xenon.json.
	xenonFilePath := path.Join(xenonPath, "xenon.json")
	if err = ioutil.WriteFile(xenonFilePath, cfg.buildXenonConf(), 0644); err != nil {
		return fmt.Errorf("failed to write xenon.json: %s", err)
	}
	log.Info("init command success")
	return nil
}
|
package dto
// UserMeditationExercise combines a meditation exercise definition with a
// user's started-exercise state via struct embedding, so both sets of
// fields are flattened into one transfer object.
type UserMeditationExercise struct {
	MeditationExercise
	MeditationExerciseStarted
}
|
package docker
import (
"bufio"
"fmt"
"github.com/Sirupsen/logrus"
dc "github.com/fsouza/go-dockerclient"
"github.com/rootsongjc/magpie/utils"
"github.com/samalba/dockerclient"
"github.com/spf13/viper"
"io"
"os"
"strings"
"sync"
"time"
)
//docker_cluster_state aggregates per-cluster container counts for the
//status report printed by Get_docker_status.
type docker_cluster_state struct {
	cluster string // cluster name, e.g. "yarn1"
	total int // total containers belonging to the cluster
	running int // containers in the "running" state
	exited int // containers in the "exited" state
}

//logger is the package-wide structured logger.
var logger = utils.Logger()
//Yarn_docker_container describes one yarn nodemanager docker container as
//reported by the swarm cluster.
type Yarn_docker_container struct {
	ID string // 12-character short container ID
	Clustername string //yarn1
	Name string //yarn1-20160912-nm1
	Status string //exited
	State string //Exited (0) 3 hours ago
	Ip string //172.18.12.31 ("-" when not running)
	Host string //bj-dc-datanode-078.tendcloud.com
}
//Get_docker_status prints a per-cluster summary table (total/running/exited)
//for the given cluster names, followed by the list of exited containers.
func Get_docker_status(cluster_names []string) {
	fmt.Println("======================DOCKER CLUSTER STATUS=========================")
	fmt.Println("CLUSTER\tTOTAL\tRUNNING\tEXITED")
	yarn_containers := Get_all_yarn_containers()
	var total, running, exited int
	exited_containers := make([]Yarn_docker_container, 0)
	arr := make([]docker_cluster_state, len(cluster_names))
	// Initialize cluster names (and an index for lookups) up front: the
	// original only assigned arr[i].cluster while iterating containers, so
	// with zero yarn containers the table printed blank cluster names.
	index := make(map[string]int, len(cluster_names))
	for i, name := range cluster_names {
		arr[i].cluster = name
		index[name] = i
	}
	for j := range yarn_containers {
		c := yarn_containers[j]
		i, ok := index[c.Clustername]
		if !ok {
			continue
		}
		arr[i].total += 1
		if c.State == "running" {
			arr[i].running += 1
		} else if c.State == "exited" {
			arr[i].exited += 1
			exited_containers = append(exited_containers, c)
		}
	}
	for i := range cluster_names {
		fmt.Println(arr[i].cluster, "\t", arr[i].total, "\t", arr[i].running, "\t", arr[i].exited)
		total += arr[i].total
		running += arr[i].running
		exited += arr[i].exited
	}
	fmt.Println("--------------------------------------------------------------------")
	fmt.Println("TOTAL", "\t", total, "\t", running, "\t", exited)
	fmt.Println("================EXITED DOCKER CONTAINERS DISTRIBUTION===============")
	if len(exited_containers) == 0 {
		fmt.Println("None")
	} else {
		fmt.Println("CLUSTER\tNAME\tSTATUS\tHOSTNAME")
		for i := range exited_containers {
			c := exited_containers[i]
			fmt.Println(c.Clustername, "\t", c.Name, "\t", c.Status, "\t", c.Host)
		}
	}
}
//Delete_container force-removes the docker container with the given ID.
//When wg is non-nil, Done is signalled on return so callers can fan out
//deletions under a WaitGroup. Failures are logged and then panic.
func Delete_container(id string, wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}
	client, err := Swarm_client()
	if err != nil {
		panic(err)
	}
	fmt.Println("Removal", id, "in progress...")
	opts := dc.RemoveContainerOptions{ID: id, Force: true}
	if err = client.RemoveContainer(opts); err != nil {
		logger.WithFields(logrus.Fields{"Time": time.Now(), "ContainerID": id, "Action": "DELETE"}).Error(err)
		panic(err)
	}
	fmt.Println("Remove", id, "OK")
	logger.WithFields(logrus.Fields{"Time": time.Now(), "ContainerID": id, "Action": "DELETE"}).Info("Delete container " + id)
}
//Delete_containers_on_host deletes every yarn docker container located on
//hostname. Only containers whose short name starts with "yarn" are removed,
//so co-located infrastructure containers (e.g. cAdvisor) are left alone.
func Delete_containers_on_host(hostname string) {
	containers, err := Get_all_docker_containers()
	if err != nil {
		panic(err)
	}
	var wg sync.WaitGroup
	for i := range containers {
		longname := containers[i].Names[0]
		// Names look like /<host>/<container>; skip malformed entries
		// instead of panicking on an out-of-range index (the original
		// indexed [1] and [2] unconditionally).
		parts := strings.Split(longname, "/")
		if len(parts) < 3 {
			continue
		}
		//DO NOT DELETE THE OTHER CONTAINERS EXCEPT THE YARN DOCKER CONTAINER
		// FOR EXAMPLE: cAdvisor
		if parts[1] != hostname || !strings.HasPrefix(parts[2], "yarn") {
			continue
		}
		id := containers[i].ID[0:12]
		fmt.Println("Delete docker contianer ID:", id, " NAME:", longname)
		wg.Add(1)
		go Delete_container(id, &wg)
	}
	wg.Wait()
}
//Delete_container_file_list deletes the yarn containers whose names are
//listed, one per line, in the file at path. Names that match no known
//container are ignored.
func Delete_container_file_list(path string) {
	fi, err := os.Open(path)
	if err != nil {
		panic(err)
	}
	defer fi.Close()
	containers := Get_all_yarn_containers()
	buff := bufio.NewReader(fi)
	var wg sync.WaitGroup
	for {
		line, err := buff.ReadString('\n')
		if err != nil || io.EOF == err {
			break
		}
		name := strings.Trim(line, "\n")
		for i := range containers {
			if containers[i].Name == name {
				fmt.Println("Delete docker container:", name, " ID:", containers[i].ID)
				// Add to the WaitGroup only when a deletion is actually
				// started: the original called wg.Add(1) once per input
				// line, so any listed name with no matching container left
				// the counter unbalanced and wg.Wait() deadlocked.
				wg.Add(1)
				go Delete_container(containers[i].ID, &wg)
			}
		}
	}
	wg.Wait()
}
//Get_container_name extracts the short container name (e.g.
//yarn1-20141231-nm1) from a swarm long name of the form /<host>/<name>.
func Get_container_name(longname string) string {
	parts := strings.Split(longname, "/")
	return parts[2]
}
//Get_nodemanager_host extracts the nodemanager machine's hostname (e.g.
//bj-dc-datanode-006.tendcloud.com) from a swarm long name /<host>/<name>.
func Get_nodemanager_host(longname string) string {
	parts := strings.Split(longname, "/")
	return parts[1]
}
//Convert_yarn_docker_container maps a swarm API container record onto the
//package's Yarn_docker_container struct. Exited containers have no IP
//address, so Ip is "-" unless the container is running.
func Convert_yarn_docker_container(docker_container dc.APIContainers) Yarn_docker_container {
	nameParts := strings.Split(docker_container.Names[0], "/")
	shortName := nameParts[2]
	result := Yarn_docker_container{
		ID:          docker_container.ID[0:12],
		Name:        shortName,
		Clustername: strings.Split(shortName, "-")[0],
		Status:      docker_container.Status,
		State:       docker_container.State,
		Ip:          "-",
		Host:        nameParts[1],
	}
	if result.State == "running" {
		result.Ip = docker_container.Networks.Networks["mynet"].IPAddress
	}
	return result
}
//Get_all_yarn_containers returns every swarm container whose short name
//starts with one of the configured cluster names
//("clusters.cluster_name" in viper config).
func Get_all_yarn_containers() []Yarn_docker_container {
	cluster_names := viper.GetStringSlice("clusters.cluster_name")
	list := make([]Yarn_docker_container, 0)
	containers, err := Get_all_docker_containers()
	if err != nil {
		fmt.Println("Cann connet to the swarm master.")
		panic(err)
	}
	for j := range containers {
		container := containers[j]
		// Names format: [/bj-yh-dc-datanode-141.tendcloud.com/yarn6-20160901-nm141]
		c := Get_container_name(container.Names[0])
		for i := range cluster_names {
			// name format: yarn6-20160901-nm139
			name := cluster_names[i]
			if strings.HasPrefix(c, name) {
				ydc := Convert_yarn_docker_container(container)
				list = append(list, ydc)
				// Stop after the first match: when one cluster name is a
				// prefix of another (e.g. "yarn1" and "yarn10"), the
				// original appended the same container once per matching
				// name, producing duplicate entries and inflated counts.
				break
			}
		}
	}
	return list
}
//Get_all_docker_containers lists every container (running or not) known to
//the swarm cluster. It panics when the swarm master is unreachable.
func Get_all_docker_containers() ([]dc.APIContainers, error) {
	client, err := Swarm_client()
	if err != nil {
		fmt.Println("Cann connet to the swarm master.")
		panic(err)
	}
	return client.ListContainers(dc.ListContainersOptions{All: true})
}
//Get_running_docker_containers lists only the running containers of the
//swarm cluster. It panics when the swarm master is unreachable.
func Get_running_docker_containers() ([]dc.APIContainers, error) {
	client, err := Swarm_client()
	if err != nil {
		fmt.Println("Cann connet to the swarm master.")
		panic(err)
	}
	return client.ListContainers(dc.ListContainersOptions{})
}
//Swarm_client builds a docker client for the swarm master configured under
//"clusters.swarm_master_ip" and "clusters.swarm_master_port".
func Swarm_client() (*dc.Client, error) {
	endpoint := fmt.Sprintf("tcp://%s:%s",
		viper.GetString("clusters.swarm_master_ip"),
		viper.GetString("clusters.swarm_master_port"))
	return dc.NewClient(endpoint)
}
//ParseClusterNodes extracts the swarm node list from the docker "info"
//DriverStatus table. The table interleaves one node-name/address row with
//indented " └ ..." detail rows; a node record is emitted each time its
//" └ Labels" row (the last detail row per node) is seen.
func ParseClusterNodes() ([]Node, error) {
	swarm_master_ip := viper.GetString("clusters.swarm_master_ip")
	swarm_master_port := viper.GetString("clusters.swarm_master_port")
	endpoint := "tcp://" + swarm_master_ip + ":" + swarm_master_port
	client, err := dockerclient.NewDockerClient(endpoint, nil)
	if err != nil {
		panic(err)
	}
	info, err := client.Info()
	if err != nil {
		return nil, err
	}
	driverStatus := info.DriverStatus
	nodes := []Node{}
	var node Node
	// Accumulators for the node currently being parsed; nodeComplete flips
	// to true once all expected detail rows have been consumed.
	nodeComplete := false
	name := ""
	addr := ""
	containers := ""
	reservedCPUs := ""
	reservedMemory := ""
	labels := []string{}
	for _, l := range driverStatus {
		if len(l) != 2 {
			continue
		}
		label := l[0]
		data := l[1]
		// cluster info label i.e. "Filters" or "Strategy"
		if strings.Index(label, "\u0008") > -1 {
			continue
		}
		// A row without the " └" marker starts a new node: name + address.
		if strings.Index(label, " └") == -1 {
			name = label
			addr = data
		}
		// node info like "Containers"
		switch label {
		case " └ Containers":
			containers = data
		case " └ Reserved CPUs":
			reservedCPUs = data
		case " └ Reserved Memory":
			reservedMemory = data
		case " └ Labels":
			// "Labels" is assumed to be the final detail row of a node.
			lbls := strings.Split(data, ",")
			labels = lbls
			nodeComplete = true
		default:
			continue
		}
		if nodeComplete {
			node = Node{
				Name:           name,
				Addr:           addr,
				Containers:     containers,
				ReservedCPUs:   reservedCPUs,
				ReservedMemory: reservedMemory,
				Labels:         labels,
			}
			nodes = append(nodes, node)
			// reset info
			name = ""
			addr = ""
			containers = ""
			reservedCPUs = ""
			reservedMemory = ""
			labels = []string{}
			nodeComplete = false
		}
	}
	return nodes, nil
}
//Get_swarm_nodes_status prints a table of swarm nodes with their address
//and reserved resources. Panics when the node list cannot be parsed.
func Get_swarm_nodes_status() {
	nodes, err := ParseClusterNodes()
	if err != nil {
		panic(err)
	}
	fmt.Println("Name\tAddr\tContainers\tReservererCPUs\tReserverdMemeory")
	fmt.Println("============================================================================================================")
	for i := range nodes {
		n := nodes[i]
		fmt.Println(n.Name, "\t", n.Addr, "\t", n.Containers, "\t", n.ReservedCPUs, "\t", n.ReservedMemory)
	}
}
//Lookup prints information about the yarn container whose 12-character
//short ID equals hostname. With all=true every known field is printed,
//otherwise only the IP address.
func Lookup(hostname string, all bool) {
	list := Get_all_yarn_containers()
	var container Yarn_docker_container
	// Scan the whole list; on duplicate IDs the last match wins, matching
	// the original behavior.
	found := false
	for i := range list {
		c := list[i]
		if c.ID == hostname {
			found = true
			container = c
		}
	}
	// Replaces the original "flag == false" / "all == true" comparisons
	// with idiomatic boolean tests; also fixes the "contianer" typo in the
	// not-found message.
	if !found {
		fmt.Println("No such container.")
		return
	}
	if all {
		fmt.Println("ID:", container.ID)
		fmt.Println("CLUSTER:", container.Clustername)
		fmt.Println("NAME:", container.Name)
		fmt.Println("STATUS:", container.Status)
		fmt.Println("STATE:", container.State)
		fmt.Println("IP:", container.Ip)
		fmt.Println("HOST:", container.Host)
		return
	}
	fmt.Println("IP:", container.Ip)
}
|
package main
import "fmt"
// main computes and prints the arithmetic mean of a fixed sample data set.
func main() {
	data := []float64{1, 4, 6, 7, 8, 9, 10, 11, 13, 17, 21, 23}
	fmt.Println(average(data...))
}
// average returns the arithmetic mean of sf. It also prints the slice and
// its type for demonstration purposes. An empty argument list returns 0
// rather than NaN (the original divided by zero in that case).
func average(sf ...float64) float64 {
	fmt.Println(sf)
	fmt.Printf("%T \n", sf)
	if len(sf) == 0 {
		return 0
	}
	var total float64
	for _, v := range sf {
		total += v
	}
	return total / float64(len(sf))
}
// [1 4 6 7 8 9 10 11 13 17 21 23]
// []float64
// 10.833333333333334
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"fmt"
"os"
"strings"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/references/docgen"
)
const (
	// KubeVelaIOTerraformPath is the target path for kubevela.io terraform docs.
	// Relative path: assumes the kubevela.io repository is checked out as a
	// sibling directory of the current working directory.
	KubeVelaIOTerraformPath = "../kubevela.io/docs/end-user/components/cloud-services/terraform"
	// KubeVelaIOTerraformPathZh is the target path for kubevela.io terraform docs in Chinese.
	KubeVelaIOTerraformPathZh = "../kubevela.io/i18n/zh/docusaurus-plugin-content-docs/current/end-user/components/cloud-services/terraform"
)
// main generates terraform component reference docs from the definitions
// found in the cluster. With -path it writes one language (chosen from
// -location) to that path and stops; otherwise it writes both the English
// and Chinese kubevela.io default locations.
func main() {
	ref := &docgen.MarkdownReference{}
	ctx := context.Background()
	c, err := common.InitBaseRestConfig()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	ref.Remote = &docgen.FromCluster{Namespace: types.DefaultKubeVelaNS}
	// Document only visible terraform component definitions.
	ref.Filter = func(capability types.Capability) bool {
		if capability.Labels != nil && capability.Labels[types.LabelDefinitionHidden] == "true" {
			return false
		}
		return capability.Type == types.TypeComponentDefinition && capability.Category == types.TerraformCategory
	}
	path := flag.String("path", "", "path of output")
	location := flag.String("location", "", "location of the docs, used to select the language (en/zh)")
	i18nfile := flag.String("i18n", "../kubevela.io/static/reference-i18n.json", "file path of i18n data")
	flag.Parse()
	if *i18nfile != "" {
		docgen.LoadI18nData(*i18nfile)
	}
	if *path != "" {
		ref.I18N = &docgen.En
		if strings.Contains(*location, "zh") || strings.Contains(*location, "chinese") {
			ref.I18N = &docgen.Zh
		}
		if err := ref.GenerateReferenceDocs(ctx, c, *path); err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Printf("terraform reference docs (%s) successfully generated in %s \n", ref.I18N.Language(), *path)
		// An explicit output path was requested; do not also write the
		// default kubevela.io locations below (the original fell through
		// and generated the docs twice).
		return
	}
	ref.I18N = &docgen.En
	if err := ref.GenerateReferenceDocs(ctx, c, KubeVelaIOTerraformPath); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Printf("terraform reference docs (%s) successfully generated in %s \n", ref.I18N.Language(), KubeVelaIOTerraformPath)
	ref.I18N = &docgen.Zh
	if err := ref.GenerateReferenceDocs(ctx, c, KubeVelaIOTerraformPathZh); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Printf("terraform reference docs (%s) successfully generated in %s \n", ref.I18N.Language(), KubeVelaIOTerraformPathZh)
}
|
package list
// Singly linked list tests.
import "testing"
// TestListAppend verifies list-level and node-level Append: length tracking,
// head/tail maintenance after each insertion, membership via Has, and that a
// detached (dangling) node rejects Append with ErrRemoved.
func TestListAppend(t *testing.T) {
	l := New()
	// Appending to an empty list: the new node is both head and tail.
	h, _ := l.Append("hello")
	if l.Len() != 1 {
		t.Errorf("链表长度有误, 期望%d, 实际上是%d", 1, l.Len())
	}
	if l.head.Data != "hello" {
		t.Errorf("链表数据存储有误, 实际上为: %v", l.head.Data)
	}
	if l.head != h || l.tail != h {
		t.Errorf("链表插入异常, 插入空链表, 首位应该是同一个元素")
	}
	// Appending to a non-empty list extends it behind the head.
	n, _ := l.Append("world")
	if l.Len() != 2 {
		t.Errorf("链表长度有误, 期望%d, 实际上是%d", 2, l.Len())
	}
	if l.head.next.Data != "world" {
		t.Errorf("链表数据存储有误, 实际上为: %v", l.head.next.Data)
	}
	// Node-level Append inserts after the node; here n is the tail, so the
	// new node becomes the new tail.
	nn, _ := n.Append("foo")
	if l.Len() != 3 {
		t.Errorf("链表长度有误, 期望%d, 实际上是%d", 3, l.Len())
	}
	if l.tail != nn {
		t.Errorf("链表插入异常, 链表尾部不等于最新元素")
	}
	if n.next != nn {
		t.Errorf("链表插入异常, 元素下一个元素不等于最新元素")
	}
	if !l.Has(nn) {
		t.Errorf("nn 应该是l的节点")
	}
	// A node that belongs to no list must refuse Append.
	notExisted := Node{}
	_, err := notExisted.Append("a")
	if err != ErrRemoved {
		t.Errorf("链表插入异常, 悬空节点无法被追加元素")
	}
}
// TestListEach verifies traversal order. Node-level Append inserts after the
// node, so after appending 4 behind the node holding 2, the visiting order
// is 1, 2, 4, 3.
func TestListEach(t *testing.T) {
	l := New()
	l.Append(1)
	n, _ := l.Append(2)
	l.Append(3)
	n.Append(4)
	var act [4]int
	exp := [...]int{1, 2, 4, 3}
	l.Each(func(node *Node, i int) bool {
		v := node.Data.(int)
		act[i] = v
		return false // keep iterating over the whole list
	})
	if act != exp {
		// The original message was truncated ("期望" with no value); report
		// both the expected and the actual order.
		t.Errorf("遍历异常, 期望%v, 实际上是%v", exp, act)
	}
}
// TestListRemove verifies node removal: a removed node becomes detached
// (list pointer cleared), neighbours are relinked, head/tail are maintained
// when the first or last node is removed, and removing the final node empties
// the list entirely.
func TestListRemove(t *testing.T) {
	l := New()
	foo, _ := l.Append("1")
	bar, _ := l.Append("2")
	baz, _ := l.Append("3")
	// Removing a middle node detaches it and links its neighbours.
	bar.Remove()
	if bar.list != nil {
		t.Errorf("删除后应该为悬空节点")
	}
	if foo.next != baz {
		t.Errorf("foo的后继应该是baz")
	}
	// Removing the head promotes the sole survivor to both head and tail.
	foo.Remove()
	if l.head != baz || l.tail != baz {
		t.Errorf("foo删除后, baz是最后一个元素, 应该是head和tail")
	}
	bazz, _ := baz.Append("4")
	if l.tail != bazz {
		t.Errorf("bazz应该是最后一个元素")
	}
	// Removing the tail restores the previous node as tail.
	bazz.Remove()
	if l.head != baz || l.tail != baz {
		t.Errorf("bazz删除后, baz是最后一个元素, 应该是head和tail")
	}
	// Removing the last remaining node leaves an empty list.
	baz.Remove()
	if l.head != nil || l.tail != nil {
		t.Errorf("baz删除后, head, tail 应该为空")
	}
}
|
package hive
import (
"encoding/json"
"github.com/google/uuid"
"github.com/stepan-s/ws-bro/log"
"io/ioutil"
"net/http"
)
// AppMessageToEvent A message to app
type AppMessageToEvent struct {
	Aid        uuid.UUID // target app id
	Uid        uint32    // sending user id; SYSUID bypasses the uid membership check
	RawMessage []byte
}

// AppMessageFromEvent A message from app
type AppMessageFromEvent struct {
	Aid        uuid.UUID // source app id; uuid.Nil for hive-generated messages
	Uids       []uint32  // user ids the message is addressed to
	RawMessage []byte
}

// A connection message
type appConnectionEvent struct {
	cmd  uint8 // ADD or REMOVE
	aid  uuid.UUID
	conn AConnection
}

// appGetUidsEvent asks a uids worker to fetch the user list for an app.
type appGetUidsEvent struct {
	aid      uuid.UUID
	attempts byte // retry counter; the worker gives up after 10 failed attempts
}

// AppUidsEvent carries a uid list update (Cmd is ADD or REMOVE) for an app.
type AppUidsEvent struct {
	Cmd  uint8
	Aid  uuid.UUID
	Uids []uint32
}

// appConnectedEvent requests a "connected" reply for one user across the given apps.
type appConnectedEvent struct {
	uid  uint32
	aids []uuid.UUID
}

// App is the hive-side state for a single app connection.
type App struct {
	uids []uint32 // users linked to this app
	conn AConnection
}

// Apps A apps hive
// All mutable state (conns) is owned by the single event-loop goroutine
// started in NewApps; the channels are the only way in or out.
type Apps struct {
	conns         map[uuid.UUID]*App
	chanIn        chan AppMessageToEvent
	chanOutUids   chan AppMessageFromEvent
	chanOut       chan AppMessageFromEvent
	chanConn      chan appConnectionEvent
	chanGetUids   chan appGetUidsEvent
	chanUids      chan AppUidsEvent
	chanConnected chan appConnectedEvent
	stats         AAppStat
	uidsApiUrl    string
}

// uidsReponse mirrors the JSON payload returned by the uids API.
// NOTE(review): the name is a typo for "uidsResponse"; renaming would also
// require touching getUids, so it is kept here.
type uidsReponse struct {
	Uids []uint32
}
// NewApps Instantiate apps hive
//
// It starts one event-loop goroutine that exclusively owns apps.conns
// (connection add/remove, message routing, uid list updates) and four worker
// goroutines that resolve uid lists via HTTP. Exported methods communicate
// with the loop only through the buffered channels.
func NewApps(uidsApiUrl string, stats AAppStat) *Apps {
	apps := new(Apps)
	apps.conns = make(map[uuid.UUID]*App)
	apps.chanIn = make(chan AppMessageToEvent, 10000)
	apps.chanOutUids = make(chan AppMessageFromEvent, 10000)
	apps.chanOut = make(chan AppMessageFromEvent, 10000)
	apps.chanConn = make(chan appConnectionEvent, 10000)
	apps.chanGetUids = make(chan appGetUidsEvent, 10000)
	apps.chanUids = make(chan AppUidsEvent, 10000)
	apps.chanConnected = make(chan appConnectedEvent, 10000)
	apps.uidsApiUrl = uidsApiUrl
	apps.stats = stats
	go func() {
		for {
			select {
			case event := <-apps.chanConn:
				switch event.cmd {
				case ADD:
					apps.addConnection(event.aid, event.conn)
				case REMOVE:
					apps.removeConnection(event.aid, event.conn)
				}
			case event := <-apps.chanIn:
				apps.sendEvent(event)
			case event := <-apps.chanUids:
				switch event.Cmd {
				case ADD:
					apps.addUids(event)
				case REMOVE:
					apps.removeUids(event)
				}
			case event := <-apps.chanConnected:
				apps.replyConnected(event)
			case event := <-apps.chanOutUids:
				// Attach the app's current uid list before publishing; the
				// message is dropped when the app is unknown or has no users.
				conn, exists := apps.conns[event.Aid]
				if exists && len(conn.uids) > 0 {
					event.Uids = conn.uids
					apps.chanOut <- event
				}
			}
		}
	}()
	for w := 0; w < 4; w++ {
		go apps.getUidsWorker()
	}
	return apps
}
// Register app connection
//
// Called only from the event-loop goroutine (see NewApps). On reconnect the
// previous connection is closed but the already known uid list is preserved;
// for a brand new app an asynchronous uid lookup is queued.
func (apps *Apps) addConnection(aid uuid.UUID, conn AConnection) {
	existApp, exists := apps.conns[aid]
	if exists {
		log.Info("Reconnect app: %v", aid)
		apps.conns[aid] = &App{
			uids: existApp.uids,
			conn: conn,
		}
		existApp.conn.Close()
		apps.stats.Reconnected()
	} else {
		log.Info("Hello app: %v", aid)
		apps.conns[aid] = &App{
			uids: []uint32{},
			conn: conn,
		}
		apps.stats.Connected()
		apps.chanGetUids <- appGetUidsEvent{aid, 0}
	}
	conn.Start()
}
// getUidsWorker consumes uid-fetch requests and retries failed lookups.
// A failed request is re-queued up to 10 times; after that the (empty) uid
// list is published anyway so the app does not stay in limbo.
func (apps *Apps) getUidsWorker() {
	// Idiomatic form of the original single-case select loop.
	for event := range apps.chanGetUids {
		err, uids := apps.getUids(event.aid)
		if err != nil && event.attempts < 10 {
			event.attempts++
			apps.chanGetUids <- event
		} else {
			apps.chanUids <- AppUidsEvent{ADD, event.aid, uids}
		}
	}
}
// Request uid list
//
// getUids fetches the user ids linked to the given app id from the uids API.
// It returns a non-nil error when the request cannot be built, executed or
// parsed.
//
// NOTE(review): the (error, []uint32) result order predates this review and
// is kept for compatibility with existing callers; Go convention is error-last.
func (apps *Apps) getUids(aid uuid.UUID) (error, []uint32) {
	req, err := http.NewRequest("GET", apps.uidsApiUrl, nil)
	if err != nil {
		log.Error("Fail init request: %v", err)
		return err, nil
	}
	q := req.URL.Query()
	q.Add("aid", aid.String())
	req.URL.RawQuery = q.Encode()
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Error("Fail do request: %v", err)
		return err, nil
	}
	// Close the body so the underlying connection can be reused; the
	// original leaked it on every call.
	defer resp.Body.Close()
	buf, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Error("Fail get response: %v", err)
		return err, nil
	}
	var uids uidsReponse
	err = json.Unmarshal(buf, &uids)
	if err != nil {
		log.Error("Fail parse response: %v", err)
		return err, nil
	}
	return nil, uids.Uids
}
// addUids merges newly discovered user ids into the app connection state and
// notifies exactly the users that were actually added.
func (apps *Apps) addUids(event AppUidsEvent) {
	conn, exists := apps.conns[event.Aid]
	if !exists {
		return
	}
	var added []uint32
	for _, uid := range event.Uids {
		add := true
		// check exist
		for _, item := range conn.uids {
			if uid == item {
				add = false
				break
			}
		}
		if add {
			added = append(added, uid)
			conn.uids = append(conn.uids, uid)
		}
	}
	rawMessage, err := MessageUserConnectedPack(&MessageUserConnected{
		Action: ACTION_CONNECTED,
		List: []appConnection{{
			Aid: event.Aid,
			Ip:  conn.conn.RemoteAddr().String(),
		}},
	})
	if err != nil {
		// Do not broadcast a nil payload when packing fails (the original
		// logged and sent it anyway).
		log.Error("Fail pack %v", err)
		return
	}
	apps.chanOut <- AppMessageFromEvent{
		Aid:        event.Aid,
		Uids:       added,
		RawMessage: rawMessage,
	}
}
// removeUids drops the given user ids from the app connection state and
// notifies the removed users about the disconnection.
func (apps *Apps) removeUids(event AppUidsEvent) {
	conn, exists := apps.conns[event.Aid]
	if !exists {
		return
	}
	var uids []uint32
	for _, item := range conn.uids {
		left := true
		// check remove
		for _, uid := range event.Uids {
			if uid == item {
				left = false
				break
			}
		}
		if left {
			uids = append(uids, item)
		}
	}
	conn.uids = uids
	rawMessage, err := MessageUserConnectedPack(&MessageUserConnected{
		Action: ACTION_DISCONNECTED,
		List: []appConnection{{
			Aid: event.Aid,
			Ip:  conn.conn.RemoteAddr().String(),
		}},
	})
	if err != nil {
		// Do not broadcast a nil payload when packing fails (the original
		// logged and sent it anyway).
		log.Error("Fail pack %v", err)
		return
	}
	apps.chanOut <- AppMessageFromEvent{
		Aid:        event.Aid,
		Uids:       event.Uids,
		RawMessage: rawMessage,
	}
}
// replyConnected sends the requesting user the subset of the given apps that
// are currently connected and linked to that user. The reply uses uuid.Nil
// as Aid to mark it as hive-generated.
func (apps *Apps) replyConnected(event appConnectedEvent) {
	var list []appConnection
	for _, aid := range event.aids {
		conn, exists := apps.conns[aid]
		if exists {
			for _, uid := range conn.uids {
				if uid == event.uid {
					list = append(list, appConnection{
						Aid: aid,
						Ip:  conn.conn.RemoteAddr().String(),
					})
					break
				}
			}
		}
	}
	rawMessage, err := MessageUserConnectedPack(&MessageUserConnected{
		Action: ACTION_CONNECTED,
		List:   list,
	})
	if err != nil {
		// Do not broadcast a nil payload when packing fails (the original
		// logged and sent it anyway).
		log.Error("Fail pack %v", err)
		return
	}
	apps.chanOut <- AppMessageFromEvent{
		Aid:        uuid.Nil,
		Uids:       []uint32{event.uid},
		RawMessage: rawMessage,
	}
}
// Unregister app connection
//
// The app is only removed when theConn is still the current connection, so a
// stale disconnect for an already replaced connection is ignored.
func (apps *Apps) removeConnection(aid uuid.UUID, theConn AConnection) {
	conn, exists := apps.conns[aid]
	if !exists {
		return
	}
	if conn.conn != theConn {
		return
	}
	rawMessage, err := MessageUserDisconnectedPack(&MessageUserDisconnected{
		Action: ACTION_DISCONNECTED,
		List:   []uuid.UUID{aid},
	})
	if err != nil {
		// Skip the broadcast rather than sending a nil payload, but still
		// tear the connection down below.
		log.Error("Fail pack %v", err)
	} else {
		apps.chanOut <- AppMessageFromEvent{
			Aid:        aid,
			Uids:       conn.uids,
			RawMessage: rawMessage,
		}
	}
	// No connection left - remove app
	delete(apps.conns, aid)
	conn.conn.Close()
	apps.stats.Disconnected()
	log.Info("Bye app: %v", aid)
}
// Send message to all app connections
//
// The message is delivered only when the sender is the system user (SYSUID)
// or is one of the users linked to the target app.
func (apps *Apps) sendEvent(event AppMessageToEvent) {
	app, exists := apps.conns[event.Aid]
	if !exists {
		return
	}
	allowed := event.Uid == SYSUID
	if !allowed {
		// check uid is linked to app
		for _, item := range app.uids {
			if item == event.Uid {
				allowed = true
				break
			}
		}
	}
	if !allowed {
		return
	}
	// uid can send to app
	app.conn.Send(event.RawMessage)
	apps.stats.Transmitted()
}
// SendEvent Send message to all app connections
func (apps *Apps) SendEvent(event AppMessageToEvent) {
	apps.chanIn <- event
}

// ReceiveEvent Read message from app, blocked
func (apps *Apps) ReceiveEvent() AppMessageFromEvent {
	return <-apps.chanOut
}

// UpdateUids Queue a uid list update (ADD or REMOVE) for an app
// (the original comment said "Unregister app connection", a copy-paste error)
func (apps *Apps) UpdateUids(event AppUidsEvent) {
	apps.chanUids <- event
}

// getConnected queues a request to reply with the user's connected apps.
func (apps *Apps) getConnected(event appConnectedEvent) {
	apps.chanConnected <- event
}

// ConnectionAdd registers an app connection with the hive event loop.
func (apps *Apps) ConnectionAdd(aid uuid.UUID, conn AConnection) {
	apps.chanConn <- appConnectionEvent{ADD, aid, conn}
}

// ConnectionRemove unregisters an app connection from the hive event loop.
func (apps *Apps) ConnectionRemove(aid uuid.UUID, conn AConnection) {
	apps.chanConn <- appConnectionEvent{REMOVE, aid, conn}
}

// ConnectionMessage forwards a raw message received from an app. The uid
// list is left nil here; the event loop fills it in from the app state
// before publishing on chanOut.
func (apps *Apps) ConnectionMessage(aid uuid.UUID, message []byte) {
	apps.stats.Received()
	apps.chanOutUids <- AppMessageFromEvent{aid, nil, message}
}
|
package oidc
import (
"context"
"net/url"
"time"
"github.com/go-crypt/crypt/algorithm"
"github.com/golang-jwt/jwt/v5"
"github.com/ory/fosite"
"github.com/ory/fosite/handler/openid"
fjwt "github.com/ory/fosite/token/jwt"
"github.com/ory/herodot"
"gopkg.in/square/go-jose.v2"
"github.com/authelia/authelia/v4/internal/authentication"
"github.com/authelia/authelia/v4/internal/authorization"
"github.com/authelia/authelia/v4/internal/configuration/schema"
"github.com/authelia/authelia/v4/internal/model"
"github.com/authelia/authelia/v4/internal/storage"
"github.com/authelia/authelia/v4/internal/utils"
)
// NewSession creates a new empty OpenIDSession struct.
func NewSession() (session *Session) {
	claims := &fjwt.IDTokenClaims{
		Extra: map[string]any{},
	}
	headers := &fjwt.Headers{
		Extra: map[string]any{},
	}
	return &Session{
		DefaultSession: &openid.DefaultSession{
			Claims:  claims,
			Headers: headers,
		},
		Extra: map[string]any{},
	}
}
// NewSessionWithAuthorizeRequest uses details from an AuthorizeRequester to generate an OpenIDSession.
// The subject comes from the consent session, the nonce from the request
// form, and the audience from the granted audience of the requester.
func NewSessionWithAuthorizeRequest(ctx Context, issuer *url.URL, kid, username string, amr []string, extra map[string]any,
	authTime time.Time, consent *model.OAuth2ConsentSession, requester fosite.AuthorizeRequester) (session *Session) {
	// Never store a nil Extra map; claims are added to it below and by callers.
	if extra == nil {
		extra = map[string]any{}
	}
	session = &Session{
		DefaultSession: &openid.DefaultSession{
			Claims: &fjwt.IDTokenClaims{
				Subject:     consent.Subject.UUID.String(),
				Issuer:      issuer.String(),
				AuthTime:    authTime,
				RequestedAt: consent.RequestedAt,
				IssuedAt:    ctx.GetClock().Now().UTC(),
				Nonce:       requester.GetRequestForm().Get(ClaimNonce),
				Audience:    requester.GetGrantedAudience(),
				Extra:       extra,

				AuthenticationMethodsReferences: amr,
			},
			Headers: &fjwt.Headers{
				Extra: map[string]any{
					// The key id is surfaced in the ID token header.
					JWTHeaderKeyIdentifier: kid,
				},
			},
			Subject:  consent.Subject.UUID.String(),
			Username: username,
		},
		ChallengeID:           model.NullUUID(consent.ChallengeID),
		KID:                   kid,
		ClientID:              requester.GetClient().GetID(),
		ExcludeNotBeforeClaim: false,
		AllowedTopLevelClaims: nil,
		Extra:                 map[string]any{},
	}
	// Ensure required audience value of the client_id exists.
	if !utils.IsStringInSlice(requester.GetClient().GetID(), session.Claims.Audience) {
		session.Claims.Audience = append(session.Claims.Audience, requester.GetClient().GetID())
	}
	session.Claims.Add(ClaimAuthorizedParty, session.ClientID)
	session.Claims.Add(ClaimClientIdentifier, session.ClientID)
	return session
}
// PopulateClientCredentialsFlowSessionWithAccessRequest is used to configure a session when performing a client credentials grant.
// The session subject is cleared and the claims subject becomes the client id.
//
// NOTE(review): funcGetKID is currently unused; it is retained to preserve
// the exported signature for existing callers.
func PopulateClientCredentialsFlowSessionWithAccessRequest(ctx Context, request fosite.AccessRequester, session *Session, funcGetKID func(ctx context.Context, kid, alg string) string) (err error) {
	var (
		issuer *url.URL
		client Client
		ok     bool
	)

	if issuer, err = ctx.IssuerURL(); err != nil {
		return fosite.ErrServerError.WithWrap(err).WithDebugf("Failed to determine the issuer with error: %s.", err.Error())
	}

	if client, ok = request.GetClient().(Client); !ok {
		return fosite.ErrServerError.WithDebugf("Failed to get the client for the request.")
	}

	// Capture a single timestamp so IssuedAt and RequestedAt are exactly
	// equal (the original read the clock twice).
	now := ctx.GetClock().Now().UTC()

	session.Subject = ""
	session.Claims.Subject = client.GetID()
	session.ClientID = client.GetID()
	session.DefaultSession.Claims.Issuer = issuer.String()
	session.DefaultSession.Claims.IssuedAt = now
	session.DefaultSession.Claims.RequestedAt = now

	return nil
}
// OpenIDConnectProvider for OpenID Connect.
// It embeds the fosite provider, the herodot JSON writer, the client store
// and the config, and additionally carries the key manager and the cached
// well-known discovery document.
type OpenIDConnectProvider struct {
	fosite.OAuth2Provider
	*herodot.JSONWriter
	*Store
	*Config

	KeyManager *KeyManager

	discovery OpenIDConnectWellKnownConfiguration
}

// Store is Authelia's internal representation of the fosite.Storage interface. It maps the following
// interfaces to the storage.Provider interface:
// fosite.Storage, fosite.ClientManager, storage.Transactional, oauth2.AuthorizeCodeStorage, oauth2.AccessTokenStorage,
// oauth2.RefreshTokenStorage, oauth2.TokenRevocationStorage, pkce.PKCERequestStorage,
// openid.OpenIDConnectRequestStorage, and partially implements rfc7523.RFC7523KeyStorage.
type Store struct {
	provider storage.Provider
	clients  map[string]Client // registered clients keyed by client id
}
// BaseClient is the base for all clients.
type BaseClient struct {
	// Core registration data.
	ID               string
	Description      string
	Secret           *schema.PasswordDigest
	SectorIdentifier string
	Public           bool

	// Pushed Authorization Request and PKCE enforcement.
	EnforcePAR                 bool
	EnforcePKCE                bool
	EnforcePKCEChallengeMethod bool
	PKCEChallengeMethod        string

	// OAuth 2.0 grant configuration.
	Audience      []string
	Scopes        []string
	RedirectURIs  []string
	GrantTypes    []string
	ResponseTypes []string
	ResponseModes []fosite.ResponseModeType

	Lifespans schema.IdentityProvidersOpenIDConnectLifespan

	// Per-artifact signing algorithm and key id selection.
	AuthorizationSignedResponseAlg   string
	AuthorizationSignedResponseKeyID string
	IDTokenSignedResponseAlg         string
	IDTokenSignedResponseKeyID       string
	AccessTokenSignedResponseAlg     string
	AccessTokenSignedResponseKeyID   string
	UserinfoSignedResponseAlg        string
	UserinfoSignedResponseKeyID      string
	IntrospectionSignedResponseAlg   string
	IntrospectionSignedResponseKeyID string

	RefreshFlowIgnoreOriginalGrantedScopes bool

	// Authorization and consent policy handling.
	AuthorizationPolicy ClientAuthorizationPolicy
	ConsentPolicy       ClientConsentPolicy
}
// FullClient is the client with comprehensive supported features.
// It extends BaseClient with request object and token endpoint
// authentication configuration.
type FullClient struct {
	*BaseClient

	RequestURIs    []string
	JSONWebKeys    *jose.JSONWebKeySet
	JSONWebKeysURI string

	RequestObjectSigningAlg string

	TokenEndpointAuthMethod     string
	TokenEndpointAuthSigningAlg string
}
// Client represents the internal client definitions.
type Client interface {
	fosite.Client
	fosite.ResponseModeClient
	RefreshFlowScopeClient

	// Metadata accessors.
	GetDescription() (description string)
	GetSecret() (secret algorithm.Digest)
	GetSectorIdentifier() (sector string)
	GetConsentResponseBody(consent *model.OAuth2ConsentSession) (body ConsentGetResponseBody)

	// Signing algorithm / key id selection per artifact type.
	GetAuthorizationSignedResponseAlg() (alg string)
	GetAuthorizationSignedResponseKeyID() (kid string)
	GetIDTokenSignedResponseAlg() (alg string)
	GetIDTokenSignedResponseKeyID() (kid string)
	GetAccessTokenSignedResponseAlg() (alg string)
	GetAccessTokenSignedResponseKeyID() (kid string)
	GetJWTProfileOAuthAccessTokensEnabled() bool
	GetUserinfoSignedResponseAlg() (alg string)
	GetUserinfoSignedResponseKeyID() (kid string)
	GetIntrospectionSignedResponseAlg() (alg string)
	GetIntrospectionSignedResponseKeyID() (kid string)

	// PAR / PKCE / response mode policy.
	GetPAREnforcement() (enforce bool)
	GetPKCEEnforcement() (enforce bool)
	GetPKCEChallengeMethodEnforcement() (enforce bool)
	GetPKCEChallengeMethod() (method string)
	ValidatePKCEPolicy(r fosite.Requester) (err error)
	ValidatePARPolicy(r fosite.Requester, prefix string) (err error)
	ValidateResponseModePolicy(r fosite.AuthorizeRequester) (err error)

	// Consent and authorization policy.
	GetConsentPolicy() ClientConsentPolicy
	IsAuthenticationLevelSufficient(level authentication.Level, subject authorization.Subject) (sufficient bool)
	GetAuthorizationPolicyRequiredLevel(subject authorization.Subject) (level authorization.Level)
	GetAuthorizationPolicy() (policy ClientAuthorizationPolicy)

	GetEffectiveLifespan(gt fosite.GrantType, tt fosite.TokenType, fallback time.Duration) (lifespan time.Duration)
}
// RefreshFlowScopeClient is a client which can be customized to ignore scopes that were not originally granted.
type RefreshFlowScopeClient interface {
	fosite.Client

	GetRefreshFlowIgnoreOriginalGrantedScopes(ctx context.Context) (ignoreOriginalGrantedScopes bool)
}

// Context represents the context implementation that is used by some OpenID Connect 1.0 implementations.
type Context interface {
	context.Context

	RootURL() (issuerURL *url.URL)
	IssuerURL() (issuerURL *url.URL, err error)

	GetClock() utils.Clock
	GetJWTWithTimeFuncOption() jwt.ParserOption
}

// ClientRequesterResponder is a fosite.Requester or fosite.Responder with a GetClient method.
type ClientRequesterResponder interface {
	GetClient() fosite.Client
}

// IDTokenClaimsSession is a session which can return the IDTokenClaims type.
type IDTokenClaimsSession interface {
	GetIDTokenClaims() *fjwt.IDTokenClaims
}

// Configurator is an internal extension to the fosite.Configurator.
type Configurator interface {
	fosite.Configurator

	AuthorizationServerIssuerIdentificationProvider
	JWTSecuredResponseModeProvider
}

// AuthorizationServerIssuerIdentificationProvider provides OAuth 2.0 Authorization Server Issuer Identification related methods.
type AuthorizationServerIssuerIdentificationProvider interface {
	GetAuthorizationServerIdentificationIssuer(ctx context.Context) (issuer string)
}

// JWTSecuredResponseModeProvider provides JARM related methods.
type JWTSecuredResponseModeProvider interface {
	GetJWTSecuredAuthorizeResponseModeLifespan(ctx context.Context) (lifespan time.Duration)
	GetJWTSecuredAuthorizeResponseModeSigner(ctx context.Context) (signer fjwt.Signer)
	GetJWTSecuredAuthorizeResponseModeIssuer(ctx context.Context) (issuer string)
}

// IDTokenSessionContainer is similar to the oauth2.JWTSessionContainer to facilitate obtaining the headers as appropriate.
type IDTokenSessionContainer interface {
	IDTokenHeaders() *fjwt.Headers
	IDTokenClaims() *fjwt.IDTokenClaims
}
// ConsentGetResponseBody schema of the response body of the consent GET endpoint.
type ConsentGetResponseBody struct {
	ClientID          string   `json:"client_id"`
	ClientDescription string   `json:"client_description"`
	Scopes            []string `json:"scopes"`
	Audience          []string `json:"audience"`
	PreConfiguration  bool     `json:"pre_configuration"`
}

// ConsentPostRequestBody schema of the request body of the consent POST endpoint.
type ConsentPostRequestBody struct {
	ConsentID    string `json:"id"`
	ClientID     string `json:"client_id"`
	Consent      bool   `json:"consent"`
	PreConfigure bool   `json:"pre_configure"`
}

// ConsentPostResponseBody schema of the response body of the consent POST endpoint.
type ConsentPostResponseBody struct {
	RedirectURI string `json:"redirect_uri"`
}
/*
CommonDiscoveryOptions represents the discovery options used in both OAuth 2.0 and OpenID Connect.

See Also:

	OpenID Connect Discovery: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
	OAuth 2.0 Discovery: https://datatracker.ietf.org/doc/html/draft-ietf-oauth-discovery-10#section-2
*/
type CommonDiscoveryOptions struct {
	/*
		REQUIRED. URL using the https scheme with no query or fragment component that the OP asserts as its Issuer
		Identifier. If Issuer discovery is supported (see Section 2), this value MUST be identical to the issuer value
		returned by WebFinger. This also MUST be identical to the iss Claim value in ID Tokens issued from this Issuer.
	*/
	Issuer string `json:"issuer"`

	/*
		REQUIRED. URL of the OP's JSON Web Key Set [JWK] document. This contains the signing key(s) the RP uses to
		validate signatures from the OP. The JWK Set MAY also contain the Server's encryption key(s), which are used by
		RPs to encrypt requests to the Server. When both signing and encryption keys are made available, a use (Key Use)
		parameter value is REQUIRED for all keys in the referenced JWK Set to indicate each key's intended usage.
		Although some algorithms allow the same key to be used for both signatures and encryption, doing so is NOT
		RECOMMENDED, as it is less secure. The JWK x5c parameter MAY be used to provide X.509 representations of keys
		provided. When used, the bare key values MUST still be present and MUST match those in the certificate.
	*/
	JWKSURI string `json:"jwks_uri,omitempty"`

	/*
		REQUIRED. URL of the OP's OAuth 2.0 Authorization Endpoint [OpenID.Core].

		See Also:

			OpenID.Core: https://openid.net/specs/openid-connect-core-1_0.html
	*/
	AuthorizationEndpoint string `json:"authorization_endpoint"`

	/*
		URL of the OP's OAuth 2.0 Token Endpoint [OpenID.Core]. This is REQUIRED unless only the Implicit Flow is used.

		See Also:

			OpenID.Core: https://openid.net/specs/openid-connect-core-1_0.html
	*/
	TokenEndpoint string `json:"token_endpoint,omitempty"`

	/*
		REQUIRED. JSON array containing a list of the Subject Identifier types that this OP supports. Valid types
		include pairwise and public.
	*/
	SubjectTypesSupported []string `json:"subject_types_supported"`

	/*
		REQUIRED. JSON array containing a list of the OAuth 2.0 response_type values that this OP supports. Dynamic
		OpenID Providers MUST support the code, id_token, and the token id_token Response Type values.
	*/
	ResponseTypesSupported []string `json:"response_types_supported"`

	/*
		OPTIONAL. JSON array containing a list of the OAuth 2.0 Grant Type values that this OP supports. Dynamic OpenID
		Providers MUST support the authorization_code and implicit Grant Type values and MAY support other Grant Types.
		If omitted, the default value is ["authorization_code", "implicit"].
	*/
	GrantTypesSupported []string `json:"grant_types_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the OAuth 2.0 response_mode values that this OP supports, as specified
		in OAuth 2.0 Multiple Response Type Encoding Practices [OAuth.Responses]. If omitted, the default for Dynamic
		OpenID Providers is ["query", "fragment"].
	*/
	ResponseModesSupported []string `json:"response_modes_supported,omitempty"`

	/*
		RECOMMENDED. JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this server supports.
		The server MUST support the openid scope value. Servers MAY choose not to advertise some supported scope values
		even when this parameter is used, although those defined in [OpenID.Core] SHOULD be listed, if supported.

		See Also:

			OAuth 2.0: https://datatracker.ietf.org/doc/html/rfc6749
			OpenID.Core: https://openid.net/specs/openid-connect-core-1_0.html
	*/
	ScopesSupported []string `json:"scopes_supported,omitempty"`

	/*
		RECOMMENDED. JSON array containing a list of the Claim Names of the Claims that the OpenID Provider MAY be able
		to supply values for. Note that for privacy or other reasons, this might not be an exhaustive list.
	*/
	ClaimsSupported []string `json:"claims_supported,omitempty"`

	/*
		OPTIONAL. Languages and scripts supported for the user interface, represented as a JSON array of BCP47 [RFC5646]
		language tag values.

		See Also:

			BCP47: https://datatracker.ietf.org/doc/html/rfc5646
	*/
	UILocalesSupported []string `json:"ui_locales_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of Client Authentication methods supported by this Token Endpoint. The
		options are client_secret_post, client_secret_basic, client_secret_jwt, and private_key_jwt, as described in
		Section 9 of OpenID Connect Core 1.0 [OpenID.Core]. Other authentication methods MAY be defined by extensions.
		If omitted, the default is client_secret_basic -- the HTTP Basic Authentication Scheme specified in Section
		2.3.1 of OAuth 2.0 [RFC6749].

		See Also:

			OAuth 2.0: https://datatracker.ietf.org/doc/html/rfc6749
			OpenID.Core Section 9: https://openid.net/specs/openid-connect-core-1_0.html#ClientAuthentication
	*/
	TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWS signing algorithms (alg values) supported by the Token Endpoint
		for the signature on the JWT [JWT] used to authenticate the Client at the Token Endpoint for the private_key_jwt
		and client_secret_jwt authentication methods. Servers SHOULD support RS256. The value none MUST NOT be used.

		See Also:

			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"`

	/*
		OPTIONAL. URL of a page containing human-readable information that developers might want or need to know when
		using the OpenID Provider. In particular, if the OpenID Provider does not support Dynamic Client Registration,
		then information on how to register Clients needs to be provided in this documentation.
	*/
	ServiceDocumentation string `json:"service_documentation,omitempty"`

	/*
		OPTIONAL. URL that the OpenID Provider provides to the person registering the Client to read about the OP's
		requirements on how the Relying Party can use the data provided by the OP. The registration process SHOULD
		display this URL to the person registering the Client if it is given.
	*/
	OPPolicyURI string `json:"op_policy_uri,omitempty"`

	/*
		OPTIONAL. URL that the OpenID Provider provides to the person registering the Client to read about OpenID
		Provider's terms of service. The registration process SHOULD display this URL to the person registering the
		Client if it is given.
	*/
	OPTOSURI string `json:"op_tos_uri,omitempty"`

	/*
		A JWT containing metadata values about the authorization server as claims. This is a string value consisting of
		the entire signed JWT. A "signed_metadata" metadata value SHOULD NOT appear as a claim in the JWT.
	*/
	SignedMetadata string `json:"signed_metadata,omitempty"`
}
// OAuth2DiscoveryOptions represents the discovery options specific to OAuth 2.0.
type OAuth2DiscoveryOptions struct {
	/*
		OPTIONAL. URL of the authorization server's OAuth 2.0 introspection endpoint [RFC7662].

		See Also:

			OAuth 2.0 Token Introspection: https://datatracker.ietf.org/doc/html/rfc7662
	*/
	IntrospectionEndpoint string `json:"introspection_endpoint,omitempty"`

	/*
		OPTIONAL. URL of the authorization server's OAuth 2.0 revocation endpoint [RFC7009].

		See Also:

			OAuth 2.0 Token Revocation: https://datatracker.ietf.org/doc/html/rfc7009
	*/
	RevocationEndpoint string `json:"revocation_endpoint,omitempty"`

	/*
		OPTIONAL. URL of the authorization server's OAuth 2.0 Dynamic Client Registration endpoint [RFC7591].

		See Also:

			OAuth 2.0 Dynamic Client Registration Protocol: https://datatracker.ietf.org/doc/html/rfc7591
	*/
	RegistrationEndpoint string `json:"registration_endpoint,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of client authentication methods supported by this introspection endpoint.
		The valid client authentication method values are those registered in the IANA "OAuth Token Endpoint
		Authentication Methods" registry [IANA.OAuth.Parameters] or those registered in the IANA "OAuth Access Token Types"
		registry [IANA.OAuth.Parameters]. (These values are and will remain distinct, due to Section 7.2.) If omitted,
		the set of supported authentication methods MUST be determined by other means.

		See Also:

			IANA.OAuth.Parameters: https://www.iana.org/assignments/oauth-parameters/oauth-parameters.xhtml
			OAuth 2.0 Authorization Server Metadata - Updated Registration Instructions: https://datatracker.ietf.org/doc/html/draft-ietf-oauth-discovery-10#section-7.2
	*/
	IntrospectionEndpointAuthMethodsSupported []string `json:"introspection_endpoint_auth_methods_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of client authentication methods supported by this revocation endpoint.
		The valid client authentication method values are those registered in the IANA "OAuth Token Endpoint
		Authentication Methods" registry [IANA.OAuth.Parameters]. If omitted, the default is "client_secret_basic" --
		the HTTP Basic Authentication Scheme specified in Section 2.3.1 of OAuth 2.0 [RFC6749].

		See Also:

			IANA.OAuth.Parameters: https://www.iana.org/assignments/oauth-parameters/oauth-parameters.xhtml
			OAuth 2.0 - Client Password: https://datatracker.ietf.org/doc/html/rfc6749#section-2.3.1
	*/
	RevocationEndpointAuthMethodsSupported []string `json:"revocation_endpoint_auth_methods_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWS signing algorithms ("alg" values) supported by the revocation
		endpoint for the signature on the JWT [JWT] used to authenticate the client at the revocation endpoint for the
		"private_key_jwt" and "client_secret_jwt" authentication methods. This metadata entry MUST be present if either
		of these authentication methods are specified in the "revocation_endpoint_auth_methods_supported" entry. No
		default algorithms are implied if this entry is omitted. The value "none" MUST NOT be used.

		See Also:

			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	RevocationEndpointAuthSigningAlgValuesSupported []string `json:"revocation_endpoint_auth_signing_alg_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWS signing algorithms ("alg" values) supported by the
		introspection endpoint for the signature on the JWT [JWT] used to authenticate the client at the introspection
		endpoint for the "private_key_jwt" and "client_secret_jwt" authentication methods. This metadata entry MUST be
		present if either of these authentication methods are specified in the
		"introspection_endpoint_auth_methods_supported" entry. No default algorithms are implied if this entry is omitted.
		The value "none" MUST NOT be used.

		See Also:

			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	IntrospectionEndpointAuthSigningAlgValuesSupported []string `json:"introspection_endpoint_auth_signing_alg_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of PKCE [RFC7636] code challenge methods supported by this authorization
		server. Code challenge method values are used in the "code_challenge_method" parameter defined in Section 4.3 of
		[RFC7636]. The valid code challenge method values are those registered in the IANA "PKCE Code Challenge Methods"
		registry [IANA.OAuth.Parameters]. If omitted, the authorization server does not support PKCE.

		See Also:

			PKCE: https://datatracker.ietf.org/doc/html/rfc7636
			IANA.OAuth.Parameters: https://www.iana.org/assignments/oauth-parameters/oauth-parameters.xhtml
	*/
	CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported,omitempty"`
}
type OAuth2JWTIntrospectionResponseDiscoveryOptions struct {
/*
OPTIONAL. JSON array containing a list of the JWS [RFC7515] signing algorithms ("alg" values) as defined in JWA
[RFC7518] supported by the introspection endpoint to sign the response.
*/
IntrospectionSigningAlgValuesSupported []string `json:"introspection_signing_alg_values_supported,omitempty"`
/*
OPTIONAL. JSON array containing a list of the JWE [RFC7516] encryption algorithms ("alg" values) as defined in
JWA [RFC7518] supported by the introspection endpoint to encrypt the content encryption key for introspection
responses (content key encryption).
*/
IntrospectionEncryptionAlgValuesSupported []string `json:"introspection_encryption_alg_values_supported"`
/*
OPTIONAL. JSON array containing a list of the JWE [RFC7516] encryption algorithms ("enc" values) as defined in
JWA [RFC7518] supported by the introspection endpoint to encrypt the response (content encryption).
*/
IntrospectionEncryptionEncValuesSupported []string `json:"introspection_encryption_enc_values_supported"`
}
// OAuth2DeviceAuthorizationGrantDiscoveryOptions represents the discovery options specific to the
// OAuth 2.0 Device Authorization Grant (RFC8628) implementation.
//
// See Also:
//
//	OAuth 2.0 Device Authorization Grant: https://datatracker.ietf.org/doc/html/rfc8628
type OAuth2DeviceAuthorizationGrantDiscoveryOptions struct {
	/*
		OPTIONAL. URL of the authorization server's device authorization endpoint, as defined in Section 3.1.
	*/
	DeviceAuthorizationEndpoint string `json:"device_authorization_endpoint"`
}
// OAuth2MutualTLSClientAuthenticationDiscoveryOptions represents the discovery options specific to the
// OAuth 2.0 Mutual-TLS Client Authentication and Certificate-Bound Access Tokens (RFC8705) implementation.
type OAuth2MutualTLSClientAuthenticationDiscoveryOptions struct {
	/*
		OPTIONAL. Boolean value indicating server support for mutual-TLS client certificate-bound access tokens. If
		omitted, the default value is false.
	*/
	TLSClientCertificateBoundAccessTokens bool `json:"tls_client_certificate_bound_access_tokens"`

	/*
		OPTIONAL. A JSON object containing alternative authorization server endpoints that, when present, an OAuth
		client intending to do mutual TLS uses in preference to the conventional endpoints. The parameter value itself
		consists of one or more endpoint parameters, such as token_endpoint, revocation_endpoint,
		introspection_endpoint, etc., conventionally defined for the top level of authorization server metadata. An
		OAuth client intending to do mutual TLS (for OAuth client authentication and/or to acquire or use
		certificate-bound tokens) when making a request directly to the authorization server MUST use the alias URL of
		the endpoint within the mtls_endpoint_aliases, when present, in preference to the endpoint URL of the same name
		at the top level of metadata. When an endpoint is not present in mtls_endpoint_aliases, then the client uses the
		conventional endpoint URL defined at the top level of the authorization server metadata. Metadata parameters
		within mtls_endpoint_aliases that do not define endpoints to which an OAuth client makes a direct request have
		no meaning and SHOULD be ignored.
	*/
	MutualTLSEndpointAliases OAuth2MutualTLSClientAuthenticationAliasesDiscoveryOptions `json:"mtls_endpoint_aliases"`
}
// OAuth2MutualTLSClientAuthenticationAliasesDiscoveryOptions is the value of the "mtls_endpoint_aliases"
// metadata entry: alternative endpoint URLs that a client intending to do mutual TLS uses in preference
// to the conventional top-level endpoint of the same name. Each field mirrors the identically named
// top-level metadata entry.
type OAuth2MutualTLSClientAuthenticationAliasesDiscoveryOptions struct {
	AuthorizationEndpoint              string `json:"authorization_endpoint,omitempty"`
	TokenEndpoint                      string `json:"token_endpoint,omitempty"`
	IntrospectionEndpoint              string `json:"introspection_endpoint,omitempty"`
	RevocationEndpoint                 string `json:"revocation_endpoint,omitempty"`
	EndSessionEndpoint                 string `json:"end_session_endpoint,omitempty"`
	UserinfoEndpoint                   string `json:"userinfo_endpoint,omitempty"`
	BackChannelAuthenticationEndpoint  string `json:"backchannel_authentication_endpoint,omitempty"`
	FederationRegistrationEndpoint     string `json:"federation_registration_endpoint,omitempty"`
	PushedAuthorizationRequestEndpoint string `json:"pushed_authorization_request_endpoint,omitempty"`
	RegistrationEndpoint               string `json:"registration_endpoint,omitempty"`
}
// OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions represents the discovery options specific to the
// JWT-Secured Authorization Request (JAR) implementation.
type OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions struct {
	/*
		Indicates where authorization request needs to be protected as Request Object and provided through either
		request or request_uri parameter.
	*/
	RequireSignedRequestObject bool `json:"require_signed_request_object"`
}
// OAuth2IssuerIdentificationDiscoveryOptions represents the discovery options specific to the
// OAuth 2.0 Authorization Server Issuer Identification implementation.
type OAuth2IssuerIdentificationDiscoveryOptions struct {
	// Boolean value indicating whether authorization responses include the "iss" parameter.
	AuthorizationResponseIssuerParameterSupported bool `json:"authorization_response_iss_parameter_supported"`
}
// OAuth2PushedAuthorizationDiscoveryOptions represents the well known discovery document specific to the
// OAuth 2.0 Pushed Authorization Requests (RFC9126) implementation.
//
// OAuth 2.0 Pushed Authorization Requests: https://datatracker.ietf.org/doc/html/rfc9126#section-5
type OAuth2PushedAuthorizationDiscoveryOptions struct {
	/*
		The URL of the pushed authorization request endpoint at which a client can post an authorization request to
		exchange for a "request_uri" value usable at the authorization server.
	*/
	PushedAuthorizationRequestEndpoint string `json:"pushed_authorization_request_endpoint"`

	/*
		Boolean parameter indicating whether the authorization server accepts authorization request data only via PAR.
		If omitted, the default value is "false".
	*/
	RequirePushedAuthorizationRequests bool `json:"require_pushed_authorization_requests"`
}
// OpenIDConnectDiscoveryOptions represents the discovery options specific to OpenID Connect.
//
// See Also:
//
//	OpenID Connect Discovery 1.0: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
type OpenIDConnectDiscoveryOptions struct {
	/*
		RECOMMENDED. URL of the OP's UserInfo Endpoint [OpenID.Core]. This URL MUST use the https scheme and MAY contain
		port, path, and query parameter components.
		See Also:
			OpenID.Core: https://openid.net/specs/openid-connect-core-1_0.html
	*/
	UserinfoEndpoint string `json:"userinfo_endpoint,omitempty"`

	/*
		REQUIRED. JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for the ID
		Token to encode the Claims in a JWT [JWT]. The algorithm RS256 MUST be included. The value none MAY be supported,
		but MUST NOT be used unless the Response Type used returns no ID Token from the Authorization Endpoint (such as
		when using the Authorization Code Flow).
		See Also:
			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWS [JWS] signing algorithms (alg values) [JWA] supported by the
		UserInfo Endpoint to encode the Claims in a JWT [JWT]. The value none MAY be included.
		See Also:
			JWS: https://datatracker.ietf.org/doc/html/rfc7515
			JWA: https://datatracker.ietf.org/doc/html/rfc7518
			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	UserinfoSigningAlgValuesSupported []string `json:"userinfo_signing_alg_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for Request
		Objects, which are described in Section 6.1 of OpenID Connect Core 1.0 [OpenID.Core]. These algorithms are used
		both when the Request Object is passed by value (using the request parameter) and when it is passed by reference
		(using the request_uri parameter). Servers SHOULD support none and RS256.
	*/
	RequestObjectSigningAlgValuesSupported []string `json:"request_object_signing_alg_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWE encryption algorithms (alg values) supported by the OP for the
		ID Token to encode the Claims in a JWT [JWT].
		See Also:
			JWE: https://datatracker.ietf.org/doc/html/rfc7516
			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	IDTokenEncryptionAlgValuesSupported []string `json:"id_token_encryption_alg_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWE [JWE] encryption algorithms (alg values) [JWA] supported by
		the UserInfo Endpoint to encode the Claims in a JWT [JWT].
		See Also:
			JWE: https://datatracker.ietf.org/doc/html/rfc7516
			JWA: https://datatracker.ietf.org/doc/html/rfc7518
			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	UserinfoEncryptionAlgValuesSupported []string `json:"userinfo_encryption_alg_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWE encryption algorithms (alg values) supported by the OP for
		Request Objects. These algorithms are used both when the Request Object is passed by value and when it is passed
		by reference.
		See Also:
			JWE: https://datatracker.ietf.org/doc/html/rfc7516
	*/
	RequestObjectEncryptionAlgValuesSupported []string `json:"request_object_encryption_alg_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) supported by the OP for the
		ID Token to encode the Claims in a JWT [JWT].
		See Also:
			JWE: https://datatracker.ietf.org/doc/html/rfc7516
			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	IDTokenEncryptionEncValuesSupported []string `json:"id_token_encryption_enc_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) [JWA] supported by the
		UserInfo Endpoint to encode the Claims in a JWT [JWT].
		See Also:
			JWE: https://datatracker.ietf.org/doc/html/rfc7516
			JWA: https://datatracker.ietf.org/doc/html/rfc7518
			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	UserinfoEncryptionEncValuesSupported []string `json:"userinfo_encryption_enc_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) supported by the OP for
		Request Objects. These algorithms are used both when the Request Object is passed by value and when it is passed
		by reference.
		See Also:
			JWE: https://datatracker.ietf.org/doc/html/rfc7516
			JWT: https://datatracker.ietf.org/doc/html/rfc7519
	*/
	RequestObjectEncryptionEncValuesSupported []string `json:"request_object_encryption_enc_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the Authentication Context Class References that this OP supports.
	*/
	ACRValuesSupported []string `json:"acr_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the display parameter values that the OpenID Provider supports. These
		values are described in Section 3.1.2.1 of OpenID Connect Core 1.0 [OpenID.Core].
		See Also:
			OpenID.Core Section 3.1.2.1: https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
	*/
	DisplayValuesSupported []string `json:"display_values_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the Claim Types that the OpenID Provider supports. These Claim Types
		are described in Section 5.6 of OpenID Connect Core 1.0 [OpenID.Core]. Values defined by this specification are
		normal, aggregated, and distributed. If omitted, the implementation supports only normal Claims.
		See Also:
			OpenID.Core Section 5.6: https://openid.net/specs/openid-connect-core-1_0.html#ClaimTypes
	*/
	ClaimTypesSupported []string `json:"claim_types_supported,omitempty"`

	/*
		OPTIONAL. Languages and scripts supported for values in Claims being returned, represented as a JSON array of
		BCP47 [RFC5646] language tag values. Not all languages and scripts are necessarily supported for all Claim values.
		See Also:
			BCP47: https://datatracker.ietf.org/doc/html/rfc5646
	*/
	ClaimLocalesSupported []string `json:"claims_locales_supported,omitempty"`

	/*
		OPTIONAL. Boolean value specifying whether the OP supports use of the request parameter, with true indicating
		support. If omitted, the default value is false.
	*/
	RequestParameterSupported bool `json:"request_parameter_supported"`

	/*
		OPTIONAL. Boolean value specifying whether the OP supports use of the request_uri parameter, with true indicating
		support. If omitted, the default value is true.
	*/
	RequestURIParameterSupported bool `json:"request_uri_parameter_supported"`

	/*
		OPTIONAL. Boolean value specifying whether the OP requires any request_uri values used to be pre-registered using
		the request_uris registration parameter. Pre-registration is REQUIRED when the value is true. If omitted, the
		default value is false.
	*/
	RequireRequestURIRegistration bool `json:"require_request_uri_registration"`

	/*
		OPTIONAL. Boolean value specifying whether the OP supports use of the claims parameter, with true indicating
		support. If omitted, the default value is false.
	*/
	ClaimsParameterSupported bool `json:"claims_parameter_supported"`
}
// OpenIDConnectFrontChannelLogoutDiscoveryOptions represents the discovery options specific to
// OpenID Connect Front-Channel Logout functionality.
//
// See Also:
//
//	OpenID Connect Front-Channel Logout: https://openid.net/specs/openid-connect-frontchannel-1_0.html#OPLogout
type OpenIDConnectFrontChannelLogoutDiscoveryOptions struct {
	/*
		OPTIONAL. Boolean value specifying whether the OP supports HTTP-based logout, with true indicating support. If
		omitted, the default value is false.
	*/
	FrontChannelLogoutSupported bool `json:"frontchannel_logout_supported"`

	/*
		OPTIONAL. Boolean value specifying whether the OP can pass iss (issuer) and sid (session ID) query parameters to
		identify the RP session with the OP when the frontchannel_logout_uri is used. If supported, the sid Claim is also
		included in ID Tokens issued by the OP. If omitted, the default value is false.
	*/
	FrontChannelLogoutSessionSupported bool `json:"frontchannel_logout_session_supported"`
}
// OpenIDConnectBackChannelLogoutDiscoveryOptions represents the discovery options specific to
// OpenID Connect Back-Channel Logout functionality.
//
// See Also:
//
//	OpenID Connect Back-Channel Logout: https://openid.net/specs/openid-connect-backchannel-1_0.html#BCSupport
type OpenIDConnectBackChannelLogoutDiscoveryOptions struct {
	/*
		OPTIONAL. Boolean value specifying whether the OP supports back-channel logout, with true indicating support.
		If omitted, the default value is false.
	*/
	BackChannelLogoutSupported bool `json:"backchannel_logout_supported"`

	/*
		OPTIONAL. Boolean value specifying whether the OP can pass a sid (session ID) Claim in the Logout Token to
		identify the RP session with the OP. If supported, the sid Claim is also included in ID Tokens issued by the OP.
		If omitted, the default value is false.
	*/
	BackChannelLogoutSessionSupported bool `json:"backchannel_logout_session_supported"`
}
// OpenIDConnectSessionManagementDiscoveryOptions represents the discovery options specific to OpenID Connect 1.0
// Session Management.
//
// To support OpenID Connect Session Management, the RP needs to obtain the Session Management related OP metadata. This
// OP metadata is normally obtained via the OP's Discovery response, as described in OpenID Connect Discovery 1.0, or
// MAY be learned via other mechanisms. This OpenID Provider Metadata parameter MUST be included in the Server's
// discovery responses when Session Management and Discovery are supported.
//
// See Also:
//
//	OpenID Connect 1.0 Session Management: https://openid.net/specs/openid-connect-session-1_0.html
type OpenIDConnectSessionManagementDiscoveryOptions struct {
	/*
		REQUIRED. URL of an OP iframe that supports cross-origin communications for session state information with the
		RP Client, using the HTML5 postMessage API. This URL MUST use the https scheme and MAY contain port, path, and
		query parameter components. The page is loaded from an invisible iframe embedded in an RP page so that it can
		run in the OP's security context. It accepts postMessage requests from the relevant RP iframe and uses
		postMessage to post back the login status of the End-User at the OP.
	*/
	CheckSessionIFrame string `json:"check_session_iframe"`
}
// OpenIDConnectRPInitiatedLogoutDiscoveryOptions represents the discovery options specific to
// OpenID Connect RP-Initiated Logout 1.0.
//
// To support OpenID Connect RP-Initiated Logout, the RP needs to obtain the RP-Initiated Logout related OP metadata.
// This OP metadata is normally obtained via the OP's Discovery response, as described in OpenID Connect Discovery 1.0,
// or MAY be learned via other mechanisms. This OpenID Provider Metadata parameter MUST be included in the Server's
// discovery responses when RP-Initiated Logout and Discovery are supported.
//
// See Also:
//
//	OpenID Connect RP-Initiated Logout 1.0: https://openid.net/specs/openid-connect-rpinitiated-1_0.html
type OpenIDConnectRPInitiatedLogoutDiscoveryOptions struct {
	/*
		REQUIRED. URL at the OP to which an RP can perform a redirect to request that the End-User be logged out at the
		OP. This URL MUST use the https scheme and MAY contain port, path, and query parameter components.
	*/
	EndSessionEndpoint string `json:"end_session_endpoint"`
}
// OpenIDConnectPromptCreateDiscoveryOptions represents the discovery options specific to Initiating User Registration
// via OpenID Connect 1.0 functionality.
//
// This specification extends the OpenID Connect Discovery Metadata Section 3.
//
// See Also:
//
//	Initiating User Registration via OpenID Connect 1.0: https://openid.net/specs/openid-connect-prompt-create-1_0.html
type OpenIDConnectPromptCreateDiscoveryOptions struct {
	/*
		OPTIONAL. JSON array containing the list of prompt values that this OP supports.
		This metadata element is OPTIONAL in the context of the OpenID Provider not supporting the create value. If
		omitted, the Relying Party should assume that this specification is not supported. The OpenID Provider MAY
		provide this metadata element even if it doesn't support the create value.
		Specific to this specification, a value of create in the array indicates to the Relying party that this OpenID
		Provider supports this specification. If an OpenID Provider supports this specification it MUST define this metadata
		element in the openid-configuration file. Additionally, if this metadata element is defined by the OpenID
		Provider, the OP must also specify all other prompt values which it supports.
		See Also:
			OpenID.PromptCreate: https://openid.net/specs/openid-connect-prompt-create-1_0.html
	*/
	PromptValuesSupported []string `json:"prompt_values_supported,omitempty"`
}
// OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions represents the discovery options specific to
// OpenID Connect Client-Initiated Backchannel Authentication Flow - Core 1.0 (CIBA).
//
// The following authorization server metadata parameters are introduced by this specification for OPs publishing their
// support of the CIBA flow and details thereof.
//
// See Also:
//
//	OpenID Connect Client-Initiated Backchannel Authentication Flow - Core 1.0:
//	https://openid.net/specs/openid-client-initiated-backchannel-authentication-core-1_0.html#rfc.section.4
type OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions struct {
	/*
		REQUIRED. URL of the OP's Backchannel Authentication Endpoint as defined in Section 7.
	*/
	BackChannelAuthenticationEndpoint string `json:"backchannel_authentication_endpoint"`

	/*
		REQUIRED. JSON array containing one or more of the following values: poll, ping, and push.
	*/
	BackChannelTokenDeliveryModesSupported []string `json:"backchannel_token_delivery_modes_supported"`

	/*
		OPTIONAL. JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for signed
		authentication requests, which are described in Section 7.1.1. If omitted, signed authentication requests are
		not supported by the OP.
	*/
	BackChannelAuthRequestSigningAlgValuesSupported []string `json:"backchannel_authentication_request_signing_alg_values_supported,omitempty"`

	/*
		OPTIONAL. Boolean value specifying whether the OP supports the use of the user_code parameter, with true
		indicating support. If omitted, the default value is false.
	*/
	BackChannelUserCodeParameterSupported bool `json:"backchannel_user_code_parameter_supported"`
}
// OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions represents the discovery options specific to
// JWT Secured Authorization Response Mode for OAuth 2.0 (JARM).
//
// Authorization servers SHOULD publish the supported algorithms for signing and encrypting the JWT of an authorization
// response by utilizing OAuth 2.0 Authorization Server Metadata [RFC8414] parameters. The following parameters are
// introduced by this specification.
//
// See Also:
//
//	JWT Secured Authorization Response Mode for OAuth 2.0 (JARM):
//	https://openid.net/specs/oauth-v2-jarm.html#name-authorization-server-metada
type OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions struct {
	/*
		OPTIONAL. A JSON array containing a list of the JWS [RFC7515] signing algorithms (alg values) supported by the
		authorization endpoint to sign the response.
	*/
	AuthorizationSigningAlgValuesSupported []string `json:"authorization_signing_alg_values_supported,omitempty"`

	/*
		OPTIONAL. A JSON array containing a list of the JWE [RFC7516] encryption algorithms (alg values) supported by
		the authorization endpoint to encrypt the response.
	*/
	AuthorizationEncryptionAlgValuesSupported []string `json:"authorization_encryption_alg_values_supported,omitempty"`

	/*
		OPTIONAL. A JSON array containing a list of the JWE [RFC7516] encryption algorithms (enc values) supported by
		the authorization endpoint to encrypt the response.
	*/
	AuthorizationEncryptionEncValuesSupported []string `json:"authorization_encryption_enc_values_supported,omitempty"`
}
// OpenIDFederationDiscoveryOptions represents the discovery options specific to OpenID Federation 1.0.
type OpenIDFederationDiscoveryOptions struct {
	/*
		OPTIONAL. URL of the OP's federation-specific Dynamic Client Registration Endpoint. If the OP supports explicit
		client registration as described in Section 10.2, then this claim is REQUIRED.
	*/
	FederationRegistrationEndpoint string `json:"federation_registration_endpoint,omitempty"`

	/*
		REQUIRED. Array specifying the federation types supported. Federation-type values defined by this specification
		are automatic and explicit.
	*/
	ClientRegistrationTypesSupported []string `json:"client_registration_types_supported"`

	/*
		OPTIONAL. A JSON Object defining the client authentications supported for each endpoint. The endpoint names are
		defined in the IANA "OAuth Authorization Server Metadata" registry [IANA.OAuth.Parameters]. Other endpoints and
		authentication methods are possible if made recognizable according to established standards and not in conflict
		with the operating principles of this specification. In OpenID Connect Core, no client authentication is
		performed at the authentication endpoint. Instead, the request itself is authenticated. The OP maps information
		in the request (like the redirect_uri) to information it has gained on the client through static or dynamic
		registration. If the mapping is successful, the request can be processed. If the RP uses Automatic Registration,
		as defined in Section 10.1, the OP has no prior knowledge of the RP. Therefore, the OP must start by gathering
		information about the RP using the process outlined in Section 6. Once it has the RP's metadata, the OP can
		verify the request in the same way as if it had known the RP's metadata beforehand. To make the request
		verification more secure, we demand the use of a client authentication or verification method that proves that
		the RP is in possession of a key that appears in the RP's metadata.
	*/
	RequestAuthenticationMethodsSupported []string `json:"request_authentication_methods_supported,omitempty"`

	/*
		OPTIONAL. JSON array containing a list of the JWS signing algorithms (alg values) supported for the signature on
		the JWT [RFC7519] used in the request_object contained in the request parameter of an authorization request or
		in the private_key_jwt of a pushed authorization request. This entry MUST be present if either of these
		authentication methods are specified in the request_authentication_methods_supported entry. No default
		algorithms are implied if this entry is omitted. Servers SHOULD support RS256. The value none MUST NOT be used.
	*/
	// NOTE(review): the Go identifier is misspelled ("Supproted"). Renaming it would break
	// external callers of this exported field, so it is left as-is; the JSON tag is correct.
	RequestAuthenticationSigningAlgValuesSupproted []string `json:"request_authentication_signing_alg_values_supported,omitempty"`
}
// OAuth2WellKnownConfiguration represents the well known discovery document specific to OAuth 2.0.
//
// The embedded value types contribute their fields to every rendered document; the embedded
// pointer types are optional extension groups — presumably omitted from the marshaled document
// when the pointer is nil (TODO confirm against the marshaling call site).
type OAuth2WellKnownConfiguration struct {
	CommonDiscoveryOptions
	OAuth2DiscoveryOptions

	// Optional extension specification groups.
	*OAuth2DeviceAuthorizationGrantDiscoveryOptions
	*OAuth2MutualTLSClientAuthenticationDiscoveryOptions
	*OAuth2IssuerIdentificationDiscoveryOptions
	*OAuth2JWTIntrospectionResponseDiscoveryOptions
	*OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions
	*OAuth2PushedAuthorizationDiscoveryOptions
}
// OpenIDConnectWellKnownConfiguration represents the well known discovery document specific to OpenID Connect.
//
// It extends OAuth2WellKnownConfiguration with the OpenID Connect core metadata plus the
// optional (pointer-embedded) OpenID Connect extension specification groups.
type OpenIDConnectWellKnownConfiguration struct {
	OAuth2WellKnownConfiguration
	OpenIDConnectDiscoveryOptions

	// Optional extension specification groups.
	*OpenIDConnectFrontChannelLogoutDiscoveryOptions
	*OpenIDConnectBackChannelLogoutDiscoveryOptions
	*OpenIDConnectSessionManagementDiscoveryOptions
	*OpenIDConnectRPInitiatedLogoutDiscoveryOptions
	*OpenIDConnectPromptCreateDiscoveryOptions
	*OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions
	*OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions
	*OpenIDFederationDiscoveryOptions
}
|
package main
import (
"io"
"log"
"os"
)
var (
Info *log.Logger
Warning *log.Logger
Error *log.Logger
)
func init(){
errFile, err := os.OpenFile("errors.log",os.O_CREATE|os.O_WRONLY|os.O_APPEND,0666)
if err!= nil {
log.Fatalln("打开日志文件失败:",err)
}
Info = log.New(os.Stdout,"Info:",log.Ldate | log.Ltime | log.Lshortfile)
Warning = log.New(os.Stdout,"Warning:",log.Ldate | log.Ltime | log.Lshortfile)
//io.MultiWriter函数可以包装多个io.Writer为一个io.Writer,这样就可以达到同时对对各io.Writer输出日志的目的
Error = log.New(io.MultiWriter(os.Stderr,errFile),"Error:",log.Ldate | log.Ltime | log.Lshortfile)
}
// main emits one sample message at each severity to demonstrate the three
// package-level loggers configured in init.
func main() {
	Info.Println("zfy测试信息日志")
	Warning.Println("zfy测试告警日志")
	Error.Println("zfy测试错误日志")
}
//log原理
//func Caller(skip int) (pc uintptr, file string, line int, ok bool)
/**
runtime.Caller它可以获取运行时方法的调用信息
参数skip表示跳过栈帧数,0表示不跳过,也就是runtime.Caller的调用者,1的话就是再向上一层,表示调用者的调用者
log日志包里使用的是2,也就是表示我们在源码中调用log.Print、log.Fatal和log.Panic这些函数的调用者
以main函数调用log.Println为例,是main->log.Println->*Logger.Output->runtime.Caller这么一个方法调用栈,所以这时候,skip的值分别代表:
1. 0表示*logger.Output中调用runtime.Caller的源代码文件和行号
2. 1表示log.Println中调用*logger.Output的源代码文件和行号
3. 2表示main中调用log.Println的源代码文件和行号
这也是log包中这个skip的值为什么一直是2的原因
*/
|
package repository
import (
"context"
"database/sql"
"github.com/dheerajgopi/todo-api/models"
"github.com/dheerajgopi/todo-api/task"
)
// mySQLRepo is a MySQL-backed implementation of the task repository.
type mySQLRepo struct {
	DB *sql.DB // shared connection pool; opened and closed by the caller
}
// New will return new object which implements task.Repository
func New(db *sql.DB) task.Repository {
	repo := &mySQLRepo{DB: db}

	return repo
}
// getOne runs query (expected to select the standard task columns in order:
// id, title, description, created_by, is_complete, created_at, updated_at)
// with args and scans the single resulting row into a models.Task.
func (repo *mySQLRepo) getOne(ctx context.Context, query string, args ...interface{}) (*models.Task, error) {
	stmt, err := repo.DB.PrepareContext(ctx, query)
	if err != nil {
		return nil, err
	}
	// A prepared statement holds a server-side resource; previously it was
	// never closed, leaking one statement per call.
	defer stmt.Close()

	row := stmt.QueryRowContext(ctx, args...)

	task := &models.Task{}
	userID := int64(0)

	err = row.Scan(
		&task.ID,
		&task.Title,
		&task.Description,
		&userID,
		&task.IsComplete,
		&task.CreatedAt,
		&task.UpdatedAt,
	)

	if err != nil {
		return nil, err
	}

	// Only the creator's ID is selected, so expose it as a stub User.
	task.CreatedBy = &models.User{
		ID: userID,
	}

	return task, nil
}
// GetByID will return task with the given id
func (repo *mySQLRepo) GetByID(ctx context.Context, id int64) (*models.Task, error) {
	const query = `SELECT id, title, description, created_by, is_complete, created_at, updated_at
	FROM task WHERE id=?`

	return repo.getOne(ctx, query, id)
}
// Create will store new task entry
//
// The insert runs inside a transaction; on success the generated
// auto-increment ID is written back into task.ID.
func (repo *mySQLRepo) Create(ctx context.Context, task *models.Task) error {
	query := `INSERT INTO task (title, description, created_by, is_complete, created_at, updated_at)
	VALUES (?, ?, ?, ?, ?, ?)`

	tx, err := repo.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}

	// ExecContext (rather than Exec) honours cancellation/deadline of ctx,
	// matching the other repository methods which use *Context variants.
	res, err := tx.ExecContext(
		ctx,
		query,
		task.Title,
		task.Description,
		task.CreatedBy.ID,
		task.IsComplete,
		task.CreatedAt,
		task.UpdatedAt,
	)

	if err != nil {
		tx.Rollback()
		return err
	}

	lastID, err := res.LastInsertId()
	if err != nil {
		tx.Rollback()
		return err
	}

	if err = tx.Commit(); err != nil {
		return err
	}

	task.ID = lastID

	return nil
}
// GetAllByUserID returns list of tasks created by an user
func (repo *mySQLRepo) GetAllByUserID(ctx context.Context, userID int64) ([]*models.Task, error) {
	query := `SELECT id, title, description, created_by, is_complete, created_at, updated_at
	FROM task WHERE created_by=?`

	stmt, err := repo.DB.PrepareContext(ctx, query)
	if err != nil {
		return nil, err
	}
	// Close the prepared statement; previously it was leaked.
	defer stmt.Close()

	rows, err := stmt.QueryContext(ctx, userID)
	if err != nil {
		// rows is nil on error; deferring rows.Close() before this check (as
		// before) would dereference a nil *sql.Rows at function return.
		return nil, err
	}
	defer rows.Close()

	tasks := make([]*models.Task, 0)

	for rows.Next() {
		task := &models.Task{}
		createdBy := int64(0) // renamed from userID to avoid shadowing the parameter

		err = rows.Scan(
			&task.ID,
			&task.Title,
			&task.Description,
			&createdBy,
			&task.IsComplete,
			&task.CreatedAt,
			&task.UpdatedAt,
		)

		if err != nil {
			return nil, err
		}

		task.CreatedBy = &models.User{
			ID: createdBy,
		}

		tasks = append(tasks, task)
	}

	if err = rows.Err(); err != nil {
		return nil, err
	}

	return tasks, nil
}
|
package main
/**
面试题 02.01. 移除重复节点
编写代码,移除未排序链表中的重复节点。保留最开始出现的节点。
示例1:
```
输入:[1, 2, 3, 3, 2, 1]
输出:[1, 2, 3]
```
示例2:
```
输入:[1, 1, 1, 1, 2]
输出:[1, 2]
```
提示:
- 链表长度在`[0, 20000]`范围内。
- 链表元素在`[0, 20000]`范围内。
*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// RemoveDuplicateNodes removes nodes with duplicate values from an unsorted
// singly-linked list in place, keeping the first occurrence of each value,
// and returns the (unchanged) head of the list.
func RemoveDuplicateNodes(head *ListNode) *ListNode {
	if head == nil {
		return head
	}

	// Values seen so far; the head's value is always kept.
	seen := map[int]struct{}{head.Val: {}}

	for node := head; node.Next != nil; {
		if _, dup := seen[node.Next.Val]; dup {
			// Unlink the duplicate and stay on the current node.
			node.Next = node.Next.Next
		} else {
			// First occurrence: record it and advance.
			node = node.Next
			seen[node.Val] = struct{}{}
		}
	}

	return head
}

// ListNode is a node of a singly-linked list.
type ListNode struct {
	Val  int
	Next *ListNode
}
|
package main
import (
"io/ioutil"
"fmt"
"crypto"
"encoding/pem"
"crypto/x509"
"crypto/rsa"
"encoding/base64"
"reflect"
)
// main loads an RSA private key and certificate from disk, signs a sample
// string with the key, and verifies the signature against the certificate.
func main() {
	// Read the private key contents.
	keyBytes, err := ioutil.ReadFile("static/rsa.key")
	if err != nil {
		fmt.Println("read file error")
		return
	}

	fmt.Println("-----------------RSA私钥-----------------")
	fmt.Println(string(keyBytes))

	src := "试一试"

	s, err := RsaSign(src, keyBytes, crypto.SHA256)
	if err != nil {
		fmt.Println(err)
		// Previously execution fell through here and "verified" an empty
		// signature; stop instead.
		return
	}

	fmt.Println("-----------------S-----------------")
	fmt.Println(s)

	// Read the certificate contents.
	certBytes, err := ioutil.ReadFile("static/rsa.crt")
	if err != nil {
		fmt.Println("read file error")
		return
	}

	fmt.Println("-----------------RSA证书-----------------")
	fmt.Println(string(certBytes))

	if e := RsaVerifySign(s, src, string(certBytes), crypto.SHA256); e != nil {
		panic(e)
	}

	fmt.Println("pass")
}
func RsaSign(origData string, privateKeyPem []byte, hash crypto.Hash) (sig string, err error) {
//解析成RSA私钥
block, _ := pem.Decode(privateKeyPem)
prikey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
fmt.Println(err)
}
h := hash.New()
h.Write([]byte(origData))
digest := h.Sum(nil)
s, err := rsa.SignPKCS1v15(nil, prikey, hash, digest)
if err != nil {
fmt.Println(err)
}
sig = base64.StdEncoding.EncodeToString(s)
return
}
func RsaVerifySign(signBase64 string, data string, pemCert string, hash crypto.Hash) error {
//base64解码
sign, err := base64.StdEncoding.DecodeString(signBase64)
if err != nil {
fmt.Println(err)
}
block, _ := pem.Decode([]byte(pemCert))
if block == nil {
fmt.Println(err)
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
fmt.Println(err)
}
h := hash.New()
h.Write([]byte(data))
digest := h.Sum(nil)
testPubKey := cert.PublicKey
fmt.Println("公钥类型:", reflect.TypeOf(testPubKey))
rsaPubKey := cert.PublicKey.(*rsa.PublicKey)
return rsa.VerifyPKCS1v15(rsaPubKey, hash, digest, sign)
}
|
package main
import (
"bufio"
"log"
"os"
"strings"
)
// main reads boarding passes from input.txt, computes each seat ID as
// row*8 + column via binary space partitioning, and reports every seat ID
// that never appears in the input.
func main() {
	f, err := os.Open("input.txt")
	if err != nil {
		// Previously the error was ignored, causing a nil-pointer panic on read.
		log.Fatalf("opening input: %v", err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)

	// Rows span 0-127 and columns 0-7, so seat IDs range over [0, 1023].
	// The previous size of 977 could index out of range on valid passes.
	seats := make([]bool, 1024)

	for scanner.Scan() {
		text := strings.TrimSpace(scanner.Text())
		if len(text) < 10 {
			// Skip blank/short lines instead of panicking on the slice below.
			continue
		}
		row := getRow(text[:7], 0, 127)
		column := getColumn(text[7:], 0, 7)
		seatID := (row * 8) + column
		seats[seatID] = true
	}

	if err := scanner.Err(); err != nil {
		log.Fatalf("reading input: %v", err)
	}

	for id, occupied := range seats {
		if !occupied {
			log.Printf("available seat %d", id)
		}
	}
}
// getRow resolves the first seven characters of a boarding pass ("F" keeps
// the lower half, anything else the upper half) to a concrete row by
// iteratively halving the inclusive range [start, end].
func getRow(text string, start, end int) int {
	for {
		if len(text) == 1 {
			if text == "F" {
				return start
			}
			return end
		}

		middle := start + ((end - start) / 2)
		if text[0] == 'F' {
			end = middle
		} else {
			start = middle + 1
		}
		text = text[1:]
	}
}
// getColumn resolves the last three characters of a boarding pass ("L" keeps
// the lower half, anything else the upper half) to a concrete column by
// iteratively halving the inclusive range [start, end].
func getColumn(text string, start, end int) int {
	for {
		if len(text) == 1 {
			if text == "L" {
				return start
			}
			return end
		}

		middle := start + ((end - start) / 2)
		if text[0] == 'L' {
			end = middle
		} else {
			start = middle + 1
		}
		text = text[1:]
	}
}
|
package main
import (
"fmt"
"io"
)
var (
	// stack is used to store variable names and values.
	// NOTE(review): package-level mutable state shared by all parsers created
	// in this process; not safe for concurrent use.
	stack map[string]Value
)
func init() {
stack = make(map[string]Value)
}
// Parser represents a parser.
type Parser struct {
	s *Scanner // underlying token scanner

	// buf retains recently read tokens so that tokens can be pushed back
	// (unscanned) and re-read without touching the Scanner.
	buf struct {
		t    []Token  // stack of last read tokens
		lit  []string // stack of last read literals
		n    int      // count of outstanding unscans, bounded by 'size' (set in NewParser)
		size int      // stack size for 't' and 'lit'
	}
}
// NewParser returns a new instance of Parser reading from r, with a
// ten-entry token push-back buffer.
func NewParser(r io.Reader) *Parser {
	parser := &Parser{s: NewScanner(r)}
	parser.buf.size = 10
	return parser
}
// scan returns the next token from the underlying scanner.
// if a token has been unscanned then read that instead.
// The buffer keeps at most buf.size recent tokens; unscan only bumps
// buf.n, and this method pops the history stack to replay tokens.
func (p *Parser) scan() (t Token, lit string) {
	if p.buf.n != 0 {
		// Replay: pop the most recent token/literal off the history.
		// todo(santiaago): refactor
		t, p.buf.t = p.buf.t[len(p.buf.t)-1], p.buf.t[:len(p.buf.t)-1]
		lit, p.buf.lit = p.buf.lit[len(p.buf.lit)-1], p.buf.lit[:len(p.buf.lit)-1]
		p.buf.n--
		return
	}
	t, lit = p.s.Scan()
	if len(p.buf.t) < p.buf.size {
		// History not yet full: just record the token.
		p.buf.t = append(p.buf.t, t)
		p.buf.lit = append(p.buf.lit, lit)
	} else {
		// stack limit reached so shift values and insert new ones
		// todo(santiaago): refactor
		p.buf.t = p.buf.t[1:]
		p.buf.t = append(p.buf.t, t)
		p.buf.lit = p.buf.lit[1:]
		p.buf.lit = append(p.buf.lit, lit)
	}
	return
}
// unscan pushes the previously read token back onto the buffer so the
// next scan replays it. It refuses (with a printed error) once the whole
// history buffer has been pushed back.
func (p *Parser) unscan() {
	if p.buf.n != p.buf.size {
		p.buf.n++
		return
	}
	fmt.Println("ERROR cannot unscan anymore, stack size limit reached.")
}
// scanIgnoreWhitespace scans the next token, skipping at most one
// whitespace token (consecutive spaces are assumed to be merged by the
// scanner).
func (p *Parser) scanIgnoreWhitespace() (Token, string) {
	tok, lit := p.scan()
	if tok != Space {
		return tok, lit
	}
	return p.scan()
}
// numberOrVector returns a number or a vector.
// It consumes a run of consecutive (possibly '-'-prefixed) Number tokens
// and collects them into a Vector; a run of length one is unwrapped and
// returned as a single Value. Returns nil when the first token is not a
// number (or a '-' immediately followed by a number).
func (p *Parser) numberOrVector() Value {
	tok, lit := p.scanIgnoreWhitespace()
	var vector Vector
	if tok == Number {
		vector = append(vector, ValueParse(lit))
	} else if tok == Operator && lit == "-" {
		// todo(santiaago): handle this as a unary operator.
		// we use scan here because the '-' sign number must be
		// right next to number, no space in between
		tok, lit = p.scan()
		if tok == Number {
			vector = append(vector, ValueParse("-"+lit))
		} else {
			return nil
		}
	} else {
		return nil
	}
	for {
		// Read a field.
		tok, lit := p.scanIgnoreWhitespace()
		if tok == Operator && lit == "-" {
			// we use scan here because the '-' sign number must be
			// right next to number, no space in between
			tok, lit = p.scan()
			if tok == Number {
				lit = "-" + lit
			} else {
				// unscan twice to roll back both scan
				p.unscan()
				p.unscan()
				break
			}
		} else if tok != Number {
			// Not part of the vector: push it back for the caller.
			p.unscan()
			break
		}
		v := ValueParse(lit)
		vector = append(vector, v)
	}
	// todo(santiaago) do we need this?
	if len(vector) == 1 {
		return vector[0]
	}
	return vector
}
// Parse parse a assign statement a = b.
// It also handles bare numbers/vectors/identifiers, assignments of numbers
// or identifiers, and chains of binary/unary operator terms, which are
// collected and handed to buildOperatorExpression.
func (p *Parser) Parse() (*Expression, error) {
	// First token can be an identifier or number(a number can start with a '-' sign)
	// todo(santiaago): refactor first token.
	var left Value
	var right, operator string
	tok, lit := p.scanIgnoreWhitespace()
	// lastTok remembers what kind of left-hand side we saw for the EOF
	// and assignment checks below.
	lastTok := tok
	if tok == Identifier {
		left = Variable{name: lit}
	} else if tok == Number {
		p.unscan()
		left = p.numberOrVector()
	} else if tok == Operator {
		if lit == "-" {
			// Negative number/vector on the left.
			p.unscan()
			left = p.numberOrVector()
			if left != nil {
				lastTok = Number
			}
		} else if isUnary(lit) {
			// remember operator
			p.unscan()
			operator = lit
		}
		// todo(santiaago): need to handle negative identifiers and vectors.
	} else {
		return nil, fmt.Errorf("ERROR found %q, expected left", lit)
	}
	// Next it could be EOF, an operator or an assignment (for now)
	// todo(santiaago): refactor EOF case.
	tok, lit = p.scanIgnoreWhitespace()
	if tok == EOF {
		if lastTok == Number {
			expr := Expression(left)
			return &expr, nil
		} else if lastTok == Identifier {
			// todo(santiaago): do we need this error check?
			if left.Evaluate() == nil {
				return nil, fmt.Errorf("ERROR")
			}
			expr := Expression(left)
			return &expr, nil
		} else {
			return nil, fmt.Errorf("ERROR not a number or identifier")
		}
	}
	isOperator := tok == Operator
	isAssign := tok == Assign
	isNumber := tok == Number
	if !isAssign && !isOperator && !isNumber {
		return nil, fmt.Errorf("ERROR found %q, expected assignment, operator or number. Got token %v", lit, tok)
	}
	// if the literal scanned was a number we unscan it to scan it completly.
	// This is to scan all numbers in a vector and don't skip the first one.
	if isNumber {
		p.unscan()
	}
	// if last token read is an operator, remember it.
	if isOperator {
		operator = lit
	}
	// Next: Take care of assign case.
	// todo(santiaago): refactor assign case.
	if isAssign {
		tok, lit = p.scanIgnoreWhitespace()
		if tok == Number {
			// todo(santiaago):
			// we should print an error here if left is a number and
			// not a variable as the asignment 1 = 2 doesn't make sense.
			var expr Expression
			if v, ok := left.(Variable); ok {
				// Store the value in the global symbol table.
				stack[v.name] = ValueParse(lit)
				expr = Expression(v)
			} else {
				return nil, fmt.Errorf("ERROR left hand side should be a variable")
			}
			return &expr, nil
		} else if lastTok == Number {
			return nil, fmt.Errorf("ERROR left hand side should be a variable")
		} else if tok == Identifier {
			// Assignment of one variable to another: copy the value.
			right = lit
			var expr Expression
			var r Value
			if val, ok := stack[right]; ok {
				r = val
			} else {
				return nil, fmt.Errorf("ERROR variable undefined")
			}
			if v, ok := left.(Variable); ok {
				stack[v.name] = r
				expr = Expression(v)
			}
			return &expr, nil
		}
		return nil, fmt.Errorf("ERROR right hand side has unexpected token")
	}
	// Next: Take care of the operator case.
	// todo(santiaago): refactor this.
	// We should loop over all our operators.
	// Collect the remaining terms and operators left-to-right.
	var terms []Value
	var operators []string
	// Initialize arrays with first term and first operator.
	if left != nil {
		terms = append(terms, left)
	}
	operators = append(operators, operator)
	for {
		// Read a field.
		tok, lit = p.scanIgnoreWhitespace()
		if tok != Identifier && tok != Number && tok != Operator {
			return nil, fmt.Errorf("ERROR found %q, expected number or identifier or sign", lit)
		}
		// todo(santiaago): should check here cases (number, identifier, sign)
		if tok == Number || tok == Operator {
			p.unscan()
			term := p.numberOrVector()
			terms = append(terms, term)
		} else if tok == Identifier {
			// Identifiers are resolved eagerly against the symbol table.
			if _, ok := stack[lit]; ok {
				terms = append(terms, stack[lit])
			} else {
				return nil, fmt.Errorf("ERROR variable %v not found", lit)
			}
		}
		// Read operator
		tok, lit = p.scanIgnoreWhitespace()
		// If the next token is not an operator then break the loop
		if tok != Operator {
			p.unscan()
			break
		}
		operators = append(operators, lit)
	}
	// At this point we have terms and, operators.
	// We now need to process all of this.
	return buildOperatorExpression(terms, operators)
}
// At this point we have the following
// left, operator, terms, operators
// we need now to process all of this.
// buildOperatorExpression folds the parsed terms and operators left to
// right into a single Expression. Unary operators wrap the accumulated
// expression; binary operators combine it with the next term.
func buildOperatorExpression(terms []Value, operators []string) (*Expression, error) {
	var cumulExpr Expression
	first := terms[0]
	// todo(santiaago):
	// you should be able to do:
	// a = 1
	// 1
	// 1 2 a
	// 1 2 1
	// This means that a vector can also contain identifiers.
	// we will ignore this case for now, we will only work with vector of numbers.
	// if _, ok := stack[first]; ok {
	//	cumulExpr = Expression(Variable{name: first})
	// } else {
	cumulExpr = first
	//}
	for i := 0; i < len(operators); i++ {
		op := operators[i]
		// unary case
		// todo(santiaago): how to handle +/ 1 2 3 + +/ 1 2 3 ?
		if isUnary(op) {
			u := Unary{Val: cumulExpr.Evaluate(), Operator: op}
			cumulExpr = Expression(u)
			continue
		}
		// Binary operators need exactly one more term than operator.
		// todo(santiaago): need to clean this.
		if len(terms)-1 != len(operators) {
			return nil, fmt.Errorf("ERROR terms and operators size mismatch")
		}
		right := terms[i+1]
		var r Value
		// if val, ok := stack[right]; ok {
		//	r = val
		// } else {
		r = right
		//}
		if cumulExpr != nil {
			b := Binary{Left: cumulExpr.Evaluate(), Right: r, Operator: op}
			cumulExpr = Expression(b)
		} else {
			return nil, fmt.Errorf("ERROR nil expression")
		}
	}
	return &cumulExpr, nil
}
|
package main
import (
"fmt"
null "gopkg.in/guregu/null.v3"
)
type hotdog int
func main() {
var x int
fmt.Println(x)
var s string
fmt.Println(s)
arr := [3]int{}
fmt.Println(arr)
slicevar := []int{}
fmt.Println(slicevar)
//intptr := 90
// var x1 hotdog = 56
// nullableInt := Int8fromPtr(*x1)
fmt.Println(nullableInt)
}
// Int8 is a nullable 8-bit integer backed by null.Int, which stores the
// value as an int64 together with a validity flag.
type Int8 struct {
	null.Int
}
// NewInt8 builds an Int8 from a value and an explicit validity flag.
func NewInt8(i int8, valid bool) Int8 {
	inner := null.NewInt(int64(i), valid)
	return Int8{inner}
}
// Int8from wraps a plain int8 as a valid Int8.
func Int8from(i int8) Int8 {
	const valid = true
	return NewInt8(i, valid)
}
// Int8fromPtr converts *int8 into an Int8: nil maps to the invalid (null)
// value, anything else to a valid value.
func Int8fromPtr(i *int8) Int8 {
	if i != nil {
		return NewInt8(*i, true)
	}
	return NewInt8(0, false)
}
|
package main
import (
"fmt"
"math"
"os"
)
// Vertex is a 2D point with float64 coordinates.
type Vertex struct {
	X, Y float64
}
// Hypo returns the Euclidean norm sqrt(X²+Y²) of the vertex.
func (v *Vertex) Hypo() float64 {
	// math.Hypot computes the same value as math.Sqrt(x*x+y*y) while
	// avoiding intermediate overflow/underflow; also fixes the non-gofmt
	// spacing in the original signature.
	return math.Hypot(v.X, v.Y)
}
type MyInt int32
// double returns twice the receiver's value as a plain int32.
func (v MyInt) double() int32 {
	return 2 * int32(v)
}
// main exercises the Vertex and MyInt helpers and writes a greeting to stdout.
func main() {
	vertex := &Vertex{3, 4}
	fmt.Println(vertex.Hypo())
	fmt.Println(MyInt(32).double())
	fmt.Fprintf(os.Stdout, "Heloo thereeeeeeeeeeeee")
}
|
package zconfig
import (
"fmt"
"os"
"strconv"
"strings"
)
const (
	// COMMENT_STRING marks the start of a comment in a config line.
	COMMENT_STRING = "#"
	// EQUAL_STRING separates a key from its value.
	EQUAL_STRING = "="
	// SUBSPLIT_STRING separates a prefix from its sub-key, e.g. "db::host".
	SUBSPLIT_STRING = "::"
)
// Configer holds a parsed key/value config file. Keys are stored
// lower-cased; "prefix::sub" keys are additionally indexed by prefix.
type Configer struct {
	path   string              // config file path it was loaded from
	data   map[string]string   // lower-cased key -> raw string value
	prefix map[string][]string // prefix - subfixs
}
// InitConfiger opens and parses the config file at path and returns a
// ready-to-use Configer. The file is always closed before returning.
func InitConfiger(path string) (*Configer, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	config := &Configer{
		path:   path,
		data:   make(map[string]string),
		prefix: make(map[string][]string),
	}
	if err := config.parse(f); err != nil {
		// Previously the descriptor leaked on this error path.
		f.Close()
		return nil, fmt.Errorf("file %v parse error! error:%v", path, err)
	}
	if err := f.Close(); err != nil {
		return nil, fmt.Errorf("file %v close error! error:%v\n", path, err)
	}
	return config, nil
}
// String returns the raw value stored under key (matched case-insensitively).
func (c *Configer) String(key string) (string, error) {
	key = strings.ToLower(key)
	value, ok := c.data[key]
	if !ok {
		return "", fmt.Errorf("data not exist! key:%v", key)
	}
	return value, nil
}
// DefaultString returns the value for key, or defaultVal when it is absent.
func (c *Configer) DefaultString(key, defaultVal string) string {
	value, err := c.String(strings.ToLower(key))
	if err != nil {
		return defaultVal
	}
	return value
}
// Bool parses the value stored under key; only the (case-insensitive)
// literals "true" and "false" are accepted.
func (c *Configer) Bool(key string) (bool, error) {
	key = strings.ToLower(key)
	value, err := c.String(key)
	if err != nil {
		return false, err
	}
	value = strings.ToLower(value)
	switch value {
	case "true":
		return true, nil
	case "false":
		return false, nil
	default:
		return false, fmt.Errorf("invalid bool value! key:%v value:%v", key, value)
	}
}
// DefaultBool returns the boolean for key, or defaultVal on any error.
func (c *Configer) DefaultBool(key string, defaultVal bool) bool {
	value, err := c.Bool(key)
	if err != nil {
		return defaultVal
	}
	return value
}
// Int parses the value stored under key as a base-10 int.
func (c *Configer) Int(key string) (int, error) {
	value, err := c.String(strings.ToLower(key))
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(value)
}
// DefaultInt returns the int for key, or defaultVal on any error.
func (c *Configer) DefaultInt(key string, defaultVal int) int {
	value, err := c.Int(key)
	if err != nil {
		return defaultVal
	}
	return value
}
// Int64 parses the value stored under key as a base-10 int64.
func (c *Configer) Int64(key string) (int64, error) {
	value, err := c.String(strings.ToLower(key))
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(value, 10, 64)
}
// DefaultInt64 returns the int64 for key, or defaultVal on any error.
func (c *Configer) DefaultInt64(key string, defaultVal int64) int64 {
	value, err := c.Int64(key)
	if err != nil {
		return defaultVal
	}
	return value
}
// Float parses the value stored under key as a float64.
func (c *Configer) Float(key string) (float64, error) {
	value, err := c.String(strings.ToLower(key))
	if err != nil {
		return 0, err
	}
	return strconv.ParseFloat(value, 64)
}
// DefaultFloat returns the float64 for key, or defaultVal on any error.
func (c *Configer) DefaultFloat(key string, defaultVal float64) float64 {
	value, err := c.Float(key)
	if err != nil {
		return defaultVal
	}
	return value
}
// Map collects every "prefix::sub" entry registered under prefix into a
// map keyed by the sub-key.
func (c *Configer) Map(prefix string) (map[string]string, error) {
	subs, ok := c.prefix[prefix]
	if !ok {
		return nil, fmt.Errorf("prefix is not exist! prefix:%v", prefix)
	}
	results := make(map[string]string, len(subs))
	for _, sub := range subs {
		if value, found := c.data[prefix+"::"+sub]; found {
			results[sub] = value
		}
	}
	return results, nil
}
// Slice returns the values registered under prefix ordered by sub-key.
// c.sortInt (defined elsewhere) presumably sorts the sub-keys numerically
// and returns a lookup back to the original strings — TODO confirm against
// its implementation.
func (c *Configer) Slice(prefix string) ([]string, error) {
	subs, ok := c.prefix[prefix]
	if !ok {
		return nil, fmt.Errorf("prefix is not exist! prefix:%v", prefix)
	}
	sort, sortmap := c.sortInt(subs)
	results := make([]string, 0, len(subs))
	for _, index := range sort {
		if orgSub, ok := sortmap[index]; ok {
			if value, ok := c.data[prefix+"::"+orgSub]; ok {
				results = append(results, value)
			}
		}
	}
	return results, nil
}
|
package handlers
import (
"bytes"
"github.com/go-webauthn/webauthn/protocol"
"github.com/go-webauthn/webauthn/webauthn"
"github.com/authelia/authelia/v4/internal/middlewares"
"github.com/authelia/authelia/v4/internal/model"
"github.com/authelia/authelia/v4/internal/regulation"
"github.com/authelia/authelia/v4/internal/session"
)
// WebAuthnAssertionGET handler starts the assertion ceremony.
// It loads the user's registered credentials, begins a WebAuthn login,
// stores the session data on the user session and writes the credential
// assertion options to the response as JSON. All failures respond 401.
func WebAuthnAssertionGET(ctx *middlewares.AutheliaCtx) {
	var (
		w           *webauthn.WebAuthn
		user        *model.WebAuthnUser
		userSession session.UserSession
		err         error
	)
	if userSession, err = ctx.GetSession(); err != nil {
		ctx.Logger.WithError(err).Error("Error occurred retrieving user session")
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if w, err = newWebAuthn(ctx); err != nil {
		ctx.Logger.Errorf("Unable to configure %s during assertion challenge for user '%s': %+v", regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if user, err = getWebAuthnUser(ctx, userSession); err != nil {
		ctx.Logger.Errorf("Unable to create %s assertion challenge for user '%s': %+v", regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	// Restrict the ceremony to the credentials this user registered.
	var opts = []webauthn.LoginOption{
		webauthn.WithAllowedCredentials(user.WebAuthnCredentialDescriptors()),
	}
	extensions := map[string]any{}
	// Legacy FIDO U2F credentials need the appid extension to be usable.
	if user.HasFIDOU2F() {
		extensions["appid"] = w.Config.RPOrigin
	}
	if len(extensions) != 0 {
		opts = append(opts, webauthn.WithAssertionExtensions(extensions))
	}
	var assertion *protocol.CredentialAssertion
	// BeginLogin returns both the client-facing assertion and the server
	// session data that the POST handler will validate against.
	if assertion, userSession.WebAuthn, err = w.BeginLogin(user, opts...); err != nil {
		ctx.Logger.Errorf("Unable to create %s assertion challenge for user '%s': %+v", regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if err = ctx.SaveSession(userSession); err != nil {
		ctx.Logger.Errorf(logFmtErrSessionSave, "assertion challenge", regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if err = ctx.SetJSONBody(assertion); err != nil {
		ctx.Logger.Errorf(logFmtErrWriteResponseBody, regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
}
// WebAuthnAssertionPOST handler completes the assertion ceremony after verifying the challenge.
// It validates the client's response against the session data created by
// WebAuthnAssertionGET, updates the matching device's sign-in counters,
// regenerates the session and marks the second factor as completed.
//
//nolint:gocyclo
func WebAuthnAssertionPOST(ctx *middlewares.AutheliaCtx) {
	var (
		userSession session.UserSession
		err         error
		w           *webauthn.WebAuthn
		bodyJSON    bodySignWebAuthnRequest
	)
	if err = ctx.ParseBody(&bodyJSON); err != nil {
		ctx.Logger.Errorf(logFmtErrParseRequestBody, regulation.AuthTypeWebAuthn, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if userSession, err = ctx.GetSession(); err != nil {
		ctx.Logger.WithError(err).Error("Error occurred retrieving user session")
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	// The GET handler must have stored challenge data first.
	if userSession.WebAuthn == nil {
		ctx.Logger.Errorf("WebAuthn session data is not present in order to handle assertion for user '%s'. This could indicate a user trying to POST to the wrong endpoint, or the session data is not present for the browser they used.", userSession.Username)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if w, err = newWebAuthn(ctx); err != nil {
		ctx.Logger.Errorf("Unable to configure %s during assertion challenge for user '%s': %+v", regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	var (
		assertionResponse *protocol.ParsedCredentialAssertionData
		credential        *webauthn.Credential
		user              *model.WebAuthnUser
	)
	if assertionResponse, err = protocol.ParseCredentialRequestResponseBody(bytes.NewReader(ctx.PostBody())); err != nil {
		// Fixed log message typo: was "assertionfor".
		ctx.Logger.Errorf("Unable to parse %s assertion for user '%s': %+v", regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if user, err = getWebAuthnUser(ctx, userSession); err != nil {
		ctx.Logger.Errorf("Unable to load %s devices for assertion challenge for user '%s': %+v", regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if credential, err = w.ValidateLogin(user, *userSession.WebAuthn, assertionResponse); err != nil {
		_ = markAuthenticationAttempt(ctx, false, nil, userSession.Username, regulation.AuthTypeWebAuthn, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	// Persist the updated sign count / clone warning on the device that
	// produced this credential.
	var found bool
	for _, device := range user.Devices {
		if bytes.Equal(device.KID.Bytes(), credential.ID) {
			device.UpdateSignInInfo(w.Config, ctx.Clock.Now(), credential.Authenticator.SignCount)
			found = true
			if err = ctx.Providers.StorageProvider.UpdateWebAuthnDeviceSignIn(ctx, device.ID, device.RPID, device.LastUsedAt, device.SignCount, device.CloneWarning); err != nil {
				ctx.Logger.Errorf("Unable to save %s device signin count for assertion challenge for user '%s': %+v", regulation.AuthTypeWebAuthn, userSession.Username, err)
				respondUnauthorized(ctx, messageMFAValidationFailed)
				return
			}
			break
		}
	}
	if !found {
		ctx.Logger.Errorf("Unable to save %s device signin count for assertion challenge for user '%s' device '%x' count '%d': unable to find device", regulation.AuthTypeWebAuthn, userSession.Username, credential.ID, credential.Authenticator.SignCount)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	// Regenerate the session id after successful authentication.
	if err = ctx.RegenerateSession(); err != nil {
		ctx.Logger.Errorf(logFmtErrSessionRegenerate, regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if err = markAuthenticationAttempt(ctx, true, nil, userSession.Username, regulation.AuthTypeWebAuthn, nil); err != nil {
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	userSession.SetTwoFactorWebAuthn(ctx.Clock.Now(),
		assertionResponse.Response.AuthenticatorData.Flags.UserPresent(),
		assertionResponse.Response.AuthenticatorData.Flags.UserVerified())
	if err = ctx.SaveSession(userSession); err != nil {
		ctx.Logger.Errorf(logFmtErrSessionSave, "removal of the assertion challenge and authentication time", regulation.AuthTypeWebAuthn, userSession.Username, err)
		respondUnauthorized(ctx, messageMFAValidationFailed)
		return
	}
	if bodyJSON.Workflow == workflowOpenIDConnect {
		handleOIDCWorkflowResponse(ctx, bodyJSON.TargetURL, bodyJSON.WorkflowID)
	} else {
		Handle2FAResponse(ctx, bodyJSON.TargetURL)
	}
}
|
package main
import "fmt"
// hero describes a character used by the map examples below.
type hero struct {
	name  string // display name
	age   int    // age — presumably years; TODO confirm
	power int    // combat/power rating
}
// test17 shows that a map argument is a reference: mutating an entry here
// is visible to the caller.
func test17(m map[int]hero) {
	// A map element is not addressable, so `m[102].power = 89` would not
	// compile. Copy the struct out, change it, and store it back.
	h := m[102]
	h.power = 89
	m[102] = h
	fmt.Println(m)
	fmt.Printf("%p\n", m)
}
// main1701 stores structs as map values and deletes one entry.
func main1701() {
	heroes := make(map[int]hero)
	// Map iteration order is unspecified; no ordering is assumed here.
	heroes[101] = hero{"钢铁侠", 30, 100}
	heroes[102] = hero{"美队", 30, 90}
	fmt.Println(heroes)
	delete(heroes, 102)
	fmt.Println(heroes)
}
// main1702 uses a map whose values are slices of hero.
func main1702() {
	groups := make(map[int][]hero)
	groups[101] = []hero{
		{"钢铁侠", 30, 100},
		{"蜘蛛侠", 17, 80},
	}
	groups[101] = append(groups[101], hero{"星爵", 30, 10})
	fmt.Println(groups)
	groups[102] = []hero{{"美队", 30, 90}}
	groups[102] = append(groups[102], hero{"冬兵", 30, 75})
	fmt.Println(groups[102][0])
}
// main builds a map of heroes and hands it to test17 to demonstrate that
// a map is passed by reference: the callee's mutation is visible here.
func main() {
	heroes := make(map[int]hero)
	// Map iteration order is unspecified; no ordering is assumed here.
	heroes[101] = hero{"钢铁侠", 30, 100}
	heroes[102] = hero{"美队", 30, 90}
	test17(heroes)
	fmt.Println(heroes)
	fmt.Printf("%p\n", heroes)
}
|
package requests
import (
"encoding/json"
"testing"
walletmodels "github.com/appditto/pippin_nano_wallet/libs/wallet/models"
"github.com/mitchellh/mapstructure"
"github.com/stretchr/testify/assert"
)
// TestEncodeProcessRequest asserts that a ProcessRequest marshals to the
// exact JSON wire format, including the empty work/signature fields.
func TestEncodeProcessRequest(t *testing.T) {
	block := walletmodels.StateBlock{
		Type:           "state",
		Hash:           "1234",
		Account:        "1",
		Previous:       "2",
		Representative: "3",
		Balance:        "4",
		Link:           "5",
	}
	req := ProcessRequest{
		BaseRequest: BaseRequest{Action: "process"},
		Block:       block,
		JsonBlock:   true,
	}
	encoded, err := json.Marshal(req)
	assert.Nil(t, err)
	assert.Equal(t, `{"action":"process","json_block":true,"block":{"type":"state","hash":"1234","account":"1","previous":"2","representative":"3","balance":"4","link":"5","work":"","signature":""}}`, string(encoded))
}
// TestDecodeProcessRequest asserts that the wire JSON unmarshals back into
// a fully populated ProcessRequest.
func TestDecodeProcessRequest(t *testing.T) {
	payload := `{"action":"process","json_block":true,"block":{"type":"state","hash":"1234","account":"1","previous":"2","representative":"3","balance":"4","link":"5","work":"","signature":""}}`
	var req ProcessRequest
	err := json.Unmarshal([]byte(payload), &req)
	assert.Nil(t, err)
	assert.Equal(t, "process", req.Action)
	assert.Equal(t, true, req.JsonBlock)
	b := req.Block
	assert.Equal(t, "state", b.Type)
	assert.Equal(t, "1234", b.Hash)
	assert.Equal(t, "1", b.Account)
	assert.Equal(t, "2", b.Previous)
	assert.Equal(t, "3", b.Representative)
	assert.Equal(t, "4", b.Balance)
	assert.Equal(t, "5", b.Link)
	assert.Equal(t, "", b.Work)
	assert.Equal(t, "", b.Signature)
}
// TestMapStructureDecodeProcessRequest asserts that a generic map decodes
// into a ProcessRequest via mapstructure.
func TestMapStructureDecodeProcessRequest(t *testing.T) {
	request := map[string]interface{}{
		"action":     "process",
		"json_block": true,
		"block": map[string]interface{}{
			"type":           "state",
			"hash":           "1234",
			"account":        "1",
			"previous":       "2",
			"representative": "3",
			"balance":        "4",
			"link":           "5",
			"work":           "",
			"signature":      "",
		},
	}
	var decoded ProcessRequest
	// The Decode error was previously ignored; a decoding failure would
	// have surfaced only as confusing downstream assertion failures.
	err := mapstructure.Decode(request, &decoded)
	assert.Nil(t, err)
	assert.Equal(t, "process", decoded.Action)
	assert.Equal(t, true, decoded.JsonBlock)
	assert.Equal(t, "state", decoded.Block.Type)
	assert.Equal(t, "1234", decoded.Block.Hash)
	assert.Equal(t, "1", decoded.Block.Account)
	assert.Equal(t, "2", decoded.Block.Previous)
	assert.Equal(t, "3", decoded.Block.Representative)
	assert.Equal(t, "4", decoded.Block.Balance)
	assert.Equal(t, "5", decoded.Block.Link)
	assert.Equal(t, "", decoded.Block.Work)
	assert.Equal(t, "", decoded.Block.Signature)
}
|
// can't declare a function inside a block
package main
func main(){
	// Intentionally invalid: Go does not allow a named function
	// declaration inside another function body, so this file exists only
	// to demonstrate the resulting compile error. The legal alternative
	// is a function literal: x := func() {}.
	func x(){
	}
}
package clickhousespanstore
import (
"database/sql"
"time"
"github.com/hashicorp/go-hclog"
)
// WriteParams contains parameters that are shared between WriteWorker`s.
type WriteParams struct {
	logger     hclog.Logger  // structured logger shared by the workers
	db         *sql.DB       // ClickHouse connection pool
	indexTable TableName     // table receiving span index rows
	spansTable TableName     // table receiving encoded span rows
	encoding   Encoding      // serialization format for spans
	delay      time.Duration // flush/batching delay between writes — presumably; TODO confirm in worker code
}
|
package base
import (
"pb/c2s"
"pb/s2c"
"server"
"server/libs/log"
"server/libs/rpc"
"server/share"
"github.com/golang/protobuf/proto"
)
// Account bundles the account-related RPC handlers (login, role
// selection, role creation).
type Account struct {
	SendBuf []byte // preallocated scratch buffer (64KiB cap, see NewAccount)
}
// RegisterCallback wires the account RPC entry points into the servicer.
// Receiver renamed from t to a for consistency with the other Account methods.
func (a *Account) RegisterCallback(s rpc.Servicer) {
	s.RegisterCallback("SelectUser", a.SelectUser)
	s.RegisterCallback("CreatePlayer", a.CreatePlayer)
	s.RegisterCallback("Login", a.Login)
}
// SelectUser handles a client's role-selection request. The player must
// already be in the logged-in state; the actual selection is delegated to
// the DB bridge. Always replies (0, nil) — errors are only logged.
func (a *Account) SelectUser(mailbox rpc.Mailbox, msg *rpc.Message) (errcode int32, reply *rpc.Message) {
	args := &c2s.Selectuser{}
	if server.Check(server.ParseProto(msg, args)) {
		return 0, nil
	}
	p := App.Players.FindPlayer(mailbox.Uid)
	if p == nil {
		// player not found
		return 0, nil
	}
	player := p.(*BasePlayer)
	if player.State != STATE_LOGGED {
		log.LogWarning("player state not logged")
		return 0, nil
	}
	player.ChooseRole = args.GetRolename()
	player.Name = player.ChooseRole
	player.UpdateHash()
	// Hand the selection off to the database layer.
	err := App.DbBridge.selectUser(mailbox, player.Account, args.GetRolename(), int(args.GetRoleindex()))
	if err != nil {
		log.LogError(err)
	}
	return 0, nil
}
// CreatePlayer handles a client's role-creation request. It builds a
// temporary role object, persists it through the DB bridge and destroys
// the in-memory object again. Always replies (0, nil); errors are logged.
func (a *Account) CreatePlayer(mailbox rpc.Mailbox, msg *rpc.Message) (errcode int32, reply *rpc.Message) {
	args := &c2s.Create{}
	if server.Check(server.ParseProto(msg, args)) {
		return 0, nil
	}
	p := App.Players.FindPlayer(mailbox.Uid)
	if p == nil {
		// player not found
		return 0, nil
	}
	player := p.(*BasePlayer)
	if player.State != STATE_LOGGED {
		log.LogWarning("player state not logged")
		return 0, nil
	}
	obj, err := App.Kernel().CreateRole("Player", args)
	if err != nil {
		log.LogError(err)
		return 0, nil
	}
	// Snapshot the object's persistable state, save it, then free the
	// temporary object.
	save := share.GetSaveData(obj)
	server.Check(App.DbBridge.createRole(mailbox, obj, player.Account, args.GetName(), int(args.GetIndex()), save))
	App.Kernel().Destroy(obj.ObjectId())
	return 0, nil
}
// Login validates the client's login key, registers a new player in the
// logged-in state and either resumes a previously chosen role or fetches
// the account's role list. On key mismatch an error message is mailed
// back to the client. Always replies (0, nil).
func (a *Account) Login(mailbox rpc.Mailbox, msg *rpc.Message) (errcode int32, reply *rpc.Message) {
	args := &c2s.Enterbase{}
	if server.Check(server.ParseProto(msg, args)) {
		return 0, nil
	}
	if App.Login.checkClient(args.GetUser(), args.GetKey()) {
		if p, err := App.Players.AddNewPlayer(mailbox.Uid); err == nil {
			pl := p.(*BasePlayer)
			log.LogMessage("add player:", mailbox)
			pl.Account = args.GetUser()
			pl.State = STATE_LOGGED
			pl.UpdateHash()
			// A non-empty role name means the client already chose a
			// role: go straight to selection.
			if args.GetRolename() != "" {
				pl.ChooseRole = args.GetRolename()
				server.Check(App.DbBridge.selectUser(mailbox, pl.Account, args.GetRolename(), int(args.GetRoleindex())))
				return 0, nil
			}
			server.Check(App.DbBridge.getUserInfo(mailbox, args.GetUser()))
			return 0, nil
		}
		log.LogError("player add failed", mailbox)
		return 0, nil
	} else {
		log.LogDebug(args.GetUser(), args.GetKey())
		// Notify the client that the login key was rejected.
		err := &s2c.Error{}
		err.ErrorNo = proto.Int32(share.ERROR_LOGIN_FAILED)
		server.Check(server.MailTo(nil, &mailbox, "Login.Error", err))
		return 0, nil
	}
}
// NewAccount allocates an Account with a preallocated 64KiB send buffer.
func NewAccount() *Account {
	return &Account{
		SendBuf: make([]byte, 0, 64*1024),
	}
}
|
package main
import (
"testing"
)
// TestAutoCamelCase checks that bare CamelCase words are turned into wiki
// links while pre-existing markdown links are left untouched.
func TestAutoCamelCase(t *testing.T) {
	cases := []struct {
		in   string
		want string
	}{
		{"WhatEver", "[WhatEver](/view/WhatEver)"},
		{"[AnExampleLink](http://example.com)", "[AnExampleLink](http://example.com)"},
	}
	for _, c := range cases {
		got := AutoCamelCase([]byte(c.in), "/view/")
		if string(got) != c.want {
			t.Errorf("mismatch:\n <%s>\n !=\n <%s>", got, c.want)
		}
	}
}
|
package friends
import (
"encoding/json"
"fmt"
"os"
)
// DataStore is the persistence contract for a friends list.
type DataStore interface {
	// Refresh reloads the list from the backing store.
	Refresh() error
	// Save persists the current list.
	Save() error
	// Marshal returns the list serialized for storage.
	Marshal() ([]byte, error)
	// Add inserts a friend, failing on duplicates.
	Add(Friend) error
	// Delete removes the friend with the given name, reporting success.
	Delete(name string) bool
}
// Implements DataStore
// FriendStore keeps the friends list in memory, backed by a JSON file.
type FriendStore struct {
	friends []Friend // in-memory list, mirrored to file by Save
	file    string   // path of the backing JSON file
}
// NewFriendStore returns an empty FriendStore backed by datafile.
func NewFriendStore(datafile string) *FriendStore {
	return &FriendStore{
		friends: make([]Friend, 0),
		file:    datafile,
	}
}
// Refresh loads the friends list from disk. If the backing file does not
// exist yet, it is created by persisting the current (empty) list.
// Receiver renamed from the non-idiomatic `self` to `fs`.
func (fs *FriendStore) Refresh() error {
	b, err := os.ReadFile(fs.file)
	if err != nil {
		if os.IsNotExist(err) {
			// First run: materialize the file so later reads succeed.
			return fs.Save()
		}
		return err
	}
	return json.Unmarshal(b, &fs.friends)
}
// Save writes the JSON-marshaled friends list to the backing file with
// permissions 0644. Receiver renamed from the non-idiomatic `self` to `fs`.
func (fs *FriendStore) Save() error {
	b, err := fs.Marshal()
	if err != nil {
		return err
	}
	return os.WriteFile(fs.file, b, 0644)
}
// Marshal returns the friends list encoded as JSON.
// Receiver renamed from the non-idiomatic `self` to `fs`; the redundant
// error branch that just forwarded (nil, err) was collapsed.
func (fs *FriendStore) Marshal() ([]byte, error) {
	return json.Marshal(fs.friends)
}
// Add appends newfriend to the list, rejecting duplicates as defined by
// Friend.Equals. Receiver renamed from the non-idiomatic `self` to `fs`.
func (fs *FriendStore) Add(newfriend Friend) error {
	for _, existing := range fs.friends {
		if existing.Equals(newfriend) {
			return fmt.Errorf("Entity %#q already exists", newfriend.Name)
		}
	}
	// Need a pointer receiver here to modify the stored slice; a value
	// receiver would mutate a copy.
	fs.friends = append(fs.friends, newfriend)
	return nil
}
// Delete removes the first friend whose Name matches, reporting whether a
// removal happened. Receiver renamed from the non-idiomatic `self` to `fs`.
func (fs *FriendStore) Delete(name string) bool {
	for i, f := range fs.friends {
		if f.Name == name {
			fs.friends = append(fs.friends[:i], fs.friends[i+1:]...)
			return true
		}
	}
	return false
}
|
package auth
// Plugin is the (currently stateless) auth plugin type; presumably a
// placeholder for methods defined elsewhere — TODO confirm.
type Plugin struct {
}
|
package main
import (
"encoding/binary"
"errors"
"io"
"net"
"time"
)
// packetIDEnum holds the last packet ID handed out; IDs start at 1.
var packetIDEnum int32 = 0

// getNewPacketID returns the next sequential packet ID.
// NOTE(review): the increment is not atomic, so concurrent callers would
// race; acceptable for the single-goroutine usage documented on
// SendCommandNaively.
func getNewPacketID() int32 {
	packetIDEnum++
	return packetIDEnum
}
// Client is a minecraft rcon client.
type Client struct {
	conn       net.Conn // TCP connection to the rcon server
	isLoggedIn bool     // set once Login succeeds
}
// SendPacket sends rcon packet with packetID, type and payload.
// Wire layout (little endian): length, packetID, type, payload, 2 NUL bytes,
// where length counts everything after the length field itself.
func (client *Client) SendPacket(packetID int32, _type int32, payload string) (err error) {
	lengthOfPacket := int32(4 + 4 + len(payload) + 2)
	buf := make([]byte, 4+lengthOfPacket)
	binary.LittleEndian.PutUint32(buf, uint32(lengthOfPacket))
	binary.LittleEndian.PutUint32(buf[4:], uint32(packetID))
	binary.LittleEndian.PutUint32(buf[8:], uint32(_type))
	copy(buf[12:], payload)
	// The write error was previously discarded, silently losing packets
	// on a broken connection.
	_, err = client.conn.Write(buf)
	return
}
// SendLoginPacket is a wrapper of SendPacket with packet type 3 (login);
// packetID identifies this request in the server's response.
func (client *Client) SendLoginPacket(packetID int32, password string) error {
	return client.SendPacket(packetID, 3, password)
}
// SendCommandPacket is a wrapper of SendPacket with packet type 2
// (command execution); packetID identifies this request in the response.
func (client *Client) SendCommandPacket(packetID int32, cmd string) error {
	return client.SendPacket(packetID, 2, cmd)
}
// SendPaddingPacket sends a invalid packet with a packetID different from the packetID for the actually command
// to identify the end of a response sequence.
// It uses packet type 0 and an empty payload.
func (client *Client) SendPaddingPacket(packetID int32) error {
	return client.SendPacket(packetID, 0, "")
}
// RecvPacket receives one rcon packet and returns its ID, type and payload.
// The payload excludes the two trailing NUL bytes.
func (client *Client) RecvPacket() (packetID int32, _type int32, payload string, err error) {
	var lengthOfPacket int32
	err = binary.Read(client.conn, binary.LittleEndian, &lengthOfPacket)
	if err != nil {
		return
	}
	// A well-formed packet holds at least ID (4) + type (4) + 2 NULs.
	// Previously a short length caused an out-of-range slice panic below.
	if lengthOfPacket < 10 {
		err = errors.New("rcon packet too short")
		return
	}
	bytesBuf := make([]byte, lengthOfPacket)
	// The read error was previously ignored, so a truncated stream was
	// parsed as zero bytes.
	if _, err = io.ReadFull(client.conn, bytesBuf); err != nil {
		return
	}
	packetID = int32(binary.LittleEndian.Uint32(bytesBuf[:4]))
	_type = int32(binary.LittleEndian.Uint32(bytesBuf[4:8]))
	payload = string(bytesBuf[8 : lengthOfPacket-2])
	return
}
// NewClient dials serverAddress over TCP and returns a not-yet-logged-in
// Client.
func NewClient(serverAddress string) (Client, error) {
	conn, err := net.Dial("tcp", serverAddress)
	if err != nil {
		return Client{}, err
	}
	return Client{conn: conn, isLoggedIn: false}, nil
}
// Login logs the client in by sending a type-3 packet with the password.
// A response packet ID of -1 signals a rejected password; any other
// mismatching ID is treated as a protocol error.
func (client *Client) Login(password string) error {
	packetID := getNewPacketID()
	err := client.SendLoginPacket(packetID, password)
	if err != nil {
		return err
	}
	recvPacketID, _, _, err := client.RecvPacket()
	if err != nil {
		return err
	}
	if recvPacketID == -1 {
		// Error strings lower-cased per Go convention.
		return errors.New("wrong password")
	} else if recvPacketID != packetID {
		return errors.New("unexpected packet id while login")
	}
	client.isLoggedIn = true
	return nil
}
// SendCommandNaively is a naive implementation of sending rcon command
// it sends the command and return the first packet that has the packet id sent by this function
// it only receive the first response packet
// it discards all the other packets that does not have the expected packet id
// thus, it does not support go routine
func (client *Client) SendCommandNaively(command string) (string, error) {
	packetID := getNewPacketID()
	err := client.SendCommandPacket(packetID, command)
	if err != nil {
		return "", err
	}
	// for some reason we need a delay
	time.Sleep(time.Millisecond)
	// The padding packet uses packetID offset by 2<<29 so its echo marks
	// the end of the (possibly multi-packet) command response.
	err = client.SendPaddingPacket(packetID + 2<<29)
	if err != nil {
		return "", err
	}
	var fullResponse string
	for {
		responsePacketID, _type, response, err := client.RecvPacket()
		if err != nil {
			return "", err
		}
		if _type == 0 {
			if responsePacketID == packetID {
				// Part of our command's response: accumulate it.
				fullResponse += response
			} else if responsePacketID == packetID+2<<29 {
				// Echo of the padding packet: the response is complete.
				return fullResponse, nil
			} else {
				return "", errors.New("unknown packet id")
			}
		} else {
			return "", errors.New("unexpected packet type")
		}
	}
}
|
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
// insert merges newInterval into a sorted, non-overlapping interval list,
// returning a new sorted, non-overlapping list. Three phases: copy the
// intervals strictly before newInterval, merge every interval overlapping
// it, then copy the remainder.
func insert(intervals []Interval, newInterval Interval) []Interval {
	ret := make([]Interval, 0)
	idx := 0
	// Phase 1: intervals ending before newInterval starts are unaffected.
	for idx < len(intervals) {
		inv := intervals[idx]
		if inv.End < newInterval.Start {
			ret = append(ret, inv)
			idx += 1
		} else {
			break
		}
	}
	// Phase 2: absorb every interval that overlaps [overlapStart, overlapEnd].
	overlapStart, overlapEnd := newInterval.Start, newInterval.End
	for idx < len(intervals) {
		inv := intervals[idx]
		if inv.Start <= overlapEnd {
			overlapStart = min(overlapStart, inv.Start)
			overlapEnd = max(overlapEnd, inv.End)
			idx += 1
		} else {
			break
		}
	}
	ret = append(ret, Interval{overlapStart, overlapEnd})
	// Phase 3: the remaining intervals start after the merged interval.
	for ; idx < len(intervals); idx += 1 {
		ret = append(ret, intervals[idx])
	}
	return ret
}
|
package main
import (
"math"
"github.com/faiface/pixel"
"github.com/faiface/pixel/pixelgl"
)
// planet is a celestial body owned by a player. It orbits an anchor point
// (free-floating when anchor is nil), carries a fleet of ships and may
// have satellite planets orbiting it.
type planet struct {
	orb                        // embedded orbital state (pos, dist, vel, dir, anchor)
	*player                    // owning player
	satellites    []*planet    // planets orbiting this one
	ships         []*ship      // stationed fleet; nil entries are free slots
	shipsProduced float64      // fractional ships accumulated by production
	shipAngleMod  float64      // angular offset spinning the fleet ring
	radius        float64      // planet radius; drives production and sprite scale
	sprite        *pixelgl.Canvas // prerendered planet sprite
}
// newPlanet builds a planet at distance dist from anchor (or from the
// origin when anchor is nil), owned by player, with an initial fleet of
// radius/3 ships arranged around it.
func newPlanet(dist, radius, dir float64, vel pixel.Vec, anchor *pixel.Vec, player *player, sprite *pixelgl.Canvas) *planet {
	p := &planet{
		orb: orb{
			dist:   dist,
			anchor: anchor,
			vel:    vel,
			dir:    dir,
		},
		player:     player,
		radius:     radius,
		satellites: []*planet{},
		ships:      make([]*ship, int(radius/3)),
		sprite:     sprite,
	}
	// Start on the positive X axis relative to the anchor (or origin).
	if anchor != nil {
		p.pos.X = anchor.X + dist
	} else {
		p.pos.X = dist
	}
	for i := 0; i < len(p.ships); i++ {
		p.ships[i] = newShip(p, player)
	}
	// Place the initial fleet without advancing the ring rotation.
	p.setShips(0)
	objectCount++
	return p
}
// rotateGroup rotates the planet and adjusts the position of its
// satellites by the same positional delta.
func (p *planet) rotateGroup(dt float64) {
	delta := p.rotate(dt)
	for _, sat := range p.satellites {
		sat.pos.X += delta.X
		sat.pos.Y += delta.Y
	}
}
// update advances the planet by dt: it rotates the planet group, accrues ship
// production, materializes whole produced ships into free slots (appending
// when the ring is full), and repositions the ship ring.
func (p *planet) update(dt float64) {
	p.rotateGroup(dt)
	// Ship production depends on planet size: production = sqrt(radius)/5
	prod := math.Sqrt(p.radius) * productionFactor
	p.shipsProduced += prod * dt
	// Materialize one ship per whole unit accumulated.
	for p.shipsProduced >= 1 {
		nship := newShip(p, p.player)
		added := false
		// Reuse a free (nil) slot if one exists; otherwise append.
		// BUG FIX: the original indexed p.ships with the outer production
		// counter instead of the scan index and never broke out of the scan,
		// so it overwrote the same slot and filled every free slot at once.
		for j := 0; j < len(p.ships); j++ {
			if p.ships[j] == nil {
				p.ships[j] = nship
				added = true
				break
			}
		}
		if !added {
			p.ships = append(p.ships, nship)
		}
		p.shipsProduced--
	}
	p.setShips(dt)
}
// draw renders the planet sprite onto the world canvas, scaled by its radius,
// then draws every stationed ship.
// NOTE(review): the translation parameter is currently unused — confirm
// whether callers expect it to be applied here.
func (p *planet) draw(translation pixel.Matrix) {
	// TODO magic numbers
	p.sprite.DrawColorMask(worldCanvas, pixel.IM.Moved(p.pos).Scaled(p.pos, p.radius/30), nil)
	// Draw all ships stationed at this planet.
	for _, s := range p.ships {
		s.draw()
	}
}
// setShips evenly distributes ships around a planet on a ring, advancing the
// whole ring's rotation by dt radians per second (wrapped into [0, 2π)).
func (p *planet) setShips(dt float64) {
	amount := len(p.ships)
	step := (2 * math.Pi) / float64(amount)
	p.shipAngleMod += dt
	if p.shipAngleMod > 2*math.Pi {
		p.shipAngleMod -= 2 * math.Pi
	}
	for i := 0; i < amount; i++ {
		// Place the ship at its orbit distance on the +X axis, then rotate it
		// around the planet to its slot angle plus the ring offset.
		p.ships[i].pos.X = p.pos.X + p.ships[i].dist
		p.ships[i].pos.Y = p.pos.Y
		omega := float64(i) * step
		rotatePoint(&p.pos, &p.ships[i].pos, omega+p.shipAngleMod)
	}
}
// ship is a single combat/transport unit orbiting a planet, owned by a player.
type ship struct {
	orb
	*player
}
// newShip returns a ship anchored to planet and owned by player, reusing an
// entry from the recycledShips pool when one is available.
func newShip(planet *planet, player *player) *ship {
	// Test if there is a ship available for recycling.
	var sp *ship
	i := -1
	for i, sp = range recycledShips {
		if sp != nil {
			break
		}
	}
	// sp is non-nil only if the scan found a recyclable ship; if the slice is
	// empty or all entries are nil, sp remains nil and a fresh ship is made.
	if sp != nil && i >= 0 {
		// Remove ship from recycled slice.
		recycledShips[i] = nil
	} else {
		// Create new ship.
		sp = &ship{}
	}
	objectCount++
	// TODO remove magic numbers
	sp.dist = planet.radius * 2
	sp.anchor = &planet.pos
	sp.vel = pixel.V(5, 5)
	sp.dir = 1
	sp.player = player
	return sp
}
// draw renders the shared ship sprite into the ships batch at this ship's
// position, scaled to 1/8 size.
func (s *ship) draw() {
	sprites.ship.Draw(batches.ships, pixel.IM.Moved(s.pos).Scaled(s.pos, 0.125))
}
|
package model
import (
"errors"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/bson/primitive"
)
// MediaUser is the join document linking a User to a Media item. The lowercase
// user/media fields cache the related documents after a lookup.
type MediaUser struct {
	Model   `bson:",inline"`
	UserID  primitive.ObjectID `bson:"user_id"`
	MediaID primitive.ObjectID `bson:"media_id"`
	user    *User  // cached by User()/SetUser, not persisted
	media   *Media // cached by Media()/SetMedia, not persisted
}
// NewMediaUser returns an empty MediaUser ready to be populated.
func NewMediaUser() *MediaUser {
	return &MediaUser{}
}
// _Name returns the MongoDB collection name for this model.
func (u *MediaUser) _Name() string {
	return "media_user"
}
// CreateIfNotExist inserts the document only when it does not already exist
// (delegates to the package-level helper, which uses IsExist).
func (u *MediaUser) CreateIfNotExist() error {
	return CreateIfNotExist(u)
}
// IsExist reports whether a document with this (media_id, user_id) pair is
// already stored.
func (u *MediaUser) IsExist() bool {
	return IsExist(u, bson.M{
		"media_id": u.MediaID,
		"user_id":  u.UserID,
	})
}
// Media loads the Media document referenced by MediaID, caches it on the
// receiver, and returns it. It fails when the receiver has no ID or no
// MediaID set, or when the lookup itself fails.
func (u *MediaUser) Media() (*Media, error) {
	if u.ID == primitive.NilObjectID {
		return nil, errors.New("id is null")
	}
	if u.MediaID == primitive.NilObjectID {
		// BUG FIX: previously reported "role not found" — a copy-paste from
		// another model; this method deals with media.
		return nil, errors.New("media not found")
	}
	md := NewMedia()
	md.ID = u.MediaID
	if err := md.Find(); err != nil {
		return nil, err
	}
	u.media = md
	return md, nil
}
// SetMedia caches media on the receiver and records its ID for persistence.
func (u *MediaUser) SetMedia(media *Media) {
	u.media = media
	u.MediaID = media.ID
}
// User loads the User document referenced by UserID, caches it on the
// receiver, and returns it. It fails when the receiver has no ID or no
// UserID set, or when the lookup itself fails.
func (u *MediaUser) User() (*User, error) {
	if u.ID == primitive.NilObjectID {
		return nil, errors.New("id is null")
	}
	if u.UserID == primitive.NilObjectID {
		// BUG FIX: previously reported "permission not found" — a copy-paste
		// from another model; this method deals with users.
		return nil, errors.New("user not found")
	}
	user := NewUser()
	user.ID = u.UserID
	if err := user.Find(); err != nil {
		return nil, err
	}
	u.user = user
	return user, nil
}
// SetUser caches user on the receiver and records its ID for persistence.
func (u *MediaUser) SetUser(user *User) {
	u.user = user
	u.UserID = user.ID
}
// GetID returns the document's ObjectID (part of the model interface).
func (u *MediaUser) GetID() primitive.ObjectID {
	return u.ID
}
// SetID sets the document's ObjectID (part of the model interface).
func (u *MediaUser) SetID(id primitive.ObjectID) {
	u.ID = id
}
// Create inserts the document into its collection.
func (u *MediaUser) Create() error {
	return InsertOne(u)
}
// Update persists the document's current state by ID.
func (u *MediaUser) Update() error {
	return UpdateOne(u)
}
// Delete removes the document by ID.
func (u *MediaUser) Delete() error {
	return DeleteByID(u)
}
// Find loads the document by ID into the receiver.
func (u *MediaUser) Find() error {
	return FindByID(u)
}
|
package controllers
import (
"log"
"mick/models"
"net/http"
"text/template"
)
// check terminates the process via log.Fatal when err is non-nil; it is a
// no-op for a nil error.
func check(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
type PhotoHandler int
// ServeHTTP renders an HTML page listing every Photo record in the database.
func (h PhotoHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	photos := []models.Photo{}
	db, err := ConnDataBase()
	if err != nil {
		// BUG FIX: the original logged and kept going, then deferred Close on
		// a handle that may be nil after a failed connect.
		log.Println(err)
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}
	defer db.Close()
	db.Find(&photos)
	// NOTE(review): text/template does not HTML-escape values; if photo
	// fields can contain user input, switch to html/template.
	// BUG FIX: the template previously emitted an unterminated "</figure",
	// left <p> unclosed, and never closed <header> or the container <div>.
	const page = `
<!DOCTYPE html>
<html>
	<head>
		<meta charset="utf-8">
		<title>{{ .Titre }}</title>
		{{ .CSS }}
	</head>
	<body>
		<div class="container">
			<header>
				<h1>{{ .Titre }}</h1>
				{{ range $p := .photo }}
				<figure>
					<h4>{{ $p.Album }}</h4>
					<img src="/images/{{ $p.Name }}" style="max-width: 300px; height: auto" />
					<figcaption>
						<p>{{ $p.Legend }}</p>
					</figcaption>
				</figure>
				{{ end }}
			</header>
		</div>
	</body>
</html>
`
	t, err := template.New("page").Parse(page)
	check(err)
	model := map[string]interface{}{
		"Titre": "Photos",
		// BUG FIX: the template references .CSS but the map had no such key,
		// which rendered the literal text "<no value>" in the page head.
		"CSS":   "",
		"photo": photos,
	}
	err = t.Execute(w, model)
	check(err)
}
|
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"io"
"log"
"github.com/spf13/cobra"
"helm.sh/helm/v3/cmd/helm/require"
"helm.sh/helm/v3/pkg/action"
)
const showDesc = `
This command consists of multiple subcommands to display information about a chart
`
const showAllDesc = `
This command inspects a chart (directory, file, or URL) and displays all its content
(values.yaml, Chart.yaml, README)
`
const showValuesDesc = `
This command inspects a chart (directory, file, or URL) and displays the contents
of the values.yaml file
`
const showChartDesc = `
This command inspects a chart (directory, file, or URL) and displays the contents
of the Chart.yaml file
`
const readmeChartDesc = `
This command inspects a chart (directory, file, or URL) and displays the contents
of the README file
`
const showCRDsDesc = `
This command inspects a chart (directory, file, or URL) and displays the contents
of the CustomResourceDefinition files
`
// newShowCmd builds the `helm show` command with its all/values/chart/readme/
// crds subcommands. All subcommands share one action.Show client and differ
// only in the output format they select.
func newShowCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
	client := action.NewShowWithConfig(action.ShowAll, cfg)

	showCommand := &cobra.Command{
		Use:               "show",
		Short:             "show information of a chart",
		Aliases:           []string{"inspect"},
		Long:              showDesc,
		Args:              require.NoArgs,
		ValidArgsFunction: noCompletions, // Disable file completion
	}

	// Function providing dynamic auto-completion for the CHART argument.
	validArgsFunc := func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		if len(args) != 0 {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}
		return compListCharts(toComplete, true)
	}

	// runE builds the RunE implementation shared by every subcommand; the
	// five previous closures were identical except for the output format.
	runE := func(format action.ShowOutputFormat) func(*cobra.Command, []string) error {
		return func(cmd *cobra.Command, args []string) error {
			client.OutputFormat = format
			if err := addRegistryClient(client); err != nil {
				return err
			}
			output, err := runShow(args, client)
			if err != nil {
				return err
			}
			fmt.Fprint(out, output)
			return nil
		}
	}

	all := &cobra.Command{
		Use:               "all [CHART]",
		Short:             "show all information of the chart",
		Long:              showAllDesc,
		Args:              require.ExactArgs(1),
		ValidArgsFunction: validArgsFunc,
		RunE:              runE(action.ShowAll),
	}

	valuesSubCmd := &cobra.Command{
		Use:               "values [CHART]",
		Short:             "show the chart's values",
		Long:              showValuesDesc,
		Args:              require.ExactArgs(1),
		ValidArgsFunction: validArgsFunc,
		RunE:              runE(action.ShowValues),
	}

	chartSubCmd := &cobra.Command{
		Use:               "chart [CHART]",
		Short:             "show the chart's definition",
		Long:              showChartDesc,
		Args:              require.ExactArgs(1),
		ValidArgsFunction: validArgsFunc,
		RunE:              runE(action.ShowChart),
	}

	readmeSubCmd := &cobra.Command{
		Use:               "readme [CHART]",
		Short:             "show the chart's README",
		Long:              readmeChartDesc,
		Args:              require.ExactArgs(1),
		ValidArgsFunction: validArgsFunc,
		RunE:              runE(action.ShowReadme),
	}

	crdsSubCmd := &cobra.Command{
		Use:               "crds [CHART]",
		Short:             "show the chart's CRDs",
		Long:              showCRDsDesc,
		Args:              require.ExactArgs(1),
		ValidArgsFunction: validArgsFunc,
		RunE:              runE(action.ShowCRDs),
	}

	cmds := []*cobra.Command{all, readmeSubCmd, valuesSubCmd, chartSubCmd, crdsSubCmd}
	for _, subCmd := range cmds {
		addShowFlags(subCmd, client)
		showCommand.AddCommand(subCmd)
	}

	return showCommand
}
// addShowFlags attaches the flags shared by all `helm show` subcommands:
// --devel, chart-path options, and (for `values` only) --jsonpath. It also
// registers dynamic completion for the --version flag.
func addShowFlags(subCmd *cobra.Command, client *action.Show) {
	f := subCmd.Flags()

	f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")

	if subCmd.Name() == "values" {
		f.StringVar(&client.JSONPathTemplate, "jsonpath", "", "supply a JSONPath expression to filter the output")
	}
	addChartPathOptionsFlags(f, &client.ChartPathOptions)

	// Complete chart versions once the single CHART argument is present.
	err := subCmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		if len(args) != 1 {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}
		return compVersionFlag(args[0], toComplete)
	})

	if err != nil {
		log.Fatal(err)
	}
}
// runShow locates the chart named in args[0] and runs the configured show
// action on it, returning the rendered output. With --devel and no explicit
// version, it widens the version constraint to include prereleases.
func runShow(args []string, client *action.Show) (string, error) {
	debug("Original chart version: %q", client.Version)
	if client.Version == "" && client.Devel {
		debug("setting version to >0.0.0-0")
		client.Version = ">0.0.0-0"
	}

	cp, err := client.ChartPathOptions.LocateChart(args[0], settings)
	if err != nil {
		return "", err
	}
	return client.Run(cp)
}
// addRegistryClient builds an OCI registry client from the show client's TLS
// options and attaches it, so charts can be pulled from OCI registries.
func addRegistryClient(client *action.Show) error {
	registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile, client.InsecureSkipTLSverify)
	if err != nil {
		return fmt.Errorf("missing registry client: %w", err)
	}
	client.SetRegistryClient(registryClient)
	return nil
}
|
package main
import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
	"strings"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)
// main runs a BigQuery SQL statement and saves the results to a file.
// Args: [1] GCP project, [2] credentials file, [3] SQL, [4] output file
// (extension selects the format). Missing arguments abort with a usage hint.
func main() {
	if len(os.Args) < 5 {
		message := ""
		// Tell the user exactly which positional arguments are missing.
		switch len(os.Args) {
		case 1:
			message = "Missing 4 args: [1] Google Cloud Project Name [2] Filepath to Google Application Credentials [3] SQL statement [4] Output file"
		case 2:
			message = "Missing 3 args: [2] Filepath to Google Application Credentials [3] SQL statment [4] Output file"
		case 3:
			message = "Missing 2 args: [3] SQL statement [4] Output file"
		case 4:
			message = "Missing 1 arg: [4] Output file"
		}
		log.Fatal(message)
	}
	// The BigQuery client reads these environment variables.
	os.Setenv("GOOGLE_CLOUD_PROJECT", os.Args[1])
	os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", os.Args[2])
	sql := os.Args[3]
	outfile := os.Args[4]
	proj := os.Getenv("GOOGLE_CLOUD_PROJECT")
	if proj == "" {
		fmt.Println("GOOGLE_CLOUD_PROJECT environment variable must be set.")
		os.Exit(1)
	}
	rows, err := query(proj, sql)
	if err != nil {
		log.Fatal(err)
	}
	if err := saveResults(rows, outfile); err != nil {
		log.Fatal(err)
	}
}
// query runs the given SQL statement against BigQuery in project proj and
// returns a row iterator over the results.
func query(proj, sql string) (*bigquery.RowIterator, error) {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, proj)
	if err != nil {
		return nil, err
	}
	query := client.Query(sql)
	return query.Read(ctx)
}
// saveResults writes the query results to outfile; the file extension selects
// the format. Currently json, csv and tsv are supported.
func saveResults(iter *bigquery.RowIterator, outfile string) error {
	// BUG FIX: the original used strings.Split(outfile, ".")[1], which
	// panics when the name has no dot and picks the wrong segment for
	// names like "out.backup.csv". Use the text after the LAST dot.
	dot := strings.LastIndex(outfile, ".")
	if dot < 0 || dot == len(outfile)-1 {
		return fmt.Errorf("output file %q has no extension; json, csv and tsv are supported", outfile)
	}
	ext := outfile[dot+1:]
	file, err := os.Create(outfile)
	if err != nil {
		// Return instead of log.Fatal so the caller decides how to exit.
		return fmt.Errorf("cannot create file %q: %v", outfile, err)
	}
	defer file.Close()
	switch ext {
	case "json":
		err = saveJson(file, iter)
	case "csv":
		err = saveDelim(file, iter, ",")
	case "tsv":
		err = saveDelim(file, iter, "\t")
	default:
		os.Remove(outfile)
		return fmt.Errorf("%s is not a currently supported output type", ext)
	}
	return err
}
// saveJson writes all rows to file as a JSON array of objects, one object per
// row keyed by column name.
// BUG FIX: the hand-rolled concatenation produced invalid JSON for an empty
// result set (it sliced off the opening bracket), passed data to Fprintf as a
// format string (breaking on '%'), and never escaped quotes/newlines inside
// string values. encoding/json handles all of that.
func saveJson(file io.Writer, iter *bigquery.RowIterator) error {
	rows := make([]map[string]bigquery.Value, 0)
	for {
		var row map[string]bigquery.Value
		err := iter.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		rows = append(rows, row)
	}
	enc := json.NewEncoder(file)
	enc.SetIndent("", "  ")
	return enc.Encode(rows)
}
// saveDelim writes each row as a line of quoted values separated by delim.
// There are currently no headers, just rows of observations.
func saveDelim(file io.Writer, iter *bigquery.RowIterator, delim string) error {
	for {
		var row []bigquery.Value
		err := iter.Next(&row)
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		// BUG FIX: the original detected the last column by VALUE equality
		// (row[len(row)-1] == v), which ended the line early when a row
		// contained duplicate values and panics for non-comparable Values.
		// Compare positions instead.
		for i, v := range row {
			if i == len(row)-1 {
				fmt.Fprintf(file, "\"%v\"\n", v)
			} else {
				fmt.Fprintf(file, "\"%v\""+delim, v)
			}
		}
	}
}
|
package carriage
import "time"
type (
	// carriage is a shipment: one contract plus the goods being carried.
	carriage struct {
		Contract      contract       `json:"contract"`
		Nomenclatures []nomenclature `json:"nomenclatures"`
	}
	// contract describes the carriage agreement: route endpoints, packaging,
	// and the manager responsible for it.
	contract struct {
		Number      string  `json:"number"`
		CustomsLink string  `json:"customsLink"`
		From        store   `json:"from"`
		Before      store   `json:"before"`
		Packaging   string  `json:"packaging"`
		Manager     manager `json:"manager"`
	}
	// store is a named warehouse/endpoint with a postal address.
	store struct {
		Name    string `json:"name"`
		Address string `json:"address"`
	}
	// nomenclature is one line item of goods: name, quantity, and unit.
	nomenclature struct {
		Name    string  `json:"name"`
		Amount  float32 `json:"amount"`
		Measure string  `json:"measure"`
	}
	// manager is the contact person for a contract.
	manager struct {
		Name      string `json:"name"`
		PhotoLink string `json:"photolink"`
		Phone     string `json:"phone"`
	}
	// checkPoint is a waypoint on the route; Fact is nil until the point has
	// actually been passed.
	checkPoint struct {
		Id          int         `json:"id"`
		Name        string      `json:"name"`
		Address     string      `json:"address"`
		Coordinates coordinates `json:"coordinates"`
		Planned     time.Time   `json:"planned"`
		Fact        *time.Time  `json:"fact"`
	}
	// coordinates is a WGS-style latitude/longitude pair.
	coordinates struct {
		Latitude  float32 `json:"latitude"`
		Longitude float32 `json:"longitude"`
	}
)
|
package sm
import (
"github.com/jinzhu/gorm"
"github.com/qor/transition"
"github.com/tppgit/we_service/entity/order"
)
// Event is a named occurrence with an arbitrary payload object.
type Event struct {
	Obj  interface{}
	Name string
}
// EventHandler consumes emitted events.
type EventHandler interface {
	Emit(event Event)
}
// OrderSateMachine describes the order state machine: transition creation,
// state application, and per-state initialization hooks.
// NOTE(review): the name is a typo for "OrderStateMachine", but renaming it
// would break external users of this exported type.
type OrderSateMachine interface {
	CreateTransaction(from []order.OrderState, to order.OrderState, before func(o *order.Order), after func(o *order.Order)) *transition.StateMachine
	Apply(state order.OrderState, o *order.Order) error
	initStates()
	initResidentRequestState()
	initConfirmedState()
	initResidentPendingState()
	initServiceInProgressState()
	initPaymentInProgressState()
	initCompletedState()
	initCancelState()
}
// OrderMachine wraps a transition.StateMachine configured for orders.
type OrderMachine struct {
	*transition.StateMachine
	//EventHandler EventHandler `inject:"event_handler"`
	OrderService order.OrderService `inject:"order_service"`
}
// CreateOrderSM builds an OrderMachine and registers all order states and
// transitions on it.
func CreateOrderSM() *OrderMachine {
	sm := &OrderMachine{StateMachine: CreateSM(&order.Order{})}
	sm.initStates()
	return sm
}
// CreateTransaction registers the target state, every source state, and an
// event (named after the target state) that moves an order from any of the
// `from` states to `to`, wrapping the given before/after hooks.
func (s *OrderMachine) CreateTransaction(from []order.OrderState, to order.OrderState, before func(o *order.Order), after func(o *order.Order)) *transition.StateMachine {
	sm := s.StateMachine
	sm.State(string(to))
	var fString []string
	for _, f := range from {
		fString = append(fString, string(f))
		sm.State(string(f))
	}
	event := sm.Event(string(to))
	// Hook errors are swallowed: the value is asserted leniently and the
	// callbacks cannot veto the transition (they always return nil).
	event.To(string(to)).From(fString...).Before(func(value interface{}, tx *gorm.DB) error {
		o, _ := value.(*order.Order)
		before(o)
		return nil
	}).After(func(value interface{}, tx *gorm.DB) error {
		o, _ := value.(*order.Order)
		after(o)
		return nil
	})
	return sm
}
// Apply fires the event named after `state` on the order (no transaction).
func (s *OrderMachine) Apply(state order.OrderState, o *order.Order) error {
	return s.Trigger(string(state), o, nil)
}
// initStates registers every order state transition on the machine.
func (s *OrderMachine) initStates() {
	s.initResidentRequestState()
	s.initConfirmedState()
	s.initResidentPendingState()
	s.initServiceInProgressState()
	s.initPaymentInProgressState()
	s.initCompletedState()
	s.initCancelState()
}
// initResidentRequestState registers Pending -> NewRequest.
// NOTE(review): PendingState is a package-level identifier here, unlike the
// other transitions which use order.* states — confirm this is intentional.
func (s *OrderMachine) initResidentRequestState() {
	before := func(o *order.Order) {
	}
	after := func(o *order.Order) {
		//send email to resident regarding all changes
	}
	s.CreateTransaction([]order.OrderState{PendingState}, order.NewRequest, before, after)
}
// initConfirmedState registers {NewRequest, ResidentPending} -> RequestConfirmed.
func (s *OrderMachine) initConfirmedState() {
	before := func(o *order.Order) {
	}
	after := func(o *order.Order) {
		//send email to service provider
	}
	s.CreateTransaction([]order.OrderState{order.NewRequest, order.ResidentPending}, order.RequestConfirmed, before, after)
}
// initResidentPendingState registers {RequestConfirmed, NewRequest} -> ResidentPending.
func (s *OrderMachine) initResidentPendingState() {
	before := func(o *order.Order) {
	}
	after := func(o *order.Order) {
	}
	s.CreateTransaction([]order.OrderState{order.RequestConfirmed, order.NewRequest}, order.ResidentPending, before, after)
}
// initServiceInProgressState registers RequestConfirmed -> InProgress.
func (s *OrderMachine) initServiceInProgressState() {
	before := func(o *order.Order) {
	}
	after := func(o *order.Order) {
		//send email to service provider
	}
	s.CreateTransaction([]order.OrderState{order.RequestConfirmed}, order.InProgress, before, after)
}
// initPaymentInProgressState registers InProgress -> PaymentPending.
func (s *OrderMachine) initPaymentInProgressState() {
	before := func(o *order.Order) {
	}
	after := func(o *order.Order) {
		//send email to service provider
	}
	s.CreateTransaction([]order.OrderState{order.InProgress}, order.PaymentPending, before, after)
}
// initCompletedState registers PaymentPending -> Completed.
func (s *OrderMachine) initCompletedState() {
	before := func(o *order.Order) {
	}
	after := func(o *order.Order) {
		//send email to service provider
	}
	s.CreateTransaction([]order.OrderState{order.PaymentPending}, order.Completed, before, after)
}
// initCancelState registers cancellation from any pre-service state.
func (s *OrderMachine) initCancelState() {
	before := func(o *order.Order) {
	}
	after := func(o *order.Order) {
	}
	s.CreateTransaction([]order.OrderState{order.NewRequest, order.SPPending, order.ResidentPending, order.RequestConfirmed}, order.Cancel, before, after)
}
|
package game
// Game advances through rounds: Next moves to the following round and
// GetCurrentRound exposes the round in progress.
type Game interface {
	Next()
	GetCurrentRound() Round
}
|
package main
import (
"sort"
"github.com/heartchord/jxonline/gamestruct"
"github.com/lxn/walk"
)
// RoleTaskDataItem is one row of the role-task table: a task identifier and
// its value, plus the base fields (index, checked state) from DataModelItemBase.
type RoleTaskDataItem struct {
	DataModelItemBase
	TaskID    string // task identifier (column 1)
	TaskValue string // task payload/value (column 2)
}
// RoleTaskDataModel is the walk TableView model backing the role-task list.
type RoleTaskDataModel struct {
	DataModelBase
	items []*RoleTaskDataItem
}
// NewRoleTaskDataModel creates the model and loads the per-row icon.
// NOTE(review): a failed icon load is silently ignored (empty error branch),
// leaving itemIcon nil — decide whether that should be logged or surfaced.
func NewRoleTaskDataModel() *RoleTaskDataModel {
	var err error
	m := new(RoleTaskDataModel)
	m.itemIcon, err = walk.NewIconFromFile("../../gameresource/img/right-arrow2.ico")
	if err != nil {
	}
	m.ResetRows(nil)
	return m
}
// RowCount returns the number of rows in the model (walk.TableModel).
func (m *RoleTaskDataModel) RowCount() int {
	return len(m.items)
}
// Value returns the cell value for (row, col): 0 = index, 1 = task ID,
// 2 = task value. Any other column is a programming error and panics.
func (m *RoleTaskDataModel) Value(row, col int) interface{} {
	item := m.items[row]
	switch col {
	case 0:
		return item.Index
	case 1:
		return item.TaskID
	case 2:
		return item.TaskValue
	}
	panic("unexpected col")
}
// Checked reports whether the row's checkbox is set.
func (m *RoleTaskDataModel) Checked(row int) bool {
	return m.items[row].checked
}
// SetChecked sets the row's checkbox state; it never fails.
func (m *RoleTaskDataModel) SetChecked(row int, checked bool) error {
	m.items[row].checked = checked
	return nil
}
// Sort records the sort column/order, stably re-sorts the items, and notifies
// the view via SorterBase.
func (m *RoleTaskDataModel) Sort(col int, order walk.SortOrder) error {
	m.sortColumn, m.sortOrder = col, order
	sort.Stable(m)
	return m.SorterBase.Sort(col, order)
}
// Len implements sort.Interface.
func (m *RoleTaskDataModel) Len() int {
	return len(m.items)
}
// Less implements sort.Interface, comparing by the active sort column and
// inverting the result for descending order.
func (m *RoleTaskDataModel) Less(i int, j int) bool {
	a, b := m.items[i], m.items[j]
	// f applies the current sort direction to a raw "less" result.
	f := func(ls bool) bool {
		if m.sortOrder == walk.SortAscending { // ascending
			return ls
		}
		// descending
		return !ls
	}
	switch m.sortColumn {
	case 0:
		return f(a.Index < b.Index)
	case 1:
		return f(a.TaskID < b.TaskID)
	case 2:
		return f(a.TaskValue < b.TaskValue)
	}
	panic("Unreachable Column Index!")
}
// Swap implements sort.Interface, exchanging two items.
func (m *RoleTaskDataModel) Swap(i, j int) {
	m.items[i], m.items[j] = m.items[j], m.items[i]
}
// Image returns the shared row icon (may be nil if loading failed in the
// constructor).
func (m *RoleTaskDataModel) Image(row int) interface{} {
	return m.itemIcon
}
// ResetRows replaces the model's rows with the given task data and re-sorts.
// A nil or empty slice leaves the current rows untouched (and publishes no
// reset notification).
func (m *RoleTaskDataModel) ResetRows(data []gamestruct.TaskData) {
	if data == nil {
		return
	}
	dataCount := len(data)
	if dataCount <= 0 {
		return
	}
	m.items = make([]*RoleTaskDataItem, dataCount)
	for i := 0; i < dataCount; i++ {
		// Field 0 is the task ID, field 1 the task value (via reflection helper).
		fieldStrings := getStructFieldStrings(data[i])
		m.items[i] = &RoleTaskDataItem{}
		m.items[i].Index = i
		m.items[i].TaskID = fieldStrings[0]
		m.items[i].TaskValue = fieldStrings[1]
	}
	// Notify TableView and other interested parties about the reset.
	m.PublishRowsReset()
	m.Sort(m.sortColumn, m.sortOrder)
}
// SwitchRowCheckedState toggles a row's checkbox and notifies the view.
func (m *RoleTaskDataModel) SwitchRowCheckedState(idx int) {
	checked := m.Checked(idx)
	m.SetChecked(idx, !checked)
	m.PublishRowChanged(idx)
}
// Items exposes the backing item slice (callers must not mutate it while the
// view is attached without publishing a change).
func (m *RoleTaskDataModel) Items() []*RoleTaskDataItem {
	return m.items
}
|
// Copyright 2015 Matthew Collins
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"fmt"
"image"
"image/png"
"net/http"
"strconv"
"github.com/gorilla/mux"
)
// isoHead serves an isometric rendering of a player's head. URL vars: size
// (clamped to Config.MinSize..MaxSize, default 64), hat ("hat" overlays the
// hat layer), and uuid (player skin). Results are cached by a composite key.
func isoHead(rw http.ResponseWriter, rq *http.Request) {
	v := mux.Vars(rq)
	size, err := strconv.Atoi(v["size"])
	if err != nil {
		size = 64
	}
	if size > Config.MaxSize {
		size = Config.MaxSize
	} else if size < Config.MinSize {
		size = Config.MinSize
	}
	hat := v["hat"]
	if hat == "" {
		hat = "nohat"
	}
	id := fmt.Sprintf("head_iso:%d:%s:%s", size, hat, v["uuid"])
	rw.Write(getOrCreateEntry(id, func(entry *imageEntry) {
		skin := getSkinForID(v["uuid"])
		img, err := png.Decode(bytes.NewReader(skin.Data))
		if err != nil {
			// Leave entry.Data empty on a bad skin; the cached entry is
			// written to the response as-is.
			return
		}
		// Head texture faces in the standard skin layout (8x8 each).
		top := image.Rect(8, 0, 16, 8)
		left := image.Rect(0, 8, 8, 16)
		right := image.Rect(8, 8, 16, 16)
		out := image.NewNRGBA(image.Rect(0, 0, size, size))
		// With a hat, shrink the base head slightly so the hat overlay
		// (drawn at full size) surrounds it.
		fo := 0
		fs := size
		if hat == "hat" {
			fo += size / 32
			fs -= fo * 2
		}
		drawIsometricCube(out, fo, fo, fs, fs, img, top, left, right)
		if hat == "hat" {
			// Hat layer faces sit 32px to the right of the head faces.
			top = image.Rect(32+8, 0, 32+16, 8)
			left = image.Rect(32+0, 8, 32+8, 16)
			right = image.Rect(32+8, 8, 32+16, 16)
			drawIsometricCube(out, 0, 0, size, size, img, top, left, right)
		}
		var buf bytes.Buffer
		png.Encode(&buf, out)
		entry.Data = buf.Bytes()
	}).Data)
}
// drawIsometricCube draws a w x h isometric cube at (x, y) in out, sampling
// the three visible faces from the given source rectangles. Only fully
// opaque source pixels (alpha == 0xFFFF) are drawn.
func drawIsometricCube(out *image.NRGBA, x, y, w, h int, src image.Image, top, left, right image.Rectangle) {
	// Left and right faces: columns are sheared vertically by +/-0.5 per
	// x-step to produce the isometric slant.
	for tx := 0; tx < w/2; tx++ {
		for ty := 0; ty < h/2; ty++ {
			col := src.At(
				left.Min.X+int((float64(tx)/float64(w/2))*float64(left.Dx())),
				left.Min.Y+int((float64(ty)/float64(h/2))*float64(left.Dy())),
			)
			if _, _, _, a := col.RGBA(); a == 0xFFFF {
				out.Set(
					x+tx,
					(h/4)+y+ty+int(float64(tx)*0.5),
					col,
				)
			}
			col = src.At(
				right.Min.X+int((float64(tx)/float64(w/2))*float64(right.Dx())),
				right.Min.Y+int((float64(ty)/float64(h/2))*float64(right.Dy())),
			)
			if _, _, _, a := col.RGBA(); a == 0xFFFF {
				out.Set(
					x+tx+(w/2),
					(h/4)+y+ty+int(float64((w/2)-tx)*0.5),
					col,
				)
			}
		}
	}
	// Top face: iterate one pixel past each edge (clamped back into range)
	// so the diamond's border rows/columns are filled without gaps.
	for ttx := -1; ttx < (w/2)+1; ttx++ {
		for tty := -1; tty < (h/2)+1; tty++ {
			tx, ty := clamp(ttx, 0, w/2), clamp(tty, 0, h/2)
			col := src.At(
				top.Min.X+int((float64(tx)/float64(w/2))*float64(top.Dx())),
				top.Min.Y+int((float64(ty)/float64(h/2))*float64(top.Dy())),
			)
			if _, _, _, a := col.RGBA(); a == 0xFFFF {
				out.Set(
					x+1+ttx+tty,
					(h/4)+y+int(float64(ttx)*0.5-0.75-float64(tty)*0.5),
					col,
				)
			}
		}
	}
}
// clamp limits x to the inclusive range [min, max].
func clamp(x, min, max int) int {
	switch {
	case x < min:
		return min
	case x > max:
		return max
	default:
		return x
	}
}
|
package client
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"math/rand"
"github.com/yekhlakov/gojsonrpc/client/transport"
"github.com/yekhlakov/gojsonrpc/common"
)
// New returns a Client with a no-op transport and a logger that discards all
// output; callers configure both via SetTransport/SetLogger.
// NOTE(review): ioutil.Discard is deprecated in favor of io.Discard — worth
// migrating if the module targets Go 1.16+.
func New() *Client {
	return &Client{
		T:      &transport.Discard{},
		logger: log.New(ioutil.Discard, "", 0),
	}
}
// SetTransport replaces the client's transport; nil is rejected so the client
// always has a usable transport.
func (c *Client) SetTransport(t Transport) error {
	if t == nil {
		return fmt.Errorf("nil transport not allowed")
	}
	c.T = t
	return nil
}
// SetLogger replaces the client's logger and propagates it to the transport;
// nil is rejected.
func (c *Client) SetLogger(l *log.Logger) error {
	if l == nil {
		return fmt.Errorf("nil logger not allowed")
	}
	c.logger = l
	return c.T.SetLogger(l)
}
// PerformRequest serializes the request in rc, sends it through the client's
// transport, and parses the raw response back into rc.
func (c *Client) PerformRequest(rc *common.RequestContext) error {
	if err := rc.RebuildRawRequest(); err != nil {
		return err
	}
	if err := c.T.PerformRequest(rc); err != nil {
		return err
	}
	return rc.ParseRawResponse()
}
// NewJsonRpcRequest builds a JSON-RPC 2.0 request with a random hex id and
// the given method; params are JSON-marshalled into the request body.
func NewJsonRpcRequest(method string, params interface{}) (common.Request, error) {
	p, err := json.Marshal(params)
	if err != nil {
		// BUG FIX: the original returned a partially populated Request
		// alongside the error; return the zero value instead so callers
		// cannot accidentally use a request with nil Params.
		return common.Request{}, err
	}
	return common.Request{
		JsonRPC: "2.0",
		Id:      fmt.Sprintf("%8.8x%8.8x", rand.Uint64(), rand.Uint64()),
		Method:  method,
		Params:  p,
	}, nil
}
// NewRequestContext creates a request context carrying the client's logger
// and a freshly built JSON-RPC request for method/params.
func (c *Client) NewRequestContext(method string, params interface{}) (rc common.RequestContext, err error) {
	rc = common.EmptyRequestContext()
	rc.Logger = c.logger
	rc.JsonRpcRequest, err = NewJsonRpcRequest(method, params)
	return
}
// Request is the main entry point: it builds a request context for
// method/params, performs the round trip, and returns the parsed response.
func (c *Client) Request(method string, params interface{}) (response common.Response, err error) {
	rc, err := c.NewRequestContext(method, params)
	if err != nil {
		return
	}
	err = c.PerformRequest(&rc)
	if err != nil {
		return
	}
	return rc.JsonRpcResponse, nil
}
|
package sgs
import (
"encoding/json"
"time"
"github.com/gorilla/websocket"
)
// wsConn pairs a websocket connection with the id of the client it serves.
type wsConn struct {
	clientId int
	conn     *websocket.Conn
}
// Send marshals cmd to JSON and writes it as a websocket text message.
// NOTE(review): the log format uses "0x%v" with cmd.HexID() — if HexID
// already includes a prefix this double-prefixes; confirm the intended format.
func (me *wsConn) Send(cmd Command) error {
	_log.Dbg("WS send command: 0x%v, 0x%x, %v", cmd.HexID(), cmd.Who, cmd.Payload)
	text, e := json.Marshal(cmd)
	if e != nil {
		return e
	}
	return me.conn.WriteMessage(websocket.TextMessage, text)
}
// Run pumps the websocket: a reader goroutine forwards inbound messages to
// readch (and reports read failures on mch), while the main loop forwards
// them to ch and closes the connection when a close command arrives on mch.
// NOTE(review): after a read error the goroutine still falls through and
// blocks sending on readch, and it is never joined — potential goroutine
// leak; also the 1-second time.After arm wakes the loop continuously.
func (me *wsConn) Run(ch chan Command, mch chan Command) {
	_log.Inf("WS listening to client: %v", me.clientId)
	readch := make(chan Command)
	go func() {
		for {
			_, message, err := me.conn.ReadMessage()
			if err != nil {
				_log.Ntf("Failed to read client: %v, %v", me.clientId, err.Error())
				// Ask the main loop to shut this connection down.
				mch <- Command{
					ID:      _CMD_CLOSE_NET_CLIENT,
					Who:     _CMD_WHO_WSCONN,
					Payload: err.Error(),
				}
			}
			readch <- Command{
				ID:      CMD_FORWARD_TO_APP,
				Payload: message,
			}
		}
	}()
	for {
		select {
		case mc := <-mch:
			if mc.ID == _CMD_CLOSE_NET_CLIENT {
				_log.Inf("Close WS client: %v, %v, %v", me.clientId, mc.Who, mc.Payload.(string))
				goto __close
			}
		case command := <-readch:
			// Forward inbound client traffic to the application.
			ch <- command
		case <-time.After(time.Duration(1) * time.Second):
		}
	}
__close:
	me.conn.Close()
	_log.Inf("WS closed: %v", me.clientId)
}
|
package types
import (
"encoding/hex"
"testing"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/google/uuid"
p8e "github.com/provenance-io/provenance/x/metadata/types/p8e"
"github.com/stretchr/testify/require"
)
// ownerPartyList builds a Party slice giving every address the OWNER role.
func ownerPartyList(addresses ...string) []Party {
	parties := make([]Party, 0, len(addresses))
	for _, addr := range addresses {
		parties = append(parties, Party{Address: addr, Role: PartyType_PARTY_TYPE_OWNER})
	}
	return parties
}
// TestAddScopeRoute checks the AddScope message's route/type names and its
// exact YAML and amino-JSON (GetSignBytes) serializations.
func TestAddScopeRoute(t *testing.T) {
	var scope = NewScope(
		ScopeMetadataAddress(uuid.MustParse("8d80b25a-c089-4446-956e-5d08cfe3e1a5")),
		ScopeSpecMetadataAddress(uuid.MustParse("22fc17a6-40dd-4d68-a95b-ec94e7572a09")),
		ownerPartyList("data_owner"),
		[]string{"data_accessor"},
		"value_owner",
	)
	var msg = NewMsgAddScopeRequest(*scope, []string{})

	require.Equal(t, msg.Route(), RouterKey)
	require.Equal(t, msg.Type(), "add_scope_request")

	// Golden YAML rendering of the message (bech32 metadata addresses).
	yaml := `scope:
  scope_id: scope1qzxcpvj6czy5g354dews3nlruxjsahhnsp
  specification_id: scopespec1qs30c9axgrw5669ft0kffe6h9gysfe58v3
  owners:
  - address: data_owner
    role: 5
  data_access:
  - data_accessor
  value_owner_address: value_owner
signers: []
`
	require.Equal(t, yaml, msg.String())
	require.Equal(t, "{\"type\":\"provenance/metadata/AddScopeRequest\",\"value\":{\"scope\":{\"data_access\":[\"data_accessor\"],\"owners\":[{\"address\":\"data_owner\",\"role\":5}],\"scope_id\":\"scope1qzxcpvj6czy5g354dews3nlruxjsahhnsp\",\"specification_id\":\"scopespec1qs30c9axgrw5669ft0kffe6h9gysfe58v3\",\"value_owner_address\":\"value_owner\"}}}", string(msg.GetSignBytes()))
}
// TestAddScopeValidation exercises MsgAddScopeRequest.ValidateBasic: invalid
// bech32 owners, a scope with no owners, and finally a valid request whose
// required signer address is checked byte-for-byte.
func TestAddScopeValidation(t *testing.T) {
	var scope = NewScope(
		ScopeMetadataAddress(uuid.MustParse("8d80b25a-c089-4446-956e-5d08cfe3e1a5")),
		ScopeSpecMetadataAddress(uuid.MustParse("22fc17a6-40dd-4d68-a95b-ec94e7572a09")),
		ownerPartyList("data_owner"),
		[]string{"data_accessor"},
		"value_owner",
	)
	var msg = NewMsgAddScopeRequest(*scope, []string{"invalid"})
	err := msg.ValidateBasic()
	require.Panics(t, func() { msg.GetSigners() }, "panics due to invalid addresses")
	require.Error(t, err, "invalid addresses")
	require.Equal(t, "invalid owner on scope: decoding bech32 failed: invalid index of 1", err.Error())

	// A scope without owners must be rejected.
	msg.Scope = *NewScope(
		ScopeMetadataAddress(uuid.MustParse("8d80b25a-c089-4446-956e-5d08cfe3e1a5")),
		ScopeSpecMetadataAddress(uuid.MustParse("22fc17a6-40dd-4d68-a95b-ec94e7572a09")),
		[]Party{},
		[]string{},
		"",
	)
	err = msg.ValidateBasic()
	require.Error(t, err, "no owners")
	require.Equal(t, "scope must have at least one owner", err.Error())

	// Valid bech32 owner + signer: validation passes and the signer decodes
	// to the expected raw account address bytes.
	msg.Scope = *NewScope(
		ScopeMetadataAddress(uuid.MustParse("8d80b25a-c089-4446-956e-5d08cfe3e1a5")),
		ScopeSpecMetadataAddress(uuid.MustParse("22fc17a6-40dd-4d68-a95b-ec94e7572a09")),
		ownerPartyList("cosmos1sh49f6ze3vn7cdl2amh2gnc70z5mten3y08xck"),
		[]string{},
		"",
	)
	msg.Signers = []string{"cosmos1sh49f6ze3vn7cdl2amh2gnc70z5mten3y08xck"}
	err = msg.ValidateBasic()
	require.NoError(t, err, "valid add scope request")
	requiredSigners := msg.GetSigners()
	require.Equal(t, 1, len(requiredSigners))
	// NOTE(review): the local variable shadows the imported hex package
	// after this line; harmless here, but renaming it would be cleaner.
	hex, err := hex.DecodeString("85EA54E8598B27EC37EAEEEEA44F1E78A9B5E671")
	require.NoError(t, err)
	require.Equal(t, sdk.AccAddress(hex), requiredSigners[0])
}
// TestAddP8eContractSpecValidation builds a valid p8e contract spec and
// checks ValidateBasic: it must fail with no signers, fail with an invalid
// signer address, and pass with a well-formed bech32 signer.
func TestAddP8eContractSpecValidation(t *testing.T) {
	validInputSpec := p8e.DefinitionSpec{
		Name: "perform_input_checks",
		ResourceLocation: &p8e.Location{Classname: "io.provenance.loan.LoanProtos$PartiesList",
			Ref: &p8e.ProvenanceReference{Hash: "Adv+huolGTKofYCR0dw5GHm/R7sUWOwF32XR8r8r9kDy4il5U/LApxOWYHb05jhK4+eY4YzRMRiWcxU3Lx0+Mw=="},
		},
		Type: 1,
	}

	validOutputSpec := p8e.OutputSpec{Spec: &p8e.DefinitionSpec{
		Name: "additional_parties",
		ResourceLocation: &p8e.Location{
			Classname: "io.provenance.loan.LoanProtos$PartiesList",
			Ref: &p8e.ProvenanceReference{
				Hash: "Adv+huolGTKofYCR0dw5GHm/R7sUWOwF32XR8r8r9kDy4il5U/LApxOWYHb05jhK4+eY4YzRMRiWcxU3Lx0+Mw==",
			},
		},
		Type: 1,
	},
	}

	validDefinition := p8e.DefinitionSpec{
		Name: "ExampleContract",
		ResourceLocation: &p8e.Location{Classname: "io.provenance.contracts.ExampleContract",
			Ref: &p8e.ProvenanceReference{Hash: "E36eeTUk8GYXGXjIbZTm4s/Dw3G1e42SinH1195t4ekgcXXPhfIpfQaEJ21PTzKhdv6JjhzQJ2kAJXK+TRXmeQ=="},
		},
		Type: 1,
	}

	validContractSpec := p8e.ContractSpec{ConsiderationSpecs: []*p8e.ConsiderationSpec{
		{FuncName: "additionalParties",
			InputSpecs:       []*p8e.DefinitionSpec{&validInputSpec},
			OutputSpec:       &validOutputSpec,
			ResponsibleParty: 1,
		},
	},
		Definition:      &validDefinition,
		InputSpecs:      []*p8e.DefinitionSpec{&validInputSpec},
		PartiesInvolved: []p8e.PartyType{p8e.PartyType_PARTY_TYPE_AFFILIATE},
	}

	msg := NewMsgAddP8EContractSpecRequest(validContractSpec, []string{})
	err := msg.ValidateBasic()
	require.Error(t, err, "should fail due to signatures < 1")

	msg = NewMsgAddP8EContractSpecRequest(validContractSpec, []string{"invalid"})
	err = msg.ValidateBasic()
	require.Error(t, err, "should fail in convert validation due to address not being valid")

	msg = NewMsgAddP8EContractSpecRequest(validContractSpec, []string{"cosmos1s0kcwmhstu6urpp4080qjzatta02y0rarrcgrp"})
	err = msg.ValidateBasic()
	require.NoError(t, err)
}
|
package mappers
import (
"fmt"
"github.com/vfreex/gones/pkg/emulator/memory"
"github.com/vfreex/gones/pkg/emulator/rom/ines"
)
/*
http://wiki.nesdev.com/w/index.php/UxROM
PRG ROM capacity 256K/4096K
PRG ROM window 16K + 16K fixed
PRG RAM capacity None
CHR capacity 8K
CHR window n/a
CPU $8000-$BFFF: 16 KB switchable PRG ROM bank
CPU $C000-$FFFF: 16 KB PRG ROM bank, fixed to the last bank
*/
// UxRomMapper implements iNES mapper 2 (UxROM): a 16 KB switchable PRG-ROM
// bank at CPU $8000-$BFFF plus a 16 KB PRG-ROM bank fixed to the last bank at
// CPU $C000-$FFFF (see the wiki excerpt above).
type UxRomMapper struct {
	mapperBase
	// bankSelect is the last value written to $8000-$FFFF via PokePrg; it
	// selects which 16 KB PRG-ROM bank is visible at $8000-$BFFF.
	bankSelect byte
}
// init registers this mapper under iNES mapper number 2 so that ROMs
// declaring mapper 2 are constructed via NewUxRomMapper.
func init() {
	MapperConstructors[2] = NewUxRomMapper
}
// NewUxRomMapper builds a mapper-2 (UxROM) instance from the given iNES ROM.
// Cartridges that ship without CHR-ROM get an 8 KB CHR-RAM bank instead, and
// useChrRam is set so that PokeChr accepts writes.
func NewUxRomMapper(rom *ines.INesRom) Mapper {
	mapper := &UxRomMapper{}
	mapper.prgBin = rom.PrgBin
	if len(rom.ChrBin) == 0 {
		// Cartridge uses CHR-RAM rather than CHR-ROM.
		mapper.chrBin = make([]byte, ChrBankSize)
		mapper.useChrRam = true
	} else {
		mapper.chrBin = rom.ChrBin
	}
	return mapper
}
// PeekPrg reads one byte from CPU address space handled by the cartridge.
// Addresses below $4020 are invalid, $4020-$7FFF map to PRG-RAM, $8000-$BFFF
// to the bank chosen via bankSelect, and $C000-$FFFF to the fixed last bank.
func (p *UxRomMapper) PeekPrg(addr memory.Ptr) byte {
	switch {
	case addr < 0x4020:
		panic(fmt.Errorf("program trying to read from Mapper 2 via invalid ROM address %04x", addr))
	case addr < 0x8000:
		// PRG-RAM window; assumes prgRam covers $4020-$7FFF — TODO confirm
		// the size set up by mapperBase.
		return p.prgRam[addr-0x4020]
	}
	// Default to the switchable bank; $C000+ always maps to the last bank.
	bank := int(p.bankSelect)
	if addr >= 0xc000 {
		bank = len(p.prgBin)/PrgBankSize - 1
	}
	// Low 14 bits of addr index within the selected 16 KB bank.
	return p.prgBin[bank*PrgBankSize|int(addr)&0x3fff]
}
// PokePrg writes one byte into cartridge-handled CPU address space. Writes to
// $4020-$7FFF land in PRG-RAM; writes to $8000-$FFFF latch the PRG bank
// select register (UxROM behavior).
func (p *UxRomMapper) PokePrg(addr memory.Ptr, val byte) {
	if addr < 0x4020 {
		panic(fmt.Errorf("mapper 2 PRG-ROM address 0x%x is not configured", addr))
	}
	if addr >= 0x8000 {
		// Any write to ROM space sets the switchable-bank register.
		p.bankSelect = val
		return
	}
	// Write to PRG-RAM.
	p.prgRam[addr-0x4020] = val
}
// PeekChr reads one byte from pattern-table space ($0000-$1FFF); addresses at
// or above $2000 are not handled by this mapper.
func (p *UxRomMapper) PeekChr(addr memory.Ptr) byte {
	if addr < 0x2000 {
		return p.chrBin[addr]
	}
	panic(fmt.Errorf("mapper 2 CHR-ROM/CHR-RAM address %04x is not configured", addr))
}
// PokeChr writes one byte into pattern-table space ($0000-$1FFF). Writing is
// only legal when the cartridge provides CHR-RAM; CHR-ROM carts panic.
func (p *UxRomMapper) PokeChr(addr memory.Ptr, val byte) {
	switch {
	case addr >= 0x2000:
		panic(fmt.Errorf("mapper 2 CHR-ROM/CHR-RAM address %04x is not configured", addr))
	case !p.useChrRam:
		panic(fmt.Errorf("this mapper 2 cartridge uses CHR-ROM, writing address %04x is not possible", addr))
	}
	p.chrBin[addr] = val
}
|
package handler
import (
"crypto/sha512"
"encoding/base64"
"fmt"
"golang-api/model"
"log"
"net/http"
"time"
)
// HashPassword handles POST requests carrying a "password" form value: it
// hashes the password with SHA-512, base64-encodes the digest, and writes the
// result to the response after a fixed 5-second pause. The total processing
// time in milliseconds is recorded in the global model.Stats. Any other HTTP
// method receives 400 Bad Request.
func HashPassword(w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	// Deliberate fixed delay; part of the documented contract above.
	time.Sleep(5 * time.Second)
	switch r.Method {
	case http.MethodPost:
		password := r.FormValue("password")
		encoded := base64.StdEncoding.EncodeToString(hashSHA512(password))
		// Use Fprint, not Fprintf: the encoded hash is data, and feeding it
		// to Fprintf as a format string misrenders any '%' it contains.
		if _, err := fmt.Fprint(w, encoded); err != nil {
			// A failed response write (e.g. client disconnect) must not take
			// the whole process down, so log instead of log.Fatal.
			log.Printf("writing response: %v", err)
			return
		}
		model.Stats.Process(int(time.Since(start).Milliseconds()))
	default:
		// NOTE(review): 405 Method Not Allowed would be more conventional;
		// kept as 400 to preserve the existing API contract.
		w.WriteHeader(http.StatusBadRequest)
		log.Printf("Unsupported HTTP method '%v'", r.Method)
	}
}
// hashSHA512 returns a text string in a SHA512 hashed form.
func hashSHA512(text string) []byte {
hash := sha512.New()
hash.Write([]byte(text))
return hash.Sum(nil)
}
|
// Package transform defines a Transformation representing rigid
// plane-transformations and provides constructors for, ways to find the types
// of, and ways to simplify Transformations.
package transform
import (
"fmt"
"github.com/jwowillo/viztransform/geometry"
)
// Types of Transformations.
//
// All Transformations fall into one of these categories after simplification.
// The Type of a non-simplified Transformation is the Type of its simplified
// form. The category is decided purely by how many geometry.Lines the
// simplified Transformation contains and how they relate (parallel or
// intersecting); see TypeOf.
const (
	// TypeNoTransformation belongs to Transformations that do nothing.
	//
	// A Transformation with no geometry.Lines has this Type.
	TypeNoTransformation Type = iota
	// TypeLineReflection belongs to Transformations that reflects
	// geometry.Points across a geometry.Line by mirroring the
	// geometry.Point across the geometry.Line through the perpendicular
	// to the geometry.Line passing through the geometry.Point.
	//
	// A Transformation with a single geometry.Line has this Type where the
	// geometry.Line is the one being reflected across.
	TypeLineReflection
	// TypeTranslation belongs to Transformations that translate
	// geometry.Points by a geometry.Vector.
	//
	// A Transformation with 2 parallel geometry.Lines has this Type where
	// the geometry.Vector is the shortest one from the first geometry.Line
	// to the second.
	TypeTranslation
	// TypeRotation belongs to Transformations that rotate geometry.Points
	// by an angle around a geometry.Point.
	//
	// A Transformation with 2 intersecting geometry.Lines has this Type
	// where the geometry.Point is the intersection of the geometry.Lines
	// and the angle is the angle between the first geometry.Line and the
	// second.
	TypeRotation
	// TypeGlideReflection belongs to Transformations that both translate
	// and reflect geometry.Points in any order.
	//
	// A Transformation with 2 parallel geometry.Lines perpendicular to
	// another geometry.Line with the parallel geometry.Lines next to each
	// other has this Type where the parallel geometry.Lines define the
	// corresponding Transformation with TypeTranslation and the remaining
	// geometry.Line defines the corresponding Transformation with
	// TypeLineReflection.
	TypeGlideReflection
)
// Transformation is a list of geometry.Lines each representing an individual
// Transformation with TypeLineReflection that are all composed together.
//
// The arrangement of the geometry.Lines creates different Transformation-Types.
// An empty Transformation composes no reflections and therefore has
// TypeNoTransformation.
type Transformation []geometry.Line
// String-representation of the Transformation.
//
// Looks like a called Transformation-constructor with arguments in their
// respective string-representations. Examples are:
//
//	NoTransformation()
//	LineReflection(geometry.Line)
//	Translation(geometry.Vector)
//	Rotation(geometry.Point, geometry.Number)
//	GlideReflection(geometry.Line, geometry.Vector)
func (t Transformation) String() string {
	simplified := Simplify(t)
	switch TypeOf(simplified) {
	case TypeLineReflection:
		return stringLineReflection(simplified[0])
	case TypeTranslation:
		return stringTranslation(simplified[0], simplified[1])
	case TypeRotation:
		return stringRotation(simplified[0], simplified[1])
	case TypeGlideReflection:
		return stringGlideReflection(simplified[0], simplified[1], simplified[2])
	}
	return stringNoTransformation()
}
// stringNoTransformation returns the string-representation of a Transformation
// with TypeNoTransformation.
//
// Looks like 'NoTransformation()'.
func stringNoTransformation() string {
	const repr = "NoTransformation()"
	return repr
}
// stringLineReflection returns the string-representation of the Transformation
// with TypeLineReflection over geometry.Line axis.
//
// Looks like 'LineReflection(geometry.Line)' where geometry.Line is the passed
// geometry.Line.
func stringLineReflection(axis geometry.Line) string {
	return fmt.Sprintf("LineReflection(%s)", axis)
}
// stringTranslation returns the string-representation of the Transformation
// with TypeTranslation created by parallel geometry.Lines a and b.
//
// Looks like 'Translation(geometry.Vector)' where geometry.Vector is the
// shortest geometry.Vector from a to b scaled by 2 since a translation from 2
// line-reflections translates by 2 times the shortest distance from the first
// geometry.Line to the second.
func stringTranslation(a, b geometry.Line) string {
	shortest := geometry.ShortestVector(a, b)
	doubled := geometry.MustVector(geometry.Scale(shortest, 2*geometry.Length(shortest)))
	return fmt.Sprintf("Translation(%s)", doubled)
}
// stringRotation returns the string-representation of the Transformation with
// TypeRotation created by intersecting geometry.Lines a and b.
//
// Looks like 'Rotation(geometry.Point, geometry.Angle)' where geometry.Point
// is the intersection of a and b and geometry.Angle is 2 times the angle from
// a to b mod 2pi since a rotation from 2 line-reflections rotates 2 times the
// angle from the first geometry.Line to the second.
func stringRotation(a, b geometry.Line) string {
	center := geometry.MustPoint(geometry.Intersection(a, b))
	angle := 2 * geometry.AngleBetween(a, b)
	return fmt.Sprintf("Rotation(%s, %s)", center, angle)
}
// stringGlideReflection returns the string-representation of the Transformation
// with TypeGlideReflection created by geometry.Lines a, b, and c with a and b
// parallel and b and c perpendicular, or a and b perpendicular and b and c
// parallel.
//
// Looks like 'GlideReflection(geometry.Line, geometry.Vector)' where
// geometry.Line is the geometry.Line the reflection is happening over and the
// projection of the geometry.Vector onto the geometry.Line is the
// geometry.Vector of translation.
func stringGlideReflection(a, b, c geometry.Line) string {
	// Normalize so that a and b are the parallel pair and c is the
	// reflection geometry.Line.
	if geometry.AreParallel(b, c) {
		a, b, c = b, c, a
	}
	shortest := geometry.ShortestVector(a, b)
	translation := geometry.MustVector(geometry.Scale(shortest, 2*geometry.Length(shortest)))
	return fmt.Sprintf("GlideReflection(%s, %s)", c, translation)
}
// Type of a Transformation in terms of how it transforms geometry.Points.
// The possible values are the Type* constants declared in this package.
type Type int
// TypeOf a Transformation from the defined Transformation-Types.
//
// The Transformation is simplified first, so the result reflects the
// simplified form's geometry.Line count and arrangement.
func TypeOf(t Transformation) Type {
	simplified := Simplify(t)
	switch len(simplified) {
	case 0:
		return TypeNoTransformation
	case 1:
		return TypeLineReflection
	case 2:
		if geometry.AreParallel(simplified[0], simplified[1]) {
			return TypeTranslation
		}
		return TypeRotation
	}
	return TypeGlideReflection
}
|
package database
import "loranet20181205/exception"
// TbEdConf maps a row of the tb_ed_conf table (see TableName) holding an
// end-device configuration record. Columns whose database names differ from
// gorm's default naming carry explicit `gorm:"column:..."` tags.
// NOTE(review): field names suggest LoRaWAN radio parameters (RX windows,
// data rate, TX power) — confirm exact semantics against the schema.
type TbEdConf struct {
	ID uint
	EdID uint
	Boardsn string
	DevAddr int `gorm:"column:devAddr"` // device address; GetEdConf queries on this column
	DRstep int `gorm:"column:DRstep"`
	RX1DRoffset int `gorm:"column:RX1DRoffset"`
	RX2DataRate int `gorm:"column:RX2DataRate"`
	RX2FC int `gorm:"column:RX2FC"`
	Delay int `gorm:"column:Delay"`
	DataRate int `gorm:"column:DataRate"`
	TXPower int `gorm:"column:TXPower"`
	RFU int `gorm:"column:RFU"`
	Linkparam int
	Uplinkfc int
}
// TableName tells gorm which database table TbEdConf rows map to.
func (TbEdConf) TableName() string {
	const table = "tb_ed_conf"
	return table
}
// GetEdConf loads the end-device configuration row whose devAddr column
// matches devAddrI. When no row matches, the zero-value TbEdConf is returned;
// the previous implementation indexed edConf[0] unconditionally and panicked
// with an index-out-of-range error on a missing device.
func GetEdConf(devAddrI int) TbEdConf {
	db, err := DbConnect()
	exception.CheckError(err)
	defer db.Close()
	var edConf []TbEdConf
	db.Where("devAddr = ?", devAddrI).Find(&edConf)
	if len(edConf) == 0 {
		// No matching configuration found.
		return TbEdConf{}
	}
	return edConf[0]
}
|
package gonigsberg
// concat returns a new slice containing the elements of every input slice in
// order. It is safe to call with zero arguments, yielding an empty slice; the
// previous implementation indexed slices[0] unconditionally and panicked on
// concat().
func concat(slices ...[]int) []int {
	totalLen := 0
	for _, s := range slices {
		totalLen += len(s)
	}
	// Pre-size once so the appends below never reallocate.
	out := make([]int, 0, totalLen)
	for _, s := range slices {
		out = append(out, s...)
	}
	return out
}
// filter returns the elements of data that do not appear in exclude,
// preserving their original order. The result is always non-nil.
func filter(data, exclude []int) []int {
	kept := make([]int, 0)
dataLoop:
	for _, candidate := range data {
		for _, banned := range exclude {
			if banned == candidate {
				continue dataLoop
			}
		}
		kept = append(kept, candidate)
	}
	return kept
}
// sumLength reports the combined number of elements across all input slices.
func sumLength(slices ...[]int) int {
	sum := 0
	for i := range slices {
		sum += len(slices[i])
	}
	return sum
}
package main
import (
"net/url"
)
// GetInbox returns the pull requests present in the Stash inbox for the given
// role. It authenticates via the web login flow and attaches the resulting
// session cookies to the API resource before querying, fetching up to 1000
// pull requests.
func (api *Api) GetInbox(role string) ([]PullRequest, error) {
	logger.Debug(
		"requesting pull requests count from Stash for role '%s'...",
		role,
	)

	cookies, err := api.authViaWeb()
	if err != nil {
		return nil, err
	}

	// Previously the parse error was discarded with `_`; a malformed api.URL
	// would silently attach cookies against a zero URL.
	hostURL, err := url.Parse(api.URL)
	if err != nil {
		return nil, err
	}

	resource := api.GetResource().Res("inbox/latest")
	resource.Api.Cookies.SetCookies(hostURL, cookies)

	prReply := struct {
		Values []PullRequest
	}{}

	err = api.DoGet(resource.Res("pull-requests", &prReply),
		map[string]string{
			"limit": "1000",
			"role":  role,
		})
	if err != nil {
		return nil, err
	}

	return prReply.Values, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.