text stringlengths 11 4.05M |
|---|
package instancestoresql
import (
"fmt"
"strings"
"time"
"github.com/direktiv/direktiv/pkg/refactor/instancestore"
)
// wheres renders a SQL WHERE fragment (with a leading space) from the
// given clauses. No clauses yields the empty string, a single clause is
// appended bare, and multiple clauses are each parenthesized and joined
// with AND.
func wheres(clauses ...string) string {
	switch len(clauses) {
	case 0:
		return ""
	case 1:
		return ` WHERE ` + clauses[0]
	default:
		return ` WHERE (` + strings.Join(clauses, ") AND (") + `)`
	}
}
// generateGetInstancesOrderings renders the ORDER BY clause (with a
// leading space) for a list query. When opts carries no explicit
// orderings the default is created_at descending. Unknown or duplicate
// order fields are rejected with instancestore.ErrBadListOpts.
func generateGetInstancesOrderings(opts *instancestore.ListOpts) (string, error) {
	if opts == nil || len(opts.Orders) == 0 {
		return ` ORDER BY ` + fieldCreatedAt + " " + desc, nil
	}
	// keys tracks order fields already seen so duplicates can be rejected.
	keys := make(map[string]bool)
	var orderStrings []string
	for _, order := range opts.Orders {
		var s string
		switch order.Field {
		case instancestore.FieldCreatedAt:
			s = fieldCreatedAt
		default:
			return "", fmt.Errorf("order field '%s': %w", order.Field, instancestore.ErrBadListOpts)
		}
		if _, exists := keys[order.Field]; exists {
			return "", fmt.Errorf("duplicate order field '%s': %w", order.Field, instancestore.ErrBadListOpts)
		}
		keys[order.Field] = true
		// Ascending is the SQL default; only DESC is emitted explicitly.
		if order.Descending {
			s += " " + desc
		}
		orderStrings = append(orderStrings, s)
	}
	return ` ORDER BY ` + strings.Join(orderStrings, ", "), nil
}
// nolint:gocognit
// generateGetInstancesFilters translates opts.Filters into SQL WHERE
// clause fragments using "?" placeholders, plus the matching bind
// values in the same order. Each field supports only specific filter
// kinds; anything else is rejected with instancestore.ErrBadListOpts.
func generateGetInstancesFilters(opts *instancestore.ListOpts) ([]string, []interface{}, error) {
	if opts == nil {
		return []string{}, []interface{}{}, nil
	}
	var clauses []string
	var vals []interface{}
	for idx := range opts.Filters {
		filter := opts.Filters[idx]
		var clause string
		var val interface{}
		switch filter.Field {
		case fieldNamespaceID:
			// Namespace ID supports exact match only.
			if filter.Kind == instancestore.FilterKindMatch {
				clause = fieldNamespaceID + " = ?"
				val = filter.Value
			} else {
				return nil, nil, fmt.Errorf("filter kind '%s' for use with field '%s': %w", filter.Kind, filter.Field, instancestore.ErrBadListOpts)
			}
		case instancestore.FieldCreatedAt:
			// Normalize time values (value or pointer) to UTC before binding.
			if t, ok := filter.Value.(time.Time); ok {
				filter.Value = t.UTC()
			}
			if t, ok := filter.Value.(*time.Time); ok {
				filter.Value = t.UTC()
			}
			if filter.Kind == instancestore.FilterKindBefore {
				clause = fieldCreatedAt + " < ?"
				val = filter.Value
			} else if filter.Kind == instancestore.FilterKindAfter {
				clause = fieldCreatedAt + " > ?"
				val = filter.Value
			} else {
				return nil, nil, fmt.Errorf("filter kind '%s' for use with field '%s': %w", filter.Kind, filter.Field, instancestore.ErrBadListOpts)
			}
		case fieldDeadline:
			// Same UTC normalization and before/after semantics as created_at.
			if t, ok := filter.Value.(time.Time); ok {
				filter.Value = t.UTC()
			}
			if t, ok := filter.Value.(*time.Time); ok {
				filter.Value = t.UTC()
			}
			if filter.Kind == instancestore.FilterKindBefore {
				clause = fieldDeadline + " < ?"
				val = filter.Value
			} else if filter.Kind == instancestore.FilterKindAfter {
				clause = fieldDeadline + " > ?"
				val = filter.Value
			} else {
				return nil, nil, fmt.Errorf("filter kind '%s' for use with field '%s': %w", filter.Kind, filter.Field, instancestore.ErrBadListOpts)
			}
		case instancestore.FieldWorkflowPath:
			// Prefix and contains are both implemented with LIKE patterns.
			if filter.Kind == instancestore.FilterKindPrefix {
				clause = fieldWorkflowPath + " LIKE ?"
				val = fmt.Sprintf("%s", filter.Value) + "%"
			} else if filter.Kind == instancestore.FilterKindContains {
				clause = fieldWorkflowPath + " LIKE ?"
				val = "%" + fmt.Sprintf("%s", filter.Value) + "%"
			} else {
				return nil, nil, fmt.Errorf("filter kind '%s' for use with field '%s': %w", filter.Kind, filter.Field, instancestore.ErrBadListOpts)
			}
		case instancestore.FieldStatus:
			// NOTE(review): the "<" kind relies on status values being
			// ordered integers — confirm against the status enum.
			if filter.Kind == instancestore.FilterKindMatch {
				clause = fieldStatus + " = ?"
				val = filter.Value
			} else if filter.Kind == "<" {
				clause = fieldStatus + " < ?"
				val = filter.Value
			} else {
				return nil, nil, fmt.Errorf("filter kind '%s' for use with field '%s': %w", filter.Kind, filter.Field, instancestore.ErrBadListOpts)
			}
		case instancestore.FieldInvoker:
			if filter.Kind == instancestore.FilterKindMatch {
				clause = fieldInvoker + " = ?"
				val = fmt.Sprintf("%s", filter.Value)
			} else if filter.Kind == instancestore.FilterKindContains {
				clause = fieldInvoker + " LIKE ?"
				val = "%" + fmt.Sprintf("%s", filter.Value) + "%"
			} else {
				return nil, nil, fmt.Errorf("filter kind '%s' for use with field '%s': %w", filter.Kind, filter.Field, instancestore.ErrBadListOpts)
			}
		default:
			return nil, nil, fmt.Errorf("filter field '%s': %w", filter.Field, instancestore.ErrBadListOpts)
		}
		clauses = append(clauses, clause)
		vals = append(vals, val)
	}
	return clauses, vals, nil
}
// generateInsertQuery builds an INSERT statement for the instances
// table with one "?" placeholder per column.
func generateInsertQuery(columns []string) string {
	// Build the placeholder list with Join instead of
	// Repeat(…, len(columns)-1): the old form panicked on an empty
	// columns slice (strings.Repeat with a negative count).
	placeholders := make([]string, len(columns))
	for i := range placeholders {
		placeholders[i] = "?"
	}
	into := strings.Join(columns, ", ")
	return fmt.Sprintf(`INSERT INTO %s(%s) VALUES (%s)`, table, into, strings.Join(placeholders, ", "))
}
// generateGetInstancesQueries builds two statements for a list request:
// a COUNT(*) query and the page query selecting the given columns. Both
// share the same WHERE clauses; the page query additionally gets the
// ORDER BY clause and, when opts.Limit > 0, LIMIT/OFFSET. It returns
// (countQuery, pageQuery, bindValues, error).
func generateGetInstancesQueries(columns []string, opts *instancestore.ListOpts) (string, string, []interface{}, error) {
	clauses, vals, err := generateGetInstancesFilters(opts)
	if err != nil {
		return "", "", nil, err
	}
	orderings, err := generateGetInstancesOrderings(opts)
	if err != nil {
		return "", "", nil, err
	}
	countQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s`, table)
	countQuery += wheres(clauses...)
	query := fmt.Sprintf(`SELECT %s FROM %s`, strings.Join(columns, ", "), table)
	query += wheres(clauses...)
	query += orderings
	// A zero Limit means "no paging"; Offset is only meaningful with a limit.
	if opts != nil && opts.Limit > 0 {
		query += fmt.Sprintf(` LIMIT %d`, opts.Limit)
		if opts.Offset > 0 {
			query += fmt.Sprintf(` OFFSET %d`, opts.Offset)
		}
	}
	return countQuery, query, vals, nil
}
|
package main
import (
"encoding/binary"
"flag"
"fmt"
"net"
"os"
"time"
"github.com/golang/glog"
dhcp "github.com/krolaw/dhcp4"
dhcpConn "github.com/krolaw/dhcp4/conn"
"github.com/spf13/cobra"
"golang.org/x/sys/unix"
)
const (
	// infiniteLease is an effectively unlimited DHCP lease duration
	// (999 days) handed to the single client.
	infiniteLease = 999 * 24 * time.Hour
)
// main wires up the capabilities-demo CLI. Each subcommand performs a
// privileged network operation (RAW socket creation, SO_BINDTODEVICE,
// running a DHCP server) so the binary can be used to verify Linux
// capability grants such as CAP_NET_RAW.
func main() {
	flag.Parse()
	// Mirror glog output to stderr.
	// NOTE(review): the error is swallowed — on failure the process
	// exits(1) without printing why.
	if err := flag.Set("alsologtostderr", "true"); err != nil {
		os.Exit(1)
	}
	rootCmd := &cobra.Command{
		Use: "capabilities-demo",
	}
	// raw-socket <iface>: open an ICMPv6 RAW socket and bind it to the
	// unspecified address, scoped to the named interface.
	createRawSocket := &cobra.Command{
		Use:  "raw-socket",
		Args: cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ifaceName := args[0]
			glog.Infof("Will create a RAW socket on interface: %s", ifaceName)
			fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_RAW, unix.IPPROTO_ICMPV6)
			if err != nil {
				return fmt.Errorf("cannot get a RAW socket: %v", err)
			}
			f := os.NewFile(uintptr(fd), "")
			// net.FilePacketConn dups the FD, so we have to close this in any case.
			defer f.Close()
			listenAddr := &net.IPAddr{
				IP:   net.IPv6unspecified,
				Zone: ifaceName,
			}
			// Bind to the port.
			saddr := &unix.SockaddrInet6{}
			copy(saddr.Addr[:], listenAddr.IP)
			if err := unix.Bind(fd, saddr); err != nil {
				return fmt.Errorf("cannot bind to address %v: %v", saddr, err)
			}
			glog.Infof("Successfully created a RAW socket on iface: %s w/ fd number: %d", ifaceName, fd)
			return nil
		},
	}
	// bind-to-device <iface>: open a UDP socket, bind it to the device
	// with SO_BINDTODEVICE (requires CAP_NET_RAW), then bind the port.
	bindToDevice := &cobra.Command{
		Use:  "bind-to-device",
		Args: cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ifaceName := args[0]
			port, err := cmd.Flags().GetUint("port")
			if err != nil {
				return fmt.Errorf("could not parse the port number: %v", err)
			}
			glog.Infof("Will create a DGRAM socket on interface: %s", ifaceName)
			fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_UDP)
			if err != nil {
				return fmt.Errorf("cannot get a DGRAM socket: %v", err)
			}
			f := os.NewFile(uintptr(fd), "")
			// net.FilePacketConn dups the FD, so we have to close this in any case.
			defer f.Close()
			// Bind directly to the interface.
			if err := unix.BindToDevice(fd, ifaceName); err != nil {
				if errno, ok := err.(unix.Errno); ok && errno == unix.EPERM {
					// Return a more helpful error message in this (fairly common) case
					return fmt.Errorf("cannot bind to interface without CAP_NET_RAW or root permissions")
				}
				return fmt.Errorf("cannot bind to interface %s: %v", ifaceName, err)
			}
			saddr := unix.SockaddrInet6{Port: int(port)}
			copy(saddr.Addr[:], net.IPv6unspecified)
			if err := unix.Bind(fd, &saddr); err != nil {
				return fmt.Errorf("cannot bind to address %v: %v", saddr, err)
			}
			glog.Infof("Created a UDP socket bound to device: %s", ifaceName)
			return nil
		},
	}
	// start-dhcp-server <iface>: run the single-client DHCP server with
	// the advertised addressing taken from the flags below.
	startDhcpServer := &cobra.Command{
		Use:  "start-dhcp-server",
		Args: cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ifaceName := args[0]
			cidr := cmd.Flag("cidr").Value.String()
			ip, ipNet, err := net.ParseCIDR(cidr)
			if err != nil {
				return fmt.Errorf("error parsing CIDR: %v", err)
			}
			defaultGwIP, err := cmd.Flags().GetIP("ip-router")
			if err != nil {
				return fmt.Errorf("error parsing default GW IP: %v", err)
			}
			serverIP, err := cmd.Flags().GetIP("ip-server")
			if err != nil {
				return fmt.Errorf("error parsing server IP: %v", err)
			}
			mtu, err := cmd.Flags().GetUint16("mtu")
			if err != nil {
				return fmt.Errorf("error parsing MTU: %v", err)
			}
			err = SingleClientDHCPServer(ip, ipNet.Mask, ifaceName, serverIP, defaultGwIP, mtu)
			if err != nil {
				return fmt.Errorf("woop: %v", err)
			}
			return nil
		},
	}
	bindToDevice.Flags().Uint("port", 547, "specify the port to bind to")
	startDhcpServer.Flags().String("mac-addr", "", "the MAC address of the DHCP server")
	startDhcpServer.Flags().String("cidr", "", "the IP address to advertise")
	startDhcpServer.Flags().IP("ip-server", net.IP{}, "the IP address of the advertising server")
	startDhcpServer.Flags().IP("ip-router", net.IP{}, "the IP address of the router")
	startDhcpServer.Flags().Uint16("mtu", 1280, "the MTU to advertise")
	rootCmd.AddCommand(createRawSocket, bindToDevice, startDhcpServer)
	if err := rootCmd.Execute(); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// SingleClientDHCPServer runs a blocking DHCP server on serverIface
// (UDP port 67) that leases exactly one address: every client is
// offered clientIP/clientMask with routerIP as default gateway, the
// given MTU, and this host's hostname, with an effectively infinite
// lease. It only returns on setup failure or when serving stops.
func SingleClientDHCPServer(
	clientIP net.IP,
	clientMask net.IPMask,
	serverIface string,
	serverIP net.IP,
	routerIP net.IP,
	mtu uint16) error {
	glog.Info("Starting SingleClientDHCPServer")
	hostname, err := os.Hostname()
	if err != nil {
		return fmt.Errorf("reading the pods hostname failed: %v", err)
	}
	options, err := prepareDHCPOptions(clientMask, routerIP, mtu, hostname)
	if err != nil {
		return err
	}
	handler := &DHCPHandler{
		clientIP:      clientIP,
		serverIP:      serverIP.To4(), // DHCPv4: advertise the 4-byte form
		leaseDuration: infiniteLease,
		options:       options,
	}
	// Listen on the standard DHCP server port, bound to the interface.
	l, err := dhcpConn.NewUDP4BoundListener(serverIface, ":67")
	if err != nil {
		return err
	}
	defer l.Close()
	// Serve blocks until the listener fails or is closed.
	err = dhcp.Serve(l, handler)
	if err != nil {
		return err
	}
	return nil
}
// prepareDHCPOptions assembles the option set advertised to the client:
// subnet mask, default router, interface MTU (big-endian 16-bit) and
// the server's hostname.
func prepareDHCPOptions(
	clientMask net.IPMask,
	routerIP net.IP,
	mtu uint16,
	hostname string) (dhcp.Options, error) {

	mtuBytes := make([]byte, 2)
	binary.BigEndian.PutUint16(mtuBytes, mtu)

	return dhcp.Options{
		dhcp.OptionSubnetMask:   []byte(clientMask),
		dhcp.OptionRouter:       []byte(routerIP),
		dhcp.OptionInterfaceMTU: mtuBytes,
		dhcp.OptionHostName:     []byte(hostname),
	}, nil
}
// DHCPHandler answers DHCP traffic for exactly one client: every
// DISCOVER/REQUEST is answered with the same fixed clientIP and the
// pre-built option set.
type DHCPHandler struct {
	serverIP      net.IP        // address the replies claim to come from
	clientIP      net.IP        // the single address ever leased out
	leaseDuration time.Duration // lease length offered to the client
	options       dhcp.Options  // options from prepareDHCPOptions
}
// ServeDHCP implements the dhcp.Handler interface. DISCOVER gets an
// OFFER and REQUEST gets an ACK, both for the handler's fixed clientIP;
// all other message types are ignored (nil reply means no response).
func (h *DHCPHandler) ServeDHCP(p dhcp.Packet, msgType dhcp.MessageType, options dhcp.Options) (d dhcp.Packet) {
	glog.Info("Serving a new request")
	switch msgType {
	case dhcp.Discover:
		glog.Info("The request has message type DISCOVER")
		return dhcp.ReplyPacket(p, dhcp.Offer, h.serverIP, h.clientIP, h.leaseDuration,
			h.options.SelectOrderOrAll(nil))
	case dhcp.Request:
		glog.Info("The request has message type REQUEST")
		return dhcp.ReplyPacket(p, dhcp.ACK, h.serverIP, h.clientIP, h.leaseDuration,
			h.options.SelectOrderOrAll(nil))
	default:
		glog.Info("The request has unhandled message type")
		return nil // Ignored message type
	}
}
|
// Copyright 2013-2014 go-diameter authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package diamtype
import (
"encoding/binary"
"fmt"
)
// Integer32 Diameter Type: a signed 32-bit AVP value, encoded on the
// wire as 4 big-endian bytes with no padding.
type Integer32 int32

// DecodeInteger32 decodes the first 4 bytes of b as a big-endian
// 32-bit value. It never returns a non-nil error.
// NOTE(review): assumes len(b) >= 4; shorter input panics — confirm the
// caller validates the AVP length first.
func DecodeInteger32(b []byte) (DataType, error) {
	return Integer32(binary.BigEndian.Uint32(b)), nil
}

// Serialize encodes the value as 4 big-endian bytes.
func (n Integer32) Serialize() []byte {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, uint32(n))
	return b
}

// Len returns the serialized length in bytes (always 4).
func (n Integer32) Len() int {
	return 4
}

// Padding returns the number of padding bytes required (none).
func (n Integer32) Padding() int {
	return 0
}

// Type returns the Diameter data type identifier for Integer32.
func (n Integer32) Type() DataTypeId {
	return Integer32Type
}

// String implements fmt.Stringer for debugging output.
func (n Integer32) String() string {
	return fmt.Sprintf("Integer32{%d}", n)
}
|
package trace
// Trace keys used to propagate tracing information through Kratos
// request metadata.
const (
	// KratosTraceID is the metadata key carrying the trace identifier.
	KratosTraceID = "trace-id"
	// KratosTraceDebug is the metadata key flagging debug tracing.
	KratosTraceDebug = "trace-debug"
)
|
package main
import "fmt"
// min3 returns the smallest of the three given integers.
func min3(a, b, c int) int {
	m := a
	if b < m {
		m = b
	}
	if c < m {
		m = c
	}
	return m
}
// nthUglyNumber returns the n-th ugly number (a positive integer whose
// only prime factors are 2, 3 and 5), counting from 1: the sequence is
// 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, ... For n < 1 it returns 0
// (the original indexed nums[0] and panicked on non-positive n).
//
// Classic three-pointer dynamic programming: x, y, z index the smallest
// already-generated ugly numbers not yet multiplied by 2, 3 and 5.
func nthUglyNumber(n int) int {
	if n < 1 {
		return 0
	}
	nums := make([]int, n)
	nums[0] = 1
	x, y, z := 0, 0, 0
	for next := 1; next < n; next++ {
		a, b, c := 2*nums[x], 3*nums[y], 5*nums[z]
		// smallest candidate becomes the next ugly number
		// (computed inline; "min" would shadow the Go 1.21 builtin).
		smallest := a
		if b < smallest {
			smallest = b
		}
		if c < smallest {
			smallest = c
		}
		nums[next] = smallest
		// Advance every pointer that produced the winner — this is what
		// deduplicates values like 6 (= 2*3 = 3*2).
		if a == smallest {
			x++
		}
		if b == smallest {
			y++
		}
		if c == smallest {
			z++
		}
	}
	return nums[n-1]
}
// test prints the first ten ugly numbers, space separated, followed by
// a newline.
func test() {
	for n := 1; n <= 10; n++ {
		fmt.Printf("%d ", nthUglyNumber(n))
	}
	fmt.Printf("\n")
}
// main runs the demo, printing the first ten ugly numbers.
func main() {
	test()
}
|
package stack
import "testing"
// TestPopWorkAfterPush verifies that Pop returns the value most
// recently pushed.
func TestPopWorkAfterPush(t *testing.T) {
	s := new(Stack)
	s.Push(11)
	pop_res := s.Pop()
	if pop_res != 11 {
		t.Log("Pop must return 11")
		t.Fail()
	}
}
// TestPushPop walks the stack through fill, overflow, drain and
// underflow, checking the backing array and top index after each phase.
// NOTE(review): the overflow (pushTimes(10) onto a half-full stack) and
// underflow (popTimes(10) with 5 elements) phases expect Push/Pop to be
// silent no-ops at the 10-element capacity limits — confirm against the
// Stack implementation.
func TestPushPop(t *testing.T) {
	s := new(Stack)
	// Fill half the stack: pushTimes pushes size..size+n-1, so 0..4.
	s.pushTimes(5)
	if s.data != [10]int{0, 1, 2, 3, 4} {
		t.Log("s.data must be equal [0,1,2,3,4,0,0,0,0,0]")
		t.Fail()
	}
	if s.i != 5 {
		t.Log("s.i must be equal 5")
		t.Fail()
	}
	// Try to push ten more; only five fit.
	s.pushTimes(10)
	if s.data != [10]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} {
		t.Log("s.data must be equal [0,1,2,3,4,5,6,7,8,9]")
		t.Fail()
	}
	if s.i != 10 {
		t.Log("s.i must be equal 10")
		t.Fail()
	}
	// Drain half; popped slots are expected to be zeroed.
	s.popTimes(5)
	if s.data != [10]int{0, 1, 2, 3, 4} {
		t.Log("s.data must be equal [0,1,2,3,4,0,0,0,0,0]")
		t.Fail()
	}
	if s.i != 5 {
		t.Log("s.i must be equal 5")
		t.Fail()
	}
	// Pop past empty; the stack should end cleanly at zero.
	s.popTimes(10)
	if s.data != [10]int{} {
		t.Log("s.data must be equal [0,0,0,0,0,0,0,0,0,0]")
		t.Fail()
	}
	if s.i != 0 {
		t.Log("s.i must be equal 0")
		t.Fail()
	}
}
// pushTimes pushes n consecutive values starting at the current stack
// size, so pushTimes(5) on an empty stack pushes 0..4.
func (s *Stack) pushTimes(n int) {
	size := s.i
	for i := 0; i < n; i++ {
		s.Push(size + i)
	}
}
// popTimes pops n elements off the stack, discarding the results.
func (s *Stack) popTimes(n int) {
	for ; n > 0; n-- {
		s.Pop()
	}
}
|
// +build !excludecodegen
/*
Copyright IBM Corporation 2020
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
)
// TestMakeConstants exercises makeConstants end to end: failure on a
// missing directory, generation for empty and pre-filled directories
// with both the map and consts templates (comparing against golden
// files after stripping the generated timestamp line), and failure
// paths for unreadable inputs and unwritable output directories.
func TestMakeConstants(t *testing.T) {
	t.Run("try to generate code for non existent directory", func(t *testing.T) {
		path := "testdata/nonexistent"
		if err := makeConstants(path, maptemp); err == nil {
			t.Fatalf("Should have failed since the directory %q does not exist.", path)
		}
	})
	t.Run("read empty directory and generate code with maptemp", func(t *testing.T) {
		// Setup
		testparentdir := t.TempDir()
		testdir := filepath.Join(testparentdir, "foobar")
		if err := os.Mkdir(testdir, os.ModePerm); err != nil {
			t.Fatalf("Failed to create the test directory at path %q. Error: %q", testdir, err)
		}
		fpath := filepath.Join(testdir, "constants.go")
		testdatapath := "testdata/maptempemptyskiptimestamp.txt"
		testdatabytes, err := ioutil.ReadFile(testdatapath)
		if err != nil {
			t.Fatalf("Failed to read the testdata at path %q. Error: %q", testdatapath, err)
		}
		want := string(testdatabytes)
		wantNumLines := 8 + 17 // 17 lines for license and spaces
		// Test
		if err := makeConstants(testdir, maptemp); err != nil {
			t.Fatalf("Failed to generate the code for directory %q with maps template Error: %q", testdir, err)
		}
		databytes, err := ioutil.ReadFile(fpath)
		if err != nil {
			t.Fatal("Failed to create the constants.go file (or failed to read it after creation). Error:", err)
		}
		data := string(databytes)
		lines := strings.Split(data, "\n")
		if len(lines) != wantNumLines {
			t.Fatal("Failed to generate the code properly. Expected number of lines:", wantNumLines, "Actual:", len(lines))
		}
		lines = append(lines[:2], lines[3:]...) // Skip the timestamp
		data = strings.Join(lines, "\n")
		if data != want {
			t.Fatal("Failed to generate the code properly. Expected:", want, "Actual:", data)
		}
	})
	t.Run("read empty directory and generate code with conststemp", func(t *testing.T) {
		// Setup
		testparentdir := t.TempDir()
		testdir := filepath.Join(testparentdir, "foobar")
		if err := os.Mkdir(testdir, os.ModePerm); err != nil {
			t.Fatalf("Failed to create the test directory at path %q. Error: %q", testdir, err)
		}
		fpath := filepath.Join(testdir, "constants.go")
		testdatapath := "testdata/conststempemptyskiptimestamp.txt"
		testdatabytes, err := ioutil.ReadFile(testdatapath)
		if err != nil {
			t.Fatalf("Failed to read the testdata at path %q. Error: %q", testdatapath, err)
		}
		want := string(testdatabytes)
		wantNumLines := 8 + 17 // 17 lines for license and spaces
		// Test
		if err := makeConstants(testdir, conststemp); err != nil {
			t.Fatalf("Failed to generate the code for directory %q with consts template Error: %q", testdir, err)
		}
		databytes, err := ioutil.ReadFile(fpath)
		if err != nil {
			t.Fatal("Failed to create the constants.go file (or failed to read it after creation). Error:", err)
		}
		data := string(databytes)
		lines := strings.Split(data, "\n")
		if len(lines) != wantNumLines {
			t.Fatal("Failed to generate the code properly. Expected number of lines:", wantNumLines, "Actual:", len(lines))
		}
		lines = append(lines[:2], lines[3:]...) // Skip the timestamp
		data = strings.Join(lines, "\n")
		if data != want {
			t.Fatal("Failed to generate the code properly. Expected:", want, "Actual:", data)
		}
	})
	t.Run("read filled directory and generate code with maptemp", func(t *testing.T) {
		// Setup: this subtest uses a checked-in fixture directory, so the
		// generated file must be removed before and after the run.
		testdir := "testdata/datafortempfilled"
		fpath := filepath.Join(testdir, "constants.go")
		// Remove the constants.go file if it exists from previous runs.
		if err := os.Remove(fpath); err != nil && !os.IsNotExist(err) {
			t.Fatalf("Failed to remove the file at path %q. Error: %q", fpath, err)
		}
		testdatapath := "testdata/maptempfilledskiptimestamp.txt"
		testdatabytes, err := ioutil.ReadFile(testdatapath)
		if err != nil {
			t.Fatalf("Failed to read the testdata at path %q. Error: %q", testdatapath, err)
		}
		want := string(testdatabytes)
		wantNumLines := 22 + 17 // 17 lines for license and spaces
		// Test
		if err := makeConstants(testdir, maptemp); err != nil {
			t.Fatalf("Failed to generate the code for directory %q with maps template Error: %q", testdir, err)
		}
		defer os.Remove(fpath)
		databytes, err := ioutil.ReadFile(fpath)
		if err != nil {
			t.Fatal("Failed to create the constants.go file (or failed to read it after creation). Error:", err)
		}
		data := string(databytes)
		lines := strings.Split(data, "\n")
		if len(lines) != wantNumLines {
			t.Fatal("Failed to generate the code properly. Expected number of lines:", wantNumLines, "Actual:", len(lines))
		}
		lines = append(lines[:2], lines[3:]...) // Skip the timestamp
		data = strings.Join(lines, "\n")
		if data != want {
			t.Fatal("Failed to generate the code properly. Expected:", want, "Actual:", data)
		}
	})
	t.Run("read filled directory and generate code with conststemp", func(t *testing.T) {
		// Setup
		testdir := "testdata/datafortempfilled"
		fpath := filepath.Join(testdir, "constants.go")
		// Remove the constants.go file if it exists from previous runs.
		if err := os.Remove(fpath); err != nil && !os.IsNotExist(err) {
			t.Fatalf("Failed to remove the file at path %q. Error: %q", fpath, err)
		}
		testdatapath := "testdata/conststempfilledskiptimestamp.txt"
		testdatabytes, err := ioutil.ReadFile(testdatapath)
		if err != nil {
			t.Fatalf("Failed to read the testdata at path %q. Error: %q", testdatapath, err)
		}
		want := string(testdatabytes)
		wantNumLines := 22 + 17 // 17 lines for license and spaces
		// Test
		if err := makeConstants(testdir, conststemp); err != nil {
			t.Fatalf("Failed to generate the code for directory %q with consts template Error: %q", testdir, err)
		}
		defer os.Remove(fpath)
		databytes, err := ioutil.ReadFile(fpath)
		if err != nil {
			t.Fatal("Failed to create the constants.go file (or failed to read it after creation). Error:", err)
		}
		data := string(databytes)
		lines := strings.Split(data, "\n")
		if len(lines) != wantNumLines {
			t.Fatal("Failed to generate the code properly. Expected number of lines:", wantNumLines, "Actual:", len(lines))
		}
		lines = append(lines[:2], lines[3:]...) // Skip the timestamp
		data = strings.Join(lines, "\n")
		if data != want {
			t.Fatal("Failed to generate the code properly. Expected:", want, "Actual:", data)
		}
	})
	// NOTE(review): the two permission-based subtests below are
	// meaningless when the suite runs as root (root ignores mode bits).
	t.Run("generate code from directory containing files that we have no permissions to read", func(t *testing.T) {
		// Setup
		testdir := t.TempDir()
		fpath := filepath.Join(testdir, "foobar")
		if err := ioutil.WriteFile(fpath, []byte("no permission to read this file"), 0); err != nil {
			t.Fatalf("Failed to create the temporary file %q for testing.", fpath)
		}
		// Test
		if err := makeConstants(testdir, conststemp); err == nil {
			t.Fatalf("Should not have succeeded since the directory contains a file %q we don't have permissions to read.", fpath)
		}
	})
	t.Run("generate code from directory that we have no permissions to write to", func(t *testing.T) {
		// Setup
		tempdir := t.TempDir()
		testdir := filepath.Join(tempdir, "foobar")
		if err := os.Mkdir(testdir, 0400); err != nil {
			t.Fatalf("Failed to create the temporary directory %q for testing. Error: %q", testdir, err)
		}
		// Test
		if err := makeConstants(testdir, conststemp); err == nil {
			t.Fatalf("Should not have succeeded since we don't have permissions to write into the directory %q", testdir)
		}
	})
}
// TestMakeTar exercises makeTar: generation from a checked-in fixture
// directory (comparing against a golden file after stripping the
// timestamp line and the tar payload line, both of which vary per run),
// plus failure paths for unreadable inputs and unwritable output
// directories.
func TestMakeTar(t *testing.T) {
	t.Run("make a tar using a filled directory", func(t *testing.T) {
		// Setup
		testdir := "testdata/datafortempfilled"
		fpath := filepath.Join(testdir, "constants.go")
		// Remove the constants.go file if it exists from previous runs.
		if err := os.Remove(fpath); err != nil && !os.IsNotExist(err) {
			t.Fatalf("Failed to remove the file at path %q. Error: %q", fpath, err)
		}
		testdatapath := "testdata/tartempfilledskiptimestampandtar.txt"
		testdatabytes, err := ioutil.ReadFile(testdatapath)
		if err != nil {
			t.Fatalf("Failed to read the testdata at path %q. Error: %q", testdatapath, err)
		}
		want := string(testdatabytes)
		wantNumLines := 6 + 17 // 17 lines for license and spaces
		// Test
		if err := makeTar(testdir); err != nil {
			t.Fatalf("Failed to generate the code for directory %q with tar template Error: %q", testdir, err)
		}
		defer os.Remove(fpath)
		databytes, err := ioutil.ReadFile(fpath)
		if err != nil {
			t.Fatal("Failed to create the constants.go file (or failed to read it after creation). Error:", err)
		}
		data := string(databytes)
		lines := strings.Split(data, "\n")
		if len(lines) != wantNumLines {
			t.Fatal("Failed to generate the code properly. Expected number of lines:", wantNumLines, "Actual:", len(lines))
		}
		lines = append(lines[:2], lines[3:len(lines)-1]...) // Skip the timestamp and the last line since the tar string also has a timestamp
		data = strings.Join(lines, "\n")
		if data != want {
			t.Fatal("Failed to generate the code properly. Expected:", want, "Actual:", data)
		}
	})
	t.Run("make a tar when the directory has files which we have no permissions to read", func(t *testing.T) {
		tempdir := t.TempDir()
		fpath := filepath.Join(tempdir, "nopermstoread")
		if err := ioutil.WriteFile(fpath, []byte("no permission to read this file"), 0); err != nil {
			t.Fatalf("Failed to create the temporary file %q for testing.", fpath)
		}
		if err := makeTar(tempdir); err == nil {
			t.Fatalf("Should not have succeeded since the directory contains a file %q we don't have permissions to read.", fpath)
		}
	})
	t.Run("make a tar in a directory that we have no permissions to write to", func(t *testing.T) {
		// Setup
		tempdir := t.TempDir()
		testdir := filepath.Join(tempdir, "foobar")
		if err := os.Mkdir(testdir, 0400); err != nil {
			t.Fatalf("Failed to create the temporary directory %q for testing. Error: %q", testdir, err)
		}
		// Test
		if err := makeTar(testdir); err == nil {
			t.Fatalf("Should not have succeeded since we don't have permissions to write into the directory %q", testdir)
		}
	})
}
|
package flv
import (
"fmt"
//flvutils "github.com/sigmaxue/streamdump/src/utils"
)
// ScriptData represents the payload of an FLV script-data tag.
type ScriptData struct{}

// Decode consumes the whole buffer and reports how many bytes were
// read; no actual parsing of the script data is performed yet.
func (s *ScriptData) Decode(buffer []byte) (count int, err error) {
	count = len(buffer)
	return count, nil
}
// ToString renders the script data for display; nothing is decoded yet,
// so it always returns the empty string.
// NOTE(review): fmt.Sprintf("") is a no-op (staticcheck S1039), but
// simply returning "" would leave this file's fmt import unused — clean
// both up together.
func (s *ScriptData)ToString() string{
	out := fmt.Sprintf("")
	return out
}
|
package actions
import (
"net/http"
"github.com/nerdynz/datastore"
flow "github.com/nerdynz/flow"
"github.com/nerdynz/schwifty/backend/server/models"
)
// Login authenticates the person described in the request body against
// the padlock session store and responds with the session info as JSON.
// Failures to parse the request are 500s; bad credentials are 401s.
func Login(w http.ResponseWriter, req *http.Request, ctx *flow.Context, store *datastore.Datastore) {
	helper := models.PersonHelper()
	// Email and password are read from the request payload.
	person, err := helper.FromRequest("", ctx.Req)
	if err != nil {
		ctx.ErrorJSON(http.StatusInternalServerError, "Failed to read login details", err)
		return
	}
	sessionInfo, err := ctx.Padlock.LoginReturningInfo(person.Email, person.Password)
	if err != nil {
		ctx.ErrorJSON(http.StatusUnauthorized, "Failed to login. Incorrect username or password", err)
		return
	}
	ctx.JSON(http.StatusOK, sessionInfo)
}
// Logout clears the current padlock session and redirects to the home
// page.
func Logout(w http.ResponseWriter, req *http.Request, ctx *flow.Context, store *datastore.Datastore) {
	ctx.Padlock.Logout()
	ctx.Redirect("/", http.StatusSeeOther)
}
|
package generator_test
import (
"fmt"
"io/ioutil"
"os"
"path"
"testing"
"time"
"github.com/syncromatics/kafmesh/internal/generator"
"github.com/syncromatics/kafmesh/internal/models"
)
// Test_Generator builds a throwaway module with a small protobuf tree
// (two packages, five messages), runs the kafmesh code generator over a
// service definition exercising every component kind (processors with
// inputs/lookups/joins/outputs/persistence, sources, sinks, views, view
// sources and view sinks), then delegates per-artifact verification to
// the validate* helpers.
// NOTE(review): the ioutil.WriteFile errors are ignored, tmpDir is
// never removed, and fmt.Println(tmpDir) looks like leftover debugging.
func Test_Generator(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "Test_Generator")
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(tmpDir)
	ioutil.WriteFile(path.Join(tmpDir, "go.mod"), []byte(`module test
go 1.16`), os.ModePerm)
	// Proto package one: testMesh/testId with Test and Test2.
	protoDir := path.Join(tmpDir, "protos")
	err = os.MkdirAll(protoDir, os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	package1 := path.Join(protoDir, "testMesh", "testId")
	err = os.MkdirAll(package1, os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	ioutil.WriteFile(path.Join(package1, "test.proto"), []byte(`syntax ="proto3";
package testMesh.testId;
message Test {
string name = 1;
}`), os.ModePerm)
	ioutil.WriteFile(path.Join(package1, "test2.proto"), []byte(`syntax ="proto3";
package testMesh.testId;
import "google/protobuf/timestamp.proto";
message Test2 {
string serial = 1;
google.protobuf.Timestamp time = 2;
}`), os.ModePerm)
	// Proto package two: testMesh/testSerial with the details messages.
	package2 := path.Join(protoDir, "testMesh", "testSerial")
	err = os.MkdirAll(package2, os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	ioutil.WriteFile(path.Join(package2, "details.proto"), []byte(`syntax ="proto3";
package testMesh.testSerial;
message Details {
string name = 1;
}`), os.ModePerm)
	ioutil.WriteFile(path.Join(package2, "detailsState.proto"), []byte(`syntax ="proto3";
package testMesh.testSerial;
message DetailsState {
string name = 1;
}`), os.ModePerm)
	ioutil.WriteFile(path.Join(package2, "detailsEnriched.proto"), []byte(`syntax ="proto3";
package testMesh.testSerial;
message DetailsEnriched {
string name = 1;
}`), os.ModePerm)
	// Generator options covering every component kind the generator
	// supports, all emitted into the "defin" output root.
	newPath := path.Join(tmpDir, "defin")
	options := generator.Options{
		Service: &models.Service{
			Name: "testMesh",
			Output: models.OutputSettings{
				Path:    "internal/kafmesh",
				Package: "kafmesh",
				Module:  "test",
			},
			Messages: models.MessageDefinitions{
				Protobuf: []string{
					"../protos",
				},
			},
			Defaults: models.TopicDefaults{
				Partition:   10,
				Replication: 1,
				Retention:   24 * time.Hour,
				Segment:     12 * time.Hour,
				Type:        "protobuf",
			},
		},
		RootPath:        newPath,
		DefinitionsPath: newPath,
		Components: []*models.Component{
			&models.Component{
				Name: "details",
				Processors: []models.Processor{
					models.Processor{
						Name: "enricher",
						Inputs: []models.Input{
							models.Input{
								TopicDefinition: models.TopicDefinition{
									Message: "testId.test",
								},
							},
							models.Input{
								TopicDefinition: models.TopicDefinition{
									Message: "testId.test2",
								},
							},
						},
						Lookups: []models.Lookup{
							models.Lookup{
								TopicDefinition: models.TopicDefinition{
									Message: "testSerial.details",
								},
							},
						},
						Joins: []models.Join{
							models.Join{
								TopicDefinition: models.TopicDefinition{
									Message: "testSerial.details",
								},
							},
						},
						Outputs: []models.Output{
							models.Output{
								TopicDefinition: models.TopicDefinition{
									Message: "testSerial.detailsEnriched",
								},
							},
						},
						Persistence: &models.Persistence{
							TopicDefinition: models.TopicDefinition{
								Message: "testSerial.detailsState",
							},
						},
					},
				},
				Sources: []models.Source{
					models.Source{
						TopicDefinition: models.TopicDefinition{
							Message: "testSerial.details",
						},
					},
				},
				Sinks: []models.Sink{
					models.Sink{
						Name: "Enriched Data Postgres",
						TopicDefinition: models.TopicDefinition{
							Message: "testSerial.detailsEnriched",
						},
					},
				},
				Views: []models.View{
					models.View{
						TopicDefinition: models.TopicDefinition{
							Message: "testSerial.detailsEnriched",
						},
					},
				},
				ViewSources: []models.ViewSource{
					models.ViewSource{
						Name: "test to database",
						TopicDefinition: models.TopicDefinition{
							Message: "testId.test",
						},
					},
				},
				ViewSinks: []models.ViewSink{
					models.ViewSink{
						Name: "test to api",
						TopicDefinition: models.TopicDefinition{
							Message: "testId.test",
						},
					},
				},
			},
		},
	}
	err = generator.Generate(options)
	if err != nil {
		t.Fatal(err)
	}
	// Per-artifact golden checks live in the validate* helpers.
	validateProcessors(newPath, t)
	validateEmitter(newPath, t)
	validateSink(newPath, t)
	validateView(newPath, t)
	validateViewSource(newPath, t)
	validateViewSink(newPath, t)
	validateService(newPath, t)
	validateTopic(newPath, t)
}
|
package resell
import "github.com/selectel/go-selvpcclient/selvpcclient"
// Shared identifiers used by every version of the Resell client.
const (
	// ServiceType contains the name of the Selectel VPC service for which this
	// package is intended.
	ServiceType = "resell"
	// Endpoint contains the base url for all versions of the Resell client.
	Endpoint = selvpcclient.DefaultEndpoint + "/" + ServiceType
	// UserAgent contains the user agent for all versions of the Resell client.
	UserAgent = selvpcclient.DefaultUserAgent
)
|
package csnotes
import (
"fmt"
)
// print prints every number from 1 up to the largest n-digit number
// (e.g. n=2 prints 1 through 99). Numbers are represented as digit
// slices so arbitrarily large n cannot overflow an integer type.
func print(n int) {
	if n <= 0 {
		return
	}
	// number holds one decimal digit per slot, most significant first.
	number := make([]int, n)
	print1ToMaxOfNDigits(number, 0)
}
// print1ToMaxOfNDigits recursively fills number[digit:] with every
// combination of decimal digits, printing each completed combination.
// The all-zero combination prints as a blank line via printNumber.
func print1ToMaxOfNDigits(number []int, digit int) {
	if digit == len(number) {
		printNumber(number)
		return
	}
	for i := 0; i < 10; i++ {
		number[digit] = i
		print1ToMaxOfNDigits(number, digit+1)
	}
}
// printNumber prints the digits of number without leading zeros,
// followed by a newline. An all-zero slice prints just the newline.
func printNumber(number []int) {
	start := 0
	for start < len(number) && number[start] == 0 {
		start++
	}
	for _, digit := range number[start:] {
		fmt.Print(digit)
	}
	fmt.Println()
}
|
package product
import (
"github.com/gingerxman/eel"
b_product "github.com/gingerxman/ginger-product/business/product"
)
// SkuStockConsumption is the REST resource for consuming (PUT) and
// restoring (DELETE) SKU stock counts.
type SkuStockConsumption struct {
	eel.RestResource
}

// Resource returns the unique resource identifier used for routing.
func (this *SkuStockConsumption) Resource() string {
	return "product.sku_stock_consumption"
}
// GetParameters declares the expected request parameters per HTTP verb:
// both PUT and DELETE take an integer sku_id and an integer count.
func (this *SkuStockConsumption) GetParameters() map[string][]string {
	return map[string][]string{
		"PUT":    {"sku_id:int", "count:int"},
		"DELETE": {"sku_id:int", "count:int"},
	}
}
// Put consumes stock: it decrements the stock of sku_id by count via
// the UpdateSkuStockService and returns an empty JSON object on
// success or a use_fail error payload.
func (this *SkuStockConsumption) Put(ctx *eel.Context) {
	req := ctx.Request
	// NOTE(review): parse errors are discarded; missing or malformed
	// parameters fall through as zero values — confirm this is intended.
	skuId, _ := req.GetInt("sku_id")
	count, _ := req.GetInt("count")
	bCtx := ctx.GetBusinessContext()
	err := b_product.NewUpdateSkuStockService(bCtx).Use(skuId, count)
	if err != nil {
		ctx.Response.Error("sku_stock_consumption:use_fail", err.Error())
	} else {
		ctx.Response.JSON(eel.Map{})
	}
}
// Delete restores stock: it adds count back to the stock of sku_id via
// the UpdateSkuStockService and returns an empty JSON object on
// success or an add_fail error payload.
func (this *SkuStockConsumption) Delete(ctx *eel.Context) {
	req := ctx.Request
	// NOTE(review): parse errors are discarded, mirroring Put — confirm.
	skuId, _ := req.GetInt("sku_id")
	count, _ := req.GetInt("count")
	bCtx := ctx.GetBusinessContext()
	err := b_product.NewUpdateSkuStockService(bCtx).Add(skuId, count)
	if err != nil {
		ctx.Response.Error("sku_stock_consumption:add_fail", err.Error())
	} else {
		ctx.Response.JSON(eel.Map{})
	}
}
|
package mc_msgpack
import (
"io"
gocodec "gx/ipfs/QmVTAmbCaPqdfbmpWDCJMQNFxbyJoG2USFsumXmTWY5LFp/go-codec/codec"
mc "gx/ipfs/QmYMiyZRYDmhMr2phMc4FGrYbsyzvR751BgeobnWroiq2z/go-multicodec"
)
// HeaderPath is the multicodec path identifying msgpack content.
const HeaderPath = "/msgpack"

// Header is the wire-encoded multicodec header for HeaderPath.
var Header = mc.Header([]byte(HeaderPath))

// codec implements both mc.Codec and mc.Multicodec on top of go-codec's
// msgpack handle; mc selects whether the multicodec header is
// written/consumed around each value.
type codec struct {
	mc     bool
	handle *gocodec.MsgpackHandle
}

// Codec returns a plain msgpack codec (no multicodec header) using the
// given handle.
func Codec(h *gocodec.MsgpackHandle) mc.Codec {
	return &codec{
		mc:     false,
		handle: h,
	}
}

// DefaultMsgpackHandle returns a zero-configured msgpack handle.
func DefaultMsgpackHandle() *gocodec.MsgpackHandle {
	return &gocodec.MsgpackHandle{}
}

// Multicodec returns a msgpack codec that frames every value with the
// multicodec header.
func Multicodec(h *gocodec.MsgpackHandle) mc.Multicodec {
	return &codec{
		mc:     true,
		handle: h,
	}
}
// Encoder returns an encoder writing msgpack values to w, prefixing
// each value with the multicodec header when the codec is in
// multicodec mode.
func (c *codec) Encoder(w io.Writer) mc.Encoder {
	return &encoder{
		w:   w,
		mc:  c.mc,
		enc: gocodec.NewEncoder(w, c.handle),
	}
}

// Decoder returns a decoder reading msgpack values from r, consuming
// the multicodec header before each value when in multicodec mode.
func (c *codec) Decoder(r io.Reader) mc.Decoder {
	return &decoder{
		r:   r,
		mc:  c.mc,
		dec: gocodec.NewDecoder(r, c.handle),
	}
}

// Header returns the multicodec header bytes for msgpack.
func (c *codec) Header() []byte {
	return Header
}
type encoder struct {
w io.Writer
mc bool
enc *gocodec.Encoder
}
type decoder struct {
r io.Reader
mc bool
dec *gocodec.Decoder
}
// Encode writes v as msgpack to the underlying writer, preceded by the
// multicodec header when this encoder was built via Multicodec.
func (c *encoder) Encode(v interface{}) error {
	if !c.mc {
		return c.enc.Encode(v)
	}
	// Multicodec mode: emit the header before the payload.
	if _, err := c.w.Write(Header); err != nil {
		return err
	}
	return c.enc.Encode(v)
}
// Decode reads a msgpack value into v, first consuming the multicodec
// header when this decoder was built via Multicodec.
func (c *decoder) Decode(v interface{}) error {
	if !c.mc {
		return c.dec.Decode(v)
	}
	// Multicodec mode: the header must precede the payload.
	if err := mc.ConsumeHeader(c.r, Header); err != nil {
		return err
	}
	return c.dec.Decode(v)
}
|
package upload
import (
"fmt"
"io"
"io/ioutil"
"launchpad.net/goamz/aws"
"launchpad.net/goamz/s3"
"mime"
"path/filepath"
"log"
)
const (
	// BUCKET_NAME is the S3 bucket all profile pictures are stored in.
	BUCKET_NAME = "dotaprofiles"
)

// PicsBucket is the shared S3 bucket handle, initialised by AWSInit.
var PicsBucket *s3.Bucket

// AWSInit initialises PicsBucket from explicit credentials, or from the
// environment (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY) when either
// argument is empty. It terminates the process if no credentials can be
// resolved.
func AWSInit(access_key string, secret_key string) {
	var auth aws.Auth
	var err error
	if access_key != "" && secret_key != "" {
		// NOTE(review): unkeyed composite literal — assumes aws.Auth's
		// first two fields are access key then secret key; confirm
		// against the goamz aws package.
		auth = aws.Auth{access_key, secret_key}
	} else {
		auth, err = aws.EnvAuth()
		if err != nil {
			log.Fatal(err)
		}
	}
	s := s3.New(auth, aws.USEast)
	PicsBucket = s.Bucket(BUCKET_NAME)
}
// Get_Mime returns the MIME type of file based on its extension.
// It returns an error when the file has no extension or when no MIME
// type is registered for that extension.
func Get_Mime(file string) (string, error) {
	ext := filepath.Ext(file)
	if ext == "" {
		return "", fmt.Errorf("Failed to get file extension of %s", file)
	}
	mime_t := mime.TypeByExtension(ext)
	if mime_t == "" {
		// Fixed typo in the error message: "fine" -> "find".
		return "", fmt.Errorf("Failed to find a mimetype for %s", file)
	}
	return mime_t, nil
}
// Get_New_Name replaces the base name of file with new_name, keeping the
// original extension. It fails when file has no extension at all.
func Get_New_Name(file string, new_name string) (string, error) {
	ext := filepath.Ext(file)
	if len(ext) == 0 {
		return "", fmt.Errorf("Failed to get file extension of %s", file)
	}
	return new_name + ext, nil
}
// Upload_S3 reads all of file into memory and stores it in PicsBucket
// under new_name with a publicly readable ACL. The content type is
// derived from new_name's extension; AWSInit must have been called first.
func Upload_S3(file io.Reader, new_name string) error {
	// Whole payload is buffered in memory; acceptable for small images.
	data, err := ioutil.ReadAll(file)
	if err != nil {
		return err
	}
	mime_t, err := Get_Mime(new_name)
	if err != nil {
		return err
	}
	err = PicsBucket.Put(new_name, data, mime_t, s3.PublicRead)
	return err
}
|
package rbac
import (
"errors"
"reflect"
)
// All known role names.
const (
	admin     string = "admin"
	manager   string = "manager"
	moderator string = "moderator"
	customer  string = "customer"
	guest     string = "guest"
)

var (
	// Roles holds every available role.
	Roles roles = roles{admin, manager, moderator, customer, guest}
	// Possible errors returned by CheckRoleAccess.
	errorAlreadyLogged = errors.New("You already logged")
	errorAlowedAccess  = errors.New("Not alowed access")
	errorItIsNotRole   = errors.New("It is not role")
)

// AlowedRoles maps a role name to whether access is permitted for it.
type AlowedRoles map[string]bool

// roles groups the known role names; use the Roles package variable.
type roles struct {
	admin     string
	manager   string
	moderator string
	customer  string
	guest     string
}
// Admin returns the admin role name.
func (r roles) Admin() string {
	return r.admin
}

// Manager returns the manager role name.
func (r roles) Manager() string {
	return r.manager
}

// Moderator returns the moderator role name.
func (r roles) Moderator() string {
	return r.moderator
}

// Customer returns the customer role name.
func (r roles) Customer() string {
	return r.customer
}

// Guest returns the guest (not logged in) role name.
func (r roles) Guest() string {
	return r.guest
}

// LoggedOnUsers returns the set of roles corresponding to logged-in users.
func (r roles) LoggedOnUsers() AlowedRoles {
	return AlowedRoles{r.admin: true, r.customer: true, r.moderator: true, r.manager: true}
}

// StaffUsers returns the staff roles (admin, moderator, manager).
func (r roles) StaffUsers() AlowedRoles {
	return AlowedRoles{r.admin: true, r.moderator: true, r.manager: true}
}

// StaffManagersUsers returns the management staff roles (admin, manager).
func (r roles) StaffManagersUsers() AlowedRoles {
	return AlowedRoles{r.admin: true, r.manager: true}
}

// AdminsUsers returns the set containing only the admin role.
func (r roles) AdminsUsers() AlowedRoles {
	return AlowedRoles{r.admin: true}
}

// GuestUsers returns the set containing only the guest role.
func (r roles) GuestUsers() AlowedRoles {
	return AlowedRoles{r.guest: true}
}

// AllUsers returns every possible user role.
func (r roles) AllUsers() AlowedRoles {
	return AlowedRoles{r.admin: true, r.customer: true, r.manager: true, r.moderator: true, r.guest: true}
}

// AllClient returns an empty set, which CheckRoleAccess treats as
// "no restriction".
func (roles) AllClient() AlowedRoles {
	return AlowedRoles{}
}
// CheckRoleAccess checks whether role may access a resource restricted to
// alowedRoles. An empty alowedRoles set means the resource is open to all.
// It returns errorItIsNotRole for unknown role names, errorAlreadyLogged
// when a logged-in user hits a guest-only resource, and errorAlowedAccess
// when the role is simply not in the allowed set.
func CheckRoleAccess(alowedRoles AlowedRoles, role string) error {
	if len(alowedRoles) == 0 {
		return nil
	}
	// Replaced the if/else-if chain with a switch and dropped the
	// non-idiomatic `!= true` comparisons.
	switch {
	case !checkItIsRole(role):
		return errorItIsNotRole
	case checkUserAlredyLogged(alowedRoles, role):
		return errorAlreadyLogged
	case !alowedRoles[role]:
		return errorAlowedAccess
	}
	return nil
}
// checkUserAlredyLogged reports whether a logged-in user is hitting a
// guest-only resource: the allowed set equals the guest set and the role
// itself is not permitted by it.
func checkUserAlredyLogged(alowedRoles AlowedRoles, role string) bool {
	return reflect.DeepEqual(Roles.GuestUsers(), alowedRoles) && !alowedRoles[role]
}

// checkItIsRole reports whether role is one of the known role names.
func checkItIsRole(role string) bool {
	return Roles.AllUsers()[role]
}
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"strconv"
utilsstrings "github.com/yamajik/kess/utils/strings"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
// Default merges the runtime's canonical labels into ObjectMeta.Labels,
// creating the label map when it is nil.
func (r *Runtime) Default() {
	var (
		labels = r.Labels()
	)
	if r.ObjectMeta.Labels == nil {
		r.ObjectMeta.Labels = make(map[string]string)
	}
	for k, v := range labels {
		r.ObjectMeta.Labels[k] = v
	}
}

// DefaultStatus initialises nil status maps and the Ready string so that
// callers can use them without nil checks.
func (r *Runtime) DefaultStatus() {
	if r.Status.Functions == nil {
		r.Status.Functions = make(map[string]RuntimeConfigMap)
	}
	if r.Status.Libraries == nil {
		r.Status.Libraries = make(map[string]RuntimeConfigMap)
	}
	if r.Status.Ready == "" {
		r.Status.Ready = DefaultReady
	}
}

// Labels returns the canonical label set identifying this runtime.
func (r *Runtime) Labels() map[string]string {
	return map[string]string{
		"kess-type":    TypeRuntime,
		"kess-runtime": r.Name,
	}
}

// NamespacedName returns the runtime's own namespace/name key.
func (r *Runtime) NamespacedName() types.NamespacedName {
	return types.NamespacedName{
		Name:      r.Name,
		Namespace: r.Namespace,
	}
}

// ConfigMapNamespacedName returns the namespace/name key for a ConfigMap
// called name in the runtime's namespace.
func (r *Runtime) ConfigMapNamespacedName(name string) types.NamespacedName {
	return types.NamespacedName{
		Name:      name,
		Namespace: r.Namespace,
	}
}
// Deployment builds the desired appsv1.Deployment for this runtime: a
// single container running the runtime image, with every function and
// library ConfigMap from the status mounted as a volume.
func (r *Runtime) Deployment() appsv1.Deployment {
	var (
		volumes []apiv1.Volume
		mounts  []apiv1.VolumeMount
	)
	labels := r.Labels()
	// Functions ConfigMap volumes: one volume + mount per function.
	// NOTE(review): volume names reuse the ConfigMap names; assumes
	// function and library names never collide — confirm.
	{
		for _, fn := range r.Status.Functions {
			volumes = append(volumes, apiv1.Volume{
				Name: fn.Name,
				VolumeSource: apiv1.VolumeSource{
					ConfigMap: &apiv1.ConfigMapVolumeSource{
						LocalObjectReference: apiv1.LocalObjectReference{
							Name: fn.Name,
						},
					},
				},
			})
			mounts = append(mounts, apiv1.VolumeMount{
				Name:      fn.Name,
				MountPath: fn.Mount,
			})
		}
	}
	// Libraries ConfigMap volumes: one volume + mount per library.
	{
		for _, lib := range r.Status.Libraries {
			volumes = append(volumes, apiv1.Volume{
				Name: lib.Name,
				VolumeSource: apiv1.VolumeSource{
					ConfigMap: &apiv1.ConfigMapVolumeSource{
						LocalObjectReference: apiv1.LocalObjectReference{
							Name: lib.Name,
						},
					},
				},
			})
			mounts = append(mounts, apiv1.VolumeMount{
				Name:      lib.Name,
				MountPath: lib.Mount,
			})
		}
	}
	port := apiv1.ContainerPort{
		Name:          r.Spec.PortName,
		ContainerPort: r.Spec.Port,
		Protocol:      apiv1.ProtocolTCP,
	}
	container := apiv1.Container{
		Name:         r.Name,
		Image:        r.Spec.Image,
		Command:      r.Spec.Command,
		Ports:        []apiv1.ContainerPort{port},
		VolumeMounts: mounts,
	}
	template := apiv1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Name:      r.Name,
			Namespace: r.Namespace,
			Labels:    labels,
		},
		Spec: apiv1.PodSpec{
			Volumes:    volumes,
			Containers: []apiv1.Container{container},
		},
	}
	deployment := appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      r.Name,
			Namespace: r.Namespace,
			Labels:    labels,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Template: template,
			Replicas: r.Spec.Replicas,
		},
	}
	return deployment
}
// UpdateDeployment copies the desired metadata and spec onto an existing
// Deployment object (typically fetched from the cluster before an update).
func (r *Runtime) UpdateDeployment(out *appsv1.Deployment) {
	in := r.Deployment()
	out.ObjectMeta = in.ObjectMeta
	out.Spec = in.Spec
}

// Service builds the desired ClusterIP Service exposing the runtime's
// port, selecting pods by the runtime's canonical labels.
func (r *Runtime) Service() apiv1.Service {
	labels := r.Labels()
	port := apiv1.ServicePort{
		Name:       r.Spec.PortName,
		Port:       r.Spec.Port,
		TargetPort: intstr.FromInt(int(r.Spec.Port)),
		Protocol:   apiv1.ProtocolTCP,
	}
	service := apiv1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      r.Name,
			Namespace: r.Namespace,
			Labels:    labels,
		},
		Spec: apiv1.ServiceSpec{
			Selector:  labels,
			ClusterIP: r.Spec.ClusterIP,
			Ports:     []apiv1.ServicePort{port},
		},
	}
	return service
}

// UpdateService copies the desired metadata and spec onto an existing
// Service object.
func (r *Runtime) UpdateService(out *apiv1.Service) {
	in := r.Service()
	out.ObjectMeta = in.ObjectMeta
	out.Spec = in.Spec
}
// UpdateStatusFunctions records (or replaces) the function's ConfigMap
// entry in the runtime status, keyed by ConfigMap name.
func (r *Runtime) UpdateStatusFunctions(fn *Function) {
	r.DefaultStatus()
	runtimeConfigMap := fn.RuntimeConfigMap()
	r.Status.Functions[runtimeConfigMap.Name] = runtimeConfigMap
}
// DeleteStatusFunctions removes the function's ConfigMap entry from the
// runtime status. It is a no-op when no functions are recorded.
func (r *Runtime) DeleteStatusFunctions(fn *Function) {
	r.DefaultStatus()
	if len(r.Status.Functions) == 0 {
		return
	}
	runtimeConfigMap := fn.RuntimeConfigMap()
	// Bug fix: this previously deleted from Status.Libraries (copy/paste
	// from DeleteStatusLibraries), leaving stale function entries behind.
	delete(r.Status.Functions, runtimeConfigMap.Name)
}
// UpdateStatusLibraries records (or replaces) the library's ConfigMap
// entry in the runtime status, keyed by ConfigMap name.
func (r *Runtime) UpdateStatusLibraries(lib *Library) {
	r.DefaultStatus()
	runtimeConfigMap := lib.RuntimeConfigMap()
	r.Status.Libraries[runtimeConfigMap.Name] = runtimeConfigMap
}

// DeleteStatusLibraries removes the library's ConfigMap entry from the
// runtime status. It is a no-op when no libraries are recorded.
func (r *Runtime) DeleteStatusLibraries(lib *Library) {
	r.DefaultStatus()
	if len(r.Status.Libraries) == 0 {
		return
	}
	runtimeConfigMap := lib.RuntimeConfigMap()
	delete(r.Status.Libraries, runtimeConfigMap.Name)
}

// UpdateStatusReady renders the runtime's Ready string from the
// deployment's replica counters using the spec's ReadyFormat template.
func (r *Runtime) UpdateStatusReady(deploy *appsv1.Deployment) {
	r.DefaultStatus()
	r.Status.Ready = utilsstrings.Format(r.Spec.ReadyFormat, map[string]interface{}{
		"Replicas":            strconv.Itoa(int(deploy.Status.Replicas)),
		"UpdatedReplicas":     strconv.Itoa(int(deploy.Status.UpdatedReplicas)),
		"ReadyReplicas":       strconv.Itoa(int(deploy.Status.ReadyReplicas)),
		"AvailableReplicas":   strconv.Itoa(int(deploy.Status.AvailableReplicas)),
		"UnavailableReplicas": strconv.Itoa(int(deploy.Status.UnavailableReplicas)),
	})
}
|
package tsrv
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00400101 is the XML root for the ISO 20022 tsrv.004.001.01
// message (undertaking amendment request).
type Document00400101 struct {
	XMLName xml.Name                        `xml:"urn:iso:std:iso:20022:tech:xsd:tsrv.004.001.01 Document"`
	Message *UndertakingAmendmentRequestV01 `xml:"UdrtkgAmdmntReq"`
}

// AddMessage allocates the message payload and returns it for population.
func (d *Document00400101) AddMessage() *UndertakingAmendmentRequestV01 {
	d.Message = new(UndertakingAmendmentRequestV01)
	return d.Message
}

// The UndertakingAmendmentRequest message is sent by the party that requested issuance of the undertaking (applicant or obligor) to the party that issued the undertaking to request issuance of a proposed amendment to the undertaking. The undertaking could be a demand guarantee, standby letter of credit, counter-undertaking (counter-guarantee or counter-standby), or suretyship undertaking. The message provides details on proposed changes to the undertaking, for example, to the expiry date, amount, and/or terms and conditions. It may also be used to request termination or cancellation of the undertaking.
type UndertakingAmendmentRequestV01 struct {
	// Details related to the request for an amendment of an undertaking.
	UndertakingAmendmentRequestDetails *iso20022.Amendment3 `xml:"UdrtkgAmdmntReqDtls"`
	// Instructions specific to the bank receiving the message.
	InstructionsToBank []*iso20022.Max2000Text `xml:"InstrsToBk,omitempty"`
	// Digital signature of the undertaking amendment request.
	DigitalSignature *iso20022.PartyAndSignature2 `xml:"DgtlSgntr,omitempty"`
}

// AddUndertakingAmendmentRequestDetails allocates and returns the
// amendment details sub-message.
func (u *UndertakingAmendmentRequestV01) AddUndertakingAmendmentRequestDetails() *iso20022.Amendment3 {
	u.UndertakingAmendmentRequestDetails = new(iso20022.Amendment3)
	return u.UndertakingAmendmentRequestDetails
}

// AddInstructionsToBank appends one free-text instruction for the
// receiving bank.
func (u *UndertakingAmendmentRequestV01) AddInstructionsToBank(value string) {
	u.InstructionsToBank = append(u.InstructionsToBank, (*iso20022.Max2000Text)(&value))
}

// AddDigitalSignature allocates and returns the digital signature element.
func (u *UndertakingAmendmentRequestV01) AddDigitalSignature() *iso20022.PartyAndSignature2 {
	u.DigitalSignature = new(iso20022.PartyAndSignature2)
	return u.DigitalSignature
}
|
package dogmatiqapp
import (
"github.com/dogmatiq/dogma"
"github.com/koden-km/dogma-app-setup/customer"
)
// App is an implementation of dogma.Application for the practice app.
type App struct {
	// customerAggregate handles customer-related commands and events.
	customerAggregate customer.Aggregate
}

// Configure configures the Dogma engine for this application: it sets the
// application name and registers the customer aggregate.
func (a *App) Configure(c dogma.ApplicationConfigurer) {
	c.Name("dogmaticapp")
	c.RegisterAggregate(a.customerAggregate)
}
|
package rest
import (
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/jinmukeji/jiujiantang-services/pkg/rest"
proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/core/v1"
"github.com/kataras/iris/v12"
)
// ClientAuthReq is the request body for client authorization.
type ClientAuthReq struct {
	ClientID      string `json:"client_id"`
	SecretKeyHash string `json:"secret_key_hash"`
	Seed          string `json:"seed"`
}

// ClientAuth is the authorization response: a signed token plus its
// expiry timestamp.
type ClientAuth struct {
	Authorization string    `json:"authorization"`
	ExpiredAt     time.Time `json:"expired_at"`
}
// ClientAuth authorizes a client: it validates the ClientAuthReq against
// the RPC service, then returns a signed JWT and its expiry time.
func (h *v2Handler) ClientAuth(ctx iris.Context) {
	var clientAuth ClientAuthReq
	err := ctx.ReadJSON(&clientAuth)
	if err != nil {
		writeError(ctx, wrapError(ErrParsingRequestFailed, "", err), false)
		return
	}
	req := new(proto.ClientAuthRequest)
	req.ClientId = clientAuth.ClientID
	req.SecretKeyHash = clientAuth.SecretKeyHash
	req.Seed = clientAuth.Seed
	resp, errClientAuth := h.rpcSvc.ClientAuth(
		newRPCContext(ctx), req,
	)
	if errClientAuth != nil {
		writeRPCInternalError(ctx, errClientAuth, false)
		return
	}
	jwtToken, errBuildJwtToken, expiredAt := h.jwtMiddleware.BuildJwtToken(jwt.SigningMethodHS256, resp.ClientId, resp.Zone, resp.Name, resp.CustomizedCode)
	if errBuildJwtToken != nil {
		// Bug fix: wrap the actual token-build error (previously wrapped
		// the stale ReadJSON err, which is nil at this point) and return,
		// so a success body is no longer written after the error response.
		writeError(ctx, wrapError(ErrBuildJwtToken, "", errBuildJwtToken), false)
		return
	}
	rest.WriteOkJSON(ctx, ClientAuth{
		Authorization: jwtToken,
		ExpiredAt:     expiredAt.UTC(),
	})
}
|
package serv
import (
"encoding/json"
"fmt"
"log"
"net/http"
"io/ioutil"
simplejson "github.com/bitly/go-simplejson"
"github.com/chenyoufu/esql/g"
"github.com/chenyoufu/esql/sp"
"github.com/toolkits/file"
)
// renderJSON marshals v (indented when pretty is set) and writes it to w
// with a JSON content type. Marshal failures become a 500 response.
func renderJSON(w http.ResponseWriter, v interface{}, pretty bool) {
	marshal := json.Marshal
	if pretty {
		marshal = func(x interface{}) ([]byte, error) {
			return json.MarshalIndent(x, "", " ")
		}
	}
	bs, err := marshal(v)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.Write(bs)
}
// translate converts an SQL query — taken from the "sql" query parameter
// on GET, or from the raw body on POST — into an Elasticsearch DSL
// document and renders the result as JSON. Pass pretty=1 to pretty-print.
func translate(w http.ResponseWriter, r *http.Request) {
	m := make(map[string]interface{}, 1)
	// Bug fix: the pretty flag was read from the "sql" query parameter,
	// so pretty-printing only triggered when the SQL text was "1".
	// It now reads the intended "pretty" parameter.
	pretty := r.URL.Query().Get("pretty")
	var sql string
	switch r.Method {
	case "GET":
		sql = r.URL.Query().Get("sql")
		if len(sql) == 0 {
			http.Error(w, fmt.Errorf("sql param error").Error(), http.StatusBadRequest)
			return
		}
	case "POST":
		defer r.Body.Close()
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		sql = string(body)
	}
	m["sql"] = sql
	// Translation errors are reported in the response body rather than as
	// an HTTP error, so the caller sees the offending SQL alongside them.
	dsl, err := sp.EsDsl(sql)
	if err != nil {
		m["err"] = err.Error()
	} else {
		js, _ := simplejson.NewJson([]byte(dsl))
		m["dsl"] = js.MustMap()
	}
	renderJSON(w, m, pretty == "1")
}
// configRoutes registers all HTTP handlers on the default mux:
// "/" (SQL translation), plus health, version, and workdir probes.
func configRoutes() {
	http.HandleFunc("/", translate)
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		m := make(map[string]string, 1)
		m["health"] = "good"
		renderJSON(w, m, false)
	})
	http.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
		m := make(map[string]string, 1)
		m["version"] = g.VERSION
		renderJSON(w, m, false)
	})
	http.HandleFunc("/workdir", func(w http.ResponseWriter, r *http.Request) {
		m := make(map[string]string, 1)
		m["workdir"] = file.SelfDir()
		renderJSON(w, m, false)
	})
}
// Start runs the HTTP server on the configured listen address. It returns
// silently when HTTP is disabled or no address is configured, and exits
// the process if the listener fails.
func Start() {
	if !g.Config().HTTP.Enabled {
		return
	}
	addr := g.Config().HTTP.Listen
	if addr == "" {
		return
	}
	configRoutes()
	log.Println("http listening", addr)
	// Blocks for the lifetime of the server.
	err := http.ListenAndServe(addr, nil)
	if err != nil {
		log.Fatalln(err)
	}
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cli
import (
"context"
"strings"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/gosuri/uitable"
"github.com/spf13/cobra"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
commontypes "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/utils/common"
cmdutil "github.com/oam-dev/kubevela/pkg/utils/util"
)
// AllNamespace lists apps in all namespaces when set via -A.
var AllNamespace bool

// LabelSelector filters apps by a label selector (-l).
var LabelSelector string

// FieldSelector filters apps by a field selector (--field-selector).
var FieldSelector string
// NewListCommand creates the `ls` command (aliased `list`) that prints all
// vela applications, honouring the namespace/env flags and the package
// level selector flags.
func NewListCommand(c common.Args, order string, ioStreams cmdutil.IOStreams) *cobra.Command {
	ctx := context.Background()
	cmd := &cobra.Command{
		Use:                   "ls",
		Aliases:               []string{"list"},
		DisableFlagsInUseLine: true,
		Short:                 "List applications.",
		Long:                  "List all vela applications.",
		Example:               `vela ls`,
		RunE: func(cmd *cobra.Command, args []string) error {
			newClient, err := c.GetClient()
			if err != nil {
				return err
			}
			namespace, err := GetFlagNamespaceOrEnv(cmd, c)
			if err != nil {
				return err
			}
			// An empty namespace means "all namespaces" to the lister.
			if AllNamespace {
				namespace = ""
			}
			return printApplicationList(ctx, newClient, namespace, ioStreams)
		},
		Annotations: map[string]string{
			types.TagCommandOrder: order,
			types.TagCommandType:  types.TypeStart,
		},
	}
	addNamespaceAndEnvArg(cmd)
	cmd.Flags().BoolVarP(&AllNamespace, "all-namespaces", "A", false, "If true, check the specified action in all namespaces.")
	cmd.Flags().StringVarP(&LabelSelector, "selector", "l", LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2).")
	cmd.Flags().StringVar(&FieldSelector, "field-selector", FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2).")
	return cmd
}
// printApplicationList renders the application table for namespace to the
// given IO streams. An empty namespace lists across all namespaces.
func printApplicationList(ctx context.Context, c client.Reader, namespace string, ioStreams cmdutil.IOStreams) error {
	table, err := buildApplicationListTable(ctx, c, namespace)
	if err == nil {
		ioStreams.Info(table.String())
	}
	return err
}
// buildApplicationListTable lists applications in namespace (all when
// empty), applies the package-level label/field selectors, and renders
// one row per component — the first row of each app carries the app name,
// later rows carry tree connectors.
func buildApplicationListTable(ctx context.Context, c client.Reader, namespace string) (*uitable.Table, error) {
	table := newUITable()
	header := []interface{}{"APP", "COMPONENT", "TYPE", "TRAITS", "PHASE", "HEALTHY", "STATUS", "CREATED-TIME"}
	if AllNamespace {
		header = append([]interface{}{"NAMESPACE"}, header...)
	}
	table.AddRow(header...)
	labelSelector := labels.NewSelector()
	if len(LabelSelector) > 0 {
		selector, err := labels.Parse(LabelSelector)
		if err != nil {
			return nil, err
		}
		labelSelector = selector
	}
	applist := v1beta1.ApplicationList{}
	if err := c.List(ctx, &applist, client.InNamespace(namespace), &client.ListOptions{LabelSelector: labelSelector}); err != nil {
		// Not-found means nothing to list; return the header-only table.
		if apierrors.IsNotFound(err) {
			return table, nil
		}
		return nil, err
	}
	// Field selectors are applied client-side over the fetched items.
	if len(FieldSelector) > 0 {
		fieldSelector, err := fields.ParseSelector(FieldSelector)
		if err != nil {
			return nil, err
		}
		var objects []runtime.Object
		for i := range applist.Items {
			objects = append(objects, &applist.Items[i])
		}
		applist.Items = objectsToApps(utils.FilterObjectsByFieldSelector(objects, fieldSelector))
	}
	for _, a := range applist.Items {
		// Index component statuses by name for the per-component rows.
		service := map[string]commontypes.ApplicationComponentStatus{}
		for _, s := range a.Status.Services {
			service[s.Name] = s
		}
		// Apps without components still get a single (mostly empty) row.
		if len(a.Spec.Components) == 0 {
			if AllNamespace {
				table.AddRow(a.Namespace, a.Name, "", "", "", a.Status.Phase, "", "", a.CreationTimestamp)
			} else {
				table.AddRow(a.Name, "", "", "", a.Status.Phase, "", "", a.CreationTimestamp)
			}
			continue
		}
		for idx, cmp := range a.Spec.Components {
			var appName = a.Name
			// Components after the first show tree connectors in place of
			// the app name.
			if idx > 0 {
				appName = "├─"
				if idx == len(a.Spec.Components)-1 {
					appName = "└─"
				}
			}
			var healthy, status string
			if s, ok := service[cmp.Name]; ok {
				healthy = getHealthString(s.Healthy)
				status = s.Message
			}
			var traits []string
			for _, tr := range cmp.Traits {
				traits = append(traits, tr.Type)
			}
			if AllNamespace {
				table.AddRow(a.Namespace, appName, cmp.Name, cmp.Type, strings.Join(traits, ","), a.Status.Phase, healthy, status, a.CreationTimestamp)
			} else {
				table.AddRow(appName, cmp.Name, cmp.Type, strings.Join(traits, ","), a.Status.Phase, healthy, status, a.CreationTimestamp)
			}
		}
	}
	return table, nil
}
// getHealthString maps a component health flag to its display string.
func getHealthString(healthy bool) string {
	if !healthy {
		return "unhealthy"
	}
	return "healthy"
}
// objectsToApps converts a slice of runtime objects into Applications,
// silently skipping anything that is not a *v1beta1.Application.
func objectsToApps(objs []runtime.Object) []v1beta1.Application {
	apps := make([]v1beta1.Application, 0)
	for _, o := range objs {
		if app, ok := o.(*v1beta1.Application); ok {
			apps = append(apps, *app)
		}
	}
	return apps
}
|
package main
import "fmt"
// main exercises the word dictionary (Constructor/AddWord/Search defined
// elsewhere in this package) with '.' wildcard queries; earlier manual
// test cases are kept below, commented out.
func main() {
	/*
		obj := Constructor()
		obj.AddWord("bad")
		obj.AddWord("dad")
		obj.AddWord("mad")
		fmt.Printf("search pad: %t\n", obj.Search("pad"))
		fmt.Printf("search bad: %t\n", obj.Search("bad"))
		fmt.Printf("search .ad: %t\n", obj.Search(".ad"))
		fmt.Printf("search b..: %t\n", obj.Search("b.."))
	*/
	/*
		obj1 := Constructor()
		obj1.AddWord("a")
		obj1.AddWord("a")
		fmt.Printf("search .: %t\n", obj1.Search("."))
		fmt.Printf("search a: %t\n", obj1.Search("a"))
		fmt.Printf("search aa: %t\n", obj1.Search("aa"))
		fmt.Printf("search .a: %t\n", obj1.Search(".a"))
		fmt.Printf("search a.: %t\n", obj1.Search("a."))
	*/
	obj1 := Constructor()
	obj1.AddWord("at")
	obj1.AddWord("and")
	obj1.AddWord("an")
	obj1.AddWord("add")
	fmt.Printf("search a: %t\n", obj1.Search("a"))
	fmt.Printf("search .at: %t\n", obj1.Search(".at"))
	obj1.AddWord("bat")
	fmt.Printf("search .at: %t\n", obj1.Search(".at"))
	fmt.Printf("search an.: %t\n", obj1.Search("an."))
	fmt.Printf("search a.d.: %t\n", obj1.Search("a.d."))
	fmt.Printf("search b.: %t\n", obj1.Search("b."))
	fmt.Printf("search a.d: %t\n", obj1.Search("a.d"))
	fmt.Printf("search .: %t\n", obj1.Search("."))
}
|
// Package interfaces ...
//nolint:misspell // I know, BUT, I cant fix it
package interfaces
import (
"io"
"sync"
"time"
"awesome-dragon.science/go/goGoGameBot/internal/config/tomlconf"
)
// GameManager handles the lifecycle of a collection of games.
type GameManager interface {
	ReloadGames(configs []tomlconf.Game) // Reload the games on this Manager with the given configs
	GetGameFromName(name string) Game    // get the Game instance on this Manager that has the given name, or nil
	GameExists(name string) bool         // check whether or not this Manager has a Game with this name
	AddGame(game Game) error             // add a Game to this manager (game names should be case sensitive and unique)
	ForEachGame(f func(Game), skip []Game)
	StopAllGames()
}

// Game represents a runnable game server: it can be run, stopped, written
// to, and notified of chat events.
type Game interface {
	GetName() string
	GetComment() string
	UpdateFromConfig(*tomlconf.Game) error
	StopOrKiller
	Runner
	AutoStarter
	Statuser //nolint:misspell // Its Status-er not a misspelling of stature
	io.Writer
	io.StringWriter
	// Chat-event callbacks forwarded from the connected chat network.
	OnMessage(source, target, msg string, isAction bool)
	OnJoin(source, channel string)
	OnPart(source, channel, message string)
	OnNick(source, newnick string)
	OnQuit(source, message string)
	OnKick(source, channel, kickee, message string)
	SendLineFromOtherGame(msg string, source Game)
}

// StopOrKiller holds methods to stop running processes, killing them after
// a timeout.
type StopOrKiller interface {
	StopOrKill() error
	StopOrKillTimeout(duration time.Duration) error
	StopOrKillWaitgroup(group *sync.WaitGroup)
}

// Runner holds methods to run a process and query its running state.
type Runner interface {
	Run() error
	IsRunning() bool
}

// AutoStarter refers to any type that can be autostarted.
type AutoStarter interface {
	AutoStart()
}

// Statuser refers to any type that can report its status as a string.
type Statuser interface { //nolint:misspell // Its Status-er not a misspelling of stature
	// Status returns a human readable status string
	Status() string
}
|
package core
import (
"fmt"
"io/ioutil"
"math/rand"
gnet "net"
"os"
"path"
"sync"
"testing"
"time"
"google.golang.org/grpc"
"github.com/BurntSushi/toml"
"github.com/benbjohnson/clock"
"github.com/drand/drand/beacon"
"github.com/drand/drand/key"
"github.com/drand/drand/log"
"github.com/drand/drand/net"
"github.com/drand/drand/protobuf/drand"
"github.com/drand/drand/test"
"github.com/drand/kyber"
"github.com/kabukky/httpscerts"
"github.com/stretchr/testify/require"
)
func getSleepDuration() time.Duration {
if os.Getenv("TRAVIS_BRANCH") != "" {
return time.Duration(3000) * time.Millisecond
}
return time.Duration(300) * time.Millisecond
}
// TestDrandDKGReshareTimeout runs a resharing from a 5-node to a 6-node
// group with one old node offline, and verifies the DKG only completes
// after the mock clock is advanced past the resharing timeout.
func TestDrandDKGReshareTimeout(t *testing.T) {
	oldN := 5 // 4 / 5
	newN := 6 // 5 / 6
	oldThr := key.DefaultThreshold(oldN)
	newThr := key.DefaultThreshold(newN)
	timeoutStr := "200ms"
	timeout, _ := time.ParseDuration(timeoutStr)
	period := 300 * time.Millisecond
	offline := 1 // can't do more anyway with a 2/3 + 1 threshold
	dt := NewDrandTest(t, oldN, oldThr, period)
	defer dt.Cleanup()
	dt.RunDKG()
	// pubShare recovers the public commitment of a node's private share.
	pubShare := func(s *key.Share) kyber.Point {
		return key.KeyGroup.Point().Mul(s.Share.V, nil)
	}
	for _, drand := range dt.drands {
		pk := drand.priv.Public
		idx, ok := dt.group.Index(pk)
		require.True(t, ok)
		fmt.Printf("idx: %d : pubkey %s\n\t - pub share: %s\n\n", idx, pk.Key.String(), pubShare(drand.share).String())
	}
	dt.SetupReshare(oldN-offline, newN-oldN, newThr)
	fmt.Println("SETUP RESHARE DONE")
	// run the resharing in the background so we can probe its progress
	var doneReshare = make(chan bool, 1)
	go func() {
		dt.RunReshare(oldN-offline, newN-oldN, timeoutStr)
		doneReshare <- true
	}()
	// checkDone polls (non-blocking) whether the resharing has finished.
	checkDone := func() bool {
		select {
		case <-doneReshare:
			return true
		default:
			return false
		}
	}
	// check it is not done yet
	time.Sleep(getSleepDuration())
	require.False(t, checkDone())
	// advance time to the timeout
	dt.MoveTime(timeout * time.Duration(2))
	// give time to finish for the go routines and such
	time.Sleep(getSleepDuration())
	require.True(t, checkDone())
}
// SyncClock pairs a mock clock with a mutex intended to serialise time
// advances across goroutines.
type SyncClock struct {
	*clock.Mock
	*sync.Mutex
}

// Add advances the mock clock by d.
// NOTE(review): the locking is commented out, so concurrent Add calls are
// currently unsynchronised — confirm this is intentional.
func (s *SyncClock) Add(d time.Duration) {
	//s.Lock()
	//defer s.Unlock()
	s.Mock.Add(d)
}
// DrandTest is the harness for multi-node drand tests: it tracks the old
// and new (post-reshare) node sets, their groups, shares, mock clocks,
// and TLS certificate paths.
type DrandTest struct {
	t            *testing.T
	n            int
	thr          int
	dir          string // working dir of the original group
	newDir       string // working dir of the reshared group
	drands       map[string]*Drand
	newDrands    map[string]*Drand
	group        *key.Group
	newGroup     *key.Group
	groupPath    string
	newGroupPath string
	period       time.Duration
	ids          []string // addresses of the original nodes
	newIds       []string // addresses of the newly added nodes
	shares       map[string]*key.Share
	clocks       map[string]*SyncClock
	certPaths    []string
	newCertPaths []string
}

// Cleanup removes both groups' working directories.
func (d *DrandTest) Cleanup() {
	os.RemoveAll(d.dir)
	os.RemoveAll(d.newDir)
}

// GetBeacon fetches the beacon at the given round from node id's store.
func (d *DrandTest) GetBeacon(id string, round int) (*beacon.Beacon, error) {
	dd, ok := d.drands[id]
	require.True(d.t, ok)
	return dd.beaconStore.Get(uint64(round))
}
// SetupReshare prepares a resharing: it keeps keepOld existing nodes, adds
// addNew fresh nodes, cross-registers everyone's TLS certificates, builds
// a shuffled new group with threshold newThr, and saves it to disk.
// It returns the addresses of the newly created nodes.
func (d *DrandTest) SetupReshare(keepOld, addNew, newThr int) []string {
	newN := keepOld + addNew
	ids := make([]*key.Identity, 0, newN)
	newAddr := make([]string, addNew)
	newDrands, _, newDir, newCertPaths := BatchNewDrand(addNew, false,
		WithCallOption(grpc.FailFast(true)),
	)
	d.newDir = newDir
	d.newDrands = make(map[string]*Drand)
	// add old participants: they must trust the new nodes' certificates
	for _, id := range d.ids[:keepOld] {
		drand := d.drands[id]
		ids = append(ids, drand.priv.Public)
		for _, cp := range newCertPaths {
			drand.opts.certmanager.Add(cp)
		}
	}
	// add new participants: they must trust the old nodes' certificates
	for i, drand := range newDrands {
		ids = append(ids, drand.priv.Public)
		newAddr[i] = drand.priv.Public.Address()
		d.newDrands[drand.priv.Public.Address()] = drand
		d.setClock(newAddr[i])
		for _, cp := range d.certPaths {
			drand.opts.certmanager.Add(cp)
		}
	}
	d.newIds = newAddr
	// shuffle the member order with random swaps so the new group's
	// indices do not mirror the old ones
	shuffledIds := make([]*key.Identity, len(ids))
	copy(shuffledIds, ids)
	for i := 0; i < len(ids)*3; i++ {
		i1 := rand.Intn(len(ids))
		i2 := rand.Intn(len(ids))
		shuffledIds[i1], shuffledIds[i2] = shuffledIds[i2], shuffledIds[i1]
	}
	d.newGroup = key.NewGroup(shuffledIds, newThr)
	d.newGroup.Period = d.period
	fmt.Println("RESHARE GROUP:\n", d.newGroup.String())
	d.newGroupPath = path.Join(newDir, "newgroup.toml")
	require.NoError(d.t, key.Save(d.newGroupPath, d.newGroup, false))
	return newAddr
}
// RunReshare executes the resharing protocol: it starts InitReshare on
// oldRun old nodes (first old node acting as leader, started last) and
// newRun new nodes, stops the remaining old nodes, and blocks until every
// participating client returns.
func (d *DrandTest) RunReshare(oldRun, newRun int, timeout string) {
	var clientCounter = &sync.WaitGroup{}
	// runreshare drives one node's control client through InitReshare.
	runreshare := func(dr *Drand, leader bool) {
		// instruct to be ready for a reshare
		client, err := net.NewControlClient(dr.opts.controlPort)
		require.NoError(d.t, err)
		_, err = client.InitReshare(d.groupPath, d.newGroupPath, leader, timeout)
		require.NoError(d.t, err)
		fmt.Printf("\n\nDKG TEST: drand %s DONE RESHARING (%v)\n", dr.priv.Public.Address(), leader)
		clientCounter.Done()
	}
	// take list of old nodes present in new groups
	var oldNodes []string
	for _, id := range d.ids {
		drand := d.drands[id]
		if d.newGroup.Contains(drand.priv.Public) {
			oldNodes = append(oldNodes, drand.priv.Public.Address())
		}
	}
	var allIds []string
	// run the old ones
	// exclude leader
	clientCounter.Add(oldRun - 1)
	for _, id := range oldNodes[1:oldRun] {
		fmt.Println("Launching reshare on old", id)
		go runreshare(d.drands[id], false)
		allIds = append(allIds, id)
	}
	// stop the rest: they simulate offline participants
	for _, id := range oldNodes[oldRun:] {
		d.drands[id].Stop()
	}
	// run the new ones
	clientCounter.Add(newRun)
	for _, id := range d.newIds[:newRun] {
		fmt.Println("Launching reshare on new", id)
		go runreshare(d.newDrands[id], false)
		allIds = append(allIds, id)
	}
	allIds = append(allIds, oldNodes[0])
	d.setDKGCallback(allIds)
	// run leader last so all followers are already listening
	fmt.Println("Launching reshare on (old) root", d.ids[0])
	clientCounter.Add(1)
	go runreshare(d.drands[oldNodes[0]], true)
	// wait for the return of the clients
	checkWait(clientCounter)
	fmt.Printf("\n\n -- TEST FINISHED ALL RESHARE DKG --\n\n")
}
// checkWait blocks until counter reaches zero, panicking if that takes
// longer than 11 seconds (a stuck test would otherwise hang forever).
func checkWait(counter *sync.WaitGroup) {
	done := make(chan bool, 1)
	go func() {
		counter.Wait()
		done <- true
	}()
	select {
	case <-done:
	case <-time.After(11 * time.Second):
		panic("outdated beacon time")
	}
}
// NewDrandTest creates a harness with n fresh drand nodes sharing one
// group (threshold thr, beacon period period) and saves the group file
// to the test directory.
func NewDrandTest(t *testing.T, n, thr int, period time.Duration) *DrandTest {
	drands, group, dir, certPaths := BatchNewDrand(n, false,
		WithCallOption(grpc.FailFast(true)),
	)
	group.Period = period
	groupPath := path.Join(dir, "dkggroup.toml")
	require.NoError(t, key.Save(groupPath, group, false))
	ids := make([]string, n)
	mDrands := make(map[string]*Drand, n)
	for i, d := range drands {
		ids[i] = d.priv.Public.Address()
		mDrands[ids[i]] = d
	}
	return &DrandTest{
		t:         t,
		n:         n,
		thr:       thr,
		drands:    mDrands,
		group:     group,
		groupPath: groupPath,
		period:    period,
		ids:       ids,
		shares:    make(map[string]*key.Share),
		clocks:    make(map[string]*SyncClock),
		certPaths: certPaths,
	}
}
// RunDKG runs the initial distributed key generation across all nodes
// (first node leads), waits for completion, then reloads and validates
// the resulting group file: membership, threshold, and public polynomial
// degree must all match.
func (d *DrandTest) RunDKG() {
	var wg sync.WaitGroup
	wg.Add(d.n - 1)
	d.setClock(d.ids...)
	d.setDKGCallback(d.ids)
	// launch all non-leader nodes first so they are listening
	for _, id := range d.ids[1:] {
		go func(dd *Drand) {
			client, err := net.NewControlClient(dd.opts.controlPort)
			require.NoError(d.t, err)
			_, err = client.InitDKG(d.groupPath, false, "", nil)
			require.NoError(d.t, err)
			wg.Done()
			fmt.Printf("\n\n\n TESTDKG NON-ROOT %s FINISHED\n\n\n", dd.priv.Public.Address())
		}(d.drands[id])
	}
	// the leader kicks off the protocol
	root := d.drands[d.ids[0]]
	controlClient, err := net.NewControlClient(root.opts.controlPort)
	require.NoError(d.t, err)
	_, err = controlClient.InitDKG(d.groupPath, true, "", nil)
	require.NoError(d.t, err)
	wg.Wait()
	fmt.Printf("\n\n\n TESTDKG ROOT %s FINISHED\n\n\n", d.ids[0])
	// reload the distributed group produced by the DKG and sanity-check it
	resp, err := controlClient.GroupFile()
	require.NoError(d.t, err)
	group := new(key.Group)
	groupToml := new(key.GroupTOML)
	_, err = toml.Decode(resp.GetGroupToml(), groupToml)
	require.NoError(d.t, err)
	require.NoError(d.t, group.FromTOML(groupToml))
	d.group = group
	require.Equal(d.t, d.thr, d.group.Threshold)
	for _, drand := range d.drands {
		require.True(d.t, d.group.Contains(drand.priv.Public))
	}
	require.Len(d.t, d.group.PublicKey.Coefficients, d.thr)
	require.NoError(d.t, key.Save(d.groupPath, d.group, false))
}
// tryBoth runs fn on the node registered under id, looking first in the
// original set and then in the post-resharing set; it panics when the id is
// unknown to both.
func (d *DrandTest) tryBoth(id string, fn func(d *Drand)) {
	if dr, ok := d.drands[id]; ok {
		fn(dr)
		return
	}
	if dr, ok := d.newDrands[id]; ok {
		fn(dr)
		return
	}
	panic("that should not happen")
}
// setClock installs a fresh mock clock on each listed node and records it in
// d.clocks, keyed by the node's address. It also installs a dkgCallback that
// stores the node's share; note that setDKGCallback later overwrites that
// callback for the ids it is given.
func (d *DrandTest) setClock(ids ...string) {
	for _, id := range ids {
		d.tryBoth(id, func(dr *Drand) {
			c := &SyncClock{
				Mock:  clock.NewMock(),
				Mutex: new(sync.Mutex),
			}
			addr := dr.priv.Public.Address()
			d.clocks[addr] = c
			dr.opts.clock = c
			dr.opts.dkgCallback = func(s *key.Share) {
				d.shares[addr] = s
				fmt.Printf("\n\n\n --- DKG %s FINISHED ---\n\n\n", addr)
			}
		})
	}
}
// setDKGCallback installs, on every listed node, a dkgCallback that records
// the node's share. A background goroutine first waits for ALL callbacks to
// trigger and then advances every listed node's mock clock by syncTime.
func (d *DrandTest) setDKGCallback(ids []string) {
	var wg sync.WaitGroup
	wg.Add(len(ids))
	for _, id := range ids {
		d.tryBoth(id, func(dr *Drand) {
			dr.opts.dkgCallback = func(s *key.Share) {
				d.shares[dr.priv.Public.Address()] = s
				fmt.Printf("\n\n %s DKG DONE \n\n", dr.priv.Public.Address())
				wg.Done()
			}
		})
	}
	go func() {
		wg.Wait()
		// TRAVIS: let all peers go into sleep mode before increasing
		// their clock
		time.Sleep(100 * time.Millisecond)
		for _, id := range ids {
			d.clocks[id].Add(syncTime)
		}
	}()
}
// GetDrand returns the node registered under id in the initial set
// (d.drands); it returns nil for an unknown id.
func (d *DrandTest) GetDrand(id string) *Drand {
	return d.drands[id]
}
// StopDrand stops the node registered under id, then polls its control port
// every 100ms until it no longer answers pings, failing the test after five
// attempts.
func (d *DrandTest) StopDrand(id string) {
	dr := d.drands[id]
	dr.Stop()
	pinger, err := net.NewControlClient(dr.opts.controlPort)
	require.NoError(d.t, err)
	// Use an explicit ticker instead of time.Tick: time.Tick leaks its
	// underlying ticker, and this helper can run many times per binary.
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	var counter = 1
	for range ticker.C {
		if err := pinger.Ping(); err != nil {
			break
		}
		counter++
		require.LessOrEqual(d.t, counter, 5)
	}
}
// StartDrand reloads the previously stopped node id from its persistent
// store, swaps the fresh instance into d.drands, re-installs its mock
// clock, and starts its beacon (optionally in catchup mode).
func (d *DrandTest) StartDrand(id string, catchup bool) {
	dr, ok := d.drands[id]
	require.True(d.t, ok)
	var err error
	dr, err = LoadDrand(dr.store, dr.opts)
	require.NoError(d.t, err)
	d.drands[id] = dr
	d.setClock(id)
	dr.StartBeacon(catchup)
}
// MoveTime advances every node's mock clock by p, then sleeps briefly so
// the beacon goroutines get a chance to react.
func (d *DrandTest) MoveTime(p time.Duration) {
	for _, c := range d.clocks {
		c.Add(p)
	}
	time.Sleep(getSleepDuration())
}
// TestBeaconLength asserts that each listed node has stored at most max
// beacons.
func (d *DrandTest) TestBeaconLength(max int, ids ...string) {
	for _, id := range ids {
		drand, ok := d.drands[id]
		require.True(d.t, ok)
		require.LessOrEqual(d.t, drand.beaconStore.Len(), max)
	}
}
// TestPublicBeacon fetches a public randomness beacon from node id over
// TLS gRPC and asserts a non-nil response.
func (d *DrandTest) TestPublicBeacon(id string) {
	dr := d.GetDrand(id)
	client := net.NewGrpcClientFromCertManager(dr.opts.certmanager, dr.opts.grpcOpts...)
	resp, err := client.PublicRand(test.NewTLSPeer(dr.priv.Public.Addr), &drand.PublicRandRequest{})
	require.NoError(d.t, err)
	require.NotNil(d.t, resp)
}
// TestDrandDKGFresh runs a complete DKG on 5 nodes, stops one node, checks
// beacon progress of the survivors, restarts the stopped node in catchup
// mode, and verifies it picks the chain back up.
func TestDrandDKGFresh(t *testing.T) {
	n := 5
	p := 200 * time.Millisecond
	dt := NewDrandTest(t, n, key.DefaultThreshold(n), p)
	defer dt.Cleanup()
	dt.RunDKG()
	// make the last node fail
	// XXX The node still replies to early beacon packet
	lastID := dt.ids[n-1]
	lastOne := dt.GetDrand(lastID)
	lastOne.Stop()
	// test everyone has two beacon except the one we stopped
	dt.MoveTime(p)
	dt.TestBeaconLength(2, dt.ids[:n-1]...)
	// start last one
	dt.StartDrand(lastID, true)
	dt.MoveTime(p)
	dt.TestBeaconLength(3, dt.ids[:n-1]...)
	// 2 because the first beacon is ran automatically by everyone, can't stop
	// it before at the moment
	dt.TestBeaconLength(2, lastID)
	dt.TestPublicBeacon(dt.ids[0])
}
// TestDrandPublicGroup checks that, after a DKG, every node serves the same
// public group (period, threshold, distributed key, node list) over both
// gRPC and REST.
func TestDrandPublicGroup(t *testing.T) {
	n := 10
	thr := key.DefaultThreshold(n)
	p := 200 * time.Millisecond
	dt := NewDrandTest(t, n, thr, p)
	defer dt.Cleanup()
	dt.RunDKG()
	//client := NewGrpcClient()
	cm := dt.drands[dt.ids[0]].opts.certmanager
	client := NewGrpcClientFromCert(cm)
	rest := net.NewRestClientFromCertManager(cm)
	var group *drand.GroupResponse
	for i, id := range dt.ids {
		d := dt.drands[id]
		groupResp, err := client.Group(d.priv.Public.Address(), d.priv.Public.TLS)
		require.NoError(t, err, fmt.Sprintf("idx %d: addr %s", i, id))
		// The first response becomes the baseline all others must match.
		if group == nil {
			group = groupResp
		}
		require.Equal(t, uint32(group.Period), groupResp.Period)
		require.Equal(t, uint32(group.Threshold), groupResp.Threshold)
		require.Equal(t, group.Distkey, groupResp.Distkey)
		require.Len(t, groupResp.Nodes, len(group.Nodes))
		// Every local node must appear in the served node list with the
		// same address, public key and TLS flag.
		nodes := groupResp.GetNodes()
		for addr, d := range dt.drands {
			var found bool
			for _, n := range nodes {
				sameAddr := n.GetAddress() == addr
				sameKey := n.GetKey() == key.PointToString(d.priv.Public.Key)
				sameTLS := n.GetTLS() == d.priv.Public.TLS
				if sameAddr && sameKey && sameTLS {
					found = true
					break
				}
			}
			require.True(t, found)
		}
		// The REST endpoint must agree with the gRPC endpoint.
		restGroup, err := rest.Group(d.priv.Public, &drand.GroupRequest{})
		require.NoError(t, err)
		require.Equal(t, groupResp, restGroup)
	}
}
// BatchNewDrand returns n drands, using TLS or not, with the given
// options. It returns the list of Drand structures, the group created,
// the folder where db, certificates, etc are stored (to be deleted at the
// end of the test), and the certificate paths.
func BatchNewDrand(n int, insecure bool, opts ...ConfigOption) ([]*Drand, *key.Group, string, []string) {
	var privs []*key.Pair
	var group *key.Group
	if insecure {
		privs, group = test.BatchIdentities(n)
	} else {
		privs, group = test.BatchTLSIdentities(n)
	}
	ports := test.Ports(n)
	var err error
	// make([]*Drand, n) — the explicit capacity in make([]T, n, n) was
	// redundant.
	drands := make([]*Drand, n)
	tmp := os.TempDir()
	dir, err := ioutil.TempDir(tmp, "drand")
	if err != nil {
		panic(err)
	}
	// In TLS mode, generate a self-signed certificate per node unless one
	// already exists on disk.
	certPaths := make([]string, n)
	keyPaths := make([]string, n)
	if !insecure {
		for i := 0; i < n; i++ {
			certPath := path.Join(dir, fmt.Sprintf("server-%d.crt", i))
			keyPath := path.Join(dir, fmt.Sprintf("server-%d.key", i))
			if httpscerts.Check(certPath, keyPath) != nil {
				h, _, err := gnet.SplitHostPort(privs[i].Public.Address())
				if err != nil {
					panic(err)
				}
				if err := httpscerts.Generate(certPath, keyPath, h); err != nil {
					panic(err)
				}
			}
			certPaths[i] = certPath
			keyPaths[i] = keyPath
		}
	}
	for i := 0; i < n; i++ {
		s := test.NewKeyStore()
		s.SaveKeyPair(privs[i])
		// give each one their own private folder
		dbFolder := path.Join(dir, fmt.Sprintf("db-%d", i))
		confOptions := append([]ConfigOption{WithDbFolder(dbFolder)}, opts...)
		if !insecure {
			confOptions = append(confOptions, WithTLS(certPaths[i], keyPaths[i]))
			confOptions = append(confOptions, WithTrustedCerts(certPaths...))
		} else {
			confOptions = append(confOptions, WithInsecure())
		}
		confOptions = append(confOptions, WithControlPort(ports[i]))
		confOptions = append(confOptions, WithLogLevel(log.LogDebug))
		drands[i], err = NewDrand(s, NewConfig(confOptions...))
		if err != nil {
			panic(err)
		}
	}
	return drands, group, dir, certPaths
}
// CloseAllDrands stops every drand node in the given list.
func CloseAllDrands(drands []*Drand) {
	for _, dr := range drands {
		dr.Stop()
		//os.RemoveAll(dr.opts.dbFolder)
	}
}
|
package neatly
import (
	"fmt"
	"sort"
	"strings"

	"github.com/viant/toolbox/data"
)
// referenceValues indexes pending (forward) reference values by tag name.
type referenceValues map[string]*referenceValue
// CheckUnused returns an error listing every registered reference that was
// never resolved, or nil when all references were used. The list is sorted
// so the message is deterministic (map iteration order is random).
func (v *referenceValues) CheckUnused() error {
	var unused = make([]string, 0)
	for k, value := range *v {
		if !value.Used {
			unused = append(unused, k)
		}
	}
	if len(unused) == 0 {
		return nil
	}
	sort.Strings(unused)
	return fmt.Errorf("Unresolved references: '%v' ", strings.Join(unused, ","))
}
// Add registers a forward reference named tagName for the given field on
// object. The reference's Setter writes a resolved value into the object
// and marks the reference as used. It always returns nil.
func (v *referenceValues) Add(tagName string, field *Field, object data.Map) error {
	var referencedValue = &referenceValue{
		Key:    tagName,
		Field:  field,
		Object: object,
	}
	referencedValue.Setter = func(value interface{}) {
		referencedValue.Used = true
		field.Set(value, referencedValue.Object)
	}
	(*v)[tagName] = referencedValue
	return nil
}
// Apply resolves the reference named tagName with the given value. When the
// reference is unknown it returns an error listing the currently registered
// references, sorted so the message is deterministic.
func (v *referenceValues) Apply(tagName string, value interface{}) error {
	referencedValue, ok := (*v)[tagName]
	if !ok {
		var referencesSoFar = make([]string, 0)
		for k := range *v {
			referencesSoFar = append(referencesSoFar, k)
		}
		sort.Strings(referencesSoFar)
		return fmt.Errorf("Missing referenceValue %v in the previous rows, available[%v]", tagName, strings.Join(referencesSoFar, ","))
	}
	referencedValue.Setter(value)
	return nil
}
// newReferenceValues returns an empty, ready-to-use reference registry.
func newReferenceValues() referenceValues {
	return make(referenceValues)
}
// referenceValue represents a single forward reference: a field on a target
// object whose value is supplied by a later row.
type referenceValue struct {
	Setter func(value interface{}) // applies a resolved value and marks the reference used
	Key    string                  // reference key (tag name)
	Field  *Field                  // field to set on the target object
	Object data.Map                // target object
	Used   bool                    // true once Setter has run; unused references are reported as errors
}
|
package conf
// CommonConfig is the set of configuration values shared by all commands.
type CommonConfig struct { // nolint
	APIEndpoint    *string // API endpoint URL
	APIKey         *string // API authentication key
	AppVersion     string  // application version string
	ExtendedOutput *bool   // whether to print extended output
	IsDebugMode    bool    // enables debug logging when true
}
// StartConfig is the set of configurations for starting the process.
type StartConfig struct {
	Common       *CommonConfig
	Platform     *string // target platform identifier
	Asynchronous *bool   // run without waiting for completion when true
}
// SuccessConfig is the set of configurations for ending the process with a
// success state.
type SuccessConfig struct {
	Common  *CommonConfig
	Message *string // optional message attached to the success report
}
|
package server
import (
"net/http"
"reflect"
"testing"
"github.com/julienschmidt/httprouter"
"github.com/syariatifaris/shopeetax/app/resource/uires"
"github.com/syariatifaris/shopeetax/app/usecase"
)
// TestInitShopeeHTTPServer is a table-driven scaffold for the server
// constructor; the case table is still empty (TODO).
func TestInitShopeeHTTPServer(t *testing.T) {
	type args struct {
		ui   *uires.UIResource
		port int
	}
	tests := []struct {
		name string
		args args
		want Server
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := InitShopeeHTTPServer(tt.args.ui, tt.args.port); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("InitShopeeHTTPServer() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_httpServer_Run is a table-driven scaffold for httpServer.Run; the
// case table is still empty (TODO).
func Test_httpServer_Run(t *testing.T) {
	type fields struct {
		ui     *uires.UIResource
		router *httprouter.Router
		port   int
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &httpServer{
				ui:     tt.fields.ui,
				router: tt.fields.router,
				port:   tt.fields.port,
			}
			if err := h.Run(); (err != nil) != tt.wantErr {
				t.Errorf("httpServer.Run() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// Test_httpServer_registerRouters is a table-driven scaffold for route
// registration; the case table is still empty (TODO).
func Test_httpServer_registerRouters(t *testing.T) {
	type fields struct {
		ui     *uires.UIResource
		router *httprouter.Router
		port   int
	}
	tests := []struct {
		name   string
		fields fields
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &httpServer{
				ui:     tt.fields.ui,
				router: tt.fields.router,
				port:   tt.fields.port,
			}
			h.registerRouters()
		})
	}
}
// Test_httpServer_handle is a table-driven scaffold for httpServer.handle;
// the case table is still empty (TODO).
func Test_httpServer_handle(t *testing.T) {
	type fields struct {
		ui     *uires.UIResource
		router *httprouter.Router
		port   int
	}
	type args struct {
		uiRes           *uires.UIResource
		httpHandlerFunc httpHandlerFunc
		useCase         usecase.HandleUseCase
		subscribers     []usecase.UseCase
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   http.HandlerFunc
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &httpServer{
				ui:     tt.fields.ui,
				router: tt.fields.router,
				port:   tt.fields.port,
			}
			if got := h.handle(tt.args.uiRes, tt.args.httpHandlerFunc, tt.args.useCase, tt.args.subscribers...); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("httpServer.handle() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_httpServer_handleView is a table-driven scaffold for
// httpServer.handleView; the case table is still empty (TODO).
func Test_httpServer_handleView(t *testing.T) {
	type fields struct {
		ui     *uires.UIResource
		router *httprouter.Router
		port   int
	}
	type args struct {
		uiRes               *uires.UIResource
		httpHandlerViewFunc httpHandlerViewFunc
		useCase             usecase.HandleUseCase
		subscribers         []usecase.UseCase
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   http.HandlerFunc
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &httpServer{
				ui:     tt.fields.ui,
				router: tt.fields.router,
				port:   tt.fields.port,
			}
			if got := h.handleView(tt.args.uiRes, tt.args.httpHandlerViewFunc, tt.args.useCase, tt.args.subscribers...); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("httpServer.handleView() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package layer
import "io"
// Packer encodes queries to wire bytes and decodes them back, either from a
// byte slice or from a stream.
type Packer interface {
	Encode(query *Query) ([]byte, error)
	Decode([]byte) (*Query, error)
	DecodeReader(reader io.Reader) (*Query, error)
}
// packer implements Packer by embedding the encoder and decoder halves.
type packer struct {
	queryEncoder
	queryDecoder
}
// NewPacker returns a Packer backed by zero-value encoder/decoder halves.
func NewPacker() Packer {
	return &packer{}
}
|
package main
import (
"reflect"
"testing"
)
// Puzzle fixtures: 81-character row-major grids where '.' marks an empty
// cell. harder and diagonal2 are not referenced by the tests in this file.
const (
	simple    = "..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3.."
	harder    = "4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......"
	diagonal1 = "2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3"
	diagonal2 = "4.......3..9.........1...7.....1.8.....5.9.....1.2.....3...5.........7..7.......8"
)
// TestStandardSudokuBoardInit verifies a standard board builds 27 units.
// Skipped in -short mode; t.Skip terminates the test, so no else is needed.
func TestStandardSudokuBoardInit(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	board := NewSudoku(simple, STANDARD)
	if unitsLength := len(board.allUnits); unitsLength != 27 {
		t.Error("Standard Sudoku board should test 27 units")
	}
}
// TestDiagonalSudokuBoardInit verifies a diagonal board builds 29 units
// (27 standard plus the two diagonals). Skipped in -short mode; the
// original wrapped the body in an unnecessary else after t.Skip.
func TestDiagonalSudokuBoardInit(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	board := NewSudoku(diagonal1, DIAGONAL)
	if unitsLength := len(board.allUnits); unitsLength != 29 {
		t.Error("Diagonal Sudoku board should test 29 units")
	}
}
// TestUnitsByCell checks that every box has a unit entry and that cell A1's
// units include row A (rowUnits[0]) and column 1 (colUnits[0]). Skipped in
// -short mode; the unnecessary else after t.Skip has been removed.
func TestUnitsByCell(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	board := NewSudoku(simple, STANDARD)
	if unitsLength := len(board.unitsByCell); unitsLength != len(BOXES) {
		t.Error("Incorrect number of units in unitsByCell")
	}
	unitsA1 := board.unitsByCell["A1"]
	foundRow := false
	foundCol := false
	for i := range unitsA1 {
		if reflect.DeepEqual(unitsA1[i], board.rowUnits[0]) {
			foundRow = true
		}
		if reflect.DeepEqual(unitsA1[i], board.colUnits[0]) {
			foundCol = true
		}
	}
	if !foundRow || !foundCol {
		t.Error("Units for A1 does not include one of either row A or col 1 units")
	}
}
// TestPeersByCell checks that every box has a peer list and that A1's peers
// include A9 (same row) and I1 (same column). Skipped in -short mode; the
// unnecessary else after t.Skip has been removed.
func TestPeersByCell(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	board := NewSudoku(simple, STANDARD)
	if unitsLength := len(board.peersByCell); unitsLength != len(BOXES) {
		t.Error("Incorrect number of units in peersByCell")
	}
	peersA1 := board.peersByCell["A1"]
	foundA9 := false
	foundI1 := false
	for i := range peersA1 {
		if peersA1[i] == "A9" {
			foundA9 = true
		}
		if peersA1[i] == "I1" {
			foundI1 = true
		}
	}
	if !foundA9 || !foundI1 {
		t.Error("Peers for A1 does not include one of either A9 or I1")
	}
}
// TestValues checks that the board stores one value entry per box.
// Skipped in -short mode; the unnecessary else after t.Skip has been
// removed.
func TestValues(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	board := NewSudoku(simple, STANDARD)
	if unitsLength := len(board.values); unitsLength != len(BOXES) {
		t.Error("Incorrect number of values in peersByCell")
	}
}
// TestDeepCopy verifies that Copy produces an independent board: mutating a
// cell on the copy must not be visible on the original. Skipped in -short
// mode; the unnecessary else after t.Skip has been removed.
func TestDeepCopy(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	board := NewSudoku(simple, STANDARD)
	board2 := board.Copy()
	board2.values["A1"] = CellValue{Value: "4", Source: RESOLVED}
	if board.values["A1"] == board2.values["A1"] {
		t.Error("Board is not deep copied")
	}
}
// TestSolve verifies the simple puzzle is solvable and prints the solution.
// Skipped in -short mode; the unnecessary else after t.Skip has been
// removed.
func TestSolve(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	board := NewSudoku(simple, STANDARD)
	resultBoard, success := board.Solve()
	if !success {
		t.Error("Board was not solved")
		return
	}
	resultBoard.Print()
}
|
package 字符串
import "fmt"
// addStrings returns the decimal sum of two non-negative integers given as
// ASCII digit strings, without converting them to machine integers.
// The digits are accumulated into a byte buffer and reversed once at the
// end; the original prepended via fmt.Sprintf on every digit, which was
// quadratic and allocated per iteration.
func addStrings(num1 string, num2 string) string {
	i, j := len(num1)-1, len(num2)-1
	// +2: result can be at most one digit longer than the longer input.
	buf := make([]byte, 0, len(num1)+len(num2))
	carry := 0
	for i >= 0 || j >= 0 || carry != 0 {
		sum := carry
		if i >= 0 {
			sum += int(num1[i] - '0')
			i--
		}
		if j >= 0 {
			sum += int(num2[j] - '0')
			j--
		}
		buf = append(buf, byte('0'+sum%10))
		carry = sum / 10
	}
	// Digits were produced least-significant first; reverse in place.
	for l, r := 0, len(buf)-1; l < r; l, r = l+1, r-1 {
		buf[l], buf[r] = buf[r], buf[l]
	}
	return string(buf)
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"github.com/nitohu/err"
)
// getCmdLineArgs parses command line arguments into a key/value map.
// Flags come in "-flag value" pairs; -h/--help prints usage and exits the
// process. It returns a nil map and an empty error when no arguments were
// given, and validates the collected database settings before returning.
func getCmdLineArgs(args []string) (map[string]string, err.Error) {
	if len(args) <= 1 {
		return nil, err.Error{}
	}
	res := make(map[string]string)
	for i := 1; i < len(args); i += 2 {
		kw := args[i]
		var val string
		if i+1 < len(args) {
			val = args[i+1]
		}
		// A switch replaces the original if/else-if chain.
		switch kw {
		case "-h", "--help":
			fmt.Println("Accounting Command line arguments")
			fmt.Println("\n\t-h, --help\t\tLoad the current page")
			fmt.Println("\t-c, --config <path>\tLoad the config file with the given <path>")
			fmt.Println("\t-p, --port <port>\tSet the port this app is running at (standard 80)")
			fmt.Println("\t-H, --dbhost <addr>\tSpecifies the host address of the postgres database")
			fmt.Println("\t-u, --dbuser <name>\tSpecifies the database user")
			fmt.Println("\t-pw, --dbpassword <pw>\tSpecifies the database password")
			fmt.Println("\t-P, --dbport <port>\tSpecifies the database port")
			os.Exit(0) // never returns; the unreachable return was removed
		case "-c", "--config":
			if e := readConfFile(&res, val); !e.Empty() {
				return nil, e
			}
		case "-H", "--dbhost":
			res["dbhost"] = val
		case "-u", "--dbuser":
			res["dbuser"] = val
		case "-pw", "--dbpassword":
			res["dbpassword"] = val
		case "-P", "--dbport":
			res["dbport"] = val
		case "-p", "--port":
			res["port"] = val
		case "-d", "--database":
			res["dbdatabase"] = val
		}
	}
	// Named e to avoid shadowing the imported "err" package.
	if e := validateCmdlineData(res); !e.Empty() {
		return nil, e
	}
	return res, err.Error{}
}
// readConfFile parses a "key=value" per-line config file at filePath into
// res. A ';' starts a comment running to end of line; entries with an empty
// key or value are skipped. It returns a populated err.Error when the file
// cannot be read, otherwise an empty one.
func readConfFile(res *map[string]string, filePath string) err.Error {
	data, readErr := ioutil.ReadFile(filePath) // renamed: "error" shadowed the builtin
	if readErr != nil {
		var e err.Error
		e.Init("getCmdLineArgs()", readErr.Error())
		return e
	}
	var kw, val string
	kwPassed := false
	for x := 0; x < len(data); x++ {
		c := data[x]
		switch {
		case c == byte('='):
			kwPassed = true
		case c == byte('\n'):
			if kw != "" && val != "" {
				(*res)[kw] = val
			}
			kwPassed = false
			kw = ""
			val = ""
		case c == byte(';'):
			// Skip the comment up to (not including) the newline so the
			// pending entry on this line is still flushed. The original
			// advanced unconditionally, which both panicked on a trailing
			// comment without a final newline and swallowed the newline,
			// merging the commented line's entry into the next line. The
			// discarded comment accumulator was removed as well.
			for x+1 < len(data) && data[x+1] != byte('\n') {
				x++
			}
		case kwPassed:
			val += string(c)
		default:
			kw += string(c)
		}
	}
	return err.Error{}
}
// validateCmdlineData checks that the mandatory database settings are
// present and non-empty. NOTE: err.Init is called once per failure, so only
// the last missing setting ends up in the returned error — TODO confirm
// whether accumulating messages is wanted.
func validateCmdlineData(data map[string]string) err.Error {
	var e err.Error // named e to avoid shadowing the imported "err" package
	// check flags the setting when it is absent or empty.
	check := func(key, msg string) {
		if val, ok := data[key]; !ok || val == "" {
			e.Init("validateCmdlineData()", msg)
		}
	}
	check("dbhost", "Please provide a host for the database.")
	check("dbuser", "Please provide a user for the database.")
	check("dbpassword", "Please provide a password for the database.")
	// The original checked dbhost a second time here; the duplicate check
	// has been removed.
	check("dbdatabase", "Please provide a database name.")
	return e
}
|
package nomad
import "fmt"
// Uninstall removes the nomad installation. The message previously printed
// "install" even though this is the Uninstall method — presumably a
// copy/paste slip; it now reports the actual operation.
func (n nomadInvocation) Uninstall() error {
	fmt.Println("uninstall")
	return nil
}
|
package main
import "fmt"
// main demonstrates pass-by-value versus pass-by-pointer semantics.
func main() {
	a := 1
	one(a) // receives a copy: cannot change a
	fmt.Println("a:", a)
	fmt.Println("&a:", &a)
	b := &a
	fmt.Println("*b:", *b)
	two(b) // receives a pointer: writes through to a
	fmt.Println("b:", b)
	fmt.Println("*b", *b)
	fmt.Println("a:", a)
}
// one receives a copy of x; the assignment below is local and never
// observed by the caller.
func one(x int) {
	x = 2
	_ = x
}
// two writes 2 through the pointer, mutating the caller's variable.
func two(x *int) {
	*x = 2
}
package channels
import (
"testing"
)
// TestEasyUse runs the basic channel usage example.
func TestEasyUse(t *testing.T) {
	UseChanel()
}
// TestChannelBlock runs the blocking-channel example.
func TestChannelBlock(t *testing.T) {
	BlockChanel()
}
// TestUseChannelPanic runs the channel panic example.
// Reformatted to gofmt style ("t *testing.T" and brace spacing).
func TestUseChannelPanic(t *testing.T) {
	UseChannelPanic()
}
func TestUseChannelArray(t*testing.T){
UseChannelArray()
} |
package read
import (
	"encoding/binary"
	"fmt"
	"io"
	"log"
	"os"
)
// ReadChannel is the read side of a segment file. It mirrors this Clojure
// protocol:
//
//	(defprotocol ReadChannel
//	  (position!! [this offset])
//	  (read-byte!! [this])
//	  (read-int!! [this])
//	  (read-wire-format!! [this length])
//	  (size!! [this])
//	  (close-read!! [this]))
type ReadChannel interface {
	// Position moves the read cursor to the absolute byte offset.
	Position(offset int64)
	ReadByte()
	ReadBytes(uint32) []byte
	ReadInt() uint32
	//read the wire format starting at this offset
	ReadWireFormat(start_offset int64) ([]byte, []byte)
	Size()
	CloseRead()
}
// DiskReadChannel implements ReadChannel on top of an open read-only
// segment file.
type DiskReadChannel struct {
	segment_read_only_file *os.File
}
// NewDiskReadChannel wraps an already-open read-only segment file.
func NewDiskReadChannel(segment_read_only_file *os.File) *DiskReadChannel {
	return &DiskReadChannel{segment_read_only_file: segment_read_only_file}
}
// Position seeks the underlying file to the absolute offset (whence 0).
// NOTE(review): the Seek error is discarded — confirm this is acceptable.
func (c DiskReadChannel) Position(offset int64) {
	c.segment_read_only_file.Seek(offset, 0)
}
// ReadInt reads the next 4 bytes at the current position and decodes them
// as a big-endian uint32. It panics when the read fails.
func (c DiskReadChannel) ReadInt() uint32 {
	int_bytes := make([]byte, 4)
	// io.ReadFull guards against short reads: a bare Read may legally
	// return fewer than 4 bytes without reporting an error.
	n2, err := io.ReadFull(c.segment_read_only_file, int_bytes)
	if err != nil {
		fmt.Println(err)
		panic(err)
	}
	log.Printf("read %d bytes\n", n2)
	return Bytes2Int(int_bytes)
}
// ReadWireFormat reads one record starting at start_offset and returns the
// key bytes and value bytes; the value is nil when the marker after the key
// is not recognised. Record layout: key length, key, marker, value length,
// value. It panics (via ReadInt/ReadBytes) on read failure.
func (c DiskReadChannel) ReadWireFormat(start_offset int64) ([]byte, []byte) {
	fmt.Printf("read channel seeking to pos: %d bytes\n", start_offset)
	c.Position(start_offset)
	fmt.Printf("reading wireformat starting at the address: %d bytes\n", start_offset)
	//1- read key length
	key_len := c.ReadInt()
	//2- read key
	key_as_bytes := c.ReadBytes(key_len)
	//3- read marker; 41 appears to be the key/value separator marker —
	// confirm against the writer side. The redundant parens around the
	// condition were removed (gofmt style).
	marker := c.ReadInt()
	if marker == 41 {
		//4- read value length
		val_len := c.ReadInt()
		//5- read value
		val_as_bytes := c.ReadBytes(val_len)
		//log.Printf("read key_len: %d:%d and marker %d with value_len: %d:%d ... \n",
		//	key_len, string(key_as_bytes), marker, val_len, string(val_as_bytes))
		return key_as_bytes, val_as_bytes
	}
	return key_as_bytes, nil
}
// CloseRead closes the read channel. Not yet implemented.
func (c DiskReadChannel) CloseRead() {
	fmt.Println("TODO close read channel...")
}
// ReadByte reads a single byte. Not yet implemented.
func (c DiskReadChannel) ReadByte() {
	fmt.Println("TODO ReadByte...")
}
// ReadBytes reads exactly no_bytes bytes at the current position and
// returns them. It panics when the read fails.
func (c DiskReadChannel) ReadBytes(no_bytes uint32) []byte {
	log.Printf("DiskReadChannel about to read %d bytes\n", no_bytes)
	read_into := make([]byte, no_bytes)
	// io.ReadFull guards against short reads: a bare Read may legally
	// return fewer bytes than requested without reporting an error.
	n2, err := io.ReadFull(c.segment_read_only_file, read_into)
	if err != nil {
		fmt.Println(err)
		panic(err)
	}
	log.Printf("DiskReadChannel read %d bytes\n", n2)
	return read_into
}
// Size reports the channel size. Not yet implemented.
func (c DiskReadChannel) Size() {
	fmt.Println("TODO Size...")
}
// Bytes2Int decodes the first four bytes of the slice as a big-endian
// uint32; it panics when fewer than four bytes are supplied.
func Bytes2Int(bytes []byte) uint32 {
	order := binary.BigEndian
	return order.Uint32(bytes)
}
|
package main
import(
"net/http"
"net/http/httptest"
"testing"
"github.com/julienschmidt/httprouter"
"strings"
)
// Test the index page.
// Route should redirect (HTTP 302) to the login page.
func TestIndex(t *testing.T) {
	router := httprouter.New()
	router.GET("/", index)
	writer := httptest.NewRecorder()
	request, _ := http.NewRequest("GET", "/", nil)
	router.ServeHTTP(writer, request)
	if writer.Code != 302 {
		t.Errorf("Response code is %v", writer.Code)
	}
}
// Test the package function that adds a new user: the user must appear in
// the package-level users map under their email, with the right name.
func TestAddUser(t *testing.T) {
	addUser("Sau Sheong", "sausheong@gmail.com", "password")
	if val, ok := users["sausheong@gmail.com"]; !ok {
		t.Errorf("Cannot add user")
	} else {
		if val.Name != "Sau Sheong" {
			t.Errorf("User name is wrong")
		}
	}
}
// Test the sign up form.
// Route should sign up a new user and redirect (302) to the login page.
func TestSignUp(t *testing.T) {
	router := httprouter.New()
	router.POST("/signup", createUser)
	writer := httptest.NewRecorder()
	body := strings.NewReader("name=Sau Sheong&email=sausheong@gmail.com&password=password")
	request, _ := http.NewRequest("POST", "/signup", body)
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	router.ServeHTTP(writer, request)
	if writer.Code != 302 {
		t.Errorf("Response code is %v", writer.Code)
	}
	if writer.Header().Get("Location") != "/login" {
		t.Errorf("Location is %v", writer.Header().Get("Location"))
	}
}
// Test user authentication.
// Route should authenticate a user given the email and password
// from a form post.
// If authentication is successful, a cookie whose name starts with
// pixelate_cookie must be set on the response.
func TestAuthenticate(t *testing.T) {
	addUser("Sau Sheong", "sausheong@gmail.com", "password")
	router := httprouter.New()
	router.POST("/login", authenticate)
	writer := httptest.NewRecorder()
	body := strings.NewReader("email=sausheong@gmail.com&password=password")
	request, _ := http.NewRequest("POST", "/login", body)
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	router.ServeHTTP(writer, request)
	if writer.Code != 302 {
		t.Errorf("Response code is %v", writer.Code)
	}
	if !strings.HasPrefix(writer.Header().Get("Set-Cookie"), "pixelate_cookie") {
		t.Errorf("Cookie not set")
	}
}
// Test user authentication failure: with a wrong password the route must
// redirect back to /login and must NOT set the session cookie.
func TestAuthenticateFail(t *testing.T) {
	addUser("Sau Sheong", "sausheong@gmail.com", "password")
	router := httprouter.New()
	router.POST("/login", authenticate)
	writer := httptest.NewRecorder()
	body := strings.NewReader("email=sausheong@gmail.com&password=wrong_password")
	request, _ := http.NewRequest("POST", "/login", body)
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	router.ServeHTTP(writer, request)
	if writer.Code != 302 {
		t.Errorf("Response code is %v", writer.Code)
	}
	if writer.Header().Get("Location") != "/login" {
		t.Errorf("Not redirected to login")
	}
	if strings.HasPrefix(writer.Header().Get("Set-Cookie"), "pixelate_cookie") {
		t.Errorf("Cookie is set")
	}
}
|
package inventoryd
import (
"bufio"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
)
//go:generate go-bindata -pkg inventoryd ./models/
// CreateDefaultConfig generates a default configuration file at configPath:
// it creates the root directory next to the config file, unpacks the
// go-bindata embedded LWM2M model definitions, creates the resources
// directory, and finally writes the config itself as indented JSON.
func CreateDefaultConfig(configPath string) error {
	rootPath := filepath.Join(configPath, "..")
	// Endpoint client name is derived from the current timestamp.
	endpointClientName := "inventoryd-" + time.Now().Format("20060102030405")
	config := &Config{
		RootPath:           rootPath,
		ObserveInterval:    5,
		BootstrapServer:    "bootstrap.soracom.io:5683",
		EndpointClientName: endpointClientName}
	_, err := os.Stat(rootPath)
	if os.IsNotExist(err) {
		err := os.MkdirAll(rootPath, 0755)
		if err != nil {
			return err
		}
	}
	// Create the models directory and unpack every embedded model file.
	// Unpack failures are reported on stderr but do not abort setup.
	modelsPath := filepath.Join(rootPath, inventorydModelsDir)
	_, err = os.Stat(modelsPath)
	if os.IsNotExist(err) {
		err := os.MkdirAll(modelsPath, 0755)
		if err != nil {
			return err
		}
	}
	modelFiles, err := AssetDir(inventorydModelsDir)
	if err != nil {
		fmt.Fprintln(os.Stderr, "定義ファイルが展開できませんでした")
	} else {
		for _, modelFile := range modelFiles {
			modelData, err := Asset(filepath.Join(inventorydModelsDir, modelFile))
			if err == nil {
				err = ioutil.WriteFile(filepath.Join(modelsPath, modelFile), modelData, 0644)
			}
			if err != nil {
				fmt.Fprintf(os.Stderr, "定義ファイル(%s)が展開できませんでした\n", modelFile)
			}
		}
	}
	resourcesPath := filepath.Join(rootPath, inventorydResourcesDir)
	_, err = os.Stat(resourcesPath)
	if os.IsNotExist(err) {
		err := os.MkdirAll(resourcesPath, 0755)
		if err != nil {
			return err
		}
	}
	// Persist the configuration as indented JSON.
	jsonStr, err := json.MarshalIndent(config, "", "  ")
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(configPath, jsonStr, 0644)
	if err != nil {
		return err
	}
	return nil
}
// Prepare performs first-use setup: it loads every LWM2M model definition
// and initialises objects, instances and resources for each, either
// automatically (one instance per object) or by interactively asking for an
// instance count per object. The prompts on stdin/stdout are in Japanese.
func (daemon *Inventoryd) Prepare(config *Config) error {
	daemon.Config = config
	objectDefinitions, err := LoadLwm2mDefinitions(filepath.Join(daemon.Config.RootPath, inventorydModelsDir))
	if err != nil {
		return err
	}
	// Ask whether to use automatic setup mode; empty / "y" / "yes" (case
	// insensitive) selects it.
	autoMode := false
	fmt.Println("オブジェクト、インスタンス、リソースの初期設定を行います")
	fmt.Println("インスタンス生成時はリソースは以下の初期値が設定されます")
	fmt.Println("Integer: 0 / Float: 0.0 / String: \"\" / Time: 0 / Boolean: false / Opaque: 空データ / Objlnk: 0:0")
	fmt.Println("Executeのリソースはデフォルトのシェルスクリプトを生成します")
	fmt.Println("自動初期設定モード: 定義のあるモデルのインスタンスを1つずつ生成\n手動初期設定モード: インスタンスの個数を問い合わせながら生成")
	fmt.Print("自動初期設定モードを使用しますか? [ Y / n ] : ")
	scanner := bufio.NewScanner(os.Stdin)
	done := scanner.Scan()
	if done {
		input := strings.ToLower(scanner.Text())
		if input == "" || input == "y" || input == "yes" {
			autoMode = true
		}
	} else {
		// Input was interrupted (EOF or read error).
		return errors.New("入力が中断されました")
	}
	for _, objectDefinition := range objectDefinitions {
		err := daemon.prepareObject(objectDefinition, autoMode)
		if err != nil {
			return err
		}
	}
	return nil
}
// SetSecurityParams installs the device identity and PSK supplied on the
// command line: it deletes the existing security and server objects,
// recreates one instance of each, and writes the server URI, bootstrap
// flag, identity, PSK, short server ID and lifetime resources.
// Two error messages were corrected: security-instance creation previously
// reported a *server* instance failure, and the lifetime write previously
// reported a *server ID* failure.
func SetSecurityParams(config *Config, handler Lwm2mHandler, identity string, pskOpaque string) error {
	identityOpaque := base64.StdEncoding.EncodeToString([]byte(identity))
	definitions, err := LoadLwm2mDefinitions(filepath.Join(config.RootPath, inventorydModelsDir))
	if err != nil {
		return err
	}
	securityDefinition := definitions.findObjectDefinitionByID(lwm2mObjectIDSecurity)
	serverDefinition := definitions.findObjectDefinitionByID(lwm2mObjectIDServer)
	// Drop any existing security/server objects before recreating them.
	code := handler.DeleteObject(&Lwm2mObject{ID: lwm2mObjectIDSecurity, Definition: securityDefinition})
	if code != CoapCodeDeleted {
		return errors.New("セキュリティオブジェクトの削除に失敗しました")
	}
	code = handler.DeleteObject(&Lwm2mObject{ID: lwm2mObjectIDServer, Definition: serverDefinition})
	if code != CoapCodeDeleted {
		return errors.New("サーバーオブジェクトの削除に失敗しました")
	}
	code = handler.CreateInstance(&Lwm2mInstance{objectID: lwm2mObjectIDSecurity, ID: 0})
	if code != CoapCodeCreated {
		return errors.New("セキュリティインスタンスの登録に失敗しました")
	}
	code = handler.CreateInstance(&Lwm2mInstance{objectID: lwm2mObjectIDServer, ID: 0})
	if code != CoapCodeCreated {
		return errors.New("サーバーインスタンスの登録に失敗しました")
	}
	code = setSecurityResource(
		handler, lwm2mObjectIDSecurity, 0, lwm2mResourceIDSecurityURI, securityDefinition, lwm2mDefaultDMServerURL)
	if code != CoapCodeChanged {
		return errors.New("サーバーURIの登録に失敗しました")
	}
	code = setSecurityResource(
		handler, lwm2mObjectIDSecurity, 0, lwm2mResourceIDSecurityBootstrap, securityDefinition, "false")
	if code != CoapCodeChanged {
		return errors.New("ブートストラップ種別の登録に失敗しました")
	}
	code = setSecurityResource(
		handler, lwm2mObjectIDSecurity, 0, lwm2mResourceIDSecurityIdentity, securityDefinition, identityOpaque)
	if code != CoapCodeChanged {
		return errors.New("デバイスIDの登録に失敗しました")
	}
	code = setSecurityResource(
		handler, lwm2mObjectIDSecurity, 0, lwm2mResourceIDSecuritySecretKey, securityDefinition, pskOpaque)
	if code != CoapCodeChanged {
		return errors.New("PSKの登録に失敗しました")
	}
	code = setSecurityResource(
		handler, lwm2mObjectIDSecurity, 0, lwm2mResourceIDSecurityShortServerID, securityDefinition, strconv.Itoa(lwm2mDefaultShortServerID))
	if code != CoapCodeChanged {
		return errors.New("サーバIDの登録に失敗しました")
	}
	code = setSecurityResource(
		handler, lwm2mObjectIDServer, 0, lwm2mResourceIDServerShortServerID, serverDefinition, strconv.Itoa(lwm2mDefaultShortServerID))
	if code != CoapCodeChanged {
		return errors.New("サーバIDの登録に失敗しました")
	}
	code = setSecurityResource(
		handler, lwm2mObjectIDServer, 0, lwm2mResourceIDServerLifetime, serverDefinition, strconv.Itoa(lwm2mDefaultLifetime))
	if code != CoapCodeChanged {
		return errors.New("サーバーライフタイムの登録に失敗しました")
	}
	return nil
}
// prepareObject ensures the on-disk directory for one object exists and
// creates its instances: one instance automatically in autoMode, otherwise
// it prompts (in Japanese) for the number of instances to create.
func (daemon *Inventoryd) prepareObject(objectDefinition *Lwm2mObjectDefinition, autoMode bool) error {
	objectDirPath := filepath.Join(daemon.Config.RootPath, inventorydResourcesDir, strconv.Itoa((int)(objectDefinition.ID)))
	dir, err := os.Stat(objectDirPath)
	objectExist := false
	if !os.IsNotExist(err) && dir.IsDir() {
		objectExist = true
	}
	// A plain file where the object directory should be is removed.
	if !os.IsNotExist(err) && !dir.IsDir() {
		if err := os.Remove(objectDirPath); err != nil {
			return err
		}
		fmt.Fprintf(os.Stderr, "オブジェクトのパスにファイルがあるため削除しました")
	}
	if autoMode {
		if !objectExist {
			os.Mkdir(objectDirPath, 0755)
		}
		// Automatic mode: exactly one instance, with ID 0.
		daemon.createDefaultInstance(objectDefinition, 0, objectDirPath)
		return nil
	}
	// Manual mode: ask how many instances to create (default 0), retrying
	// until a valid integer is entered.
	fmt.Printf("オブジェクトNo.%d(%s)のインスタンスをいくつ生成しますか? [ default: 0 ] : ", objectDefinition.ID, objectDefinition.Name)
	var instanceNum int
	for {
		scanner := bufio.NewScanner(os.Stdin)
		done := scanner.Scan()
		if done {
			input := strings.ToLower(scanner.Text())
			if input == "" {
				instanceNum = 0
				break
			} else {
				inputNum, err := strconv.Atoi(input)
				if err != nil {
					fmt.Fprintln(os.Stderr, "入力値が不正です。整数値を入力してください")
					continue
				}
				instanceNum = inputNum
				break
			}
		} else {
			// Input was interrupted (EOF or read error).
			return errors.New("入力が中断されました")
		}
	}
	if !objectExist && instanceNum > 0 {
		os.Mkdir(objectDirPath, 0755)
	}
	for i := 0; i < instanceNum; i++ {
		daemon.createDefaultInstance(objectDefinition, (uint16)(i), objectDirPath)
	}
	return nil
}
// createDefaultInstance creates the directory for one object instance and
// populates a default file per resource definition: executable resources
// get a stub shell script, data resources get a type-appropriate zero
// value. Existing instance directories are left untouched.
func (daemon *Inventoryd) createDefaultInstance(
	objectDefinition *Lwm2mObjectDefinition,
	instanceID uint16,
	objectDirPath string) {
	instanceDirPath := filepath.Join(objectDirPath, strconv.Itoa((int)(instanceID)))
	dir, err := os.Stat(instanceDirPath)
	if !os.IsNotExist(err) && dir.IsDir() {
		// Instance directory already exists: nothing to do.
		return
	}
	// A plain file where the instance directory should be is removed.
	if !os.IsNotExist(err) && !dir.IsDir() {
		if err := os.Remove(instanceDirPath); err != nil {
			return
		}
		fmt.Fprintf(os.Stderr, "インスタンスのパスにファイルがあるため削除しました")
	}
	fmt.Printf("オブジェクトNo.%d(%s)のインスタンスNo.%dを生成します\n", objectDefinition.ID, objectDefinition.Name, instanceID)
	os.Mkdir(instanceDirPath, 0755)
	for _, resourceDefinition := range objectDefinition.Resources {
		resourcePath := filepath.Join(instanceDirPath, strconv.Itoa((int)(resourceDefinition.ID)))
		file, err := os.Stat(resourcePath)
		if !os.IsNotExist(err) && !file.IsDir() {
			// Resource file already exists: keep it and move on. (The
			// original returned here, silently skipping every remaining
			// resource of the instance.)
			continue
		}
		if resourceDefinition.Excutable {
			// Fixed: the stub previously started with "#/bin/bash", which
			// is not a valid shebang line.
			defaultScript := fmt.Sprintf("#!/bin/bash\necho \"execute %s script\"", resourceDefinition.Name)
			ioutil.WriteFile(resourcePath, []byte(defaultScript), 0755)
			continue
		}
		switch resourceDefinition.Type {
		case lwm2mResourceTypeString, lwm2mResourceTypeOpaque:
			ioutil.WriteFile(resourcePath, []byte{}, 0644)
		case lwm2mResourceTypeInteger, lwm2mResourceTypeTime:
			ioutil.WriteFile(resourcePath, []byte("0"), 0644)
		case lwm2mResourceTypeFloat:
			ioutil.WriteFile(resourcePath, []byte("0.0"), 0644)
		case lwm2mResourceTypeBoolean:
			ioutil.WriteFile(resourcePath, []byte("false"), 0644)
		case lwm2mResourceTypeObjlnk:
			ioutil.WriteFile(resourcePath, []byte("0:0"), 0644)
		}
	}
}
// setSecurityResource writes value into the identified security resource
// through the handler and returns the resulting CoAP response code.
func setSecurityResource(
	handler Lwm2mHandler,
	objectID, instanceID, resourceID uint16,
	objectDefinition *Lwm2mObjectDefinition,
	value string) CoapCode {
	resource := &Lwm2mResource{
		objectID:   objectID,
		instanceID: instanceID,
		ID:         resourceID,
		Definition: objectDefinition.findResourceByID(resourceID),
	}
	return handler.WriteResource(resource, value)
}
// SaveConfig serializes config as indented JSON and writes it to configPath.
func SaveConfig(configPath string, config *Config) error {
	data, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		return err
	}
	return ioutil.WriteFile(configPath, data, 0644)
}
|
package main
import (
"fmt"
"strings"
)
// main demonstrates the substring matcher on a fixed example.
func main() {
	needle := "ABCAC"
	haystack := "ABCABBAC."
	found := pattern(needle, haystack)
	fmt.Printf("\"%s\" is in \"%s\": %v\n", needle, haystack, found)
}
// pattern reports whether pat occurs as a substring of raw, scanning with
// two cursors and using per-character occurrence counts from barrel to
// back the text cursor up after a mismatch. Debug state is printed on
// every comparison.
//
// NOTE(review): requires len(pat) < len(raw) strictly, so a pattern equal
// in length to the text always reports false — confirm that is intended.
func pattern(pat, raw string) bool {
	rawArray := strings.Split(raw, "")
	patArray := strings.Split(pat, "")
	// Pattern must be strictly shorter than the text.
	if !(len(patArray) < len(rawArray)) {
		return false
	}
	var (
		k1 int // cursor into rawArray
		k2 int // cursor into patArray
		patBarrel []int // occurrence count of patArray[i]'s char up to i
		patLen int
		rawLen int
	)
	patBarrel = barrel(pat)
	patLen = len(patArray)
	rawLen = len(rawArray)
	fmt.Printf("patBarrel : %v\n", patBarrel)
	fmt.Printf("patArray : %v\n", patArray)
	// barrel returns one entry per character, so this guard can never
	// fire; kept as defensive debugging.
	if len(patBarrel) != len(patArray) {
		fmt.Printf("patBarrel len: %d, patArray len: %d\n", len(patBarrel), len(patArray))
		return false
	}
	for {
	loop:
		// Stop when either cursor runs off its slice.
		if !(k1 < rawLen && k2 < patLen) {
			break
		}
		fmt.Printf("k1=%d k2=%d\n", k1, k2)
		fmt.Printf("v1: %s, v2: %s \n", rawArray[k1], patArray[k2])
		if patArray[k2] != rawArray[k1] {
			// Mismatch: advance the text cursor, then back up by the
			// repeat count when the mismatching pattern character is a
			// repeat, and restart the pattern from its beginning.
			k1++
			if patBarrel[k2] > 1 {
				k1 -= patBarrel[k2]
			}
			k2 = 0
			goto loop
		} else if k2+1 == len(patArray) {
			// Last pattern character matched: full match.
			return true
		} else {
			k1++
			k2++
		}
	}
	return false
}
// barrel returns, for each position in val, how many times that
// position's character has occurred in val up to and including it.
func barrel(val string) []int {
	counts := map[string]int{}
	result := []int{}
	for _, ch := range strings.Split(val, "") {
		counts[ch]++
		result = append(result, counts[ch])
	}
	return result
}
|
package vm
import (
bmstemcell "github.com/cloudfoundry/bosh-micro-cli/stemcell"
)
// Infrastructure abstracts the IaaS layer used to create VMs.
type Infrastructure interface {
	// CreateVM creates a VM from the given stemcell CID with three
	// property maps (per the argument order: cloud properties, network
	// settings, environment — TODO confirm against implementations) and
	// returns the new VM's CID.
	CreateVM(bmstemcell.CID, map[string]interface{}, map[string]interface{}, map[string]interface{}) (CID, error)
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//69. Sqrt(x)
//Implement int sqrt(int x).
//Compute and return the square root of x, where x is guaranteed to be a non-negative integer.
//Since the return type is an integer, the decimal digits are truncated and only the integer part of the result is returned.
//Example 1:
//Input: 4
//Output: 2
//Example 2:
//Input: 8
//Output: 2
//Explanation: The square root of 8 is 2.82842..., and since
// the decimal part is truncated, 2 is returned.
//func mySqrt(x int) int {
//}
// Time Is Money |
package handlers
import (
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
"golang-rest/models"
"golang-rest/utils"
"net/http"
)
// GetAccessToken authenticates the credentials posted in the request body
// and responds with HTTP 201 and a signed JWT on success.
//
// Fixes: every Abort was previously missing a `return`, so the handler
// kept executing with zero-valued data; and the wrong-password branch
// passed a nil error to AbortWithError, which panics inside gin.
func GetAccessToken(c *gin.Context) {
	db := c.MustGet("db").(*gorm.DB)

	// create login user struct to restrict data.
	var login models.Login
	if err := c.BindJSON(&login); err != nil {
		c.AbortWithError(http.StatusUnprocessableEntity, err)
		return
	}

	// create user struct and get user by username.
	var user models.User
	if err := db.Where("username = ?", login.Username).First(&user).Error; err != nil {
		c.AbortWithError(http.StatusNotFound, err)
		return
	}

	// Reject a wrong password with an explicit message (status kept at
	// 422 to match the original handler's response code).
	if !utils.CheckPassword(user, login.Password) {
		c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{"error": "invalid credentials"})
		return
	}

	token, err := utils.GenerateJWT(user)
	if err != nil {
		c.AbortWithError(http.StatusUnprocessableEntity, err)
		return
	}
	c.JSON(http.StatusCreated, gin.H{"token": token})
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbutil
import (
"context"
"fmt"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
_ "github.com/pingcap/tidb/planner/core" // to setup expression.EvalAstExpr. See: https://github.com/pingcap/tidb/blob/a94cff903cd1e7f3b050db782da84273ef5592f4/planner/core/optimizer.go#L202
"github.com/pingcap/tidb/types"
_ "github.com/pingcap/tidb/types/parser_driver" // for parser driver
"github.com/pingcap/tidb/util/collate"
)
// init disables the new collation framework for this process; the table
// info produced and compared by this package assumes that setting.
func init() {
	collate.SetNewCollationEnabledForTest(false)
}
// GetTableInfo returns table information for schemaName.tableName by
// fetching its CREATE TABLE statement from db and parsing it with a
// parser configured for that connection.
func GetTableInfo(ctx context.Context, db QueryExecutor, schemaName string, tableName string) (*model.TableInfo, error) {
	createSQL, err := GetCreateTableSQL(ctx, db, schemaName, tableName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	p, err := GetParserForDB(ctx, db)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return GetTableInfoBySQL(createSQL, p)
}
// GetTableInfoBySQL returns table information by given create table sql.
func GetTableInfoBySQL(createTableSQL string, parser2 *parser.Parser) (table *model.TableInfo, err error) {
	stmt, err := parser2.ParseOneStmt(createTableSQL, "", "")
	if err != nil {
		return nil, errors.Trace(err)
	}
	s, ok := stmt.(*ast.CreateTableStmt)
	if !ok {
		return nil, errors.Errorf("get table info from sql %s failed", createTableSQL)
	}
	info, err := ddl.BuildTableInfoFromAST(s)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// An integer primary key used as the row handle has no entry in
	// Indices; synthesize one so callers see it like any other index.
	if info.PKIsHandle {
		pkIndex := &model.IndexInfo{
			Name:    model.NewCIStr("PRIMARY"),
			Primary: true,
			State:   model.StatePublic,
			Unique:  true,
			Tp:      model.IndexTypeBtree,
			Columns: []*model.IndexColumn{
				{
					Name:   info.GetPkName(),
					Length: types.UnspecifiedLength,
				},
			},
		}
		info.Indices = append(info.Indices, pkIndex)
	}
	return info, nil
}
// FindColumnByName finds column by name, ignoring case; nil when absent.
func FindColumnByName(cols []*model.ColumnInfo, name string) *model.ColumnInfo {
	// Column names are compared case-insensitively via the stored
	// lowercase form.
	lowered := strings.ToLower(name)
	for _, candidate := range cols {
		if candidate.Name.L == lowered {
			return candidate
		}
	}
	return nil
}
// EqualTableInfo returns true if this two table info have same columns and indices.
// On mismatch it returns false plus a human-readable description of the
// first difference found. Columns are compared positionally by name and
// type; indices are matched by name (order-independent) and compared by
// their column lists.
func EqualTableInfo(tableInfo1, tableInfo2 *model.TableInfo) (bool, string) {
	// check columns
	if len(tableInfo1.Columns) != len(tableInfo2.Columns) {
		return false, fmt.Sprintf("column num not equal, one is %d another is %d", len(tableInfo1.Columns), len(tableInfo2.Columns))
	}
	for j, col := range tableInfo1.Columns {
		if col.Name.O != tableInfo2.Columns[j].Name.O {
			return false, fmt.Sprintf("column name not equal, one is %s another is %s", col.Name.O, tableInfo2.Columns[j].Name.O)
		}
		if col.GetType() != tableInfo2.Columns[j].GetType() {
			return false, fmt.Sprintf("column %s's type not equal, one is %v another is %v", col.Name.O, col.GetType(), tableInfo2.Columns[j].GetType())
		}
	}
	// check index
	if len(tableInfo1.Indices) != len(tableInfo2.Indices) {
		return false, fmt.Sprintf("index num not equal, one is %d another is %d", len(tableInfo1.Indices), len(tableInfo2.Indices))
	}
	// Index order may differ between the two tables, so look indices up
	// by name.
	index2Map := make(map[string]*model.IndexInfo)
	for _, index := range tableInfo2.Indices {
		index2Map[index.Name.O] = index
	}
	for _, index1 := range tableInfo1.Indices {
		index2, ok := index2Map[index1.Name.O]
		if !ok {
			return false, fmt.Sprintf("index %s not exists", index1.Name.O)
		}
		if len(index1.Columns) != len(index2.Columns) {
			return false, fmt.Sprintf("index %s's columns num not equal, one is %d another is %d", index1.Name.O, len(index1.Columns), len(index2.Columns))
		}
		for j, col := range index1.Columns {
			if col.Name.O != index2.Columns[j].Name.O {
				return false, fmt.Sprintf("index %s's column not equal, one has %s another has %s", index1.Name.O, col.Name.O, index2.Columns[j].Name.O)
			}
		}
	}
	return true, ""
}
|
package timed_task
// TimeTask runs background maintenance work triggered over channels.
type TimeTask struct {
	// delMgoChatChan signals that user chat data should be deleted.
	delMgoChatChan chan bool
}

// timeTask is the package-level singleton returned by GetInstance.
var timeTask TimeTask
// GetInstance lazily initializes and returns the TimeTask singleton.
// A goroutine fires one initial signal so Run performs a first cleanup
// right after starting.
//
// NOTE(review): the nil-check-then-assign is not goroutine-safe; if
// GetInstance may be called concurrently, guard it (e.g. sync.Once).
func GetInstance() *TimeTask {
	if timeTask.delMgoChatChan == nil {
		timeTask.delMgoChatChan = make(chan bool)
		go func() {
			timeTask.delMgoChatChan <- true
		}()
	}
	return &timeTask
}
// Run blocks forever, deleting user chat data each time a signal
// arrives on delMgoChatChan.
func (t *TimeTask) Run() {
	for {
		<-t.delMgoChatChan
		t.timedDeleteUserChat()
	}
}
|
package cmd
import tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
// Command is a named bot action with user-facing help text.
type Command interface {
	// Execute runs the command against the incoming update.
	Execute(update tgbotapi.Update, scope *tgbotapi.BotAPI) error
	// GetName returns the command's name.
	GetName() string
	// GetHelp returns the command's help text.
	GetHelp() string
}
// Commander is the standard Command implementation backed by a function.
type Commander struct {
	// Name is the command's name.
	Name string
	// Help is the text shown to users.
	Help string
	// Scope is the command's visibility scope; defaults to "public".
	Scope string
	// Cmd is the handler invoked by Execute.
	Cmd func(update tgbotapi.Update, scope *tgbotapi.BotAPI) error
}

// GetName returns the command's name.
func (c *Commander) GetName() string {
	return c.Name
}

// GetHelp returns the command's help text.
func (c *Commander) GetHelp() string {
	return c.Help
}

// Execute delegates to the wrapped handler function.
func (c *Commander) Execute(update tgbotapi.Update, scope *tgbotapi.BotAPI) error {
	return c.Cmd(update, scope)
}
// NewCommand builds a Command from its metadata and handler function.
// An empty scope defaults to "public".
func NewCommand(name, help, scope string, cmd func(update tgbotapi.Update, scope *tgbotapi.BotAPI) error) Command {
	c := &Commander{
		Name:  name,
		Help:  help,
		Scope: scope,
		Cmd:   cmd,
	}
	if c.Scope == "" {
		c.Scope = "public"
	}
	return c
}
|
// FILE is the tab-separated input file to read.
// FILE_DELIMITER is the field separator used by the reader.
// TOTAL_ROWS is the number of parsed rows to print.
const (
	FILE = "./MX.txt"
	FILE_DELIMITER = '\t'
	TOTAL_ROWS = 5
)
// main reads the tab-separated file and prints selected columns
// (colonia, ciudad, delegacion) of the first TOTAL_ROWS rows.
//
// Fix: the original kept iterating (and copying fields into d) over the
// entire file after the first TOTAL_ROWS rows had been printed; this
// version breaks out as soon as the printed output is complete. The
// printed output is unchanged.
func main() {
	file, err := os.Open(FILE)
	printError(err)
	defer file.Close()

	reader := csv.NewReader(file)
	reader.Comma = FILE_DELIMITER

	rows, err := reader.ReadAll()
	printError(err)
	fmt.Printf("\nrows type: %T\n\n", rows)

	d := new(Document)
	for n, row := range rows {
		// Stop once the first TOTAL_ROWS rows have been printed.
		if n >= TOTAL_ROWS {
			break
		}
		d.colonia = row[2]
		d.ciudad = row[3]
		d.delegacion = row[5]
		fmt.Printf("%v,%v,%v;\n", d.colonia, d.ciudad, d.delegacion)
	}
}
|
package beginner
import "fmt"
/**
* created: 2019/5/8 9:25
* By Will Fan
*/
// main shows that Go's increment is a statement, not an expression:
// neither `++i` nor `data[i++]` compiles, so increment first, then index.
func main() {
	values := []int{1, 2, 3}
	idx := 0
	idx++
	fmt.Println(values[idx])
}
|
package email
import (
"gopkg.in/gomail.v2"
"crypto/tls"
"log"
)
// Send PlainText.
//
// SendText sends a plain-text message through the SMTP server at
// host:port, authenticating with username/password. Each entry of `to`
// is formatted as a bare address; cc is a single address set as the Cc
// header.
//
// NOTE(review): the Cc header is set even when cc is empty — confirm
// gomail tolerates an empty Cc address.
// NOTE(review): InsecureSkipVerify disables TLS certificate validation;
// unsafe outside trusted networks — confirm it is intentional.
func SendText(host string, port int, username, password string, to []string, from, cc, subject, body string) error {
	addresses := make([]string, len(to))
	m := gomail.NewMessage()
	m.SetHeader("From", from)
	// Format every recipient into address form with no display name.
	for i := range addresses {
		addresses[i] = m.FormatAddress(to[i], "")
	}
	m.SetHeader("To", addresses...)
	m.SetAddressHeader("Cc", cc, "")
	m.SetHeader("Subject", subject)
	m.SetBody("text/plain", body)
	//m.Attach()
	d := gomail.Dialer{
		Host: host,
		Port: port,
		Username: username,
		Password: password,
	}
	d.TLSConfig = &tls.Config{InsecureSkipVerify: true}
	if err := d.DialAndSend(m); err != nil {
		log.Println("Send email failed.")
		return err
	}
	return nil
}
package runtime
import (
"reflect"
"github.com/qlova/script"
)
//RunMethod implements script.Language.RunMethod.
//
// It resolves the method as "TypeName.name", captures thunks for every
// argument (the receiver is stored under key -1), and emits a deferred
// statement that evaluates the thunks and jumps into the target block
// when the generated program runs.
func (runtime *Runtime) RunMethod(structure script.Value, name string, args []script.Value) {
	var NumberOfArguments = len(args)
	var Args = make(map[int]func() interface{}, NumberOfArguments)
	for i, arg := range args {
		Args[i] = (*arg.T().Runtime)
	}
	// Key -1 carries the receiver's thunk alongside the positional args.
	Args[-1] = (*structure.T().Runtime)
	// Methods are registered under "TypeName.methodName".
	name = reflect.TypeOf(structure).Elem().Name() + "." + name
	runtime.WriteStatement(func() {
		var block, ok = runtime.Functions[name]
		if !ok {
			panic("invalid block: " + name)
		}
		// Evaluate each captured argument thunk at call time.
		var RuntimeArgs = make(map[int]interface{}, NumberOfArguments)
		for i := range Args {
			RuntimeArgs[i] = Args[i]()
		}
		block.Args = RuntimeArgs
		block.Jump()
		// Latch the return value and clear the in-flight slot.
		runtime.returned = runtime.returning
		runtime.returning = nil
	})
}
//CallMethod implements script.Language.CallMethod.
//
// Like RunMethod, but instead of emitting a statement it returns a
// result thunk that performs the call and yields the method's return
// value when evaluated.
func (runtime *Runtime) CallMethod(structure script.Value, name string, args []script.Value) script.Result {
	var NumberOfArguments = len(args)
	var Args = make(map[int]func() interface{}, NumberOfArguments)
	for i, arg := range args {
		Args[i] = (*arg.T().Runtime)
	}
	// Key -1 carries the receiver's thunk alongside the positional args.
	Args[-1] = (*structure.T().Runtime)
	// Methods are registered under "TypeName.methodName".
	name = reflect.TypeOf(structure).Elem().Name() + "." + name
	f := func() interface{} {
		var block, ok = runtime.Functions[name]
		if !ok {
			panic("invalid function: " + name)
		}
		// Evaluate each captured argument thunk at call time.
		var RuntimeArgs = make(map[int]interface{}, NumberOfArguments)
		for i := range Args {
			RuntimeArgs[i] = Args[i]()
		}
		block.Args = RuntimeArgs
		block.Jump()
		runtime.returned = runtime.returning
		runtime.returning = nil
		return runtime.returned
	}
	return &f
}
|
package main
import (
"context"
"github.com/spf13/cobra"
cmder "github.com/yaegashi/cobra-cmder"
)
// AppSPJobSchemaReset implements the "reset" subcommand for a
// synchronization job's schema.
type AppSPJobSchemaReset struct {
	*AppSPJobSchema
}

// AppSPJobSchemaResetComder wires the reset subcommand into the cmder
// tree. (Exported name kept as-is, including the "Comder" spelling,
// since external callers may reference it.)
func (app *AppSPJobSchema) AppSPJobSchemaResetComder() cmder.Cmder {
	return &AppSPJobSchemaReset{AppSPJobSchema: app}
}
// Cmd builds the cobra command definition for "reset".
func (app *AppSPJobSchemaReset) Cmd() *cobra.Command {
	return &cobra.Command{
		Use:          "reset",
		Short:        "Reset schema",
		RunE:         app.RunE,
		SilenceUsage: true,
	}
}
// RunE fetches the synchronization job and deletes (resets) its schema.
func (app *AppSPJobSchemaReset) RunE(cmd *cobra.Command, args []string) error {
	ctx := context.Background()
	err := app.GetSynchronizationJob(ctx)
	if err != nil {
		return err
	}
	return app.SynchronizationJobRB.Schema().Request().Delete(ctx)
}
|
package dynamo
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/google/uuid"
"log"
"os"
"strconv"
"testing"
)
// Shared fixtures for these integration tests: the client under test and
// the table it operates on (provisioned by Setup against DynamoDB Local).
var (
	qupDynamo QupDynamo
	tableName = "Persons"
)

// PersonTest is the record type stored in the test table; "uid" is the
// hash key.
type PersonTest struct {
	Id string `dynamo:"uid,key"`
	FirstName string `dynamo:"first_name"`
	LastName string `dynamo:"last_name"`
	City string `dynamo:"city"`
	Age int32 `dynamo:"age"`
}
// DeleteTable drops the shared test table, exiting the process on failure.
func DeleteTable(connection *dynamodb.DynamoDB) {
	req := &dynamodb.DeleteTableInput{TableName: &tableName}
	if _, err := connection.DeleteTable(req); err != nil {
		fmt.Println("Error cleaning up table " + tableName)
		fmt.Println(err.Error())
		os.Exit(1)
	}
}
// CreateTable provisions the test table with a single string hash key
// "uid" and fixed throughput, exiting the process on failure.
func CreateTable(connection *dynamodb.DynamoDB) {
	input := &dynamodb.CreateTableInput{
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{
				AttributeName: aws.String("uid"),
				AttributeType: aws.String("S"),
			},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{
				AttributeName: aws.String("uid"),
				KeyType: aws.String("HASH"),
			},
		},
		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
			ReadCapacityUnits: aws.Int64(10),
			WriteCapacityUnits: aws.Int64(10),
		},
		TableName: aws.String(tableName),
	}
	_, err := connection.CreateTable(input)
	if err != nil {
		fmt.Println("Got error calling CreateTable:")
		fmt.Println(err.Error())
		os.Exit(1)
	}
	fmt.Println("Created the table", tableName)
}
// Setup (re)creates the test table against DynamoDB Local on
// localhost:8000 and rebuilds the package-level qupDynamo client.
// Each test calls it to start from a clean table.
func Setup() {
	sess, err := session.NewSession(&aws.Config{
		Region: aws.String("eu-west-1"),
		Endpoint: aws.String("http://localhost:8000"),
	})
	if err != nil {
		log.Println(err)
		return
	}
	dbSvc := dynamodb.New(sess)
	result, err := dbSvc.ListTables(&dynamodb.ListTablesInput{})
	if err != nil {
		log.Println(err)
		return
	}
	log.Println("Tables:")
	// Drop a leftover table from a previous run before recreating it.
	for _, table := range result.TableNames {
		if *table == tableName {
			DeleteTable(dbSvc)
		}
	}
	CreateTable(dbSvc)
	qupDynamo = CreateNewQupDynamo(dbSvc)
}
// TestSaveExisting verifies that SaveExisting rejects an item that was
// never stored, then succeeds once the item exists.
func TestSaveExisting(t *testing.T) {
	Setup()
	id := uuid.New().String()
	person := &PersonTest{
		Id: id,
		FirstName: "John",
		LastName: "Doe",
		City: "New York",
		Age: 20,
	}
	// The item has not been saved yet, so SaveExisting must fail.
	err := qupDynamo.SaveExisting(tableName, person)
	if err == nil || err.Error() != "Item does not exist." {
		t.Fail()
	}
	err = qupDynamo.Save(tableName, person)
	if err != nil {
		t.Fail()
	}
	// Now the item exists; updating it through SaveExisting must work.
	person.Age = 40
	err = qupDynamo.SaveExisting(tableName, person)
	if err != nil {
		t.Fail()
	}
}
// TestScan stores four records and verifies Scan honors the limit of 2.
//
// Fix: the error returned by Scan was previously assigned but never
// checked; a failing scan now fails the test explicitly instead of only
// being observed through an empty result slice.
func TestScan(t *testing.T) {
	Setup()
	person1 := &PersonTest{
		Id: uuid.New().String(),
		FirstName: "John",
		LastName: "Doe",
		City: "New York",
		Age: 20,
	}
	person2 := &PersonTest{
		Id: uuid.New().String(),
		FirstName: "Sara",
		LastName: "Jones",
		City: "Washington",
		Age: 29,
	}
	person3 := &PersonTest{
		Id: uuid.New().String(),
		FirstName: "Peter",
		LastName: "Jansen",
		City: "Haarlem",
		Age: 42,
	}
	person4 := &PersonTest{
		Id: uuid.New().String(),
		FirstName: "Jan",
		LastName: "de Wit",
		City: "Zoetermeer",
		Age: 67,
	}
	err := qupDynamo.Save(tableName, person1)
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	err = qupDynamo.Save(tableName, person2)
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	err = qupDynamo.Save(tableName, person3)
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	err = qupDynamo.Save(tableName, person4)
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	persons := &[]PersonTest{}
	err = qupDynamo.Scan(tableName, persons, 2)
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	if len(*persons) != 2 {
		t.Fail()
		t.Log("expected length of 2, length of " + strconv.Itoa(len(*persons)) + " returned")
	}
}
// TestPersonCRUD walks one record through a full save / retrieve /
// update-expression / delete cycle.
func TestPersonCRUD(t *testing.T) {
	Setup()
	recordId := uuid.New().String()
	person := &PersonTest{
		Id: recordId,
		FirstName: "John",
		LastName: "Doe",
		City: "New York",
		Age: 20,
	}
	err := qupDynamo.Save(tableName, person)
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	result := &PersonTest{}
	err = qupDynamo.Retrieve(tableName, recordId, result)
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	if result.FirstName != "John" || result.LastName != "Doe" || result.City != "New York" || result.Age != 20 {
		t.Fail()
	}
	// Apply a DynamoDB update expression that bumps age by 2.
	expression := "set #age = #age + :test"
	values := map[string]interface{}{":test": 2}
	err = qupDynamo.Update(tableName, recordId, PersonTest{}, expression, values)
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	result2 := &PersonTest{}
	err = qupDynamo.Retrieve(tableName, recordId, result2)
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	if result2.Age != 22 {
		t.Log("failed asserting that age " + strconv.Itoa(int(result2.Age)) + " is age 22")
		t.Fail()
	}
	err = qupDynamo.Delete(tableName, recordId, PersonTest{})
	if err != nil {
		t.Log(err.Error())
		t.Fail()
	}
	// After deletion the record must no longer be retrievable.
	result3 := &PersonTest{}
	err = qupDynamo.Retrieve(tableName, recordId, result3)
	if err == nil || err.Error() != "record not found" {
		t.Fail()
	}
}
|
package utils
import (
"fmt"
"testing"
"time"
)
// Test_parseDateStr exercises ParseDateStr with valid month/day strings
// (resolved into this year or, for dates already passed, checked against
// next year) and with malformed inputs.
//
// Fix: three subtests previously shared the name "incorrect pattern",
// which go test disambiguates only with #01/#02 suffixes; each case now
// has a distinct, descriptive name.
func Test_parseDateStr(t *testing.T) {
	type args struct {
		str string
	}
	year := time.Now().Year()
	time1, _ := time.Parse("2006/1/2 MST", fmt.Sprintf("%d/12/31 JST", year))
	time2, _ := time.Parse("2006/1/2 MST", fmt.Sprintf("%d/1/1 JST", year+1))
	tests := []struct {
		name    string
		args    args
		want    time.Time
		wantErr bool
	}{
		{
			name: "correct pattern",
			args: args{
				str: "12/31",
			},
			want:    time1,
			wantErr: false,
		},
		{
			name: "correct pattern (next year)",
			args: args{
				str: "1/1",
			},
			want:    time2,
			wantErr: false,
		},
		{
			name: "incorrect pattern (no separator)",
			args: args{
				str: "1234",
			},
			want:    time.Time{},
			wantErr: true,
		},
		{
			name: "incorrect pattern (non-numeric)",
			args: args{
				str: "ほげ",
			},
			want:    time.Time{},
			wantErr: true,
		},
		{
			name: "incorrect pattern (empty string)",
			args: args{
				str: "",
			},
			want:    time.Time{},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := ParseDateStr(tt.args.str)
			if (err != nil) != tt.wantErr {
				t.Errorf("parseDateStr() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !got.Equal(tt.want) {
				t.Errorf("parseDateStr() got = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package main
import (
"fmt"
"github.com/coreos/go-etcd/etcd"
"github.com/miekg/dns"
)
// NodeConversionError reports a failure to convert an etcd node into a
// DNS resource record of a particular type.
type NodeConversionError struct {
	Message       string
	Node          *etcd.Node
	AttemptedType uint16
}

// Error implements the error interface, including the node's details.
func (e *NodeConversionError) Error() string {
	// Fix: the original passed &e.Node (a **etcd.Node), so %+v printed a
	// pointer address instead of the node's fields.
	return fmt.Sprintf(
		"Unable to convert etc Node into a RR of type %d ('%s'): %s. Node details: %+v",
		e.AttemptedType,
		dns.TypeToString[e.AttemptedType],
		e.Message,
		e.Node)
}
// RecordValueError reports an invalid record value for a DNS record type.
type RecordValueError struct {
	Message       string
	AttemptedType uint16
}

// Error implements the error interface.
func (e *RecordValueError) Error() string {
	return fmt.Sprintf("Invalid record value for type %d: %s", e.AttemptedType, e.Message)
}
|
package bbir
import (
"context"
"errors"
"math"
"strconv"
"strings"
"time"
"github.com/vvatanabe/go-backlog/backlog/v2"
)
// CommandBuilder turns one parsed input Line into a Command.
type CommandBuilder interface {
	// Build validates the line and returns the resulting Command; the
	// int argument is the line number recorded on the Command.
	Build(int, *Line) (*Command, error)
}
// NewCommandBuilder returns the default CommandBuilder backed by the
// given issue/project repositories and localized messages.
func NewCommandBuilder(issue IssueRepository, project ProjectRepository, msgs Messages) CommandBuilder {
	return &commandBuilder{issue, project, msgs}
}

// commandBuilder is the default CommandBuilder implementation.
type commandBuilder struct {
	issue IssueRepository
	project ProjectRepository
	msgs Messages
}
type resolver func(*Command, *Line) error
// Build validates every field group of line and assembles a Command.
// Resolver errors are collected and returned together so the caller can
// report all problems on the line at once.
func (b *commandBuilder) Build(lineNum int, line *Line) (*Command, error) {
	var errs []error
	command := &Command{Line: lineNum, Children: []*Command{}, CustomFields: make(map[CustomFieldID]interface{})}
	// Each resolver fills one field group of the command from the line.
	for _, f := range []resolver{
		b.ensureProjectID,
		b.ensureSummary,
		b.ensureIssueTypeID,
		b.ensurePriorityID,
		b.ensureDescription,
		b.ensureStartAndDueDate,
		b.ensureEstimatedHours,
		b.ensureActualHours,
		b.ensureCategoryID,
		b.ensureVersionID,
		b.ensureMilestoneID,
		b.ensureAssigneeID,
		b.ensureParentIssue,
		b.ensureCustomFields,
	} {
		if err := f(command, line); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return nil, NewMultipleErrors(errs)
	}
	return command, nil
}
// ensureProjectID stamps the command with the repository's project ID.
func (b *commandBuilder) ensureProjectID(cmd *Command, line *Line) error {
	cmd.ProjectID = b.project.GetProjectID()
	return nil
}
// ensureSummary copies the required summary field, rejecting blanks.
func (b *commandBuilder) ensureSummary(cmd *Command, line *Line) error {
	summary := line.Summary
	if summary == "" {
		return errors.New(b.msgs.SummaryIsRequired())
	}
	cmd.Summary = summary
	return nil
}
// ensureDescription copies the (optional) description verbatim.
func (b *commandBuilder) ensureDescription(cmd *Command, line *Line) error {
	cmd.Description = line.Description
	return nil
}
const DateLayout = "2006-01-02"
// ensureStartAndDueDate validates the optional start and due dates
// (DateLayout format) and rejects a start date after the due date.
// The original strings, not the parsed times, are stored on the command.
func (b *commandBuilder) ensureStartAndDueDate(cmd *Command, line *Line) error {
	var startDate, dueDate time.Time
	if line.StartDate != "" {
		var err error
		startDate, err = time.Parse(DateLayout, line.StartDate)
		if err != nil {
			return errors.New(b.msgs.StartDateIsInvalid(line.StartDate))
		}
	}
	if line.DueDate != "" {
		var err error
		dueDate, err = time.Parse(DateLayout, line.DueDate)
		if err != nil {
			return errors.New(b.msgs.DueDateIsInvalid(line.DueDate))
		}
	}
	// Only compare ordering when both dates were supplied.
	if !startDate.IsZero() && !dueDate.IsZero() {
		if startDate.After(dueDate) {
			return errors.New(b.msgs.StartDateIsAfterDueDate(line.StartDate, line.DueDate))
		}
	}
	cmd.StartDate = line.StartDate
	cmd.DueDate = line.DueDate
	return nil
}
// ensureEstimatedHours parses the optional estimated-hours field: a
// non-negative decimal with at most two fractional digits.
func (b *commandBuilder) ensureEstimatedHours(cmd *Command, line *Line) error {
	raw := line.EstimatedHours
	if raw == "" {
		return nil
	}
	invalid := func() error {
		return errors.New(b.msgs.EstimatedHoursIsInvalid(raw))
	}
	parts := strings.Split(raw, ".")
	// Reject more than one decimal point, e.g. "1.2.3".
	if len(parts) > 2 {
		return invalid()
	}
	// Reject more than two fractional digits, e.g. "1.234".
	if len(parts) == 2 && len(parts[1]) > 2 {
		return invalid()
	}
	// Negative values are rejected as well.
	v, err := strconv.ParseFloat(raw, 64)
	if err != nil || v < 0 {
		return invalid()
	}
	cmd.EstimatedHours = &v
	return nil
}
// ensureActualHours parses the optional actual-hours field: a
// non-negative decimal with at most two fractional digits.
//
// NOTE(review): all three error paths reuse EstimatedHoursIsInvalid —
// if Messages has an ActualHours variant this looks like a copy-paste
// slip; confirm before changing.
func (b *commandBuilder) ensureActualHours(cmd *Command, line *Line) error {
	if line.ActualHours != "" {
		split := strings.Split(line.ActualHours, ".")
		// Pattern Ex. "1.2.3" => ["1", "2", "3"]
		if len(split) > 2 {
			return errors.New(b.msgs.EstimatedHoursIsInvalid(line.ActualHours))
		}
		// Pattern Ex. 1.234 => ["1", "234"]
		if len(split) == 2 && len(split[1]) > 2 {
			return errors.New(b.msgs.EstimatedHoursIsInvalid(line.ActualHours))
		}
		v, err := strconv.ParseFloat(line.ActualHours, 64)
		if err != nil || (math.Signbit(v) && v < 0) {
			return errors.New(b.msgs.EstimatedHoursIsInvalid(line.ActualHours))
		}
		cmd.ActualHours = &v
	}
	return nil
}
// ensureIssueTypeID resolves the required issue type by name.
func (b *commandBuilder) ensureIssueTypeID(cmd *Command, line *Line) error {
	t := b.project.FindIssueTypeByName(line.IssueType)
	if t == nil {
		return errors.New(b.msgs.IssueTypeIsRequired())
	}
	cmd.IssueTypeID = IssueTypeID(t.ID)
	return nil
}
const DefaultPriorityID = 3
// ensurePriorityID resolves the optional priority by name, falling back
// to DefaultPriorityID when none is given.
func (b *commandBuilder) ensurePriorityID(cmd *Command, line *Line) error {
	id := DefaultPriorityID
	if line.Priority != "" {
		p := b.project.FindPriorityByName(line.Priority)
		if p == nil {
			return errors.New(b.msgs.PriorityIsInvalid(line.Priority))
		}
		id = p.ID
	}
	cmd.PriorityID = PriorityID(id)
	return nil
}
// ensureCategoryID resolves the optional category by name.
func (b *commandBuilder) ensureCategoryID(cmd *Command, line *Line) error {
	if line.Category == "" {
		return nil
	}
	category := b.project.FindCategoryByName(line.Category)
	if category == nil {
		return errors.New(b.msgs.CategoryIsNotRegistered(line.Category))
	}
	cmd.CategoryID = NewCategoryIDPtr(category.ID)
	return nil
}
// ensureVersionID resolves the optional version by name.
func (b *commandBuilder) ensureVersionID(cmd *Command, line *Line) error {
	if line.Version == "" {
		return nil
	}
	version := b.project.FindVersionByName(line.Version)
	if version == nil {
		return errors.New(b.msgs.VersionIsNotRegistered(line.Version))
	}
	cmd.VersionID = NewVersionIDPtr(version.ID)
	return nil
}
// ensureMilestoneID resolves the optional milestone, which is stored as
// a version in the project repository.
func (b *commandBuilder) ensureMilestoneID(cmd *Command, line *Line) error {
	if line.Milestone == "" {
		return nil
	}
	milestone := b.project.FindVersionByName(line.Milestone)
	if milestone == nil {
		return errors.New(b.msgs.MilestoneIsNotRegistered(line.Milestone))
	}
	cmd.MilestoneID = NewVersionIDPtr(milestone.ID)
	return nil
}
// ensureAssigneeID resolves the optional assignee by user name.
func (b *commandBuilder) ensureAssigneeID(cmd *Command, line *Line) error {
	if line.Assignee == "" {
		return nil
	}
	user := b.project.FindUserByName(line.Assignee)
	if user == nil {
		return errors.New(b.msgs.AssigneeIsNotJoining(line.Assignee))
	}
	cmd.AssigneeID = NewUserIDPtr(user.ID)
	return nil
}
// ensureParentIssue resolves the optional parent-issue reference.
// "" means no parent; "*" marks a parent that is defined elsewhere in
// the same import; anything else must be an existing, top-level issue key.
func (b *commandBuilder) ensureParentIssue(cmd *Command, line *Line) error {
	switch line.ParentIssue {
	case "":
		return nil
	case "*":
		cmd.HasUnregisteredParentIssue = true
		return nil
	}
	parent, err := b.issue.FindIssueByKey(context.Background(), line.ParentIssue)
	if err != nil || parent == nil {
		return errors.New(b.msgs.ParentIssueIsNotRegistered(line.ParentIssue))
	}
	// A child issue cannot itself become a parent.
	if parent.ParentIssueID != nil {
		return errors.New(b.msgs.ParentIssueAlreadyRegisteredAsChildIssue(line.ParentIssue))
	}
	cmd.ParentIssueID = NewIssueIDPtr(parent.ID)
	return nil
}
// ensureCustomFields validates every custom field on the line against
// the project's custom field definitions and converts each value
// according to the field's type ID. All errors are collected and
// returned together. Empty values are skipped.
func (b *commandBuilder) ensureCustomFields(cmd *Command, line *Line) error {
	var errs []error
	for k, v := range line.CustomFields {
		cf := b.project.FindCustomFieldByName(k)
		if cf == nil {
			errs = append(errs, errors.New(b.msgs.CustomFieldIsNotRegistered(k)))
			continue
		}
		if v == "" {
			continue
		}
		if 1 <= cf.TypeID && cf.TypeID <= 2 { // 1: Text, 2: Sentence
			cmd.CustomFields[CustomFieldID(cf.ID)] = v
		} else if cf.TypeID == 3 { // 3: Number
			// Stored as the original string after checking it parses.
			if _, err := strconv.Atoi(v); err != nil {
				errs = append(errs, errors.New(b.msgs.CustomFieldValueShouldBeTypeInt(k, v)))
			} else {
				cmd.CustomFields[CustomFieldID(cf.ID)] = v
			}
		} else if cf.TypeID == 4 { // 4: Date
			// Accept "/"-separated dates by normalizing to DateLayout.
			v = strings.Replace(v, "/", "-", 3)
			if _, err := time.Parse(DateLayout, v); err != nil {
				errs = append(errs, errors.New(b.msgs.CustomFieldValueShouldBeTypeDate(k, v)))
			} else {
				cmd.CustomFields[CustomFieldID(cf.ID)] = v
			}
		} else if 5 <= cf.TypeID && cf.TypeID <= 8 { // 5: Single list, 6: Multiple list, 7: Checkbox, 8: Radio
			// Choice fields are stored as the chosen item's ID.
			if item := findCustomFieldItemByName(cf, v); item == nil {
				errs = append(errs, errors.New(b.msgs.CustomFieldChoiceIsNotRegistered(k, v)))
			} else {
				cmd.CustomFields[CustomFieldID(cf.ID)] = item.ID
			}
		}
	}
	if len(errs) > 0 {
		return NewMultipleErrors(errs)
	}
	return nil
}
// findCustomFieldItemByName returns the item of field whose name matches
// name exactly, or nil when no such item exists.
func findCustomFieldItemByName(field *v2.CustomField, name string) *v2.CustomFieldItem {
	for _, candidate := range field.Items {
		if candidate.Name == name {
			return candidate
		}
	}
	return nil
}
|
package LinkedList
import (
"fmt"
"github.com/Amertz08/EECS560-go/Lab01/Node"
)
// LinkedList is a singly linked list of ints.
type LinkedList struct {
	// head is the first node; nil when the list is empty.
	head *Node.Node
}

// NewLinkedList returns an empty list.
func NewLinkedList() LinkedList {
	return LinkedList{}
}
// Insert appends val at the end of the list, logging the insertion.
func (l *LinkedList) Insert(val int) {
	fmt.Printf("Inserting value %v\n", val)
	if l.isEmpty() {
		node := Node.NewNode(val)
		l.head = &node
		return
	}
	l.insertHelper(l.head, val)
}
// insertHelper walks to the tail starting at cur and appends a new node
// holding val.
func (l *LinkedList) insertHelper(cur *Node.Node, val int) {
	for cur.Next != nil {
		cur = cur.Next
	}
	node := Node.NewNode(val)
	cur.Next = &node
}
// InsertFront prepends val as the new head of the list.
func (l *LinkedList) InsertFront(val int) {
	node := Node.NewNode(val)
	// Linking the old head (nil when empty) behind the new node covers
	// both the empty and non-empty cases.
	node.Next = l.head
	l.head = &node
}
// isEmpty reports whether the list has no nodes.
func (l *LinkedList) isEmpty() bool {
	return l.head == nil
}
// Print writes the list values on one line, or a notice when empty.
func (l *LinkedList) Print() {
	if l.isEmpty() {
		fmt.Println("List is empty")
		return
	}
	for node := l.head; node != nil; node = node.Next {
		fmt.Printf("%d ", node.Value)
	}
	fmt.Println()
}
// Find reports whether val is present in the list.
func (l *LinkedList) Find(val int) bool {
	for node := l.head; node != nil; node = node.Next {
		if node.Value == val {
			return true
		}
	}
	return false
}
// eraseHelper unlinks the first node after cur whose value equals val,
// printing a notice when the value is not found.
func (l *LinkedList) eraseHelper(cur *Node.Node, val int) {
	for cur.Next != nil {
		if cur.Next.Value == val {
			cur.Next = cur.Next.Next
			return
		}
		cur = cur.Next
	}
	fmt.Println("Value not in list")
}
func (l *LinkedList) Erase(val int) {
if l.isEmpty() {
fmt.Println("Empty list cannot erase value")
return
}
if l.head.Value == val {
l.head = l.head.Next
return
}
tmp := l.head
l.eraseHelper(tmp, val)
} |
package worldengine
|
package main
import (
"bufio"
"encoding/gob"
"fmt"
"net"
"os"
)
// runClient dials the chat server on :9999 and services menu selections
// delivered over the channels: first a code is read from status
// (0 join, 1 message, 2 file, 3 quit), then — for codes 1-3 — the
// payload is read from msg. A background goroutine prints every
// broadcast received from the server.
func runClient(userName string, status chan int, msg chan string) {
	c, err := net.Dial("tcp", ":9999")
	if err != nil {
		fmt.Println(err)
		return
	}
	var message, received string
	//welcome message
	message = userName + " has entered the chat room"
	//create its own directory for files
	// NOTE(review): Mkdir fails (and the client exits) when the directory
	// already exists from a previous session — confirm this is intended.
	err = os.Mkdir(userName+"/", 0755)
	if err != nil {
		fmt.Println(err)
		return
	}
	//always listening: decode and print broadcasts until the
	//connection drops
	go func() {
		defer c.Close()
		for {
			err := gob.NewDecoder(c).Decode(&received)
			if err != nil {
				fmt.Println(err)
				return
			}
			fmt.Println(received)
		}
	}()
	//keep the connection
	for {
		select {
		case _status := <-status:
			//first connection: announce the user to the room
			if _status == 0 {
				err = gob.NewEncoder(c).Encode(message)
				if err != nil {
					fmt.Println(err)
					return
				}
			}
			//send message
			if _status == 1 {
				message = <-msg
				err := gob.NewEncoder(c).Encode(message)
				if err != nil {
					fmt.Println(err)
					return
				}
			}
			//send file
			if _status == 2 {
				message = <-msg
				clientSendFile(c, userName, message)
			}
			//terminate connection after sending the farewell payload
			if _status == 3 {
				message = <-msg
				err := gob.NewEncoder(c).Encode(message)
				if err != nil {
					fmt.Println(err)
					return
				}
				fmt.Println("\nGoodbye!")
				return
			}
		}
	}
}
// clientSendFile announces a file transfer by encoding
// "user: filename" onto the connection.
func clientSendFile(c net.Conn, userName, message string) {
	payload := userName + ": " + message
	if err := gob.NewEncoder(c).Encode(payload); err != nil {
		fmt.Println(err)
	}
}
// main reads the username, starts the client goroutine, and then loops
// on a numeric menu, forwarding each choice (and its payload) to
// runClient over the status and msg channels.
func main() {
	var opc int
	var status = make(chan int)
	var msg = make(chan string)
	//until the client enters their name
	//the client won't connect to the server
	fmt.Println("Enter your username")
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan()
	userName := scanner.Text()
	//goroutine connecting to server
	go runClient(userName, status, msg)
	// Code 0 tells the client to announce the user to the room.
	status <- 0
	//main menu
	fmt.Println("\nMenu" +
		"\n 1. Send message" +
		"\n 2. Send File" +
		"\n 3. Stop client")
	//keep listening to user input
	for {
		fmt.Scanln(&opc)
		switch opc {
		//send message
		case 1:
			fmt.Println("Enter your message")
			scanner := bufio.NewScanner(os.Stdin)
			scanner.Scan()
			message := scanner.Text()
			message = userName + ": " + message
			status <- 1
			msg <- message
		//send file
		case 2:
			fmt.Println("Enter your file name")
			scanner := bufio.NewScanner(os.Stdin)
			scanner.Scan()
			message := scanner.Text()
			status <- 2
			msg <- message
		//disconnect
		case 3:
			status <- 3
			msg <- "disconnect"
			return
		//any other input won't be accepted
		default:
			fmt.Println("\nWrong option")
		}
	}
}
|
package forkexec
import (
"syscall"
)
// prepareExec prepares execve parameters: the program path (argv[0]) as
// a *byte, plus NUL-terminated argument and environment vectors (each
// carries a trailing nil pointer as required by execve).
//
// Fix: an empty Args now returns EINVAL instead of panicking on Args[0].
func prepareExec(Args, Env []string) (*byte, []*byte, []*byte, error) {
	// execve needs at least the program name.
	if len(Args) == 0 {
		return nil, nil, nil, syscall.EINVAL
	}
	// make exec args0
	argv0, err := syscall.BytePtrFromString(Args[0])
	if err != nil {
		return nil, nil, nil, err
	}
	// make exec args
	argv, err := syscall.SlicePtrFromStrings(Args)
	if err != nil {
		return nil, nil, nil, err
	}
	// make env
	env, err := syscall.SlicePtrFromStrings(Env)
	if err != nil {
		return nil, nil, nil, err
	}
	return argv0, argv, env, nil
}
// prepareFds converts the caller's fd list to ints and computes the
// first safe scratch fd: one past both len(files) and the largest fd
// present in files.
func prepareFds(files []uintptr) ([]int, int) {
	fds := make([]int, len(files))
	highest := len(files)
	for i, f := range files {
		fds[i] = int(f)
		if int(f) > highest {
			highest = int(f)
		}
	}
	return fds, highest + 1
}
// syscallStringFromString returns a NUL-terminated *byte for str, or
// nil (with no error) when str is empty.
func syscallStringFromString(str string) (*byte, error) {
	if str == "" {
		return nil, nil
	}
	return syscall.BytePtrFromString(str)
}
|
package portals
import (
"log"
"fmt"
"os"
"strings"
"github.com/galaco/bsp/lumps/datatypes/portal"
"github.com/galaco/bsp/lumps/datatypes/visibility"
"github.com/galaco/bsp/lumps"
"github.com/go-gl/mathgl/mgl32"
)
// PORTALFILE is the magic header expected at the top of a portal file.
const PORTALFILE = "PRT1"
// MAX_POINTS_ON_WINDING bounds points in a general winding.
const MAX_POINTS_ON_WINDING = 64
// MAX_POINTS_ON_FIXED_WINDING bounds the points stored inline in Winding.
const MAX_POINTS_ON_FIXED_WINDING = 12

// Vec3 is a 3-component float vector (alias of mgl32.Vec3).
type Vec3 mgl32.Vec3

// VStatus tracks the visibility-processing state of a portal.
type VStatus int8

const (
	// Stat_none: not yet processed.
	Stat_none VStatus = iota
	// Stat_working: currently being processed.
	Stat_working VStatus = iota
	// Stat_done: processing finished.
	Stat_done VStatus = iota
)

// Winding is a fixed-capacity polygon used to describe a portal's shape.
type Winding struct {
	Original int32 //qboolean
	NumPoints int32
	Points [MAX_POINTS_ON_FIXED_WINDING]Vec3
}

// Vis mirrors the on-disk visibility header.
type Vis struct {
	NumClusters int32
	Bitofs [8][2]int32 // bitofs[numclusters][2]
}

// Plane is a hyperplane in normal/distance form.
type Plane struct {
	Normal Vec3
	Dist float32
}

// Portal is one portal between leaves, plus its visibility bit vectors.
type Portal struct {
	Plane Plane
	Leaf int32
	Origin Vec3
	Radius float32
	Winding *Winding
	Status VStatus
	PortalFront *byte
	PortalFlood *byte
	PortalVis *byte
	NumMightSee int32
}

// Leaf is a BSP leaf: the set of portals attached to it.
type Leaf struct {
	Portals []*Portal
}

// Exports bundles the results of loading a portal file.
type Exports struct {
	PortalLongs int32
	LeafLongs int32
	UncompressedVis []byte
	VismapEnd *[]byte
	PortalClusters int
}
// Load parses a PRT1 portal file ("name"), building the in-memory portal and
// leaf graph used by the visibility computation, and returns sizing and
// bookkeeping values in an Exports.
//
// NOTE(review): this is a partial port of C vvis code and does not compile as
// written — fmt.Fscanf is given C-style verbs ("%i", "%lf") that Go's fmt
// does not accept (use "%d"/"%f"), `magic` is scanned with %79s into a byte
// array, and VismapEnd is assigned with C pointer arithmetic. The MPI branch
// is untranslated C++ left in a comment, so useMPI==true leaves f nil.
func Load(name string, useMPI bool, visLump *lumps.Visibility) Exports {
	var i int32
	var j int32
	var p *Portal
	var l *Leaf
	var magic [80]byte //char
	var numpoints int32
	var w *Winding
	var leafnums [2]int32
	var plane Plane
	var portalExports Exports
	var dvis Vis
	var f *os.File
	var err error
	var numPortals int32
	// Open the portal file.
	if useMPI {
		/*
		// If we're using MPI, copy off the file to a temporary first. This will download the file
		// from the MPI master, then we get to use nice functions like fscanf on it.
		tempPath [MAX_PATH]byte //char
		tempFile [MAX_PATH]byte //char
		if GetTempPath( sizeof( tempPath ), tempPath ) == 0 {
			log.Fatalf( "LoadPortals: GetTempPath failed.\n" )
		}
		if GetTempFileName( tempPath, "vvis_portal_", 0, tempFile ) == 0 {
			log.Fatalf( "LoadPortals: GetTempFileName failed.\n" )
		}
		// Read all the data from the network file into memory.
		FileHandle_t hFile = g_pFileSystem->Open(name, "r");
		if hFile == FILESYSTEM_INVALID_HANDLE {
			log.Fatalf( "LoadPortals( %s ): couldn't get file from master.\n", name )
		}
		CUtlVector<char> data;
		data.SetSize( g_pFileSystem->Size( hFile ) );
		g_pFileSystem->Read( data.Base(), data.Count(), hFile );
		g_pFileSystem->Close( hFile );
		// Dump it into a temp file.
		f = fopen( tempFile, "wt" );
		fwrite( data.Base(), 1, data.Count(), f );
		fclose( f );
		// Open the temp file up.
		f = fopen( tempFile, "rSTD" ); // read only, sequential, temporary, delete on close
		*/
	} else {
		f,err = os.Open(name)
	}
	if err != nil {
		log.Fatalf("LoadPortals: couldn't read %s\n",name)
	}
	// Header: magic, cluster count, portal count.
	// NOTE(review): "%i" is not a valid Go scanning verb — should be "%d".
	if n,_ :=fmt.Fscanf(f,"%79s\n%i\n%i\n", &magic, &portalExports.PortalClusters, &numPortals); n != 3 {
		log.Fatalf("LoadPortals %s: failed to read header", name)
	}
	// NOTE(review): string(magic[:]) is 80 bytes of mostly NULs, so this
	// comparison with "PRT1" is suspect — confirm the intended check.
	if strings.Compare(string(magic[:]), PORTALFILE) > 0 {
		log.Fatalf("LoadPortals %s: not a portal file", name)
	}
	fmt.Printf("%4i portalclusters\n", portalExports.PortalClusters)
	fmt.Printf("%4i numportals\n", numPortals)
	if numPortals * 2 >= portal.MAX_PORTALS {
		log.Fatalf("The map overflows the max portal count (%d of max %d)!\n", numPortals, portal.MAX_PORTALS / 2 )
	}
	// these counts should take advantage of 64 bit systems automatically
	// Round the per-cluster/per-portal bit vectors up to a 64-bit multiple,
	// expressed in bytes (>>3), then in 32-bit longs (/4).
	leafbytes := ((portalExports.PortalClusters + 63) &~ 63) >> 3
	portalExports.LeafLongs = int32(leafbytes / 4) //4 = sizeof int32 | x86 long
	portalbytes := ((numPortals * 2 + 63) &~ 63) >> 3
	portalExports.PortalLongs = portalbytes / 4 //4 = sizeof int32 | x86 long
	// each file portal is split into two memory portals
	//portals := (portal_t*)malloc(2*numPortals*sizeof(portal_t))
	//memset (portals, 0, 2*numPortals*sizeof(portal_t))
	portals := make([]Portal, 2*numPortals)
	//leafs = (leaf_t*)malloc(ortalClusters*int32(unsafe.Sizeof(leaf.Leaf{})))
	//memset (leafs, 0, portalClusters*int32(unsafe.Sizeof(leaf.Leaf{})))
	leafs := make([]Leaf, portalExports.PortalClusters)
	originalvismapsize := portalExports.PortalClusters*leafbytes
	portalExports.UncompressedVis = make([]byte, originalvismapsize)
	vismap := visLump.ToBytes()
	dvis.NumClusters = int32(portalExports.PortalClusters)
	//buf := new(bytes.Buffer)
	//binary.Write(buf, binary.LittleEndian, &dvis.Bitofs[portalClusters])
	//vismap_p := buf.Bytes()
	// need to think about this solution
	// *byte = *byte + int
	// NOTE(review): C pointer arithmetic — does not compile in Go; VismapEnd
	// needs a different representation (e.g. an index/length).
	portalExports.VismapEnd = (vismap + visibility.MAX_MAP_VISIBILITY)
	pIndex := 0
	// Each file portal produces two memory portals (forward and backward).
	for i, p = 0, &portals[pIndex]; i < numPortals; i++ {
		if n,_ :=fmt.Fscanf (f, "%i %i %i ", &numpoints, &leafnums[0], &leafnums[1]); n != 3 {
			log.Fatalf("LoadPortals: reading portal %i", i)
		}
		if numpoints > MAX_POINTS_ON_WINDING {
			log.Fatalf("LoadPortals: portal %i has too many points", i)
		}
		if leafnums[0] > int32(portalExports.PortalClusters) || leafnums[1] > int32(portalExports.PortalClusters) {
			log.Fatalf("LoadPortals: reading portal %i", i)
		}
		p.Winding = newWinding (numpoints)
		w = p.Winding
		w.Original = 1 //true b/c qboolean
		w.NumPoints = numpoints
		// Read the winding's vertices.
		for j=0; j<numpoints; j++ {
			v := [3]float32{} //actually a double
			k := 0
			// scanf into double, then assign to vec_t
			// so we don't care what size vec_t is
			if n,_ := fmt.Fscanf (f, "(%lf %lf %lf ) ", &v[0], &v[1], &v[2]); n != 3 {
				log.Fatalf("LoadPortals: reading portal %i", i);
			}
			for k=0; k<3; k++ {
				w.Points[j][k] = v[k]
			}
		}
		fmt.Fscanf (f, "\n")
		// calc plane
		PlaneFromWinding (w, &plane)
		// create forward portal
		l = &leafs[leafnums[0]]
		l.Portals = append(l.Portals, p)
		p.Winding = w
		// NOTE(review): the next two lines both compute the negated normal —
		// one of them is redundant, and VectorSubtract's signature here
		// differs from its use elsewhere. Confirm which form is intended.
		VectorSubtract (vec3_origin, plane.Normal, p.Plane.Normal)
		p.Plane.Normal = vec3_origin.Sub(plane.Normal)
		p.Plane.Dist = - plane.Dist;
		p.Leaf = leafnums[1]
		SetPortalSphere (p)
		pIndex++
		p = &portals[pIndex]
		// create backwards portal
		l = &leafs[leafnums[1]]
		//l->portals.AddToTail(p)
		l.Portals = append(l.Portals, p)
		p.Winding = newWinding(w.NumPoints)
		p.Winding.NumPoints = w.NumPoints
		// Reverse the point order so the backward portal faces the other way.
		for j=0 ; j< w.NumPoints ; j++ {
			VectorCopy (w.Points[w.NumPoints-1-j], p.Winding.Points[j])
		}
		p.Plane = plane
		p.Leaf = leafnums[0]
		SetPortalSphere (p)
		pIndex++
		p = &portals[pIndex]
	}
	//fclose (f)
	f.Close()
	return portalExports
}
// newWinding allocates a zeroed Winding able to hold the requested number of
// points, aborting the program when points exceeds MAX_POINTS_ON_WINDING.
// The original C sized the allocation to the point count; the Go Winding
// carries a fixed-size Points array, so a plain allocation suffices.
// NOTE(review): Points holds only MAX_POINTS_ON_FIXED_WINDING (12) entries,
// which is less than the MAX_POINTS_ON_WINDING (64) accepted here — confirm.
func newWinding(points int32) *Winding {
	if points > MAX_POINTS_ON_WINDING {
		// %d, not the C-style %i, is Go's integer formatting verb.
		log.Fatalf("NewWinding: %d points, max %d", points, MAX_POINTS_ON_WINDING)
	}
	// The original returned a nil pointer that callers immediately
	// dereferenced (w.Original = 1); allocate a zeroed Winding instead.
	return &Winding{}
}
// SetPortalSphere computes a bounding sphere for the portal's winding: the
// center is the average of the winding's points and the radius is the
// distance from that center to the farthest point.
//
// NOTE(review): depends on vector helpers (VectorCopy/VectorAdd/
// VectorSubtract/VectorLength) and vec3_origin defined elsewhere in the
// package; also `total[i] /= w.NumPoints` divides a float32 component by an
// int32 count, which will not compile without a conversion.
func SetPortalSphere (p *Portal) {
	var i int32
	var total Vec3
	var dist Vec3
	var w *Winding
	var r float32
	var bestr float32
	w = p.Winding
	VectorCopy (vec3_origin, total)
	// Sum all winding points, then divide by the count to get the centroid.
	for i = 0; i < w.NumPoints; i++ {
		VectorAdd (total, w.Points[i], total)
		//total = total.Add(w.Points[i])
	}
	for i=0 ; i<3 ; i++ {
		total[i] /= w.NumPoints
	}
	bestr = 0;
	// Radius is the maximum distance from the centroid to any point.
	for i=0; i<w.NumPoints ; i++ {
		VectorSubtract (w.Points[i], total, dist)
		//dist = w.Points[i].Sub(total)
		r = VectorLength (dist)
		//r = dist.Length()
		if r > bestr {
			bestr = r
		}
	}
	VectorCopy (total, p.Origin)
	p.Radius = bestr
}
// PlaneFromWinding derives the plane containing the winding from its first
// three points: normal = (p0-p1) x (p2-p1), normalized, with dist the
// projection of p0 onto that normal.
//
// NOTE(review): relies on VectorSubtract/CrossProduct/VectorNormalize/
// DotProduct helpers defined elsewhere in the package; windings with fewer
// than three points would index out of range.
func PlaneFromWinding (w *Winding, plane *Plane) {
	var v1 Vec3
	var v2 Vec3
	// calc plane
	VectorSubtract (w.Points[2], w.Points[1], v1);
	//v1 = w.Points[2].Sub(w.Points[1])
	VectorSubtract (w.Points[0], w.Points[1], v2);
	//v2 = w.Points[0].Sub(w.Points[1])
	CrossProduct (v2, v1, plane.Normal);
	//plane.Normal = v2.Cross(v1)
	VectorNormalize (plane.Normal);
	//plane.Normal = plane.Normal.Normalize()
	plane.Dist = DotProduct (w.Points[0], plane.Normal);
	//plane.Dist = w.Points[0].Dot(plane.Normal)
}
|
package health
import (
"time"
"github.com/coupa/foundation-go/config"
"github.com/gin-gonic/gin"
)
// Health status levels and dependency type labels used in health reports.
const (
	OK = "OK"
	WARN = "WARN"
	CRIT = "CRIT"
	TypeInternal = "internal"
	TypeService = "service"
	TypeThirdParty = "third-party"
)
var (
	// startTime is captured in init() and anchors UpTime().
	startTime time.Time
	//CriticalLevels starts with 1 since 0 will be the value for invalid keys
	CriticalLevels = map[string]int{
		OK: 1,
		WARN: 2,
		CRIT: 3,
	}
)
//AppInfo carries the application's build and identity metadata reported by
//the health endpoints. AppName and Hostname are populated from the
//environment via FillFromENV.
type AppInfo struct {
	Version string
	Revision string
	AppName string `env:"APPLICATION_NAME"`
	Hostname string `env:"HOSTNAME"`
}
//FillFromENV populates the env-tagged fields from environment variables and
//records the supplied build version and revision on the receiver. It returns
//the receiver for chaining.
func (ai *AppInfo) FillFromENV(version, revision string) *AppInfo {
	config.PopulateEnvConfig(ai)
	// The original accepted version/revision but silently dropped them;
	// store them so the health payloads report real build information.
	ai.Version = version
	ai.Revision = revision
	return ai
}
//ProjectInfo describes where the project lives and who owns it. The *Str
//fields receive raw comma-separated values from the environment and are
//split into the exported slices by convertStrToSlices; they are excluded
//from JSON output.
type ProjectInfo struct {
	Repo string `json:"repo" env:"PROJECT_REPO"`
	Home string `json:"home" env:"PROJECT_HOME"`
	Owners []string `json:"owners"`
	Logs []string `json:"logs"`
	Stats []string `json:"stats"`
	OwnersStr string `json:"-" env:"PROJECT_OWNERS"`
	LogsStr string `json:"-" env:"PROJECT_LOG_URLS"`
	StatsStr string `json:"-" env:"PROJECT_STATS_URLS"`
}
//FillFromENV loads the pre-defined values for env tags from environment
//variables and splits the comma-separated raw fields into their slices.
//It returns the receiver for chaining.
func (pi *ProjectInfo) FillFromENV() *ProjectInfo {
	config.PopulateEnvConfig(pi)
	pi.convertStrToSlices()
	return pi
}
//convertStrToSlices converts the comma-separated string fields into their
//respective string slices.
func (pi *ProjectInfo) convertStrToSlices() {
	pi.Owners = config.SplitByCommaSpace(pi.OwnersStr)
	pi.Logs = config.SplitByCommaSpace(pi.LogsStr)
	pi.Stats = config.SplitByCommaSpace(pi.StatsStr)
}
//DependencyInfo describes the health of a single dependency as included in
//the detailed health report. Type is one of the Type* constants above.
type DependencyInfo struct {
	Name string `json:"name"`
	Type string `json:"type"`
	Version string `json:"version"`
	Revision string `json:"revision"`
	State DependencyState `json:"state"`
	// ResponseTime is the dependency check's round-trip time in seconds.
	ResponseTime float64 `json:"responseTime"`
}
//DependencyState pairs a status level (OK/WARN/CRIT) with optional detail text.
type DependencyState struct {
	Status string `json:"status"`
	Details string `json:"details,omitempty"`
}
//init records the process start time so UpTime can report elapsed seconds.
func init() {
	startTime = time.Now()
}
//UpTime reports how long the application has been running, in whole seconds.
func UpTime() int64 {
	elapsed := time.Since(startTime)
	return int64(elapsed.Seconds())
}
/**
// To make data for detailed health check:
h := NewDetailedHealth(c, "some...")
d := DependencyInfo{...}
h.AddDependency(d) // Add as many dependencies as needed.
*/
//Health is the free-form, JSON-serializable health report payload.
type Health map[string]interface{}
//AddDependency appends one dependency entry. It tolerates either storage
//form under "dependencies": the []interface{} built by earlier AddDependency
//calls or the []DependencyInfo installed by SetDependencies.
func (h Health) AddDependency(d *DependencyInfo) {
	if d == nil {
		return
	}
	switch deps := h["dependencies"].(type) {
	case nil:
		h["dependencies"] = []interface{}{*d}
	case []interface{}:
		h["dependencies"] = append(deps, *d)
	case []DependencyInfo:
		// The original asserted []interface{} unconditionally and panicked
		// when SetDependencies had stored a typed slice; convert instead.
		merged := make([]interface{}, 0, len(deps)+1)
		for _, dep := range deps {
			merged = append(merged, dep)
		}
		h["dependencies"] = append(merged, *d)
	}
}
//SetDependencies replaces the whole dependency list.
func (h Health) SetDependencies(d []DependencyInfo) {
	h["dependencies"] = d
}
//NewSimpleHealth creates a health struct that can be rendered for the simple
//health check: status plus build version/revision. A nil AppInfo is treated
//as empty.
func NewSimpleHealth(ai *AppInfo, status string) Health {
	info := ai
	if info == nil {
		info = &AppInfo{}
	}
	h := Health{}
	h["status"] = status
	h["version"] = info.Version
	h["revision"] = info.Revision
	return h
}
//NewDetailedHealth creates a detailed health struct (status OK, app/project
//metadata, uptime) without any dependency entries. Nil AppInfo/ProjectInfo
//are treated as empty.
func NewDetailedHealth(ai *AppInfo, pi *ProjectInfo, description string) Health {
	appInfo, projInfo := ai, pi
	if appInfo == nil {
		appInfo = &AppInfo{}
	}
	if projInfo == nil {
		projInfo = &ProjectInfo{}
	}
	h := NewSimpleHealth(appInfo, OK)
	h["project"] = *projInfo
	h["host"] = appInfo.Hostname
	h["description"] = description
	h["name"] = appInfo.AppName
	h["uptime"] = UpTime()
	return h
}
//HealthChecker is implemented by anything that can probe one dependency and
//report its state.
type HealthChecker interface {
	Check() *DependencyInfo
	GetName() string
	GetType() string
}
//AdditionalHealthData configures the detailed health endpoint with extra
//dependency checks and custom payload data.
type AdditionalHealthData struct {
	DependencyChecks []HealthChecker
	//The custom data can override the default health detail values
	DataProvider func(*gin.Context) map[string]interface{}
	//Description is set in server.RegisterDetailedHealth function, so there is no need
	//to set this field when initializing AdditionalHealthData struct
	Description string
}
//IsMoreCritical checks if a's status level is more critical than b's.
//Unknown statuses map to level 0 and are therefore the least critical.
func IsMoreCritical(a string, b string) bool {
	levelA, levelB := CriticalLevels[a], CriticalLevels[b]
	return levelA > levelB
}
|
package room
import (
"github.com/name5566/leaf/module"
"myLeaf/base"
"myLeaf/room/logic"
)
var (
	// skeleton drives this module's goroutine/timer/ChanRPC plumbing.
	skeleton = base.NewSkeleton()
	// ChanRPC is exported so other modules can dispatch calls to this one.
	ChanRPC = skeleton.ChanRPCServer
)
//Module is the room game module; it embeds the shared leaf Skeleton so the
//framework can drive its lifecycle.
type Module struct {
	*module.Skeleton
}
//OnInit attaches the package-level skeleton and registers ChanRPC handlers.
func (m *Module) OnInit() {
	m.Skeleton = skeleton
	m.initChanRPC()
}
//initChanRPC registers the RPC handlers served by this module.
//NOTE(review): `msg` is not imported in this file, and createNewBattle /
//handlePing are not defined here — confirm they come from other files in
//this package and that the msg import was not accidentally dropped.
func (m *Module) initChanRPC() {
	m.RegisterChanRPC("NewBattle", createNewBattle)
	m.RegisterChanRPC(msg.Ping, handlePing)
}
package util
import (
"fmt"
k8scontrollerclient "sigs.k8s.io/controller-runtime/pkg/client"
"sync"
)
// ResourceQueue is a mutex-guarded FIFO of Kubernetes client objects with a
// membership table (keyed by object identity) used to reject duplicates.
type ResourceQueue struct {
	queue []k8scontrollerclient.Object
	lookupTable map[k8scontrollerclient.Object]bool
	lock *sync.Mutex
}
// NewResourceQueue returns an empty, ready-to-use queue.
func NewResourceQueue() *ResourceQueue {
	rq := &ResourceQueue{lock: &sync.Mutex{}}
	rq.queue = []k8scontrollerclient.Object{}
	rq.lookupTable = map[k8scontrollerclient.Object]bool{}
	return rq
}
// Enqueue appends obj to the tail of the queue, returning an error when the
// identical object is already queued.
func (q *ResourceQueue) Enqueue(obj k8scontrollerclient.Object) error {
	q.lock.Lock()
	defer q.lock.Unlock()
	if q.lookupTable[obj] {
		return fmt.Errorf("error inserting duplicate object: %s", obj)
	}
	q.lookupTable[obj] = true
	q.queue = append(q.queue, obj)
	return nil
}
// EnqueueIgnoreExisting appends obj to the tail of the queue; when the
// identical object is already queued the call is a silent no-op.
func (q *ResourceQueue) EnqueueIgnoreExisting(obj k8scontrollerclient.Object) {
	q.lock.Lock()
	defer q.lock.Unlock()
	if q.lookupTable[obj] {
		return
	}
	q.lookupTable[obj] = true
	q.queue = append(q.queue, obj)
}
// Length reports the number of queued objects.
// NOTE(review): this does not take the lock; concurrent callers only get a
// snapshot value.
func (q *ResourceQueue) Length() int {
	return len(q.queue)
}
// RemoveIgnoreNotFound deletes obj from the queue if present; objects that
// are not queued are silently ignored.
//
// NOTE(review): membership is checked by interface identity (lookupTable),
// but the queue entry is matched by name+namespace via equals(); if two
// distinct objects share a name/namespace, this can remove a different
// entry than the one whose identity matched — confirm this is intended.
func (q *ResourceQueue) RemoveIgnoreNotFound(obj k8scontrollerclient.Object) {
	q.lock.Lock()
	defer q.lock.Unlock()
	if _, ok := q.lookupTable[obj]; ok {
		for index, existingObj := range q.queue {
			if q.equals(existingObj, obj) {
				_ = q.removeItem(index)
				delete(q.lookupTable, obj)
				return
			}
		}
	}
}
// equals compares two objects by name and namespace (not by identity).
func (q *ResourceQueue) equals(objOne k8scontrollerclient.Object, objTwo k8scontrollerclient.Object) bool {
	return objOne.GetName() == objTwo.GetName() && objOne.GetNamespace() == objTwo.GetNamespace()
}
// DequeueHead pops the oldest element; ok is false when the queue is empty.
func (q *ResourceQueue) DequeueHead() (k8scontrollerclient.Object, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()
	if len(q.queue) == 0 {
		return nil, false
	}
	return q.removeItem(0), true
}
// DequeueTail pops the newest element; ok is false when the queue is empty.
func (q *ResourceQueue) DequeueTail() (k8scontrollerclient.Object, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()
	n := q.Length()
	if n == 0 {
		return nil, false
	}
	return q.removeItem(n-1), true
}
// removeItem unlinks and returns the element at index, also dropping it from
// the lookup table. Callers must hold the lock. Panics on an invalid index.
func (q *ResourceQueue) removeItem(index int) k8scontrollerclient.Object {
	if index < 0 || index >= q.Length() {
		panic("index out of bounds")
	}
	removed := q.queue[index]
	last := len(q.queue) - 1
	copy(q.queue[index:], q.queue[index+1:])
	// Nil out the stale tail slot so the GC can reclaim the object.
	q.queue[last] = nil
	q.queue = q.queue[:last]
	delete(q.lookupTable, removed)
	return removed
}
|
package config
import (
"crypto/md5"
"errors"
"fmt"
"net/url"
"strings"
"github.com/ryanmoran/viron"
)
// UAAPublicKey holds the UAA token-verification key; populated elsewhere.
var UAAPublicKey string
// Environment is the application configuration, populated from environment
// variables by viron (see the env/env-required/env-default struct tags).
// DatabaseURL is rewritten into Go MySQL DSN form by parseDatabaseURL, and
// EncryptionKey is normalized to 16 bytes by validateEncryptionKey.
type Environment struct {
	CCHost string `env:"CC_HOST" env-required:"true"`
	CORSOrigin string `env:"CORS_ORIGIN" env-default:"*"`
	DBLoggingEnabled bool `env:"DB_LOGGING_ENABLED"`
	DatabaseURL string `env:"DATABASE_URL" env-required:"true"`
	EncryptionKey string `env:"ENCRYPTION_KEY" env-required:"true"`
	ModelMigrationsDir string `env:"MODEL_MIGRATIONS_DIRECTORY" env-required:"true"`
	Port string `env:"PORT" env-default:"3000"`
	RootPath string `env:"ROOT_PATH"`
	SMTPHost string `env:"SMTP_HOST" env-required:"true"`
	SMTPLoggingEnabled bool `env:"SMTP_LOGGING_ENABLED" env-default:"false"`
	SMTPPass string `env:"SMTP_PASS"`
	SMTPPort string `env:"SMTP_PORT" env-required:"true"`
	SMTPTLS bool `env:"SMTP_TLS" env-default:"true"`
	SMTPUser string `env:"SMTP_USER"`
	Sender string `env:"SENDER" env-required:"true"`
	TestMode bool `env:"TEST_MODE" env-default:"false"`
	UAAClientID string `env:"UAA_CLIENT_ID" env-required:"true"`
	UAAClientSecret string `env:"UAA_CLIENT_SECRET" env-required:"true"`
	UAAHost string `env:"UAA_HOST" env-required:"true"`
	VerifySSL bool `env:"VERIFY_SSL" env-default:"true"`
	VCAPApplication struct {
		InstanceIndex int `json:"instance_index"`
	} `env:"VCAP_APPLICATION" env-required:"true"`
}
// NewEnvironment loads configuration from environment variables, normalizes
// the database URL and encryption key, and returns the result. It panics
// when a required variable is missing or malformed.
func NewEnvironment() Environment {
	var env Environment
	if err := viron.Parse(&env); err != nil {
		panic(err)
	}
	env.parseDatabaseURL()
	env.validateEncryptionKey()
	return env
}
// parseDatabaseURL strips any scheme prefix from DATABASE_URL and rewrites it
// into Go MySQL DSN form: user:pass@tcp(host)/dbname?parseTime=true.
// It panics when the URL cannot be parsed.
func (env *Environment) parseDatabaseURL() {
	raw := env.DatabaseURL
	for _, prefix := range []string{"http://", "https://", "tcp://", "mysql://", "mysql2://"} {
		raw = strings.TrimPrefix(raw, prefix)
	}
	parsedURL, err := url.Parse("tcp://" + raw)
	if err != nil {
		panic(errors.New(fmt.Sprintf("Could not parse DATABASE_URL %q, it does not fit format %q", env.DatabaseURL, "tcp://user:pass@host/dname")))
	}
	password, _ := parsedURL.User.Password()
	env.DatabaseURL = fmt.Sprintf("%s:%s@%s(%s)%s?parseTime=true", parsedURL.User.Username(), password, parsedURL.Scheme, parsedURL.Host, parsedURL.Path)
}
// validateEncryptionKey normalizes the configured key to a fixed 16-byte
// value by hashing it with MD5 (a length-normalization step, not an
// integrity check; NOTE(review): a proper KDF would be preferable).
func (env *Environment) validateEncryptionKey() {
	digest := md5.Sum([]byte(env.EncryptionKey))
	env.EncryptionKey = string(digest[:])
}
|
package api
import (
"context"
"os"
"github.com/go-playground/validator"
"github.com/gofiber/fiber"
"github.com/gofiber/session"
"github.com/rs/zerolog"
"github.com/hi019/fiber-boilerplate/ent"
us "github.com/hi019/fiber-boilerplate/pkg/api/user"
uw "github.com/hi019/fiber-boilerplate/pkg/api/user/web"
_ "github.com/mattn/go-sqlite3"
)
// Config contains config values for the API: the listen port and the
// database/sql driver name and DSN passed to ent.Open.
type Config struct {
	Port string
	DriverName string
	DataSourceName string
}
// Start opens the database, runs schema migration, and configures the
// logger, validator, session store, error handler and routes. It returns
// the configured fiber app and the ent client; the caller is responsible
// for serving the app and closing the client.
func Start(cfg *Config) (*fiber.App, *ent.Client, error) {
	// Configure db. The signature already returns an error, so propagate
	// failures instead of panicking (the original panicked on both).
	db, err := ent.Open(cfg.DriverName, cfg.DataSourceName)
	if err != nil {
		return nil, nil, err
	}
	// Run the auto migration tool.
	if err := db.Schema.Create(context.Background()); err != nil {
		// Close the client we just opened so the connection doesn't leak.
		_ = db.Close()
		return nil, nil, err
	}
	// Configure logger and validator
	logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()
	vd := validator.New()
	// Initialize session
	sessions := session.New()
	// Default error handler: map fiber errors to their status code, hide
	// other errors behind a generic 500, and log only server-side failures.
	errorHandler := func(ctx *fiber.Ctx, err error) error {
		// Statuscode defaults to 500
		code := fiber.StatusInternalServerError
		message := "Internal Server Error"
		// Retrieve the custom statuscode if it's an fiber.*Error
		if e, ok := err.(*fiber.Error); ok {
			code = e.Code
			message = e.Message
		}
		// If its an Internal Server Error, we'll log it
		if code == fiber.StatusInternalServerError {
			logger.Error().Msg(err.Error())
		}
		return ctx.Status(code).JSON(fiber.Map{"status": "error", "message": message})
	}
	// Configure api
	app := fiber.New(fiber.Config{ErrorHandler: errorHandler})
	// Configure api routes and services
	uw.NewHTTP(us.Initialize(db, &logger), app, vd, sessions)
	return app, db, nil
}
|
package collector
import (
"context"
"github.com/jenningsloy318/panos_exporter/panos"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
var (
	// InterfaceSubsystem is the Prometheus subsystem name for interface metrics.
	InterfaceSubsystem = "interface"
	// InterfaceLabelNames are the labels attached to every interface metric.
	InterfaceLabelNames = []string{"name", "domain", "category"}
)
// InterfaceCollector scrapes PAN-OS interface state into Prometheus metrics.
type InterfaceCollector struct {
	ctx context.Context
	panosClient *panos.PaloAlto
	// metrics is rebuilt from scratch on every Collect call.
	metrics map[string]InterfaceMetric
	collectorScrapeStatus *prometheus.GaugeVec
}
// InterfaceMetric wraps the Prometheus descriptor for one interface metric.
type InterfaceMetric struct {
	desc *prometheus.Desc
}
// NewInterfaceCollector builds an InterfaceCollector bound to the given
// context and PAN-OS client, with a gauge tracking scrape success per
// collector. The metrics map is populated lazily in Collect.
func NewInterfaceCollector(ctx context.Context, namespace string, panosClient *panos.PaloAlto) *InterfaceCollector {
	return &InterfaceCollector{
		ctx: ctx,
		panosClient: panosClient,
		collectorScrapeStatus: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: namespace,
				Name: "collector_scrape_status",
				Help: "collector_scrape_status",
			},
			[]string{"collector"},
		),
	}
}
// Describe sends the descriptors of all currently-known metrics plus the
// scrape-status gauge.
// NOTE(review): i.metrics is only populated during Collect, so Describe
// reports no interface descriptors before the first scrape.
func (i *InterfaceCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, metric := range i.metrics {
		ch <- metric.desc
	}
	i.collectorScrapeStatus.Describe(ch)
}
// Collect queries PAN-OS for hardware and management interface state and
// emits one gauge per interface (1 = up, 0 = otherwise). Errors from either
// query are logged and that group is skipped; scrape status is always set.
//
// NOTE(review): `namespace` here is a package-level identifier defined in
// another file — confirm it matches the namespace passed to the constructor.
// Also, an identical Desc is rebuilt (and i.metrics["state"] overwritten)
// on every loop iteration; it could be created once.
func (i *InterfaceCollector) Collect(ch chan<- prometheus.Metric) {
	iContext, iCancel := context.WithCancel(i.ctx)
	defer iCancel()
	i.metrics = map[string]InterfaceMetric{}
	InterfaceResponse, err := i.panosClient.GetInterfaceData(iContext)
	if err != nil {
		log.Errorf("Error getting standard interfaces data, %s", err)
	} else {
		// Add status for standard (non-management) hardware interfaces
		HWEntries := InterfaceResponse.Result.Hw.HwEntries
		for _, entry := range HWEntries {
			labelValues := []string{entry.Name, "interface", "hw"}
			// HW interface state
			stateInterfaceMetric := InterfaceMetric{
				desc: prometheus.NewDesc(
					prometheus.BuildFQName(namespace, InterfaceSubsystem, "state"),
					"Status of hw interface",
					InterfaceLabelNames,
					nil,
				),
			}
			i.metrics["state"] = stateInterfaceMetric
			stateValue := 0
			if entry.State == "up" {
				stateValue = 1
			}
			ch <- prometheus.MustNewConstMetric(stateInterfaceMetric.desc, prometheus.GaugeValue, float64(stateValue), labelValues...)
		}
	}
	ManagementInterfaceResponse, err := i.panosClient.GetManagementInterfaceInfo(iContext)
	if err != nil {
		log.Errorf("Error getting management interfaces data, %s", err)
	} else {
		// Add status for management interface
		managementLabelValues := []string{"management", "interface", "hw"}
		stateManagementInterfaceMetric := InterfaceMetric{
			desc: prometheus.NewDesc(
				prometheus.BuildFQName(namespace, InterfaceSubsystem, "state"),
				"Status of hw interface",
				InterfaceLabelNames,
				nil,
			),
		}
		managementStateValue := 0
		if ManagementInterfaceResponse.Result.Info.State == "up" {
			managementStateValue = 1
		}
		ch <- prometheus.MustNewConstMetric(stateManagementInterfaceMetric.desc, prometheus.GaugeValue, float64(managementStateValue), managementLabelValues...)
	}
	i.collectorScrapeStatus.WithLabelValues("interface").Set(float64(1))
}
|
package comm
// Fibonacci sends the first cap(c) Fibonacci numbers (starting 0, 1) into
// the buffered channel c, then closes it.
func Fibonacci(c chan int) {
	prev, next := 0, 1
	for n := cap(c); n > 0; n-- {
		c <- prev
		prev, next = next, prev+next
	}
	close(c)
}
|
package terminator
import (
"reflect"
"testing"
"github.com/jouir/pgterminate/base"
)
// TestFilterUsers exercises Terminator.filterUsers against include/exclude
// user lists and regexes, table-driven. Exclusion wins over inclusion, and
// list and regex filters are combined as a union.
// NOTE(review): on regex-compilation failure the test continues with
// t.Errorf; t.Fatalf would avoid running assertions on a broken config.
func TestFilterUsers(t *testing.T) {
	sessions := []*base.Session{
		{User: "test"},
		{User: "test_1"},
		{User: "test_2"},
		{User: "postgres"},
	}
	tests := []struct {
		name string
		config *base.Config
		want []*base.Session
	}{
		{
			"No filter",
			&base.Config{},
			sessions,
		},
		{
			"Include a single user",
			&base.Config{IncludeUsers: []string{"test"}},
			[]*base.Session{{User: "test"}},
		},
		{
			"Include multiple users",
			&base.Config{IncludeUsers: []string{"test_1", "test_2"}},
			[]*base.Session{{User: "test_1"}, {User: "test_2"}},
		},
		{
			"Exclude a single user",
			&base.Config{ExcludeUsers: []string{"test"}},
			[]*base.Session{{User: "test_1"}, {User: "test_2"}, {User: "postgres"}},
		},
		{
			"Exclude multiple users",
			&base.Config{ExcludeUsers: []string{"test_1", "test_2"}},
			[]*base.Session{{User: "test"}, {User: "postgres"}},
		},
		{
			"Include multiple users and exclude one",
			&base.Config{IncludeUsers: []string{"test", "test_1", "test_2"}, ExcludeUsers: []string{"test"}},
			[]*base.Session{{User: "test_1"}, {User: "test_2"}},
		},
		{
			"Include users from list and regex",
			&base.Config{
				IncludeUsers: []string{"test"},
				IncludeUsersRegex: "^test_[0-9]$",
			},
			[]*base.Session{{User: "test"}, {User: "test_1"}, {User: "test_2"}},
		},
		{
			"Exclude users from list and regex",
			&base.Config{
				ExcludeUsers: []string{"test"},
				ExcludeUsersRegex: "^test_[0-9]$",
			},
			[]*base.Session{{User: "postgres"}},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.CompileRegexes()
			if err != nil {
				t.Errorf("Failed to compile regex: %v", err)
			}
			tc.config.CompileFilters()
			terminator := &Terminator{config: tc.config}
			got := terminator.filterUsers(sessions)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("got %+v; want %+v", ListUsers(got), ListUsers(tc.want))
			} else {
				t.Logf("got %+v; want %+v", ListUsers(got), ListUsers(tc.want))
			}
		})
	}
}
// ListUsers extracts the usernames from a list of sessions, preserving
// order (nil in, nil out).
func ListUsers(sessions []*base.Session) []string {
	var names []string
	for _, s := range sessions {
		names = append(names, s.User)
	}
	return names
}
// TestFilterDatabases mirrors TestFilterUsers for database filters:
// include/exclude lists and regexes, table-driven, with exclusion winning
// over inclusion and list+regex combined as a union.
// NOTE(review): as in TestFilterUsers, a regex-compilation failure uses
// t.Errorf and keeps going; t.Fatalf would be stricter.
func TestFilterDatabases(t *testing.T) {
	sessions := []*base.Session{
		{Db: "test"},
		{Db: "test_1"},
		{Db: "test_2"},
		{Db: "postgres"},
	}
	tests := []struct {
		name string
		config *base.Config
		want []*base.Session
	}{
		{
			"No filter",
			&base.Config{},
			sessions,
		},
		{
			"Include a single database",
			&base.Config{IncludeDatabases: []string{"test"}},
			[]*base.Session{{Db: "test"}},
		},
		{
			"Include multiple databases",
			&base.Config{IncludeDatabases: []string{"test_1", "test_2"}},
			[]*base.Session{{Db: "test_1"}, {Db: "test_2"}},
		},
		{
			"Exclude a single database",
			&base.Config{ExcludeDatabases: []string{"test"}},
			[]*base.Session{{Db: "test_1"}, {Db: "test_2"}, {Db: "postgres"}},
		},
		{
			"Exclude multiple databases",
			&base.Config{ExcludeDatabases: []string{"test_1", "test_2"}},
			[]*base.Session{{Db: "test"}, {Db: "postgres"}},
		},
		{
			"Include multiple databases and exclude one",
			&base.Config{IncludeDatabases: []string{"test", "test_1", "test_2"}, ExcludeDatabases: []string{"test"}},
			[]*base.Session{{Db: "test_1"}, {Db: "test_2"}},
		},
		{
			"Include databases from list and regex",
			&base.Config{
				IncludeDatabases: []string{"test"},
				IncludeDatabasesRegex: "^test_[0-9]$",
			},
			[]*base.Session{{Db: "test"}, {Db: "test_1"}, {Db: "test_2"}},
		},
		{
			"Exclude databases from list and regex",
			&base.Config{
				ExcludeDatabases: []string{"test"},
				ExcludeDatabasesRegex: "^test_[0-9]$",
			},
			[]*base.Session{{Db: "postgres"}},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.CompileRegexes()
			if err != nil {
				t.Errorf("Failed to compile regex: %v", err)
			}
			tc.config.CompileFilters()
			terminator := &Terminator{config: tc.config}
			got := terminator.filterDatabases(sessions)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("got %+v; want %+v", ListDatabases(got), ListDatabases(tc.want))
			} else {
				t.Logf("got %+v; want %+v", ListDatabases(got), ListDatabases(tc.want))
			}
		})
	}
}
// ListDatabases extracts the database names from a list of sessions,
// preserving order (nil in, nil out).
func ListDatabases(sessions []*base.Session) []string {
	var names []string
	for _, s := range sessions {
		names = append(names, s.Db)
	}
	return names
}
|
package main
import (
"flag"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
"golang.org/x/text/encoding/charmap"
"parser1c/internal/parser1c"
"parser1c/internal/storage"
)
// main reads a Windows-1251-encoded 1C registry export named on the command
// line, parses it, and writes the documents out in the format selected by
// -format (json, xlsx or csv).
func main() {
	var (
		format  = flag.String("format", "xlsx", "Формат вывода (по умолчанию xlsx)")
		formats = map[string]string{"json": ".json", "xlsx": ".xlsx", "csv": ".csv"}
	)
	flag.Parse()
	if len(flag.Args()) == 0 {
		log.Printf("usage: %s -format xlsx <file1> \n", filepath.Base(os.Args[0]))
		os.Exit(1)
	}
	// Reject unknown formats up front; the original silently produced a
	// file name with no extension for an unrecognized -format value.
	ext, ok := formats[*format]
	if !ok {
		log.Fatalf("unknown format %q (expected json, xlsx or csv)", *format)
	}
	file := flag.Args()[0]
	log.Printf("файл на входе: %s\n", file)
	f, err := os.OpenFile(file, os.O_RDONLY, os.ModePerm)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	//Перекодируем из win1251 (transcode from Windows-1251 to UTF-8)
	readerDecoder := charmap.Windows1251.NewDecoder().Reader(f)
	rawBytes, err := ioutil.ReadAll(readerDecoder)
	if err != nil {
		log.Fatal(err)
	}
	doc, err := parser1c.ImportData(string(rawBytes))
	if err != nil {
		log.Fatal(err)
	}
	if doc.CountDoc() == 0 {
		log.Printf("Кол-во документов в реестре %d. Выходной файл не создан!", doc.CountDoc())
		return
	}
	// формируем имя файла - меняем расширение (swap the extension)
	newFileName := strings.TrimSuffix(file, path.Ext(file)) + ext
	storage.SaveInFile(doc, newFileName, *format)
}
|
// A very basic load balancer implementation.
// It provides round-robin load balancing and sends heartbeat messages
// to backend endpoints in order to detect unreachable hosts.
package main
import (
"context"
"flag"
"fmt"
"sync"
"sync/atomic"
"log"
"strings"
"time"
"net"
"net/http"
"net/http/httputil"
"net/url"
)
// Backend stores information about one backend endpoint: its URL, a
// mutex-guarded liveness flag, and the reverse proxy that forwards to it.
type Backend struct {
	Url *url.URL
	// Alive is guarded by mux; use SetAlive/IsAlive rather than direct access.
	Alive bool
	mux sync.RWMutex
	ReverseProxy *httputil.ReverseProxy
}
// Context keys used to carry the per-request attempt and retry counters.
// NOTE(review): plain int keys (0 and 1) can collide with other packages'
// context values; the context docs recommend an unexported key type.
const (
	Attempts int = iota
	Retry
)
// ServerPool tracks all the backend endpoints in a slice and keeps a
// round-robin counter of the most recently used index.
type ServerPool struct {
	backends []*Backend
	// current is advanced atomically by NextIndex.
	current uint64
}
// AddBackend appends a backend endpoint to the pool.
// NOTE(review): not goroutine-safe; call during setup, before serving.
func (s *ServerPool) AddBackend(backend *Backend) {
	s.backends = append(s.backends,backend)
}
//NextIndex atomically advances the round-robin counter and returns the next
//candidate index into the backends slice. (Panics if the pool is empty.)
func (s *ServerPool) NextIndex() int {
	counter := atomic.AddUint64(&s.current, 1)
	return int(counter % uint64(len(s.backends)))
}
//SetAlive records whether this backend endpoint is reachable.
func (b *Backend) SetAlive(isAlive bool) {
	b.mux.Lock()
	defer b.mux.Unlock()
	b.Alive = isAlive
}
//IsAlive reports whether this backend is currently marked reachable.
func (b *Backend) IsAlive() bool {
	b.mux.RLock()
	defer b.mux.RUnlock()
	return b.Alive
}
//MarkBackendStatus updates the alive flag of the backend whose URL matches
//backendURL; unknown URLs are ignored.
func (s *ServerPool) MarkBackendStatus(backendURL *url.URL, alive bool) {
	target := backendURL.String()
	for _, backend := range s.backends {
		if backend.Url.String() != target {
			continue
		}
		backend.SetAlive(alive)
		return
	}
}
//GetNextActivePeer returns the next alive backend in round-robin order,
//or nil when every backend is down. When the rotation had to skip dead
//backends, the counter is re-synced to the index actually served.
func (s *ServerPool) GetNextActivePeer() *Backend {
	count := len(s.backends)
	start := s.NextIndex()
	for offset := 0; offset < count; offset++ {
		idx := (start + offset) % count
		candidate := s.backends[idx]
		if !candidate.IsAlive() {
			continue
		}
		if offset != 0 {
			// Remember where we landed so the rotation continues from here.
			atomic.StoreUint64(&s.current, uint64(idx))
		}
		return candidate
	}
	return nil
}
// Checks to see if a particular backend is alive by pining it
func isBackendAlive(url *url.URL) bool {
timeout := 2 * time.Second
conn, err := net.DialTimeout("tcp",url.Host,timeout)
if err != nil {
log.Println("Site unreachable, error: ", err)
return false
}
_ = conn.Close()
return true
}
//HealthCheck probes every backend in the pool, updates its alive flag, and
//logs the per-endpoint result.
func (s *ServerPool) HealthCheck() {
	for _, backend := range s.backends {
		alive := isBackendAlive(backend.Url)
		backend.SetAlive(alive)
		status := "down"
		if alive {
			status = "up"
		}
		log.Printf("%s [%s]\n", backend.Url, status)
	}
}
// GetAttemptsfromRequest reads the attempt counter stored in the request
// context, defaulting to 1 when the request carries none.
func GetAttemptsfromRequest(req *http.Request) int {
	value := req.Context().Value(Attempts)
	attempts, ok := value.(int)
	if !ok {
		return 1
	}
	return attempts
}
//GetRetriesfromRequest reads the retry counter stored in the request
//context, defaulting to 0 when the request carries none.
func GetRetriesfromRequest(req *http.Request) int {
	value := req.Context().Value(Retry)
	retries, ok := value.(int)
	if !ok {
		return 0
	}
	return retries
}
// runHealthCheck triggers a pool-wide health probe every two minutes.
// It never returns; run it in its own goroutine.
func runHealthCheck() {
	ticker := time.NewTicker(2 * time.Minute)
	for range ticker.C {
		log.Println("Starting health check...")
		serverpool.HealthCheck()
		log.Println("Health check completed")
	}
}
//loadBalance distributes incoming requests round-robin across alive
//backends, answering 503 once a request has exceeded three attempts or when
//no backend is available.
func loadBalance(w http.ResponseWriter, req *http.Request) {
	if attempts := GetAttemptsfromRequest(req); attempts > 3 {
		log.Printf("%s(%s) Max attempts reached, terminating\n", req.RemoteAddr, req.URL.Path)
		http.Error(w, "Service not available.", http.StatusServiceUnavailable)
		return
	}
	peer := serverpool.GetNextActivePeer()
	if peer == nil {
		http.Error(w, "Service not available.", http.StatusServiceUnavailable)
		return
	}
	peer.ReverseProxy.ServeHTTP(w, req)
}
// serverpool is the process-wide backend pool shared by loadBalance, the
// health checker and the per-proxy error handlers.
var serverpool ServerPool
// main parses the backend list and port from flags, builds one reverse
// proxy per backend (with a retry/attempt-tracking error handler), starts
// the periodic health checker, and serves the load balancer.
func main() {
	//parse args and create ServerPool
	var serverList string
	var port int
	flag.StringVar(&serverList,"backends","", "Load balanced backends, use commas to separate")
	flag.IntVar(&port,"port",3030,"Port to server")
	flag.Parse()
	if len(serverList) == 0 {
		log.Fatal("Please provide one or more server backends to load balance")
	}
	//Now parse the backends
	tokens := strings.Split(serverList,",")
	for _, tok := range tokens {
		serverUrl, err := url.Parse(tok)
		if err != nil {
			log.Fatal(err)
		}
		proxy := httputil.NewSingleHostReverseProxy(serverUrl)
		// On proxy failure: retry the same backend up to 3 times with a
		// short delay; after that mark it down, bump the attempt counter,
		// and re-enter loadBalance to pick another backend.
		// Note: the closure captures this iteration's proxy and serverUrl.
		proxy.ErrorHandler = func(w http.ResponseWriter, req *http.Request, e error) {
			log.Printf("[%s] %s\n", serverUrl.Host, e.Error())
			retries := GetRetriesfromRequest(req)
			if retries < 3 {
				select {
				case <- time.After(10 * time.Millisecond):
					//increment retries and add it to context
					ctx := context.WithValue(req.Context(),Retry, retries+1)
					proxy.ServeHTTP(w, req.WithContext(ctx))
				}
				return
			}
			//Consider the endpoint to be down after 3 retries
			serverpool.MarkBackendStatus(serverUrl,false)
			attempts := GetAttemptsfromRequest(req)
			log.Printf("%s(%s) Attempting another retry. %d\n", req.RemoteAddr, req.URL.Path,attempts)
			ctx := context.WithValue(req.Context(),Attempts,attempts+1)
			loadBalance(w,req.WithContext(ctx))
		}
		serverpool.AddBackend(&Backend {
			Url: serverUrl,
			Alive: true,
			ReverseProxy: proxy,
		})
		log.Printf("Configured endpoint: %s\n", serverUrl)
	}
	//create http server
	server := http.Server {
		Addr: fmt.Sprintf(":%d", port),
		Handler: http.HandlerFunc(loadBalance),
	}
	go runHealthCheck()
	//Print start message
	log.Printf("Simple load balancer started at :%d\n", port)
	if err := server.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}
|
/*
Tests basic Puts.
First tx puts 4 different key/values and commits.
Second tx puts 4 different values on same key.
Usage:
go run 2_PutTests.go
*/
package main
import "../kvservice"
import (
"fmt"
)
// main runs two manual transaction tests against a fixed cluster of kvservice
// nodes: t1 commits four distinct keys, t2 commits four successive values on
// one key (only the last should survive). Results are printed, not asserted.
// NOTE(review): `go vet` flags the Println calls whose strings end in "\n"
// (Println already appends a newline).
func main() {
	var nodes []string
	// Hard-coded cluster endpoints for the experiment.
	nodes = []string{"52.233.45.243:2222", "52.175.29.87:2222", "40.69.195.111:2222", "13.65.91.243:2222", "51.140.126.235:2222", "52.233.190.164:2222"}
	c := kvservice.NewConnection(nodes)
	fmt.Printf("NewConnection returned: %v\n", c)
	// t1 Puts 4 different keys/values and commits
	// Expect to see Keys A, B, C, D with values in keyValueStore
	fmt.Println("\nTest1\n")
	fmt.Println("t1 Puts 4 different keys/values and commits\n")
	t1, err := c.NewTX()
	fmt.Printf("NewTX returned: %v, %v\n", t1, err)
	success, err := t1.Put("A", "T1A")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, err = t1.Put("B", "T1B")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, err = t1.Put("C", "T1C")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, err = t1.Put("D", "T1D")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, txID, err := t1.Commit(1)
	fmt.Printf("Commit returned: %v, %v, %v\n", success, txID, err)
	// t2 Puts 4 different values on same key and commits
	// Expect to see only Key "E" with value "T2_4" in keyValueStore
	fmt.Println("\nTest2")
	fmt.Println("t2 Puts 4 different values on same key and commits\n")
	t2, err := c.NewTX()
	fmt.Printf("NewTX returned: %v, %v\n", t2, err)
	success, err = t2.Put("E", "T2_1")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, err = t2.Put("E", "T2_2")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, err = t2.Put("E", "T2_3")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, err = t2.Put("E", "T2_4")
	fmt.Printf("Put returned: %v, %v\n", success, err)
	success, txID, err = t2.Commit(1)
	fmt.Printf("Commit returned: %v, %v, %v\n", success, txID, err)
	c.Close()
}
|
package http
// TreeNode is a JSON-serializable node of a tree rendered by API clients.
// Children nests further nodes, so a whole tree marshals recursively.
type TreeNode struct {
	Id       string     `json:"id"`      // unique node identifier
	Label    string     `json:"label"`   // display text
	Creator  string     `json:"creator"` // who created the node
	Type     string     `json:"type"`    // node kind (semantics defined by callers)
	Children []TreeNode `json:"children"`
}
|
package genInitialData
import (
"database/sql"
"fmt"
"github.com/jackc/pgtype"
_ "github.com/jackc/pgx/v4/stdlib"
)
// Placeholder hack to populate database with fake teams and games
func GenerateInitialData(db *sql.DB) {
var teams []string
db.Exec("TRUNCATE TABLE game")
db.Exec("DELETE FROM team")
for i := 0; i < 30; i++ {
id := pgtype.UUID{}
err := db.QueryRow(fmt.Sprintf(`INSERT INTO team(name) VALUES ('Team %d') RETURNING id;`, i + 1)).Scan(&id)
if err != nil {
println(err.Error())
}
idStr, _ := id.EncodeText(nil, nil)
teams = append(teams, string(idStr))
}
for i := 0; i < len(teams); i += 2 {
_, err := db.Exec(fmt.Sprintf(`INSERT INTO game(location, team_a, team_b) VALUES ('Location %d', '%s', '%s') RETURNING id;`, i + 1, teams[i], teams[i + 1]))
if err != nil {
println(err.Error())
}
}
} |
package service
import (
"context"
"fmt"
"io"
"io/ioutil"
netHttp "net/http"
"strconv"
"sync"
"time"
"github.com/go-ocf/cloud/cloud2cloud-connector/events"
oapiStore "github.com/go-ocf/cloud/cloud2cloud-connector/store"
"github.com/go-ocf/cloud/cloud2cloud-gateway/store"
"github.com/go-ocf/cqrs/eventstore"
"github.com/go-ocf/kit/log"
"github.com/go-ocf/sdk/schema"
)
// EventHandler dispatches incoming cloud2cloud events to the subscriptions
// persisted in store, running each delivery via the goroutine pool function.
type EventHandler struct {
	store           store.Store
	goroutinePoolGo GoroutinePoolGoFunc
}
// newEventHandler builds an EventHandler backed by the given subscription
// store and goroutine-pool submit function.
func newEventHandler(
	store store.Store,
	goroutinePoolGo GoroutinePoolGoFunc,
) *EventHandler {
	h := EventHandler{store: store, goroutinePoolGo: goroutinePoolGo}
	return &h
}
// incrementSubscriptionSequenceNumberFunc bumps and returns the sequence
// number of the subscription identified by subscriptionID.
type incrementSubscriptionSequenceNumberFunc func(ctx context.Context, subscriptionID string) (uint64, error)
// emitEvent delivers one event for subscription s to its callback URL via
// HTTP POST: it stamps the standard event headers, signs the request, and
// streams the encoded representation (if any) through a pipe.
//
// The returned remove flag tells the caller to drop the subscription: it is
// true when the event is a cancellation, or when the endpoint answered
// 410 Gone (reported together with a non-nil error).
func emitEvent(ctx context.Context, eventType events.EventType, s store.Subscription, incrementSubscriptionSequenceNumber incrementSubscriptionSequenceNumberFunc, rep interface{}) (remove bool, err error) {
	log.Debugf("emitEvent: %v: %+v", eventType, s)
	defer log.Debugf("emitEvent done: %v: %+v", eventType, s)
	// NOTE(review): the client has no timeout, so a stuck subscriber endpoint
	// can block this call indefinitely — consider a timeout / ctx-bound request.
	client := netHttp.Client{}
	encoder, err := getEncoder(s.ContentType)
	if err != nil {
		return false, fmt.Errorf("cannot get encoder: %w", err)
	}
	seqNum, err := incrementSubscriptionSequenceNumber(ctx, s.ID)
	if err != nil {
		return false, fmt.Errorf("cannot increment sequence number: %w", err)
	}
	// The request body is fed through a pipe: the writer side is filled by a
	// background goroutine below, or closed immediately when there is no body.
	r, w := io.Pipe()
	req, err := netHttp.NewRequest("POST", s.URL, r)
	if err != nil {
		return false, fmt.Errorf("cannot create post request: %w", err)
	}
	timestamp := time.Now()
	req.Header.Set(events.EventTypeKey, string(eventType))
	req.Header.Set(events.SubscriptionIDKey, s.ID)
	req.Header.Set(events.SequenceNumberKey, strconv.FormatUint(seqNum, 10))
	req.Header.Set(events.CorrelationIDKey, s.CorrelationID)
	req.Header.Set(events.EventTimestampKey, strconv.FormatInt(timestamp.Unix(), 10))
	var body []byte
	if rep != nil {
		// Encode eagerly so the signature below can cover the exact bytes sent.
		body, err = encoder(rep)
		if err != nil {
			return false, fmt.Errorf("cannot encode data to body: %w", err)
		}
		req.Header.Set(events.ContentTypeKey, s.ContentType)
		go func() {
			defer w.Close()
			if len(body) > 0 {
				_, err := w.Write(body)
				if err != nil {
					log.Errorf("cannot write data to client: %v", err)
				}
			}
		}()
	} else {
		// No representation: close the writer so the request body is empty.
		w.Close()
	}
	// Signature covers content type, event type, subscription ID, sequence
	// number, timestamp and the already-encoded body.
	req.Header.Set(events.EventSignatureKey, events.CalculateEventSignature(
		s.SigningSecret,
		req.Header.Get(events.ContentTypeKey),
		eventType,
		req.Header.Get(events.SubscriptionIDKey),
		seqNum,
		timestamp,
		body,
	))
	resp, err := client.Do(req)
	if err != nil {
		return false, fmt.Errorf("cannot post: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != netHttp.StatusOK {
		errBody, _ := ioutil.ReadAll(resp.Body)
		// 410 Gone means the subscriber no longer wants events: report the
		// subscription as removable alongside the error.
		return resp.StatusCode == netHttp.StatusGone, fmt.Errorf("%v: unexpected statusCode %v: body: '%v'", s.URL, resp.StatusCode, string(errBody))
	}
	return eventType == events.EventType_SubscriptionCanceled, nil
}
// resourceSubscriptionHandler fans a single resource event out to all
// matching subscriptions streamed from the store.
type resourceSubscriptionHandler struct {
	store           store.Store
	goroutinePoolGo GoroutinePoolGoFunc
	event           Event
}
// newResourceSubscriptionHandler builds a resourceSubscriptionHandler that
// will deliver event to matching subscriptions via the goroutine pool.
func newResourceSubscriptionHandler(
	store store.Store,
	goroutinePoolGo GoroutinePoolGoFunc,
	event Event,
) *resourceSubscriptionHandler {
	h := resourceSubscriptionHandler{
		store:           store,
		goroutinePoolGo: goroutinePoolGo,
		event:           event,
	}
	return &h
}
// Handle emits the handler's event to every stored subscription that listens
// for its event type. Deliveries run concurrently on the goroutine pool; the
// method blocks until all of them finish and returns the iterator error.
func (c *resourceSubscriptionHandler) Handle(ctx context.Context, iter store.SubscriptionIter) error {
	var wg sync.WaitGroup
	for {
		// s is declared per iteration, so each scheduled closure captures its
		// own variable.
		var s store.Subscription
		if !iter.Next(ctx, &s) {
			break
		}
		if s.SequenceNumber == 0 {
			// The first event is emitted right after the subscription is created.
			continue
		}
		for _, e := range s.EventTypes {
			if e == c.event.EventType {
				wg.Add(1)
				err := c.goroutinePoolGo(func() {
					defer wg.Done()
					rs := s
					remove, err := emitEvent(ctx, c.event.EventType, rs, c.store.IncrementSubscriptionSequenceNumber, c.event.Representation)
					if remove {
						c.store.PopSubscription(ctx, rs.ID)
					}
					if err != nil {
						log.Errorf("cannot emit event: %v", err)
					}
				})
				if err != nil {
					// Pool rejected the task, so the deferred wg.Done will never
					// run: balance the Add here or wg.Wait below would hang.
					// (This mirrors deviceSubscriptionHandlerEvent.Handle; the
					// previous code ignored the error entirely.)
					wg.Done()
				}
				break
			}
		}
	}
	wg.Wait()
	return iter.Err()
}
// processResourceEvent loads every resource subscription matching the event's
// device and href, and hands them to a resourceSubscriptionHandler.
func (h *EventHandler) processResourceEvent(e Event) error {
	query := store.SubscriptionQuery{
		Type:     oapiStore.Type_Resource,
		DeviceID: e.DeviceID,
		Href:     e.Href,
	}
	handler := newResourceSubscriptionHandler(h.store, h.goroutinePoolGo, e)
	if err := h.store.LoadSubscriptions(context.Background(), query, handler); err != nil {
		return fmt.Errorf("cannot process resource event (DeviceID: %v, Href: %v): %w", e.DeviceID, e.Href, err)
	}
	return nil
}
// deviceSubscriptionHandlerEvent fans a single device-level event out to all
// matching subscriptions streamed from the store.
type deviceSubscriptionHandlerEvent struct {
	store           store.Store
	goroutinePoolGo GoroutinePoolGoFunc
	event           Event
}
// newDeviceSubscriptionHandler builds a deviceSubscriptionHandlerEvent that
// will deliver event to matching subscriptions via the goroutine pool.
func newDeviceSubscriptionHandler(
	store store.Store,
	goroutinePoolGo GoroutinePoolGoFunc,
	event Event,
) *deviceSubscriptionHandlerEvent {
	h := deviceSubscriptionHandlerEvent{
		store:           store,
		goroutinePoolGo: goroutinePoolGo,
		event:           event,
	}
	return &h
}
// makeLinksRepresentation selects resource links to report for a publish or
// unpublish event: published resources for ResourcesPublished, unpublished
// ones for ResourcesUnpublished, and nothing for any other event type.
func makeLinksRepresentation(eventType events.EventType, models []eventstore.Model) []schema.ResourceLink {
	links := make([]schema.ResourceLink, 0, len(models))
	for _, model := range models {
		rc := model.(*resourceCtx).Clone()
		var want bool
		switch eventType {
		case events.EventType_ResourcesPublished:
			want = rc.isPublished
		case events.EventType_ResourcesUnpublished:
			want = !rc.isPublished
		default:
			continue
		}
		if want {
			links = append(links, makeResourceLink(rc.resource))
		}
	}
	return links
}
// Handle emits the handler's event to every stored subscription that listens
// for its event type. Deliveries run concurrently on the goroutine pool; the
// method blocks until all of them finish and returns the iterator error.
func (c *deviceSubscriptionHandlerEvent) Handle(ctx context.Context, iter store.SubscriptionIter) error {
	var wg sync.WaitGroup
	for {
		// s is declared per iteration, so each scheduled closure captures its
		// own variable.
		var s store.Subscription
		if !iter.Next(ctx, &s) {
			break
		}
		for _, e := range s.EventTypes {
			if e == c.event.EventType {
				wg.Add(1)
				err := c.goroutinePoolGo(func() {
					defer wg.Done()
					rs := s
					remove, err := emitEvent(ctx, c.event.EventType, rs, c.store.IncrementSubscriptionSequenceNumber, c.event.Representation)
					if remove {
						c.store.PopSubscription(ctx, rs.ID)
					}
					if err != nil {
						log.Errorf("cannot emit event: %v", err)
					}
				})
				if err != nil {
					// Pool rejected the task, so the deferred wg.Done will never
					// run: balance the Add to keep wg.Wait from hanging.
					wg.Done()
				}
				break
			}
		}
	}
	wg.Wait()
	return iter.Err()
}
// processDeviceEvent loads every device-level subscription for the event's
// device and hands them to a deviceSubscriptionHandlerEvent.
func (h *EventHandler) processDeviceEvent(e Event) error {
	err := h.store.LoadSubscriptions(
		context.Background(),
		store.SubscriptionQuery{
			Type:     oapiStore.Type_Device,
			DeviceID: e.DeviceID,
		},
		newDeviceSubscriptionHandler(h.store, h.goroutinePoolGo, e),
	)
	if err != nil {
		// Fixed copy-pasted message: this is the device path, and the query
		// does not filter on Href, so the message no longer claims "resource
		// event" nor formats e.Href.
		return fmt.Errorf("cannot process device event (DeviceID: %v): %w", e.DeviceID, err)
	}
	return nil
}
// processEvent routes an event to the resource or device pipeline based on
// its type, and rejects anything it does not recognize.
func (h *EventHandler) processEvent(e Event) error {
	switch e.EventType {
	case events.EventType_ResourceChanged:
		return h.processResourceEvent(e)
	case events.EventType_ResourcesPublished, events.EventType_ResourcesUnpublished:
		return h.processDeviceEvent(e)
	default:
		return fmt.Errorf("cannot process event: unknown event-type %v", e.EventType)
	}
}
// Handle drains the event iterator, processing each event in turn. Individual
// processing failures are only logged; the iterator's own error is returned.
func (h *EventHandler) Handle(ctx context.Context, iter Iter) (err error) {
	for {
		var e Event
		if !iter.Next(ctx, &e) {
			return iter.Err()
		}
		if perr := h.processEvent(e); perr != nil {
			log.Errorf("%v", perr)
		}
	}
}
|
// Copyright (c) 2021 Alexey Khan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package main
import (
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// calculateSavingsConfig holds the user-facing copy (in Russian) for the
// "calculate savings" command: help texts and fmt templates for output.
var calculateSavingsConfig = struct {
	title    string   // short command title
	about    string   // long description shown in help
	overview string   // fmt template: duration, monthly payment, interest rate
	results  string   // fmt template for the plain (non-detailed) result
	detailed string   // fmt template for the detailed totals block
	examples []string // usage examples shown in help
}{
	title: "Расчет будущих накоплений",
	about: "Узнайте, какую сумму сможете накопить с учетом сложного процента, если на " +
		"протяжении следующих N лет будете ежемесячно инвестировать X рублей под R% годовых " +
		"с ежемесячной капитализацией процентов.",
	overview: "Задача: рассчитать сумму, которую можно накопить с учетом сложного процента, " +
		"если на протяжении следующих %s ежемесячно инвестировать %.2f рублей под %.2f%% " +
		"годовых с ежемесячной капитализацией процентов.",
	results: " > Накопленная сумма составит: %.2f\n\n",
	detailed: "\n > Накопленная сумма составит: %.2f\n" +
		" > Сумма собственных вложений за период: %.2f\n" +
		" > Сумма начисленных процентов за период: %.2f\n\n",
	examples: []string{
		"./bin/assist calculate savings --payment=10000 --years=10 --interest=6.5 --detailed=M",
		"./bin/assist calculate savings -p=10000 -y=10 -i=6.5 -d=M",
		"./bin/assist calculate savings --help",
	},
}
// calculateSavingsFlags declares the command-line flags (names, shorthands
// and Russian usage strings) consumed by the savings command.
var calculateSavingsFlags = struct {
	Years    pflag.Flag // number of years to keep investing
	Payment  pflag.Flag // monthly contribution amount
	Interest pflag.Flag // annual interest rate, percent
}{
	Years: pflag.Flag{
		Name: "years", Shorthand: "y",
		Usage: "Количество лет, на протяжении которых будут производиться накопления",
	},
	Payment: pflag.Flag{
		Name: "payment", Shorthand: "p",
		Usage: "Размер ежемесячного пополнения инвестиционного портфеля",
	},
	Interest: pflag.Flag{
		Name: "interest", Shorthand: "i",
		Usage: "Доходность вашего инвестиционного портфеля в процентах годовых",
	},
}
// calculateSavings is the cobra command that computes compound-interest
// savings from the years/payment/interest flags, optionally printing a
// detailed breakdown table instead of just the total.
var calculateSavings = &cobra.Command{
	Use: "savings",
	Example: commandOverview(
		calculateSavingsConfig.title,
		calculateSavingsConfig.about,
		calculateSavingsConfig.examples,
	),
	RunE: func(cmd *cobra.Command, args []string) (err error) {
		printHeader()
		// Read flag values; getUint8/getFloat64/getString are file-local
		// helpers — presumably they default or exit on parse errors (confirm).
		years := getUint8(cmd, calculateSavingsFlags.Years.Name)
		interest := getFloat64(cmd, calculateSavingsFlags.Interest.Name)
		payment := getFloat64(cmd, calculateSavingsFlags.Payment.Name)
		detailed := getString(cmd, detailedFlag.Name)
		if err = validateDetailedOption(detailed); err != nil {
			return err
		}
		overview := fmt.Sprintf(calculateSavingsConfig.overview, yearsDuration(years), payment, interest)
		taskOverview := getTaskOverview(calculateSavingsConfig.title, overview)
		if detailed == commandOptionEmpty {
			// Plain mode: only the accumulated total is printed.
			var savings float64
			if savings, err = core.CalculateSavings(payment, interest, years); err != nil {
				return err
			}
			fmt.Println(taskOverview)
			fmt.Printf(calculateSavingsConfig.results, savings)
			return
		}
		// Detailed mode: render the period table plus totals (monthly rows
		// when the detailed option requests monthly granularity).
		rendered, personalInvestments, interestIncome, totalSavings := savingsTable(
			payment, interest, years, detailed == commandOptionDetailedMonthly)
		fmt.Println(taskOverview)
		fmt.Println(rendered)
		fmt.Printf(calculateSavingsConfig.detailed, totalSavings, personalInvestments, interestIncome)
		return
	},
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mathutil
import (
"math"
"golang.org/x/exp/constraints"
)
// Architecture and/or implementation specific integer limits and bit widths.
const (
	// MaxInt / MinInt / MaxUint are the limits of the platform-sized int/uint.
	MaxInt  = 1<<(IntBits-1) - 1
	MinInt  = -MaxInt - 1
	MaxUint = 1<<IntBits - 1
	// IntBits derives the width of uint (32 or 64) at compile time from the
	// high bits of ^uint(0): each term contributes 1 when the corresponding
	// bit range exists, summing to log2(width), then 1<<… yields the width.
	IntBits = 1 << (^uint(0)>>32&1 + ^uint(0)>>16&1 + ^uint(0)>>8&1 + 3)
)
// Abs returns the absolute value of n using a branch-free two's-complement
// trick (see http://cavaliercoder.com/blog/optimized-abs-for-int64-in-go.html).
// Note: Abs(math.MinInt64) overflows and yields math.MinInt64 itself.
func Abs(n int64) int64 {
	mask := n >> 63 // 0 for n >= 0, -1 for n < 0
	return (n + mask) ^ mask
}
// uintSizeTable holds, at index i, the largest uint64 that has i decimal
// digits; comparing against it is faster than repeated division by 10.
var uintSizeTable = [21]uint64{
	0, // redundant 0 here, so to make function StrLenOfUint64Fast to count from 1 and return i directly
	9, 99, 999, 9999, 99999,
	999999, 9999999, 99999999, 999999999, 9999999999,
	99999999999, 999999999999, 9999999999999, 99999999999999, 999999999999999,
	9999999999999999, 99999999999999999, 999999999999999999, 9999999999999999999,
	math.MaxUint64,
} // math.MaxUint64 is 18446744073709551615 and it has 20 digits
// StrLenOfUint64Fast efficiently returns the number of decimal digits of x
// (1 for zero) by walking the precomputed uintSizeTable instead of dividing.
func StrLenOfUint64Fast(x uint64) int {
	digits := 1
	for x > uintSizeTable[digits] {
		digits++
	}
	return digits
}
// StrLenOfInt64Fast efficiently returns the number of characters needed to
// render x in base 10, counting a leading "-" for negative values.
func StrLenOfInt64Fast(x int64) int {
	digits := StrLenOfUint64Fast(uint64(Abs(x)))
	if x < 0 {
		return digits + 1 // account for the minus sign
	}
	return digits
}
// IsFinite reports whether f is neither NaN nor an infinity.
func IsFinite(f float64) bool {
	return !math.IsNaN(f) && !math.IsInf(f, 0)
}
// Max returns the largest one from its arguments.
func Max[T constraints.Ordered](x T, xs ...T) T {
	for _, v := range xs {
		if v > x {
			x = v
		}
	}
	return x
}
// Min returns the smallest one from its arguments.
func Min[T constraints.Ordered](x T, xs ...T) T {
	for _, v := range xs {
		if v < x {
			x = v
		}
	}
	return x
}
// Clamp restricts n to the closed interval [min, max].
func Clamp[T constraints.Ordered](n, min, max T) T {
	switch {
	case n >= max:
		return max
	case n <= min:
		return min
	default:
		return n
	}
}
|
package config
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"strings"
"time"
envoy_http_connection_manager "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"github.com/mitchellh/mapstructure"
"github.com/rs/zerolog"
"github.com/spf13/viper"
"github.com/volatiletech/null/v9"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/pomerium/csrf"
"github.com/pomerium/pomerium/internal/atomicutil"
"github.com/pomerium/pomerium/internal/hashutil"
"github.com/pomerium/pomerium/internal/httputil"
"github.com/pomerium/pomerium/internal/identity/oauth"
"github.com/pomerium/pomerium/internal/identity/oauth/apple"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/internal/sets"
"github.com/pomerium/pomerium/internal/telemetry"
"github.com/pomerium/pomerium/internal/telemetry/metrics"
"github.com/pomerium/pomerium/internal/urlutil"
"github.com/pomerium/pomerium/pkg/cryptutil"
"github.com/pomerium/pomerium/pkg/grpc/config"
"github.com/pomerium/pomerium/pkg/grpc/crypt"
"github.com/pomerium/pomerium/pkg/hpke"
)
// DisableHeaderKey is the key used to check whether to disable setting headers.
const DisableHeaderKey = "disable"

// DefaultAlternativeAddr is the address used if two services are competing over
// the same listener. Typically this is invisible to the end user (e.g. localhost)
// gRPC server, or is used for healthchecks (authorize only service)
const DefaultAlternativeAddr = ":5443"

// The randomSharedKey is used if no shared key is supplied in all-in-one mode.
var randomSharedKey = cryptutil.NewBase64Key()
// Options are the global environmental flags used to set up pomerium's services.
// Use NewXXXOptions() methods for a safely initialized data structure.
type Options struct {
	// InstallationID is used to indicate a unique installation of pomerium. Useful for telemetry.
	InstallationID string `mapstructure:"installation_id" yaml:"installation_id,omitempty"`
	// Debug outputs human-readable logs to Stdout.
	Debug bool `mapstructure:"pomerium_debug" yaml:"pomerium_debug,omitempty"`
	// LogLevel sets the global override for log level. All Loggers will use at least this value.
	// Possible options are "info","warn","debug" and "error". Defaults to "info".
	LogLevel LogLevel `mapstructure:"log_level" yaml:"log_level,omitempty"`
	// ProxyLogLevel sets the log level for the proxy service.
	// Possible options are "info","warn", and "error". Defaults to the value of `LogLevel`.
	ProxyLogLevel LogLevel `mapstructure:"proxy_log_level" yaml:"proxy_log_level,omitempty"`
	// AccessLogFields are the fields to log in access logs.
	AccessLogFields []log.AccessLogField `mapstructure:"access_log_fields" yaml:"access_log_fields,omitempty"`
	// AuthorizeLogFields are the fields to log in authorize logs.
	AuthorizeLogFields []log.AuthorizeLogField `mapstructure:"authorize_log_fields" yaml:"authorize_log_fields,omitempty"`
	// SharedKey is the shared secret authorization key used to mutually authenticate
	// requests between services.
	SharedKey string `mapstructure:"shared_secret" yaml:"shared_secret,omitempty"`
	// SharedSecretFile points to a file containing the shared secret instead of
	// supplying it inline.
	SharedSecretFile string `mapstructure:"shared_secret_file" yaml:"shared_secret_file,omitempty"`
	// Services is a list enabled service mode. If none are selected, "all" is used.
	// Available options are : "all", "authenticate", "proxy".
	Services string `mapstructure:"services" yaml:"services,omitempty"`
	// Addr specifies the host and port on which the server should serve
	// HTTPS requests. If empty, ":443" (localhost:443) is used.
	Addr string `mapstructure:"address" yaml:"address,omitempty"`
	// InsecureServer when enabled disables all transport security.
	// In this mode, Pomerium is susceptible to man-in-the-middle attacks.
	// This should be used only for testing.
	InsecureServer bool `mapstructure:"insecure_server" yaml:"insecure_server,omitempty"`
	// DNSLookupFamily is the DNS IP address resolution policy.
	// If this setting is not specified, the value defaults to V4_PREFERRED.
	DNSLookupFamily string `mapstructure:"dns_lookup_family" yaml:"dns_lookup_family,omitempty"`
	// CertificateFiles is a list of x509 certificate/key file pairs to serve.
	CertificateFiles []certificateFilePair `mapstructure:"certificates" yaml:"certificates,omitempty"`
	// Cert and Key is the x509 certificate used to create the HTTPS server.
	Cert string `mapstructure:"certificate" yaml:"certificate,omitempty"`
	Key  string `mapstructure:"certificate_key" yaml:"certificate_key,omitempty"`
	// CertFile and KeyFile is the x509 certificate used to hydrate TLSCertificate
	CertFile string `mapstructure:"certificate_file" yaml:"certificate_file,omitempty"`
	KeyFile  string `mapstructure:"certificate_key_file" yaml:"certificate_key_file,omitempty"`
	// HttpRedirectAddr, if set, specifies the host and port to run the HTTP
	// to HTTPS redirect server on. If empty, no redirect server is started.
	HTTPRedirectAddr string `mapstructure:"http_redirect_addr" yaml:"http_redirect_addr,omitempty"`
	// Timeout settings : https://github.com/pomerium/pomerium/issues/40
	ReadTimeout  time.Duration `mapstructure:"timeout_read" yaml:"timeout_read,omitempty"`
	WriteTimeout time.Duration `mapstructure:"timeout_write" yaml:"timeout_write,omitempty"`
	IdleTimeout  time.Duration `mapstructure:"timeout_idle" yaml:"timeout_idle,omitempty"`
	// Policies define per-route configuration and access control policies.
	Policies   []Policy `mapstructure:"policy"`
	PolicyFile string   `mapstructure:"policy_file" yaml:"policy_file,omitempty"`
	Routes     []Policy `mapstructure:"routes"`
	// AdditionalPolicies are any additional policies added to the options.
	AdditionalPolicies []Policy `yaml:"-"`
	// AuthenticateURL represents the externally accessible http endpoints
	// used for authentication requests and callbacks
	AuthenticateURLString         string `mapstructure:"authenticate_service_url" yaml:"authenticate_service_url,omitempty"`
	AuthenticateInternalURLString string `mapstructure:"authenticate_internal_service_url" yaml:"authenticate_internal_service_url,omitempty"`
	// SignOutRedirectURL represents the url that user will be redirected to after signing out.
	SignOutRedirectURLString string `mapstructure:"signout_redirect_url" yaml:"signout_redirect_url,omitempty"`
	// AuthenticateCallbackPath is the path to the HTTP endpoint that will
	// receive the response from your identity provider. The value must exactly
	// match one of the authorized redirect URIs for the OAuth 2.0 client.
	// Defaults to: `/oauth2/callback`
	AuthenticateCallbackPath string `mapstructure:"authenticate_callback_path" yaml:"authenticate_callback_path,omitempty"`
	// Session/Cookie management
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie
	CookieName       string        `mapstructure:"cookie_name" yaml:"cookie_name,omitempty"`
	CookieSecret     string        `mapstructure:"cookie_secret" yaml:"cookie_secret,omitempty"`
	CookieSecretFile string        `mapstructure:"cookie_secret_file" yaml:"cookie_secret_file,omitempty"`
	CookieDomain     string        `mapstructure:"cookie_domain" yaml:"cookie_domain,omitempty"`
	CookieSecure     bool          `mapstructure:"cookie_secure" yaml:"cookie_secure,omitempty"`
	CookieHTTPOnly   bool          `mapstructure:"cookie_http_only" yaml:"cookie_http_only,omitempty"`
	CookieExpire     time.Duration `mapstructure:"cookie_expire" yaml:"cookie_expire,omitempty"`
	CookieSameSite   string        `mapstructure:"cookie_same_site" yaml:"cookie_same_site,omitempty"`
	// Identity provider configuration variables as specified by RFC6749
	// https://openid.net/specs/openid-connect-basic-1_0.html#RFC6749
	ClientID         string   `mapstructure:"idp_client_id" yaml:"idp_client_id,omitempty"`
	ClientSecret     string   `mapstructure:"idp_client_secret" yaml:"idp_client_secret,omitempty"`
	ClientSecretFile string   `mapstructure:"idp_client_secret_file" yaml:"idp_client_secret_file,omitempty"`
	Provider         string   `mapstructure:"idp_provider" yaml:"idp_provider,omitempty"`
	ProviderURL      string   `mapstructure:"idp_provider_url" yaml:"idp_provider_url,omitempty"`
	Scopes           []string `mapstructure:"idp_scopes" yaml:"idp_scopes,omitempty"`
	// RequestParams are custom request params added to the signin request as
	// part of an Oauth2 code flow.
	//
	// https://www.iana.org/assignments/oauth-parameters/oauth-parameters.xhtml
	// https://openid.net/specs/openid-connect-basic-1_0.html#RequestParameters
	RequestParams map[string]string `mapstructure:"idp_request_params" yaml:"idp_request_params,omitempty"`
	// AuthorizeURLString is the routable destination of the authorize service's
	// gRPC endpoint. NOTE: As many load balancers do not support
	// externally routed gRPC so this may be an internal location.
	AuthorizeURLString         string   `mapstructure:"authorize_service_url" yaml:"authorize_service_url,omitempty"`
	AuthorizeURLStrings        []string `mapstructure:"authorize_service_urls" yaml:"authorize_service_urls,omitempty"`
	AuthorizeInternalURLString string   `mapstructure:"authorize_internal_service_url" yaml:"authorize_internal_service_url,omitempty"`
	// Settings to enable custom behind-the-ingress service communication
	OverrideCertificateName string `mapstructure:"override_certificate_name" yaml:"override_certificate_name,omitempty"`
	CA                      string `mapstructure:"certificate_authority" yaml:"certificate_authority,omitempty"`
	CAFile                  string `mapstructure:"certificate_authority_file" yaml:"certificate_authority_file,omitempty"`
	// DeriveInternalDomainCert is an option that would derive certificate authority
	// and domain certificates from the shared key and use them for internal communication
	DeriveInternalDomainCert *string `mapstructure:"tls_derive" yaml:"tls_derive,omitempty"`
	// SigningKey is the private key used to add a JWT-signature to upstream requests.
	// https://www.pomerium.com/docs/topics/getting-users-identity.html
	SigningKey     string `mapstructure:"signing_key" yaml:"signing_key,omitempty"`
	SigningKeyFile string `mapstructure:"signing_key_file" yaml:"signing_key_file,omitempty"`
	// HeadersEnv holds the raw headers value read from the environment.
	HeadersEnv string `yaml:",omitempty"`
	// SetResponseHeaders to set on all proxied requests. Add a 'disable' key map to turn off.
	SetResponseHeaders map[string]string `yaml:",omitempty"`
	// List of JWT claims to insert as x-pomerium-claim-* headers on proxied requests
	JWTClaimsHeaders JWTClaimHeaders `mapstructure:"jwt_claims_headers" yaml:"jwt_claims_headers,omitempty"`
	DefaultUpstreamTimeout time.Duration `mapstructure:"default_upstream_timeout" yaml:"default_upstream_timeout,omitempty"`
	// Address/Port to bind to for prometheus metrics
	MetricsAddr string `mapstructure:"metrics_address" yaml:"metrics_address,omitempty"`
	// - require basic auth for prometheus metrics, base64 encoded user:pass string
	MetricsBasicAuth string `mapstructure:"metrics_basic_auth" yaml:"metrics_basic_auth,omitempty"`
	// - TLS options
	MetricsCertificate        string `mapstructure:"metrics_certificate" yaml:"metrics_certificate,omitempty"`
	MetricsCertificateKey     string `mapstructure:"metrics_certificate_key" yaml:"metrics_certificate_key,omitempty"`
	MetricsCertificateFile    string `mapstructure:"metrics_certificate_file" yaml:"metrics_certificate_file,omitempty"`
	MetricsCertificateKeyFile string `mapstructure:"metrics_certificate_key_file" yaml:"metrics_certificate_key_file,omitempty"`
	MetricsClientCA           string `mapstructure:"metrics_client_ca" yaml:"metrics_client_ca,omitempty"`
	MetricsClientCAFile       string `mapstructure:"metrics_client_ca_file" yaml:"metrics_client_ca_file,omitempty"`
	// Tracing shared settings
	TracingProvider   string  `mapstructure:"tracing_provider" yaml:"tracing_provider,omitempty"`
	TracingSampleRate float64 `mapstructure:"tracing_sample_rate" yaml:"tracing_sample_rate,omitempty"`
	// Datadog tracing address
	TracingDatadogAddress string `mapstructure:"tracing_datadog_address" yaml:"tracing_datadog_address,omitempty"`
	// Jaeger
	//
	// CollectorEndpoint is the full url to the Jaeger HTTP Thrift collector.
	// For example, http://localhost:14268/api/traces
	TracingJaegerCollectorEndpoint string `mapstructure:"tracing_jaeger_collector_endpoint" yaml:"tracing_jaeger_collector_endpoint,omitempty"`
	// AgentEndpoint instructs exporter to send spans to jaeger-agent at this address.
	// For example, localhost:6831.
	TracingJaegerAgentEndpoint string `mapstructure:"tracing_jaeger_agent_endpoint" yaml:"tracing_jaeger_agent_endpoint,omitempty"`
	// Zipkin
	//
	// ZipkinEndpoint configures the zipkin collector URI
	// Example: http://zipkin:9411/api/v2/spans
	ZipkinEndpoint string `mapstructure:"tracing_zipkin_endpoint" yaml:"tracing_zipkin_endpoint"`
	// GRPC Service Settings
	// GRPCAddr specifies the host and port on which the server should serve
	// gRPC requests. If running in all-in-one mode, ":5443" (localhost:5443) is used.
	GRPCAddr string `mapstructure:"grpc_address" yaml:"grpc_address,omitempty"`
	// GRPCInsecure disables transport security.
	// If running in all-in-one mode, defaults to true.
	GRPCInsecure *bool `mapstructure:"grpc_insecure" yaml:"grpc_insecure,omitempty"`
	GRPCClientTimeout       time.Duration `mapstructure:"grpc_client_timeout" yaml:"grpc_client_timeout,omitempty"`
	GRPCClientDNSRoundRobin bool          `mapstructure:"grpc_client_dns_roundrobin" yaml:"grpc_client_dns_roundrobin,omitempty"`
	// DataBrokerURLString is the routable destination of the databroker service's gRPC endpoint.
	DataBrokerURLString         string   `mapstructure:"databroker_service_url" yaml:"databroker_service_url,omitempty"`
	DataBrokerURLStrings        []string `mapstructure:"databroker_service_urls" yaml:"databroker_service_urls,omitempty"`
	DataBrokerInternalURLString string   `mapstructure:"databroker_internal_service_url" yaml:"databroker_internal_service_url,omitempty"`
	// DataBrokerStorageType is the storage backend type that databroker will use.
	// Supported type: memory, redis
	DataBrokerStorageType string `mapstructure:"databroker_storage_type" yaml:"databroker_storage_type,omitempty"`
	// DataBrokerStorageConnectionString is the data source name for storage backend.
	DataBrokerStorageConnectionString string `mapstructure:"databroker_storage_connection_string" yaml:"databroker_storage_connection_string,omitempty"`
	DataBrokerStorageCertFile         string `mapstructure:"databroker_storage_cert_file" yaml:"databroker_storage_cert_file,omitempty"`
	DataBrokerStorageCertKeyFile      string `mapstructure:"databroker_storage_key_file" yaml:"databroker_storage_key_file,omitempty"`
	DataBrokerStorageCAFile           string `mapstructure:"databroker_storage_ca_file" yaml:"databroker_storage_ca_file,omitempty"`
	DataBrokerStorageCertSkipVerify   bool   `mapstructure:"databroker_storage_tls_skip_verify" yaml:"databroker_storage_tls_skip_verify,omitempty"`
	// ClientCA is the base64-encoded certificate authority to validate client mTLS certificates against.
	//
	// Deprecated: Use DownstreamMTLS.CA instead.
	ClientCA string `mapstructure:"client_ca" yaml:"client_ca,omitempty"`
	// ClientCAFile points to a file that contains the certificate authority to validate client mTLS certificates against.
	//
	// Deprecated: Use DownstreamMTLS.CAFile instead.
	ClientCAFile string `mapstructure:"client_ca_file" yaml:"client_ca_file,omitempty"`
	// DownstreamMTLS holds all downstream mTLS settings.
	DownstreamMTLS DownstreamMTLSSettings `mapstructure:"downstream_mtls" yaml:"downstream_mtls,omitempty"`
	// GoogleCloudServerlessAuthenticationServiceAccount is the service account to use for GCP serverless authentication.
	// If unset, the GCP metadata server will be used to query for identity tokens.
	GoogleCloudServerlessAuthenticationServiceAccount string `mapstructure:"google_cloud_serverless_authentication_service_account" yaml:"google_cloud_serverless_authentication_service_account,omitempty"` //nolint
	// UseProxyProtocol configures the HTTP listener to require the HAProxy proxy protocol (either v1 or v2) on incoming requests.
	UseProxyProtocol bool `mapstructure:"use_proxy_protocol" yaml:"use_proxy_protocol,omitempty" json:"use_proxy_protocol,omitempty"`
	// viper is the configuration source backing this Options instance.
	viper *viper.Viper
	AutocertOptions `mapstructure:",squash" yaml:",inline"`
	// SkipXffAppend instructs proxy not to append its IP address to x-forwarded-for header.
	// see https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers.html?highlight=skip_xff_append#x-forwarded-for
	SkipXffAppend bool `mapstructure:"skip_xff_append" yaml:"skip_xff_append,omitempty" json:"skip_xff_append,omitempty"`
	// XffNumTrustedHops determines the trusted client address from x-forwarded-for addresses.
	// see https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers.html?highlight=xff_num_trusted_hops#x-forwarded-for
	XffNumTrustedHops uint32 `mapstructure:"xff_num_trusted_hops" yaml:"xff_num_trusted_hops,omitempty" json:"xff_num_trusted_hops,omitempty"`
	// Envoy bootstrap options. These do not support dynamic updates.
	EnvoyAdminAccessLogPath      string    `mapstructure:"envoy_admin_access_log_path" yaml:"envoy_admin_access_log_path"`
	EnvoyAdminProfilePath        string    `mapstructure:"envoy_admin_profile_path" yaml:"envoy_admin_profile_path"`
	EnvoyAdminAddress            string    `mapstructure:"envoy_admin_address" yaml:"envoy_admin_address"`
	EnvoyBindConfigSourceAddress string    `mapstructure:"envoy_bind_config_source_address" yaml:"envoy_bind_config_source_address,omitempty"`
	EnvoyBindConfigFreebind      null.Bool `mapstructure:"envoy_bind_config_freebind" yaml:"envoy_bind_config_freebind,omitempty"`
	// ProgrammaticRedirectDomainWhitelist restricts the allowed redirect URLs when using programmatic login.
	ProgrammaticRedirectDomainWhitelist []string `mapstructure:"programmatic_redirect_domain_whitelist" yaml:"programmatic_redirect_domain_whitelist,omitempty" json:"programmatic_redirect_domain_whitelist,omitempty"` //nolint
	// CodecType is the codec to use for downstream connections.
	CodecType CodecType `mapstructure:"codec_type" yaml:"codec_type"`
	// AuditKey is the public key used to encrypt audit records.
	AuditKey *PublicKeyEncryptionKeyOptions `mapstructure:"audit_key"`
	// BrandingOptions customizes the UI branding.
	BrandingOptions httputil.BrandingOptions
}
// certificateFilePair groups one x509 certificate with its private key.
// Elsewhere in this file (GetCertificates, GetX509Certificates,
// applyExternalCerts) each field is treated as either base64-encoded PEM or a
// filesystem path — base64 is tried first, then the file fallback.
type certificateFilePair struct {
	// CertFile and KeyFile is the x509 certificate used to hydrate TLSCertificate
	CertFile string `mapstructure:"cert" yaml:"cert,omitempty"`
	KeyFile  string `mapstructure:"key" yaml:"key,omitempty"`
}
// DefaultOptions are the default configuration options for pomerium
var defaultOptions = Options{
	Debug:                  false,
	LogLevel:               LogLevelInfo,
	Services:               "all", // run every service in a single process by default
	CookieHTTPOnly:         true,
	CookieSecure:           true,
	CookieExpire:           14 * time.Hour,
	CookieName:             "_pomerium",
	DefaultUpstreamTimeout: 30 * time.Second,
	Addr:                   ":443",
	ReadTimeout:            30 * time.Second,
	WriteTimeout:           0, // support streaming by default
	IdleTimeout:            5 * time.Minute,
	// gRPC shares :443 by default; GetGRPCAddr swaps in an alternative port
	// when all services run in one process to avoid a listener collision.
	GRPCAddr:                 ":443",
	GRPCClientTimeout:        10 * time.Second, // Try to withstand transient service failures for a single request
	GRPCClientDNSRoundRobin:  true,
	AuthenticateURLString:    "https://authenticate.pomerium.app",
	AuthenticateCallbackPath: "/oauth2/callback",
	TracingSampleRate:        0.0001,
	AutocertOptions: AutocertOptions{
		Folder: dataDir(),
	},
	DataBrokerStorageType: "memory",
	SkipXffAppend:         false,
	XffNumTrustedHops:     0,
	// Envoy admin logging/profiling is disabled by default (sent to /dev/null).
	EnvoyAdminAccessLogPath:             os.DevNull,
	EnvoyAdminProfilePath:               os.DevNull,
	ProgrammaticRedirectDomainWhitelist: []string{"localhost"},
}
// defaultSetResponseHeaders are the security response headers used when
// set_response_headers is not configured. Strict-Transport-Security is removed
// at lookup time when no certificates are configured (see
// GetSetResponseHeadersForPolicy).
var defaultSetResponseHeaders = map[string]string{
	"X-Frame-Options":           "SAMEORIGIN",
	"X-XSS-Protection":          "1; mode=block",
	"Strict-Transport-Security": "max-age=31536000; includeSubDomains; preload",
}
// NewDefaultOptions returns a copy of the default options with a fresh viper
// instance attached. It's the caller's responsibility to do a follow up
// Validate call.
func NewDefaultOptions() *Options {
	opts := defaultOptions
	opts.viper = viper.New()
	return &opts
}
// newOptionsFromConfig builds the main binary's configuration options by parsing
// environmental variables and config file
func newOptionsFromConfig(configFile string) (*Options, error) {
	opts, err := optionsFromViper(configFile)
	if err != nil {
		return nil, fmt.Errorf("config: options from config file %q: %w", configFile, err)
	}
	// Expose the policy count as a metric for the configured service.
	metrics.AddPolicyCountCallback(telemetry.ServiceName(opts.Services), func() int64 {
		return int64(len(opts.GetAllPolicies()))
	})
	return opts, nil
}
// optionsFromViper loads environment variables and the optional config file
// into a fresh copy of the default options, reports unknown keys, and
// validates the result.
func optionsFromViper(configFile string) (*Options, error) {
	// start a copy of the default options
	o := NewDefaultOptions()
	v := o.viper
	// Load up config
	err := bindEnvs(v)
	if err != nil {
		return nil, fmt.Errorf("failed to bind options to env vars: %w", err)
	}
	if configFile != "" {
		v.SetConfigFile(configFile)
		if err := v.ReadInConfig(); err != nil {
			return nil, fmt.Errorf("failed to read config: %w", err)
		}
	}
	// Capture decode metadata so unknown/unused config keys can be reported.
	var metadata mapstructure.Metadata
	if err := v.Unmarshal(o, ViperPolicyHooks, func(c *mapstructure.DecoderConfig) { c.Metadata = &metadata }); err != nil {
		return nil, fmt.Errorf("failed to unmarshal config: %w", err)
	}
	if err := checkConfigKeysErrors(configFile, metadata.Unused); err != nil {
		return nil, err
	}
	// This is necessary because v.Unmarshal will overwrite .viper field.
	o.viper = v
	if err := o.Validate(); err != nil {
		return nil, fmt.Errorf("validation error %w", err)
	}
	return o, nil
}
// checkConfigKeysErrors logs a message for every unknown or deprecated
// configuration key found in the config file. It returns a non-nil error when
// at least one key is classified as fatal (KeyActionError); warnings alone
// return nil.
func checkConfigKeysErrors(configFile string, unused []string) error {
	checks := CheckUnknownConfigFields(unused)
	ctx := context.Background()
	errInvalidConfigKeys := errors.New("some configuration options are no longer supported, please check logs for details")
	var err error
	for _, check := range checks {
		// Fatal keys log at error level and flip the returned error;
		// everything else is only a warning.
		evt := log.Warn(ctx)
		if check.KeyAction == KeyActionError {
			evt = log.Error(ctx)
			err = errInvalidConfigKeys
		}
		evt.Str("config_file", configFile).Str("key", check.Key)
		if check.DocsURL != "" {
			evt = evt.Str("help", check.DocsURL)
		}
		evt.Msg(string(check.FieldCheckMsg))
	}
	return err
}
// parsePolicy initializes policy to the options from either base64 environmental
// variables or from a file
func (o *Options) parsePolicy() error {
	var policies []Policy
	if err := o.viper.UnmarshalKey("policy", &policies, ViperPolicyHooks); err != nil {
		return err
	}
	if len(policies) != 0 {
		o.Policies = policies
	}
	var routes []Policy
	if err := o.viper.UnmarshalKey("routes", &routes, ViperPolicyHooks); err != nil {
		return err
	}
	if len(routes) != 0 {
		o.Routes = routes
	}
	// Finish initializing policies. Validate is called through the shared
	// backing arrays so any hydration it performs sticks to the options.
	for _, group := range [][]Policy{o.Policies, o.Routes, o.AdditionalPolicies} {
		for i := range group {
			if err := group[i].Validate(); err != nil {
				return err
			}
		}
	}
	return nil
}
// viperSet overrides a single configuration key on the underlying viper
// instance.
func (o *Options) viperSet(key string, value interface{}) {
	o.viper.Set(key, value)
}
// viperIsSet reports whether the given configuration key was set via config
// file, environment variable, or override.
func (o *Options) viperIsSet(key string) bool {
	return o.viper.IsSet(key)
}
// parseHeaders handles unmarshalling any custom headers correctly from the
// environment or viper's parsed keys.
//
// Precedence: a non-empty HeadersEnv wins (JSON first, then the legacy
// "Key1:Value1,Key2:Value2" syntax); otherwise set_response_headers from the
// config file is used. Returns an error only for an unparseable HeadersEnv.
func (o *Options) parseHeaders(_ context.Context) error {
	var headers map[string]string
	if o.HeadersEnv != "" {
		// Handle JSON by default via viper
		if headers = o.viper.GetStringMapString("HeadersEnv"); len(headers) == 0 {
			// Try to parse "Key1:Value1,Key2:Value2" syntax
			headerSlice := strings.Split(o.HeadersEnv, ",")
			// Allocate explicitly: viper may have handed back a nil map, and
			// assigning into a nil map panics.
			headers = make(map[string]string, len(headerSlice))
			for n := range headerSlice {
				headerFields := strings.SplitN(headerSlice[n], ":", 2)
				if len(headerFields) == 2 {
					headers[headerFields[0]] = headerFields[1]
				} else {
					// Something went wrong
					return fmt.Errorf("failed to decode headers from '%s'", o.HeadersEnv)
				}
			}
		}
		o.SetResponseHeaders = headers
		return nil
	}
	if o.viperIsSet("set_response_headers") {
		if err := o.viper.UnmarshalKey("set_response_headers", &headers); err != nil {
			return fmt.Errorf("header %s failed to parse: %w", o.viper.Get("set_response_headers"), err)
		}
		o.SetResponseHeaders = headers
	}
	return nil
}
// bindEnvs adds a Viper environment variable binding for each field in the
// Options struct (including nested structs), based on the mapstructure tag.
func bindEnvs(v *viper.Viper) error {
	if _, err := bindEnvsRecursive(reflect.TypeOf(Options{}), v, "", ""); err != nil {
		return err
	}
	// Statically bind fields
	if err := v.BindEnv("Policy", "POLICY"); err != nil {
		return fmt.Errorf("failed to bind field 'Policy' to env var 'POLICY': %w", err)
	}
	if err := v.BindEnv("HeadersEnv", "HEADERS"); err != nil {
		return fmt.Errorf("failed to bind field 'HeadersEnv' to env var 'HEADERS': %w", err)
	}
	return nil
}
// bindEnvsRecursive binds all fields of the provided struct type that have a
// "mapstructure" tag to corresponding environment variables, recursively. If a
// nested struct contains no fields with a "mapstructure" tag, a binding will
// be added for the struct itself (e.g. null.Bool).
//
// keyPrefix carries the dotted viper key path and envPrefix the
// underscore-joined env var prefix accumulated from enclosing structs. The
// returned bool reports whether any field of t carried a "mapstructure" tag.
func bindEnvsRecursive(t reflect.Type, v *viper.Viper, keyPrefix, envPrefix string) (bool, error) {
	anyFieldHasMapstructureTag := false
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		tag, hasTag := field.Tag.Lookup("mapstructure")
		if !hasTag || tag == "-" {
			continue
		}
		anyFieldHasMapstructureTag = true
		// Tags may carry options after a comma (e.g. ",squash"); only the name
		// part contributes to key/env names. An empty name (embedded field)
		// keeps the current prefixes.
		key, _, _ := strings.Cut(tag, ",")
		keyPath := keyPrefix + key
		envName := envPrefix + strings.ToUpper(key)
		if field.Type.Kind() == reflect.Struct {
			newKeyPrefix := keyPath
			newEnvPrefix := envName
			if key != "" {
				newKeyPrefix += "."
				newEnvPrefix += "_"
			}
			nestedMapstructure, err := bindEnvsRecursive(field.Type, v, newKeyPrefix, newEnvPrefix)
			if err != nil {
				return false, err
			} else if nestedMapstructure {
				// If we've bound any nested fields from this struct, do not
				// also bind this struct itself.
				continue
			}
		}
		if key != "" {
			if err := v.BindEnv(keyPath, envName); err != nil {
				return false, fmt.Errorf("failed to bind field '%s' to env var '%s': %w",
					field.Name, envName, err)
			}
		}
	}
	return anyFieldHasMapstructureTag, nil
}
// Validate ensures the Options fields are valid, and hydrated.
//
// Besides pure validation it also performs hydration side effects: parsing
// policies, parsing headers, migrating deprecated client_ca/client_ca_file
// settings into DownstreamMTLS, and trimming the HTTP redirect address.
func (o *Options) Validate() error {
	ctx := context.TODO()
	if !IsValidService(o.Services) {
		return fmt.Errorf("config: %s is an invalid service type", o.Services)
	}
	// Databroker backends other than in-memory require a connection string.
	switch o.DataBrokerStorageType {
	case StorageInMemoryName:
	case StorageRedisName, StoragePostgresName:
		if o.DataBrokerStorageConnectionString == "" {
			return errors.New("config: missing databroker storage backend dsn")
		}
	default:
		return errors.New("config: unknown databroker storage backend type")
	}
	_, err := o.GetSharedKey()
	if err != nil {
		return fmt.Errorf("config: invalid shared secret: %w", err)
	}
	// Validate every configured service URL; empty strings are allowed and
	// fall back to defaults at lookup time.
	if o.AuthenticateURLString != "" {
		_, err := urlutil.ParseAndValidateURL(o.AuthenticateURLString)
		if err != nil {
			return fmt.Errorf("config: bad authenticate-url %s : %w", o.AuthenticateURLString, err)
		}
	}
	if o.AuthenticateInternalURLString != "" {
		_, err := urlutil.ParseAndValidateURL(o.AuthenticateInternalURLString)
		if err != nil {
			return fmt.Errorf("config: bad authenticate-internal-url %s : %w", o.AuthenticateInternalURLString, err)
		}
	}
	if o.SignOutRedirectURLString != "" {
		_, err := urlutil.ParseAndValidateURL(o.SignOutRedirectURLString)
		if err != nil {
			return fmt.Errorf("config: bad signout-redirect-url %s : %w", o.SignOutRedirectURLString, err)
		}
	}
	if o.AuthorizeURLString != "" {
		_, err := urlutil.ParseAndValidateURL(o.AuthorizeURLString)
		if err != nil {
			return fmt.Errorf("config: bad authorize-url %s : %w", o.AuthorizeURLString, err)
		}
	}
	if o.AuthorizeInternalURLString != "" {
		_, err := urlutil.ParseAndValidateURL(o.AuthorizeInternalURLString)
		if err != nil {
			return fmt.Errorf("config: bad authorize-internal-url %s : %w", o.AuthorizeInternalURLString, err)
		}
	}
	if o.DataBrokerURLString != "" {
		_, err := urlutil.ParseAndValidateURL(o.DataBrokerURLString)
		if err != nil {
			return fmt.Errorf("config: bad databroker service url %s : %w", o.DataBrokerURLString, err)
		}
	}
	if o.DataBrokerInternalURLString != "" {
		_, err := urlutil.ParseAndValidateURL(o.DataBrokerInternalURLString)
		if err != nil {
			return fmt.Errorf("config: bad databroker internal service url %s : %w", o.DataBrokerInternalURLString, err)
		}
	}
	if o.PolicyFile != "" {
		return errors.New("config: policy file setting is deprecated")
	}
	// Hydration: policies and headers are parsed from viper here.
	if err := o.parsePolicy(); err != nil {
		return fmt.Errorf("config: failed to parse policy: %w", err)
	}
	if err := o.parseHeaders(ctx); err != nil {
		return fmt.Errorf("config: failed to parse headers: %w", err)
	}
	// Certificate checks; hasCert tracks whether any server certificate was
	// supplied so we can warn about self-signed fallback below.
	hasCert := false
	if o.Cert != "" || o.Key != "" {
		_, err := cryptutil.CertificateFromBase64(o.Cert, o.Key)
		if err != nil {
			return fmt.Errorf("config: bad cert base64 %w", err)
		}
		hasCert = true
	}
	for _, c := range o.CertificateFiles {
		// Entries may be base64-encoded PEM or file paths; try both.
		_, err := cryptutil.CertificateFromBase64(c.CertFile, c.KeyFile)
		if err != nil {
			_, err = cryptutil.CertificateFromFile(c.CertFile, c.KeyFile)
		}
		if err != nil {
			return fmt.Errorf("config: bad cert entry, base64 or file reference invalid. %w", err)
		}
		hasCert = true
	}
	if o.CertFile != "" || o.KeyFile != "" {
		_, err := cryptutil.CertificateFromFile(o.CertFile, o.KeyFile)
		if err != nil {
			return fmt.Errorf("config: bad cert file %w", err)
		}
		hasCert = true
	}
	if o.DataBrokerStorageCertFile != "" || o.DataBrokerStorageCertKeyFile != "" {
		_, err := cryptutil.CertificateFromFile(o.DataBrokerStorageCertFile, o.DataBrokerStorageCertKeyFile)
		if err != nil {
			return fmt.Errorf("config: bad databroker cert file %w", err)
		}
	}
	if o.DataBrokerStorageCAFile != "" {
		if _, err := os.Stat(o.DataBrokerStorageCAFile); err != nil {
			return fmt.Errorf("config: bad databroker ca file: %w", err)
		}
	}
	// Migrate deprecated client CA settings into DownstreamMTLS (the new
	// settings win when both are present).
	if o.ClientCA != "" {
		log.Warn(context.Background()).Msg("config: client_ca is deprecated, set " +
			"downstream_mtls.ca instead")
		if o.DownstreamMTLS.CA == "" {
			o.DownstreamMTLS.CA = o.ClientCA
		}
	}
	if o.ClientCAFile != "" {
		log.Warn(context.Background()).Msg("config: client_ca_file is deprecated, set " +
			"downstream_mtls.ca_file instead")
		if o.DownstreamMTLS.CAFile == "" {
			o.DownstreamMTLS.CAFile = o.ClientCAFile
		}
	}
	if err := o.DownstreamMTLS.validate(); err != nil {
		return fmt.Errorf("config: bad downstream mTLS settings: %w", err)
	}
	// strip quotes from redirect address (#811)
	o.HTTPRedirectAddr = strings.Trim(o.HTTPRedirectAddr, `"'`)
	if !o.InsecureServer && !hasCert && !o.AutocertOptions.Enable {
		log.Warn(ctx).Msg("neither `autocert`, " +
			"`insecure_server` or manually provided certificates were provided, server will be using a self-signed certificate")
	}
	if err := ValidateDNSLookupFamily(o.DNSLookupFamily); err != nil {
		return fmt.Errorf("config: %w", err)
	}
	if o.MetricsAddr != "" {
		if err := ValidateMetricsAddress(o.MetricsAddr); err != nil {
			return fmt.Errorf("config: invalid metrics_addr: %w", err)
		}
	}
	// validate metrics basic auth
	if o.MetricsBasicAuth != "" {
		str, err := base64.StdEncoding.DecodeString(o.MetricsBasicAuth)
		if err != nil {
			return fmt.Errorf("config: metrics_basic_auth must be a base64 encoded string")
		}
		if !strings.Contains(string(str), ":") {
			return fmt.Errorf("config: metrics_basic_auth should contain a user name and password separated by a colon")
		}
	}
	if o.MetricsCertificate != "" && o.MetricsCertificateKey != "" {
		_, err := cryptutil.CertificateFromBase64(o.MetricsCertificate, o.MetricsCertificateKey)
		if err != nil {
			return fmt.Errorf("config: invalid metrics_certificate or metrics_certificate_key: %w", err)
		}
	}
	if o.MetricsCertificateFile != "" && o.MetricsCertificateKeyFile != "" {
		_, err := cryptutil.CertificateFromFile(o.MetricsCertificateFile, o.MetricsCertificateKeyFile)
		if err != nil {
			return fmt.Errorf("config: invalid metrics_certificate_file or metrics_certificate_key_file: %w", err)
		}
	}
	// validate the Autocert options
	err = o.AutocertOptions.Validate()
	if err != nil {
		return err
	}
	// SameSite=None requires Secure per browser cookie rules.
	if err := ValidateCookieSameSite(o.CookieSameSite); err != nil {
		return fmt.Errorf("config: invalid cookie_same_site: %w", err)
	} else if !o.CookieSecure && o.GetCookieSameSite() == http.SameSiteNoneMode {
		return errors.New("config: cannot use cookie_same_site: none with cookie_secure: false")
	}
	if err := ValidateLogLevel(o.LogLevel); err != nil {
		return fmt.Errorf("config: invalid log_level: %w", err)
	}
	if err := ValidateLogLevel(o.ProxyLogLevel); err != nil {
		return fmt.Errorf("config: invalid proxy_log_level: %w", err)
	}
	for _, field := range o.AccessLogFields {
		if err := field.Validate(); err != nil {
			return fmt.Errorf("config: invalid access_log_fields: %w", err)
		}
	}
	for _, field := range o.AuthorizeLogFields {
		if err := field.Validate(); err != nil {
			return fmt.Errorf("config: invalid authorize_log_fields: %w", err)
		}
	}
	return nil
}
// GetDeriveInternalDomain returns an optional internal domain name to use for gRPC endpoint
func (o *Options) GetDeriveInternalDomain() string {
	cert := o.DeriveInternalDomainCert
	if cert == nil {
		return ""
	}
	return strings.ToLower(*cert)
}
// GetAuthenticateURL returns the AuthenticateURL in the options or 127.0.0.1.
func (o *Options) GetAuthenticateURL() (*url.URL, error) {
	raw := o.AuthenticateURLString
	if raw == "" {
		raw = "https://127.0.0.1"
	}
	return urlutil.ParseAndValidateURL(raw)
}
// GetInternalAuthenticateURL returns the internal AuthenticateURL in the options or the AuthenticateURL.
func (o *Options) GetInternalAuthenticateURL() (*url.URL, error) {
	if o.AuthenticateInternalURLString == "" {
		return o.GetAuthenticateURL()
	}
	return urlutil.ParseAndValidateURL(o.AuthenticateInternalURLString)
}
// GetAuthorizeURLs returns the AuthorizeURLs in the options or 127.0.0.1:5443.
func (o *Options) GetAuthorizeURLs() ([]*url.URL, error) {
	// In all-in-one mode with nothing configured, talk to the local authorize
	// service on the alternative port.
	if IsAll(o.Services) && o.AuthorizeURLString == "" && len(o.AuthorizeURLStrings) == 0 {
		u, err := urlutil.ParseAndValidateURL("http://127.0.0.1" + DefaultAlternativeAddr)
		if err != nil {
			return nil, err
		}
		return []*url.URL{u}, nil
	}
	all := append([]string{o.AuthorizeURLString}, o.AuthorizeURLStrings...)
	return o.getURLs(all...)
}
// GetInternalAuthorizeURLs returns the internal AuthorizeURLs in the options or the AuthorizeURLs.
func (o *Options) GetInternalAuthorizeURLs() ([]*url.URL, error) {
	if o.AuthorizeInternalURLString == "" {
		return o.GetAuthorizeURLs()
	}
	return o.getURLs(o.AuthorizeInternalURLString)
}
// GetDataBrokerURLs returns the DataBrokerURLs in the options or 127.0.0.1:5443.
func (o *Options) GetDataBrokerURLs() ([]*url.URL, error) {
	// In all-in-one mode with nothing configured, talk to the local databroker
	// service on the alternative port.
	if IsAll(o.Services) && o.DataBrokerURLString == "" && len(o.DataBrokerURLStrings) == 0 {
		u, err := urlutil.ParseAndValidateURL("http://127.0.0.1" + DefaultAlternativeAddr)
		if err != nil {
			return nil, err
		}
		return []*url.URL{u}, nil
	}
	all := append([]string{o.DataBrokerURLString}, o.DataBrokerURLStrings...)
	return o.getURLs(all...)
}
// GetInternalDataBrokerURLs returns the internal DataBrokerURLs in the options or the DataBrokerURLs.
func (o *Options) GetInternalDataBrokerURLs() ([]*url.URL, error) {
	if o.DataBrokerInternalURLString == "" {
		return o.GetDataBrokerURLs()
	}
	return o.getURLs(o.DataBrokerInternalURLString)
}
// getURLs parses and validates the given non-empty URL strings, falling back
// to the local alternative address when none are supplied (or o is nil).
func (o *Options) getURLs(strs ...string) ([]*url.URL, error) {
	var urls []*url.URL
	if o != nil {
		for _, raw := range strs {
			if raw == "" {
				continue
			}
			u, err := urlutil.ParseAndValidateURL(raw)
			if err != nil {
				return nil, err
			}
			urls = append(urls, u)
		}
	}
	if len(urls) == 0 {
		// Hard-coded fallback URL; parse cannot fail, so the error is dropped.
		fallback, _ := url.Parse("http://127.0.0.1" + DefaultAlternativeAddr)
		urls = append(urls, fallback)
	}
	return urls, nil
}
// GetGRPCAddr gets the gRPC address.
func (o *Options) GetGRPCAddr() string {
	// to avoid port collision when running on localhost
	if o.GRPCAddr == defaultOptions.GRPCAddr && IsAll(o.Services) {
		return DefaultAlternativeAddr
	}
	return o.GRPCAddr
}
// GetGRPCInsecure gets whether or not gRPC is insecure. An explicit setting
// wins; otherwise all-in-one mode defaults to insecure local gRPC.
func (o *Options) GetGRPCInsecure() bool {
	if o.GRPCInsecure != nil {
		return *o.GRPCInsecure
	}
	return IsAll(o.Services)
}
// GetSignOutRedirectURL gets the SignOutRedirectURL. Returns (nil, nil) when
// no redirect URL is configured.
func (o *Options) GetSignOutRedirectURL() (*url.URL, error) {
	if o.SignOutRedirectURLString == "" {
		return nil, nil
	}
	return urlutil.ParseAndValidateURL(o.SignOutRedirectURLString)
}
// GetMetricsCertificate returns the metrics certificate to use for TLS. `nil` will be
// returned if there is no certificate. Base64-encoded material takes
// precedence over file paths.
func (o *Options) GetMetricsCertificate() (*tls.Certificate, error) {
	switch {
	case o.MetricsCertificate != "" && o.MetricsCertificateKey != "":
		return cryptutil.CertificateFromBase64(o.MetricsCertificate, o.MetricsCertificateKey)
	case o.MetricsCertificateFile != "" && o.MetricsCertificateKeyFile != "":
		return cryptutil.CertificateFromFile(o.MetricsCertificateFile, o.MetricsCertificateKeyFile)
	default:
		return nil, nil
	}
}
// GetOauthOptions gets the oauth.Options for the given config options.
func (o *Options) GetOauthOptions() (oauth.Options, error) {
	authenticateURL, err := o.GetAuthenticateURL()
	if err != nil {
		return oauth.Options{}, err
	}
	clientSecret, err := o.GetClientSecret()
	if err != nil {
		return oauth.Options{}, err
	}
	// The OAuth redirect is the authenticate URL plus the callback path.
	redirectURL := authenticateURL.ResolveReference(&url.URL{
		Path: o.AuthenticateCallbackPath,
	})
	return oauth.Options{
		RedirectURL:  redirectURL,
		ProviderName: o.Provider,
		ProviderURL:  o.ProviderURL,
		ClientID:     o.ClientID,
		ClientSecret: clientSecret,
		Scopes:       o.Scopes,
	}, nil
}
// GetAllPolicies gets all the policies in the options: Policies, Routes, and
// AdditionalPolicies, concatenated into a single freshly allocated slice.
func (o *Options) GetAllPolicies() []Policy {
	if o == nil {
		return nil
	}
	all := make([]Policy, 0, len(o.Policies)+len(o.Routes)+len(o.AdditionalPolicies))
	all = append(all, o.Policies...)
	all = append(all, o.Routes...)
	all = append(all, o.AdditionalPolicies...)
	return all
}
// GetMetricsBasicAuth gets the metrics basic auth username and password.
// The configured value is base64("user:pass"); ok is false when it is unset,
// not valid base64, or missing the colon separator.
func (o *Options) GetMetricsBasicAuth() (username, password string, ok bool) {
	if o.MetricsBasicAuth == "" {
		return "", "", false
	}
	decoded, err := base64.StdEncoding.DecodeString(o.MetricsBasicAuth)
	if err != nil {
		return "", "", false
	}
	user, pass, found := strings.Cut(string(decoded), ":")
	if !found {
		return "", "", false
	}
	return user, pass, true
}
// HasAnyDownstreamMTLSClientCA returns true if there is a global downstream
// client CA or there are any per-route downstream client CAs.
func (o *Options) HasAnyDownstreamMTLSClientCA() bool {
	// All the CA settings should already have been validated.
	if ca, _ := o.DownstreamMTLS.GetCA(); len(ca) > 0 {
		return true
	}
	policies := o.GetAllPolicies()
	for i := range policies {
		// TLSDownstreamClientCAFile need not be checked here because
		// Policy.Validate() populates TLSDownstreamClientCA when the file
		// variant is set.
		if policies[i].TLSDownstreamClientCA != "" {
			return true
		}
	}
	return false
}
// GetDataBrokerCertificate gets the optional databroker certificate. This method will return nil if no certificate is
// specified.
func (o *Options) GetDataBrokerCertificate() (*tls.Certificate, error) {
	certFile, keyFile := o.DataBrokerStorageCertFile, o.DataBrokerStorageCertKeyFile
	if certFile == "" || keyFile == "" {
		return nil, nil
	}
	return cryptutil.CertificateFromFile(certFile, keyFile)
}
// GetCertificates gets all the certificates from the options: the inline
// base64 pair, each certificate_files entry, and the cert/key file pair.
func (o *Options) GetCertificates() ([]tls.Certificate, error) {
	var certs []tls.Certificate
	if o.Cert != "" && o.Key != "" {
		cert, err := cryptutil.CertificateFromBase64(o.Cert, o.Key)
		if err != nil {
			return nil, fmt.Errorf("config: invalid base64 certificate: %w", err)
		}
		certs = append(certs, *cert)
	}
	for _, pair := range o.CertificateFiles {
		// Each entry may hold base64-encoded PEM or a file path; try both.
		cert, err := cryptutil.CertificateFromBase64(pair.CertFile, pair.KeyFile)
		if err != nil {
			cert, err = cryptutil.CertificateFromFile(pair.CertFile, pair.KeyFile)
		}
		if err != nil {
			return nil, fmt.Errorf("config: invalid certificate entry: %w", err)
		}
		certs = append(certs, *cert)
	}
	if o.CertFile != "" && o.KeyFile != "" {
		cert, err := cryptutil.CertificateFromFile(o.CertFile, o.KeyFile)
		if err != nil {
			return nil, fmt.Errorf("config: bad cert file %w", err)
		}
		certs = append(certs, *cert)
	}
	return certs, nil
}
// HasCertificates returns true if options has any certificates.
func (o *Options) HasCertificates() bool {
	if len(o.CertificateFiles) > 0 {
		return true
	}
	return o.Cert != "" || o.Key != "" || o.CertFile != "" || o.KeyFile != ""
}
// GetX509Certificates gets all the x509 certificates from the options. Invalid certificates are ignored.
func (o *Options) GetX509Certificates() []*x509.Certificate {
	var certs []*x509.Certificate
	// cert_file takes precedence over the inline base64 cert.
	if o.CertFile != "" {
		if cert, err := cryptutil.ParsePEMCertificateFromFile(o.CertFile); err != nil {
			log.Error(context.Background()).Err(err).Str("file", o.CertFile).Msg("invalid cert_file")
		} else {
			certs = append(certs, cert)
		}
	} else if o.Cert != "" {
		cert, err := cryptutil.ParsePEMCertificateFromBase64(o.Cert)
		if err != nil {
			log.Error(context.Background()).Err(err).Msg("invalid cert")
		} else {
			certs = append(certs, cert)
		}
	}
	for _, pair := range o.CertificateFiles {
		// Each entry may be base64-encoded PEM or a path on disk; try both.
		cert, err := cryptutil.ParsePEMCertificateFromBase64(pair.CertFile)
		if err != nil {
			cert, err = cryptutil.ParsePEMCertificateFromFile(pair.CertFile)
		}
		if err != nil {
			log.Error(context.Background()).Err(err).Msg("invalid certificate_file")
		} else {
			certs = append(certs, cert)
		}
	}
	return certs
}
// GetSharedKey gets the decoded shared key.
func (o *Options) GetSharedKey() ([]byte, error) {
	sharedKey := o.SharedKey
	// A configured secret file takes precedence over the inline value.
	if o.SharedSecretFile != "" {
		bs, err := os.ReadFile(o.SharedSecretFile)
		if err != nil {
			return nil, err
		}
		sharedKey = string(bs)
	}
	// mutual auth between services on the same host can be generated at runtime
	if IsAll(o.Services) && sharedKey == "" {
		sharedKey = randomSharedKey
	}
	if sharedKey == "" {
		return nil, errors.New("empty shared secret")
	}
	// Surrounding whitespace (e.g. a trailing newline from a secret file)
	// would silently change the decoded key, so reject it explicitly.
	if strings.TrimSpace(sharedKey) != sharedKey {
		return nil, errors.New("shared secret contains whitespace")
	}
	return base64.StdEncoding.DecodeString(sharedKey)
}
// GetHPKEPrivateKey gets the hpke.PrivateKey derived from the shared key.
func (o *Options) GetHPKEPrivateKey() (*hpke.PrivateKey, error) {
	key, err := o.GetSharedKey()
	if err != nil {
		return nil, err
	}
	return hpke.DerivePrivateKey(key), nil
}
// GetGoogleCloudServerlessAuthenticationServiceAccount gets the GoogleCloudServerlessAuthenticationServiceAccount.
// An empty string means the GCP metadata server is used instead (see the
// field's declaration comment).
func (o *Options) GetGoogleCloudServerlessAuthenticationServiceAccount() string {
	return o.GoogleCloudServerlessAuthenticationServiceAccount
}
// GetSetResponseHeaders gets the SetResponseHeaders.
// It delegates to the policy-aware variant with no policy applied.
func (o *Options) GetSetResponseHeaders() map[string]string {
	return o.GetSetResponseHeadersForPolicy(nil)
}
// GetSetResponseHeadersForPolicy gets the SetResponseHeaders for a policy.
//
// Global headers come from SetResponseHeaders (or the defaults, minus HSTS
// when no certificates are configured); policy headers are overlaid on top.
// The presence of DisableHeaderKey — before or after the overlay — clears the
// result.
func (o *Options) GetSetResponseHeadersForPolicy(policy *Policy) map[string]string {
	hdrs := o.SetResponseHeaders
	if hdrs == nil {
		hdrs = make(map[string]string)
		for k, v := range defaultSetResponseHeaders {
			hdrs[k] = v
		}
		if !o.HasCertificates() {
			delete(hdrs, "Strict-Transport-Security")
		}
	}
	if _, ok := hdrs[DisableHeaderKey]; ok {
		hdrs = make(map[string]string)
	}
	if policy != nil && policy.SetResponseHeaders != nil {
		// Copy before merging: at this point hdrs may still alias
		// o.SetResponseHeaders, and writing through it would permanently
		// mutate the shared options map for every subsequent caller.
		merged := make(map[string]string, len(hdrs)+len(policy.SetResponseHeaders))
		for k, v := range hdrs {
			merged[k] = v
		}
		for k, v := range policy.SetResponseHeaders {
			merged[k] = v
		}
		hdrs = merged
	}
	if _, ok := hdrs[DisableHeaderKey]; ok {
		hdrs = make(map[string]string)
	}
	return hdrs
}
// GetCodecType gets a codec type, substituting auto when none was configured.
func (o *Options) GetCodecType() CodecType {
	if o.CodecType != CodecTypeUnset {
		return o.CodecType
	}
	return CodecTypeAuto
}
// GetAllRouteableGRPCHosts returns all the possible gRPC hosts handled by the Pomerium options.
func (o *Options) GetAllRouteableGRPCHosts() ([]string, error) {
	hosts := sets.NewSorted[string]()
	// collect folds the domains of a URL list into the host set, forwarding
	// any lookup error.
	collect := func(urls []*url.URL, err error) error {
		if err != nil {
			return err
		}
		for _, u := range urls {
			hosts.Add(urlutil.GetDomainsForURL(u)...)
		}
		return nil
	}
	// authorize urls
	if IsAll(o.Services) {
		if err := collect(o.GetAuthorizeURLs()); err != nil {
			return nil, err
		}
	} else if IsAuthorize(o.Services) {
		if err := collect(o.GetInternalAuthorizeURLs()); err != nil {
			return nil, err
		}
	}
	// databroker urls
	if IsAll(o.Services) {
		if err := collect(o.GetDataBrokerURLs()); err != nil {
			return nil, err
		}
	} else if IsDataBroker(o.Services) {
		if err := collect(o.GetInternalDataBrokerURLs()); err != nil {
			return nil, err
		}
	}
	return hosts.ToSlice(), nil
}
// GetAllRouteableHTTPHosts returns all the possible HTTP hosts handled by the Pomerium options.
func (o *Options) GetAllRouteableHTTPHosts() ([]string, error) {
	hosts := sets.NewSorted[string]()
	// authenticate urls (internal and external)
	if IsAuthenticate(o.Services) {
		internalURL, err := o.GetInternalAuthenticateURL()
		if err != nil {
			return nil, err
		}
		hosts.Add(urlutil.GetDomainsForURL(internalURL)...)
		externalURL, err := o.GetAuthenticateURL()
		if err != nil {
			return nil, err
		}
		hosts.Add(urlutil.GetDomainsForURL(externalURL)...)
	}
	// policy urls
	if IsProxy(o.Services) {
		for _, policy := range o.GetAllPolicies() {
			fromURL, err := urlutil.ParseAndValidateURL(policy.From)
			if err != nil {
				return nil, err
			}
			hosts.Add(urlutil.GetDomainsForURL(fromURL)...)
			// A TLS server-name override adds an extra routable host.
			if policy.TLSDownstreamServerName != "" {
				tlsURL := fromURL.ResolveReference(&url.URL{Host: policy.TLSDownstreamServerName})
				hosts.Add(urlutil.GetDomainsForURL(tlsURL)...)
			}
		}
	}
	return hosts.ToSlice(), nil
}
// GetClientSecret gets the client secret. A configured secret file takes
// precedence over the inline value; a nil receiver yields an empty secret.
func (o *Options) GetClientSecret() (string, error) {
	if o == nil {
		return "", nil
	}
	if o.ClientSecretFile == "" {
		return o.ClientSecret, nil
	}
	contents, err := os.ReadFile(o.ClientSecretFile)
	if err != nil {
		return "", err
	}
	return string(contents), nil
}
// GetCookieSecret gets the decoded cookie secret. A secret file takes
// precedence over the inline value; in all-in-one mode a missing secret falls
// back to the runtime-generated shared key (with a warning).
func (o *Options) GetCookieSecret() ([]byte, error) {
	secret := o.CookieSecret
	if o.CookieSecretFile != "" {
		contents, err := os.ReadFile(o.CookieSecretFile)
		if err != nil {
			return nil, err
		}
		secret = string(contents)
	}
	if secret == "" && IsAll(o.Services) {
		log.WarnCookieSecret()
		secret = randomSharedKey
	}
	if secret == "" {
		return nil, errors.New("empty cookie secret")
	}
	return base64.StdEncoding.DecodeString(secret)
}
// GetCookieSameSite gets the cookie same site option, case-insensitively.
// Unrecognized values map to the browser default mode.
func (o *Options) GetCookieSameSite() http.SameSite {
	switch strings.ToLower(o.CookieSameSite) {
	case "strict":
		return http.SameSiteStrictMode
	case "lax":
		return http.SameSiteLaxMode
	case "none":
		return http.SameSiteNoneMode
	default:
		return http.SameSiteDefaultMode
	}
}
// GetCSRFSameSite gets the csrf same site option, mirroring GetCookieSameSite
// except for the Apple provider special case.
func (o *Options) GetCSRFSameSite() csrf.SameSiteMode {
	if o.Provider == apple.Name {
		// csrf.SameSiteLaxMode will cause browsers to reset
		// the session on POST. This breaks Appleid being able
		// to verify the csrf token.
		return csrf.SameSiteNoneMode
	}
	switch strings.ToLower(o.CookieSameSite) {
	case "strict":
		return csrf.SameSiteStrictMode
	case "lax":
		return csrf.SameSiteLaxMode
	case "none":
		return csrf.SameSiteNoneMode
	default:
		return csrf.SameSiteDefaultMode
	}
}
// GetSigningKey gets the signing key. A configured key file takes precedence
// over the inline value; the material may be base64-encoded or raw.
func (o *Options) GetSigningKey() ([]byte, error) {
	if o == nil {
		return nil, nil
	}
	raw := o.SigningKey
	if o.SigningKeyFile != "" {
		contents, err := os.ReadFile(o.SigningKeyFile)
		if err != nil {
			return nil, err
		}
		raw = string(contents)
	}
	raw = strings.TrimSpace(raw)
	// Accept base64 when it decodes cleanly, otherwise treat as raw bytes.
	if decoded, err := base64.StdEncoding.DecodeString(raw); err == nil {
		return decoded, nil
	}
	return []byte(raw), nil
}
// GetAccessLogFields returns the access log fields. If none are set, the default fields are returned.
func (o *Options) GetAccessLogFields() []log.AccessLogField {
	if fields := o.AccessLogFields; fields != nil {
		return fields
	}
	return log.DefaultAccessLogFields()
}
// GetAuthorizeLogFields returns the authorize log fields. If none are set, the default fields are returned.
func (o *Options) GetAuthorizeLogFields() []log.AuthorizeLogField {
	if fields := o.AuthorizeLogFields; fields != nil {
		return fields
	}
	return log.DefaultAuthorizeLogFields()
}
// NewCookie creates a new Cookie from the configured cookie options. The
// expiry is computed relative to the current time.
func (o *Options) NewCookie() *http.Cookie {
	return &http.Cookie{
		Name:     o.CookieName,
		Domain:   o.CookieDomain,
		Expires:  time.Now().Add(o.CookieExpire),
		Secure:   o.CookieSecure,
		SameSite: o.GetCookieSameSite(),
		HttpOnly: o.CookieHTTPOnly,
	}
}
// Checksum returns the checksum of the current options struct
// (used to detect configuration changes; any field change should produce a
// different value).
func (o *Options) Checksum() uint64 {
	return hashutil.MustHash(o)
}
// applyExternalCerts appends databroker-supplied certificates to
// o.CertificateFiles as base64-encoded entries. Certificates that fail to
// parse, or whose domains overlap an already-indexed local certificate, are
// logged and skipped.
func (o *Options) applyExternalCerts(ctx context.Context, certsIndex *cryptutil.CertificatesIndex, certs []*config.Settings_Certificate) {
	for _, c := range certs {
		cfp := certificateFilePair{}
		cfp.CertFile = base64.StdEncoding.EncodeToString(c.CertBytes)
		cfp.KeyFile = base64.StdEncoding.EncodeToString(c.KeyBytes)
		cert, err := cryptutil.ParsePEMCertificateFromBase64(cfp.CertFile)
		if err != nil {
			log.Error(ctx).Err(err).Msg("parsing cert from databroker: skipped")
			continue
		}
		if overlaps, name := certsIndex.OverlapsWithExistingCertificate(cert); overlaps {
			// err is guaranteed nil here (parse succeeded above), so do not
			// attach a stale .Err(err) to this log event.
			log.Error(ctx).Str("domain", name).Msg("overlaps with local certs: skipped")
			continue
		}
		o.CertificateFiles = append(o.CertificateFiles, cfp)
	}
}
// ApplySettings modifies the config options using the given protobuf settings.
// Each set* helper only overwrites the destination when the corresponding
// protobuf field is present (non-nil / non-empty), so unset settings leave
// the existing option values untouched. A nil settings is a no-op.
func (o *Options) ApplySettings(ctx context.Context, certsIndex *cryptutil.CertificatesIndex, settings *config.Settings) {
	if settings == nil {
		return
	}
	// general / logging
	set(&o.InstallationID, settings.InstallationId)
	set(&o.Debug, settings.Debug)
	setLogLevel(&o.LogLevel, settings.LogLevel)
	setAccessLogFields(&o.AccessLogFields, settings.AccessLogFields)
	setAuthorizeLogFields(&o.AuthorizeLogFields, settings.AuthorizeLogFields)
	setLogLevel(&o.ProxyLogLevel, settings.ProxyLogLevel)
	set(&o.SharedKey, settings.SharedSecret)
	set(&o.Services, settings.Services)
	set(&o.Addr, settings.Address)
	set(&o.InsecureServer, settings.InsecureServer)
	set(&o.DNSLookupFamily, settings.DnsLookupFamily)
	// certificates supplied via the databroker
	o.applyExternalCerts(ctx, certsIndex, settings.GetCertificates())
	set(&o.HTTPRedirectAddr, settings.HttpRedirectAddr)
	setDuration(&o.ReadTimeout, settings.TimeoutRead)
	setDuration(&o.WriteTimeout, settings.TimeoutWrite)
	setDuration(&o.IdleTimeout, settings.TimeoutIdle)
	// authenticate / authorize service endpoints
	set(&o.AuthenticateURLString, settings.AuthenticateServiceUrl)
	set(&o.AuthenticateInternalURLString, settings.AuthenticateInternalServiceUrl)
	set(&o.SignOutRedirectURLString, settings.SignoutRedirectUrl)
	set(&o.AuthenticateCallbackPath, settings.AuthenticateCallbackPath)
	// cookie configuration
	set(&o.CookieName, settings.CookieName)
	set(&o.CookieSecret, settings.CookieSecret)
	set(&o.CookieDomain, settings.CookieDomain)
	set(&o.CookieSecure, settings.CookieSecure)
	set(&o.CookieHTTPOnly, settings.CookieHttpOnly)
	setDuration(&o.CookieExpire, settings.CookieExpire)
	set(&o.CookieSameSite, settings.CookieSameSite)
	// identity provider
	set(&o.ClientID, settings.IdpClientId)
	set(&o.ClientSecret, settings.IdpClientSecret)
	set(&o.Provider, settings.IdpProvider)
	set(&o.ProviderURL, settings.IdpProviderUrl)
	setSlice(&o.Scopes, settings.Scopes)
	setMap(&o.RequestParams, settings.RequestParams)
	setSlice(&o.AuthorizeURLStrings, settings.AuthorizeServiceUrls)
	set(&o.AuthorizeInternalURLString, settings.AuthorizeInternalServiceUrl)
	set(&o.OverrideCertificateName, settings.OverrideCertificateName)
	set(&o.CA, settings.CertificateAuthority)
	setOptional(&o.DeriveInternalDomainCert, settings.DeriveTls)
	set(&o.SigningKey, settings.SigningKey)
	setMap(&o.SetResponseHeaders, settings.SetResponseHeaders)
	setMap(&o.JWTClaimsHeaders, settings.JwtClaimsHeaders)
	setDuration(&o.DefaultUpstreamTimeout, settings.DefaultUpstreamTimeout)
	// metrics
	set(&o.MetricsAddr, settings.MetricsAddress)
	set(&o.MetricsBasicAuth, settings.MetricsBasicAuth)
	setCertificate(&o.MetricsCertificate, &o.MetricsCertificateKey, settings.MetricsCertificate)
	set(&o.MetricsClientCA, settings.MetricsClientCa)
	// tracing
	set(&o.TracingProvider, settings.TracingProvider)
	set(&o.TracingSampleRate, settings.TracingSampleRate)
	set(&o.TracingDatadogAddress, settings.TracingDatadogAddress)
	set(&o.TracingJaegerCollectorEndpoint, settings.TracingJaegerCollectorEndpoint)
	set(&o.TracingJaegerAgentEndpoint, settings.TracingJaegerAgentEndpoint)
	set(&o.ZipkinEndpoint, settings.TracingZipkinEndpoint)
	// gRPC / databroker
	set(&o.GRPCAddr, settings.GrpcAddress)
	setOptional(&o.GRPCInsecure, settings.GrpcInsecure)
	setDuration(&o.GRPCClientTimeout, settings.GrpcClientTimeout)
	set(&o.GRPCClientDNSRoundRobin, settings.GrpcClientDnsRoundrobin)
	setSlice(&o.DataBrokerURLStrings, settings.DatabrokerServiceUrls)
	set(&o.DataBrokerInternalURLString, settings.DatabrokerInternalServiceUrl)
	set(&o.DataBrokerStorageType, settings.DatabrokerStorageType)
	set(&o.DataBrokerStorageConnectionString, settings.DatabrokerStorageConnectionString)
	set(&o.DataBrokerStorageCertSkipVerify, settings.DatabrokerStorageTlsSkipVerify)
	o.DownstreamMTLS.applySettingsProto(ctx, settings.DownstreamMtls)
	set(&o.GoogleCloudServerlessAuthenticationServiceAccount, settings.GoogleCloudServerlessAuthenticationServiceAccount)
	set(&o.UseProxyProtocol, settings.UseProxyProtocol)
	// autocert
	set(&o.AutocertOptions.Enable, settings.Autocert)
	set(&o.AutocertOptions.CA, settings.AutocertCa)
	set(&o.AutocertOptions.Email, settings.AutocertEmail)
	set(&o.AutocertOptions.EABKeyID, settings.AutocertEabKeyId)
	set(&o.AutocertOptions.EABMACKey, settings.AutocertEabMacKey)
	set(&o.AutocertOptions.UseStaging, settings.AutocertUseStaging)
	set(&o.AutocertOptions.MustStaple, settings.AutocertMustStaple)
	set(&o.AutocertOptions.Folder, settings.AutocertDir)
	set(&o.AutocertOptions.TrustedCA, settings.AutocertTrustedCa)
	set(&o.SkipXffAppend, settings.SkipXffAppend)
	set(&o.XffNumTrustedHops, settings.XffNumTrustedHops)
	setSlice(&o.ProgrammaticRedirectDomainWhitelist, settings.ProgrammaticRedirectDomainWhitelist)
	setAuditKey(&o.AuditKey, settings.AuditKey)
	setCodecType(&o.CodecType, settings.CodecType)
	// the settings proto itself doubles as the branding configuration
	o.BrandingOptions = settings
}
// dataDir returns the directory for persistent pomerium data:
// $XDG_DATA_HOME/pomerium when XDG_DATA_HOME is set, otherwise
// <home>/.local/share/pomerium, with "." standing in when the home
// directory cannot be determined.
func dataDir() string {
	if xdg := os.Getenv("XDG_DATA_HOME"); xdg != "" {
		return filepath.Join(xdg, "pomerium")
	}
	home, _ := os.UserHomeDir()
	if home == "" {
		home = "."
	}
	return filepath.Join(home, ".local", "share", "pomerium")
}
// compareByteSliceSlice lexicographically compares two slices of byte
// slices, returning -1 if a < b, 1 if a > b, and 0 if they are equal.
// A shorter slice that is a prefix of the longer one compares smaller.
func compareByteSliceSlice(a, b [][]byte) int {
	for i := 0; i < len(a) && i < len(b); i++ {
		if c := bytes.Compare(a[i], b[i]); c != 0 {
			return c
		}
	}
	switch {
	case len(a) < len(b):
		return -1
	case len(a) > len(b):
		return 1
	}
	return 0
}
// min returns the smaller of x and y.
func min(x, y int) int {
	if y < x {
		return y
	}
	return x
}
// NewAtomicOptions creates a new AtomicOptions: an atomically swappable
// holder seeded with a zero-value *Options.
func NewAtomicOptions() *atomicutil.Value[*Options] {
	return atomicutil.NewValue(new(Options))
}
// set copies *src into *dst; a nil src leaves *dst untouched.
func set[T any](dst, src *T) {
	if src != nil {
		*dst = *src
	}
}
// setAccessLogFields replaces *dst with the values from src, each
// converted to log.AccessLogField. A nil src leaves *dst untouched.
func setAccessLogFields(dst *[]log.AccessLogField, src *config.Settings_StringList) {
	if src == nil {
		return
	}
	fields := make([]log.AccessLogField, 0, len(src.Values))
	for _, v := range src.Values {
		fields = append(fields, log.AccessLogField(v))
	}
	*dst = fields
}
// setAuthorizeLogFields replaces *dst with the values from src, each
// converted to log.AuthorizeLogField. A nil src leaves *dst untouched.
func setAuthorizeLogFields(dst *[]log.AuthorizeLogField, src *config.Settings_StringList) {
	if src == nil {
		return
	}
	fields := make([]log.AuthorizeLogField, 0, len(src.Values))
	for _, v := range src.Values {
		fields = append(fields, log.AuthorizeLogField(v))
	}
	*dst = fields
}
// setAuditKey converts src into a PublicKeyEncryptionKeyOptions (key data
// base64-encoded) and stores it in *dst. A nil src leaves *dst untouched.
func setAuditKey(dst **PublicKeyEncryptionKeyOptions, src *crypt.PublicKeyEncryptionKey) {
	if src == nil {
		return
	}
	*dst = &PublicKeyEncryptionKeyOptions{
		ID: src.GetId(),
		Data: base64.StdEncoding.EncodeToString(src.GetData()),
	}
}
// setCodecType converts the envoy codec enum into the local CodecType and
// stores it in *dst. A nil src leaves *dst untouched.
func setCodecType(dst *CodecType, src *envoy_http_connection_manager.HttpConnectionManager_CodecType) {
	if src == nil {
		return
	}
	*dst = CodecTypeFromEnvoy(*src)
}
// setDuration converts the protobuf duration into a time.Duration and
// stores it in *dst. A nil src leaves *dst untouched.
func setDuration(dst *time.Duration, src *durationpb.Duration) {
	if src == nil {
		return
	}
	*dst = src.AsDuration()
}
// setLogLevel converts *src into a LogLevel and stores it in *dst.
// A nil src leaves *dst untouched. No validation is performed here.
func setLogLevel(dst *LogLevel, src *string) {
	if src == nil {
		return
	}
	*dst = LogLevel(*src)
}
// setOptional stores a pointer to a copy of *src into *dst, so later
// mutation of *src does not affect *dst. A nil src leaves *dst untouched.
func setOptional[T any](dst **T, src *T) {
	if src != nil {
		copied := *src
		*dst = &copied
	}
}
// setSlice replaces *dst with src unless src is empty, in which case
// *dst is left untouched. Note: *dst aliases src afterwards.
func setSlice[T any](dst *[]T, src []T) {
	if len(src) > 0 {
		*dst = src
	}
}
// setMap replaces *dst with src unless src is empty, in which case *dst
// is left untouched. Note: *dst aliases src afterwards.
func setMap[TKey comparable, TValue any, TMap ~map[TKey]TValue](dst *TMap, src map[TKey]TValue) {
	if len(src) > 0 {
		*dst = src
	}
}
// setCertificate stores the base64 encodings of the certificate and key
// bytes from src into the two destinations, skipping whichever is empty.
// A nil src leaves both untouched.
func setCertificate(
	dstCertificate *string,
	dstCertificateKey *string,
	src *config.Settings_Certificate,
) {
	if src == nil {
		return
	}
	if cert := src.GetCertBytes(); len(cert) > 0 {
		*dstCertificate = base64.StdEncoding.EncodeToString(cert)
	}
	if key := src.GetKeyBytes(); len(key) > 0 {
		*dstCertificateKey = base64.StdEncoding.EncodeToString(key)
	}
}
|
package metadata
import (
"context"
"github.com/appootb/substratum/proto/go/common"
"google.golang.org/grpc"
)
type metadataKey struct{}
// UnaryServerInterceptor returns a new unary server interceptor for parsing request metadata.
// The parsed metadata is attached to the handler's context under metadataKey,
// where RequestMetadata can retrieve it.
func UnaryServerInterceptor() grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		md := ParseIncomingMetadata(ctx)
		return handler(context.WithValue(ctx, metadataKey{}, md), req)
	}
}
// StreamServerInterceptor returns a new streaming server interceptor for parsing request metadata.
// The stream is wrapped in ctxWrapper so that its Context() carries the
// parsed metadata.
func StreamServerInterceptor() grpc.StreamServerInterceptor {
	return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		wrapper := &ctxWrapper{stream}
		return handler(srv, wrapper)
	}
}
// ctxWrapper decorates a grpc.ServerStream so its Context carries the
// parsed request metadata.
type ctxWrapper struct {
	grpc.ServerStream
}
// Context returns the underlying stream's context augmented with the
// parsed request metadata. Note: metadata is re-parsed on every call.
func (s *ctxWrapper) Context() context.Context {
	ctx := s.ServerStream.Context()
	md := ParseIncomingMetadata(ctx)
	return context.WithValue(ctx, metadataKey{}, md)
}
// ContextWithProduct returns a context whose stored metadata contains only
// the given product name, overwriting any previously attached metadata.
func ContextWithProduct(ctx context.Context, product string) context.Context {
	return context.WithValue(ctx, metadataKey{}, &common.Metadata{
		Product: product,
	})
}
// RequestMetadata returns the request metadata previously stored in ctx
// by one of this package's interceptors (or ContextWithProduct), or nil
// when none is present.
func RequestMetadata(ctx context.Context) *common.Metadata {
	v := ctx.Value(metadataKey{})
	if v == nil {
		return nil
	}
	return v.(*common.Metadata)
}
|
package main
import (
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/gui"
"github.com/therecipe/qt/quick"
)
// init registers SomeComponentInternal with the QML type system as
// "SomeComponent" in module "CustomComponentsInternal" version 1.0.
// The registration helper is generated by the therecipe/qt tooling.
func init() {
	SomeComponentInternal_QmlRegisterType2("CustomComponentsInternal", 1, 0, "SomeComponent")
}
// SomeComponentInternal is a custom QML painted item exposing a "color"
// property. The blank tagged fields are directives for the therecipe/qt
// code generator: `constructor:"init"` wires the init method, and
// `property:"color"` generates Color/SetColor accessors.
type SomeComponentInternal struct {
	quick.QQuickPaintedItem
	_ func() `constructor:"init"`
	_ *gui.QColor `property:"color"`
}
// init sets the initial 200x200 size, a red default color, and connects
// the paint callback. Called by the generated constructor (see struct tag).
func (p *SomeComponentInternal) init() {
	p.SetWidth(200)
	p.SetHeight(200)
	p.SetColor(gui.NewQColor2(core.Qt__red))
	p.ConnectPaint(p.paint)
}
// paint fills the item's full rectangle with the current color property,
// using a 2px pen of the same color and antialiased rendering.
func (p *SomeComponentInternal) paint(painter *gui.QPainter) {
	pen := gui.NewQPen3(p.Color())
	pen.SetWidth(2)
	painter.SetPen(pen)
	painter.SetRenderHints(gui.QPainter__Antialiasing, true)
	painter.FillRect5(0, 0, int(p.Width()), int(p.Height()), p.Color())
}
|
package engine_test
import (
"reflect"
"runtime"
"testing"
"github.com/GoPex/caretaker/controllers"
"github.com/GoPex/caretaker/engine"
"github.com/GoPex/caretaker/tests"
)
// expectedRoute describes one route the engine is expected to expose,
// used by the table-driven test in TestRoutes.
type expectedRoute struct {
	method string // HTTP method, e.g. "GET"
	path string // route path, e.g. "/ping"
	handler interface{} // expected handler function
}
// expectedRoutes is the table of routes the application must define,
// checked one by one in TestRoutes.
var (
	expectedRoutes = []expectedRoute{
		{"GET", "/ping", controllers.GetPing},
		{"GET", "/info/status", controllers.GetStatus},
		{"GET", "/info/version", controllers.GetVersion},
	}
)
// Function that return the name of given function in string
func nameOfFunction(f interface{}) string {
return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
}
// TestInitialize verifies that a fresh application instance initializes
// without error using the shared test configuration.
func TestInitialize(t *testing.T) {
	// Create an instance of the application
	unleash := engine.New()
	// Test the Initialize function
	if err := unleash.Initialize(&tests.UnleashConfigTest); err != nil {
		t.Errorf("Cannot initialize the application, cause: %s !", err.Error())
	}
}
// TestRoutes verifies each expected route (method, path, handler) is
// registered by the application engine.
func TestRoutes(t *testing.T) {
	// Create an instance of the application
	unleash := engine.New()
	// Get the routes defined by the application
	routesInfo := unleash.Engine.Routes()
	// Check each expected route's method, path and handler.
	for _, expected := range expectedRoutes {
		// found is scoped per expected route (previously hoisted outside
		// the loop for no reason).
		found := false
		for _, route := range routesInfo {
			if expected.method == route.Method && expected.path == route.Path {
				found = true
				// Compare and report handler names; formatting the raw func
				// value with %s previously printed "%!s(func...)" garbage.
				if nameOfFunction(expected.handler) != route.Handler {
					t.Errorf("Route handler does not match for %s %s, expected %s, actual %s", expected.method, expected.path, nameOfFunction(expected.handler), route.Handler)
				}
			}
		}
		if !found {
			t.Errorf("No route found for %s %s !", expected.method, expected.path)
		}
	}
}
|
package repository
import "github.com/alexgtn/esi2021-lab5/pkg/domain"
// StudentFooRepository is an in-memory student store backed by a slice.
// It is not safe for concurrent use.
type StudentFooRepository struct {
	data []*domain.Student
}
// NewStudentFooRepostory creates an empty in-memory student repository.
// NOTE(review): the name is missing the "i" in "Repository"; renaming the
// exported function would break callers, so it is left as-is.
func NewStudentFooRepostory() *StudentFooRepository {
	return &StudentFooRepository{
		data: []*domain.Student{},
	}
}
// AddStudent appends s to the repository and returns it. The error is
// always nil in this implementation.
func (r *StudentFooRepository) AddStudent(s *domain.Student) (*domain.Student, error) {
	r.data = append(r.data, s)
	return s, nil
}
// GetAllStudents returns the backing slice directly (callers share it;
// mutations are visible to the repository). The error is always nil.
func (r *StudentFooRepository) GetAllStudents() ([]*domain.Student, error) {
	return r.data, nil
}
|
package cryptoki
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"encoding/asn1"
"errors"
"github.com/miekg/pkcs11"
)
// curveOIDs maps the supported named curves to their ASN.1 object
// identifiers (RFC 5480 2.1.1.1). Element types are elided per gofmt -s.
var curveOIDs = map[elliptic.Curve]asn1.ObjectIdentifier{
	elliptic.P224(): {1, 3, 132, 0, 33},
	elliptic.P256(): {1, 2, 840, 10045, 3, 1, 7},
	elliptic.P384(): {1, 3, 132, 0, 34},
	elliptic.P521(): {1, 3, 132, 0, 35},
}
// ecdsaKeyRequest contains parameters for generating ECDSA key pairs:
// the shared generic request (label) plus the chosen named curve.
type ecdsaKeyRequest struct {
	*genericKeyRequest
	curve elliptic.Curve
}
// newECDSAKeyRequest returns an ECDSA key request for the given label and
// key size; size must be one of 224, 256, 384 or 521 (bits), otherwise an
// "unknown elliptic curve" error is returned.
func newECDSAKeyRequest(label string, size int) (*ecdsaKeyRequest, error) {
	var curve elliptic.Curve
	switch size {
	case 224:
		curve = elliptic.P224()
	case 256:
		curve = elliptic.P256()
	case 384:
		curve = elliptic.P384()
	case 521:
		curve = elliptic.P521()
	default:
		return nil, errors.New("unknown elliptic curve")
	}
	return &ecdsaKeyRequest{&genericKeyRequest{label}, curve}, nil
}
// Algo returns the requested key algorithm, "ecdsa", as a string.
// ECDSA is a constant declared elsewhere in this package (not visible here).
func (kr *ecdsaKeyRequest) Algo() string {
	return ECDSA
}
// Size returns the requested key size in bits, taken from the named
// curve's parameters.
func (kr *ecdsaKeyRequest) Size() int {
	return kr.curve.Params().BitSize
}
// Mechanisms returns a list of PKCS#11 mechanisms for generating an
// ECDSA key pair (CKM_EC_KEY_PAIR_GEN with no parameters).
func (kr *ecdsaKeyRequest) Mechanisms() []*pkcs11.Mechanism {
	return []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_EC_KEY_PAIR_GEN, nil)}
}
// PublicAttrs returns the PKCS#11 public key object attributes for the
// ECDSA key request (PKCS #11-M1 6.3.3).
func (kr *ecdsaKeyRequest) PublicAttrs() []*pkcs11.Attribute {
	// The marshal error is ignored: kr.curve is always one of the named
	// curves in curveOIDs, whose OIDs marshal without error.
	ecParams, _ := asn1.Marshal(curveOIDs[kr.curve])
	return append(kr.genericKeyRequest.PublicAttrs(),
		pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, ecParams),
	)
}
// ecdsaPublicKey represents an ECDSA public key in its PKCS#11 attribute
// form: the DER-encoded curve OID and the DER-encoded EC point.
type ecdsaPublicKey struct {
	ecParams []byte
	ecPoint []byte
}
// newECDSAPublicKey converts a crypto/ecdsa public key into its PKCS#11
// attribute representation. Keys on curves not listed in curveOIDs are
// rejected.
func newECDSAPublicKey(key *ecdsa.PublicKey) (*ecdsaPublicKey, error) {
	oid, known := curveOIDs[key.Curve]
	if !known {
		return nil, errors.New("unknown elliptic curve")
	}
	// CKA_EC_PARAMS is the DER encoding of an ANSI X9.62 Parameters value.
	params, err := asn1.Marshal(oid)
	if err != nil {
		return nil, err
	}
	// CKA_EC_POINT is the DER encoding of the ANSI X9.62 ECPoint value Q,
	// wrapped in an OCTET STRING.
	point, err := asn1.Marshal(asn1.RawValue{
		Tag:   asn1.TagOctetString,
		Bytes: elliptic.Marshal(key.Curve, key.X, key.Y),
	})
	if err != nil {
		return nil, err
	}
	return &ecdsaPublicKey{params, point}, nil
}
// Attrs returns the PKCS#11 public key object attributes for the ECDSA
// public key: object class, key type, curve parameters and EC point.
func (key *ecdsaPublicKey) Attrs() []*pkcs11.Attribute {
	return []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY),
		pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_EC),
		pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, key.ecParams),
		pkcs11.NewAttribute(pkcs11.CKA_EC_POINT, key.ecPoint),
	}
}
// CryptoKey recreates the crypto.PublicKey from the stored PKCS#11
// EC params (curve OID) and EC point attributes.
func (key *ecdsaPublicKey) CryptoKey() (crypto.PublicKey, error) {
	if key.ecParams == nil || key.ecPoint == nil {
		return nil, errors.New("invalid ecdsaPublicKey")
	}
	var curveOID asn1.ObjectIdentifier
	_, err := asn1.Unmarshal(key.ecParams, &curveOID)
	if err != nil {
		return nil, err
	}
	// Map the OID back to its named curve.
	var curve elliptic.Curve
	for c, oid := range curveOIDs {
		if curveOID.Equal(oid) {
			curve = c
			break
		}
	}
	if curve == nil {
		return nil, errors.New("invalid EC params")
	}
	var ecPoint asn1.RawValue
	_, err = asn1.Unmarshal(key.ecPoint, &ecPoint)
	if err != nil {
		return nil, err
	}
	x, y := elliptic.Unmarshal(curve, ecPoint.Bytes)
	if x == nil || y == nil {
		return nil, errors.New("invalid EC point")
	}
	// Keyed fields: the previous unkeyed literal trips `go vet` (composites)
	// and would silently break if ecdsa.PublicKey ever changed shape.
	return &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, nil
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates several ways of emitting the terminal bell ('\a'),
// pausing between attempts so each ring is distinguishable.
func main() {
	fmt.Printf("%c", '\a')
	time.Sleep(3 * time.Second)
	fmt.Printf("%s", "\a")
	time.Sleep(3 * time.Second)
	fmt.Print("\a")
	time.Sleep(3 * time.Second)
	// NOTE(review): fmt.Print of a bare rune prints its integer value "7",
	// not the bell character — presumably intentional for the demo; confirm.
	fmt.Print('\a')
	// Running directly from an IDE does not ring the bell; it only rings
	// when run in a real console. (Translated from the original comment.)
}
|
package pred
import floc "gopkg.in/workanator/go-floc.v1"
// alwaysTrue is a predicate that returns true regardless of state.
func alwaysTrue(state floc.State) bool {
	return true
}
// alwaysFalse is a predicate that returns false regardless of state.
func alwaysFalse(state floc.State) bool {
	return false
}
|
package cfb
import (
"crypto/cipher"
"encoding/hex"
)
// Decrypt decrypts the hex-encoded ciphertext str using a CTR stream
// built from the model's block cipher and IV, then strips trailing zero
// padding from the recovered plaintext.
func (b BuildModel) Decrypt(str string) string {
	// // TODO: should fix auto read from stream
	// encrypted, _ := hex.DecodeString(str)
	// bReader := bytes.NewReader(encrypted)
	// stream := cipher.NewOFB(b.block, b.iv)
	// reader := &cipher.StreamReader{S: stream, R: bReader}
	// buf := new(bytes.Buffer)
	// buf.ReadFrom(reader)
	// return buf.String()
	//------------------------- different method with CTR specific length
	// Hex decode errors are ignored; invalid input yields a short buffer.
	encrypt, _ := hex.DecodeString(str)
	// The output buffer is sized by the model's fixed length, not by the
	// ciphertext; unused tail bytes come out as zeros and are trimmed.
	result := make([]byte, b.length)
	stream := cipher.NewCTR(b.block, b.iv)
	stream.XORKeyStream(result, encrypt)
	result = trimResult(result)
	return string(result)
}
// trimResult strips trailing zero (padding) bytes from data and returns
// the shortened slice.
func trimResult(data []byte) []byte {
	// The previous version counted EVERY zero byte in the slice — including
	// interior zeros that are legitimate plaintext — and then cut that many
	// bytes off the end, chopping valid data. Only trailing zeros count.
	end := len(data)
	for end > 0 && data[end-1] == 0 {
		end--
	}
	return data[:end]
}
|
package demo
import (
"github.com/apache/thrift/lib/go/thrift"
"github.com/go-xe2/x/type/t"
"github.com/go-xe2/xthrift/pdl"
)
// HelloServiceSayHelloArgs is the dynamic thrift argument struct for
// HelloService.SayHello: an optional string "name" (field id 1) and a
// required int32 "age" (field id 2).
type HelloServiceSayHelloArgs struct {
	*pdl.TDynamicStructBase
	Name *string `thrift:"name,1,optional" json:"name"`
	Age int32 `thrift:"age,2,required" json:"age"`
	fieldNameMaps map[string]string // maps accepted spellings to canonical field names
	fields map[string]*pdl.TStructFieldInfo // field metadata keyed by canonical name
}
// Compile-time checks that HelloServiceSayHelloArgs implements both interfaces.
var _ pdl.DynamicStruct = (*HelloServiceSayHelloArgs)(nil)
var _ thrift.TStruct = (*HelloServiceSayHelloArgs)(nil)
// NewHelloServiceSayHelloArgs constructs an initialized argument struct:
// the dynamic base is wired to the instance and the field maps are
// populated via init.
func NewHelloServiceSayHelloArgs() *HelloServiceSayHelloArgs {
	inst := &HelloServiceSayHelloArgs{
		fieldNameMaps: make(map[string]string),
		fields: make(map[string]*pdl.TStructFieldInfo),
	}
	inst.TDynamicStructBase = pdl.NewBasicStruct(inst)
	return inst.init()
}
// init registers the name mappings (both cased spellings) and field
// metadata, including setter callbacks used by the dynamic struct layer.
func (p *HelloServiceSayHelloArgs) init() *HelloServiceSayHelloArgs {
	p.fieldNameMaps["Name"] = "Name"
	p.fieldNameMaps["name"] = "Name"
	// Field 1: optional string "name"; setter coerces any value to string.
	p.fields["Name"] = pdl.NewStructFieldInfo(1, thrift.STRING, func(obj pdl.DynamicStruct, val interface{}) bool {
		thisObj := obj.(*HelloServiceSayHelloArgs)
		s := t.String(val)
		thisObj.Name = &s
		return true
	})
	p.fieldNameMaps["Age"] = "Age"
	p.fieldNameMaps["age"] = "Age"
	// Field 2: required int32 "age"; setter coerces any value to int32.
	p.fields["Age"] = pdl.NewStructFieldInfo(2, thrift.I32, func(obj pdl.DynamicStruct, val interface{}) bool {
		thisObj := obj.(*HelloServiceSayHelloArgs)
		n32 := t.Int32(val)
		thisObj.Age = n32
		return true
	})
	return p
}
// Read deserializes the struct from the thrift protocol. Fields are
// matched by id when available (fdId > 0), otherwise by name; fields with
// a mismatched wire type, and unknown fields, are skipped.
func (p *HelloServiceSayHelloArgs) Read(in thrift.TProtocol) error {
	_, err := in.ReadStructBegin()
	if err != nil {
		return err
	}
	var nMaxLoop = 512
	nLoop := 0
	var isMatch bool
	for {
		// Guard against corrupt protocol data: bail out if no thrift.STOP
		// is seen within nMaxLoop field reads. (Translated from the
		// original Chinese comment.)
		nLoop++
		if nLoop >= nMaxLoop {
			_ = in.Skip(thrift.STRUCT)
			return nil
		}
		isMatch = false
		fdName, fdType, fdId, err := in.ReadFieldBegin()
		if err != nil {
			return err
		}
		if fdType == thrift.STOP {
			break
		}
		if fdType == thrift.VOID {
			if err := in.ReadFieldEnd(); err != nil {
				return err
			}
			continue
		}
		// Field 1 / "name": optional string.
		if (fdId > 0 && fdId == 1) || (fdId <= 0 && fdName == "name") {
			if fdId > 0 && fdType != thrift.STRING {
				// Wrong wire type: skip the value and move on.
				if err := in.Skip(fdType); err != nil {
					return err
				}
				if err := in.ReadFieldEnd(); err != nil {
					return err
				}
				continue
			}
			isMatch = true
			s, err := in.ReadString()
			if err != nil {
				return err
			}
			p.Name = &s
		}
		// Field 2 / "age": required int32.
		if (fdId > 0 && fdId == 2) || (fdId <= 0 && fdName == "age") {
			if fdId > 0 && fdType != thrift.I32 {
				if err := in.Skip(fdType); err != nil {
					return err
				}
				if err := in.ReadFieldEnd(); err != nil {
					return err
				}
				continue
			}
			isMatch = true
			n, err := in.ReadI32()
			if err != nil {
				return err
			}
			p.Age = n
		}
		// Unknown field: skip its value.
		if !isMatch {
			if err := in.Skip(fdType); err != nil {
				return err
			}
		}
		if err := in.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := in.ReadStructEnd(); err != nil {
		return err
	}
	return nil
}
// Write serializes the struct to the thrift protocol: "name" (field 1)
// only when set, then "age" (field 2) unconditionally, then the stop
// marker.
func (p *HelloServiceSayHelloArgs) Write(out thrift.TProtocol) error {
	if err := out.WriteStructBegin("hello_service_say_hello_args"); err != nil {
		return err
	}
	// Optional field: emitted only when present.
	if p.Name != nil {
		if err := out.WriteFieldBegin("name", thrift.STRING, 1); err != nil {
			return err
		}
		if err := out.WriteString(*p.Name); err != nil {
			return err
		}
		if err := out.WriteFieldEnd(); err != nil {
			return err
		}
	}
	if err := out.WriteFieldBegin("age", thrift.I32, 2); err != nil {
		return err
	}
	if err := out.WriteI32(p.Age); err != nil {
		return err
	}
	if err := out.WriteFieldEnd(); err != nil {
		return err
	}
	if err := out.WriteFieldStop(); err != nil {
		return err
	}
	if err := out.WriteStructEnd(); err != nil {
		return err
	}
	return nil
}
// GetName returns the Name field, or the default empty string when unset.
// (Translated from the original Chinese comment.)
// NOTE(review): this getter mutates p.Name when it is nil, lazily
// initializing it to an empty string.
func (p *HelloServiceSayHelloArgs) GetName() string {
	if p.Name == nil {
		s := ""
		p.Name = &s
	}
	return *p.Name
}
// NewInstance returns a fresh, fully-initialized argument struct;
// part of the pdl.DynamicStruct interface.
func (p *HelloServiceSayHelloArgs) NewInstance() pdl.DynamicStruct {
	return NewHelloServiceSayHelloArgs()
}
// AllFields returns the field metadata map keyed by canonical field name.
func (p *HelloServiceSayHelloArgs) AllFields() map[string]*pdl.TStructFieldInfo {
	return p.fields
}
// FieldNameMaps returns the map from accepted field-name spellings to
// canonical field names.
func (p *HelloServiceSayHelloArgs) FieldNameMaps() map[string]string {
	return p.fieldNameMaps
}
|
package exer9
import (
"math"
"math/rand"
"time"
)
// Random Arrays
// We want to generate some random arrays. In array_stats.go, write a function that creates and returns an array with length integers and values from 0 to maxInt-1 with this signature:
// func RandomArray(length int, maxInt int) []int { … }
// When doing this, create a new random number generator that has a reasonable initial seed
// RandomArray creates and returns a slice of length random integers in
// [0, maxInt), using a freshly seeded local generator (as the exercise
// asks) so repeated calls produce different sequences.
func RandomArray(length int, maxInt int) []int {
	// A local generator replaces the deprecated global rand.Seed plus the
	// 1ns Sleep hack that tried to force distinct seeds between calls.
	rng := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
	arr := make([]int, length)
	for i := range arr {
		arr[i] = rng.Intn(maxInt)
	}
	return arr
}
// Array Summary Stats
// We want to calculate the mean and standard deviation of values in an array/slice. For arrays with values and elements, we will use these formulas:
// In order to calculate these, we need the length of the slice (easy), and . Those are things we can calculate in parallel on a large array.
// We will break the array into some evenly-sized slices and want to calculate the two sums on each of the chunks concurrently in goroutines. Our function will take the array/slice and number of chunks to break the array into:
// func MeanStddev(arr []int, chunks int) (mean, stddev float64) { … }
// You can assume that the length of arr is divisible by chunks: no rounding error to worry about.
// In order to do this, you need to create:
// A struct to hold partial sums as they are generated.
// A channel to communicate these partial sums back to the MeanStddev function.
// A function that can run in a goroutine, calculate the sum and sum of squares on a slice, and send the values back along the channel.
// In MeanStddev, you will need to create chunks goroutines to calculate partial sums. Then receive chunks values from the channel, create the final sums, and return the correct values.
// GenerateSum computes the sum of arr's elements and sends it on results.
// Intended to run as a goroutine from MeanStddev.
func GenerateSum(arr []int, results chan int) {
	total := 0
	for _, v := range arr {
		total += v
	}
	results <- total
}
// GenerateSumSquares computes the sum of squares of arr's elements and
// sends it on results. Intended to run as a goroutine from MeanStddev.
func GenerateSumSquares(arr []int, results chan int) {
	total := 0
	for _, v := range arr {
		total += v * v
	}
	results <- total
}
// Stats carries the channels over which worker goroutines report partial
// sums and partial sums of squares back to MeanStddev.
type Stats struct {
	partialSums, partialSumsSquared chan int
}
// MeanStddev computes the mean and population standard deviation of arr
// by splitting it into chunks equal-size segments and summing each
// (values and squares) in concurrent goroutines.
// It panics when chunks does not evenly divide len(arr), per the
// exercise's stated precondition.
func MeanStddev(arr []int, chunks int) (mean, stddev float64) {
	n := len(arr)
	if n%chunks != 0 {
		panic("You promised that chunks would divide slice size!")
	}
	// Buffered channels so workers never block on send.
	stats := Stats{make(chan int, chunks), make(chan int, chunks)}
	chunkSize := n / chunks
	for i := 0; i < chunks; i++ {
		segment := arr[i*chunkSize : (i+1)*chunkSize]
		go GenerateSum(segment, stats.partialSums)
		go GenerateSumSquares(segment, stats.partialSumsSquared)
	}
	// Collect exactly chunks values from each channel (2*chunks total).
	sum := 0
	sumSquared := 0
	for i := 0; i < chunks*2; i++ {
		select {
		case s := <-stats.partialSums:
			sum += s
		case sq := <-stats.partialSumsSquared:
			sumSquared += sq
		}
	}
	mean = float64(sum) / float64(n)
	// Population stddev: sqrt(E[x^2] - mean^2).
	stddev = math.Sqrt(float64(sumSquared)/float64(n) - mean*mean)
	return mean, stddev
}
|
package repositories_test
import (
"database/sql"
"os"
"testing"
"github.com/syncromatics/kafmesh/internal/storage/repositories"
"github.com/syncromatics/go-kit/database"
"github.com/syncromatics/go-kit/testing/docker"
_ "github.com/syncromatics/kafmesh/internal/storage/statik"
)
// Shared test fixtures, initialized once in TestMain and reused by every
// test in the package.
var (
	databaseSettings *database.PostgresDatabaseSettings
	db *sql.DB
	repos *repositories.AllRepositories
)
// TestMain spins up a disposable Postgres container, runs migrations,
// seeds test data, builds the repositories under test, runs the suite,
// and tears the container down afterwards.
func TestMain(m *testing.M) {
	var err error
	db, databaseSettings, err = docker.SetupPostgresDatabase("repositories_storage")
	if err != nil {
		panic(err)
	}

	err = databaseSettings.MigrateUpWithStatik("/")
	if err != nil {
		panic(err)
	}

	err = seedData(db)
	if err != nil {
		panic(err)
	}

	repos = repositories.All(db)

	result := m.Run()

	// Tear down the same database that was set up above; the previous call
	// passed "storage", leaving the "repositories_storage" container running.
	docker.TeardownPostgresDatabase("repositories_storage")

	os.Exit(result)
}
// seedData populates the freshly migrated schema with a small fixed
// dataset (topics, services, components, processors and their inputs/
// joins/lookups/outputs, sources, views, sinks, view sinks/sources, pods
// and pod associations) that the repository tests assert against.
// The IDs are referenced directly by the tests, so do not renumber them.
func seedData(db *sql.DB) error {
	_, err := db.Exec(`
insert into
	topics
	(id, name, message)
values
	(1, 'topic1', 'topic1.message'),
	(2, 'topic2', 'topic2.message'),
	(3, 'topic3', 'topic3.message'),
	(4, 'topic4', 'topic4.message');
insert into
	services
	(id, name, description)
values
	(1, 'service1', 'service1 description'),
	(2, 'service2', 'service2 description'),
	(3, 'service3', 'service3 description'),
	(4, 'service4', 'service4 description');
insert into
	components
	(id, service, name, description)
values
	(1, 1, 'component1', 'component1 description'),
	(2, 1, 'component2', 'component2 description'),
	(3, 2, 'component3', 'component3 description'),
	(4, 2, 'component4', 'component4 description');
insert into
	processors
	(id, component, name, description, group_name, persistence)
values
	(1, 1, 'processor1', 'processor1 description', 'processor1.group', null),
	(2, 1, 'processor2', 'processor2 description', 'processor2.group', 1),
	(3, 2, 'processor3', 'processor3 description', 'processor3.group', 2),
	(4, 2, 'processor4', 'processor4 description', 'processor4.group', 2);
insert into
	processor_inputs
	(id, processor, topic)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	processor_joins
	(id, processor, topic)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	processor_lookups
	(id, processor, topic)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	processor_outputs
	(id, processor, topic)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	sources
	(id, component, topic)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	views
	(id, component, topic)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2),
	(5, 3, 1);
insert into
	sinks
	(id, component, topic, name, description)
values
	(1, 1, 1, 'sink1', 'sink1 description'),
	(2, 1, 2, 'sink2', 'sink2 description'),
	(3, 2, 1, 'sink3', 'sink3 description'),
	(4, 2, 2, 'sink4', 'sink4 description');
insert into
	view_sinks
	(id, component, topic, name, description)
values
	(1, 1, 1, 'viewSink1', 'viewSink1 description'),
	(2, 1, 2, 'viewSink2', 'viewSink2 description'),
	(3, 2, 1, 'viewSink3', 'viewSink3 description'),
	(4, 2, 2, 'viewSink4', 'viewSink4 description');
insert into
	view_sources
	(id, component, topic, name, description)
values
	(1, 1, 1, 'viewSource1', 'viewSource1 description'),
	(2, 1, 2, 'viewSource2', 'viewSource2 description'),
	(3, 2, 1, 'viewSource3', 'viewSource3 description'),
	(4, 2, 2, 'viewSource4', 'viewSource4 description');
insert into
	pods
	(id, name)
values
	(1, 'pod1'),
	(2, 'pod2');
insert into
	pod_processors
	(id, pod, processor)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	pod_sources
	(id, pod, source)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	pod_views
	(id, pod, view)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	pod_sinks
	(id, pod, sink)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	pod_view_sinks
	(id, pod, view_sink)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
insert into
	pod_view_sources
	(id, pod, view_source)
values
	(1, 1, 1),
	(2, 1, 2),
	(3, 2, 1),
	(4, 2, 2);
`)
	return err
}
|
/*
* API v2.0 Links Framework
*/
package clcv2
import (
"fmt"
"strings"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
)
// Link adds hyperlink information to resources.
type Link struct {
	// The link type (depends on context)
	Rel string `json:"rel"`
	// Address of the resource.
	Href string `json:"href,omitempty"`
	/*
	 * Optional Fields
	 */
	// Unique ID of the resource.
	Id string `json:"id,omitempty"`
	// Resource UUID (currently LBaaS only)
	ResourceId uuid.UUID `json:"resourceId,omitempty"`
	// Friendly name of the resource.
	Name string `json:"name,omitempty"`
	// Valid HTTP verbs that can act on this resource.
	// If none are explicitly listed, GET is assumed to be the only one.
	Verbs []string `json:"verbs,omitempty"`
}
// String renders the link as "rel: href verb1, verb2".
func (l *Link) String() string {
	verbs := strings.Join(l.Verbs, ", ")
	return l.Rel + ": " + l.Href + " " + verbs
}
// ExtractLinks returns every Link in from whose Rel field equals rel_type.
func ExtractLinks(from []Link, rel_type string) (res []Link) {
	for i := range from {
		if from[i].Rel == rel_type {
			res = append(res, from[i])
		}
	}
	return res
}
// extractLink returns the first Link in from whose Rel field matches
// rel_type, or an error when none is found. (The old doc comment claimed
// a nil return; the function actually signals absence via err.)
func extractLink(from []Link, rel_type string) (l *Link, err error) {
	if links := ExtractLinks(from, rel_type); len(links) > 0 {
		// FIXME: maybe warn here if there is more than 1 match
		l = &links[0]
	} else {
		err = errors.Errorf("No link with Rel=%s found in %+v", rel_type, from)
	}
	// Previously `return l, nil` discarded the error set above, so callers
	// could never detect a missing link.
	return l, err
}
|
package models
import (
"golang.org/x/crypto/bcrypt"
)
// Person models a user with identity, credentials (bcrypt hash) and
// contact information.
type Person struct {
	FirstName string `json:"firstname"`
	LastName string `json:"lastname"`
	Username string `json:"username"`
	Hash string `json:"hash,omitempty"`
	Emails []string `json:"emails"`
	Addresses Addresses `json:"addresses"`
}
// IsPasswordValid reports whether password matches the person's stored
// bcrypt hash; on mismatch or malformed hash the bcrypt error is returned
// alongside false.
func (p *Person) IsPasswordValid(password string) (bool, error) {
	if err := bcrypt.CompareHashAndPassword([]byte(p.Hash), []byte(password)); err != nil {
		return false, err
	}
	return true, nil
}
// AddAddress appends address to the person's addresses and returns the
// updated collection.
func (p *Person) AddAddress(address Address) Addresses {
	p.Addresses = append(p.Addresses, address)
	return p.Addresses
}
// AddEmail appends email to the person's email list and returns the
// updated list.
func (p *Person) AddEmail(email string) []string {
	p.Emails = append(p.Emails, email)
	return p.Emails
}
|
// Copyright 2016 Google Inc. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"log"
"os"
"os/signal"
"sync"
"syscall"
)
const schedulerName = "hightower"
// main starts the scheduling goroutines (a watcher for unscheduled pods
// and a periodic reconciler), then blocks until SIGINT/SIGTERM, at which
// point it signals shutdown via doneChan and waits for both to finish.
func main() {
	log.Println("Starting custom scheduler...")

	doneChan := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go monitorUnscheduledPods(doneChan, &wg)
	wg.Add(1)
	go reconcileUnscheduledPods(30, doneChan, &wg)

	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
	// A bare receive replaces the previous for { select { ... } } loop,
	// which had a single case and no exit condition.
	<-signalChan
	log.Printf("Shutdown signal received, exiting...")
	close(doneChan)
	wg.Wait()
	os.Exit(0)
}
|
package utils
import (
"bytes"
"encoding/binary"
"errors"
"os"
"github.com/sirupsen/logrus"
)
// FileExists reports whether filename exists and is not a directory.
func FileExists(filename string) bool {
	info, err := os.Stat(filename)
	if err != nil {
		// Treat every stat failure as "does not exist". The previous check
		// only handled IsNotExist, so any other error (e.g. permission
		// denied) left info nil and panicked on info.IsDir().
		return false
	}
	return !info.IsDir()
}
// DirExists reports whether filename exists and is a directory.
func DirExists(filename string) bool {
	info, err := os.Stat(filename)
	if err != nil {
		// Treat every stat failure as "does not exist" to avoid the nil
		// dereference the old IsNotExist-only check allowed.
		return false
	}
	return info.IsDir()
}
// RipTexture detects and pulls image data from texture bytes, scanning
// for WEBP (RIFF), PNG and JPG signatures in that order. It returns the
// file extension and the embedded image bytes, or an error when no image
// is found or the data is truncated.
func RipTexture(data []byte) (fileExt string, fileData []byte, err error) {
	// webp: "RIFF" followed by a little-endian uint32 chunk size
	start := bytes.Index(data, []byte{0x52, 0x49, 0x46, 0x46})
	if start >= 0 {
		var size int32
		// binary.Read requires a pointer destination; the old call passed
		// the value, so this branch always failed with an error.
		err = binary.Read(bytes.NewBuffer(data[start+4:start+8]), binary.LittleEndian, &size)
		if err != nil {
			return
		}
		fileExt = ".webp"
		fileData = data[start : start+8+int(size)]
		return
	}
	// png: 8-byte signature through the 8-byte IEND chunk tail (type + CRC)
	start = bytes.Index(data, []byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A})
	if start >= 0 {
		end := bytes.Index(data, []byte{0x49, 0x45, 0x4E, 0x44, 0xAE, 0x42, 0x60, 0x82})
		if end < 0 {
			err = errors.New("found PNG start but could not find PNG end tag")
			return
		}
		fileExt = ".png"
		fileData = data[start : end+8]
		return
	}
	// jpg: SOI marker through the 2-byte EOI marker
	start = bytes.Index(data, []byte{0xFF, 0xD8, 0xFF})
	if start >= 0 {
		end := bytes.Index(data, []byte{0xFF, 0xD9})
		if end < 0 {
			err = errors.New("found JPG start but could not find JPG end tag")
			return
		}
		fileExt = ".jpg"
		// Include the 2-byte EOI marker, which the old slice cut off
		// (the PNG branch already includes its end tag).
		fileData = data[start : end+2]
		return
	}
	err = errors.New("no valid image data found")
	return
}
// StringInSlice reports whether a is present in list.
func StringInSlice(a string, list []string) bool {
	for i := range list {
		if list[i] == a {
			return true
		}
	}
	return false
}
// CheckErrorRead checks and logs a read error, and verifies that the
// expected number of bytes was read. It returns true only for a clean,
// complete read.
func CheckErrorRead(log logrus.FieldLogger, err error, n int, expected int) bool {
	if err != nil {
		log.WithError(err).Error("read error")
		return false
	}
	if n < expected {
		log.WithField("readBytes", n).
			WithField("expectedBytes", expected).
			Error("wrong number of bytes read")
		return false
	}
	return true
}
// CheckErrorWrite checks and logs a write error; it returns true when
// the write succeeded (err is nil).
func CheckErrorWrite(log logrus.FieldLogger, err error) bool {
	if err != nil {
		log.WithError(err).Error("write error")
		return false
	}
	return true
}
// CheckErrorSeek checks and logs a seek error; it returns true when the
// seek succeeded (err is nil).
func CheckErrorSeek(log logrus.FieldLogger, err error) bool {
	if err != nil {
		log.WithError(err).Error("seek failure")
		return false
	}
	return true
}
|
/*
Copyright (C) 2018 Synopsys, Inc.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package plugins
// This is a controller that updates the configmap
// in perceptor periodically.
// It is assumed that the configmap in perceptor will
// roll over any time this is updated, and if not, that
// there is a problem in the orchestration environment.
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
hubv1 "github.com/blackducksoftware/perceptor-protoform/pkg/api/hub/v1"
opssightv1 "github.com/blackducksoftware/perceptor-protoform/pkg/api/opssight/v1"
hubclient "github.com/blackducksoftware/perceptor-protoform/pkg/hub/client/clientset/versioned"
opssightclientset "github.com/blackducksoftware/perceptor-protoform/pkg/opssight/client/clientset/versioned"
"github.com/blackducksoftware/perceptor-protoform/pkg/protoform"
"github.com/blackducksoftware/perceptor-protoform/pkg/util"
"github.com/juju/errors"
//extensions "github.com/kubernetes/kubernetes/pkg/apis/extensions"
log "github.com/sirupsen/logrus"
//metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
// hubConfig mirrors the "Hub" section of perceptor's configmap: the
// list of hub hosts to scan against plus connection settings and scan
// limits. sendHubs rewrites Hosts in place.
type hubConfig struct {
	Hosts []string
	User string
	PasswordEnvVar string
	ClientTimeoutMilliseconds *int
	Port *int
	ConcurrentScanLimit *int
	TotalScanLimit *int
}

// timings mirrors perceptor's pacing knobs for stalled-scan checks,
// metrics, and unknown-image handling.
type timings struct {
	CheckForStalledScansPauseHours *int
	StalledScanClientTimeoutHours *int
	ModelMetricsPauseSeconds *int
	UnknownImagePauseMilliseconds *int
}

// perceptorConfig is the JSON document stored under "<name>.yaml" in
// perceptor's configmap; sendHubs unmarshals it, updates Hub.Hosts, and
// writes it back.
type perceptorConfig struct {
	Hub *hubConfig
	Timings *timings
	UseMockMode bool
	Port *int
	LogLevel string
}
// ConfigMapUpdater watches Hub and OpsSight resources and keeps the
// hub host list inside perceptor's configmap in sync with the cluster.
type ConfigMapUpdater struct {
	config *protoform.Config
	// httpClient is used to probe hub webservers directly (see verifyHub).
	httpClient *http.Client
	kubeClient *kubernetes.Clientset
	hubClient *hubclient.Clientset
	opssightClient *opssightclientset.Clientset
}
// NewConfigMapUpdater wires up a ConfigMapUpdater with an HTTP client
// suited to probing hub webservers: TLS verification disabled (hubs use
// self-signed certificates) and a 5-second request timeout.
func NewConfigMapUpdater(config *protoform.Config, kubeClient *kubernetes.Clientset, hubClient *hubclient.Clientset, opssightClient *opssightclientset.Clientset) *ConfigMapUpdater {
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}
	return &ConfigMapUpdater{
		config:         config,
		httpClient:     &http.Client{Transport: transport, Timeout: 5 * time.Second},
		kubeClient:     kubeClient,
		hubClient:      hubClient,
		opssightClient: opssightClient,
	}
}
// sendHubs is one possible way to configure the perceptor hub family:
// it rewrites Hub.Hosts inside perceptor's configmap (stored as a JSON
// perceptorConfig document under the key "<ConfigMapName>.yaml") so
// perceptor picks up the current set of hubs.
func sendHubs(kubeClient *kubernetes.Clientset, opsSightSpec *opssightv1.OpsSightSpec, hubs []string) error {
	configMapName := opsSightSpec.ConfigMapName
	configMap, err := kubeClient.CoreV1().ConfigMaps(opsSightSpec.Namespace).Get(configMapName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("unable to find configmap %s in %s: %v", configMapName, opsSightSpec.Namespace, err)
	}
	var value perceptorConfig
	err = json.Unmarshal([]byte(configMap.Data[fmt.Sprintf("%s.yaml", configMapName)]), &value)
	if err != nil {
		return err
	}
	if value.Hub == nil {
		// Guard against a configmap document with no "Hub" section;
		// assigning through a nil pointer would panic.
		value.Hub = &hubConfig{}
	}
	value.Hub.Hosts = hubs
	jsonBytes, err := json.Marshal(value)
	if err != nil {
		return err
	}
	configMap.Data[fmt.Sprintf("%s.yaml", configMapName)] = string(jsonBytes)
	log.Debugf("updated configmap in %s is %+v", opsSightSpec.Namespace, configMap)
	_, err = kubeClient.CoreV1().ConfigMaps(opsSightSpec.Namespace).Update(configMap)
	if err != nil {
		// Report the configmap we actually tried to update
		// (configMapName); the old message named the perceptor
		// deployment instead, which may differ.
		return fmt.Errorf("unable to update configmap %s in %s: %v", configMapName, opsSightSpec.Namespace, err)
	}
	return nil
}
// Run starts two informers — one over Hub resources and one over
// OpsSight resources — that keep perceptor's configmap hub list in
// sync. The informer Run calls block, so both are launched in
// goroutines; closing ch stops them. Run itself returns immediately.
func (p *ConfigMapUpdater) Run(ch <-chan struct{}) {
	log.Infof("Starting controller for hub<->perceptor updates... this blocks, so running in a go func.")

	// syncFunc re-lists every hub and pushes the result to all opssights.
	syncFunc := func() {
		err := p.updateAllHubs()
		if err != nil {
			log.Errorf("unable to update hubs because %+v", err)
		}
	}
	// One synchronous sync before any watch events arrive.
	syncFunc()
	hubListWatch := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return p.hubClient.SynopsysV1().Hubs(p.config.Namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return p.hubClient.SynopsysV1().Hubs(p.config.Namespace).Watch(options)
		},
	}
	// Hub add/delete events both trigger a full re-sync (2s resync period).
	_, hubController := cache.NewInformer(hubListWatch,
		&hubv1.Hub{},
		2*time.Second,
		cache.ResourceEventHandlerFuncs{
			// TODO kinda dumb, we just do a complete re-list of all hubs,
			// every time an event happens... But thats all we need to do, so its good enough.
			DeleteFunc: func(obj interface{}) {
				log.Debugf("configmap updater hub deleted event ! %v ", obj)
				syncFunc()
			},
			AddFunc: func(obj interface{}) {
				log.Debugf("configmap updater hub added event! %v ", obj)
				syncFunc()
			},
		},
	)
	opssightListWatch := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return p.opssightClient.SynopsysV1().OpsSights(p.config.Namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return p.opssightClient.SynopsysV1().OpsSights(p.config.Namespace).Watch(options)
		},
	}
	// A newly added opssight receives the current hub list (updateOpsSight
	// waits for it to come up first).
	_, opssightController := cache.NewInformer(opssightListWatch,
		&opssightv1.OpsSight{},
		2*time.Second,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				log.Debugf("configmap updater opssight added event! %v ", obj)
				err := p.updateOpsSight(obj)
				if err != nil {
					log.Errorf("unable to update opssight because %+v", err)
				}
			},
		},
	)
	// make sure this is called from a go func -- it blocks!
	go hubController.Run(ch)
	go opssightController.Run(ch)
}
// getAllHubs lists the hubs in the configured namespace and returns the
// in-cluster webserver URLs ("webserver.<name>.svc") of every "worker"
// hub that answers a version probe (see verifyHub).
func (p *ConfigMapUpdater) getAllHubs() []string {
	allHubNamespaces := []string{}
	hubsList, _ := util.ListHubs(p.hubClient, p.config.Namespace)
	for _, hub := range hubsList.Items {
		if !strings.EqualFold(hub.Spec.HubType, "worker") {
			continue
		}
		hubURL := fmt.Sprintf("webserver.%s.svc", hub.Name)
		if p.verifyHub(hubURL, hub.Name) {
			allHubNamespaces = append(allHubNamespaces, hubURL)
		}
		log.Infof("Hub config map controller, namespace is %s", hub.Name)
	}
	log.Debugf("allHubNamespaces: %+v", allHubNamespaces)
	return allHubNamespaces
}
// updateAllHubs will list all hubs in the cluster, and send them to every
// opssight as scan targets.
// TODO there may be hubs which we dont want opssight to use. Not sure how to deal with that yet.
func (p *ConfigMapUpdater) updateAllHubs() error {
	// for opssight 3.0, only support one opssight
	opssights, err := util.GetOpsSights(p.opssightClient)
	if err != nil {
		return errors.Annotate(err, "unable to get opssights")
	}
	if len(opssights.Items) == 0 {
		return nil
	}
	allHubNamespaces := p.getAllHubs()
	// TODO, replace w/ configmap mutat ?
	// curl perceptor w/ the latest hub list
	for _, o := range opssights.Items {
		if err := sendHubs(p.kubeClient, &o.Spec, allHubNamespaces); err != nil {
			return errors.Annotate(err, "unable to send hubs")
		}
	}
	return nil
}
// updateOpsSight handles a newly added opssight: it waits (up to 20
// polls, 10s apart) for the opssight to report the "running" state, then
// pushes the current hub list to it as scan targets. After the last poll
// it proceeds regardless of state. (The previous comment here was a
// copy-paste of updateAllHubs' and described the wrong function.)
func (p *ConfigMapUpdater) updateOpsSight(obj interface{}) error {
	opssight := obj.(*opssightv1.OpsSight)
	var err error
	for j := 0; j < 20; j++ {
		// Re-fetch each iteration so the status is fresh.
		opssight, err = util.GetOpsSight(p.opssightClient, p.config.Namespace, opssight.Name)
		if err != nil {
			return fmt.Errorf("unable to get opssight %s due to %+v", opssight.Name, err)
		}
		if strings.EqualFold(opssight.Status.State, "running") {
			break
		}
		log.Debugf("waiting for opssight %s to be up.....", opssight.Name)
		time.Sleep(10 * time.Second)
	}
	allHubNamespaces := p.getAllHubs()
	// TODO, replace w/ configmap mutat ?
	// curl perceptor w/ the latest hub list
	err = sendHubs(p.kubeClient, &opssight.Spec, allHubNamespaces)
	if err != nil {
		return errors.Annotate(err, "unable to send hubs")
	}
	return nil
}
// verifyHub probes a hub webserver's /api/current-version endpoint for
// up to 60 attempts (10s apart). It returns true on an HTTP 200, and
// false when reads fail, the hub resource disappears, or all attempts
// are exhausted.
func (p *ConfigMapUpdater) verifyHub(hubURL string, name string) bool {
	for i := 0; i < 60; i++ {
		resp, err := p.httpClient.Get(fmt.Sprintf("https://%s:443/api/current-version", hubURL))
		if err != nil {
			log.Debugf("unable to talk with the hub %s", hubURL)
			time.Sleep(10 * time.Second)
			// Give up entirely if the hub resource itself is gone.
			_, err := util.GetHub(p.hubClient, name, name)
			if err != nil {
				return false
			}
			continue
		}
		_, err = ioutil.ReadAll(resp.Body)
		// Close the body right away rather than defer-ing inside the
		// loop: deferred closes would pile up for as many as 60
		// iterations, keeping every response body (and connection)
		// open until the function returns.
		resp.Body.Close()
		if err != nil {
			log.Errorf("unable to read the response from hub %s due to %+v", hubURL, err)
			return false
		}
		log.Debugf("hub response status for %s is %v", hubURL, resp.Status)
		if resp.StatusCode == 200 {
			return true
		}
		time.Sleep(10 * time.Second)
	}
	return false
}
|
package main
import (
"fmt"
"sync"
"time"
)
// The three goroutines created below are queued on the same logical
// processor. The most recently created one lands in the P's "runnext"
// slot, so it has the highest priority and tends to run first — hence,
// among goroutines created in one loop, the last-created is the one
// most likely to execute before its siblings.
func schedulesSort() {
	done := make(chan bool)
	values := []string{"a", "b", "c"}
	for _, v := range values {
		fmt.Println("--->", v)
		go func(u string) {
			fmt.Println(u)
			done <- true
		}(v)
	}
	// Wait for all goroutines to complete before returning.
	// "for range" replaces "for _ = range": the blank assignment is
	// redundant (staticcheck S1005 / gofmt -s).
	for range values {
		<-done
	}
}
// worker pauses one second, then burns CPU with 1e10 increments before
// signalling completion. The busy loop is deliberate load to make
// scheduler activity visible under GODEBUG=schedtrace — not a bug.
func worker(wg *sync.WaitGroup) {
	time.Sleep(time.Second)
	var counter int
	for i := 0; i < 1e10; i++ {
		counter++
	}
	wg.Done()
}
// GODEBUG=schedtrace=1000,scheddetail=1 ./schedules
//
// main runs the creation-order demo, then spawns CPU-bound workers
// spaced 50 seconds apart so scheduler traces can be observed.
func main() {
	schedulesSort()
	var wg sync.WaitGroup
	for i := 0; i < 1e10; i++ {
		// Add before each spawn. The original wg.Add(0) left the
		// counter at zero, so every worker's Done() would drive it
		// negative and panic ("negative WaitGroup counter"), and
		// Wait() below would return before any worker finished.
		wg.Add(1)
		go worker(&wg)
		time.Sleep(time.Second * 50)
	}
	wg.Wait()
	time.Sleep(time.Second)
}
|
package main
import "fmt"
// main contrasts len semantics: for a slice, len is the element count
// (50 immediately after make); for a buffered channel, len is the
// number of queued elements (0 when empty, 1 after one send), not the
// capacity.
func main() {
	intSlice := make([]int, 50)
	fmt.Println(len(intSlice)) // 50
	intChan := make(chan int, 50)
	fmt.Println(len(intChan)) // 0
	intChan <- 0
	fmt.Println(len(intChan)) // 1
}
|
package main
import "fmt"
// LeetCode 44. Wildcard Matching
// Given a string s and a pattern p, implement wildcard matching where:
//   '?' matches any single character.
//   '*' matches any sequence of characters (including the empty one).
// The pattern must match the entire string.
// Notes:
//   s may be empty and contains only lowercase letters a-z.
//   p may be empty and contains lowercase letters a-z plus '?' and '*'.
// https://leetcode-cn.com/problems/wildcard-matching/
func main() {
	fmt.Println(isMatch("aa", "a")) // false
	fmt.Println(isMatch("aa", "*")) // true
}
// Approach 1: dynamic programming.
// match[i][j] reports whether the first i bytes of s can be matched by
// the first j bytes of p, where '?' matches any single byte and '*'
// matches any run of bytes (including none).
func isMatch(s string, p string) bool {
	sLen, pLen := len(s), len(p)
	match := make([][]bool, sLen+1)
	for row := range match {
		match[row] = make([]bool, pLen+1)
	}
	// Empty s: only a (possibly empty) prefix of '*'s in p can match.
	match[0][0] = true
	for j := 1; j <= pLen; j++ {
		if p[j-1] != '*' {
			break
		}
		match[0][j] = true
	}
	for i := 1; i <= sLen; i++ {
		for j := 1; j <= pLen; j++ {
			switch {
			case s[i-1] == p[j-1] || p[j-1] == '?':
				match[i][j] = match[i-1][j-1]
			case p[j-1] == '*':
				// '*' either matches the empty string (drop it:
				// match[i][j-1]) or consumes one more byte of s
				// (keep it: match[i-1][j]).
				match[i][j] = match[i][j-1] || match[i-1][j]
			}
		}
	}
	return match[sLen][pLen]
}
|
package cli
import (
"encoding/json"
"fmt"
"log"
"os"
"os/exec"
"time"
"github.com/spf13/afero"
"github.com/spf13/cobra"
ac "github.com/ocramh/fingerprinter/pkg/acoustid"
fp "github.com/ocramh/fingerprinter/pkg/fingerprint"
)
var (
	// apikey holds the AcoustID API key supplied via --apikey/-k.
	apikey string
)

// init registers the acoustid subcommand on the root command and wires
// its two required flags. inputFile is a shared flag variable declared
// elsewhere in this package. MarkFlagRequired errors are ignored here;
// they only fire for flag names that do not exist.
func init() {
	rootCmd.AddCommand(acoustidCmd)
	acoustidCmd.Flags().StringVarP(&apikey, "apikey", "k", "", "acoustid key")
	acoustidCmd.Flags().StringVarP(&inputFile, "audiofile", "a", "", "audio file path")
	acoustidCmd.MarkFlagRequired("apikey")
	acoustidCmd.MarkFlagRequired("audiofile")
}
// acoustidCmd fingerprints an audio file with chromaprint, looks up each
// fingerprint against the AcoustID API (pausing between requests), and
// prints the aggregated lookup results to stdout as JSON.
var acoustidCmd = &cobra.Command{
	Use:   "acoustid",
	Short: "Generate an audio fingerprint and queries the AcoustID API to find matching recording ID(s)",
	Run: func(cmd *cobra.Command, args []string) {
		chroma := fp.NewChromaPrint(exec.Command, afero.NewOsFs())
		fingerprints, err := chroma.CalcFingerprint(inputFile)
		if err != nil {
			log.Fatal(err)
		}
		acoustIDClient := ac.NewAcoustID(apikey)
		retryOnFail := true
		var lookupRes []ac.ACLookupResult
		for _, fingerprint := range fingerprints {
			resp, err := acoustIDClient.LookupFingerprint(fingerprint, retryOnFail)
			if err != nil {
				log.Fatal(err)
			}
			lookupRes = append(lookupRes, resp.Results...)
			// Throttle successive lookups between requests.
			time.Sleep(ac.AcoustIDReqDelay)
		}
		b, err := json.Marshal(lookupRes)
		if err != nil {
			log.Fatal(err)
		}
		// Print via an explicit "%s" verb: the JSON payload is data,
		// not a format string — any '%' inside it would previously be
		// misinterpreted by Fprintf (go vet printf check).
		fmt.Fprintf(os.Stdout, "%s", b)
	},
}
|
package main
import (
"github.com/shurcooL/vfsgen"
"log"
"net/http"
)
// main regenerates the embedded asset bundles: the micro component
// assets and the swagger browser UI. Each directory is compiled into a
// Go source file guarded by the "build" tag. The duplicated
// generate-and-check pairs are folded into one table-driven loop.
func main() {
	targets := []struct {
		dir  string
		opts vfsgen.Options
	}{
		{
			dir: "plugins/micro/component",
			opts: vfsgen.Options{
				PackageName:  "bundle",
				VariableName: "Components",
				BuildTags:    "build",
				Filename:     "plugins/micro/bundle/component_build.go",
			},
		},
		{
			dir: "plugins/swagger/static",
			opts: vfsgen.Options{
				PackageName:  "swagger",
				VariableName: "Browser",
				BuildTags:    "build",
				Filename:     "plugins/swagger/browser_build.go",
			},
		},
	}
	for _, target := range targets {
		if err := vfsgen.Generate(http.Dir(target.dir), target.opts); err != nil {
			log.Fatalln(err)
		}
	}
}
|
// Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package container
import (
"os"
"strings"
log "github.com/cihub/seelog"
docker "github.com/fsouza/go-dockerclient"
)
// NewDockerClient builds a Docker client from the standard DOCKER_*
// environment variables. It warns when DOCKER_HOST points anywhere
// other than a local unix socket, since a local daemon is expected.
func NewDockerClient() (*docker.Client, error) {
	host := os.Getenv("DOCKER_HOST")
	if host != "" && !strings.HasPrefix(host, "unix://") {
		log.Warnf("Detected DOCKER_HOST %s. This should not be remote.",
			host)
	}
	return docker.NewClientFromEnv()
}
|
package main
import "fmt"
// main demonstrates function expressions: anonymous functions bound to
// variables, taking an int, a string, and a slice parameter in turn.
func main() {
	fmt.Println("This is func expression")
	printYear := func(year int) {
		fmt.Println("Sundan was born in", year)
	}
	printYear(1987)
	printPlace := func(place string) {
		fmt.Println("Sundan was bon in", place)
	}
	printPlace("Bangladesh")
	printTraits := func(traits []string) {
		fmt.Println("Sundan was", traits)
	}
	printTraits([]string{"Awesome", "Honest", "Kind"})
}
|
package postgres_backend
import (
"github.com/straumur/straumur"
"testing"
)
// QueryBuilder produces a SQL statement and its positional arguments
// for the given event.
type QueryBuilder func(e *straumur.Event) (string, []interface{}, error)

// queryTest runs qb against e and compares the generated query string
// and argument count with expectations. The expected argument layout
// differs by event state: new events (ID == 0) include Created/Updated
// as arguments; existing events (ID > 0) omit them and append the ID
// last.
func queryTest(t *testing.T, expected string, e *straumur.Event, qb QueryBuilder) {
	// Insert shape: 8 leading scalar fields including timestamps.
	expectedArgs := []interface{}{
		e.Key,
		"{}",
		"{}",
		e.Created,
		e.Updated,
		e.Description,
		e.Importance,
		e.Origin,
	}
	// Update shape: 6 leading scalar fields, no timestamps.
	if e.ID > 0 {
		expectedArgs = []interface{}{
			e.Key,
			"{}",
			"{}",
			e.Description,
			e.Importance,
			e.Origin,
		}
	}
	// Array-typed columns are flattened into individual placeholders.
	for _, i := range [][]string{e.Entities, e.OtherReferences, e.Actors, e.Tags} {
		for _, s := range i {
			expectedArgs = append(expectedArgs, s)
		}
	}
	// Updates target a row by ID, appended as the final argument.
	if e.ID > 0 {
		expectedArgs = append(expectedArgs, e.ID)
	}
	query, args, err := qb(e)
	if err != nil {
		t.Fatal(err)
	}
	//Need DeepEqual method for these cases
	// NOTE(review): only the argument COUNT is compared below, not the
	// argument values themselves — a same-length mismatch passes.
	if len(args) != len(expectedArgs) {
		t.Fatalf("Expected %+v, got %+v", expectedArgs, args)
	}
	if query != expected {
		t.Fatalf("Expected:\n\t%s\nGot:\n\t%s", expected, query)
	}
}
// TestBuildInsertQuery verifies the insert statement generated for a
// new event carrying two entities and one actor.
func TestBuildInsertQuery(t *testing.T) {
	const expected = `insert into "event" ("key", "key_params", "created", "updated", "payload", "description", "importance", "origin", "entities", "other_references", "actors", "tags") values ($1, $2, $3, $4, $5, $6, $7, $8,ARRAY[$9, $10]::text[], ARRAY[]::text[], ARRAY[$11]::text[], ARRAY[]::text[]) returning "id", "created", "updated";`
	entities := []string{"ns/foo", "ns/moo"}
	actors := []string{"someone"}
	e := straumur.NewEvent("foo.bar", nil, nil, "My event", 3, "mysystem", entities, nil, actors, nil)
	queryTest(t, expected, e, buildInsertQuery)
}
// TestBuildUpdateQuery verifies the update statement generated for an
// existing event (ID set), which omits timestamps and targets the row
// by ID.
func TestBuildUpdateQuery(t *testing.T) {
	const expected = `update "event" set "key" = $1, "key_params" = $2, "payload" = $3, "description" = $4, "importance" = $5, "origin" = $6, "entities" = ARRAY[$7, $8]::text[], "other_references" = ARRAY[]::text[], "actors" = ARRAY[$9]::text[], "tags" = ARRAY[]::text[] where "id" = $10 returning "updated";`
	entities := []string{"ns/foo", "ns/moo"}
	actors := []string{"someone"}
	e := straumur.NewEvent("foo.bar", nil, nil, "My event", 3, "mysystem", entities, nil, actors, nil)
	e.ID = 123
	queryTest(t, expected, e, buildUpdateQuery)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.