text
stringlengths 11
4.05M
|
|---|
package main
import (
"io"
"net/http"
"strings"
)
// hello echoes back the first entry of the X-FORWARDED-FOR header.
// NOTE(review): this header is client-supplied and trivially spoofable;
// do not use its value for authentication or rate-limiting decisions.
func hello(w http.ResponseWriter, r *http.Request) {
	forwarded := strings.Split(r.Header.Get("X-FORWARDED-FOR"), ",")
	io.WriteString(w, forwarded[0])
}
// main serves hello on every path of port 3001.
func main() {
	http.HandleFunc("/", hello)
	// ListenAndServe only returns on failure; previously the error was
	// silently dropped and the process exited with status 0.
	if err := http.ListenAndServe(":3001", nil); err != nil {
		panic(err)
	}
}
|
package keeper
import (
"context"
"encoding/hex"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/irisnet/irismod/modules/random/types"
)
// Compile-time assertion that Keeper implements the gRPC QueryServer interface.
var _ types.QueryServer = Keeper{}
// Random implements the Query/Random gRPC method
func (k Keeper) Random(c context.Context, req *types.QueryRandomRequest) (*types.QueryRandomResponse, error) {
	if req == nil {
		return nil, status.Errorf(codes.InvalidArgument, "empty request")
	}
	// The request id travels as a hex string; reject anything undecodable.
	requestID, decodeErr := hex.DecodeString(req.ReqId)
	if decodeErr != nil {
		return nil, status.Errorf(codes.InvalidArgument, "invalid request id")
	}
	random, lookupErr := k.GetRandom(sdk.UnwrapSDKContext(c), requestID)
	if lookupErr != nil {
		return nil, status.Errorf(codes.NotFound, "random %s not found", req.ReqId)
	}
	return &types.QueryRandomResponse{Random: &random}, nil
}
// RandomRequestQueue implements the Query/RandomRequestQueue gRPC method
func (k Keeper) RandomRequestQueue(c context.Context, req *types.QueryRandomRequestQueueRequest) (*types.QueryRandomRequestQueueResponse, error) {
	if req == nil {
		return nil, status.Errorf(codes.InvalidArgument, "empty request")
	}
	if req.Height < 0 {
		return nil, status.Errorf(codes.InvalidArgument, "invalid height")
	}
	ctx := sdk.UnwrapSDKContext(c)
	var pending []types.Request
	if req.Height != 0 {
		// Only the requests queued at the given block height.
		pending = queryRandomRequestQueueByHeight(ctx, req.Height, k)
	} else {
		// Height zero selects every pending request.
		pending = queryAllRandomRequestsInQueue(ctx, k)
	}
	return &types.QueryRandomRequestQueueResponse{Requests: pending}, nil
}
|
package AvatarGenerator
import (
"os"
"image/png"
"image"
"fmt"
"crypto/sha256"
"image/color"
)
const (
	// IMAGE_DIMENSION is the width and height, in pixels, of the generated avatar.
	IMAGE_DIMENSION = 256
)

// GenerateAvatar renders a deterministic identicon-style PNG named
// "avatarme.png", derived from the SHA-256 hash of email+ip+user.
// It exits the process when an argument is missing or the file cannot
// be written.
func GenerateAvatar(email string, ip string, user string) {
	if email == "" || ip == "" || user == "" {
		print("You must provide an email, ip address and username \n")
		os.Exit(1)
	}
	img := buildAvatarImage(email, ip, user)
	f, err := os.Create("avatarme.png")
	if err != nil {
		// Previously the error was discarded, so a failed Create caused a
		// nil-pointer dereference in png.Encode below.
		fmt.Println("Failed to create avatarme.png:", err)
		os.Exit(1)
	}
	defer f.Close()
	if err := png.Encode(f, img); err != nil {
		// Previously ignored; a partial/failed write went unreported.
		fmt.Println("Failed to encode image:", err)
		os.Exit(1)
	}
	fmt.Println("Successfully saved image.")
}

// buildAvatarImage paints an 8x8 grid of 32px squares (256/32). Each cell
// takes one hash byte as the variable channel and cycles through three
// fixed color schemes, so equal inputs always yield the same image.
func buildAvatarImage(email, ip, user string) *image.NRGBA {
	bArray := sha256.Sum256([]byte(email + ip + user))
	img := image.NewNRGBA(image.Rect(0, 0, IMAGE_DIMENSION, IMAGE_DIMENSION))
	bLen := len(bArray) // 32 bytes for SHA-256
	loop := IMAGE_DIMENSION / bLen
	for i := 0; i < loop; i++ {
		for j := 0; j < loop; j++ {
			wStart := i * bLen
			hStart := j * bLen
			v := bArray[(i*loop+j)%bLen]
			var r, g, b uint8
			switch (i*loop + j) % 3 {
			case 0:
				r, g, b = 255, v, 0
			case 1:
				r, g, b = 0, 255, v
			case 2:
				r, g, b = v, 0, 255
			}
			// Fill the bLen x bLen square for this cell.
			for m := 0; m < bLen; m++ {
				for n := 0; n < bLen; n++ {
					img.Set(wStart+m, hStart+n, color.RGBA{r, g, b, 255})
				}
			}
		}
	}
	return img
}
|
/*
Package say provides an interruptible speaking service.
The server will 'say' a user supplied quote.
It breaks the quote into phrases, using punctuation as delimiters,
running the espeak command for each phrase.
This allows the sequence to be terminated at any point
between phrases, but introduces a short pause at punctuation.
*/
package say
import (
"errors"
"io"
"os/exec"
"strings"
s "github.com/nedp/command/sequence"
"github.com/nedp/remotecmds/router"
)
// Params carries the user-supplied input for the say service.
type Params struct {
	Quote string // the text to be spoken
}

// IsParams marks Params as a router.Params implementation.
func (Params) IsParams() {} // Marker

// NewParams returns a fresh, zero-valued Params for the router to populate.
func NewParams() router.Params {
	return new(Params)
}
// Creates a new sequence (github.com/nedp/command/sequence)
// for saying `quote`.
//
// The quote is split into phrases. For each phrase an espeak process is
// prepared and fed the phrase over stdin; instance i+1 is prepared while
// instance i runs, and the phase boundaries are the points where the
// sequence can be interrupted.
//
// Returns
// the created sequence.
func NewSequence(routeParams router.Params) s.RunAller {
	p := params{
		routeParams.(*Params).Quote, // panics if routeParams is not *Params
		make(chan *exec.Cmd, 1),
		make(chan io.WriteCloser, 1),
	}
	phrases := phrasesIn(p.quote)
	builder := s.SequenceOf(
		// Prepare the first espeak instance and pipe.
		s.PhaseOf(prepareEspeak(p)).
			// Send through the first pipe.
			AndJust(sendPhrase(p, phrases[0])),
	)
	for i := 0; i+1 < len(phrases); i += 1 {
		builder = builder.Then(
			// Run the current espeak instance.
			s.PhaseOf(runEspeak(p)).
				// Prepare the next espeak instance and pipe.
				AndJust(prepareEspeak(p)).
				// Send through the next pipe.
				AndJust(sendPhrase(p, phrases[i+1])),
		)
	}
	// Run the last espeak instance.
	builder = builder.ThenJust(runEspeak(p))
	// No textual output is produced; hand the sequence a closed channel.
	outCh := make(chan string)
	close(outCh)
	return builder.End(outCh)
}
// phrasesIn splits quote into phrases, each ending with one of the
// punctuation separators. A separator at the very end of the quote does
// not start a new phrase; the remaining text (or the whole quote when it
// contains no separator) is appended as the final phrase.
func phrasesIn(quote string) []string {
	const separators = ",.;:?!"
	result := make([]string, 0, len(quote)/2)
	for {
		idx := strings.IndexAny(quote, separators)
		// Stop on no separator, or on a separator that closes the quote.
		if idx == -1 || idx+1 == len(quote) {
			break
		}
		result = append(result, quote[:idx+1])
		quote = quote[idx+1:]
	}
	return append(result, quote)
}
// params bundles the quote with the channels used to hand a prepared
// espeak command and its stdin pipe from one sequence phase to the next.
// Both channels are buffered to 1 so prepare never blocks on run.
type params struct {
	quote  string
	cmdCh  chan *exec.Cmd
	pipeCh chan io.WriteCloser
}
// prepareEspeak returns a phase function that builds an espeak command,
// publishing its stdin pipe and then the command itself on p's channels.
func prepareEspeak(p params) func() error {
	return func() error {
		cmd := exec.Command("espeak")
		pipe, err := cmd.StdinPipe()
		if err != nil {
			return err
		}
		p.pipeCh <- pipe
		p.cmdCh <- cmd
		return nil
	}
}
// sendPhrase returns a phase function that writes phrase into the next
// stdin pipe received from p.pipeCh, retrying until every byte has been
// written, then closes the pipe so espeak sees EOF.
func sendPhrase(p params, phrase string) func() error {
	return func() error {
		pipe := <-p.pipeCh
		// Re-write the not-yet-written tail each iteration; the tail is
		// trimmed by n bytes at the bottom of the loop body.
		for n, err := pipe.Write([]byte(phrase)); n < len(phrase);
			n, err = pipe.Write([]byte(phrase)) {
			// NOTE(review): n is inspected before err, so a write returning
			// (0, err) reports the generic message below instead of the
			// underlying error — confirm this ordering is intentional.
			if n <= 0 {
				return errors.New("Failed to write to the pipe")
			}
			if err != nil {
				return err
			}
			phrase = phrase[n:]
		}
		pipe.Close()
		return nil
	}
}
// runEspeak returns a phase function that receives the next prepared
// espeak command and runs it to completion.
func runEspeak(p params) func() error {
	return func() error {
		return (<-p.cmdCh).Run()
	}
}
// splitFunc reports whether c is one of the phrase-delimiting
// punctuation marks.
func splitFunc(c rune) bool {
	return strings.ContainsRune(",.;:?!", c)
}
|
package event
import (
	"errors"
	"time"

	mgo "gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)
// EventRepo wraps the MongoDB collection that stores Event documents.
type EventRepo struct {
	Collection *mgo.Collection
}
//MONGO FUNCTIONS

// create inserts item unless an event with the same child and name already
// exists. Fixes two inverted conditions: exist() previously reported
// count < 1 (i.e. "absent") while being named exist, and create() required
// err != nil on its insert path, so nothing was ever created on success.
func (repo EventRepo) create(item *Event) error {
	//check the family
	query := bson.M{
		"child": item.Child,
		"name":  item.Name,
	}
	exists, err := repo.exist(query)
	if err != nil {
		return err
	}
	if !exists {
		item.Created = time.Now()
		item.Updated = time.Now()
		return repo.update(item)
	}
	return nil
}

// exist reports whether at least one document matches query.
func (r EventRepo) exist(query bson.M) (bool, error) {
	count, err := r.Collection.Find(query).Count()
	return count > 0, err
}
// update upserts item by its ObjectId, allocating a fresh id when the item
// has none yet, and refreshes the Updated timestamp.
func (r EventRepo) update(item *Event) (err error) {
	var id bson.ObjectId
	// A zero bson.ObjectId hex-encodes to "", so this detects "no id yet".
	// NOTE(review): a freshly generated id is used only as the upsert
	// selector and is never written back to item.Id — confirm callers do
	// not rely on item.Id being populated after update.
	if item.Id.Hex() == "" {
		id = bson.NewObjectId()
	} else {
		id = item.Id
	}
	item.Updated = time.Now()
	_, err = r.Collection.UpsertId(id, item)
	return
}
// all fetches every event document in the collection.
func (r EventRepo) all() ([]Event, error) {
	var events []Event
	queryErr := r.Collection.Find(bson.M{}).All(&events)
	return events, queryErr
}
// destroy removes the event with the given hex id. A malformed id is
// reported as an error instead of letting bson.ObjectIdHex panic.
func (r EventRepo) destroy(id string) (err error) {
	if !bson.IsObjectIdHex(id) {
		return errors.New("invalid event id: " + id)
	}
	err = r.Collection.RemoveId(bson.ObjectIdHex(id))
	return
}
|
package common
import (
"fmt"
"regexp"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// getVersionRegex returns the pattern for valid version strings:
// "MAJOR.MINOR", optionally followed by either ".PATCH" or "-RCn"
// (but never both).
func getVersionRegex() string {
	return `^\d+\.\d+((\.\d+)?|(\-RC\d+))$`
}
// validateVersion asserts that version matches (ok=true) or does not
// match (ok=false) the version regex.
func validateVersion(t *testing.T, version string, ok bool) {
	matched, err := regexp.MatchString(getVersionRegex(), version)
	require.NoError(t, err, "invalid version regex")
	if !ok {
		require.False(t, matched, "invalid version regex match")
		return
	}
	require.True(t, matched, "invalid version regex match")
}
// TestValidateVersionRegex pins which version shapes are accepted.
func TestValidateVersionRegex(t *testing.T) {
	validateVersion(t, "1.1", true)
	validateVersion(t, "1.1.1", true)
	validateVersion(t, "1.1-RC1", true)
	// A release candidate may not also carry a patch number.
	validateVersion(t, "1.1.1-RC1", false)
	// The RC marker is case-sensitive.
	validateVersion(t, "1.1-rc1", false)
}

// TestGetBuildInfo checks build info exists and carries a version.
func TestGetBuildInfo(t *testing.T) {
	buildInfo := GetBuildInfo()
	require.NotNil(t, buildInfo, "missing build info")
	require.NotZero(t, buildInfo.Version, "missing build info version")
}

// TestGetBuildInfoString checks the git revision shows up in String().
func TestGetBuildInfoString(t *testing.T) {
	buildInfo := GetBuildInfo()
	buildInfo.GitShortRevision = "foobar"
	v := buildInfo.String()
	require.NotZero(t, v, "empty build string")
	require.True(t, strings.Contains(v, "foobar"), "invalid version string")
}

// TestGetBuildInfoStringSanitize checks Sanitize reduces the string form
// to exactly "v<version>".
func TestGetBuildInfoStringSanitize(t *testing.T) {
	buildInfo := GetBuildInfo()
	buildInfo.Sanitize()
	v := buildInfo.String()
	require.Equal(t, fmt.Sprintf("v%s", buildInfo.Version), v, "invalid build string")
}
|
package cmd
import (
"fmt"
"github.com/bb-orz/gt/libs/libService"
"github.com/bb-orz/gt/utils"
"github.com/urfave/cli/v2"
"io"
"os"
)
// ServiceCommand builds the `gt service` CLI command, which scaffolds a
// service interface, its versioned implementation and a DTO file.
func ServiceCommand() *cli.Command {
	return &cli.Command{
		Name:        "service",
		Usage:       "Add Application Service",
		UsageText:   "gt service [--name|-n=][ServiceName]",
		Description: "The service command create a new service go interface,this command will generate some necessary files in service directory.",
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "name", Aliases: []string{"n"}, Value: "example"},
			&cli.StringFlag{Name: "version", Aliases: []string{"v"}, Value: "V1"},
			&cli.StringFlag{Name: "interface_output_path", Aliases: []string{"o"}, Value: "./services"},
			&cli.StringFlag{Name: "implement_output_path", Aliases: []string{"c"}, Value: "./services"},
			&cli.StringFlag{Name: "dto_output_path", Aliases: []string{"d"}, Value: "./dtos"},
		},
		Action: ServiceCommandAction,
	}
}
// ServiceCommandAction scaffolds up to three files for the named service:
// the interface, its versioned implementation and the DTO definitions.
// Files that already exist are skipped with a warning. NOTE(review): all
// failures are only logged and nil is always returned, so the CLI exits
// successfully even when generation failed — confirm this is intended.
func ServiceCommandAction(ctx *cli.Context) error {
	var err error
	var cmdParams = libService.CmdParams{
		Name:        ctx.String("name"),
		Version:     ctx.String("version"),
		IOutputPath: ctx.String("interface_output_path"),
		MOutputPath: ctx.String("implement_output_path"),
		DOutputPath: ctx.String("dto_output_path"),
	}
	var interfaceFile, implementFile io.Writer
	// Create the service interface file if it does not already exist.
	interfaceFileName := cmdParams.IOutputPath + "/" + cmdParams.Name + "_service.go"
	if !IsServiceFileExist(interfaceFileName) {
		if interfaceFile, err = utils.CreateFile(interfaceFileName); err != nil {
			utils.CommandLogger.Error(utils.CommandNameService, err)
		} else {
			utils.CommandLogger.OK(utils.CommandNameService, fmt.Sprintf("Create %s Service Interface File Successful! >>> FilePath:%s", utils.CamelString(cmdParams.Name), interfaceFileName))
			// Render the template and write it out.
			if err = libService.NewFormatterServiceInterface().Format(cmdParams.Name, cmdParams.Version).WriteOut(interfaceFile); err != nil {
				utils.CommandLogger.Error(utils.CommandNameService, err)
				return nil
			} else {
				utils.CommandLogger.OK(utils.CommandNameService, fmt.Sprintf("Write %s Service Interface File Successful!", cmdParams.Name))
			}
		}
	} else {
		utils.CommandLogger.Warning(utils.CommandNameService, fmt.Sprintf("%s Service Interface File Is Exist!", cmdParams.Name))
	}
	// Create the file implementing the requested service version.
	implementFileName := cmdParams.MOutputPath + "/" + cmdParams.Name + "_service_" + utils.SnakeString(cmdParams.Version) + ".go"
	if !IsServiceFileExist(implementFileName) {
		if implementFile, err = utils.CreateFile(implementFileName); err != nil {
			utils.CommandLogger.Error(utils.CommandNameService, err)
		} else {
			utils.CommandLogger.OK(utils.CommandNameService, fmt.Sprintf("Create %s Service Interface File Successful! >>> FilePath:%s", utils.CamelString(cmdParams.Name), implementFileName))
			// Render the template and write it out.
			if err = libService.NewFormatterServiceImplement().Format(cmdParams.Name, cmdParams.Version).WriteOut(implementFile); err != nil {
				utils.CommandLogger.Error(utils.CommandNameService, err)
				return nil
			} else {
				utils.CommandLogger.OK(utils.CommandNameService, fmt.Sprintf("Write %s Service Implement File Successful!", cmdParams.Name))
			}
		}
	} else {
		utils.CommandLogger.Warning(utils.CommandNameService, fmt.Sprintf("%s Service Implement File Is Exist!", cmdParams.Name))
	}
	// Generate the service data-transfer-object file.
	dtoFileName := cmdParams.DOutputPath + "/service_" + cmdParams.Name + "_dto.go"
	if !IsServiceFileExist(dtoFileName) {
		if implementFile, err = utils.CreateFile(dtoFileName); err != nil {
			utils.CommandLogger.Error(utils.CommandNameService, err)
		} else {
			utils.CommandLogger.OK(utils.CommandNameService, fmt.Sprintf("Create %s Service DTO File Successful! >>> FilePath:%s", utils.CamelString(cmdParams.Name), dtoFileName))
			// Render the template and write it out.
			if err = libService.NewFormatterServiceDto().Format(cmdParams.Name, cmdParams.Version).WriteOut(implementFile); err != nil {
				utils.CommandLogger.Error(utils.CommandNameService, err)
				return nil
			} else {
				utils.CommandLogger.OK(utils.CommandNameService, fmt.Sprintf("Write %s Service DTO File Successful!", cmdParams.Name))
			}
		}
	} else {
		utils.CommandLogger.Warning(utils.CommandNameService, fmt.Sprintf("%s Service DTO File Is Exist!", cmdParams.Name))
	}
	return nil
}
// IsServiceFileExist reports whether fileName exists on disk. Any Stat
// error — not-exist or otherwise — is treated as "does not exist", which
// matches the original behavior but without the redundant branches
// (the original returned false both for os.IsNotExist and as a fallthrough).
func IsServiceFileExist(fileName string) bool {
	_, err := os.Stat(fileName)
	return err == nil
}
|
package reflection
import (
"reflect"
"testing"
)
// TestStruct has a single exported, tagged field.
type TestStruct struct {
	ReflectTest string `test:"Test tag value"`
}

// TestMultiple has two exported fields sharing the same tag key.
type TestMultiple struct {
	ReflectTest  string `test:"Test tag value"`
	ReflectTest2 string `test:"Test tag value2"`
}

// TestUnexportedFails carries the tag on an unexported field, which the
// reflection helpers are expected to skip or reject.
type TestUnexportedFails struct {
	unexportedTest string `test:"tester"`
}
// A present tag yields exactly the one field carrying it.
func TestGetFieldNamesWithExistingTag(t *testing.T) {
	test := &TestStruct{
		ReflectTest: "Hello World",
	}
	fields, err := GetFieldNamesWithTag("test", reflect.TypeOf(test))
	if err != nil {
		t.Fail()
	}
	if len(fields) != 1 {
		t.Fail()
	}
}

// An absent tag yields no fields and no error.
func TestGetFieldNamesWithNonExistingTag(t *testing.T) {
	test := &TestStruct{
		ReflectTest: "Hello World",
	}
	fields, err := GetFieldNamesWithTag("foo", reflect.TypeOf(test))
	if err != nil {
		t.Fail()
	}
	if len(fields) != 0 {
		t.Fail()
	}
}

// A non-struct argument is rejected with an error.
func TestGetFieldNamesWithTagNonStructFails(t *testing.T) {
	_, err := GetFieldNamesWithTag("foo", reflect.TypeOf("bla"))
	if err == nil {
		t.Fail()
	}
}

// A tag on a known field yields its literal tag value.
func TestGetTagValueWithExistingTag(t *testing.T) {
	test := &TestStruct{
		ReflectTest: "Hello World",
	}
	val, err := GetTagValue("test", "ReflectTest", reflect.TypeOf(test))
	if err != nil {
		t.Fail()
	}
	if val != "Test tag value" {
		t.Fail()
	}
}

// A non-struct argument is rejected with the specific error message.
func TestGetTagValueWithNonStruct(t *testing.T) {
	_, err := GetTagValue("foo", "ReflectFoo", reflect.TypeOf("test"))
	if err == nil || err.Error() != "supplied argument is not a structure" {
		t.Fail()
	}
}

// An unknown field name is rejected with the specific error message.
func TestGetTagValueWithNonExistingField(t *testing.T) {
	test := &TestStruct{
		ReflectTest: "Hello World",
	}
	_, err := GetTagValue("foo", "ReflectFoo", reflect.TypeOf(test))
	if err == nil || err.Error() != "supplied field is not defined in the structure" {
		t.Fail()
	}
}

// A known field without the requested tag is rejected.
func TestGetTagValueWithNonExistingTag(t *testing.T) {
	test := &TestStruct{
		ReflectTest: "Hello World",
	}
	_, err := GetTagValue("foo", "ReflectTest", reflect.TypeOf(test))
	if err == nil || err.Error() != "supplied tag is not present on the field" {
		t.Fail()
	}
}

// GetTagValues on a non-struct is rejected with the specific message.
func TestGetTagValuesWithNonStruct(t *testing.T) {
	_, err := GetTagValues("foo", reflect.TypeOf("test"))
	if err == nil || err.Error() != "supplied argument is not a structure" {
		t.Fail()
	}
}

// An absent tag yields an empty value list and no error.
func TestGetTagValuesWithNonExistingTag(t *testing.T) {
	test := &TestStruct{
		ReflectTest: "Hello World",
	}
	values, err := GetTagValues("foo", reflect.TypeOf(test))
	if err != nil {
		t.Fail()
	}
	if len(values) != 0 {
		t.Fail()
	}
}

// Two fields with the same tag key yield two values.
func TestGetTagValuesWithMultipleTags(t *testing.T) {
	test := &TestMultiple{
		ReflectTest:  "Hello World",
		ReflectTest2: "Foo bar",
	}
	values, err := GetTagValues("test", reflect.TypeOf(test))
	if err != nil {
		t.Fail()
	}
	if len(values) != 2 {
		t.Fail()
	}
}

// A single tagged field yields one value.
func TestGetTagValuesWithExistingTag(t *testing.T) {
	test := &TestStruct{
		ReflectTest: "Hello World",
	}
	values, err := GetTagValues("test", reflect.TypeOf(test))
	if err != nil {
		t.Fail()
	}
	if len(values) != 1 {
		t.Fail()
	}
}

// Unexported fields are skipped when collecting values.
func TestGetTagValuesWithUnexportedTag(t *testing.T) {
	test := &TestUnexportedFails{
		unexportedTest: "Hello World",
	}
	values, err := GetTagValues("test", reflect.TypeOf(test))
	if err != nil {
		t.Fail()
	}
	if len(values) != 0 {
		t.Fail()
	}
}

// Unexported fields are skipped when collecting field names.
func TestGetFieldNamesWithUnexportedTag(t *testing.T) {
	test := &TestUnexportedFails{
		unexportedTest: "Hello World",
	}
	values, err := GetFieldNamesWithTag("test", reflect.TypeOf(test))
	if err != nil {
		t.Fail()
	}
	if len(values) != 0 {
		t.Fail()
	}
}

// Directly addressing an unexported field is rejected with the specific
// error message.
func TestGetTagValueWithUnexportedTag(t *testing.T) {
	test := &TestUnexportedFails{
		unexportedTest: "Hello World",
	}
	_, err := GetTagValue("test", "unexportedTest", reflect.TypeOf(test))
	if err == nil || err.Error() != "supplied field is not exported" {
		t.Fail()
	}
}
|
package sese
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document02700105 is the XML root wrapper for sese.027.001.05 messages.
type Document02700105 struct {
	XMLName xml.Name                                                 `xml:"urn:iso:std:iso:20022:tech:xsd:sese.027.001.05 Document"`
	Message *SecuritiesTransactionCancellationRequestStatusAdviceV05 `xml:"SctiesTxCxlReqStsAdvc"`
}

// AddMessage allocates the wrapped message and returns it for population.
func (d *Document02700105) AddMessage() *SecuritiesTransactionCancellationRequestStatusAdviceV05 {
	d.Message = new(SecuritiesTransactionCancellationRequestStatusAdviceV05)
	return d.Message
}
// Scope
// An account servicer sends an SecuritiesTransactionCancellationRequestStatusAdvice to an account owner to advise the status of a securities transaction cancellation request previously sent by the account owner.
// The account servicer/owner relationship may be:
// - a central securities depository or another settlement market infrastructure acting on behalf of their participants
// - an agent (sub-custodian) acting on behalf of their global custodian customer, or
// - a custodian acting on behalf of an investment management institution or a broker/dealer.
//
// Usage
// The message may also be used to:
// - re-send a message previously sent,
// - provide a third party with a copy of a message for information,
// - re-send to a third party a copy of a message for information
// using the relevant elements in the Business Application Header.
//
// This type maps the ISO 20022 sese.027.001.05 message body; all fields
// marshal to the short XML element names defined by the schema.
type SecuritiesTransactionCancellationRequestStatusAdviceV05 struct {
	// Reference to the unambiguous identification of the cancellation request as per the account owner.
	CancellationRequestReference *iso20022.Identification14 `xml:"CxlReqRef"`
	// Unambiguous identification of the transaction as known by the account servicer.
	TransactionIdentification *iso20022.TransactionIdentifications30 `xml:"TxId,omitempty"`
	// Provides details on the processing status of the request.
	ProcessingStatus *iso20022.ProcessingStatus54Choice `xml:"PrcgSts"`
	// Identifies the details of the transaction.
	TransactionDetails *iso20022.TransactionDetails80 `xml:"TxDtls,omitempty"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// AddCancellationRequestReference allocates and returns the reference field.
func (s *SecuritiesTransactionCancellationRequestStatusAdviceV05) AddCancellationRequestReference() *iso20022.Identification14 {
	s.CancellationRequestReference = new(iso20022.Identification14)
	return s.CancellationRequestReference
}

// AddTransactionIdentification allocates and returns the transaction id field.
func (s *SecuritiesTransactionCancellationRequestStatusAdviceV05) AddTransactionIdentification() *iso20022.TransactionIdentifications30 {
	s.TransactionIdentification = new(iso20022.TransactionIdentifications30)
	return s.TransactionIdentification
}

// AddProcessingStatus allocates and returns the processing status field.
func (s *SecuritiesTransactionCancellationRequestStatusAdviceV05) AddProcessingStatus() *iso20022.ProcessingStatus54Choice {
	s.ProcessingStatus = new(iso20022.ProcessingStatus54Choice)
	return s.ProcessingStatus
}

// AddTransactionDetails allocates and returns the transaction details field.
func (s *SecuritiesTransactionCancellationRequestStatusAdviceV05) AddTransactionDetails() *iso20022.TransactionDetails80 {
	s.TransactionDetails = new(iso20022.TransactionDetails80)
	return s.TransactionDetails
}

// AddSupplementaryData appends and returns a new supplementary data entry.
func (s *SecuritiesTransactionCancellationRequestStatusAdviceV05) AddSupplementaryData() *iso20022.SupplementaryData1 {
	newValue := new(iso20022.SupplementaryData1)
	s.SupplementaryData = append(s.SupplementaryData, newValue)
	return newValue
}
|
package main
import (
"math"
)
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// divide computes dividend/divisor truncated toward zero without using
// multiplication, division or modulo. Each round it subtracts the largest
// left-shifted multiple of the divisor that still fits, accumulating the
// matching power of two. The result is clamped to math.MaxInt32 to mimic
// 32-bit overflow semantics (e.g. MinInt32 / -1).
func divide(dividend int, divisor int) int {
	positive := (dividend < 0) == (divisor < 0)
	remaining, unit := abs(dividend), abs(divisor)
	quotient := 0
	for remaining >= unit {
		var shift uint = 0
		for remaining >= (unit << (shift + 1)) {
			shift++
		}
		quotient += 1 << shift
		remaining -= unit << shift
	}
	if !positive {
		quotient = -quotient
	}
	return min(quotient, math.MaxInt32)
}
|
package fysdk
import (
"encoding/json"
"fmt"
"log"
"net/http"
"strings"
pkgBean "webapi/bean"
)
// ISkeleton receives the callback once a payment has been persisted.
type ISkeleton interface {
	OnPaid(*pkgBean.AndroidPayment)
}

// FYSDKPaymentExt is the custom field carried in the payment notification
// (app_callback_ext), JSON-encoded by the game client.
type FYSDKPaymentExt struct {
	SKU string `json:"sku"`
}
// PayNotify handles the FYSDK payment callback. It verifies the request
// signature, decodes the optional custom ext payload, persists the payment
// and notifies the skeleton. The gateway expects the literal body "ok";
// duplicate orders are acknowledged with "ok" too so the gateway stops
// retrying.
func PayNotify(skeleton ISkeleton, w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	if ok := CheckSign(r.Form, GetPaySecret()); !ok {
		log.Printf("收到FYSDK订单验签失败: %v", r.Form)
		http.Error(w, "sign_failure", http.StatusBadRequest)
		return
	}
	var ext FYSDKPaymentExt
	if v := r.FormValue("app_callback_ext"); v != "" {
		if err := json.Unmarshal([]byte(v), &ext); err != nil {
			log.Printf("收到FYSDK订单: 自定义参数格式不合法")
			http.Error(w, "ext_invalid", http.StatusBadRequest)
			return
		}
	}
	bean := &pkgBean.AndroidPayment{
		OrderID:  r.FormValue("order_id"),                   // SDK order number
		UUID:     r.FormValue("uuid"),                       // SDK unique user id
		ZoneID:   parseInt32(r.FormValue("app_zone_id")),    // game zone id
		UID:      parseUint32(r.FormValue("app_player_id")), // game account
		SKU:      ext.SKU,                                   // product id
		Amount:   parseInt32(r.FormValue("pay_amount")),     // amount paid
		PayTime:  parseInt64(r.FormValue("pay_time")),       // when the order was paid
		Sandbox:  r.FormValue("sandbox") == "sandbox",       // test order?
		Happen:   parseInt64(r.FormValue("time")),           // event time
		Achieved: false,                                     // not yet claimed
	}
	if err := pkgBean.InsertAndroidPayment(bean); err != nil {
		if strings.HasPrefix(err.Error(), "Error 1062: Duplicate entry") {
			// Duplicate order: already recorded, acknowledge it.
			fmt.Fprintf(w, "ok")
			return
		}
		// Was "sign_failure" — copy-pasted from the signature branch; a
		// storage failure is a server-side error, not a bad signature.
		http.Error(w, "internal_error", http.StatusInternalServerError)
		return
	}
	skeleton.OnPaid(bean)
	fmt.Fprintf(w, "ok")
}
|
package syntax
import (
"fmt"
"testing"
"time"
)
// TestTimeoutSelect demonstrates a receive loop with a timeout branch:
// the producer only sends the first 10 values, so the receiver eventually
// starves and the time.After case fires, ending the loop.
func TestTimeoutSelect(t *testing.T) {
	c := make(chan int, 10)
	go func() {
		for i := 0; i < 30; i++ {
			if i < 10 {
				c <- i
			}
			fmt.Println("tick", i)
			time.Sleep(1 * time.Second)
		}
	}()
	for {
		timeout := false
		select {
		case <-time.After(10 * time.Second):
			fmt.Println("timeout") // fixed typo: was "timout"
			timeout = true
		case n := <-c:
			fmt.Println("got ", n)
		}
		if timeout {
			break
		}
	}
	time.Sleep(5 * time.Second)
}
|
package main
import (
"bufio"
"log"
"os"
"strconv"
"strings"
)
// main reads input.txt line by line and tallies how many lines satisfy
// the part-1 and part-2 password policies, logging both totals.
func main() {
	validPasswordsPart1 := 0
	validPasswordsPart2 := 0
	file, err := os.Open("input.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		// Each checker returns 1 for a valid line and 0 otherwise.
		validPasswordsPart1 = validPasswordsPart1 + CheckPassword(scanner.Text())
		validPasswordsPart2 = validPasswordsPart2 + CheckPassWordPartTwo(scanner.Text())
	}
	log.Printf("1st Part: %d", validPasswordsPart1)
	log.Printf("2nd Part: %d", validPasswordsPart2)
}
// CheckPassword validates a "low-high c: password" line against the
// part-1 policy: the letter c must occur between low and high times
// (inclusive) in the password. Returns 1 when valid, 0 otherwise.
func CheckPassword(text string) int {
	colon := strings.Index(text, ":")
	letter := text[colon-1 : colon]
	occurrences := strings.Count(text[colon+2:], letter)
	bounds := strings.Split(text[0:colon-2], "-")
	low, _ := strconv.Atoi(bounds[0])
	high, _ := strconv.Atoi(bounds[1])
	if occurrences > 0 && occurrences >= low && occurrences <= high {
		return 1
	}
	return 0
}
// CheckPassWordPartTwo validates a "p1-p2 c: password" line against the
// part-2 policy: exactly one of the (1-based) positions p1, p2 must hold
// the letter c. Returns 1 when valid, 0 otherwise.
func CheckPassWordPartTwo(text string) int {
	colon := strings.Index(text, ":")
	chars := strings.Split(text[colon+2:], "")
	key := text[colon-1 : colon]
	bounds := strings.Split(text[0:colon-2], "-")
	p1, _ := strconv.Atoi(bounds[0])
	p2, _ := strconv.Atoi(bounds[1])
	// XOR: valid when the positions disagree about holding the key.
	if (chars[p1-1] == key) != (chars[p2-1] == key) {
		return 1
	}
	return 0
}
|
package utils
import (
"encoding/base64"
"log"
"time"
"gocv.io/x/gocv"
)
// CaptureImg grabs a single frame from the default webcam (device 0) and
// returns it as a base64 "data:image/png" URI. Camera-open and encode
// failures are fatal.
func CaptureImg() string {
	webcam, err := gocv.OpenVideoCapture(0)
	if err != nil {
		log.Fatal(err)
	}
	defer webcam.Close()
	img := gocv.NewMat()
	// Brief delay before grabbing a frame; presumably lets the sensor
	// warm up / auto-expose — TODO confirm this is needed.
	time.Sleep(time.Millisecond * 100)
	// NOTE(review): Read's bool result is ignored, so an empty Mat would
	// be encoded if the grab fails; img is also never Closed (gocv Mats
	// hold C memory) — confirm both are acceptable here.
	webcam.Read(&img)
	data, err := gocv.IMEncode(".png", img)
	if err != nil {
		log.Fatal(err)
	}
	return "data:image/png;base64," + base64.StdEncoding.EncodeToString(data.GetBytes())
}
|
/*
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
*/
package main
import (
"fmt"
"time"
)
// main prints the Project Euler 19 answer: how many first-of-the-month
// Sundays fall in 1901..2000.
func main() {
	fmt.Println(solve(1901, 2000, time.January, time.December, time.Sunday))
}
// solve counts the months in years [y0, y1] and months [m0, m1] whose
// first day falls on weekday d.
func solve(y0, y1 int, m0, m1 time.Month, d time.Weekday) int {
	count := 0
	for year := y0; year <= y1; year++ {
		for month := m0; month <= m1; month++ {
			firstOfMonth := time.Date(year, month, 1, 0, 0, 0, 0, time.UTC)
			if firstOfMonth.Weekday() == d {
				count++
			}
		}
	}
	return count
}
|
package webauthnutil
import (
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/pomerium/pomerium/pkg/grpc/user"
)
// TestGetUserEntity verifies the fallback order for the entity's Name
// (email, then id) and DisplayName (explicit name, then email).
func TestGetUserEntity(t *testing.T) {
	t.Run("name as email", func(t *testing.T) {
		ue := GetUserEntity(&user.User{
			Id:    "test",
			Email: "test@example.com",
		})
		assert.Equal(t, "test@example.com", ue.Name)
	})
	t.Run("name as id", func(t *testing.T) {
		ue := GetUserEntity(&user.User{
			Id: "test",
		})
		assert.Equal(t, "test", ue.Name)
	})
	t.Run("displayName as name", func(t *testing.T) {
		ue := GetUserEntity(&user.User{
			Id:   "test",
			Name: "Test User",
		})
		assert.Equal(t, "Test User", ue.DisplayName)
	})
	t.Run("displayName as email", func(t *testing.T) {
		ue := GetUserEntity(&user.User{
			Id:    "test",
			Email: "test@example.com",
		})
		assert.Equal(t, "test@example.com", ue.DisplayName)
	})
}

// TestGetUserEntityID pins the derived entity id: it must be a valid UUID
// and stable for a given user id.
func TestGetUserEntityID(t *testing.T) {
	userID := "test@example.com"
	rawUserEntityID := GetUserEntityID(userID)
	userEntityUUID, err := uuid.FromBytes(rawUserEntityID)
	assert.NoError(t, err, "should return a UUID")
	assert.Equal(t, "8c0ac353-406f-5c08-845d-b72779779a42", userEntityUUID.String())
}
|
package main
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"os"
"os/exec"
"sync"
"time"
)
// start is the reference instant for -millis relative timestamps.
var start = time.Now()

// Command-line flags controlling how timestamps are rendered.
var format = flag.String("format", "default", "timestamp format")
var verbose = flag.Bool("verbose", false, "verbose output")
var tabs = flag.Bool("tabs", false, "use tabs rather than spaces after the timestamp")
var utc = flag.Bool("utc", false, "use utc timestamps instead of localtime ones.")
var millis = flag.Bool("millis", false, "calculate timestamps in milliseconds since program start.")
// TimeFormat selects one of the supported timestamp layouts.
type TimeFormat int

const (
	DEFAULT TimeFormat = iota
	ANSI
	RFC3339
	RFC3339Nano
)

// String returns the time.Format layout string for the selector.
// Panics on an unknown value (fromString is the only producer).
func (tf *TimeFormat) String() string {
	var res string
	switch *tf {
	case DEFAULT:
		// Was "03:04:05" — a 12-hour clock with no AM/PM marker, which
		// renders afternoon timestamps ambiguously. Use a 24-hour clock.
		res = "2006/01/02 15:04:05"
	case ANSI:
		res = time.ANSIC
	case RFC3339:
		res = time.RFC3339
	case RFC3339Nano:
		res = time.RFC3339Nano
	default:
		log.Panicf("Unexpected")
	}
	return res
}
// fromString sets tf from a format name, reporting whether the name is a
// known identifier. tf is left untouched on an unknown name.
func (tf *TimeFormat) fromString(s *string) bool {
	known := map[string]TimeFormat{
		"default":     DEFAULT,
		"ansi":        ANSI,
		"rfc3339":     RFC3339,
		"rfc3339nano": RFC3339Nano,
	}
	v, ok := known[*s]
	if ok {
		*tf = v
	}
	return ok
}
// TimestampedWriter is a writer that splits text on newlines and outputs lines one at the time, prepending each
// with a timestamp.
type TimestampedWriter struct {
	writer     io.Writer
	format     string // time.Format layout produced by TimeFormat.String
	utc        bool
	millis     bool
	tabs       bool
	incomplete []byte // trailing fragment of a previous Write that had no newline yet
}
// NewTimestampedWriter creates a new TimestampedWriter wrapping w,
// capturing the rendering options at construction time.
func NewTimestampedWriter(w io.Writer, timeFormat TimeFormat, utc *bool, millis *bool, tabs *bool) *TimestampedWriter {
	tsw := &TimestampedWriter{writer: w, format: timeFormat.String()}
	tsw.utc = *utc
	tsw.millis = *millis
	tsw.tabs = *tabs
	tsw.incomplete = []byte{}
	return tsw
}
// Write splits p on newlines and emits each complete line prefixed with a
// timestamp and separator. A trailing fragment without a newline is held
// in tsw.incomplete and flushed in front of the next complete line.
// Reports len(p) consumed on success, as io.Writer requires.
func (tsw *TimestampedWriter) Write(p []byte) (int, error) {
	lines := bytes.Split(p, []byte("\n"))
	last := lines[len(lines)-1] // fragment after the final newline ("" when p ends with \n)
	for _, line := range lines[:len(lines)-1] {
		now := time.Now()
		var timestamp string
		// Use the per-writer settings captured at construction time.
		// (Previously the global *millis/*utc flag pointers were read
		// directly, making the constructor arguments dead.)
		if tsw.millis {
			timestamp = fmt.Sprintf("%12.3fms", float64(now.Sub(start).Microseconds())/1000)
		} else {
			if tsw.utc {
				now = now.UTC()
			}
			timestamp = now.Format(tsw.format)
		}
		sep := "| "
		if tsw.tabs {
			sep = "|\t"
		}
		if _, err := tsw.writer.Write([]byte(timestamp)); err != nil {
			return 0, err
		}
		if _, err := tsw.writer.Write([]byte(sep)); err != nil {
			return 0, err
		}
		// Flush the fragment left over from a previous Write so it joins
		// the line it belongs to, then clear it. (Previously the current
		// call's fragment was stored inside this loop, which duplicated
		// it mid-stream and dropped fragments when p had no newline.)
		if len(tsw.incomplete) > 0 {
			if _, err := tsw.writer.Write(tsw.incomplete); err != nil {
				return 0, err
			}
			tsw.incomplete = tsw.incomplete[:0]
		}
		if _, err := tsw.writer.Write(line); err != nil {
			return 0, err
		}
		if _, err := tsw.writer.Write([]byte("\n")); err != nil {
			return 0, err
		}
	}
	// Retain the unterminated tail. Append copies the bytes, which matters
	// because p may be reused by the caller (io.Copy reuses its buffer).
	tsw.incomplete = append(tsw.incomplete, last...)
	return len(p), nil
}
// execute runs `name args...`, wrapping the child's stdout and stderr in
// timestamping writers, and blocks until both streams drain and the
// command exits. Any setup or command failure is fatal.
func execute(name string, args []string, tf TimeFormat) {
	var err error
	if *verbose {
		log.Printf("invoking command: %v, args: %v", name, args)
	}
	cmd := exec.Command(name, args...)
	stdoutIn, err := cmd.StdoutPipe()
	if err != nil {
		log.Fatalf("ERROR: could not connect to stdout pipe: %s", err)
	}
	stderrIn, err := cmd.StderrPipe()
	if err != nil {
		log.Fatalf("ERROR: could not connect to stderr pipe: %s", err)
	}
	stdout := NewTimestampedWriter(os.Stdout, tf, utc, millis, tabs)
	stderr := NewTimestampedWriter(os.Stderr, tf, utc, millis, tabs)
	err = cmd.Start()
	if err != nil {
		log.Fatalf("ERROR: could not start: '%s'\n", err)
	}
	// Drain both pipes fully before Wait, as os/exec requires.
	processStreams(stdout, stdoutIn, stderr, stderrIn)
	err = cmd.Wait()
	if err != nil {
		log.Fatalf("ERROR: command failed: %s", err)
	}
}
// processStreams copies the child's stdout and stderr concurrently into
// the timestamping writers, returning once both streams hit EOF.
// A copy failure is fatal.
func processStreams(stdout *TimestampedWriter, stdoutIn io.ReadCloser, stderr *TimestampedWriter, stderrIn io.ReadCloser) {
	var wg sync.WaitGroup
	pipe := func(dst *TimestampedWriter, src io.ReadCloser) {
		defer wg.Done()
		if _, err := io.Copy(dst, src); err != nil {
			log.Fatal(err)
		}
	}
	wg.Add(2)
	go pipe(stdout, stdoutIn)
	go pipe(stderr, stderrIn)
	wg.Wait()
}
// init strips date/time from the log package's own prefix (our output is
// already timestamped) and installs a custom usage message.
func init() {
	/* timestamps in logging can easily get confused with output */
	log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime))
	flag.CommandLine.Usage = func() {
		output := flag.CommandLine.Output()
		_, _ = fmt.Fprintf(output, "ts - run a command with timestamped output\n\n")
		_, _ = fmt.Fprintf(output, "usage:\n ts [ options ] cmd args...\n\n")
		_, _ = fmt.Fprintf(output, "options:\n")
		flag.PrintDefaults()
	}
}
// main parses flags, resolves the timestamp format and runs the wrapped
// command. Exits 1 with usage when no command is given.
func main() {
	flag.Parse()
	if *millis && *utc {
		log.Printf("WARNING: -utc will be ignored when -millis is specified.")
	}
	var tf TimeFormat
	if !tf.fromString(format) {
		// log.Fatalf formats directly; the log.Fatal(fmt.Sprintf(...))
		// indirection was redundant.
		log.Fatalf("illegal time format identifier: %v", *format)
	}
	cliArgs := flag.Args()
	if len(cliArgs) < 1 {
		flag.CommandLine.Usage()
		os.Exit(1)
	}
	execute(cliArgs[0], cliArgs[1:], tf)
}
|
package goauth
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"io"
"strings"
"time"
)
// TokenType is the type of the token and defines how it
// must be used in order to authenticate requests.
type TokenType string

const (
	// TokenTypeBearer is the bearer token type.
	TokenTypeBearer TokenType = "bearer"
	// TokenTypeMac is the mac token type.
	TokenTypeMac TokenType = "mac"
)

var (
	// DefaultTokenExpiry is the default duration for which a newly issued
	// token remains valid.
	DefaultTokenExpiry = time.Hour
	// DefaultTokenType is the default token type that should be used when creating new tokens.
	DefaultTokenType = TokenTypeBearer
	// NewToken is a utility method for generating a new token that can be overridden in testing.
	NewToken = newToken
)
// newToken generates 24 bytes of cryptographic randomness and returns it
// URL-safe base64 encoded as a Secret.
func newToken() (Secret, error) {
	raw := make([]byte, 24)
	// crypto/rand.Read fills the whole slice or returns an error.
	if _, err := rand.Read(raw); err != nil {
		return "", err
	}
	return Secret(base64.URLEncoding.EncodeToString(raw)), nil
}
// Grant represents an authorization grant consisting of an access token, an optional refresh token
// and additional fields containing details of the authentication session.
type Grant struct {
	AccessToken  Secret
	TokenType    TokenType
	ExpiresIn    time.Duration // lifetime of the access token
	RefreshToken Secret        // optional
	IDToken      Secret        // optional OpenID Connect id token
	Scope        []string
	CreatedAt    time.Time // when the grant was issued
}
// IsExpired returns true if the grant has expired, else it returns false.
func (g *Grant) IsExpired() bool {
	// ExpiresIn is already a time.Duration (Write serializes it with
	// ExpiresIn.Seconds()); the previous code multiplied it by
	// time.Second again, inflating the lifetime by a factor of 1e9.
	return !g.CreatedAt.Add(g.ExpiresIn).After(timeNow())
}
// CheckScope verifies that every entry of requiredScope is present in the
// grant's scope, returning ErrorAccessDenied on the first one missing.
func (g *Grant) CheckScope(requiredScope []string) error {
	for _, required := range requiredScope {
		if checkInScope(required, g.Scope) {
			continue
		}
		return ErrorAccessDenied
	}
	return nil
}
// checkInScope checks whether check is present in scope returning a bool.
func checkInScope(check string, scope []string) bool {
for _, s := range scope {
if s == check {
return true
}
}
return false
}
// Write marshals the Grant into JSON, including only the required fields and writes it
// to the provided io.Writer. It is used to return Grants in an http response.
// Write marshals the Grant into JSON, including only the required fields
// (access_token, token_type, expires_in, plus refresh_token, scope and
// id_token when present), and writes it to the provided io.Writer. It is
// used to return Grants in an http response.
func (g *Grant) Write(w io.Writer) error {
	payload := map[string]interface{}{
		"access_token": g.AccessToken,
		"token_type":   g.TokenType,
		"expires_in":   g.ExpiresIn.Seconds(),
	}
	if g.RefreshToken != "" {
		payload["refresh_token"] = g.RefreshToken
	}
	if g.Scope != nil {
		payload["scope"] = strings.Join(g.Scope, " ")
	}
	if g.IDToken != "" {
		payload["id_token"] = g.IDToken
	}
	return json.NewEncoder(w).Encode(payload)
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cli
import (
"bytes"
"context"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile/dryrun"
"github.com/oam-dev/kubevela/pkg/utils/common"
cmdutil "github.com/oam-dev/kubevela/pkg/utils/util"
)
// LiveDiffCmdOptions contains the live-diff cmd options
type LiveDiffCmdOptions struct {
	cmdutil.IOStreams
	// ApplicationFile is the local application manifest to diff (-f).
	ApplicationFile string
	// DefinitionFile is a file or directory of capability definitions used
	// only for dry-run rendering, never applied to the cluster (-d).
	DefinitionFile string
	// AppName is the in-cluster application name (first positional argument).
	AppName string
	// Namespace is the namespace the application lives in.
	Namespace string
	// Revision is the first (or only) application revision to compare (-r).
	Revision string
	// SecondaryRevision is the second revision when "-r rev1,rev2" is given.
	SecondaryRevision string
	// Context is the number of unchanged lines printed around changes;
	// -1 means print all unchanged lines (-c).
	Context int
}
// NewLiveDiffCommand creates `live-diff` command
func NewLiveDiffCommand(c common.Args, order string, ioStreams cmdutil.IOStreams) *cobra.Command {
o := &LiveDiffCmdOptions{IOStreams: ioStreams}
cmd := &cobra.Command{
Use: "live-diff",
DisableFlagsInUseLine: true,
Short: "Compare application and revisions.",
Long: "Compare application and revisions.",
Example: "# compare the current application and the running revision\n" +
"> vela live-diff my-app\n" +
"# compare the current application and the specified revision\n" +
"> vela live-diff my-app --revision my-app-v1\n" +
"# compare two application revisions\n" +
"> vela live-diff --revision my-app-v1,my-app-v2\n" +
"# compare the application file and the specified revision\n" +
"> vela live-diff -f my-app.yaml -r my-app-v1 --context 10",
Annotations: map[string]string{
types.TagCommandOrder: order,
types.TagCommandType: types.TypeApp,
},
Args: cobra.RangeArgs(0, 1),
RunE: func(cmd *cobra.Command, args []string) (err error) {
o.Namespace, err = GetFlagNamespaceOrEnv(cmd, c)
if err != nil {
return err
}
if err = o.loadAndValidate(args); err != nil {
return err
}
buff, err := LiveDiffApplication(o, c)
if err != nil {
return err
}
cmd.Println(buff.String())
return nil
},
}
cmd.Flags().StringVarP(&o.ApplicationFile, "file", "f", "", "application file name")
cmd.Flags().StringVarP(&o.DefinitionFile, "definition", "d", "", "specify a file or directory containing capability definitions, they will only be used in dry-run rather than applied to K8s cluster")
cmd.Flags().StringVarP(&o.Revision, "revision", "r", "", "specify one or two application revision name(s), by default, it will compare with the latest revision")
cmd.Flags().IntVarP(&o.Context, "context", "c", -1, "output number lines of context around changes, by default show all unchanged lines")
addNamespaceAndEnvArg(cmd)
return cmd
}
// LiveDiffApplication can return user what would change if upgrade an application.
// LiveDiffApplication can return user what would change if upgrade an
// application: it renders the diff between a local application file (or an
// in-cluster application) and an application revision, and returns the
// formatted report in a buffer.
//
// When no application file is supplied, the comparison is delegated to
// renderlessDiff (app-vs-revision or revision-vs-revision). Otherwise the
// local file is compared against either the named revision or, by default,
// the application's latest in-cluster revision.
func LiveDiffApplication(cmdOption *LiveDiffCmdOptions, c common.Args) (bytes.Buffer, error) {
	var buff = bytes.Buffer{}
	newClient, err := c.GetClient()
	if err != nil {
		return buff, err
	}
	// Capability definitions are loaded only for dry-run rendering; they
	// are never applied to the cluster.
	var objs []*unstructured.Unstructured
	if cmdOption.DefinitionFile != "" {
		objs, err = ReadDefinitionsFromFile(cmdOption.DefinitionFile)
		if err != nil {
			return buff, err
		}
	}
	pd, err := c.GetPackageDiscover()
	if err != nil {
		return buff, err
	}
	config, err := c.GetConfig()
	if err != nil {
		return buff, err
	}
	liveDiffOption := dryrun.NewLiveDiffOption(newClient, config, pd, objs)
	if cmdOption.ApplicationFile == "" {
		return cmdOption.renderlessDiff(newClient, liveDiffOption)
	}
	app, err := readApplicationFromFile(cmdOption.ApplicationFile)
	if err != nil {
		return buff, errors.WithMessagef(err, "read application file: %s", cmdOption.ApplicationFile)
	}
	if app.Namespace == "" {
		app.SetNamespace(cmdOption.Namespace)
	}
	appRevision := &v1beta1.ApplicationRevision{}
	if cmdOption.Revision != "" {
		// get the Revision if user specifies
		if err := newClient.Get(context.Background(),
			client.ObjectKey{Name: cmdOption.Revision, Namespace: app.Namespace}, appRevision); err != nil {
			return buff, errors.Wrapf(err, "cannot get application Revision %q", cmdOption.Revision)
		}
	} else {
		// get the latest Revision of the application
		livingApp := &v1beta1.Application{}
		if err := newClient.Get(context.Background(),
			client.ObjectKey{Name: app.Name, Namespace: app.Namespace}, livingApp); err != nil {
			return buff, errors.Wrapf(err, "cannot get application %q", app.Name)
		}
		if livingApp.Status.LatestRevision == nil {
			// .status.latestRevision is nil, that means the app has not
			// been rendered yet, so there is nothing to diff against.
			return buff, fmt.Errorf("the application %q has no Revision in the cluster", app.Name)
		}
		latestRevName := livingApp.Status.LatestRevision.Name
		// Fix: report the revision actually being fetched (latestRevName);
		// cmdOption.Revision is always empty in this branch, so the old
		// message printed an empty name.
		if err := newClient.Get(context.Background(),
			client.ObjectKey{Name: latestRevName, Namespace: app.Namespace}, appRevision); err != nil {
			return buff, errors.Wrapf(err, "cannot get application Revision %q", latestRevName)
		}
	}
	diffResult, err := liveDiffOption.Diff(context.Background(), app, appRevision)
	if err != nil {
		return buff, errors.WithMessage(err, "cannot calculate diff")
	}
	reportDiffOpt := dryrun.NewReportDiffOption(cmdOption.Context, &buff)
	reportDiffOpt.PrintDiffReport(diffResult)
	return buff, nil
}
// loadAndValidate parses the positional application name and the --revision
// flag (one or two comma-separated revisions) and rejects unsupported
// option combinations.
func (o *LiveDiffCmdOptions) loadAndValidate(args []string) error {
	if len(args) != 0 {
		o.AppName = args[0]
	}
	revisions := strings.Split(o.Revision, ",")
	switch len(revisions) {
	case 1:
		o.Revision = revisions[0]
	case 2:
		o.Revision, o.SecondaryRevision = revisions[0], revisions[1]
	default:
		return errors.Errorf("cannot use more than 2 revisions")
	}
	hasTwoRevisions := len(revisions) == 2
	switch {
	case o.AppName == "" && !hasTwoRevisions && o.ApplicationFile == "":
		return errors.Errorf("either application name or application file must be set")
	case (o.AppName != "" || hasTwoRevisions) && o.ApplicationFile != "":
		return errors.Errorf("cannot set application name and application file at the same time")
	case o.AppName != "" && o.SecondaryRevision != "":
		return errors.Errorf("cannot use application name and two revisions at the same time")
	case o.SecondaryRevision != "" && o.ApplicationFile != "":
		return errors.Errorf("cannot use application file and two revisions at the same time")
	}
	return nil
}
// renderlessDiff diffs without rendering a local application file. Three
// modes are supported, driven by which options are set:
//   - AppName only: the in-cluster application vs. its latest revision;
//   - AppName + Revision: the in-cluster application vs. that revision;
//   - Revision + SecondaryRevision: the two named revisions against each other.
// The rendered diff report is returned in a buffer.
func (o *LiveDiffCmdOptions) renderlessDiff(cli client.Client, option *dryrun.LiveDiffOption) (bytes.Buffer, error) {
	var base, comparor dryrun.LiveDiffObject
	ctx := context.Background()
	var buf bytes.Buffer
	if o.AppName != "" {
		app := &v1beta1.Application{}
		if err := cli.Get(ctx, client.ObjectKey{Name: o.AppName, Namespace: o.Namespace}, app); err != nil {
			return buf, errors.Wrapf(err, "cannot get application %s/%s", o.Namespace, o.AppName)
		}
		base = dryrun.LiveDiffObject{Application: app}
		// Default the revision to the application's latest one.
		if o.Revision == "" {
			if app.Status.LatestRevision == nil {
				return buf, errors.Errorf("no latest application revision available for application %s/%s", o.Namespace, o.AppName)
			}
			o.Revision = app.Status.LatestRevision.Name
		}
	}
	rev, secondaryRev := &v1beta1.ApplicationRevision{}, &v1beta1.ApplicationRevision{}
	if err := cli.Get(ctx, client.ObjectKey{Name: o.Revision, Namespace: o.Namespace}, rev); err != nil {
		return buf, errors.Wrapf(err, "cannot get application revision %s/%s", o.Namespace, o.Revision)
	}
	if o.SecondaryRevision == "" {
		// App-vs-revision: base was set above from the live application.
		comparor = dryrun.LiveDiffObject{ApplicationRevision: rev}
	} else {
		// Revision-vs-revision: the first revision becomes the base,
		// overriding any application base set above (validation guarantees
		// AppName and SecondaryRevision are mutually exclusive).
		if err := cli.Get(ctx, client.ObjectKey{Name: o.SecondaryRevision, Namespace: o.Namespace}, secondaryRev); err != nil {
			return buf, errors.Wrapf(err, "cannot get application revision %s/%s", o.Namespace, o.SecondaryRevision)
		}
		base = dryrun.LiveDiffObject{ApplicationRevision: rev}
		comparor = dryrun.LiveDiffObject{ApplicationRevision: secondaryRev}
	}
	diffResult, err := option.RenderlessDiff(ctx, base, comparor)
	if err != nil {
		return buf, errors.WithMessage(err, "cannot calculate diff")
	}
	reportDiffOpt := dryrun.NewReportDiffOption(o.Context, &buf)
	reportDiffOpt.PrintDiffReport(diffResult)
	return buf, nil
}
|
package pkg
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// NewSugaredLogger builds a console-encoded zap SugaredLogger at Info level
// that writes log output to stdout and internal errors to stderr, with
// caller annotation and stacktraces disabled.
func NewSugaredLogger() *zap.SugaredLogger {
	// log level
	level := zap.NewAtomicLevel()
	level.SetLevel(zapcore.InfoLevel)
	// logger configuration
	myConfig := zap.Config{
		Level:             level,
		Development:       false,
		DisableCaller:     true,
		DisableStacktrace: true,
		Sampling:          nil,
		Encoding:          "console",
		EncoderConfig: zapcore.EncoderConfig{
			MessageKey: "Msg",
			LevelKey:   "Level",
			TimeKey:    "Time",
			NameKey:    "Name",
			CallerKey:  "Caller",
			//FunctionKey: "",
			StacktraceKey: "St",
			//LineEnding: "",
			EncodeLevel:    zapcore.CapitalColorLevelEncoder,
			EncodeTime:     zapcore.ISO8601TimeEncoder,
			EncodeDuration: zapcore.StringDurationEncoder,
			EncodeCaller:   zapcore.ShortCallerEncoder,
			//EncodeName: nil,
			//ConsoleSeparator: "",
		},
		OutputPaths:      []string{"stdout"},
		ErrorOutputPaths: []string{"stderr"},
		//InitialFields: nil,
	}
	logger, err := myConfig.Build()
	if err != nil {
		// The configuration above is static, so Build should never fail.
		// Previously the error was discarded, which would have returned a
		// nil logger and crashed at first use; panicking here surfaces the
		// misconfiguration immediately instead.
		panic(err)
	}
	return logger.Sugar()
}
|
package core
import (
"fmt"
"strings"
"github.com/chirino/graphql"
"github.com/chirino/graphql/resolvers"
"github.com/chirino/graphql/schema"
"github.com/dosco/graphjin/core/internal/sdata"
"github.com/dosco/graphjin/core/internal/util"
)
// typeMap maps lower-cased SQL column type names to their GraphQL scalar
// equivalents. Types not listed here fall back to "String" (see getGQLType
// and getGQLTypeFunc).
var typeMap map[string]string = map[string]string{
	"smallint":         "Int",
	"integer":          "Int",
	"bigint":           "Int",
	"smallserial":      "Int",
	"serial":           "Int",
	"bigserial":        "Int",
	"decimal":          "Float",
	"numeric":          "Float",
	"real":             "Float",
	"double precision": "Float",
	"money":            "Float",
	"boolean":          "Boolean",
}
// expInfo describes one filter operator exposed in the generated
// "<Type>Expression" input objects (see expList and addExpressions).
type expInfo struct {
	// name is the GraphQL field name of the operator; vtype, when non-empty,
	// overrides the argument's scalar type (empty means "same as the column").
	name, vtype string
	// list marks operators whose argument is a list of values.
	list bool
	// desc is the human-readable description; db is reserved for a
	// database-specific name (unused in this list).
	desc, db string
}

const (
	likeDesc       = "Value matching pattern where '%' represents zero or more characters and '_' represents a single character. Eg. '_r%' finds values having 'r' in second position"
	notLikeDesc    = "Value not matching pattern where '%' represents zero or more characters and '_' represents a single character. Eg. '_r%' finds values not having 'r' in second position"
	iLikeDesc      = "Value matching (case-insensitive) pattern where '%' represents zero or more characters and '_' represents a single character. Eg. '_r%' finds values having 'r' in second position"
	notILikeDesc   = "Value not matching (case-insensitive) pattern where '%' represents zero or more characters and '_' represents a single character. Eg. '_r%' finds values not having 'r' in second position"
	similarDesc    = "Value matching regex pattern. Similar to the 'like' operator but with support for regex. Pattern must match entire value."
	notSimilarDesc = "Value not matching regex pattern. Similar to the 'like' operator but with support for regex. Pattern must not match entire value."
)
// expList enumerates every filter operator (including long-form aliases such
// as "equals" for "eq") added to each generated expression input type by
// addExpressions.
var expList []expInfo = []expInfo{
	{"eq", "", false, "Equals value", ""},
	{"equals", "", false, "Equals value", ""},
	{"neq", "", false, "Does not equal value", ""},
	{"not_equals", "", false, "Does not equal value", ""},
	{"gt", "", false, "Is greater than value", ""},
	{"greater_than", "", false, "Is greater than value", ""},
	{"lt", "", false, "Is lesser than value", ""},
	{"lesser_than", "", false, "Is lesser than value", ""},
	{"gte", "", false, "Is greater than or equals value", ""},
	{"greater_or_equals", "", false, "Is greater than or equals value", ""},
	{"lte", "", false, "Is lesser than or equals value", ""},
	{"lesser_or_equals", "", false, "Is lesser than or equals value", ""},
	{"in", "", true, "Is in list of values", ""},
	{"nin", "", true, "Is not in list of values", ""},
	{"not_in", "", true, "Is not in list of values", ""},
	{"like", "String", false, likeDesc, ""},
	{"nlike", "String", false, notLikeDesc, ""},
	{"not_like", "String", false, notLikeDesc, ""},
	{"ilike", "String", false, iLikeDesc, ""},
	{"nilike", "String", false, notILikeDesc, ""},
	{"not_ilike", "String", false, notILikeDesc, ""},
	{"similar", "String", false, similarDesc, ""},
	{"nsimilar", "String", false, notSimilarDesc, ""},
	{"not_similar", "String", false, notSimilarDesc, ""},
	{"regex", "String", false, "Value matches regex pattern", ""},
	{"nregex", "String", false, "Value not matching regex pattern", ""},
	{"not_regex", "String", false, "Value not matching regex pattern", ""},
	{"iregex", "String", false, "Value matches (case-insensitive) regex pattern", ""},
	{"niregex", "String", false, "Value not matching (case-insensitive) regex pattern", ""},
	{"not_iregex", "String", false, "Value not matching (case-insensitive) regex pattern", ""},
	{"has_key", "", false, "JSON value contains this key", ""},
	{"has_key_any", "", true, "JSON value contains any of these keys", ""},
	{"has_key_all", "", true, "JSON value contains all of these keys", ""},
	{"contains", "", false, "JSON value matches any of they key/value pairs", ""},
	{"contained_in", "", false, "JSON value contains all of they key/value pairs", ""},
	{"is_null", "Boolean", false, "Is value null (true) or not null (false)", ""},
}
// funcInfo describes a SQL function exposed as a generated output field
// named "<func>_<column>" (see addFuncs).
type funcInfo struct {
	// name is the function prefix; desc is its description (a fmt template
	// taking the column name for the lists below); db is reserved for a
	// database-specific name (unused here).
	name, desc, db string
}

// funcCount is the row-count aggregate.
var funcCount = funcInfo{"count", "Count the number of rows", ""}

// funcListNum are the aggregate functions offered for numeric (Int/Float)
// columns; each desc is a fmt template taking the column name.
var funcListNum []funcInfo = []funcInfo{
	{"avg", "Calculate an average %s", ""},
	{"max", "Find the maximum %s", ""},
	{"min", "Find the minimum %s", ""},
	{"stddev", "Calculate the standard deviation of %s values", ""},
	{"stddev_pop", "Calculate the population standard deviation of %s values", ""},
	{"stddev_samp", "Calculate the sample standard deviation of %s values", ""},
	{"variance", "Calculate the sample variance of %s values", ""},
	{"var_samp", "Calculate the sample variance of %s values", ""},
	{"var_pop", "Calculate the population sample variance of %s values", ""},
}

// funcListString are the transform functions offered for String columns.
var funcListString []funcInfo = []funcInfo{
	{"length", "Calculate the length of %s", ""},
	{"lower", "Convert %s to lowercase", ""},
	{"upper", "Convert %s to uppercase", ""},
}
// intro builds the introspectable GraphQL schema from the database schema.
// It embeds the target schema.Schema being populated and the source
// sdata.DBSchema being read.
type intro struct {
	*schema.Schema
	*sdata.DBSchema
	gj *graphjin
	// query, mutation and subscription are the three schema entry points.
	query        *schema.Object
	mutation     *schema.Object
	subscription *schema.Object
	// exptNeeded records which scalar type names need a "<Type>Expression"
	// input object generated by addExpressions.
	exptNeeded map[string]bool
}
// initGraphQLEgine builds the introspection GraphQL engine from the
// database schema. It is a development-time aid and is skipped entirely in
// production mode.
//
// NOTE(review): the method name has a typo ("Egine"); it is not renamed
// here because callers outside this file would need updating too.
func (gj *graphjin) initGraphQLEgine() error {
	if gj.prod {
		return nil
	}
	engine := graphql.New()
	in := &intro{
		Schema:       engine.Schema,
		DBSchema:     gj.schema,
		gj:           gj,
		query:        &schema.Object{Name: "Query", Fields: schema.FieldList{}},
		mutation:     &schema.Object{Name: "Mutation", Fields: schema.FieldList{}},
		subscription: &schema.Object{Name: "Subscribe", Fields: schema.FieldList{}},
		exptNeeded:   map[string]bool{},
	}
	// Register the three root objects both as named types and as the
	// schema's entry points.
	in.Types[in.query.Name] = in.query
	in.Types[in.mutation.Name] = in.mutation
	in.Types[in.subscription.Name] = in.subscription
	in.EntryPoints[schema.Query] = in.query
	in.EntryPoints[schema.Mutation] = in.mutation
	in.EntryPoints[schema.Subscription] = in.subscription
	// Shared scalar/enum types used by the generated per-table types.
	in.Types["OrderDirection"] = &schema.Enum{Name: "OrderDirection", Values: []*schema.EnumValue{
		{
			Name: "asc",
			Desc: schema.NewDescription("Ascending"),
		}, {
			Name: "desc",
			Desc: schema.NewDescription("Descending"),
		},
	}}
	in.Types["Cursor"] = &schema.Scalar{
		Name: "Cursor",
		Desc: schema.NewDescription("A cursor is an encoded string use for pagination"),
	}
	// Tables must be added before expressions: addTables records which
	// scalar expression types are needed in in.exptNeeded.
	if err := in.addTables(); err != nil {
		return err
	}
	in.addExpressions()
	in.addDirectives()
	if err := in.ResolveTypes(); err != nil {
		return err
	}
	engine.Resolver = resolvers.Func(revolverFunc)
	gj.ge = engine
	return nil
}
// revolverFunc resolves a request by trying the metadata resolver first and
// falling back to the method resolver (which the metadata resolver relies
// on); it returns nil when neither produces a resolution.
func revolverFunc(request *resolvers.ResolveRequest, next resolvers.Resolution) resolvers.Resolution {
	if resolution := resolvers.MetadataResolver.Resolve(request, next); resolution != nil {
		return resolution
	}
	// needed by the MetadataResolver
	return resolvers.MethodResolver.Resolve(request, next)
}
// addTables registers every known table in both plural and singular form,
// then wires up relationship fields, and finally registers table aliases
// (plural form only).
func (in *intro) addTables() error {
	for _, t := range in.GetTables() {
		for _, singular := range []bool{false, true} {
			if err := in.addTable(t.Name, t, singular); err != nil {
				return err
			}
		}
	}
	// Relationships are added in a second pass so that every table's
	// output type already exists.
	for _, t := range in.GetTables() {
		if err := in.addRels(t.Name, t); err != nil {
			return err
		}
	}
	for alias, t := range in.GetAliases() {
		if err := in.addTable(alias, t, false); err != nil {
			return err
		}
	}
	return nil
}
// func (in *intro) addToTable(name, desc string, ti sdata.DBTable) {
// k := name + "Output"
// var ot *schema.Object = in.Types[k].(*schema.Object)
// ot.Fields = append(ot.Fields, &schema.Field{
// Name: ti.Name,
// Type: &schema.TypeName{Name: ti.Name + "Output"},
// Desc: schema.NewDescription(desc),
// })
// }
// addTable generates the four GraphQL types for a single database table —
// "<name>Output", "<name>Input", "<name>OrderBy" and "<name>Expression" —
// and then populates them column by column via addColumn. When singular is
// true the table is registered again under its singular name.
func (in *intro) addTable(name string, ti sdata.DBTable, singular bool) error {
	// Blocked or column-less tables are not exposed at all.
	if ti.Blocked {
		return nil
	}
	if len(ti.Columns) == 0 {
		return nil
	}
	if singular {
		name = name + in.SingularSuffix
	}
	if in.gj.conf.EnableCamelcase {
		name = util.ToCamel(name)
	}
	// outputType
	ot := &schema.Object{
		Name: name + "Output", Fields: schema.FieldList{},
	}
	in.Types[ot.Name] = ot
	// inputType
	it := &schema.InputObject{
		Name: name + "Input", Fields: schema.InputValueList{},
	}
	in.Types[it.Name] = it
	// orderByType
	obt := &schema.InputObject{
		Name: name + "OrderBy", Fields: schema.InputValueList{},
	}
	in.Types[obt.Name] = obt
	// Self-referencing list field on the output type (non-null list of
	// non-null rows).
	ot.Fields = append(ot.Fields, &schema.Field{
		Name: name,
		Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: name + "Output"}}}},
	})
	// expressionType, pre-seeded with the boolean composition operators;
	// per-column operators are appended by addColumn.
	exptName := name + "Expression"
	expt := &schema.InputObject{
		Name: exptName,
		Fields: schema.InputValueList{
			&schema.InputValue{
				Name: "and",
				Type: &schema.TypeName{Name: exptName},
			},
			&schema.InputValue{
				Name: "or",
				Type: &schema.TypeName{Name: exptName},
			},
			&schema.InputValue{
				Name: "not",
				Type: &schema.TypeName{Name: exptName},
			},
		},
	}
	in.Types[expt.Name] = expt
	for _, col := range ti.Columns {
		in.addColumn(name, ti, col, it, obt, expt, ot, singular)
	}
	return nil
}
// addRels adds fields to the table's output object for every first- and
// second-degree related table, letting queries traverse relationships.
// A related table is skipped when it is blocked or when it has no output
// type (e.g. it was blocked or column-less in addTable).
//
// NOTE(review): the two loops below are identical except for their source
// map; they could be folded into one helper once the return type of
// GetFirstDegree/GetSecondDegree is confirmed.
func (in *intro) addRels(name string, ti sdata.DBTable) error {
	relTables1, err := in.GetFirstDegree(ti.Schema, ti.Name)
	if err != nil {
		return err
	}
	for k, t := range relTables1 {
		k1 := t.Name + "Output"
		if _, ok := in.Types[k1]; !ok {
			continue
		}
		if t.Blocked {
			continue
		}
		ot, ok := in.Types[name+"Output"].(*schema.Object)
		if !ok {
			continue
		}
		ot.Fields = append(ot.Fields, &schema.Field{
			Name: k,
			Type: &schema.TypeName{Name: k1},
		})
	}
	relTables2, err := in.GetSecondDegree(ti.Schema, ti.Name)
	if err != nil {
		return err
	}
	for k, t := range relTables2 {
		k1 := t.Name + "Output"
		if _, ok := in.Types[k1]; !ok {
			continue
		}
		if t.Blocked {
			continue
		}
		ot, ok := in.Types[name+"Output"].(*schema.Object)
		if !ok {
			continue
		}
		ot.Fields = append(ot.Fields, &schema.Field{
			Name: k,
			Type: &schema.TypeName{Name: k1},
		})
	}
	return nil
}
// addDirectives declares the custom executable directives supported by the
// query engine (@object, @through, @script, @notRelated, @cacheControl and
// @validation) so they appear in introspection.
func (in *intro) addDirectives() {
	in.DeclaredDirectives["object"] = &schema.DirectiveDecl{
		Name: "object",
		Desc: schema.NewDescription("Directs the executor to change the return type from a list to a object. All but the first entry of the list will be truncated"),
		Locs: []string{"FIELD"},
	}
	in.DeclaredDirectives["through"] = &schema.DirectiveDecl{
		Name: "through",
		Desc: schema.NewDescription("Directs the executor to use the specified table as a join-table to connect this field and it's parent"),
		Locs: []string{"FIELD"},
		Args: schema.InputValueList{
			{
				Name: "table",
				Desc: schema.NewDescription("Table name"),
				Type: &schema.TypeName{Name: "String"},
			},
		},
	}
	in.DeclaredDirectives["script"] = &schema.DirectiveDecl{
		Name: "script",
		Desc: schema.NewDescription("Script the executor to use run specified script against this GraphQL request"),
		Locs: []string{"QUERY", "MUTATION", "SUBSCRIPTION"},
		Args: schema.InputValueList{
			{
				Name: "name",
				Desc: schema.NewDescription("Script name"),
				Type: &schema.TypeName{Name: "String"},
			},
		},
	}
	in.DeclaredDirectives["notRelated"] = &schema.DirectiveDecl{
		Name: "notRelated",
		Desc: schema.NewDescription("Directs the executor to treat this selector as if it were the top-level selector with no relation to its parent"),
		Locs: []string{"FIELD"},
	}
	in.DeclaredDirectives["cacheControl"] = &schema.DirectiveDecl{
		Name: "cacheControl",
		Desc: schema.NewDescription("Directs the executor to set the cache-control header when GET (APQ) requests are used for the query"),
		Locs: []string{"QUERY", "MUTATION", "SUBSCRIPTION"},
		Args: schema.InputValueList{
			{
				Name: "maxAge",
				Desc: schema.NewDescription("The maximum amount of time (in seconds) a resource is considered fresh"),
				Type: &schema.TypeName{Name: "Int"},
			},
			{
				Name: "scope",
				Desc: schema.NewDescription("Set to 'public' when any cache can store the data and 'private' when only the browser cache should"),
				Type: &schema.TypeName{Name: "String"},
			},
		},
	}
	in.DeclaredDirectives["validation"] = &schema.DirectiveDecl{
		Name: "validation",
		Desc: schema.NewDescription("Checks all variables for validation"),
		Locs: []string{"QUERY", "MUTATION", "SUBSCRIPTION"},
		Args: schema.InputValueList{
			{
				Name: "cue",
				Desc: schema.NewDescription("Use Cue [https://cuelang.org/] schema to validate variables"),
				Type: &schema.TypeName{Name: "String"},
			},
		},
	}
}
// addColumn wires one database column into every GraphQL type generated for
// its table: the output object, the insert input object, the order-by input
// object and the filter-expression input object. It also records that an
// expression type for the column's scalar is needed (see addExpressions)
// and delegates function fields and query arguments to addFuncs/addArgs.
func (in *intro) addColumn(
	tableName string,
	ti sdata.DBTable, col sdata.DBColumn,
	it, obt, expt *schema.InputObject, ot *schema.Object, singular bool) {
	colName := col.Name
	// Blocked columns are hidden entirely.
	if col.Blocked {
		return
	}
	if in.gj.conf.EnableCamelcase {
		colName = util.ToCamel(colName)
	}
	// "true" requests the ID scalar for primary-key columns.
	colType, typeName := getGQLType(col, true)
	ot.Fields = append(ot.Fields, &schema.Field{
		Name: colName,
		Type: colType,
	})
	in.addFuncs(colName, typeName, colType, ti, col, it, obt, expt, ot, singular)
	in.addArgs(tableName, ti, col, it, obt, expt, ot, singular)
	it.Fields = append(it.Fields, &schema.InputValue{
		Name: colName,
		Type: colType,
	})
	obt.Fields = append(obt.Fields, &schema.InputValue{
		Name: colName,
		Type: &schema.TypeName{Name: "OrderDirection"},
	})
	// Remember that a "<typeName>Expression" input type must be generated.
	in.exptNeeded[typeName] = true
	expt.Fields = append(expt.Fields, &schema.InputValue{
		Name: colName,
		Type: &schema.TypeName{Name: typeName + "Expression"},
	})
}
// addFuncs adds function fields named "<func>_<column>" to the table's
// output object for the given column: a row-count aggregate on primary-key
// columns, numeric aggregates (avg, max, ...) on Int/Float columns, string
// helpers (length, lower, upper) on String columns, a per-column count, and
// any database functions whose first parameter matches the column's type.
//
// Fixes a bug where the numeric and string loops built every field name
// from funcCount.name, so all of those fields came out as "count_<column>"
// instead of "avg_<column>", "length_<column>", etc.
func (in *intro) addFuncs(
	colName string,
	typeName string,
	colType schema.Type,
	ti sdata.DBTable, col sdata.DBColumn,
	it, obt, expt *schema.InputObject, ot *schema.Object, singular bool) {
	if in.gj.conf.DisableFuncs {
		return
	}
	// addField appends "<fnName>_<colName>" (camel-cased when configured) to
	// the output type; an empty desc leaves the field undocumented.
	addField := func(fnName string, t schema.Type, desc string) {
		fn := fnName + "_" + colName
		if in.gj.conf.EnableCamelcase {
			fn = util.ToCamel(fn)
		}
		f := &schema.Field{Name: fn, Type: t}
		if desc != "" {
			f.Desc = schema.NewDescription(desc)
		}
		ot.Fields = append(ot.Fields, f)
	}
	if col.PrimaryKey && !in.gj.conf.DisableAgg {
		addField(funcCount.name, colType, funcCount.desc)
	}
	// No functions on foreign key columns
	if col.FKeyCol != "" {
		return
	}
	// Numeric aggregates.
	if !in.gj.conf.DisableAgg && (typeName == "Float" || typeName == "Int") {
		for _, v := range funcListNum {
			addField(v.name, colType, fmt.Sprintf(v.desc, colName))
		}
	}
	// String transforms.
	if typeName == "String" {
		for _, v := range funcListString {
			addField(v.name, colType, fmt.Sprintf(v.desc, colName))
		}
	}
	addField(funcCount.name, colType, funcCount.desc)
	// Database functions whose first parameter type matches the column type.
	for _, f := range in.GetFunctions() {
		if len(f.Params) == 0 {
			// Guard: functions without parameters cannot match a column.
			continue
		}
		fnType, paramTypeName := getGQLTypeFunc(f.Params[0])
		_, colTypeName := getGQLType(col, false)
		if paramTypeName != colTypeName {
			continue
		}
		addField(f.Name, fnType, "")
	}
}
// addArgs registers the table's query, subscription and mutation root
// fields along with their arguments: pagination/filter/ordering arguments
// on plural fields, an "id" lookup on singular fields, and the
// insert/update/upsert/delete arguments on the mutation field.
func (in *intro) addArgs(
	name string,
	ti sdata.DBTable, col sdata.DBColumn,
	it, obt, expt *schema.InputObject, ot *schema.Object, singular bool) {
	otName := &schema.TypeName{Name: ot.Name}
	itName := &schema.TypeName{Name: it.Name}
	// Plural (list) forms of the output/input types.
	potName := &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: ot.Name}}}
	pitName := &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: it.Name}}}
	var args schema.InputValueList
	if !singular {
		args = schema.InputValueList{
			&schema.InputValue{
				Desc: schema.NewDescription("Sort or order results. Use key 'asc' for ascending and 'desc' for descending"),
				Name: "order_by",
				Type: &schema.TypeName{Name: obt.Name},
			},
			&schema.InputValue{
				Desc: schema.NewDescription("Filter results based on column values or values of columns in related tables"),
				Name: "where",
				Type: &schema.TypeName{Name: expt.Name},
			},
			&schema.InputValue{
				Desc: schema.NewDescription("Limit the number of returned rows"),
				Name: "limit",
				Type: &schema.TypeName{Name: "Int"},
			},
			&schema.InputValue{
				Desc: schema.NewDescription("Offset the number of returned rows (Not efficient for pagination, please use a cursor for that)"),
				Name: "offset",
				Type: &schema.TypeName{Name: "Int"},
			},
			&schema.InputValue{
				Desc: schema.NewDescription("Number of rows to return from the top. Combine with 'after' or 'before' arguments for cursor pagination"),
				Name: "first",
				Type: &schema.TypeName{Name: "Int"},
			},
			&schema.InputValue{
				Desc: schema.NewDescription("Number of rows to return from the bottom. Combine with 'after' or 'before' arguments for cursor pagination"),
				Name: "last",
				Type: &schema.TypeName{Name: "Int"},
			},
			&schema.InputValue{
				Desc: schema.NewDescription("Pass the cursor to this argument for backward pagination"),
				Name: "before",
				Type: &schema.TypeName{Name: "Cursor"},
			},
			&schema.InputValue{
				Desc: schema.NewDescription("Pass the cursor to this argument for forward pagination"),
				Name: "after",
				Type: &schema.TypeName{Name: "Cursor"},
			},
		}
		// NOTE(review): this adds the "search" argument only when the table
		// has NO full-text columns (len == 0). That looks inverted — confirm
		// against sdata.DBTable.FullText semantics whether this should be
		// len(ti.FullText) != 0.
		if len(ti.FullText) == 0 {
			args = append(args, &schema.InputValue{
				Desc: schema.NewDescription("Performs a full text search"),
				Name: "search",
				Type: &schema.TypeName{Name: "String"},
			})
		}
	}
	if ti.PrimaryCol.Name != "" && singular {
		colType, _ := getGQLType(ti.PrimaryCol, true)
		args = append(args, &schema.InputValue{
			Desc: schema.NewDescription("Finds the record by the primary key"),
			Name: "id",
			Type: colType,
		})
	}
	// Singular fields return a single row; plural fields return a list.
	if singular {
		in.query.Fields = append(in.query.Fields, &schema.Field{
			//Desc: schema.NewDescription(""),
			Name: name,
			Type: otName,
			Args: args,
		})
		in.subscription.Fields = append(in.subscription.Fields, &schema.Field{
			//Desc: schema.NewDescription(""),
			Name: name,
			Type: otName,
			Args: args,
		})
	} else {
		in.query.Fields = append(in.query.Fields, &schema.Field{
			//Desc: schema.NewDescription(""),
			Name: name,
			Type: potName,
			Args: args,
		})
		in.subscription.Fields = append(in.subscription.Fields, &schema.Field{
			//Desc: schema.NewDescription(""),
			Name: name,
			Type: potName,
			Args: args,
		})
	}
	// Mutations accept all of the query arguments plus the write operations.
	mutationArgs := append(args, schema.InputValueList{
		&schema.InputValue{
			Desc: schema.NewDescription(fmt.Sprintf("Insert row into table %s", name)),
			Name: "insert",
			Type: pitName,
		},
		&schema.InputValue{
			Desc: schema.NewDescription(fmt.Sprintf("Update row in table %s", name)),
			Name: "update",
			Type: itName,
		},
		&schema.InputValue{
			Desc: schema.NewDescription(fmt.Sprintf("Update or Insert row in table %s", name)),
			Name: "upsert",
			Type: itName,
		},
		&schema.InputValue{
			Desc: schema.NewDescription(fmt.Sprintf("Delete row from table %s", name)),
			Name: "delete",
			Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Boolean"}},
		},
	}...)
	in.mutation.Fields = append(in.mutation.Fields, &schema.Field{
		Name: name,
		Args: mutationArgs,
		Type: potName,
	})
}
// addExpressions materializes a "<Type>Expression" input object for every
// scalar type recorded in exptNeeded, giving each the full operator list
// from expList (boolean and/or/not composition is added per-table in
// addTable).
func (in *intro) addExpressions() {
	// scalarExpressionTypesNeeded
	for typeName := range in.exptNeeded {
		fields := make(schema.InputValueList, 0, len(expList))
		for _, op := range expList {
			// An empty vtype means the operator takes the column's own scalar.
			argType := op.vtype
			if argType == "" {
				argType = typeName
			}
			var t schema.Type = &schema.TypeName{Name: argType}
			if op.list {
				t = &schema.List{OfType: t}
			}
			fields = append(fields, &schema.InputValue{
				Name: op.name,
				Desc: schema.NewDescription(op.desc),
				Type: t,
			})
		}
		exprName := typeName + "Expression"
		in.Types[exprName] = &schema.InputObject{
			Name:   exprName,
			Fields: fields,
		}
	}
}
// getGQLType maps a database column to its GraphQL type and scalar name.
// Primary-key columns become "ID" when id is true; SQL types not in typeMap
// fall back to "String"; array columns are wrapped in a list.
func getGQLType(col sdata.DBColumn, id bool) (schema.Type, string) {
	// Strip any size/array suffix, e.g. "varchar(255)" -> "varchar".
	base := strings.ToLower(col.Type)
	if i := strings.IndexAny(base, "(["); i != -1 {
		base = base[:i]
	}
	typeName := "String"
	if col.PrimaryKey && id {
		typeName = "ID"
	} else if mapped, ok := typeMap[base]; ok {
		typeName = mapped
	}
	var t schema.Type = &schema.TypeName{Name: typeName}
	if col.Array {
		t = &schema.List{OfType: t}
	}
	// if col.NotNull {
	// 	t = &schema.NonNull{OfType: t}
	// }
	return t, typeName
}
// getGQLTypeFunc maps a database function parameter to its GraphQL type and
// scalar name, falling back to "String" for SQL types not in typeMap.
func getGQLTypeFunc(col sdata.DBFuncParam) (schema.Type, string) {
	// Strip any size/array suffix, e.g. "numeric(10,2)" -> "numeric".
	base := strings.ToLower(col.Type)
	if i := strings.IndexAny(base, "(["); i != -1 {
		base = base[:i]
	}
	typeName, ok := typeMap[base]
	if !ok {
		typeName = "String"
	}
	var t schema.Type = &schema.TypeName{Name: typeName}
	// if col.NotNull {
	// 	t = &schema.NonNull{OfType: t}
	// }
	return t, typeName
}
|
package mongodb
import (
"context"
"fmt"
"time"
"github.com/brigadecore/brigade/v2/apiserver/internal/api"
"github.com/brigadecore/brigade/v2/apiserver/internal/lib/mongodb"
"github.com/brigadecore/brigade/v2/apiserver/internal/meta"
"github.com/pkg/errors"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// workersStore is a MongoDB-based implementation of the api.WorkersStore
// interface.
type workersStore struct {
	// collection is the backing "events" collection; worker state is stored
	// embedded in each event document under the "worker" field.
	collection mongodb.Collection
}
// NewWorkersStore returns a MongoDB-based implementation of the
// api.WorkersStore interface.
// NewWorkersStore returns a MongoDB-based implementation of the
// api.WorkersStore interface backed by the "events" collection.
func NewWorkersStore(database *mongo.Database) (api.WorkersStore, error) {
	store := &workersStore{
		collection: database.Collection("events"),
	}
	return store, nil
}
// UpdateStatus sets the worker status embedded in the event document
// identified by eventID. It returns meta.ErrNotFound when no such event
// exists.
func (w *workersStore) UpdateStatus(
	ctx context.Context,
	eventID string,
	status api.WorkerStatus,
) error {
	filter := bson.M{"id": eventID}
	update := bson.M{
		"$set": bson.M{
			"worker.status": status,
		},
	}
	res, err := w.collection.UpdateOne(ctx, filter, update)
	if err != nil {
		return errors.Wrapf(
			err,
			"error updating status of event %q worker",
			eventID,
		)
	}
	if res.MatchedCount == 0 {
		return &meta.ErrNotFound{
			Type: api.EventKind,
			ID:   eventID,
		}
	}
	return nil
}
// UpdateHashedToken stores the worker's hashed token on the event document
// identified by eventID. It returns meta.ErrNotFound when no such event
// exists.
func (w *workersStore) UpdateHashedToken(
	ctx context.Context,
	eventID string,
	hashedToken string,
) error {
	filter := bson.M{"id": eventID}
	update := bson.M{
		"$set": bson.M{
			"worker.hashedToken": hashedToken,
		},
	}
	res, err := w.collection.UpdateOne(ctx, filter, update)
	if err != nil {
		return errors.Wrapf(
			err,
			"error updating event %q worker hashed token",
			eventID,
		)
	}
	if res.MatchedCount == 0 {
		return &meta.ErrNotFound{
			Type: api.EventKind,
			ID:   eventID,
		}
	}
	return nil
}
// Timeout marks the identified event's worker as timed out, but only if the
// worker is currently starting or running. In the same atomic write, jobs
// embedded in the event are transitioned as well: pending jobs become
// canceled and starting/running jobs become aborted. It returns
// meta.ErrConflict when the event does not exist or is not in a
// timeout-able phase.
func (w *workersStore) Timeout(
	ctx context.Context,
	eventID string,
) error {
	timedOutTime := time.Now().UTC()
	res, err := w.collection.UpdateOne(
		ctx,
		// Match only while the worker is starting or running so the update
		// is a no-op — reported below as a conflict — for terminal phases.
		bson.M{
			"id": eventID,
			"worker.status.phase": bson.M{
				"$in": []api.WorkerPhase{
					api.WorkerPhaseStarting,
					api.WorkerPhaseRunning,
				},
			},
		},
		// $[pending] and $[startingOrRunning] are positional placeholders
		// resolved by the ArrayFilters option below.
		bson.M{
			"$set": bson.M{
				"worker.status.ended":                 timedOutTime,
				"worker.status.phase":                 api.WorkerPhaseTimedOut, // nolint: lll
				"worker.jobs.$[pending].status.phase": api.JobPhaseCanceled,
				"worker.jobs.$[startingOrRunning].status.phase": api.JobPhaseAborted,
			},
		},
		&options.UpdateOptions{
			ArrayFilters: &options.ArrayFilters{
				Filters: []interface{}{
					bson.M{
						"pending.status.phase": api.JobPhasePending,
					},
					bson.M{
						"startingOrRunning.status.phase": bson.M{
							"$in": []api.JobPhase{
								api.JobPhaseStarting,
								api.JobPhaseRunning,
							},
						},
					},
				},
			},
		},
	)
	if err != nil {
		return errors.Wrapf(
			err,
			"error updating status of event %q worker",
			eventID,
		)
	}
	if res.MatchedCount == 0 {
		return &meta.ErrConflict{
			Type: api.EventKind,
			ID:   eventID,
			Reason: fmt.Sprintf(
				"Event %q was not timed out "+
					"because it was not in a starting or running state.",
				eventID,
			),
		}
	}
	return nil
}
|
package databroker
import (
"context"
"sort"
"sync"
"time"
"golang.org/x/exp/maps"
"github.com/pomerium/pomerium/config"
"github.com/pomerium/pomerium/internal/hashutil"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/internal/telemetry/metrics"
"github.com/pomerium/pomerium/internal/telemetry/trace"
"github.com/pomerium/pomerium/pkg/cryptutil"
"github.com/pomerium/pomerium/pkg/grpc"
configpb "github.com/pomerium/pomerium/pkg/grpc/config"
"github.com/pomerium/pomerium/pkg/grpc/databroker"
"github.com/pomerium/pomerium/pkg/grpcutil"
)
// ConfigSource provides a new Config source that decorates an underlying config with
// configuration derived from the data broker.
type ConfigSource struct {
	mu                     sync.RWMutex // guards the fields below
	outboundGRPCConnection *grpc.CachedOutboundGRPClientConn
	computedConfig         *config.Config      // underlying config merged with databroker settings/routes
	underlyingConfig       *config.Config      // most recent config from the wrapped source
	dbConfigs              map[string]dbConfig // databroker config records, keyed by record id
	updaterHash            uint64              // hash of the connection options the running updater was started with
	cancel                 func()              // stops the currently running syncer, if any
	config.ChangeDispatcher
}
// dbConfig couples a databroker config record's payload with the record
// version it was read at (the version is reported to metrics on rebuild).
type dbConfig struct {
	*configpb.Config
	version uint64
}
// NewConfigSource creates a new ConfigSource.
//
// Listeners are registered before the underlying source is wired up, so they
// observe every change dispatched by rebuild. The initial build runs with
// firstTime(true), which suppresses the change notification for the very
// first computed config.
func NewConfigSource(ctx context.Context, underlying config.Source, listeners ...config.ChangeListener) *ConfigSource {
	src := &ConfigSource{
		dbConfigs:              map[string]dbConfig{},
		outboundGRPCConnection: new(grpc.CachedOutboundGRPClientConn),
	}
	for _, li := range listeners {
		src.OnConfigChange(ctx, li)
	}
	underlying.OnConfigChange(ctx, func(ctx context.Context, cfg *config.Config) {
		// Store a copy of the new config under the write lock, then release
		// it before rebuilding: rebuild acquires src.mu itself.
		src.mu.Lock()
		src.underlyingConfig = cfg.Clone()
		src.mu.Unlock()
		src.rebuild(ctx, firstTime(false))
	})
	src.underlyingConfig = underlying.GetConfig()
	src.rebuild(ctx, firstTime(true))
	return src
}
// GetConfig returns the most recently computed configuration.
func (src *ConfigSource) GetConfig() *config.Config {
	src.mu.RLock()
	cfg := src.computedConfig
	src.mu.RUnlock()
	return cfg
}
// firstTime reports whether rebuild is running for the initial load; on the
// first build no change event is dispatched to listeners (see rebuild).
type firstTime bool
// rebuild recomputes the combined configuration: it clones the underlying
// config, applies settings from every databroker config record (in sorted
// record-id order), appends the records' routes as additional policies, and
// stores the result as the computed config. Unless this is the first build,
// listeners are notified of the change.
func (src *ConfigSource) rebuild(ctx context.Context, firstTime firstTime) {
	_, span := trace.StartSpan(ctx, "databroker.config_source.rebuild")
	defer span.End()
	src.mu.Lock()
	defer src.mu.Unlock()
	cfg := src.underlyingConfig.Clone()
	// start the updater
	src.runUpdater(cfg)
	// seen maps a route ID to the db config id it came from ("" for policies
	// already present in the underlying config); used for de-duplication.
	seen := map[uint64]string{}
	for _, policy := range cfg.Options.GetAllPolicies() {
		id, err := policy.RouteID()
		if err != nil {
			// NOTE(review): an invalid policy aborts the whole rebuild here,
			// leaving the previously computed config in place.
			log.Warn(ctx).Err(err).
				Str("policy", policy.String()).
				Msg("databroker: invalid policy config, ignoring")
			return
		}
		seen[id] = ""
	}
	var additionalPolicies []config.Policy
	// iterate db configs in deterministic (sorted) order
	ids := maps.Keys(src.dbConfigs)
	sort.Strings(ids)
	certsIndex := cryptutil.NewCertificatesIndex()
	for _, cert := range cfg.Options.GetX509Certificates() {
		certsIndex.Add(cert)
	}
	// add all the config policies to the list
	for _, id := range ids {
		cfgpb := src.dbConfigs[id]
		cfg.Options.ApplySettings(ctx, certsIndex, cfgpb.Settings)
		var errCount uint64
		err := cfg.Options.Validate()
		if err != nil {
			// a db config that breaks validation rejects the whole rebuild
			metrics.SetDBConfigRejected(ctx, cfg.Options.Services, id, cfgpb.version, err)
			return
		}
		for _, routepb := range cfgpb.GetRoutes() {
			policy, err := config.NewPolicyFromProto(routepb)
			if err != nil {
				errCount++
				log.Warn(ctx).Err(err).
					Str("db_config_id", id).
					Msg("databroker: error converting protobuf into policy")
				continue
			}
			err = policy.Validate()
			if err != nil {
				errCount++
				log.Warn(ctx).Err(err).
					Str("db_config_id", id).
					Str("policy", policy.String()).
					Msg("databroker: invalid policy, ignoring")
				continue
			}
			routeID, err := policy.RouteID()
			if err != nil {
				errCount++
				log.Warn(ctx).Err(err).
					Str("db_config_id", id).
					Str("policy", policy.String()).
					Msg("databroker: cannot establish policy route ID, ignoring")
				continue
			}
			if _, ok := seen[routeID]; ok {
				errCount++
				// NOTE(review): err is nil on this path, so Err(err) adds nothing.
				log.Warn(ctx).Err(err).
					Str("db_config_id", id).
					Str("seen-in", seen[routeID]).
					Str("policy", policy.String()).
					Msg("databroker: duplicate policy detected, ignoring")
				continue
			}
			seen[routeID] = id
			additionalPolicies = append(additionalPolicies, *policy)
		}
		metrics.SetDBConfigInfo(ctx, cfg.Options.Services, id, cfgpb.version, int64(errCount))
	}
	// add the additional policies here since calling `Validate` will reset them.
	cfg.Options.AdditionalPolicies = append(cfg.Options.AdditionalPolicies, additionalPolicies...)
	src.computedConfig = cfg
	if !firstTime {
		src.Trigger(ctx, cfg)
	}
	metrics.SetConfigInfo(ctx, cfg.Options.Services, "databroker", cfg.Checksum(), true)
}
// runUpdater (re)starts the databroker syncer whenever the outbound
// connection options derived from cfg change. It is called from rebuild with
// src.mu already held, so it may mutate updaterHash and cancel safely.
func (src *ConfigSource) runUpdater(cfg *config.Config) {
	sharedKey, _ := cfg.Options.GetSharedKey()
	connectionOptions := &grpc.OutboundOptions{
		OutboundPort:   cfg.OutboundPort,
		InstallationID: cfg.Options.InstallationID,
		ServiceName:    cfg.Options.Services,
		SignedJWTKey:   sharedKey,
	}
	h, err := hashutil.Hash(connectionOptions)
	if err != nil {
		log.Fatal().Err(err).Send()
	}
	// nothing changed, so don't restart the updater
	if src.updaterHash == h {
		return
	}
	src.updaterHash = h
	// stop any previously running syncer before starting a new one
	if src.cancel != nil {
		src.cancel()
		src.cancel = nil
	}
	// the syncer outlives this call, so it gets its own cancelable context
	ctx := context.Background()
	ctx, src.cancel = context.WithCancel(ctx)
	cc, err := src.outboundGRPCConnection.Get(ctx, connectionOptions)
	if err != nil {
		log.Error(ctx).Err(err).Msg("databroker: failed to create gRPC connection to data broker")
		return
	}
	client := databroker.NewDataBrokerServiceClient(cc)
	syncer := databroker.NewSyncer("databroker", &syncerHandler{
		client: client,
		src:    src,
	}, databroker.WithTypeURL(grpcutil.GetTypeURL(new(configpb.Config))),
		databroker.WithFastForward())
	go func() {
		var databrokerURLs []string
		urls, _ := cfg.Options.GetDataBrokerURLs()
		for _, url := range urls {
			databrokerURLs = append(databrokerURLs, url.String())
		}
		log.Info(ctx).
			Str("outbound_port", cfg.OutboundPort).
			Strs("databroker_urls", databrokerURLs).
			Msg("config: starting databroker config source syncer")
		// give the connection up to 10s to become ready before syncing
		_ = grpc.WaitForReady(ctx, cc, time.Second*10)
		_ = syncer.Run(ctx)
	}()
}
// syncerHandler receives databroker sync callbacks and applies record
// changes to the owning ConfigSource.
type syncerHandler struct {
	src    *ConfigSource
	client databroker.DataBrokerServiceClient
}
// GetDataBrokerServiceClient returns the client the syncer uses to read
// config records from the data broker.
func (s *syncerHandler) GetDataBrokerServiceClient() databroker.DataBrokerServiceClient {
	return s.client
}
// ClearRecords drops every cached databroker-derived config.
func (s *syncerHandler) ClearRecords(_ context.Context) {
	s.src.mu.Lock()
	defer s.src.mu.Unlock()
	s.src.dbConfigs = make(map[string]dbConfig)
}
// UpdateRecords applies a batch of databroker record changes to the cached
// db configs and then rebuilds the combined configuration.
func (s *syncerHandler) UpdateRecords(ctx context.Context, _ uint64, records []*databroker.Record) {
	if len(records) == 0 {
		return
	}
	s.src.mu.Lock()
	for _, record := range records {
		// a deleted record removes the cached config for that record id
		if record.GetDeletedAt() != nil {
			delete(s.src.dbConfigs, record.GetId())
			continue
		}
		var cfgpb configpb.Config
		err := record.GetData().UnmarshalTo(&cfgpb)
		if err != nil {
			// an undecodable record is treated like a deletion
			log.Warn(ctx).Err(err).Msg("databroker: error decoding config")
			delete(s.src.dbConfigs, record.GetId())
			continue
		}
		s.src.dbConfigs[record.GetId()] = dbConfig{&cfgpb, record.Version}
	}
	// unlock before rebuilding: rebuild acquires s.src.mu itself, so calling
	// it while holding the lock would deadlock.
	s.src.mu.Unlock()
	s.src.rebuild(ctx, firstTime(false))
}
|
package controller
import (
"context"
"github.com/labstack/echo"
"mix/test/api/admin/common"
codes "mix/test/codes"
transaction "mix/test/pb/core/transaction"
"mix/test/utils/api"
)
// CreateHotWithdraw binds the request payload and forwards it to the
// transaction service's CreateHotWithdraw RPC.
func CreateHotWithdraw(c echo.Context) error {
	var in transaction.CreateHotWithdrawInput
	if err := c.Bind(&in); err != nil {
		return api.Error(c, codes.ParseDataError, err)
	}
	out, err := common.Dispatcher.GetTransaction().CreateHotWithdraw(context.Background(), &in)
	if err != nil {
		return api.Error(c, codes.InnerServiceError, err)
	}
	return api.Success(c, out)
}
// GetHotWithdraw binds the request payload and forwards it to the
// transaction service's GetHotWithdraw RPC.
func GetHotWithdraw(c echo.Context) error {
	var in transaction.GetHotWithdrawInput
	if err := c.Bind(&in); err != nil {
		return api.Error(c, codes.ParseDataError, err)
	}
	out, err := common.Dispatcher.GetTransaction().GetHotWithdraw(context.Background(), &in)
	if err != nil {
		return api.Error(c, codes.InnerServiceError, err)
	}
	return api.Success(c, out)
}
// GetHotWithdrawList binds the (empty) request payload and forwards it to
// the transaction service's GetHotWithdrawList RPC.
func GetHotWithdrawList(c echo.Context) error {
	var in transaction.Empty
	if err := c.Bind(&in); err != nil {
		return api.Error(c, codes.ParseDataError, err)
	}
	out, err := common.Dispatcher.GetTransaction().GetHotWithdrawList(context.Background(), &in)
	if err != nil {
		return api.Error(c, codes.InnerServiceError, err)
	}
	return api.Success(c, out)
}
// RemoveHotWithdraw binds the request payload and forwards it to the
// transaction service's RemoveHotWithdraw RPC.
func RemoveHotWithdraw(c echo.Context) error {
	var in transaction.RemoveHotWithdrawInput
	if err := c.Bind(&in); err != nil {
		return api.Error(c, codes.ParseDataError, err)
	}
	out, err := common.Dispatcher.GetTransaction().RemoveHotWithdraw(context.Background(), &in)
	if err != nil {
		return api.Error(c, codes.InnerServiceError, err)
	}
	return api.Success(c, out)
}
// UpdateHotWithdraw binds the request payload and forwards it to the
// transaction service's UpdateHotWithdraw RPC.
func UpdateHotWithdraw(c echo.Context) error {
	var in transaction.UpdateHotWithdrawInput
	if err := c.Bind(&in); err != nil {
		return api.Error(c, codes.ParseDataError, err)
	}
	out, err := common.Dispatcher.GetTransaction().UpdateHotWithdraw(context.Background(), &in)
	if err != nil {
		return api.Error(c, codes.InnerServiceError, err)
	}
	return api.Success(c, out)
}
// GetHotWithdrawByMerchantId binds the request payload and forwards it to
// the transaction service's GetHotWithdrawByMerchantId RPC.
func GetHotWithdrawByMerchantId(c echo.Context) error {
	var in transaction.GetHotWithdrawByMerchantIdInput
	if err := c.Bind(&in); err != nil {
		return api.Error(c, codes.ParseDataError, err)
	}
	out, err := common.Dispatcher.GetTransaction().GetHotWithdrawByMerchantId(context.Background(), &in)
	if err != nil {
		return api.Error(c, codes.InnerServiceError, err)
	}
	return api.Success(c, out)
}
// UpdateHotWithdrawByMerchantId binds the request payload and forwards it to
// the transaction service's UpdateHotWithdrawByMerchantId RPC.
func UpdateHotWithdrawByMerchantId(c echo.Context) error {
	var in transaction.UpdateHotWithdrawByMerchantIdInput
	if err := c.Bind(&in); err != nil {
		return api.Error(c, codes.ParseDataError, err)
	}
	out, err := common.Dispatcher.GetTransaction().UpdateHotWithdrawByMerchantId(context.Background(), &in)
	if err != nil {
		return api.Error(c, codes.InnerServiceError, err)
	}
	return api.Success(c, out)
}
// RemoveHotWithdrawByMerchantId binds the request payload and forwards it to
// the transaction service's RemoveHotWithdrawByMerchantId RPC.
func RemoveHotWithdrawByMerchantId(c echo.Context) error {
	var in transaction.RemoveHotWithdrawByMerchantIdInput
	if err := c.Bind(&in); err != nil {
		return api.Error(c, codes.ParseDataError, err)
	}
	out, err := common.Dispatcher.GetTransaction().RemoveHotWithdrawByMerchantId(context.Background(), &in)
	if err != nil {
		return api.Error(c, codes.InnerServiceError, err)
	}
	return api.Success(c, out)
}
|
package app
import (
"fmt"
api "github.com/Percona-Lab/percona-xtradb-cluster-operator/pkg/apis/pxc/v1alpha1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PVCs builds the single PersistentVolumeClaim the pod needs, sized and
// classed according to vspec. It fails if vspec.Size is not a valid
// Kubernetes resource quantity.
func PVCs(name string, vspec *api.PodVolumeSpec) ([]corev1.PersistentVolumeClaim, error) {
	storage, err := resource.ParseQuantity(vspec.Size)
	if err != nil {
		return nil, fmt.Errorf("wrong storage resources: %v", err)
	}
	claim := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			StorageClassName: vspec.StorageClass,
			AccessModes:      vspec.AccessModes,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: storage,
				},
			},
		},
	}
	return []corev1.PersistentVolumeClaim{claim}, nil
}
|
package domain
// Resource is the unit of work to be processed.
// NOTE(review): the Payload/Pattern semantics are inferred from the field
// names only — confirm against the code that consumes this type.
type Resource struct {
	ID      uint   `json:"id"`
	Payload string `json:"payload"`
	Pattern string `json:"pattern"`
}
|
package runtimehelper
import (
"runtime"
)
// callerName returns the fully-qualified function name at the given stack
// depth. skip follows runtime.Caller semantics: 0 identifies callerName
// itself, 1 its caller, and so on. It returns "" when the stack is not that
// deep.
func callerName(skip int) string {
	pc, _, _, ok := runtime.Caller(skip)
	if ok {
		return runtime.FuncForPC(pc).Name()
	}
	return ""
}

// CallerName returns the name of the function that called CallerName.
//
// Bug fix: runtime.Caller counts frames starting from callerName, so
// reaching CallerName's caller requires skipping two frames (callerName and
// CallerName itself). The previous skip of 1 always returned the name of
// CallerName, never its caller.
func CallerName() string {
	return callerName(2)
}

// CallerCallerName returns the name of the caller of CallerCallerName's
// caller (two levels above the invoking function). The skip of 3 accounts
// for callerName, CallerCallerName, and the immediate caller.
func CallerCallerName() string {
	return callerName(3)
}
|
//+build test
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package node
import (
"context"
"encoding/json"
"log"
"os/exec"
"regexp"
"strings"
"time"
"github.com/Azure/aks-engine/test/e2e/kubernetes/pod"
"github.com/Azure/aks-engine/test/e2e/kubernetes/util"
"github.com/pkg/errors"
)
const (
	// ServerVersion is the regular expression used to parse the API server
	// version out of `kubectl version` output (the version is capture group 2).
	ServerVersion = `(Server Version:\s)+(.*)`
)
// Node represents the kubernetes Node Resource
type Node struct {
	Status   Status   `json:"status"`
	Metadata Metadata `json:"metadata"`
	Spec     Spec     `json:"spec"`
}

// Metadata contains things like name, creation timestamp, labels and annotations
type Metadata struct {
	Name        string            `json:"name"`
	CreatedAt   time.Time         `json:"creationTimestamp"`
	Labels      map[string]string `json:"labels"`
	Annotations map[string]string `json:"annotations"`
}

// Spec contains things like taints, schedulability and the provider ID
type Spec struct {
	Taints        []Taint `json:"taints"`
	Unschedulable bool    `json:"unschedulable"`
	ProviderID    string  `json:"providerID"`
}

// Taint defines a Node Taint
type Taint struct {
	Effect string `json:"effect"`
	Key    string `json:"key"`
	Value  string `json:"value"`
}

// Status parses information from the status key
type Status struct {
	NodeInfo      Info        `json:"nodeInfo"`
	NodeAddresses []Address   `json:"addresses"`
	Conditions    []Condition `json:"conditions"`
	// Fix: the tag was previously `json:capacity` (missing quotes), which is
	// malformed (flagged by go vet) and silently ignored by encoding/json.
	Capacity Capacity `json:"capacity"`
}

// Address contains an address and a type
type Address struct {
	Address string `json:"address"`
	Type    string `json:"type"`
}

// Info contains node information like what version the kubelet is running
type Info struct {
	ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
	KubeProxyVersion        string `json:"kubeProxyVersion"`
	KubeletVersion          string `json:"kubeletVersion"`
	OperatingSystem         string `json:"operatingSystem"`
	OSImage                 string `json:"osImage"`
}

// Capacity holds the node's reported resource capacity.
type Capacity struct {
	CPU string `json:"cpu"`
}

// Condition contains various status information
type Condition struct {
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Message            string    `json:"message"`
	Reason             string    `json:"reason"`
	Status             string    `json:"status"`
	Type               string    `json:"type"`
}

// List is used to parse out Nodes from a list
type List struct {
	Nodes []Node `json:"items"`
}

// GetNodesResult is the result type for GetAllByPrefixAsync
type GetNodesResult struct {
	Nodes []Node
	Err   error
}

// TopNodesResult is the result type for TopNodesAsync
type TopNodesResult struct {
	Err error
}
// GetNodesAsync wraps Get with a struct response for goroutine + channel
// usage; Nodes is never nil.
func GetNodesAsync() GetNodesResult {
	list, err := Get()
	result := GetNodesResult{Err: err, Nodes: []Node{}}
	if list != nil {
		result.Nodes = list.Nodes
	}
	return result
}
// TopNodesAsync wraps TopNodes with a struct response for goroutine +
// channel usage.
func TopNodesAsync() TopNodesResult {
	return TopNodesResult{Err: TopNodes()}
}
// GetReadyNodesAsync wraps GetReady with a struct response for goroutine +
// channel usage; Nodes is never nil.
func GetReadyNodesAsync() GetNodesResult {
	list, err := GetReady()
	result := GetNodesResult{Err: err, Nodes: []Node{}}
	if list != nil {
		result.Nodes = list.Nodes
	}
	return result
}
// GetByRegexAsync wraps GetByRegex with a struct response for goroutine +
// channel usage; Nodes is never nil.
func GetByRegexAsync(regex string) GetNodesResult {
	nodes, err := GetByRegex(regex)
	result := GetNodesResult{Nodes: nodes, Err: err}
	if result.Nodes == nil {
		result.Nodes = []Node{}
	}
	return result
}
// IsReady reports whether the node is schedulable and carries a "Ready"
// condition with status "True".
func (n *Node) IsReady() bool {
	if n.Spec.Unschedulable {
		return false
	}
	for i := range n.Status.Conditions {
		c := &n.Status.Conditions[i]
		if c.Type == "Ready" && c.Status == "True" {
			return true
		}
	}
	return false
}
// IsLinux checks for a Linux node (operatingSystem reported by the kubelet).
func (n *Node) IsLinux() bool {
	return n.Status.NodeInfo.OperatingSystem == "linux"
}
// IsWindows checks for a Windows node (operatingSystem reported by the kubelet).
func (n *Node) IsWindows() bool {
	return n.Status.NodeInfo.OperatingSystem == "windows"
}
// IsUbuntu reports whether this is a Linux node whose OS image identifies as
// Ubuntu.
func (n *Node) IsUbuntu() bool {
	if !n.IsLinux() {
		return false
	}
	return strings.Contains(strings.ToLower(n.Status.NodeInfo.OSImage), "ubuntu")
}
// HasSubstring reports whether the node's name contains any of the given
// substrings. The node name is lowercased before matching.
// NOTE(review): the substrings themselves are NOT lowercased, so an
// uppercase substring can never match — confirm callers pass lowercase.
func (n *Node) HasSubstring(substrings []string) bool {
	name := strings.ToLower(n.Metadata.Name)
	for _, substring := range substrings {
		if strings.Contains(name, substring) {
			return true
		}
	}
	return false
}
// Version returns the version of the kubelet on the node
func (n *Node) Version() string {
	return n.Status.NodeInfo.KubeletVersion
}
// DescribeNodes runs `kubectl describe` against every node, logging (but not
// propagating) any failures.
func DescribeNodes() {
	list, err := Get()
	if err != nil {
		log.Printf("Unable to get nodes: %s", err)
	}
	if list == nil {
		return
	}
	for _, node := range list.Nodes {
		if describeErr := node.Describe(); describeErr != nil {
			log.Printf("Unable to describe node %s: %s", node.Metadata.Name, describeErr)
		}
	}
}
// Describe will describe a node resource
func (n *Node) Describe() error {
	// commandTimeout is left at its zero value; util.RunAndLogCommand
	// presumably treats 0 as "no timeout" — confirm against util.
	var commandTimeout time.Duration
	cmd := exec.Command("k", "describe", "node", n.Metadata.Name)
	out, err := util.RunAndLogCommand(cmd, commandTimeout)
	log.Printf("\n%s\n", string(out))
	return err
}
// AddLabel adds a label to a node; label is passed verbatim to
// `kubectl label` (i.e. "key=value" form).
func (n *Node) AddLabel(label string) error {
	// zero timeout — see Describe; behavior depends on util.RunAndLogCommand
	var commandTimeout time.Duration
	cmd := exec.Command("k", "label", "node", n.Metadata.Name, label)
	out, err := util.RunAndLogCommand(cmd, commandTimeout)
	log.Printf("\n%s\n", string(out))
	return err
}
// AddLabelWithRetry adds a label to a node, retrying every sleep interval
// until success or timeout. On timeout it returns an error wrapping the most
// recent retry failure.
func (n *Node) AddLabelWithRetry(sleep, timeout time.Duration, label string) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan error)
	var mostRecentRetryError error
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- n.AddLabel(label):
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case result := <-ch:
			if result == nil {
				return nil
			}
			mostRecentRetryError = result
		case <-ctx.Done():
			return errors.Errorf("AddLabelWithRetry timed out: %s\n", mostRecentRetryError)
		}
	}
}
// AddAnnotation adds an annotation to node; annotation is passed verbatim to
// `kubectl annotate` (i.e. "key=value" form).
func (n *Node) AddAnnotation(annotation string) error {
	// zero timeout — see Describe; behavior depends on util.RunAndLogCommand
	var commandTimeout time.Duration
	cmd := exec.Command("k", "annotate", "nodes", n.Metadata.Name, annotation)
	out, err := util.RunAndLogCommand(cmd, commandTimeout)
	log.Printf("\n%s\n", string(out))
	return err
}
// AddAnnotationWithRetry adds an annotation to a node, retrying every sleep
// interval until success or timeout. On timeout it returns an error wrapping
// the most recent retry failure.
func (n *Node) AddAnnotationWithRetry(sleep, timeout time.Duration, annotation string) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan error)
	var mostRecentRetryError error
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- n.AddAnnotation(annotation):
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case result := <-ch:
			if result == nil {
				return nil
			}
			mostRecentRetryError = result
		case <-ctx.Done():
			return errors.Errorf("AddAnnotationWithRetry timed out: %s\n", mostRecentRetryError)
		}
	}
}
// AddTaint adds a taint to node, formatted as key=value:effect.
func (n *Node) AddTaint(taint Taint) error {
	// zero timeout — see Describe; behavior depends on util.RunAndLogCommand
	var commandTimeout time.Duration
	cmd := exec.Command("k", "taint", "nodes", n.Metadata.Name, taint.Key+"="+taint.Value+":"+taint.Effect)
	out, err := util.RunAndLogCommand(cmd, commandTimeout)
	log.Printf("\n%s\n", string(out))
	return err
}
// RemoveTaint removes a taint from a node using kubectl's key:effect- syntax.
func (n *Node) RemoveTaint(taint Taint) error {
	// zero timeout — see Describe; behavior depends on util.RunAndLogCommand
	var commandTimeout time.Duration
	cmd := exec.Command("k", "taint", "nodes", n.Metadata.Name, taint.Key+":"+taint.Effect+"-")
	out, err := util.RunAndLogCommand(cmd, commandTimeout)
	log.Printf("\n%s\n", string(out))
	return err
}
// AreAllReady reports whether every node in the cluster is Ready. Errors
// from Get are swallowed; a nil list counts as "all ready".
func AreAllReady() bool {
	list, _ := Get()
	if list == nil {
		return true
	}
	for i := range list.Nodes {
		if !list.Nodes[i].IsReady() {
			return false
		}
	}
	return true
}
// AreNNodesReady reports whether exactly nodeCount nodes exist and all of
// them are Ready; -1 means "all nodes". Any single unready node yields false.
func AreNNodesReady(nodeCount int) bool {
	if nodeCount == -1 {
		return AreAllReady()
	}
	list, _ := Get()
	ready := 0
	if list != nil {
		for i := range list.Nodes {
			if !list.Nodes[i].IsReady() {
				return false
			}
			ready++
		}
	}
	return ready == nodeCount
}
// AreMinNodesReady reports whether at least nodeCount nodes are Ready; -1
// means "all nodes". Note: any single unready node makes this return false,
// even when the minimum ready count has already been reached.
func AreMinNodesReady(nodeCount int) bool {
	if nodeCount == -1 {
		return AreAllReady()
	}
	list, _ := Get()
	ready := 0
	if list != nil {
		for i := range list.Nodes {
			if !list.Nodes[i].IsReady() {
				return false
			}
			ready++
		}
	}
	return ready >= nodeCount
}
// AreMaxNodesReady reports whether the number of Ready nodes is at most
// nodeCount.
func AreMaxNodesReady(nodeCount int) bool {
	list, _ := Get()
	readyCount := 0
	if list != nil {
		for i := range list.Nodes {
			if list.Nodes[i].IsReady() {
				readyCount++
			}
		}
	}
	return readyCount <= nodeCount
}
// WaitOnReady blocks until nodeCount nodes are ready (-1 means all nodes) or
// the timeout elapses; on timeout it describes all nodes for diagnostics and
// returns false.
func WaitOnReady(nodeCount int, sleep, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan bool)
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- AreNNodesReady(nodeCount):
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case ready := <-ch:
			if ready {
				return ready
			}
		case <-ctx.Done():
			DescribeNodes()
			return false
		}
	}
}
// WaitOnReadyMin blocks until at least nodeCount nodes are ready or the
// timeout elapses; on timeout it optionally describes all nodes and returns
// false.
func WaitOnReadyMin(nodeCount int, sleep time.Duration, describeIfFail bool, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan bool)
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- AreMinNodesReady(nodeCount):
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case ready := <-ch:
			if ready {
				return ready
			}
		case <-ctx.Done():
			if describeIfFail {
				DescribeNodes()
			}
			return false
		}
	}
}
// WaitOnReadyMax blocks until the ready-node count is <= nodeCount or the
// timeout elapses; on timeout it describes all nodes and returns false.
func WaitOnReadyMax(nodeCount int, sleep, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan bool)
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- AreMaxNodesReady(nodeCount):
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case ready := <-ch:
			if ready {
				return ready
			}
		case <-ctx.Done():
			DescribeNodes()
			return false
		}
	}
}
// WaitForNodesWithAnnotation blocks until exactly nodeCount nodes carry the
// given annotation (key, optionally with value val) or the timeout elapses;
// on timeout it returns an error wrapping the most recent lookup failure.
func WaitForNodesWithAnnotation(nodeCount int, key, val string, sleep, timeout time.Duration) ([]Node, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan GetNodesResult)
	var mostRecentWaitForNodesWithAnnotationError error
	var nodes []Node
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- GetByAnnotationsAsync(key, val):
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentWaitForNodesWithAnnotationError = result.Err
			nodes = result.Nodes
			if mostRecentWaitForNodesWithAnnotationError == nil {
				if len(nodes) == nodeCount {
					return nodes, nil
				}
			}
		case <-ctx.Done():
			return nil, errors.Errorf("WaitForNodesWithAnnotation timed out: %s\n", mostRecentWaitForNodesWithAnnotationError)
		}
	}
}
// Get returns the current nodes for a given kubeconfig by shelling out to
// `kubectl get nodes -o json`.
func Get() (*List, error) {
	cmd := exec.Command("k", "get", "nodes", "-o", "json")
	util.PrintCommand(cmd)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Error trying to run 'kubectl get nodes':\n - %s", err)
		if len(out) > 0 {
			log.Printf("\n - %s", string(out))
		}
		return nil, err
	}
	nl := List{}
	if err := json.Unmarshal(out, &nl); err != nil {
		// Fix: previously this error was logged but a nil error was returned
		// alongside a partially populated list; propagate it instead.
		log.Printf("Error unmarshalling nodes json:%s", err)
		return nil, err
	}
	return &nl, nil
}
// TopNodes prints nodes metrics via `kubectl top nodes`. It returns an
// error when the command fails or when any node reports "<unknown>" metrics;
// in both cases metrics-server pod logs are dumped for diagnosis.
func TopNodes() error {
	cmd := exec.Command("k", "top", "nodes")
	util.PrintCommand(cmd)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Error trying to run 'kubectl top nodes':\n - %s", err)
		if len(string(out)) > 0 {
			log.Printf("\n - %s", string(out))
		}
		// dump metrics-server logs to help diagnose missing metrics
		pod.PrintPodsLogs("metrics-server", "kube-system", 5*time.Second, 1*time.Minute)
		return err
	}
	// "<unknown>" in the output means metrics-server returned no data for a node
	if strings.Contains(string(out), "<unknown>") {
		log.Printf("\n - %s", string(out))
		pod.PrintPodsLogs("metrics-server", "kube-system", 5*time.Second, 1*time.Minute)
		return errors.Errorf("Node contained unknown value")
	}
	return nil
}
// GetReadyWithRetry polls for ready nodes until at least one is returned or
// the timeout elapses; on timeout it returns an error wrapping the most
// recent lookup failure.
func GetReadyWithRetry(sleep, timeout time.Duration) ([]Node, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan GetNodesResult)
	var mostRecentGetReadyWithRetryError error
	var nodes []Node
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- GetReadyNodesAsync():
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentGetReadyWithRetryError = result.Err
			nodes = result.Nodes
			if mostRecentGetReadyWithRetryError == nil {
				if len(nodes) > 0 {
					return nodes, nil
				}
			}
		case <-ctx.Done():
			return nil, errors.Errorf("GetReadyWithRetry timed out: %s\n", mostRecentGetReadyWithRetryError)
		}
	}
}
// GetWithRetry polls for nodes until at least one is returned or the timeout
// elapses; on timeout it returns an error wrapping the most recent lookup
// failure.
func GetWithRetry(sleep, timeout time.Duration) ([]Node, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan GetNodesResult)
	var mostRecentGetWithRetryError error
	var nodes []Node
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- GetNodesAsync():
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentGetWithRetryError = result.Err
			nodes = result.Nodes
			if mostRecentGetWithRetryError == nil {
				if len(nodes) > 0 {
					return nodes, nil
				}
			}
		case <-ctx.Done():
			return nil, errors.Errorf("GetWithRetry timed out: %s\n", mostRecentGetWithRetryError)
		}
	}
}
// GetByRegexWithRetry polls for nodes whose names match regex until at least
// one is found and all matched nodes are Ready, or the timeout elapses; on
// timeout it describes all nodes and returns an error wrapping the most
// recent lookup failure.
func GetByRegexWithRetry(regex string, sleep, timeout time.Duration) ([]Node, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan GetNodesResult)
	var mostRecentGetByRegexWithRetryError error
	var nodes []Node
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- GetByRegexAsync(regex):
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentGetByRegexWithRetryError = result.Err
			nodes = result.Nodes
			if mostRecentGetByRegexWithRetryError == nil {
				allNodesAreReady := true
				if len(nodes) > 0 {
					for _, n := range nodes {
						if !n.IsReady() {
							allNodesAreReady = false
							break
						}
					}
					if allNodesAreReady {
						return nodes, nil
					}
				}
			}
		case <-ctx.Done():
			DescribeNodes()
			return nil, errors.Errorf("GetByRegexWithRetry timed out: %s\n", mostRecentGetByRegexWithRetryError)
		}
	}
}
// TopNodesWithRetry retries TopNodes until it succeeds or the timeout
// elapses; on timeout it returns an error wrapping the most recent failure.
func TopNodesWithRetry(sleep, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan TopNodesResult)
	var mostRecentTopNodesWithRetryError error
	go func() {
		for {
			// Fix: send inside a select so this goroutine exits once the
			// consumer has returned and ctx is canceled, instead of leaking
			// while blocked forever on the unbuffered channel.
			select {
			case ch <- TopNodesAsync():
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentTopNodesWithRetryError = result.Err
			if mostRecentTopNodesWithRetryError == nil {
				return nil
			}
		case <-ctx.Done():
			return errors.Errorf("TopNodesWithRetry timed out: %s\n", mostRecentTopNodesWithRetryError)
		}
	}
}
// GetReady returns the subset of current nodes that are in a Ready state,
// logging each unready node it skips.
func GetReady() (*List, error) {
	all, err := Get()
	if err != nil {
		return nil, err
	}
	ready := &List{Nodes: []Node{}}
	for _, n := range all.Nodes {
		if !n.IsReady() {
			log.Printf("found an unready node!")
			continue
		}
		ready.Nodes = append(ready.Nodes, n)
	}
	return ready, nil
}
// GetAddressByType returns a copy of the first node address whose Type
// matches t, or nil when none matches. (A copy is returned, matching the
// original's pointer-to-loop-variable semantics, so callers cannot mutate
// the Status through it.)
func (ns *Status) GetAddressByType(t string) *Address {
	for i := range ns.NodeAddresses {
		if ns.NodeAddresses[i].Type == t {
			addr := ns.NodeAddresses[i]
			return &addr
		}
	}
	return nil
}
// GetByRegex will return a []Node of all nodes that have a name that match
// the regular expression.
func GetByRegex(regex string) ([]Node, error) {
	// Fix: compile the expression once, up front, instead of recompiling it
	// on every loop iteration; an invalid regex now also fails fast before
	// shelling out for the node list.
	exp, err := regexp.Compile(regex)
	if err != nil {
		return nil, err
	}
	list, err := Get()
	if err != nil {
		return nil, err
	}
	nodes := make([]Node, 0)
	for _, n := range list.Nodes {
		if exp.MatchString(n.Metadata.Name) {
			nodes = append(nodes, n)
		}
	}
	return nodes, nil
}
// GetByLabel returns every node that carries the given label key (the label
// value is not inspected).
func GetByLabel(label string) ([]Node, error) {
	list, err := Get()
	if err != nil {
		return nil, err
	}
	matched := make([]Node, 0)
	for _, n := range list.Nodes {
		if _, found := n.Metadata.Labels[label]; found {
			matched = append(matched, n)
		}
	}
	return matched, nil
}
// GetByAnnotationsAsync wraps GetByAnnotations with a struct response for
// goroutine + channel usage.
func GetByAnnotationsAsync(key, value string) GetNodesResult {
	var result GetNodesResult
	result.Nodes, result.Err = GetByAnnotations(key, value)
	return result
}
// GetByAnnotations returns every node carrying the annotation key; when
// value is non-empty the annotation's value must match it as well.
func GetByAnnotations(key, value string) ([]Node, error) {
	list, err := Get()
	if err != nil {
		return nil, err
	}
	matched := make([]Node, 0)
	for _, n := range list.Nodes {
		annotation, present := n.Metadata.Annotations[key]
		if !present {
			continue
		}
		if value == "" || annotation == value {
			matched = append(matched, n)
		}
	}
	return matched, nil
}
// GetByTaint returns every node that has a taint matching key, value and
// effect. (A node carrying the same taint twice is appended twice, matching
// the original behavior.)
func GetByTaint(key, value, effect string) ([]Node, error) {
	list, err := Get()
	if err != nil {
		return nil, err
	}
	matched := make([]Node, 0)
	for _, n := range list.Nodes {
		for i := range n.Spec.Taints {
			t := n.Spec.Taints[i]
			if t.Key == key && t.Value == value && t.Effect == effect {
				matched = append(matched, n)
			}
		}
	}
	return matched, nil
}
|
// Copyright (C) 2021 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vpplink
import (
"fmt"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/vlib"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/vpe"
)
// GetVPPVersion retrieves the version string reported by the running VPP
// instance over the binary API.
func (v *VppLink) GetVPPVersion() (string, error) {
	client := vpe.NewServiceClient(v.GetConnection())
	reply, err := client.ShowVersion(v.GetContext(), &vpe.ShowVersion{})
	if err != nil {
		return "", fmt.Errorf("failed to get VPP version: %w", err)
	}
	return reply.Version, nil
}
// RunCli sends a CLI command to VPP via the inband CLI API and returns VPP's
// textual reply.
func (v *VppLink) RunCli(cmd string) (string, error) {
	client := vlib.NewServiceClient(v.GetConnection())
	reply, err := client.CliInband(v.GetContext(), &vlib.CliInband{
		Cmd: cmd,
	})
	if err != nil {
		return "", fmt.Errorf("failed to run VPP CLI command %q: %w", cmd, err)
	}
	return reply.Reply, nil
}
|
package packet
import (
"bytes"
"github.com/cpusoft/goutil/asn1util"
"github.com/cpusoft/goutil/belogs"
model "rpstir2-model"
)
// siaValueFromPacket pulls the URI payload that accompanies an SIA
// access-method OID: Children[1] of the OID's enclosing SEQUENCE carries the
// value, and DecodeFiniteLen locates where the raw data begins.
func siaValueFromPacket(oidPacket OidPacket, label string) string {
	valuePacket := oidPacket.ParentPacket.Children[1]
	PrintPacketString(label, valuePacket, true, false)
	raw := valuePacket.Bytes()
	_, datapos, _ := asn1util.DecodeFiniteLen(raw)
	return string(raw[datapos:])
}

// ExtractSiaOid extracts the SubjectInfoAccess URIs (manifest, RRDP notify,
// CA repository, signed object) from the decoded OID packets. If none of the
// four access methods is found, it falls back to scanning the raw file bytes.
func ExtractSiaOid(oidPackets *[]OidPacket, fileByte []byte) (subjectInfoAccess model.SiaModel, err error) {
	//oidRpkiManifestKey,oidRpkiNotifyKey,oidCaRepositoryKey,oidSignedObjectKey is children of SubjectInfoAccess
	var sia model.SiaModel = model.SiaModel{}
	for _, oidPacket := range *oidPackets {
		// the four copy-pasted extraction branches are folded into one
		// helper; the log messages are unchanged
		switch oidPacket.Oid {
		case oidRpkiManifestKey:
			belogs.Debug("ExtractSiaOid():found oidRpkiManifestKey:")
			sia.RpkiManifest = siaValueFromPacket(oidPacket, "oidRpkiManifestPacket")
			belogs.Debug("ExtractSiaOid():sia.rpkiManifest:", sia.RpkiManifest)
		case oidRpkiNotifyKey:
			belogs.Debug("ExtractSiaOid():found oidRpkiNotifyKey:")
			sia.RpkiNotify = siaValueFromPacket(oidPacket, "oidRpkiNotifyPacket")
			belogs.Debug("ExtractSiaOid():sia.rpkiNotify:", sia.RpkiNotify)
		case oidCaRepositoryKey:
			belogs.Debug("ExtractSiaOid():found oidCaRepositoryKey:")
			sia.CaRepository = siaValueFromPacket(oidPacket, "oidCaRepositoryPacket")
			belogs.Debug("ExtractSiaOid():sia.caRepository:", sia.CaRepository)
		case oidSignedObjectKey:
			belogs.Debug("ExtractSiaOid():found oidSignedObjectKey:")
			sia.SignedObject = siaValueFromPacket(oidPacket, "oidSignedObjectPacket")
			belogs.Debug("ExtractSiaOid():sia.signedObject:", sia.SignedObject)
		}
	}
	if len(sia.CaRepository) > 0 || len(sia.RpkiManifest) > 0 ||
		len(sia.RpkiNotify) > 0 || len(sia.SignedObject) > 0 {
		return sia, nil
	}
	// nothing found in the packets: retry by scanning the raw bytes
	return reExtractSiaOid(fileByte)
}
// reExtractSiaOid is the fallback used when packet decoding fails: it scans
// the raw file bytes for each SIA access-method OID directly.
/*
	SEQUENCE (2 elem)
	  OBJECT IDENTIFIER 1.3.6.1.5.5.7.48.11 signedObject (PKIX subject/authority info access descriptor)
	  [6] rsync://rpki.ripe.net/repository/DEFAULT/be/c37497-6376-461e-93c6-9778674edc97/1…
*/
func reExtractSiaOid(fileByte []byte) (subjectInfoAccess model.SiaModel, err error) {
	manifest, _ := reExtractSiaSubOid(oidRpkiManifestKeyByte, fileByte)
	notify, _ := reExtractSiaSubOid(oidRpkiNotifyKeyByte, fileByte)
	repository, _ := reExtractSiaSubOid(oidCaRepositoryKeyByte, fileByte)
	signed, _ := reExtractSiaSubOid(oidSignedObjectKeyByte, fileByte)
	return model.SiaModel{
		RpkiManifest: manifest,
		RpkiNotify:   notify,
		CaRepository: repository,
		SignedObject: signed,
	}, nil
}
// reExtractSiaSubOid looks up one SIA access-method OID in the raw file bytes
// and returns the string value that follows it. It returns "" with no error
// when the OID is not present.
/*
	SEQUENCE (2 elem) //seq0
	  OBJECT IDENTIFIER 1.3.6.1.5.5.7.48.11 signedObject (PKIX subject/authority info access descriptor) //pos0
	  [6] rsync://rpki.ripe.net/repository/DEFAULT/be/c37497-6376-461e-93c6-9778674edc97/1… //mf0
*/
func reExtractSiaSubOid(oidKeyByte []byte, fileByte []byte) (sub string, err error) {
	belogs.Debug("reExtractSiaSubOid():len(fileByte): ", len(fileByte))
	pos0 := bytes.Index(fileByte, oidKeyByte)
	belogs.Debug("reExtractSiaSubOid():enum0 pos:", pos0)
	// the OID may legitimately be absent
	if pos0 <= 0 {
		return "", nil
	}
	// the encoded value starts right after the OID bytes
	sub0 := fileByte[pos0+len(oidKeyByte):]
	// bug fix: the decode error was previously ignored, silently yielding ""
	datalen, datapos, err := asn1util.DecodeFiniteAndInfiniteLen(sub0)
	if err != nil {
		return "", err
	}
	belogs.Debug("reExtractSiaSubOid():sub0 pos:", datapos)
	// bug fix: guard against a corrupt length that points past the buffer end,
	// which would have panicked on the slice expression below
	if datapos+datalen > uint64(len(sub0)) {
		return "", nil
	}
	// the value is a plain string, so a full ASN.1 DecodePacket is unnecessary
	return string(sub0[datapos : datapos+datalen]), nil
}
|
package envoyconfig
import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strings"
envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
envoy_config_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
envoy_type_matcher_v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/wrapperspb"
"github.com/pomerium/pomerium/config"
"github.com/pomerium/pomerium/internal/httputil"
"github.com/pomerium/pomerium/internal/urlutil"
)
const (
	// httpCluster is the envoy cluster that proxies requests to the pomerium
	// control-plane HTTP server.
	httpCluster = "pomerium-control-plane-http"
)
// buildGRPCRoutes returns the single catch-all route that forwards gRPC
// traffic to the pomerium control-plane gRPC cluster, with the ext_authz
// filter disabled for this route.
func (b *Builder) buildGRPCRoutes() ([]*envoy_config_route_v3.Route, error) {
	action := &envoy_config_route_v3.Route_Route{
		Route: &envoy_config_route_v3.RouteAction{
			ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
				Cluster: "pomerium-control-plane-grpc",
			},
		},
	}
	return []*envoy_config_route_v3.Route{{
		Name: "pomerium-grpc",
		Match: &envoy_config_route_v3.RouteMatch{
			PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{
				Prefix: "/",
			},
			// only requests that envoy classifies as gRPC match this route
			Grpc: &envoy_config_route_v3.RouteMatch_GrpcRouteMatchOptions{},
		},
		Action: action,
		TypedPerFilterConfig: map[string]*any.Any{
			PerFilterConfigExtAuthzName: PerFilterConfigExtAuthzDisabled(),
		},
	}}, nil
}
// buildPomeriumHTTPRoutes returns the control-plane HTTP routes (health
// checks, /.pomerium, well-known endpoints) for the given host, plus any
// authenticate-service routes that apply.
func (b *Builder) buildPomeriumHTTPRoutes(
	options *config.Options,
	host string,
) ([]*envoy_config_route_v3.Route, error) {
	var routes []*envoy_config_route_v3.Route
	// if this is the pomerium proxy in front of the the authenticate service, don't add
	// these routes since they will be handled by authenticate
	isFrontingAuthenticate, err := isProxyFrontingAuthenticate(options, host)
	if err != nil {
		return nil, err
	}
	if !isFrontingAuthenticate {
		routes = append(routes,
			b.buildControlPlanePathRoute(options, "/ping"),
			b.buildControlPlanePathRoute(options, "/healthz"),
			b.buildControlPlanePathRoute(options, "/.pomerium"),
			b.buildControlPlanePrefixRoute(options, "/.pomerium/"),
			b.buildControlPlanePathRoute(options, "/.well-known/pomerium"),
			b.buildControlPlanePrefixRoute(options, "/.well-known/pomerium/"),
		)
		// per #837, only add robots.txt if there are no unauthenticated routes
		if !hasPublicPolicyMatchingURL(options, url.URL{Scheme: "https", Host: host, Path: "/robots.txt"}) {
			routes = append(routes, b.buildControlPlanePathRoute(options, "/robots.txt"))
		}
	}
	authRoutes, err := b.buildPomeriumAuthenticateHTTPRoutes(options, host)
	if err != nil {
		return nil, err
	}
	routes = append(routes, authRoutes...)
	return routes, nil
}
// buildPomeriumAuthenticateHTTPRoutes returns the authenticate-service routes
// (OAuth callback path and root) when this instance runs the authenticate
// service and the host matches either the public or internal authenticate URL;
// otherwise it returns nil.
func (b *Builder) buildPomeriumAuthenticateHTTPRoutes(
	options *config.Options,
	host string,
) ([]*envoy_config_route_v3.Route, error) {
	if !config.IsAuthenticate(options.Services) {
		return nil, nil
	}
	// check both the external and the internal authenticate URLs
	for _, fn := range []func() (*url.URL, error){
		options.GetAuthenticateURL,
		options.GetInternalAuthenticateURL,
	} {
		u, err := fn()
		if err != nil {
			return nil, err
		}
		if urlMatchesHost(u, host) {
			return []*envoy_config_route_v3.Route{
				b.buildControlPlanePathRoute(options, options.AuthenticateCallbackPath),
				b.buildControlPlanePathRoute(options, "/"),
			}, nil
		}
	}
	return nil, nil
}
// buildControlPlanePathRoute builds a route that forwards requests for the
// exact path to the control-plane HTTP cluster, with ext_authz context
// extensions marking it as an internal (control-plane) route.
func (b *Builder) buildControlPlanePathRoute(
	options *config.Options,
	path string,
) *envoy_config_route_v3.Route {
	r := &envoy_config_route_v3.Route{
		Name: "pomerium-path-" + path,
		Match: &envoy_config_route_v3.RouteMatch{
			PathSpecifier: &envoy_config_route_v3.RouteMatch_Path{Path: path},
		},
		Action: &envoy_config_route_v3.Route_Route{
			Route: &envoy_config_route_v3.RouteAction{
				ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
					Cluster: httpCluster,
				},
			},
		},
		ResponseHeadersToAdd: toEnvoyHeaders(options.GetSetResponseHeaders()),
		TypedPerFilterConfig: map[string]*any.Any{
			PerFilterConfigExtAuthzName: PerFilterConfigExtAuthzContextExtensions(MakeExtAuthzContextExtensions(true, 0)),
		},
	}
	return r
}
// buildControlPlanePrefixRoute is like buildControlPlanePathRoute but matches
// every path under the given prefix.
func (b *Builder) buildControlPlanePrefixRoute(
	options *config.Options,
	prefix string,
) *envoy_config_route_v3.Route {
	r := &envoy_config_route_v3.Route{
		Name: "pomerium-prefix-" + prefix,
		Match: &envoy_config_route_v3.RouteMatch{
			PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{Prefix: prefix},
		},
		Action: &envoy_config_route_v3.Route_Route{
			Route: &envoy_config_route_v3.RouteAction{
				ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
					Cluster: httpCluster,
				},
			},
		},
		ResponseHeadersToAdd: toEnvoyHeaders(options.GetSetResponseHeaders()),
		TypedPerFilterConfig: map[string]*any.Any{
			PerFilterConfigExtAuthzName: PerFilterConfigExtAuthzContextExtensions(MakeExtAuthzContextExtensions(true, 0)),
		},
	}
	return r
}
// getClusterID returns a cluster ID
// (a package-level var so tests can replace it).
var getClusterID = func(policy *config.Policy) string {
	prefix := getClusterStatsName(policy)
	if prefix == "" {
		prefix = "route"
	}
	// NOTE(review): the RouteID error is deliberately discarded here, so on
	// error the id is the zero value — TODO confirm callers validate earlier.
	id, _ := policy.RouteID()
	return fmt.Sprintf("%s-%x", prefix, id)
}
// getClusterStatsName returns the human-readable name envoy uses to emit
// statistics for this cluster (the envoy_cluster_name label), or "" when the
// policy has no explicit envoy name configured.
func getClusterStatsName(policy *config.Policy) string {
	if opts := policy.EnvoyOpts; opts != nil && opts.Name != "" {
		return opts.Name
	}
	return ""
}
// buildRoutesForPoliciesWithHost builds the routes for every policy whose
// From URL matches the given host.
func (b *Builder) buildRoutesForPoliciesWithHost(
	cfg *config.Config,
	host string,
) ([]*envoy_config_route_v3.Route, error) {
	var out []*envoy_config_route_v3.Route
	for idx, p := range cfg.Options.GetAllPolicies() {
		policy := p // copy so the pointer below stays stable per iteration
		fromURL, err := urlutil.ParseAndValidateURL(policy.From)
		if err != nil {
			return nil, err
		}
		if !urlMatchesHost(fromURL, host) {
			continue
		}
		built, err := b.buildRoutesForPolicy(cfg, &policy, fmt.Sprintf("policy-%d", idx))
		if err != nil {
			return nil, err
		}
		out = append(out, built...)
	}
	return out, nil
}
// buildRoutesForPoliciesWithCatchAll builds the routes for every policy whose
// From host contains a wildcard ("*"); these act as catch-all virtual hosts.
func (b *Builder) buildRoutesForPoliciesWithCatchAll(
	cfg *config.Config,
) ([]*envoy_config_route_v3.Route, error) {
	var out []*envoy_config_route_v3.Route
	for idx, p := range cfg.Options.GetAllPolicies() {
		policy := p // copy so the pointer below stays stable per iteration
		fromURL, err := urlutil.ParseAndValidateURL(policy.From)
		if err != nil {
			return nil, err
		}
		if !strings.Contains(fromURL.Host, "*") {
			continue
		}
		built, err := b.buildRoutesForPolicy(cfg, &policy, fmt.Sprintf("policy-%d", idx))
		if err != nil {
			return nil, err
		}
		out = append(out, built...)
	}
	return out, nil
}
// buildRoutesForPolicy builds the envoy routes for one policy. Wildcard hosts
// expand into one route per derived domain; plain hosts produce one route.
func (b *Builder) buildRoutesForPolicy(
	cfg *config.Config,
	policy *config.Policy,
	name string,
) ([]*envoy_config_route_v3.Route, error) {
	fromURL, err := urlutil.ParseAndValidateURL(policy.From)
	if err != nil {
		return nil, err
	}
	var routes []*envoy_config_route_v3.Route
	if strings.Contains(fromURL.Host, "*") {
		// we have to match '*.example.com' and '*.example.com:443', so there are two routes
		for _, host := range urlutil.GetDomainsForURL(fromURL) {
			route, err := b.buildRouteForPolicyAndMatch(cfg, policy, name, mkRouteMatchForHost(policy, host))
			if err != nil {
				return nil, err
			}
			routes = append(routes, route)
		}
	} else {
		route, err := b.buildRouteForPolicyAndMatch(cfg, policy, name, mkRouteMatch(policy))
		if err != nil {
			return nil, err
		}
		routes = append(routes, route)
	}
	return routes, nil
}
// buildRouteForPolicyAndMatch builds one envoy route for a policy given a
// pre-computed route match: it chooses a redirect or proxy action, wires the
// ext_authz per-filter config, and attaches lua filter metadata.
func (b *Builder) buildRouteForPolicyAndMatch(
	cfg *config.Config,
	policy *config.Policy,
	name string,
	match *envoy_config_route_v3.RouteMatch,
) (*envoy_config_route_v3.Route, error) {
	fromURL, err := urlutil.ParseAndValidateURL(policy.From)
	if err != nil {
		return nil, err
	}
	routeID, err := policy.RouteID()
	if err != nil {
		return nil, err
	}
	route := &envoy_config_route_v3.Route{
		Name:                   name,
		Match:                  match,
		Metadata:               &envoy_config_core_v3.Metadata{},
		RequestHeadersToRemove: getRequestHeadersToRemove(cfg.Options, policy),
		ResponseHeadersToAdd:   toEnvoyHeaders(cfg.Options.GetSetResponseHeadersForPolicy(policy)),
	}
	// a policy is either a redirect or a proxied route, never both
	if policy.Redirect != nil {
		action, err := b.buildPolicyRouteRedirectAction(policy.Redirect)
		if err != nil {
			return nil, err
		}
		route.Action = &envoy_config_route_v3.Route_Redirect{Redirect: action}
	} else {
		action, err := b.buildPolicyRouteRouteAction(cfg.Options, policy)
		if err != nil {
			return nil, err
		}
		route.Action = &envoy_config_route_v3.Route_Route{Route: action}
	}
	luaMetadata := map[string]*structpb.Value{
		"rewrite_response_headers": getRewriteHeadersMetadata(policy.RewriteResponseHeaders),
	}
	// disable authentication entirely when the proxy is fronting authenticate
	isFrontingAuthenticate, err := isProxyFrontingAuthenticate(cfg.Options, fromURL.Hostname())
	if err != nil {
		return nil, err
	}
	if isFrontingAuthenticate {
		route.TypedPerFilterConfig = map[string]*any.Any{
			PerFilterConfigExtAuthzName: PerFilterConfigExtAuthzDisabled(),
		}
	} else {
		route.TypedPerFilterConfig = map[string]*any.Any{
			PerFilterConfigExtAuthzName: PerFilterConfigExtAuthzContextExtensions(MakeExtAuthzContextExtensions(false, routeID)),
		}
		// tell the lua filter which pomerium cookie/headers to strip before
		// the request is proxied upstream
		luaMetadata["remove_pomerium_cookie"] = &structpb.Value{
			Kind: &structpb.Value_StringValue{
				StringValue: cfg.Options.CookieName,
			},
		}
		luaMetadata["remove_pomerium_authorization"] = &structpb.Value{
			Kind: &structpb.Value_BoolValue{
				BoolValue: true,
			},
		}
		luaMetadata["remove_impersonate_headers"] = &structpb.Value{
			Kind: &structpb.Value_BoolValue{
				BoolValue: policy.IsForKubernetes(),
			},
		}
	}
	if policy.IsForKubernetes() {
		// kubernetes requests are re-proxied through the control plane; attach
		// the policy-id headers so the re-proxy can identify this policy
		for _, hdr := range b.reproxy.GetPolicyIDHeaders(routeID) {
			route.RequestHeadersToAdd = append(route.RequestHeadersToAdd,
				&envoy_config_core_v3.HeaderValueOption{
					Header: &envoy_config_core_v3.HeaderValue{
						Key:   hdr[0],
						Value: hdr[1],
					},
					AppendAction: envoy_config_core_v3.HeaderValueOption_OVERWRITE_IF_EXISTS_OR_ADD,
				})
		}
	}
	route.Metadata.FilterMetadata = map[string]*structpb.Struct{
		"envoy.filters.http.lua": {Fields: luaMetadata},
	}
	return route, nil
}
// buildPolicyRouteRedirectAction translates the policy's redirect config into
// an envoy RedirectAction. Each optional field is applied only when set.
func (b *Builder) buildPolicyRouteRedirectAction(r *config.PolicyRedirect) (*envoy_config_route_v3.RedirectAction, error) {
	action := &envoy_config_route_v3.RedirectAction{}
	// https_redirect and scheme_redirect are mutually exclusive specifiers
	switch {
	case r.HTTPSRedirect != nil:
		action.SchemeRewriteSpecifier = &envoy_config_route_v3.RedirectAction_HttpsRedirect{
			HttpsRedirect: *r.HTTPSRedirect,
		}
	case r.SchemeRedirect != nil:
		action.SchemeRewriteSpecifier = &envoy_config_route_v3.RedirectAction_SchemeRedirect{
			SchemeRedirect: *r.SchemeRedirect,
		}
	}
	if r.HostRedirect != nil {
		action.HostRedirect = *r.HostRedirect
	}
	if r.PortRedirect != nil {
		action.PortRedirect = *r.PortRedirect
	}
	// path_redirect and prefix_rewrite are likewise mutually exclusive
	switch {
	case r.PathRedirect != nil:
		action.PathRewriteSpecifier = &envoy_config_route_v3.RedirectAction_PathRedirect{
			PathRedirect: *r.PathRedirect,
		}
	case r.PrefixRewrite != nil:
		action.PathRewriteSpecifier = &envoy_config_route_v3.RedirectAction_PrefixRewrite{
			PrefixRewrite: *r.PrefixRewrite,
		}
	}
	if r.ResponseCode != nil {
		action.ResponseCode = envoy_config_route_v3.RedirectAction_RedirectResponseCode(*r.ResponseCode)
	}
	if r.StripQuery != nil {
		action.StripQuery = *r.StripQuery
	}
	return action, nil
}
// buildPolicyRouteRouteAction builds the proxying RouteAction for a policy:
// target cluster, upgrade configs (websocket/spdy/CONNECT), timeouts, path
// rewrites, host rewrite, and session-affinity hash policies.
func (b *Builder) buildPolicyRouteRouteAction(options *config.Options, policy *config.Policy) (*envoy_config_route_v3.RouteAction, error) {
	clusterName := getClusterID(policy)
	// kubernetes requests are sent to the http control plane to be reproxied
	if policy.IsForKubernetes() {
		clusterName = httpCluster
	}
	routeTimeout := getRouteTimeout(options, policy)
	idleTimeout := getRouteIdleTimeout(policy)
	prefixRewrite, regexRewrite := getRewriteOptions(policy)
	// websocket and spdy upgrades are always present but only enabled when the
	// policy allows them
	upgradeConfigs := []*envoy_config_route_v3.RouteAction_UpgradeConfig{
		{
			UpgradeType: "websocket",
			Enabled:     &wrappers.BoolValue{Value: policy.AllowWebsockets},
		},
		{
			UpgradeType: "spdy/3.1",
			Enabled:     &wrappers.BoolValue{Value: policy.AllowSPDY},
		},
	}
	// TCP tunnels are carried over HTTP CONNECT
	if policy.IsTCP() {
		upgradeConfigs = append(upgradeConfigs, &envoy_config_route_v3.RouteAction_UpgradeConfig{
			UpgradeType:   "CONNECT",
			Enabled:       &wrappers.BoolValue{Value: true},
			ConnectConfig: &envoy_config_route_v3.RouteAction_UpgradeConfig_ConnectConfig{},
		})
	}
	action := &envoy_config_route_v3.RouteAction{
		ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
			Cluster: clusterName,
		},
		UpgradeConfigs: upgradeConfigs,
		HostRewriteSpecifier: &envoy_config_route_v3.RouteAction_AutoHostRewrite{
			AutoHostRewrite: &wrappers.BoolValue{Value: !policy.PreserveHostHeader},
		},
		Timeout:       routeTimeout,
		IdleTimeout:   idleTimeout,
		PrefixRewrite: prefixRewrite,
		RegexRewrite:  regexRewrite,
		HashPolicy: []*envoy_config_route_v3.RouteAction_HashPolicy{
			// hash by the routing key, which is added by authorize.
			{
				PolicySpecifier: &envoy_config_route_v3.RouteAction_HashPolicy_Header_{
					Header: &envoy_config_route_v3.RouteAction_HashPolicy_Header{
						HeaderName: httputil.HeaderPomeriumRoutingKey,
					},
				},
				Terminal: true,
			},
			// if the routing key is missing, hash by the ip.
			{
				PolicySpecifier: &envoy_config_route_v3.RouteAction_HashPolicy_ConnectionProperties_{
					ConnectionProperties: &envoy_config_route_v3.RouteAction_HashPolicy_ConnectionProperties{
						SourceIp: true,
					},
				},
				Terminal: true,
			},
		},
	}
	// may override the AutoHostRewrite default set above
	setHostRewriteOptions(policy, action)
	return action, nil
}
// mkEnvoyHeader builds a single header option that overwrites any existing
// header with the same name, adding it when absent.
func mkEnvoyHeader(k, v string) *envoy_config_core_v3.HeaderValueOption {
	header := &envoy_config_core_v3.HeaderValue{
		Key:   k,
		Value: v,
	}
	return &envoy_config_core_v3.HeaderValueOption{
		Header:       header,
		AppendAction: envoy_config_core_v3.HeaderValueOption_OVERWRITE_IF_EXISTS_OR_ADD,
	}
}
// toEnvoyHeaders converts a header map into envoy header options, sorted by
// header name so the generated config is deterministic.
func toEnvoyHeaders(headers map[string]string) []*envoy_config_core_v3.HeaderValueOption {
	keys := make([]string, 0, len(headers))
	for name := range headers {
		keys = append(keys, name)
	}
	sort.Strings(keys)
	out := make([]*envoy_config_core_v3.HeaderValueOption, len(keys))
	for i, name := range keys {
		out[i] = mkEnvoyHeader(name, headers[name])
	}
	return out
}
// mkRouteMatch builds the path matcher for a policy: CONNECT for TCP tunnels,
// then regex, exact path, or prefix, defaulting to the "/" prefix.
func mkRouteMatch(policy *config.Policy) *envoy_config_route_v3.RouteMatch {
	match := &envoy_config_route_v3.RouteMatch{}
	switch {
	case policy.IsTCP():
		match.PathSpecifier = &envoy_config_route_v3.RouteMatch_ConnectMatcher_{
			ConnectMatcher: &envoy_config_route_v3.RouteMatch_ConnectMatcher{},
		}
	case policy.Regex != "":
		match.PathSpecifier = &envoy_config_route_v3.RouteMatch_SafeRegex{
			SafeRegex: &envoy_type_matcher_v3.RegexMatcher{
				Regex: policy.Regex,
			},
		}
	case policy.Path != "":
		match.PathSpecifier = &envoy_config_route_v3.RouteMatch_Path{Path: policy.Path}
	case policy.Prefix != "":
		match.PathSpecifier = &envoy_config_route_v3.RouteMatch_Prefix{Prefix: policy.Prefix}
	default:
		match.PathSpecifier = &envoy_config_route_v3.RouteMatch_Prefix{Prefix: "/"}
	}
	return match
}
// mkRouteMatchForHost builds the policy's route match and additionally pins it
// to the given host via a regex match on the :authority pseudo-header
// (wildcards in the host are translated to a regex).
func mkRouteMatchForHost(
	policy *config.Policy,
	host string,
) *envoy_config_route_v3.RouteMatch {
	match := mkRouteMatch(policy)
	match.Headers = append(match.Headers, &envoy_config_route_v3.HeaderMatcher{
		Name: ":authority",
		HeaderMatchSpecifier: &envoy_config_route_v3.HeaderMatcher_StringMatch{
			StringMatch: &envoy_type_matcher_v3.StringMatcher{
				MatchPattern: &envoy_type_matcher_v3.StringMatcher_SafeRegex{
					SafeRegex: &envoy_type_matcher_v3.RegexMatcher{
						Regex: config.WildcardToRegex(host),
					},
				},
			},
		},
	})
	return match
}
// getRequestHeadersToRemove lists the request headers to strip before
// proxying: the policy's own list, identity headers (unless the policy passes
// them through), and the reproxy headers that must never come from a client.
func getRequestHeadersToRemove(options *config.Options, policy *config.Policy) []string {
	remove := policy.RemoveRequestHeaders
	if !policy.PassIdentityHeaders {
		remove = append(remove,
			httputil.HeaderPomeriumJWTAssertion,
			httputil.HeaderPomeriumJWTAssertionFor)
		for headerName := range options.JWTClaimsHeaders {
			remove = append(remove, headerName)
		}
	}
	// remove these headers to prevent a user from re-proxying requests through the control plane
	remove = append(remove,
		httputil.HeaderPomeriumReproxyPolicy,
		httputil.HeaderPomeriumReproxyPolicyHMAC,
	)
	return remove
}
// getRouteTimeout picks the route timeout: the policy's explicit upstream
// timeout, zero (disabled) for long-lived streams, or the configured default.
func getRouteTimeout(options *config.Options, policy *config.Policy) *durationpb.Duration {
	switch {
	case policy.UpstreamTimeout != nil:
		return durationpb.New(*policy.UpstreamTimeout)
	case shouldDisableStreamIdleTimeout(policy):
		// a non-zero value would conflict with idleTimeout and/or websocket / tcp calls
		return durationpb.New(0)
	default:
		return durationpb.New(options.DefaultUpstreamTimeout)
	}
}
// getRouteIdleTimeout picks the idle timeout: the policy's explicit value,
// zero (disabled) for long-lived streams, or nil to use envoy's default.
func getRouteIdleTimeout(policy *config.Policy) *durationpb.Duration {
	if policy.IdleTimeout != nil {
		return durationpb.New(*policy.IdleTimeout)
	}
	if shouldDisableStreamIdleTimeout(policy) {
		return durationpb.New(0)
	}
	return nil
}
// shouldDisableStreamIdleTimeout reports whether stream idle timeouts must be
// turned off for this policy: websockets and TCP tunnels are long-lived, and
// kubernetes is disabled so that tailing logs works (#2182).
func shouldDisableStreamIdleTimeout(policy *config.Policy) bool {
	if policy.AllowWebsockets {
		return true
	}
	if policy.IsTCP() {
		return true
	}
	return policy.IsForKubernetes()
}
// getRewriteOptions computes the path rewrite for a policy: an explicit
// prefix rewrite wins, then a regex rewrite, otherwise the path of the first
// To URL (when present) is used as the prefix rewrite.
func getRewriteOptions(policy *config.Policy) (prefixRewrite string, regexRewrite *envoy_type_matcher_v3.RegexMatchAndSubstitute) {
	switch {
	case policy.PrefixRewrite != "":
		prefixRewrite = policy.PrefixRewrite
	case policy.RegexRewritePattern != "":
		regexRewrite = &envoy_type_matcher_v3.RegexMatchAndSubstitute{
			Pattern: &envoy_type_matcher_v3.RegexMatcher{
				Regex: policy.RegexRewritePattern,
			},
			Substitution: policy.RegexRewriteSubstitution,
		}
	case len(policy.To) > 0 && policy.To[0].URL.Path != "":
		prefixRewrite = policy.To[0].URL.Path
	}
	return prefixRewrite, regexRewrite
}
// setHostRewriteOptions sets the action's host rewrite from the policy:
// literal rewrite, header-based rewrite, path-regex rewrite, preserve the
// client Host header, or (default) auto-rewrite to the upstream host.
func setHostRewriteOptions(policy *config.Policy, action *envoy_config_route_v3.RouteAction) {
	switch {
	case policy.HostRewrite != "":
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_HostRewriteLiteral{
			HostRewriteLiteral: policy.HostRewrite,
		}
	case policy.HostRewriteHeader != "":
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_HostRewriteHeader{
			HostRewriteHeader: policy.HostRewriteHeader,
		}
	case policy.HostPathRegexRewritePattern != "":
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_HostRewritePathRegex{
			HostRewritePathRegex: &envoy_type_matcher_v3.RegexMatchAndSubstitute{
				Pattern: &envoy_type_matcher_v3.RegexMatcher{
					Regex: policy.HostPathRegexRewritePattern,
				},
				Substitution: policy.HostPathRegexRewriteSubstitution,
			},
		}
	case policy.PreserveHostHeader:
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_AutoHostRewrite{
			AutoHostRewrite: wrapperspb.Bool(false),
		}
	default:
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_AutoHostRewrite{
			AutoHostRewrite: wrapperspb.Bool(true),
		}
	}
}
// hasPublicPolicyMatchingURL reports whether any policy both allows public
// unauthenticated access and matches the given request URL.
func hasPublicPolicyMatchingURL(options *config.Options, requestURL url.URL) bool {
	for _, p := range options.GetAllPolicies() {
		if p.AllowPublicUnauthenticatedAccess && p.Matches(requestURL) {
			return true
		}
	}
	return false
}
// isProxyFrontingAuthenticate reports whether this instance is a proxy (i.e.
// is not itself running the authenticate service) whose host matches the
// configured authenticate URL.
func isProxyFrontingAuthenticate(options *config.Options, host string) (bool, error) {
	authenticateURL, err := options.GetAuthenticateURL()
	if err != nil {
		return false, err
	}
	if config.IsAuthenticate(options.Services) {
		return false, nil
	}
	return urlMatchesHost(authenticateURL, host), nil
}
// getRewriteHeadersMetadata converts the rewrite-response-headers config into
// a protobuf list value for the lua filter, via a JSON round trip.
// The marshal/unmarshal/NewValue errors are deliberately dropped; presumably
// a plain config struct slice cannot fail the round trip — confirm upstream.
func getRewriteHeadersMetadata(headers []config.RewriteHeader) *structpb.Value {
	if len(headers) == 0 {
		// always hand the lua filter a list value, never nil
		return &structpb.Value{
			Kind: &structpb.Value_ListValue{
				ListValue: new(structpb.ListValue),
			},
		}
	}
	var obj interface{}
	bs, _ := json.Marshal(headers)
	_ = json.Unmarshal(bs, &obj)
	v, _ := structpb.NewValue(obj)
	return v
}
|
package jwt
import (
"errors"
"fmt"
"log"
"strconv"
"strings"
"testing"
"time"
"github.com/robbert229/jwt"
)
// Test_jwt_gen exercises the robbert229/jwt round trip: encode claims,
// validate the token, decode it, and read the claims back.
func Test_jwt_gen(t *testing.T) {
	secret := "ThisIsMySuperSecret"
	algorithm := jwt.HmacSha256(secret)
	claims := jwt.NewClaim()
	claims.Set("Role", "Admin")
	claims.Set("UserName", "whr")
	claims.Set("RoomID", "123")
	claims.SetTime("exp", time.Now().Add(time.Minute))
	token, err := algorithm.Encode(claims)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Token: %s\n", token)
	// bug fix: the original panicked with the stale (nil) outer err instead of
	// the actual validation error
	if err := algorithm.Validate(token); err != nil {
		panic(err)
	}
	loadedClaims, err := algorithm.Decode(token)
	if err != nil {
		panic(err)
	}
	role, err := loadedClaims.Get("Role")
	if err != nil {
		panic(err)
	}
	userName, err := loadedClaims.Get("UserName")
	if err != nil {
		panic(err)
	}
	log.Println("userName:", userName)
	roomID, err := loadedClaims.Get("RoomID")
	if err != nil {
		panic(err)
	}
	log.Println("roomID:", roomID)
	roleString, ok := role.(string)
	if !ok {
		// bug fix: err is nil at this point; report the actual problem
		t.Fatalf("Role claim is not a string: %v", role)
	}
	if strings.Compare(roleString, "Admin") == 0 {
		//user is an admin
		fmt.Println("User is an admin")
	}
}
// Test_do_jwt round-trips a uid through jwtGen and jwtParse.
func Test_do_jwt(t *testing.T) {
	secretKey := "abc"
	token, err := jwtGen(secretKey, int64(100))
	fmt.Println("------", "", "-----------")
	if err != nil {
		log.Println(err)
		return
	}
	//
	uid, err := jwtParse(secretKey, token)
	if err != nil {
		log.Println(err)
	}
	log.Println("uid", uid)
	fmt.Println("------", "ok", "-----------")
}
// jwtGen creates an HMAC-SHA256 signed token carrying the uid claim as a
// decimal string. No expiry claim is set.
func jwtGen(secretKey string, uid int64) (string, error) {
	algorithm := jwt.HmacSha256(secretKey)
	claims := jwt.NewClaim()
	claims.Set("uid", fmt.Sprint(uid))
	//claims.SetTime("exp", time.Now().Add(time.Hour*24*365*10))
	token, encodeErr := algorithm.Encode(claims)
	if encodeErr != nil {
		return "", encodeErr
	}
	return token, nil
}
// jwtParse validates token with secretKey and returns the uid claim parsed as
// an int64. It returns 0 plus an error when validation, decoding, claim
// lookup, or integer parsing fails.
func jwtParse(secretKey, token string) (uid int64, err error) {
	algorithm := jwt.HmacSha256(secretKey)
	// verify the signature before trusting any claim
	if err = algorithm.Validate(token); err != nil {
		return 0, err
	}
	//parse
	loadedClaims, err := algorithm.Decode(token)
	if err != nil {
		return 0, err
	}
	id, err := loadedClaims.Get("uid")
	if err != nil {
		return 0, err
	}
	s, ok := id.(string)
	if !ok {
		// bug fix: the original logged the zero-valued uid variable; log the
		// claim value that actually failed the type assertion
		log.Println("uid:", id)
		return 0, errors.New("错误的uid类型")
	}
	if uid, err = strconv.ParseInt(s, 10, 64); err != nil {
		log.Println("获取jwt时出错,err:", err)
		return 0, err
	}
	return uid, nil
}
|
package views
import "time"
// CreateUserRes is the JSON response payload returned after creating a user.
type CreateUserRes struct {
	Name       string `json:"name"`
	Username   string `json:"username"`
	Age        int64  `json:"age"`
	Contact    string `json:"contact"` // gofmt fix: tag was fused to the type
	KycDetails string `json:"kyc_details"`
}
// CreateWalletRes is the JSON response payload returned after creating a wallet.
type CreateWalletRes struct {
	WalletName string `json:"wallet_name"`
}
// Wallets describes a single wallet and its balance.
type Wallets struct {
	WalletName string `json:"wallet_name"`
	WalletBalance float64 `json:"wallet_balance"`
	IsActive int64 `json:"is_active"` // presumably 1 = active, 0 = inactive — confirm with writers
}
// SignInRes is returned on successful sign-in: the user's wallets and id.
// NOTE(review): no json tags here, so fields serialize as "Wallets"/"Uid" —
// confirm that is intentional before adding tags.
type SignInRes struct {
	Wallets []Wallets
	Uid string
}
// GetUsersRes is one user entry in the user-listing response.
type GetUsersRes struct {
	Uid string `json:"uid"`
	Name string `json:"name"`
	Username string `json:"username"`
}
// ReportRes is one transaction row in the report response.
type ReportRes struct {
	Uid string `json:"uid"`
	Username string `json:"username"`
	WalletName string `json:"wallet_name"`
	TransactionID string `json:"transaction_id"`
	Amount float64 `json:"amount"`
	TransactionType string `json:"transaction_type"`
	TransactionTime time.Time `json:"transaction_time"`
}
|
package repository
import (
. "2019_2_IBAT/pkg/pkg/models"
"fmt"
"testing"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
)
// TestDBUserStorage_SetTagsIDs_Correct verifies SetTagIDs issues one INSERT
// per tag id.
func TestDBUserStorage_SetTagsIDs_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// bug fix: check the error before deferring Close — on failure db may be
	// nil and the deferred Close would panic
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	tagIDs := []string{
		uuid.New().String(),
		uuid.New().String(),
		uuid.New().String(),
	}
	authRec := AuthStorageValue{
		ID:   uuid.New(),
		Role: SeekerStr,
	}
	for _, tagID := range tagIDs {
		mock.
			ExpectExec(`INSERT INTO recommendations`).
			WithArgs(
				authRec.ID, tagID,
			).
			WillReturnResult(sqlmock.NewResult(1, 1))
	}
	repo := DBRecommendsStorage{
		DbConn: sqlxDB,
	}
	err = repo.SetTagIDs(authRec, tagIDs)
	if err != nil {
		t.Error("Failed to set tag IDs\n")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
// TestDBUserStorage_GetTagIDs_Correct verifies GetTagIDs returns the rows the
// SELECT produces.
func TestDBUserStorage_GetTagIDs_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// bug fix: check the error before deferring Close — on failure db may be
	// nil and the deferred Close would panic
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	rows := sqlmock.
		NewRows([]string{"tag_id"})
	expect := []string{
		uuid.New().String(),
		uuid.New().String(),
		uuid.New().String(),
	}
	for _, item := range expect {
		rows = rows.AddRow(item)
	}
	authRec := AuthStorageValue{
		ID: uuid.New(),
	}
	mock.
		ExpectQuery(`SELECT tag_id FROM recommendations WHERE`).
		WithArgs(authRec.ID).
		WillReturnRows(rows)
	repo := DBRecommendsStorage{
		DbConn: sqlxDB,
	}
	gotTagIDs, err := repo.GetTagIDs(authRec)
	if err != nil {
		t.Error("Failed to get tag IDs\n")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
	require.Equal(t, expect, gotTagIDs, "The two values should be the same.")
}
// TestDBUserStorage_GetTagIDs_Fail verifies GetTagIDs maps a query failure to
// the internal error message.
func TestDBUserStorage_GetTagIDs_Fail(t *testing.T) {
	db, mock, err := sqlmock.New()
	// bug fix: check the error before deferring Close — on failure db may be
	// nil and the deferred Close would panic
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	rows := sqlmock.
		NewRows([]string{"tag_id"})
	expect := []string{
		uuid.New().String(),
		uuid.New().String(),
		uuid.New().String(),
	}
	for _, item := range expect {
		rows = rows.AddRow(item)
	}
	authRec := AuthStorageValue{
		ID: uuid.New(),
	}
	mock.
		ExpectQuery(`SELECT tag_id FROM recommendations WHERE`).
		WithArgs(authRec.ID).
		WillReturnError(fmt.Errorf("bad query"))
	repo := DBRecommendsStorage{
		DbConn: sqlxDB,
	}
	_, err = repo.GetTagIDs(authRec)
	if err == nil {
		t.Error("Expected error")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
	require.Equal(t, InternalErrorMsg, err.Error(), "The two values should be the same.")
}
// TestDBUserStorage_GetUsersForTags covers both the success path and the
// query-failure path of GetUsersForTags.
func TestDBUserStorage_GetUsersForTags(t *testing.T) {
	db, mock, err := sqlmock.New()
	// bug fix: check the error before deferring Close — on failure db may be
	// nil and the deferred Close would panic
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	repo := DBRecommendsStorage{
		DbConn: sqlxDB,
	}
	rows := sqlmock.
		NewRows([]string{"tag_id"})
	expectUserIDs := []string{
		uuid.New().String(),
		uuid.New().String(),
		uuid.New().String(),
	}
	tagIDs := []string{
		uuid.New().String(),
		uuid.New().String(),
		uuid.New().String(),
	}
	for _, item := range expectUserIDs {
		rows = rows.AddRow(item)
	}
	mock.
		ExpectQuery(`SELECT DISTINCT person_id FROM recommendations WHERE`).
		WillReturnRows(rows)
	gotUserIDs, err := repo.GetUsersForTags(tagIDs)
	if err != nil {
		t.Error("Failed to get tag IDs\n")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
	require.Equal(t, expectUserIDs, gotUserIDs, "The two values should be the same.")
	// failure path: the same query now returns an error
	mock.
		ExpectQuery(`SELECT DISTINCT person_id FROM recommendations WHERE`).
		WillReturnError(fmt.Errorf(InternalErrorMsg))
	_, err = repo.GetUsersForTags(tagIDs)
	if err == nil {
		t.Error("Expected error\n")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
	require.Equal(t, InternalErrorMsg, err.Error(), "The two values should be the same.")
}
|
package main
import (
"context"
"crudrpc/api/mcrsv"
"encoding/json"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"google.golang.org/grpc"
)
// conn is the shared gRPC client connection to the backend; it is dialed once
// in main and reused by every HTTP handler.
var conn *grpc.ClientConn

// Request is the JSON body accepted by the HTTP endpoints.
type Request struct {
	Idx int `json:"idx"`
	Username string `json:"username"`
	UserId string `json:"userid"`
	Password string `json:"password"`
}
// CreateUser handles POST /create: it decodes the JSON body, forwards it to
// the gRPC backend, and writes the backend's response as JSON.
func CreateUser(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		// bug fix: the read error was previously discarded
		http.Error(w, "cannot read request body", http.StatusBadRequest)
		return
	}
	req := Request{}
	if err := json.Unmarshal(body, &req); err != nil {
		// bug fix: malformed JSON was previously forwarded as a zero value
		http.Error(w, "invalid JSON body", http.StatusBadRequest)
		return
	}
	c := mcrsv.NewRpcAppClient(conn)
	postman := mcrsv.ProtoUser{
		Idx:      int32(req.Idx),
		Username: req.Username,
		UserId:   req.UserId,
		Password: req.Password,
	}
	rows, err := c.CreateUser(context.Background(), &postman)
	if err != nil {
		// bug fix: an RPC failure previously produced a 200 with a nil body
		http.Error(w, "backend error", http.StatusBadGateway)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// headers already sent; an encode failure here cannot be reported
	_ = json.NewEncoder(w).Encode(rows)
}
// main dials the gRPC backend once and serves the HTTP API on :8082.
func main() {
	// NOTE: the services can still be deployed separately for an MSA-style
	// split even though they share this project layout.
	var err error
	conn, err = grpc.Dial(":8081", grpc.WithInsecure())
	if err != nil {
		// bug fix: a failed dial previously left conn nil and every handler
		// would fail on first use
		panic(err)
	}
	defer conn.Close()
	//c := mcrsv.NewRpcAppClient(conn)
	r := mux.NewRouter()
	r.HandleFunc("/create", CreateUser).Methods("POST")
	// bug fix: ListenAndServe's error was silently dropped
	if err := http.ListenAndServe(":8082", r); err != nil {
		panic(err)
	}
}
|
package problem0240
import "testing"
// TestSolve logs the result of searchMatrix on a single-row matrix.
func TestSolve(t *testing.T) {
	matrix := [][]int{
		{1, 2, 3, 4, 5},
	}
	t.Log(searchMatrix(matrix, 4))
}
|
package repository
import (
"fmt"
"github.com/google/uuid"
"github.com/pkg/errors"
. "2019_2_IBAT/pkg/pkg/models"
)
// CreateFavorite records the (person, vacancy) pair in favorite_vacancies and
// reports whether the insert succeeded. Failures are logged, not returned.
func (m *DBUserStorage) CreateFavorite(favVac FavoriteVacancy) bool {
	const query = "INSERT INTO favorite_vacancies(person_id, vacancy_id)" +
		"VALUES($1, $2);"
	if _, err := m.DbConn.Exec(query, favVac.PersonID, favVac.VacancyID); err != nil {
		fmt.Printf("CreateFavorite: %s \n", err)
		return false
	}
	return true
}
// GetFavoriteVacancies returns every vacancy the user (record.ID) has marked
// as favorite, joined with the owning company. Each returned vacancy has
// Favorite set to true.
func (m *DBUserStorage) GetFavoriteVacancies(record AuthStorageValue) ([]Vacancy, error) {
	vacancies := []Vacancy{}
	rows, err := m.DbConn.Queryx("SELECT v.id, v.own_id, c.company_name, v.experience, "+
		"v.position, v.tasks, v.requirements, v.wage_from, v.wage_to, v.conditions, v.about, "+
		"v.region, v.type_of_employment, v.work_schedule "+
		"FROM favorite_vacancies AS fv "+
		"JOIN vacancies AS v ON (fv.vacancy_id = v.id) "+
		"JOIN companies AS c ON v.own_id = c.own_id WHERE fv.person_id = $1;", record.ID) //fux query
	if err != nil {
		fmt.Printf("GetFavoriteVacancies: %s\n", err)
		return vacancies, errors.New(InternalErrorMsg)
	}
	defer rows.Close()
	for rows.Next() {
		var vacancy Vacancy
		err = rows.StructScan(&vacancy)
		if err != nil {
			fmt.Printf("GetFavoriteVacancies: %s\n", err)
			return vacancies, errors.New(InternalErrorMsg)
		}
		vacancy.Favorite = true
		vacancies = append(vacancies, vacancy)
	}
	// bug fix: a mid-iteration error previously went unnoticed and returned a
	// silently-truncated result set
	if err = rows.Err(); err != nil {
		fmt.Printf("GetFavoriteVacancies: %s\n", err)
		return vacancies, errors.New(InternalErrorMsg)
	}
	return vacancies, nil
}
// DeleteFavoriteVacancy removes the favorite mark for the given vacancy and
// user. Any database failure is reported as an invalid-id error.
func (m *DBUserStorage) DeleteFavoriteVacancy(vacancyId uuid.UUID, authInfo AuthStorageValue) error {
	const query = "DELETE FROM favorite_vacancies WHERE vacancy_id = $1 AND person_id = $2;"
	//check fi invalid id or internal error
	_, err := m.DbConn.Exec(query, vacancyId, authInfo.ID)
	if err != nil {
		fmt.Printf("DeleteVacancy: %s\n", err)
		return errors.New(InvalidIdMsg) //dif errors
	}
	return nil
}
|
package asset
// FileWriter interface is used to write all the files in the specified location
type FileWriter interface {
	// PersistToFile writes the asset's files under the given directory.
	PersistToFile(directory string) error
}

// NewDefaultFileWriter create a new adapter to expose the default implementation as a FileWriter
func NewDefaultFileWriter(a WritableAsset) FileWriter {
	return &fileWriterAdapter{a: a}
}

// fileWriterAdapter adapts a WritableAsset to the FileWriter interface by
// delegating to the package-level PersistToFile helper.
type fileWriterAdapter struct {
	a WritableAsset
}

// PersistToFile wraps the default implementation
func (fwa *fileWriterAdapter) PersistToFile(directory string) error {
	return PersistToFile(fwa.a, directory)
}
|
package cmd
import (
"github.com/spf13/cobra"
)
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
	rootCmd := initRoot()
	// Attach the documentation-generation subcommand before running.
	rootCmd.AddCommand(initDoc(rootCmd))
	// CheckErr prints the error and exits non-zero on failure.
	cobra.CheckErr(rootCmd.Execute())
}
|
package p0001
import (
"reflect"
"testing"
)
// TestTwoSum checks twoSum on the canonical example: 2+7 == 9 at indices 0,1.
func TestTwoSum(t *testing.T) {
	nums := []int{2, 7, 11, 15}
	const target = 9
	t.Logf(" Input: nums = %v, target = %d\n", nums, target)
	got := twoSum(nums, target)
	t.Logf(" Output: %v\n", got)
	want := []int{0, 1}
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("twoSum expected %v, actual %v", want, got)
	}
}
// TestTwoSum2 checks the alternative twoSum2 implementation on the same example.
func TestTwoSum2(t *testing.T) {
	nums := []int{2, 7, 11, 15}
	const target = 9
	t.Logf(" Input: nums = %v, target = %d\n", nums, target)
	got := twoSum2(nums, target)
	t.Logf(" Output: %v\n", got)
	want := []int{0, 1}
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("twoSum expected %v, actual %v", want, got)
	}
}
|
/*
* @lc app=leetcode.cn id=147 lang=golang
*
* [147] 对链表进行插入排序
*/
package solution
// @lc code=start
// insertionSortList sorts a singly linked list in ascending order with
// insertion sort: each out-of-order node q is spliced out and re-inserted
// after the last node (scanning from the dummy head) whose value is <= q.Val.
func insertionSortList(head *ListNode) *ListNode {
	if head == nil || head.Next == nil {
		return head
	}
	// dummy simplifies insertion at the front of the list.
	dummy := &ListNode{Next: head}
	// pre: last node of the sorted prefix; q = pre.Next: node to place;
	// p: scans from dummy for q's insertion point.
	p, pre, q := dummy, head, head.Next
	for q != nil {
		if pre.Val <= q.Val {
			// q already in order; just extend the sorted prefix.
			pre = pre.Next
		} else {
			// Find the first p with p.Next.Val > q.Val. The scan stops
			// before reaching pre because pre.Val > q.Val here.
			for p.Next.Val <= q.Val {
				p = p.Next
			}
			// Unlink q from after pre and splice it in after p.
			pre.Next = q.Next
			q.Next = p.Next
			p.Next = q
		}
		p = dummy
		q = pre.Next
	}
	return dummy.Next
}
// @lc code=end
|
package dao
import (
"fmt"
"github.com/xormplus/xorm"
"go.uber.org/zap"
"mix/test/codes"
entity "mix/test/entity/core/transaction"
mapper "mix/test/mapper/core/transaction"
"mix/test/utils/status"
)
// CreateToken inserts item via the mapper and returns the database-assigned
// row id. Errors are logged and returned.
func (p *Dao) CreateToken(logger *zap.Logger, session *xorm.Session, item *entity.Token) (int64, error) {
	res, err := mapper.CreateToken(session, item)
	if err != nil {
		logger.Error("Call mapper.CreateToken error", zap.Error(err))
		return 0, err
	}
	id, err := res.LastInsertId()
	if err != nil {
		logger.Error("Get id error", zap.Error(err))
	}
	return id, err
}
// GetToken loads the token row with the given id. Callers (MustGetToken)
// treat a nil item with nil error as "not found".
func (p *Dao) GetToken(logger *zap.Logger, session *xorm.Session, id int64) (*entity.Token, error) {
	item, err := mapper.GetToken(session, id)
	if err != nil {
		logger.Error("Call mapper.GetToken error", zap.Error(err))
	}
	return item, err
}
// MustGetToken is GetToken, but a missing row is converted into a
// codes.TokenNotFound status error instead of a nil item.
func (p *Dao) MustGetToken(logger *zap.Logger, session *xorm.Session, id int64) (item *entity.Token, err error) {
	item, err = p.GetToken(logger, session, id)
	if err != nil {
		return
	}
	if item == nil {
		// Not found: surface a typed status error and log the id.
		err = status.Code(codes.TokenNotFound)
		logger.Error(
			"Get token error",
			zap.Error(err),
			zap.Int64("id", id),
		)
		return
	}
	return
}
// GetTokenList returns every token row; errors are logged and passed through.
func (p *Dao) GetTokenList(logger *zap.Logger, session *xorm.Session) ([]*entity.Token, error) {
	items, err := mapper.GetTokenList(session)
	if err != nil {
		logger.Error("Call mapper.GetTokenList error", zap.Error(err))
	}
	return items, err
}
// RemoveToken deletes the token row with the given id, ignoring the affected
// count (see MustRemoveToken for the strict variant).
func (p *Dao) RemoveToken(logger *zap.Logger, session *xorm.Session, id int64) error {
	if _, err := mapper.RemoveToken(session, id); err != nil {
		logger.Error("Call mapper.RemoveToken error", zap.Error(err))
		return err
	}
	return nil
}
// MustRemoveToken deletes the token row with the given id and fails unless
// exactly one row was affected.
func (p *Dao) MustRemoveToken(logger *zap.Logger, session *xorm.Session, id int64) (err error) {
	res, err := mapper.RemoveToken(session, id)
	if err != nil {
		logger.Error("Call mapper.RemoveToken error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		// Fix: message previously said "update affected error", which
		// misreports a delete as an update.
		err = fmt.Errorf("remove affected error")
		logger.Error("Call mapper.RemoveToken error",
			zap.Int64("affected", affected),
			zap.Int64("id", id),
			zap.Error(err))
		return
	}
	return
}
// UpdateToken applies item's changes via the mapper, ignoring the affected
// count (see MustUpdateToken for the strict variant).
func (p *Dao) UpdateToken(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Token) error {
	if _, err := mapper.UpdateToken(engine, session, item); err != nil {
		logger.Error("Call mapper.UpdateToken error", zap.Error(err))
		return err
	}
	return nil
}
// MustUpdateToken updates item and requires exactly one affected row; any
// other count is reported as an error with the affected count and item id.
func (p *Dao) MustUpdateToken(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Token) (err error) {
	res, err := mapper.UpdateToken(engine, session, item)
	if err != nil {
		logger.Error("Call mapper.UpdateToken error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		err = fmt.Errorf("update affected error")
		logger.Error("Call mapper.UpdateToken error",
			zap.Int64("affected", affected),
			zap.Int64("item.Id", item.Id),
			zap.Error(err))
		return
	}
	return
}
// GetTokenBySymbol looks up a token row by its (token, chain) pair. Callers
// (MustGetTokenBySymbol) treat a nil item with nil error as "not found".
func (p *Dao) GetTokenBySymbol(logger *zap.Logger, session *xorm.Session, token string, chain string) (*entity.Token, error) {
	item, err := mapper.GetTokenBySymbol(session, token, chain)
	if err != nil {
		logger.Error("Call mapper.GetTokenBySymbol error", zap.Error(err))
	}
	return item, err
}
// MustGetTokenBySymbol is GetTokenBySymbol, but a missing row is converted
// into a codes.TokenNotFound status error instead of a nil item.
func (p *Dao) MustGetTokenBySymbol(logger *zap.Logger, session *xorm.Session, token string, chain string) (item *entity.Token, err error) {
	item, err = p.GetTokenBySymbol(logger, session, token, chain)
	if err != nil {
		return
	}
	if item == nil {
		// Not found: surface a typed status error and log the lookup keys.
		err = status.Code(codes.TokenNotFound)
		logger.Error(
			"Get token error",
			zap.Error(err),
			zap.String("token", token),
			zap.String("chain", chain),
		)
		return
	}
	return
}
// UpdateTokenBySymbol updates the row keyed by item's (token, chain) pair,
// ignoring the affected count (see MustUpdateTokenBySymbol).
func (p *Dao) UpdateTokenBySymbol(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Token) error {
	if _, err := mapper.UpdateTokenBySymbol(engine, session, item); err != nil {
		logger.Error("Call mapper.UpdateTokenBySymbol error", zap.Error(err))
		return err
	}
	return nil
}
// MustUpdateTokenBySymbol updates the row keyed by item's (token, chain)
// pair and requires exactly one affected row.
func (p *Dao) MustUpdateTokenBySymbol(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Token) (err error) {
	res, err := mapper.UpdateTokenBySymbol(engine, session, item)
	if err != nil {
		logger.Error("Call mapper.UpdateTokenBySymbol error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		err = fmt.Errorf("update affected error")
		logger.Error("Call mapper.UpdateTokenBySymbol error",
			zap.Int64("affected", affected),
			zap.String("item.Token", item.Token),
			zap.String("item.Chain", item.Chain),
			zap.Error(err))
		return
	}
	return
}
// RemoveTokenBySymbol deletes the row keyed by (token, chain), ignoring the
// affected count (see MustRemoveTokenBySymbol for the strict variant).
func (p *Dao) RemoveTokenBySymbol(logger *zap.Logger, session *xorm.Session, token string, chain string) error {
	if _, err := mapper.RemoveTokenBySymbol(session, token, chain); err != nil {
		logger.Error("Call mapper.RemoveTokenBySymbol error", zap.Error(err))
		return err
	}
	return nil
}
// MustRemoveTokenBySymbol deletes the row keyed by (token, chain) and fails
// unless exactly one row was affected.
func (p *Dao) MustRemoveTokenBySymbol(logger *zap.Logger, session *xorm.Session, token string, chain string) (err error) {
	res, err := mapper.RemoveTokenBySymbol(session, token, chain)
	if err != nil {
		logger.Error("Call mapper.RemoveTokenBySymbol error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		// Fix: message previously said "update affected error", which
		// misreports a delete as an update.
		err = fmt.Errorf("remove affected error")
		logger.Error("Call mapper.RemoveTokenBySymbol error",
			zap.Int64("affected", affected),
			zap.String("token", token),
			zap.String("chain", chain),
			zap.Error(err))
		return
	}
	return
}
|
package dvid
import (
. "github.com/janelia-flyem/go/gocheck"
"testing"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }
// MySuite groups the coordinate-arithmetic tests below.
type MySuite struct{}
// Register the suite with gocheck at package load time.
var _ = Suite(&MySuite{})
// TestVoxelCoord covers component-wise arithmetic, distance, string
// formatting, min/max, and conversion to block coordinates for VoxelCoord.
func (s *MySuite) TestVoxelCoord(c *C) {
	a := VoxelCoord{10, 21, 837821}
	b := VoxelCoord{78312, -200, 40123}
	result := a.Add(b)
	c.Assert(result[0], Equals, a[0]+b[0])
	c.Assert(result[1], Equals, a[1]+b[1])
	c.Assert(result[2], Equals, a[2]+b[2])
	result = a.Sub(b)
	c.Assert(result[0], Equals, a[0]-b[0])
	c.Assert(result[1], Equals, a[1]-b[1])
	c.Assert(result[2], Equals, a[2]-b[2])
	result = a.Mod(b)
	c.Assert(result[0], Equals, a[0]%b[0])
	c.Assert(result[1], Equals, a[1]%b[1])
	c.Assert(result[2], Equals, a[2]%b[2])
	result = a.Div(b)
	c.Assert(result[0], Equals, a[0]/b[0])
	c.Assert(result[1], Equals, a[1]/b[1])
	c.Assert(result[2], Equals, a[2]/b[2])
	// Distance between (1,1,1) and (4,4,4) is sqrt(27) ≈ 5.196, asserted
	// here as int32(5) — the implementation evidently truncates/rounds down.
	d := VoxelCoord{1, 1, 1}
	e := VoxelCoord{4, 4, 4}
	dist := d.Distance(e)
	c.Assert(dist, Equals, int32(5))
	c.Assert(a.String(), Equals, "(10,21,837821)")
	// Max/Min are component-wise and symmetric in their arguments.
	result = a.Max(b)
	c.Assert(result, Equals, VoxelCoord{78312, 21, 837821})
	result = b.Max(a)
	c.Assert(result, Equals, VoxelCoord{78312, 21, 837821})
	result = a.Min(b)
	c.Assert(result, Equals, VoxelCoord{10, -200, 40123})
	result = b.Min(a)
	c.Assert(result, Equals, VoxelCoord{10, -200, 40123})
	// Block coordinates divide the voxel coordinate by the block size per axis.
	d = VoxelCoord{111, 213, 678}
	blockSize := Point3d{20, 30, 40}
	g := d.BlockCoord(blockSize)
	c.Assert(g, Equals, BlockCoord{5, 7, 16})
	d = VoxelCoord{111, 213, 680}
	g = d.BlockCoord(blockSize)
	c.Assert(g, Equals, BlockCoord{5, 7, 17})
	d = VoxelCoord{111, 213, 678}
	blockSize = Point3d{20, 30, 1}
	g = d.BlockCoord(blockSize)
	c.Assert(g, Equals, BlockCoord{5, 7, 678})
	// BlockVoxel is the remainder within the containing block.
	result = d.BlockVoxel(blockSize)
	c.Assert(result, Equals, VoxelCoord{11, 3, 0})
}
// TestBlockCoord covers add/sub, string formatting, and component-wise
// min/max for BlockCoord.
func (s *MySuite) TestBlockCoord(c *C) {
	a := BlockCoord{123, 8191, 32001}
	b := BlockCoord{2980, 617, 99}
	result := a.Add(b)
	c.Assert(result[0], Equals, a[0]+b[0])
	c.Assert(result[1], Equals, a[1]+b[1])
	c.Assert(result[2], Equals, a[2]+b[2])
	result = a.Sub(b)
	c.Assert(result[0], Equals, a[0]-b[0])
	c.Assert(result[1], Equals, a[1]-b[1])
	c.Assert(result[2], Equals, a[2]-b[2])
	c.Assert(a.String(), Equals, "(123,8191,32001)")
	// Max/Min are component-wise and symmetric in their arguments.
	result = a.Max(b)
	c.Assert(result, Equals, BlockCoord{2980, 8191, 32001})
	result = b.Max(a)
	c.Assert(result, Equals, BlockCoord{2980, 8191, 32001})
	result = a.Min(b)
	c.Assert(result, Equals, BlockCoord{123, 617, 99})
	result = b.Min(a)
	c.Assert(result, Equals, BlockCoord{123, 617, 99})
}
|
// Copyright 2020 Thomas.Hoehenleitner [at] seerose.net
// Use of this source code is governed by a license that can be found in the LICENSE file.
// Package assert_test contains blackbox tests.
package tst_test
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/rokath/trice/pkg/tst"
)
// TestEqual checks tst.Equal with two identical integers.
func TestEqual(t *testing.T) {
	const value = 33
	tst.Equal(t, value, value)
}
// TestEqualLines checks that EqualLines treats CRLF and LF endings as equal.
func TestEqualLines(t *testing.T) {
	act := "Hello\nWorld\n"
	exp := "Hello\r\nWorld\r\n"
	tst.EqualLines(t, exp, act)
}
// TestEqualTextFiles writes the same two lines with CRLF vs LF endings into
// two temp files and checks tst.EqualTextFiles treats them as equal.
func TestEqualTextFiles(t *testing.T) {
	fd0, e0 := ioutil.TempFile("", "*.txt")
	assert.Nil(t, e0)
	defer func() {
		// Deferred cleanup runs after the comparison: close, then remove.
		assert.Nil(t, fd0.Close())
		assert.Nil(t, os.Remove(fd0.Name()))
	}()
	fd1, e1 := ioutil.TempFile("", "*.txt")
	assert.Nil(t, e1)
	defer func() {
		assert.Nil(t, fd1.Close())
		assert.Nil(t, os.Remove(fd1.Name()))
	}()
	_, e2 := fd0.WriteString("Hello\r\nWorld\r\n")
	assert.Nil(t, e2)
	_, e3 := fd1.WriteString("Hello\nWorld\n")
	assert.Nil(t, e3)
	tst.EqualTextFiles(t, fd0.Name(), fd1.Name())
}
// TestEqualFiles compares the test binary's own path with itself.
// NOTE(review): despite the name, this calls tst.EqualLines on the two path
// strings rather than comparing file contents — confirm whether
// tst.EqualFiles was intended here.
func TestEqualFiles(t *testing.T) {
	tst.EqualLines(t, os.Args[0], os.Args[0])
}
|
package banner
import (
"fmt"
)
// Print prints the ASCII banner to the console, one line per Println.
// Note: Generated ASCII banner online: http://patorjk.com/software/taag/#p=display&f=Big%20Money-nw
func Print() {
	banner := []string{
		`-----------------------------------------------------------`,
		` $$$$$$\ $$\ $$\ $$\ `,
		`$$ __$$\ $$ | $\ $$ | $$ | `,
		`$$ / \__| $$$$$$\ $$ |$$$\ $$ | $$$$$$\ $$$$$$$\ `,
		`$$ |$$$$\ $$ __$$\ $$ $$ $$\$$ |$$ __$$\ $$ __$$\ `,
		`$$ |\_$$ |$$ / $$ | $$$$ _$$$$ |$$$$$$$$ |$$ | $$ |`,
		`$$ | $$ |$$ | $$ | $$$ / \$$$ |$$ ____|$$ | $$ |`,
		`\$$$$$$ |\$$$$$$ | $$ / \$$ |\$$$$$$$\ $$$$$$$ |`,
		` \______/ \______/ \__/ \__| \_______|\_______/ `,
		`-----------------------------------------------------------`,
		`-------Idiomatic Go - Web Boilerplate by severedsea--------`,
		`-----------------------------------------------------------`,
	}
	for _, line := range banner {
		fmt.Println(line)
	}
}
|
package scanner
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
//"time"
"github.com/PuerkitoBio/goquery"
"github.com/purstal/go-tieba-base/misc"
"github.com/purstal/go-tieba-base/simple-http"
//"github.com/purstal/go-tieba-modules/operation-analyser/old/log"
)
// OpType identifies a forum-admin post operation as encoded by the Tieba
// listPostLog endpoint (see ListPostLog's "op_type" parameter).
type OpType int
// Operation codes observed from the listPostLog endpoint; OpType_None is the
// fallback for unrecognized rows.
const (
	OpType_None OpType = 0
	OpType_Delete OpType = 12
	OpType_Recover OpType = 13
	OpType_AddGood OpType = 17
	OpType_CancelGood OpType = 18
	OpType_AddTop OpType = 25
	OpType_CancelTop OpType = 26
)
// ListPostLog fetches one page of the forum's post-operation log
// (bawu2/platform/listPostLog), filtered by operator name (svalue) and
// operation type. fromTime/toTime are unix timestamps; zero disables the
// bound. pn is the 1-based page number. The raw (GBK-encoded) response body
// is returned.
func ListPostLog(BDUSS, forumName, svalue string, opType OpType, fromTime, toTime int64, pn int) ([]byte, error) {
	var parameters simple_http.Parameters
	parameters.Add("word", misc.UrlQueryEscape(misc.ToGBK(forumName)))
	parameters.Add("op_type", strconv.Itoa(int(opType)))
	parameters.Add("stype", "op_uname") // post_uname: filter by post author; op_uname: filter by operator
	parameters.Add("svalue", svalue)    // utf8-url-encoding
	parameters.Add("date_type", "on")   // enable the time-range filter
	if fromTime != 0 {
		parameters.Add("begin", strconv.FormatInt(fromTime, 10)) // start timestamp
	}
	if toTime != 0 {
		parameters.Add("end", strconv.FormatInt(toTime, 10)) // end timestamp
	}
	parameters.Add("pn", strconv.Itoa(pn))
	var cookies simple_http.Cookies
	cookies.Add("BDUSS", BDUSS)
	// Fix: the URL scheme was "simple_http://" (a package-rename gone wrong
	// in the string literal); every request would fail to resolve.
	return simple_http.Get("http://tieba.baidu.com/bawu2/platform/listPostLog", parameters, cookies)
}
// TryListingPostLog retries ListPostLog until it succeeds, then returns the
// body converted from GBK to UTF-8.
// NOTE(review): this loops forever on persistent errors — no backoff,
// retry cap, or cancellation. Confirm that is acceptable for the callers.
func TryListingPostLog(BDUSS, forumName, svalue string, opType OpType, fromTime, toTime int64, pn int) []byte {
	for {
		var resp, err = ListPostLog(BDUSS, forumName, svalue, opType, fromTime, toTime, pn)
		if err == nil {
			return []byte(misc.FromGBK(string(resp)))
		}
	}
}
// TryGettingListingPostLogDocument retries fetch+parse until goquery accepts
// the page, returning the parsed document. Like TryListingPostLog, it has no
// retry cap.
func TryGettingListingPostLogDocument(BDUSS, forumName, svalue string, opType OpType, fromTime, toTime int64, pn int) *goquery.Document {
	for {
		var doc, err = goquery.NewDocumentFromReader(bytes.NewReader(TryListingPostLog(BDUSS, forumName, svalue, opType, fromTime, toTime, pn)))
		if err == nil {
			return doc
		}
	}
}
// CanViewBackstage reports whether the page shows the admin operator menu,
// i.e. whether the logged-in account can see the backstage log at all.
func CanViewBackstage(doc *goquery.Document) bool {
	return doc.Find(`div#operator_menu`).Length() >= 1
}
// ExtractLogCount reads the total log count from the <em> element inside the
// breadcrumbs bar; the Atoi error is returned unchanged.
func ExtractLogCount(doc *goquery.Document) (int, error) {
	text := doc.Find(`div.breadcrumbs`).Contents().Filter(`em`).Text()
	return strconv.Atoi(text)
}
// Log is one parsed row of the forum's post-operation log table.
type Log struct {
	Author string // post author's display name
	// PostTime is when the post was made (no year on the page).
	PostTime struct {
		Month, Day, Hour, Minute int
	}
	Title string // thread title, with any reply prefix stripped
	IsReply bool // true when the title carried the reply prefix
	Text string // plain text of the post
	MediaHtml string // raw HTML of the post's media area, if any
	TID int // thread id, parsed from the /p/<tid>?pid=<pid> link
	PID int // post id, parsed from the same link
	OperateType OpType // which admin operation was performed
	Operator string // admin who performed the operation
	// OperateTime is when the operation was performed (includes year).
	OperateTime struct {
		Year, Month, Day, Hour, Minute int
	}
}
// extractFromLinkRegexp matches /p/<tid>?pid=<pid> post links.
// NOTE(review): not referenced in this file's visible code (ExtractLogs uses
// fmt.Sscanf instead) — possibly used elsewhere in the package, or dead.
var extractFromLinkRegexp *regexp.Regexp
func init() {
	// Compiled once at startup; the pattern is fixed so MustCompile is safe.
	extractFromLinkRegexp = regexp.MustCompile(`/p/(\d*)\?pid=(\d*)`)
}
// ExtractLogs parses the log table rows of doc into logs[fromIndex:toIndex].
// It returns false (parsing nothing) when the page does not contain exactly
// toIndex-fromIndex rows, so the caller can retry the fetch.
func ExtractLogs(doc *goquery.Document, logs []Log, fromIndex, toIndex int) (trueLength bool) {
	var logTrs = doc.Find(`table.data_table`).Find(`tbody`).Find(`tr`)
	if logTrs.Length() != toIndex-fromIndex {
		return false
	}
	logTrs.Each(func(i int, tr *goquery.Selection) {
		// Fix: guard before indexing — previously &logs[fromIndex+i] ran
		// first, so the diagnostic panic below could never fire.
		if len(logs) <= fromIndex+i {
			panic(fmt.Sprintln(len(logs), fromIndex+i, fromIndex, toIndex))
		}
		var log = &logs[fromIndex+i]
		log.Author = tr.Find(`div.post_author`).Find(`a`).Text()
		fmt.Sscanf(tr.Find(`time.ui_text_desc`).Text(),
			"%d月%d日 %d:%d",
			&log.PostTime.Month, &log.PostTime.Day,
			&log.PostTime.Hour, &log.PostTime.Minute)
		var contentSel = tr.Find(`div.post_content`)
		var a = contentSel.Find(`h1`).Find(`a`)
		var title = strings.TrimSpace(a.Text())
		// A reply prefix on the title marks the row as a reply, not a thread.
		if strings.HasPrefix(title, "回复:") {
			log.IsReply = true
			log.Title = strings.TrimPrefix(title, "回复:")
		} else {
			log.Title = title
		}
		var herf, _ = a.Attr(`href`)
		fmt.Sscanf(herf, "/p/%d?pid=%d", &log.TID, &log.PID)
		log.Text = strings.TrimSpace(contentSel.Find(`div.post_text`).Text())
		var mediaHtml, _ = contentSel.Find(`div.post_media`).Html()
		// Fix: previously this tested log.MediaHtml, which is always empty
		// here, so the media HTML was never stored on the log entry.
		if strings.TrimSpace(mediaHtml) != "" {
			log.MediaHtml = mediaHtml
		}
		var td2 = tr.Find(`td`).Next()
		// Map the operation label in the second cell to an OpType code.
		switch td2.Find(`span`).Text() {
		case "删贴":
			log.OperateType = OpType_Delete
		case "恢复":
			log.OperateType = OpType_Recover
		case "加精":
			log.OperateType = OpType_AddGood
		case "取消加精":
			log.OperateType = OpType_CancelGood
		case "置顶":
			log.OperateType = OpType_AddTop
		case "取消置顶":
			log.OperateType = OpType_CancelTop
		default:
			log.OperateType = OpType_None
		}
		var td3 = td2.Next()
		log.Operator = td3.Find(`a.ui_text_normal`).Text()
		var td4 = td3.Next()
		var td4Html, _ = td4.Html()
		fmt.Sscanf(td4Html,
			`%d-%d-%d<br/>%d:%d`,
			&log.OperateTime.Year,
			&log.OperateTime.Month, &log.OperateTime.Day,
			&log.OperateTime.Hour, &log.OperateTime.Minute)
	})
	return true
}
// TryGettingAndExtractLogs repeatedly fetches page pn until ExtractLogs
// succeeds, filling logs[(pn-1)*30 : (pn-1)*30+trueCount] (30 rows per full
// page; trueCount is the expected row count of this page).
func TryGettingAndExtractLogs(BDUSS, forumName, svalue string,
	opType OpType, fromTime, toTime int64,
	pn int, logs []Log, trueCount int) {
	for {
		var doc = TryGettingListingPostLogDocument(BDUSS, forumName, svalue, opType, fromTime, toTime, pn)
		if ExtractLogs(doc, logs, (pn-1)*30, (pn-1)*30+trueCount) {
			return
		}
	}
}
|
package structs
import "testing"
// TestPerimeter verifies the perimeter of a 10x3 rectangle is 26.
func TestPerimeter(t *testing.T) {
	rect := Rectangle{10.0, 3.0}
	want := 26.0
	if got := Perimeter(rect); got != want {
		t.Errorf("got %.2f want %.2f", got, want)
	}
}
// TestArea checks Area() through the Shape interface for both shapes.
func TestArea(t *testing.T) {
	assertArea := func(t *testing.T, shape Shape, want float64) {
		t.Helper()
		if got := shape.Area(); got != want {
			t.Errorf("got %.2f want %.2f", got, want)
		}
	}
	t.Run("rectangles", func(t *testing.T) {
		assertArea(t, Rectangle{10.0, 3.0}, 30.0)
	})
	t.Run("circles", func(t *testing.T) {
		assertArea(t, Circle{10}, 314.1592653589793)
	})
}
|
package leetcode
import "testing"
// TestCommonPrefix exercises commonPrefix over string pairs with no overlap,
// full overlap, and partial overlap in either argument order.
func TestCommonPrefix(t *testing.T) {
	cases := []struct {
		a, b, cp string
	}{
		{a: "a", b: "b", cp: ""},
		{a: "a", b: "a", cp: "a"},
		{a: "abc", b: "ab", cp: "ab"},
		{a: "ab", b: "abc", cp: "ab"},
		{a: "ab123", b: "ab45", cp: "ab"},
	}
	for i, tc := range cases {
		got := commonPrefix(tc.a, tc.b)
		if got != tc.cp {
			t.Errorf("%d: commonPrefix: got %v, want %v", i, got, tc.cp)
		}
	}
}
// TestLongestCommonPrefix checks the two LeetCode examples: a shared prefix
// and no shared prefix at all.
func TestLongestCommonPrefix(t *testing.T) {
	cases := []struct {
		input  []string
		output string
	}{
		{input: []string{"flower", "flow", "flight"}, output: "fl"},
		{input: []string{"dog", "racecar", "car"}, output: ""},
	}
	for i, tc := range cases {
		got := longestCommonPrefix(tc.input)
		if got != tc.output {
			t.Errorf("%d: longestCommonPrefix: got %v, want %v", i, got, tc.output)
		}
	}
}
|
//All variables on LHS of declaration has already been declared
package main
// pain is a compiler-test fixture: the := below is intended to be legal
// because z is newly declared on the LHS, while x and y are only reassigned.
// NOTE(review): `false` (a string) is assigned to bool y and x/y/z are
// unused, so stock gc would still reject this — confirm against the test
// suite's intended compiler behavior.
func pain () { //should be legal: z is new in the short variable declaration
	var x, y bool
	x, y, z := true, `false`, true != false;
}
// main is the intentionally illegal case: every variable on the LHS of :=
// has already been declared, so the short declaration introduces no new
// variable and must be rejected.
func main () { //Illegal: no new variable on the left side of :=
	var x, y, z bool
	x, y, z := true, `false`, true == false;
}
|
package model
// Assessment records one answered question of an application's test:
// the selected and correct answers plus whether they matched.
type Assessment struct {
	BaseModel
	ApplicationID uint `json:"application_id" gorm:"not null;type:int(15)"`
	QuestionID string `json:"question_id" gorm:"not null;type:varchar(20)"`
	SelectedAnswer string `json:"selected_answer" gorm:"type:varchar(100)"`
	CorrectAnswer string `json:"correct_answer" gorm:"type:varchar(100)"`
	IsCorrect bool `json:"is_correct" gorm:"type:tinyint(1);default:0"`
	Question *Question `json:"question"`
	Application *Application `json:"application"`
}
|
package main
import (
"fmt"
)
// main demonstrates predictPartyVictory on a sample senate string.
func main() {
	fmt.Println(predictPartyVictory("RD"))
	//fmt.Println(predictPartyVictory("DDRRR"))
}
// predictPartyVictory simulates LeetCode 649 (Dota2 Senate): senators take
// turns in order, each banning the next opposing senator; the party with the
// last senator standing wins. Banned senators are zeroed in place, and the
// running ban balance deliberately carries over between passes.
func predictPartyVictory(senate string) string {
	members := []byte(senate)
	// balance > 0: Dire bans owed to Radiant; balance < 0: the reverse.
	balance := 0
	radiantAlive, direAlive := true, true
	for radiantAlive && direAlive {
		radiantAlive, direAlive = false, false
		for idx := range members {
			switch members[idx] {
			case 'D':
				if balance < 0 {
					members[idx] = 0 // banned by a pending Radiant ban
				} else {
					direAlive = true
				}
				balance++
			case 'R':
				if balance > 0 {
					members[idx] = 0 // banned by a pending Dire ban
				} else {
					radiantAlive = true
				}
				balance--
			}
		}
	}
	if radiantAlive {
		return "Radiant"
	}
	return "Dire"
}
|
package submerge
import (
"bufio"
"os"
"strconv"
"strings"
)
// parseSubFile reads an entire subtitle file block by block via parseSubLine
// and returns the parsed lines in order. Parsing stops at the first error or
// at end of input.
func parseSubFile(file *os.File) ([]*subLine, error) {
	var lines []*subLine
	sc := bufio.NewScanner(file)
	nextLine := true
	for nextLine {
		line, notEmpty, err := parseSubLine(sc)
		if err != nil {
			return nil, err
		}
		// notEmpty is false once the scanner is exhausted.
		nextLine = notEmpty
		// line is nil when the input ended before another index line.
		if line != nil {
			lines = append(lines, line)
		}
	}
	return lines, nil
}
// parseSubLine reads one SRT-style block (index, time, up to two text lines)
// from sc. It returns the parsed block (nil if the input ended before an
// index line), whether more input may follow, and any scan or parse error.
func parseSubLine(sc *bufio.Scanner) (*subLine, bool, error) {
	counter := 0
	var currentLine *subLine
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		// Strip UTF-8 BOM remnants that can precede the first block.
		line = strings.Replace(line, "\ufeff", "", -1)
		if line == "" {
			// A blank line terminates the block; more blocks may follow.
			return currentLine, true, nil
		}
		switch counter {
		case 0:
			num, err := strconv.ParseInt(line, 10, 64)
			if err != nil {
				return nil, false, err
			}
			currentLine = &subLine{Num: int(num)}
		case 1:
			currentLine.Time = line
		case 2:
			currentLine.Text1 = line
		case 3:
			currentLine.Text2 = line
		}
		counter++
	}
	// Fix: Scanner.Err must be checked after the Scan loop ends; previously
	// a read failure was indistinguishable from a clean EOF. (The old check
	// inside the loop was dead — Err is always nil while Scan returns true.)
	if err := sc.Err(); err != nil {
		return nil, false, err
	}
	return currentLine, false, nil
}
|
package rabbitmq
import (
"context"
"regexp"
"time"
"github.com/pkg/errors"
"github.com/streadway/amqp"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/rabbit"
rtypes "github.com/batchcorp/plumber/backends/rabbitmq/types"
"github.com/batchcorp/plumber/prometheus"
"github.com/batchcorp/plumber/validate"
)
// Relay consumes RabbitMQ messages and forwards them onto relayCh until ctx
// is cancelled. Consume errors are converted into ErrorRecords on errorCh.
// Messages whose routing key matches ExcludeBindingKeyRegex are skipped.
func (r *RabbitMQ) Relay(ctx context.Context, relayOpts *opts.RelayOptions, relayCh chan interface{}, errorCh chan<- *records.ErrorRecord) error {
	if err := validateRelayOptions(relayOpts); err != nil {
		return errors.Wrap(err, "unable to verify options")
	}
	var excludeRegexp *regexp.Regexp
	if relayOpts.Rabbit.Args.ExcludeBindingKeyRegex != "" {
		var err error
		excludeRegexp, err = regexp.Compile(relayOpts.Rabbit.Args.ExcludeBindingKeyRegex)
		if err != nil {
			return errors.Wrap(err, "unable to compile exclude regex")
		}
	}
	// Check if nil to allow unit testing injection into struct
	if r.client == nil {
		consumer, err := r.newRabbitForRead(relayOpts.Rabbit.Args)
		if err != nil {
			return errors.Wrap(err, "unable to create new rabbit consumer")
		}
		r.client = consumer
	}
	defer r.client.Close()
	errCh := make(chan *rabbit.ConsumeError)
	go r.client.Consume(ctx, errCh, func(msg amqp.Delivery) error {
		// Drop messages whose routing key matches the exclude filter.
		if excludeRegexp != nil && excludeRegexp.Match([]byte(msg.RoutingKey)) {
			r.log.Debugf("consumed message for routing key '%s' matches filter '%s' - skipping",
				msg.RoutingKey, relayOpts.Rabbit.Args.ExcludeBindingKeyRegex)
			return nil
		}
		if msg.Body == nil {
			// Ignore empty messages
			// this will also prevent log spam if a queue goes missing
			return nil
		}
		prometheus.Incr("rabbit-relay-consumer", 1)
		r.log.Debugf("Writing message to relay channel: %s", msg.Body)
		relayCh <- &rtypes.RelayMessage{
			Value:   &msg,
			Options: &rtypes.RelayMessageOptions{},
		}
		return nil
	})
	// Pump consume errors into errorCh until the context is cancelled.
	// Fix: removed the unreachable "return nil" that followed this loop
	// (the loop only exits via the return inside it; go vet flags it).
	for {
		select {
		case err := <-errCh:
			errorCh <- &records.ErrorRecord{
				OccurredAtUnixTsUtc: time.Now().UTC().Unix(),
				Error:               err.Error.Error(),
			}
			prometheus.IncrPromCounter("plumber_read_errors", 1)
		case <-ctx.Done():
			r.log.Debug("Received shutdown signal, exiting relayer")
			return nil
		}
	}
}
// validateRelayOptions ensures all required relay options are present:
// the options struct itself, the rabbit group, its args, and the exchange,
// queue, and binding-key names.
func validateRelayOptions(relayOpts *opts.RelayOptions) error {
	if relayOpts == nil {
		return validate.ErrEmptyRelayOpts
	}
	if relayOpts.Rabbit == nil {
		return validate.ErrEmptyBackendGroup
	}
	args := relayOpts.Rabbit.Args
	if args == nil {
		return validate.ErrEmptyBackendArgs
	}
	if args.ExchangeName == "" {
		return ErrEmptyExchangeName
	}
	if args.QueueName == "" {
		return ErrEmptyQueueName
	}
	if args.BindingKey == "" {
		return ErrEmptyBindingKey
	}
	return nil
}
|
package diff
import (
"fmt"
"os"
"reflect"
"testing"
)
// TestListSubdirsPWD expects listSubDirs to return an empty result when run
// from the diff package directory.
func TestListSubdirsPWD(t *testing.T) {
	// diff folder doesn't currently have subdirs
	got, err := listSubDirs()
	if err != nil {
		t.Errorf("error listing subdirs: %s", err)
	}
	// NOTE(review): DeepEqual distinguishes nil from []string{}, so this
	// asserts listSubDirs returns a non-nil empty slice — confirm intended.
	want := []string{}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, wanted %+v", got, want)
	}
}
// TestListSubdirsParent chdirs to the repo root and checks listSubDirs sees
// the known directories but not plain files.
// NOTE(review): the os.Chdir is never undone, so this leaks a changed
// working directory into any test that runs afterwards.
func TestListSubdirsParent(t *testing.T) {
	err := os.Chdir("../")
	if err != nil {
		t.Errorf("error going to parent folder: %s", err)
	}
	// diff folder doesn't currently have subdirs
	got, err := listSubDirs()
	if err != nil {
		t.Errorf("error listing subdirs: %s", err)
	}
	present := []string{"cmd", "diff"}
	notPresent := []string{"main.go"}
	err = assertContains(got, present)
	if err != nil {
		t.Errorf("contains failed: %s", err)
	}
	// Inverted use: assertContains succeeding here would mean a plain file
	// was listed, which is the failure case.
	err = assertContains(got, notPresent)
	if err == nil {
		t.Errorf("found files %+v in output %+v, should only be directories", notPresent, got )
	}
}
// assertContains returns an error naming the first entry of values missing
// from s, or nil when every value is present.
func assertContains(s []string, values []string) error {
	for _, want := range values {
		found := false
		for _, have := range s {
			if have == want {
				found = true
				break
			}
		}
		if !found {
			return fmt.Errorf("Expected value %q not present in %+v", want, s)
		}
	}
	return nil
}
|
package models
// Inventories is the data model for inventory: the on-hand Quantity of one
// product, keyed by ProductID.
type Inventories struct {
	Model
	ProductID int `gorm:"not null" json:"product_id"`
	Quantity int `json:"quantity"`
}
|
package main
import (
"fmt"
"errors"
)
// main demonstrates div's error path: a negative divisor yields both a
// quotient and an error, and only the error message is printed.
func main(){
	r, err := div(9, -10)
	if err != nil {
		fmt.Println(err.Error())
	} else {
		fmt.Println(r)
	}
}
// div divides x by y. When y is negative the quotient is still returned,
// together with an error ("jumlah nilai harus positif": the value must be
// positive). Note y == 0 is not treated as an error.
func div(x, y float64) (float64, error) {
	q := x / y
	if y < 0 {
		return q, errors.New("jumlah nilai harus positif")
	}
	return q, nil
}
|
// +build !race
package daemonsetstore
import (
"context"
"testing"
"time"
"github.com/square/p2/pkg/ds/fields"
daemonsetstore_protos "github.com/square/p2/pkg/grpc/daemonsetstore/protos"
"github.com/square/p2/pkg/grpc/testutil"
"github.com/square/p2/pkg/logging"
"github.com/square/p2/pkg/manifest"
"github.com/square/p2/pkg/store/consul/consulutil"
"github.com/square/p2/pkg/store/consul/dsstore"
"github.com/square/p2/pkg/store/consul/transaction"
"github.com/square/p2/pkg/types"
"github.com/gofrs/uuid"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
klabels "k8s.io/kubernetes/pkg/labels"
)
// TestListDaemonSets seeds one daemon set into a consul fixture and verifies
// every field that ListDaemonSets copies into its protobuf response.
func TestListDaemonSets(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	dsStore := dsstore.NewConsul(fixture.Client, 0, &logging.DefaultLogger)
	seedDS, err := createADaemonSet(dsStore, fixture.Client.KV())
	if err != nil {
		t.Fatalf("could not seed daemon set store with a daemon set")
	}
	server := NewServer(dsStore)
	resp, err := server.ListDaemonSets(context.Background(), &daemonsetstore_protos.ListDaemonSetsRequest{})
	if err != nil {
		t.Fatalf("error listing daemon sets: %s", err)
	}
	if len(resp.DaemonSets) != 1 {
		t.Errorf("expected a single daemon set but there were %d", len(resp.DaemonSets))
	}
	ds := resp.DaemonSets[0]
	if ds.Id != seedDS.ID.String() {
		t.Errorf("expected daemon set ID to be %q but was %q", seedDS.ID, ds.Id)
	}
	if ds.Disabled != seedDS.Disabled {
		t.Errorf("expected daemon set's disabled flag to be %t but was %t", seedDS.Disabled, ds.Disabled)
	}
	if ds.Manifest != string(expectedStr) {
		t.Errorf("expected manifest to be %q but was %q", string(expectedStr), ds.Manifest)
	}
	// 18 matches the minHealth hard-coded in createADaemonSet.
	if ds.MinHealth != 18 {
		t.Errorf("expected min health to be %d but was %d", 18, ds.MinHealth)
	}
	if ds.Name != seedDS.Name.String() {
		t.Errorf("expected daemon set name to be %q but was %q", seedDS.Name.String(), ds.Name)
	}
	if ds.NodeSelector != seedDS.NodeSelector.String() {
		t.Errorf("expected node selector to be %q but was %q", seedDS.NodeSelector.String(), ds.NodeSelector)
	}
	if ds.PodId != seedDS.PodID.String() {
		t.Errorf("expected pod ID to be %q but was %q", seedDS.PodID, ds.PodId)
	}
	if ds.Timeout != seedDS.Timeout.Nanoseconds() {
		t.Errorf("expected timeout to be %s but was %s", seedDS.Timeout, time.Duration(ds.Timeout))
	}
}
// TestDisableDaemonSet disables a freshly created daemon set through the
// grpc server and re-reads it from the store to confirm the flag stuck.
func TestDisableDaemonSet(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	dsStore := dsstore.NewConsul(fixture.Client, 0, &logging.DefaultLogger)
	daemonSet, err := createADaemonSet(dsStore, fixture.Client.KV())
	if err != nil {
		t.Fatal(err)
	}
	// confirm it starts out as enabled
	if daemonSet.Disabled {
		t.Fatal("daemon set already disabled")
	}
	server := NewServer(dsStore)
	_, err = server.DisableDaemonSet(context.Background(), &daemonsetstore_protos.DisableDaemonSetRequest{
		DaemonSetId: daemonSet.ID.String(),
	})
	if err != nil {
		t.Fatalf("error disabling daemon set: %s", err)
	}
	daemonSet, _, err = dsStore.Get(daemonSet.ID)
	if err != nil {
		t.Fatalf("could not fetch daemon set after disabling it to confirm the disable worked: %s", err)
	}
	if !daemonSet.Disabled {
		t.Error("daemon set wasn't disabled")
	}
}
// TestDisableDaemonSetInvalidArgument expects an InvalidArgument status when
// the daemon set ID is not a well-formed UUID.
// NOTE(review): grpc.Code is deprecated in newer grpc-go; status.Code is the
// replacement once the module is upgraded.
func TestDisableDaemonSetInvalidArgument(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	dsStore := dsstore.NewConsul(fixture.Client, 0, &logging.DefaultLogger)
	server := NewServer(dsStore)
	_, err := server.DisableDaemonSet(context.Background(), &daemonsetstore_protos.DisableDaemonSetRequest{
		DaemonSetId: "bad daemon set ID",
	})
	if err == nil {
		t.Fatal("should have gotten an error passing a malformed daemon set ID to disable")
	}
	if grpc.Code(err) != codes.InvalidArgument {
		t.Errorf("should have gotten an invalid argument error but was %q", err)
	}
}
// TestDisableDaemonSetNotFound expects a NotFound status when the ID is a
// valid UUID that matches no stored daemon set.
func TestDisableDaemonSetNotFound(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	dsStore := dsstore.NewConsul(fixture.Client, 0, &logging.DefaultLogger)
	server := NewServer(dsStore)
	_, err := server.DisableDaemonSet(context.Background(), &daemonsetstore_protos.DisableDaemonSetRequest{
		DaemonSetId: uuid.Must(uuid.NewV4()).String(),
	})
	if err == nil {
		t.Fatal("should have gotten an error passing a malformed daemon set ID to disable")
	}
	if grpc.Code(err) != codes.NotFound {
		t.Errorf("should have gotten a not found error but was %q", err)
	}
}
// TestWatchDaemonSetsStream is a fake grpc server stream that forwards every
// sent response onto responseCh so the test can inspect it.
type TestWatchDaemonSetsStream struct {
	*testutil.FakeServerStream
	responseCh chan<- *daemonsetstore_protos.WatchDaemonSetsResponse
}
// Send delivers resp to the test instead of a real grpc client.
func (w TestWatchDaemonSetsStream) Send(resp *daemonsetstore_protos.WatchDaemonSetsResponse) error {
	w.responseCh <- resp
	return nil
}
// fakeDaemonSetWatcher feeds canned WatchedDaemonSets results into the
// server's Watch loop; List and Disable are intentionally unimplemented.
type fakeDaemonSetWatcher struct {
	resultCh <-chan dsstore.WatchedDaemonSets
}
// newFakeDSWatcher wraps resultCh in a fakeDaemonSetWatcher.
func newFakeDSWatcher(resultCh <-chan dsstore.WatchedDaemonSets) fakeDaemonSetWatcher {
	return fakeDaemonSetWatcher{
		resultCh: resultCh,
	}
}
func (fakeDaemonSetWatcher) List() ([]fields.DaemonSet, error) { panic("List() not implemented") }
func (fakeDaemonSetWatcher) Disable(id fields.ID) (fields.DaemonSet, error) {
	panic("Disable() not implemented")
}
// Watch relays results from resultCh until quitCh closes; the output channel
// is closed on exit so consumers can detect shutdown.
func (f fakeDaemonSetWatcher) Watch(quitCh <-chan struct{}) <-chan dsstore.WatchedDaemonSets {
	out := make(chan dsstore.WatchedDaemonSets)
	go func() {
		defer close(out)
		for {
			select {
			case <-quitCh:
				return
			case val := <-f.resultCh:
				// Re-check quitCh while blocked on the send.
				select {
				case <-quitCh:
					return
				case out <- val:
				}
			}
		}
	}()
	return out
}
// TestWatchDaemonSets drives the watch stream with fake watcher results,
// checks the created/updated/deleted responses in turn, then verifies the
// server exits once the client context is cancelled.
func TestWatchDaemonSets(t *testing.T) {
	resultCh := make(chan dsstore.WatchedDaemonSets)
	server := NewServer(newFakeDSWatcher(resultCh))
	respCh := make(chan *daemonsetstore_protos.WatchDaemonSetsResponse)
	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()
	serverExit := make(chan struct{})
	go func() {
		defer close(serverExit)
		err := server.WatchDaemonSets(new(daemonsetstore_protos.WatchDaemonSetsRequest), TestWatchDaemonSetsStream{
			FakeServerStream: testutil.NewFakeServerStream(ctx),
			responseCh:       respCh,
		})
		if err != nil {
			// Fix: t.Fatal must not be called from a goroutine other than
			// the one running the test (FailNow only stops the calling
			// goroutine); report the failure with t.Error instead.
			t.Error(err)
		}
	}()
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	dsStore := dsstore.NewConsul(fixture.Client, 0, &logging.DefaultLogger)
	ds, err := createADaemonSet(dsStore, fixture.Client.KV())
	if err != nil {
		t.Fatal(err)
	}
	// 1) A "created" event should come through with only Created populated.
	go func() {
		resultCh <- dsstore.WatchedDaemonSets{
			Created: []*fields.DaemonSet{
				&ds,
			},
		}
	}()
	select {
	case out := <-respCh:
		if out.Error != "" {
			t.Fatalf("expected no error from watch but got %s", out.Error)
		}
		if len(out.Created) != 1 {
			t.Fatalf("expected 1 created daemon set but there were %d", len(out.Created))
		}
		if len(out.Updated) != 0 {
			t.Fatalf("expected 0 updated daemon sets but there were %d", len(out.Updated))
		}
		if len(out.Deleted) != 0 {
			t.Fatalf("expected 0 deleted daemon sets but there were %d", len(out.Deleted))
		}
	case <-time.After(1 * time.Second):
		t.Fatal("timeout waiting for output")
	}
	// 2) An "updated" event.
	go func() {
		resultCh <- dsstore.WatchedDaemonSets{
			Updated: []*fields.DaemonSet{
				&ds,
			},
		}
	}()
	select {
	case out := <-respCh:
		if out.Error != "" {
			t.Fatalf("expected no error from watch but got %s", out.Error)
		}
		if len(out.Created) != 0 {
			t.Fatalf("expected 0 created daemon sets but there were %d", len(out.Created))
		}
		if len(out.Updated) != 1 {
			t.Fatalf("expected 1 updated daemon set but there were %d", len(out.Updated))
		}
		if len(out.Deleted) != 0 {
			t.Fatalf("expected 0 deleted daemon sets but there were %d", len(out.Deleted))
		}
	case <-time.After(1 * time.Second):
		t.Fatal("timeout waiting for output")
	}
	// 3) A "deleted" event.
	go func() {
		resultCh <- dsstore.WatchedDaemonSets{
			Deleted: []*fields.DaemonSet{
				&ds,
			},
		}
	}()
	select {
	case out := <-respCh:
		if out.Error != "" {
			t.Fatalf("expected no error from watch but got %s", out.Error)
		}
		if len(out.Created) != 0 {
			t.Fatalf("expected 0 created daemon sets but there were %d", len(out.Created))
		}
		if len(out.Updated) != 0 {
			t.Fatalf("expected 0 updated daemon sets but there were %d", len(out.Updated))
		}
		if len(out.Deleted) != 1 {
			t.Fatalf("expected 1 deleted daemon set but there were %d", len(out.Deleted))
		}
	case <-time.After(1 * time.Second):
		t.Fatal("timeout waiting for output")
	}
	// Cancelling the client context must terminate the server goroutine,
	// observed as a close (not a send) on serverExit.
	cancelFunc()
	select {
	case _, ok := <-serverExit:
		if ok {
			t.Fatal("expected server to exit after client cancel")
		}
	case <-time.After(1 * time.Second):
		t.Fatal("timed out waiting for server to exit")
	}
}
// validManifest builds a minimal valid pod manifest with ID "fooapp"
// for use as a test fixture.
func validManifest() manifest.Manifest {
	b := manifest.NewBuilder()
	b.SetID("fooapp")
	return b.GetManifest()
}
// createADaemonSet creates a single daemon set fixture ("some_daemon_set",
// pod "fooapp", selector foo=bar) in the given store inside a fresh
// transaction, commits the transaction via txner, and returns the created
// daemon set.
func createADaemonSet(store *dsstore.ConsulStore, txner transaction.Txner) (fields.DaemonSet, error) {
	minHealth := 18
	name := fields.ClusterName("some_daemon_set")
	nodeSelector, err := klabels.Parse("foo=bar")
	if err != nil {
		return fields.DaemonSet{}, err
	}
	podID := types.PodID("fooapp")
	// Deliberately tiny timeout; the store records it but the test does not wait.
	timeout := 100 * time.Nanosecond

	ctx, cancel := transaction.New(context.Background())
	defer cancel()
	ds, err := store.Create(
		ctx,
		validManifest(),
		minHealth,
		name,
		nodeSelector,
		podID,
		timeout,
	)
	if err != nil {
		return fields.DaemonSet{}, err
	}

	// The Create only takes effect once the transaction commits.
	return ds, transaction.MustCommit(ctx, txner)
}
|
package vm
import (
"fmt"
"github.com/davecgh/go-spew/spew"
installertypes "github.com/openshift/installer/pkg/types"
vsphere "github.com/pulumi/pulumi-vsphere/sdk/v2/go/vsphere"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// InstanceType - Types of instances
type InstanceType string

// InstanceType enum of the OpenShift machine roles.
const (
	Bootstrap InstanceType = "bootstrap"
	Master    InstanceType = "control-plane"
	Worker    InstanceType = "compute"
)

// VirtualMachine - all the objects we need...
// It bundles both the managed vSphere resources (Datacenter, ResourcePool,
// Folder, ComputeCluster) and the results of the read-only Lookup*/Get*
// data-source calls used to create them.
type VirtualMachine struct {
	Ctx                  *pulumi.Context
	Datacenter           *vsphere.Datacenter
	ResourcePool         *vsphere.ResourcePool
	Folder               *vsphere.Folder
	ComputeCluster       *vsphere.ComputeCluster
	LookupDatacenter     *vsphere.LookupDatacenterResult
	LookupComputeCluster *vsphere.LookupComputeClusterResult
	LookupResourcePool   *vsphere.LookupResourcePoolResult
	LookupFolder         *vsphere.LookupFolderResult
	Datastore            *vsphere.GetDatastoreResult
	Network              *vsphere.GetNetworkResult
}
// NewResourcePool - create a new resource pool named name under the
// looked-up compute cluster's root pool, and store it on the VirtualMachine.
func (vm *VirtualMachine) NewResourcePool(name string) error {
	args := &vsphere.ResourcePoolArgs{
		Name:                 pulumi.StringPtr(name),
		ParentResourcePoolId: pulumi.String(vm.LookupComputeCluster.ResourcePoolId),
	}
	pool, err := vsphere.NewResourcePool(vm.Ctx, name, args)
	if err != nil {
		return err
	}
	vm.ResourcePool = pool
	return nil
}
// NewFolder - Create a new VM folder at path name in the compute cluster's
// datacenter, and store it on the VirtualMachine.
func (vm *VirtualMachine) NewFolder(name string) error {
	args := &vsphere.FolderArgs{
		Path:         pulumi.String(name),
		Type:         pulumi.String("vm"),
		DatacenterId: pulumi.StringPtr(*vm.LookupComputeCluster.DatacenterId),
	}
	folder, err := vsphere.NewFolder(vm.Ctx, name, args)
	if err != nil {
		return err
	}
	vm.Folder = folder
	return nil
}
// NewVirtualMachine - new object.
// Looks up (and adopts into Pulumi state) the datacenter, compute cluster,
// default resource pool, default VM folder, datastore and network named in
// the install config, and returns a VirtualMachine holding all of them.
//
// Fix: errors from GetComputeCluster and GetFolder were previously dropped —
// err was assigned but never checked before being overwritten by the next
// call. Both are now checked.
func NewVirtualMachine(ctx *pulumi.Context, ic *installertypes.InstallConfig) (*VirtualMachine, error) {
	lookupDatacenter, err := vsphere.LookupDatacenter(ctx, &vsphere.LookupDatacenterArgs{Name: &ic.Platform.VSphere.Datacenter})
	if err != nil {
		return nil, err
	}
	datacenter, err := vsphere.GetDatacenter(ctx, *lookupDatacenter.Name, pulumi.ID(lookupDatacenter.Id), &vsphere.DatacenterState{
		Name: pulumi.StringPtr(*lookupDatacenter.Name),
		Moid: pulumi.StringPtr(lookupDatacenter.Id),
	})
	if err != nil {
		return nil, err
	}
	spew.Dump(datacenter)
	lookupComputeCluster, err := vsphere.LookupComputeCluster(ctx,
		&vsphere.LookupComputeClusterArgs{
			DatacenterId: &lookupDatacenter.Id,
			Name:         ic.Platform.VSphere.Cluster,
		})
	if err != nil {
		return nil, err
	}
	computeCluster, err := vsphere.GetComputeCluster(ctx, lookupComputeCluster.Name, pulumi.ID(lookupComputeCluster.Id), &vsphere.ComputeClusterState{
		DatacenterId: pulumi.StringPtr(lookupDatacenter.Id),
		Name:         pulumi.StringPtr(lookupComputeCluster.Name),
	})
	if err != nil {
		// was silently ignored before
		return nil, err
	}
	// "*" selects the cluster's default (root) resource pool.
	defaultRPName := "*"
	lookupRP, err := vsphere.LookupResourcePool(ctx, &vsphere.LookupResourcePoolArgs{
		DatacenterId: &lookupComputeCluster.Id,
		Name:         &defaultRPName,
	})
	if err != nil {
		return nil, err
	}
	resourcePool, err := vsphere.GetResourcePool(ctx, *lookupRP.Name, nil, &vsphere.ResourcePoolState{
		ParentResourcePoolId: pulumi.StringPtr(lookupRP.Id),
		Name:                 pulumi.StringPtr(*lookupRP.Name),
	})
	if err != nil {
		return nil, err
	}
	// The datacenter's default VM folder lives at /<dc>/vm.
	defaultFolderPath := fmt.Sprintf("/%s/vm", *lookupDatacenter.Name)
	lookupFolder, err := vsphere.LookupFolder(ctx, &vsphere.LookupFolderArgs{
		Path: defaultFolderPath,
	})
	if err != nil {
		return nil, err
	}
	folder, err := vsphere.GetFolder(ctx, defaultFolderPath, pulumi.ID(lookupFolder.Id), &vsphere.FolderState{
		Path: pulumi.StringPtr(lookupFolder.Path),
	})
	if err != nil {
		// was silently ignored before
		return nil, err
	}
	datastore, err := vsphere.GetDatastore(ctx, &vsphere.GetDatastoreArgs{
		Name:         ic.Platform.VSphere.DefaultDatastore,
		DatacenterId: &lookupDatacenter.Id,
	})
	if err != nil {
		return nil, err
	}
	network, err := vsphere.GetNetwork(ctx, &vsphere.GetNetworkArgs{
		Name:         ic.Platform.VSphere.Network,
		DatacenterId: &lookupDatacenter.Id,
	})
	if err != nil {
		return nil, err
	}
	return &VirtualMachine{
		LookupDatacenter:     lookupDatacenter,
		Datacenter:           datacenter,
		ResourcePool:         resourcePool,
		Network:              network,
		Datastore:            datastore,
		ComputeCluster:       computeCluster,
		LookupFolder:         lookupFolder,
		LookupComputeCluster: lookupComputeCluster,
		Folder:               folder,
		Ctx:                  ctx,
	}, nil
}
/* TODO:
* - ignition
* - ip addressing
*/
// CreateCoreOSVirtualMachine - creates a virtual machine by cloning the named
// template into the VirtualMachine's resource pool/datastore/folder.
// NOTE(review): instanceType and quantity are currently unused and the VM
// name is hard-coded to "jcallen-pulumi-test" — this looks like
// work-in-progress; confirm before relying on it for multiple instances.
func (vm VirtualMachine) CreateCoreOSVirtualMachine(instanceType InstanceType, quantity int, template string) error {
	// Resolve the clone source template by name within the datacenter.
	templateVM, err := vsphere.LookupVirtualMachine(vm.Ctx, &vsphere.LookupVirtualMachineArgs{
		Name:         template,
		DatacenterId: &vm.LookupDatacenter.Id,
	})
	if err != nil {
		return err
	}
	// Single NIC attached to the previously looked-up network.
	vmNetworkInterfaces := vsphere.VirtualMachineNetworkInterfaceArray{
		vsphere.VirtualMachineNetworkInterfaceArgs{
			NetworkId: pulumi.String(vm.Network.Id),
		},
	}
	vmDisks := vsphere.VirtualMachineDiskArray{
		vsphere.VirtualMachineDiskArgs{
			Label:           pulumi.StringPtr("disk0"),
			Size:            pulumi.IntPtr(16),
			ThinProvisioned: pulumi.BoolPtr(false),
		},
	}
	vmCloneArgs := &vsphere.VirtualMachineCloneArgs{
		TemplateUuid: pulumi.String(templateVM.Id),
	}
	//"guestinfo.ignition.config.data" = base64encode(data.ignition_config.ign[each.key].rendered)
	// NOTE(review): only the encoding key is set; the ignition payload itself
	// (guestinfo.ignition.config.data) is still TODO — see file-level TODO.
	extraConfigs := pulumi.StringMap{
		"guestinfo.ignition.config.data.encoding": pulumi.String("base64"),
	}
	vmArgs := &vsphere.VirtualMachineArgs{
		Name:                    pulumi.StringPtr("jcallen-pulumi-test"),
		ResourcePoolId:          vm.ResourcePool.ID(),
		DatastoreId:             pulumi.StringPtr(vm.Datastore.Id),
		NumCpus:                 pulumi.IntPtr(1),
		Memory:                  pulumi.IntPtr(1024),
		GuestId:                 pulumi.StringPtr("rhel7_64Guest"),
		EnableDiskUuid:          pulumi.BoolPtr(true),
		Clone:                   vmCloneArgs,
		NetworkInterfaces:       vmNetworkInterfaces,
		Disks:                   vmDisks,
		WaitForGuestNetRoutable: pulumi.BoolPtr(false),
		WaitForGuestNetTimeout:  pulumi.IntPtr(0),
		Folder:                  vm.Folder.Path,
		ExtraConfig:             extraConfigs,
	}
	_, err = vsphere.NewVirtualMachine(vm.Ctx, "jcallen-pulumi-test", vmArgs)
	if err != nil {
		return err
	}
	return nil
}
// CreateCoreOSTemplate - create the template virtual machine by deploying
// the local RHCOS OVF into the VirtualMachine's resource pool and folder.
// NOTE(review): the version parameter is unused and the Pulumi resource name
// is hard-coded — confirm intent.
func (vm VirtualMachine) CreateCoreOSTemplate(localOvfPath, version string) error {
	vmNetworkInterfaces := vsphere.VirtualMachineNetworkInterfaceArray{
		vsphere.VirtualMachineNetworkInterfaceArgs{
			NetworkId: pulumi.String(vm.Network.Id),
		},
	}
	vmOvfDeploy := vsphere.VirtualMachineOvfDeployArgs{
		LocalOvfPath: pulumi.String(localOvfPath),
	}
	vmArgs := &vsphere.VirtualMachineArgs{
		Name:                    pulumi.StringPtr("rhcos"),
		ResourcePoolId:          vm.ResourcePool.ID(),
		DatastoreId:             pulumi.StringPtr(vm.Datastore.Id),
		NumCpus:                 pulumi.IntPtr(1),
		Memory:                  pulumi.IntPtr(1024),
		GuestId:                 pulumi.StringPtr("rhel7_64Guest"),
		EnableDiskUuid:          pulumi.BoolPtr(true),
		NetworkInterfaces:       vmNetworkInterfaces,
		WaitForGuestNetRoutable: pulumi.BoolPtr(false),
		WaitForGuestNetTimeout:  pulumi.IntPtr(0),
		Folder:                  vm.Folder.Path,
		OvfDeploy:               vmOvfDeploy,
	}
	virtualMachine, err := vsphere.NewVirtualMachine(vm.Ctx, "jcallen-pulumi-test", vmArgs)
	if err != nil {
		return err
	}
	// Debug output of the created resource and its arguments.
	spew.Dump(virtualMachine)
	spew.Dump(vmArgs)
	return nil
}
|
package interpeter
import (
"fmt"
"github.com/fd/forklift/static/github.com/zhemao/glisp/interpreter"
)
// Interperter wraps a glisp environment together with a string key/value
// store exposed to scripts via env-set / env-get.
type Interperter struct {
	// Env holds script-visible key/value pairs.
	Env map[string]string
	// env is the underlying glisp interpreter instance.
	env *glisp.Glisp
}
// SexpString renders the interpreter object as the fixed symbol "forklift"
// when printed from lisp code.
func (i *Interperter) SexpString() string {
	const display = "forklift"
	return display
}
// setup registers the interpreter itself as the global "forklift" object and
// installs the built-in functions scripts can call.
func (i *Interperter) setup() {
	i.env.AddGlobal("forklift", i)
	i.env.AddFunction("include", includeDeploypack)
	i.env.AddFunction("env-set", envSet)
	i.env.AddFunction("env-get", envGet)
}
// envSet implements (env-set <key> <value> ...): stores each string
// key/value pair into the interpreter's Env map. Returns true on success.
func envSet(env *glisp.Glisp, name string, args []glisp.Sexp) (glisp.Sexp, error) {
	if len(args) == 0 || len(args)%2 != 0 {
		return nil, fmt.Errorf("usage: (env-set <key> <value> ...)")
	}
	for idx := 0; idx < len(args); idx += 2 {
		keySexp, valSexp := args[idx], args[idx+1]
		if !glisp.IsString(keySexp) || !glisp.IsString(valSexp) {
			return nil, fmt.Errorf("usage: (env-set <key> <value> ...)")
		}
		interp := getInterperter(env)
		interp.Env[string(keySexp.(glisp.SexpStr))] = string(valSexp.(glisp.SexpStr))
	}
	return glisp.SexpBool(true), nil
}
// envGet implements (env-get <key>): returns the stored value for key,
// or the empty string when the key is absent.
func envGet(env *glisp.Glisp, name string, args []glisp.Sexp) (glisp.Sexp, error) {
	if len(args) != 1 || !glisp.IsString(args[0]) {
		return nil, fmt.Errorf("usage: (env-get <key>)")
	}
	key := string(args[0].(glisp.SexpStr))
	return glisp.SexpStr(getInterperter(env).Env[key]), nil
}
// getInterperter fetches the *Interperter registered as the global
// "forklift" object, panicking if it is missing or has an unexpected type.
//
// Fix: the second panic previously reused the "forklift not found" message,
// which was misleading — the object WAS found but had the wrong type.
func getInterperter(env *glisp.Glisp) *Interperter {
	x, ok := env.FindObject("forklift")
	if !ok {
		panic("forklift not found")
	}
	i, ok := x.(*Interperter)
	if !ok {
		panic("forklift global is not an *Interperter")
	}
	return i
}
|
package main
import (
"bytes"
"flag"
"fmt"
"html/template"
"log"
"math/rand"
"net/http"
"os"
"runtime"
"sync"
"time"
"golang.org/x/tools/godoc"
"golang.org/x/tools/godoc/static"
"golang.org/x/tools/godoc/vfs"
"golang.org/x/tools/godoc/vfs/gatefs"
"golang.org/x/tools/godoc/vfs/mapfs"
"github.com/codegangsta/negroni"
"github.com/vrischmann/envconfig"
)
// conf holds the environment-driven configuration (populated by
// envconfig.Init in main): the HTTP listen port and the godoc index file path.
var conf struct {
	Port      int
	IndexFile string
}
type doc struct {
packageName string
name string
}
func (d *doc) String() string {
return fmt.Sprintf("%s#%s", d.packageName, d.name)
}
var (
	flWriteIndex = flag.Bool("write-index", false, "Generate the godoc index")

	// tmplMu guards tmpl (reloaded on every request in dev via templateReload).
	tmplMu sync.RWMutex
	tmpl   map[string]*template.Template = make(map[string]*template.Template)

	// godocMu guards currentDoc and docCache.
	godocMu    sync.RWMutex
	currentDoc doc
	docCache   map[doc]string = make(map[doc]string)
	corpus     *godoc.Corpus
	pres       *godoc.Presentation
	godocFs    = vfs.NameSpace{}
)
// randomPackage returns a uniformly random entry from the package list.
// Uses rand.Intn instead of the non-idiomatic rand.Int()%n.
func randomPackage() string {
	return packages[rand.Intn(len(packages))]
}
// loadAndCompileTemplates (re)parses the index/layout templates into the
// tmpl map under the write lock. template.Must panics on a parse error,
// which is acceptable here since templates ship with the binary.
func loadAndCompileTemplates() {
	tmplMu.Lock()
	defer tmplMu.Unlock()

	tmpl["index"] = template.Must(template.New("index").ParseFiles("./templates/index.html", "./templates/layout.html"))
}
// templateReload is negroni middleware that recompiles the HTML templates
// before every request, then delegates to the next handler.
func templateReload(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
	loadAndCompileTemplates()

	next(w, r)
}
// indexHandler renders the cached godoc HTML for the current document
// inside the "ROOT" template.
//
// Fix: the ExecuteTemplate error was silently discarded; it is now logged
// (the 200 header has already been written, so we cannot change the status).
func indexHandler(w http.ResponseWriter, r *http.Request) {
	tmplMu.RLock()
	defer tmplMu.RUnlock()
	godocMu.RLock()
	defer godocMu.RUnlock()

	w.WriteHeader(http.StatusOK)
	err := tmpl["index"].ExecuteTemplate(w, "ROOT", map[string]interface{}{
		"godoc": template.HTML(docCache[currentDoc]),
	})
	if err != nil {
		log.Printf("unable to render index template: %v", err)
	}
}
// getGodoc runs the godoc command-line pipeline for d (package name plus an
// optional symbol) against the in-process corpus and returns the rendered
// output as a string.
func getGodoc(d doc) (string, error) {
	var buf bytes.Buffer

	log.Printf("getting doc %s", &d)

	args := []string{d.packageName}
	if d.name != "" {
		args = append(args, d.name)
	}

	err := godoc.CommandLine(&buf, godocFs, pres, args)
	// On error buf may still hold partial output; both are returned.
	return buf.String(), err
}
// initGodocFs mounts GOROOT at / (rate-limited to 20 concurrent file ops via
// gatefs) and the embedded godoc static assets at /lib/godoc.
func initGodocFs() {
	rootFs := gatefs.New(vfs.OS(runtime.GOROOT()), make(chan bool, 20))
	godocFs.Bind("/", rootFs, "/", vfs.BindReplace)
	godocFs.Bind("/lib/godoc", mapfs.New(static.Files), "/", vfs.BindReplace)
}
// initGodoc sets up the virtual filesystem, corpus and presentation globals.
// When writeMode is true the corpus is fully initialized up front (needed
// before writing an index file); otherwise the index is read lazily from
// conf.IndexFile.
func initGodoc(writeMode bool) error {
	initGodocFs()

	corpus = godoc.NewCorpus(godocFs)
	if writeMode {
		if err := corpus.Init(); err != nil {
			return err
		}
	}

	corpus.MaxResults = 10000
	corpus.IndexEnabled = true
	corpus.IndexFiles = conf.IndexFile
	corpus.IndexInterval = 0
	corpus.IndexThrottle = 0.75
	corpus.IndexFullText = true
	corpus.IndexGoCode = true

	pres = godoc.NewPresentation(corpus)
	pres.HTMLMode = false

	// readTemplates is defined elsewhere in this package; presumably it loads
	// the godoc presentation templates — confirm against the rest of the file.
	readTemplates(pres, false)

	return nil
}
func changeCurrentDoc() {
godocMu.Lock()
defer godocMu.Unlock()
currentDoc = doc{packageName: randomPackage()}
}
// updateDoc renders the godoc HTML for d, caches it and makes d the current
// document.
//
// Fix: the error log previously printed the racy global currentDoc instead
// of the doc actually being fetched, and omitted the error itself.
func updateDoc(d doc) {
	html, err := getGodoc(d)
	if err != nil {
		log.Printf("unable to get the godoc for %s: %v", &d, err)
	}

	godocMu.Lock()
	defer godocMu.Unlock()

	// Cache even on error: getGodoc may have produced partial output.
	docCache[d] = html
	currentDoc = d
}
// writeGodocIndex builds the full godoc corpus index and writes it to
// conf.IndexFile.
//
// Fixes: the created file was never closed (descriptor leak, and buffered
// data could be lost), and a nil index would have made WriteTo panic.
func writeGodocIndex() error {
	log.Println("initializing corpus")
	if err := initGodoc(true); err != nil {
		return err
	}

	log.Println("updating corpus")
	corpus.UpdateIndex()

	f, err := os.Create(conf.IndexFile)
	if err != nil {
		return err
	}

	// CurrentIndex also returns the index timestamp, which we don't need.
	index, _ := corpus.CurrentIndex()
	if index == nil {
		f.Close()
		return fmt.Errorf("no index available to write")
	}

	log.Printf("writing index to %s", conf.IndexFile)
	_, werr := index.WriteTo(f)
	cerr := f.Close()
	if werr != nil {
		return werr
	}
	return cerr
}
// init seeds the global math/rand source so randomPackage differs per run.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// main wires configuration, the godoc corpus, the background doc
// rotation/refresh loop and the HTTP server.
//
// Fix: the refresh case previously read the shared currentDoc without
// holding godocMu, racing with changeCurrentDoc/updateDoc; it now takes a
// read lock for the snapshot.
func main() {
	flag.Parse()

	if err := envconfig.Init(&conf); err != nil {
		log.Fatalln(err)
		return
	}

	if *flWriteIndex {
		if err := writeGodocIndex(); err != nil {
			log.Fatalln(err)
		}
		return
	}

	if err := initGodoc(false); err != nil {
		log.Fatalln(err)
		return
	}
	go corpus.RunIndexer()

	changeCurrentDoc()

	go func() {
		// Rotate to a new package daily; re-render the current one every 10s.
		changeTicker := time.NewTicker(24 * time.Hour)
		updateTicker := time.NewTicker(time.Second * 10)
		for {
			select {
			case <-changeTicker.C:
				changeCurrentDoc()
			case <-updateTicker.C:
				godocMu.RLock()
				d := currentDoc
				godocMu.RUnlock()
				updateDoc(d)
			}
		}
	}()

	mux := http.NewServeMux()
	mux.Handle("/static/", http.StripPrefix("/static", http.FileServer(http.Dir("./static"))))
	mux.HandleFunc("/", indexHandler)

	n := negroni.New()
	n.UseFunc(templateReload)
	n.UseHandler(mux)
	n.Run(fmt.Sprintf(":%d", conf.Port))
}
|
package types
import (
"fmt"
bgpapi "github.com/osrg/gobgp/v3/api"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/interface_types"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/ip_types"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/sr"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/sr_types"
)
// SrBehavior is the local SRv6 behavior type, mirroring VPP's
// sr_types.SrBehavior values.
type SrBehavior uint8

const (
	SrBehaviorEND    SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_END)
	SrBehaviorX      SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_X)
	SrBehaviorT      SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_T)
	SrBehaviorDFIRST SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_D_FIRST)
	SrBehaviorDX2    SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_DX2)
	SrBehaviorDX6    SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_DX6)
	SrBehaviorDX4    SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_DX4)
	SrBehaviorDT6    SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_DT6)
	SrBehaviorDT4    SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_DT4)
	SrBehaviorLAST   SrBehavior = SrBehavior(sr_types.SR_BEHAVIOR_API_LAST)
)

var (
	// SrBehaviorVPP_GoBGP maps VPP behavior codes to GoBGP SRv6 behaviors.
	// It is the inverse of SrBehaviorGoBGP_VPP below; keep the two in sync.
	SrBehaviorVPP_GoBGP = map[uint8]bgpapi.SRv6Behavior{
		1: bgpapi.SRv6Behavior_END,
		2: bgpapi.SRv6Behavior_ENDX,
		3: bgpapi.SRv6Behavior_ENDT,
		5: bgpapi.SRv6Behavior_END_DX2,
		6: bgpapi.SRv6Behavior_END_DX6,
		7: bgpapi.SRv6Behavior_END_DX4,
		8: bgpapi.SRv6Behavior_END_DT6,
		9: bgpapi.SRv6Behavior_END_DT4,
	}
	// SrBehaviorGoBGP_VPP maps GoBGP SRv6 behaviors back to VPP codes.
	SrBehaviorGoBGP_VPP = map[bgpapi.SRv6Behavior]uint8{
		bgpapi.SRv6Behavior_END:     1,
		bgpapi.SRv6Behavior_ENDX:    2,
		bgpapi.SRv6Behavior_ENDT:    3,
		bgpapi.SRv6Behavior_END_DX2: 5,
		bgpapi.SRv6Behavior_END_DX6: 6,
		bgpapi.SRv6Behavior_END_DX4: 7,
		bgpapi.SRv6Behavior_END_DT6: 8,
		bgpapi.SRv6Behavior_END_DT4: 9,
	}
)

// ToVppSrBehavior converts the local behavior to the VPP binding type.
func ToVppSrBehavior(behavior SrBehavior) sr_types.SrBehavior {
	return sr_types.SrBehavior(behavior)
}

// FromVppSrBehavior converts a VPP binding behavior to the local type.
func FromVppSrBehavior(behavior sr_types.SrBehavior) SrBehavior {
	return SrBehavior(behavior)
}

// FromGoBGPSrBehavior converts a GoBGP behavior value (as uint8) to the
// local type; unknown values map to 0 (map zero value).
func FromGoBGPSrBehavior(behavior uint8) SrBehavior {
	var result = SrBehaviorGoBGP_VPP[bgpapi.SRv6Behavior(behavior)]
	return SrBehavior(result)
}

// SrLocalsid definition
type SrLocalsid struct {
	Localsid  ip_types.IP6Address
	EndPsp    bool
	Behavior  SrBehavior
	SwIfIndex interface_types.InterfaceIndex
	VlanIndex uint32
	FibTable  uint32
	NhAddr    ip_types.Address
}

// SetBehavior stores a raw behavior code on the localsid.
func (l *SrLocalsid) SetBehavior(code uint8) {
	l.Behavior = SrBehavior(code)
}

// CompareBehaviorTo reports whether the localsid's behavior equals the
// given raw code.
func (l *SrLocalsid) CompareBehaviorTo(behavior uint8) bool {
	return uint8(l.Behavior) == behavior
}

// String renders the localsid fields for logging.
func (l *SrLocalsid) String() (policy string) {
	return fmt.Sprintf("Localsid: %s, EndPsp: %v, Behavior: %d, SwIfIndex: %d, VlanIndex: %d, FibTable: %d, NhAddr: %s",
		l.Localsid, l.EndPsp, uint8(l.Behavior), l.SwIfIndex, l.VlanIndex, l.FibTable, l.NhAddr.String())
}

// SrPolicy definition
type SrPolicy struct {
	Bsid     ip_types.IP6Address
	IsSpray  bool
	IsEncap  bool
	FibTable uint32
	SidLists []Srv6SidList
}
// FromVPP populates the policy from a VPP SrPoliciesDetails message,
// copying the sid lists. The destination slice is now pre-sized to avoid
// repeated growth during append.
func (p *SrPolicy) FromVPP(response *sr.SrPoliciesDetails) {
	p.Bsid = response.Bsid
	p.IsSpray = response.IsSpray
	p.IsEncap = response.IsEncap
	p.FibTable = response.FibTable

	sidLists := make([]Srv6SidList, 0, len(response.SidLists))
	for _, sl := range response.SidLists {
		sidLists = append(sidLists, Srv6SidList{
			NumSids: sl.NumSids,
			Weight:  sl.Weight,
			Sids:    sl.Sids,
		})
	}
	p.SidLists = sidLists
}
// String renders the policy header followed by every sid list, wrapped in
// square brackets, for logging.
func (p *SrPolicy) String() (policy string) {
	header := fmt.Sprintf("Bsid: %s, IsSpray: %v, IsEncap: %v, FibTable: %d, SidLists: [",
		p.Bsid, p.IsSpray, p.IsEncap, p.FibTable)
	lists := ""
	for i := range p.SidLists {
		lists += p.SidLists[i].String()
	}
	return header + lists + "]"
}
// Srv6SidList definition
type Srv6SidList struct {
	NumSids uint8
	Weight  uint32
	// Sids is a fixed-size array; only the first NumSids entries are valid.
	Sids [16]ip_types.IP6Address
}

// String renders the sid list for logging. Note it prints the whole
// 16-entry array, including unused trailing entries.
func (s *Srv6SidList) String() string {
	return fmt.Sprintf("{NumSids: %d, Weight: %d, Sids: %s}",
		s.NumSids, s.Weight, s.Sids)
}

// SrSteerTrafficType mirrors VPP's steering traffic types.
type SrSteerTrafficType uint8

const (
	SR_STEER_L2   SrSteerTrafficType = SrSteerTrafficType(sr_types.SR_STEER_API_L2)
	SR_STEER_IPV4 SrSteerTrafficType = SrSteerTrafficType(sr_types.SR_STEER_API_IPV4)
	SR_STEER_IPV6 SrSteerTrafficType = SrSteerTrafficType(sr_types.SR_STEER_API_IPV6)
)

// ToVppSrSteerTrafficType converts the local steering type to the VPP binding type.
func ToVppSrSteerTrafficType(trafficType SrSteerTrafficType) sr_types.SrSteer {
	return sr_types.SrSteer(trafficType)
}

// FromVppSrSteerTrafficType converts a VPP steering type to the local type.
func FromVppSrSteerTrafficType(trafficType sr_types.SrSteer) SrSteerTrafficType {
	return SrSteerTrafficType(trafficType)
}

// SrSteer describes a steering rule binding traffic to a policy Bsid.
type SrSteer struct {
	TrafficType SrSteerTrafficType
	FibTable    uint32
	Prefix      ip_types.Prefix
	SwIfIndex   uint32
	Bsid        ip_types.IP6Address
}

// String renders the steering rule for logging.
func (s *SrSteer) String() string {
	return fmt.Sprintf("TrafficType: %d, FibTable: %d, Prefix: %s, SwIfIndex: %d, Bsid: %s",
		s.TrafficType, s.FibTable, s.Prefix.String(), s.SwIfIndex, s.Bsid.String())
}
|
package main
import "fmt"
// Pet is satisfied by anything that can Walk.
type Pet interface {
	Walk()
}

type Dog struct {
	name string
}

// Walk implements Pet on *Dog (pointer receiver — only *Dog satisfies Pet).
func (d *Dog) Walk() {
	fmt.Println("dog walk ...")
}

func main() {
	dog := Dog{"little dog"}
	// p is an interface variable: &dog is its dynamic value, *Dog its dynamic type.
	var p Pet = &dog
	p.Walk()

	var p1 Pet
	fmt.Println(p1)        // <nil>
	fmt.Println(p1 == nil) // true

	// p2 holds a typed nil (*Dog)(nil): the interface has a dynamic type,
	// so it is NOT equal to nil even though it prints as <nil>.
	var d2 *Dog
	var p2 Pet = d2
	fmt.Println(p2)        // <nil>
	fmt.Println(p2 == nil) // false
}
|
package actions
import "github.com/gopherjs/vecty/examples/todomvc/store/model"
// ReplaceItems replaces the whole todo list with Items.
type ReplaceItems struct {
	Items []*model.Item
}

// AddItem appends a new todo item with the given Title.
type AddItem struct {
	Title string
}

// DestroyItem removes the item at Index.
type DestroyItem struct {
	Index int
}

// SetTitle changes the title of the item at Index.
type SetTitle struct {
	Index int
	Title string
}

// SetCompleted sets the completed flag of the item at Index.
type SetCompleted struct {
	Index     int
	Completed bool
}

// SetAllCompleted sets the completed flag on every item.
type SetAllCompleted struct {
	Completed bool
}

// ClearCompleted removes all completed items.
type ClearCompleted struct{}

// SetFilter changes the active list filter.
type SetFilter struct {
	Filter model.FilterState
}
|
package p02
// addDigits returns the digital root of num: repeatedly summing decimal
// digits until one digit remains. Uses the mod-9 congruence instead of
// explicit digit loops.
func addDigits(num int) int {
	if num == 0 {
		return 0
	}
	if r := num % 9; r != 0 {
		return r
	}
	return 9
}
|
package authors
import (
"emailSender/db"
"github.com/gofiber/fiber/v2"
"github.com/golang-jwt/jwt"
)
// v is the JWT claims payload for email-verification tokens: the author id
// plus the standard registered claims.
type v struct {
	Id string `json:"id"`
	jwt.StandardClaims
}
// VerifyEmail handles the email-verification link: it parses the JWT from
// the :verification path parameter, extracts the author id claim and marks
// that author as verified in the database.
func VerifyEmail(c *fiber.Ctx) error {
	vToken := c.Params("verification")
	token, err := jwt.ParseWithClaims(vToken, &v{}, func(token *jwt.Token) (interface{}, error) {
		// Tokens are signed and verified with the same shared HMAC secret
		// (symmetric scheme, not a public/private key pair).
		return []byte(jwtSecret), nil
	})
	if err != nil {
		return fiber.NewError(fiber.StatusBadRequest, err.Error())
	}
	claims, ok := token.Claims.(*v)
	if !ok {
		return fiber.NewError(fiber.StatusBadRequest, "Could not verify email!")
	}
	id := claims.Id
	if err := db.VerifyAuthor(id); err != nil {
		return fiber.NewError(fiber.StatusInternalServerError, err.Error())
	}
	return c.Status(fiber.StatusOK).JSON(fiber.Map{"err": false, "msg": "Successfully verified your email!"})
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"github.com/pingcap/errors"
"github.com/pingcap/tidb/parser/format"
"github.com/pingcap/tidb/parser/model"
)
var (
	// Compile-time checks that these statements implement StmtNode.
	_ StmtNode = &AnalyzeTableStmt{}
	_ StmtNode = &DropStatsStmt{}
	_ StmtNode = &LoadStatsStmt{}
)

// AnalyzeTableStmt is used to create table statistics.
type AnalyzeTableStmt struct {
	stmtNode

	TableNames     []*TableName
	PartitionNames []model.CIStr
	IndexNames     []model.CIStr
	AnalyzeOpts    []AnalyzeOpt

	// IndexFlag is true when we only analyze indices for a table.
	IndexFlag   bool
	Incremental bool
	// HistogramOperation is set in "ANALYZE TABLE ... UPDATE/DROP HISTOGRAM ..." statement.
	HistogramOperation HistogramOperationType
	// ColumnNames indicate the columns whose statistics need to be collected.
	ColumnNames  []model.CIStr
	ColumnChoice model.ColumnChoice
}

// AnalyzeOptionType is the type for analyze options.
type AnalyzeOptionType int

// Analyze option types.
const (
	AnalyzeOptNumBuckets = iota
	AnalyzeOptNumTopN
	AnalyzeOptCMSketchDepth
	AnalyzeOptCMSketchWidth
	AnalyzeOptNumSamples
	AnalyzeOptSampleRate
)

// AnalyzeOptionString stores the string form of analyze options.
var AnalyzeOptionString = map[AnalyzeOptionType]string{
	AnalyzeOptNumBuckets:    "BUCKETS",
	AnalyzeOptNumTopN:       "TOPN",
	AnalyzeOptCMSketchWidth: "CMSKETCH WIDTH",
	AnalyzeOptCMSketchDepth: "CMSKETCH DEPTH",
	AnalyzeOptNumSamples:    "SAMPLES",
	AnalyzeOptSampleRate:    "SAMPLERATE",
}
// HistogramOperationType is the type for histogram operation.
type HistogramOperationType int

// Histogram operation types.
const (
	// HistogramOperationNop shows no operation in histogram. Default value.
	HistogramOperationNop HistogramOperationType = iota
	HistogramOperationUpdate
	HistogramOperationDrop
)

// String implements fmt.Stringer for HistogramOperationType.
// Nop (and any unknown value) renders as the empty string.
func (hot HistogramOperationType) String() string {
	if hot == HistogramOperationUpdate {
		return "UPDATE HISTOGRAM"
	}
	if hot == HistogramOperationDrop {
		return "DROP HISTOGRAM"
	}
	return ""
}
// AnalyzeOpt stores the analyze option type and value.
type AnalyzeOpt struct {
	Type  AnalyzeOptionType
	Value ValueExpr
}
// Restore implements Node interface.
// It writes the statement back as SQL, following the grammar:
// ANALYZE [INCREMENTAL] TABLE t1[,t2...] [PARTITION p...]
// [UPDATE|DROP HISTOGRAM [ON cols]] [ALL|PREDICATE COLUMNS|COLUMNS cols]
// [INDEX [idx...]] [WITH opts].
func (n *AnalyzeTableStmt) Restore(ctx *format.RestoreCtx) error {
	if n.Incremental {
		ctx.WriteKeyWord("ANALYZE INCREMENTAL TABLE ")
	} else {
		ctx.WriteKeyWord("ANALYZE TABLE ")
	}
	for i, table := range n.TableNames {
		if i != 0 {
			ctx.WritePlain(",")
		}
		if err := table.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore AnalyzeTableStmt.TableNames[%d]", i)
		}
	}
	if len(n.PartitionNames) != 0 {
		ctx.WriteKeyWord(" PARTITION ")
	}
	for i, partition := range n.PartitionNames {
		if i != 0 {
			ctx.WritePlain(",")
		}
		ctx.WriteName(partition.O)
	}
	// UPDATE/DROP HISTOGRAM clause, optionally restricted to named columns.
	if n.HistogramOperation != HistogramOperationNop {
		ctx.WritePlain(" ")
		ctx.WriteKeyWord(n.HistogramOperation.String())
		ctx.WritePlain(" ")
		if len(n.ColumnNames) > 0 {
			ctx.WriteKeyWord("ON ")
			for i, columnName := range n.ColumnNames {
				if i != 0 {
					ctx.WritePlain(",")
				}
				ctx.WriteName(columnName.O)
			}
		}
	}
	switch n.ColumnChoice {
	case model.AllColumns:
		ctx.WriteKeyWord(" ALL COLUMNS")
	case model.PredicateColumns:
		ctx.WriteKeyWord(" PREDICATE COLUMNS")
	case model.ColumnList:
		ctx.WriteKeyWord(" COLUMNS ")
		for i, columnName := range n.ColumnNames {
			if i != 0 {
				ctx.WritePlain(",")
			}
			ctx.WriteName(columnName.O)
		}
	}
	if n.IndexFlag {
		ctx.WriteKeyWord(" INDEX")
	}
	// Index names: comma-separated, with a leading space before the first.
	for i, index := range n.IndexNames {
		if i != 0 {
			ctx.WritePlain(",")
		} else {
			ctx.WritePlain(" ")
		}
		ctx.WriteName(index.O)
	}
	if len(n.AnalyzeOpts) != 0 {
		ctx.WriteKeyWord(" WITH")
		for i, opt := range n.AnalyzeOpts {
			if i != 0 {
				ctx.WritePlain(",")
			}
			ctx.WritePlainf(" %v ", opt.Value.GetValue())
			ctx.WritePlain(AnalyzeOptionString[opt.Type])
		}
	}
	return nil
}
// Accept implements Node Accept interface.
// It visits the statement itself and every table name child.
func (n *AnalyzeTableStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	stmt := newNode.(*AnalyzeTableStmt)
	for i := range stmt.TableNames {
		visited, ok := stmt.TableNames[i].Accept(v)
		if !ok {
			return stmt, false
		}
		stmt.TableNames[i] = visited.(*TableName)
	}
	return v.Leave(stmt)
}
// DropStatsStmt is used to drop table statistics.
// if the PartitionNames is not empty, or IsGlobalStats is true, it will contain exactly one table
type DropStatsStmt struct {
	stmtNode

	Tables         []*TableName
	PartitionNames []model.CIStr
	// IsGlobalStats drops the global (merged) statistics instead of
	// per-partition ones.
	IsGlobalStats bool
}
// Restore implements Node interface.
// It writes: DROP STATS t1[, t2...] [GLOBAL | PARTITION p1,p2...].
// GLOBAL and PARTITION are mutually exclusive; GLOBAL returns early.
func (n *DropStatsStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("DROP STATS ")
	for index, table := range n.Tables {
		if index != 0 {
			ctx.WritePlain(", ")
		}
		if err := table.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore DropStatsStmt.Tables[%d]", index)
		}
	}
	if n.IsGlobalStats {
		ctx.WriteKeyWord(" GLOBAL")
		return nil
	}
	if len(n.PartitionNames) != 0 {
		ctx.WriteKeyWord(" PARTITION ")
	}
	for i, partition := range n.PartitionNames {
		if i != 0 {
			ctx.WritePlain(",")
		}
		ctx.WriteName(partition.O)
	}
	return nil
}
// Accept implements Node Accept interface.
// It visits the statement itself and every table child.
func (n *DropStatsStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	stmt := newNode.(*DropStatsStmt)
	for i := range stmt.Tables {
		visited, ok := stmt.Tables[i].Accept(v)
		if !ok {
			return stmt, false
		}
		stmt.Tables[i] = visited.(*TableName)
	}
	return v.Leave(stmt)
}
// LoadStatsStmt is the statement node for loading statistic.
type LoadStatsStmt struct {
	stmtNode

	// Path is the statistics file to load.
	Path string
}

// Restore implements Node interface.
// It writes: LOAD STATS 'path' (the path is quoted by WriteString).
func (n *LoadStatsStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("LOAD STATS ")
	ctx.WriteString(n.Path)
	return nil
}
// Accept implements Node Accept interface. LoadStatsStmt has no visitable
// children, so only the node itself is entered and left.
func (n *LoadStatsStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	return v.Leave(newNode.(*LoadStatsStmt))
}
// LockStatsStmt is the statement node for lock table statistic
type LockStatsStmt struct {
	stmtNode

	Tables []*TableName
}
// Restore implements Node interface.
// It writes: LOCK STATS t1[, t2...].
func (n *LockStatsStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("LOCK STATS ")
	for i, tbl := range n.Tables {
		if i > 0 {
			ctx.WritePlain(", ")
		}
		if err := tbl.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore LockStatsStmt.Tables[%d]", i)
		}
	}
	return nil
}
// Accept implements Node Accept interface.
// It visits the statement itself and every table child.
func (n *LockStatsStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	stmt := newNode.(*LockStatsStmt)
	for i := range stmt.Tables {
		visited, ok := stmt.Tables[i].Accept(v)
		if !ok {
			return stmt, false
		}
		stmt.Tables[i] = visited.(*TableName)
	}
	return v.Leave(stmt)
}
// UnlockStatsStmt is the statement node for unlock table statistic
type UnlockStatsStmt struct {
	stmtNode

	Tables []*TableName
}
// Restore implements Node interface.
// It writes: UNLOCK STATS t1[, t2...].
func (n *UnlockStatsStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("UNLOCK STATS ")
	for i, tbl := range n.Tables {
		if i > 0 {
			ctx.WritePlain(", ")
		}
		if err := tbl.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore UnlockStatsStmt.Tables[%d]", i)
		}
	}
	return nil
}
// Accept implements Node Accept interface.
// It visits the statement itself and every table child.
func (n *UnlockStatsStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	stmt := newNode.(*UnlockStatsStmt)
	for i := range stmt.Tables {
		visited, ok := stmt.Tables[i].Accept(v)
		if !ok {
			return stmt, false
		}
		stmt.Tables[i] = visited.(*TableName)
	}
	return v.Leave(stmt)
}
|
package junehttp
import (
"fmt"
"net/http"
)
// Servertest starts a demo HTTP server on :8000 whose /postpage handler
// echoes the "key"/"value" form fields, the URL query values and the
// repeated "key" form values to stdout.
func Servertest() {
	http.HandleFunc("/postpage", func(w http.ResponseWriter, r *http.Request) {
		// Accept a POST request, then print the values of the form's
		// "key" and "value" fields.
		if r.Method == "POST" {
			var (
				key   string = r.PostFormValue("key")
				value string = r.PostFormValue("value")
			)
			fmt.Printf("key is : %s\n", key)
			fmt.Printf("value is: %s\n", value)
		}
		// Print every query-string value.
		for _, que := range r.URL.Query() {
			fmt.Println(que)
		}
		// r.PostForm is populated by the PostFormValue call above; for
		// non-POST requests it may be empty.
		values := r.PostForm["key"]
		for _, v := range values {
			fmt.Println(v)
		}
	})
	err := http.ListenAndServe(":8000", nil)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
}

// Clienttest sends a form-encoded POST to the demo server via the
// DoHttpRequest helper (defined elsewhere in this package) with a 30s
// timeout — presumably; confirm DoHttpRequest's parameter meaning.
func Clienttest() {
	DoHttpRequest(
		"http://localhost:8000/postpage",
		"POST",
		"key=fuchongjun&value=18",
		"",
		map[string]string{"Content-Type": "application/x-www-form-urlencoded"},
		nil, map[string]string{"para": "444"}, nil,
		30)
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
)
// main demonstrates stdout/stdin usage, then writes a demo file.
// Fix: the fmt.Scan error was silently ignored; it is now reported
// (execution continues so the file demo still runs).
func main() {
	// os.Stdout.Close() // closing stdout would make the prints below disappear
	fmt.Println("Are you OK?")
	os.Stdout.WriteString("Are you ok??? \n") // write straight to the terminal
	// os.Stdin.Close() // closing stdin would make input impossible

	var a int
	fmt.Println("please input a number")
	if _, err := fmt.Scan(&a); err != nil {
		fmt.Println("err = ", err)
	}
	fmt.Println("a = ", a)

	path := "./demo.txt"
	WriteFile(path)
}
func WriteFile(path string) {
//新建文件
file, err := os.Create(path)
if err != nil {
fmt.Println("err = ", err)
return
}
//使用完毕, 需要关闭文件
defer file.close()
for i := 0; i < 10; i++ {
// 把"i = 1\n" 存储在buf中
buf := fmt.Sprintf("i = %d\n", i)
n, err := file.WriteString(buf)
if err != nil {
fmt.Println("err = ", err)
return
}
}
}
func ReadFile(path string) {
// 打开文件
file, err := os.Open(path)
if err != nil {
fmt.Println("err = ", err)
return
}
// 关闭文件
defer file.close()
buf := make([]byte, 1024*2)
n, err := f.Read(buf)
if err != nil && err != io.EOF { //文件出错, 同时没有到file end
fmt.Println("err = ", err)
return
}
}
//每次读取一行
func ReadFileLine(path string) {
// 打开文件
file, err := os.Open(path)
if err != nil {
fmt.Println("err = ", err)
return
}
// 关闭文件
defer file.close()
//新建一个缓冲区, 把内容先放在缓冲区
r := bufio.NewReader(file)
for {
//遇到'\n'结束读取, 但\n 也读取进去了
buf, err := r.ReadBytes('\n')
if err != nil {
if err == io.EOF {
break
}
fmt.Println("err = ", err)
}
fmt.Printf("buf = %s", string(buf))
}
}
|
package uhost
import (
"github.com/xiaohui/goucloud/ucloud"
)
// CreateUHostInstance will create instances
// CreateUHostInstanceParams carries the request fields for the
// CreateUHostInstance API action.
type CreateUHostInstanceParams struct {
	ucloud.CommonRequest

	Region          string
	ImageId         string
	LoginMode       string
	Password        string
	KeyPair         string
	CPU             int
	Memory          int
	DiskSpace       int
	Name            string
	NetworkId       string
	SecurityGroupId string
	ChargeType      string
	Quantity        int
	Count           int
	UHostType       string
	NetCapability   string
	Tag             string
	CouponId        string
}

// CreateUHostInstanceResponse holds the ids of the created hosts.
type CreateUHostInstanceResponse struct {
	ucloud.CommonResponse

	HostIds []string
}
// CreateUHostInstance creates UHost instances.
// Fix: the response must be passed as a pointer so DoRequest can
// unmarshal the API reply into it; passing the struct by value left the
// returned response empty.
func (u *UHost) CreateUHostInstance(params *CreateUHostInstanceParams) (*CreateUHostInstanceResponse, error) {
	response := CreateUHostInstanceResponse{}
	err := u.DoRequest("CreateUHostInstance", params, &response)

	return &response, err
}
// DescribeImageParams carries the request fields for DescribeImage.
type DescribeImageParams struct {
	ucloud.CommonRequest

	Region    string
	ImageType string
	OsType    string
	ImageId   string
	Offset    int
	Limit     int
}

// ImageSet describes a single image in a DescribeImage response.
type ImageSet struct {
	ImageId          string
	ImageName        string
	OsType           string
	OsName           string
	State            string
	ImageDescription string
	CreateTime       string
}

type ImageSetArray []ImageSet

// DescribeImageResponse is the reply for DescribeImage.
type DescribeImageResponse struct {
	ucloud.CommonResponse

	TotalCount int
	ImageSet   ImageSetArray
}
// DescribeImage lists images.
// Fix: pass the response by pointer so DoRequest can populate it.
func (u *UHost) DescribeImage(params *DescribeImageParams) (*DescribeImageResponse, error) {
	response := DescribeImageResponse{}
	err := u.DoRequest("DescribeImage", params, &response)

	return &response, err
}
// DescribeUHostInstanceParams carries the request fields for
// DescribeUHostInstance.
type DescribeUHostInstanceParams struct {
	Region string
	Tag    string
	Offset int
	Limit  int
}

// DiskSet describes one disk attached to a host.
type DiskSet struct {
	Type   string
	DiskId string
	Size   int
}

type DiskSetArray []DiskSet

// IPSet describes one IP bound to a host.
type IPSet struct {
	Type string
	IPId string
	IP   string
	// NOTE(review): bandwidth is unexported, so encoding/json (and most
	// marshalers) will never populate it — likely should be Bandwidth.
	bandwidth int
}

type IPSetArray []IPSet

// UHostSet describes a single host in a DescribeUHostInstance response.
type UHostSet struct {
	UHostId        string
	UHostType      string
	ImageId        string
	BasicImageId   string
	BasicImageName string
	Tag            string
	Remark         string
	Name           string
	State          string
	CreateTime     int
	ChargeType     string
	ExpireTime     string
	CPU            int
	Memory         int
	DiskSet        DiskSetArray
	IPSet          IPSetArray
	NetCapability  string
}

type UHostSetArray []UHostSet

// DescribeUHostInstanceResponse is the reply for DescribeUHostInstance.
type DescribeUHostInstanceResponse struct {
	ucloud.CommonResponse

	TotalCount int
	UHostSet   UHostSetArray
}
// DescribeUHostInstance lists hosts.
// Fix: pass the response by pointer so DoRequest can populate it.
func (u *UHost) DescribeUHostInstance(params *DescribeUHostInstanceParams) (*DescribeUHostInstanceResponse, error) {
	response := DescribeUHostInstanceResponse{}
	err := u.DoRequest("DescribeUHostInstance", params, &response)

	return &response, err
}
// StartUHostInstanceParams carries the request fields for StartUHostInstance.
type StartUHostInstanceParams struct {
	ucloud.CommonRequest

	Region string
	UHostId string
}

// StartUHostInstanceResponse is the reply for StartUHostInstance.
type StartUHostInstanceResponse struct {
	ucloud.CommonResponse

	UhostId string
}
func (u *UHost) StartUHostInstance(params *StartUHostInstanceParams) (*StartUHostInstanceResponse, error) {
response := StartUHostInstanceResponse{}
err := u.DoRequest("StartUHostInstance", params, response)
return &response, err
}
// StopUHostInstanceParams identifies the instance to stop.
type StopUHostInstanceParams struct {
	ucloud.CommonRequest
	Region  string
	UHostId string
}

// StopUHostInstanceResponse is the response of StopUHostInstance.
type StopUHostInstanceResponse struct {
	ucloud.CommonResponse
	UhostId string
}

// StopUHostInstance gracefully shuts down the given UHost instance.
func (u *UHost) StopUHostInstance(params *StopUHostInstanceParams) (*StopUHostInstanceResponse, error) {
	response := StopUHostInstanceResponse{}
	// Pass a pointer so the decoded API reply can populate response.
	err := u.DoRequest("StopUHostInstance", params, &response)
	return &response, err
}
// PoweroffUHostInstanceParams identifies the instance to power off.
type PoweroffUHostInstanceParams struct {
	ucloud.CommonRequest
	Region  string
	UHostId string
}

// PoweroffUHostInstanceResponse is the response of PoweroffUHostInstance.
type PoweroffUHostInstanceResponse struct {
	ucloud.CommonResponse
	UhostId string
}

// PoweroffUHostInstance forcibly powers off the given UHost instance.
func (u *UHost) PoweroffUHostInstance(params *PoweroffUHostInstanceParams) (*PoweroffUHostInstanceResponse, error) {
	response := PoweroffUHostInstanceResponse{}
	// Pass a pointer so the decoded API reply can populate response.
	err := u.DoRequest("PoweroffUHostInstance", params, &response)
	return &response, err
}
// RebootUHostInstanceParams identifies the instance to reboot.
type RebootUHostInstanceParams struct {
	ucloud.CommonRequest
	Region  string
	UHostId string
}

// RebootUHostInstanceResponse is the response of RebootUHostInstance.
type RebootUHostInstanceResponse struct {
	ucloud.CommonResponse
	UhostId string
}

// RebootUHostInstance restarts the given UHost instance.
func (u *UHost) RebootUHostInstance(params *RebootUHostInstanceParams) (*RebootUHostInstanceResponse, error) {
	response := RebootUHostInstanceResponse{}
	// Pass a pointer so the decoded API reply can populate response.
	err := u.DoRequest("RebootUHostInstance", params, &response)
	return &response, err
}
// ResetUHostInstancePasswordParams identifies the instance and the new
// administrator password to set.
type ResetUHostInstancePasswordParams struct {
	ucloud.CommonRequest
	Region   string
	UHostId  string
	Password string
}

// ResetUHostInstancePasswordResponse is the response of
// ResetUHostInstancePassword.
type ResetUHostInstancePasswordResponse struct {
	ucloud.CommonResponse
	UhostId string
}

// ResetUHostInstancePassword resets the administrator password of the
// given UHost instance.
func (u *UHost) ResetUHostInstancePassword(params *ResetUHostInstancePasswordParams) (*ResetUHostInstancePasswordResponse, error) {
	response := ResetUHostInstancePasswordResponse{}
	// Pass a pointer so the decoded API reply can populate response.
	err := u.DoRequest("ResetUHostInstancePassword", params, &response)
	return &response, err
}
|
package cbor
import (
"bytes"
"testing"
"github.com/polydawn/refmt/tok/fixtures"
)
// testBytes exercises CBOR byte-string round-trips: the definite-length
// canonical form for both encode and decode, plus indefinite-length
// (chunked) inputs that must decode to the same fixture.
func testBytes(t *testing.T) {
	t.Run("short byte array", func(t *testing.T) {
		fixture := fixtures.SequenceMap["short byte array"]
		want := bcat(b(0x40+5), []byte(`value`))
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, fixture, want, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, fixture, want, nil)
		})
		t.Run("decode indefinite length, single hunk", func(t *testing.T) {
			chunked := bcat(b(0x5f), b(0x40+5), []byte(`value`), b(0xff))
			checkDecoding(t, fixture, chunked, nil)
		})
		t.Run("decode indefinite length, multi hunk", func(t *testing.T) {
			chunked := bcat(b(0x5f), b(0x40+2), []byte(`va`), b(0x40+3), []byte(`lue`), b(0xff))
			checkDecoding(t, fixture, chunked, nil)
		})
	})
	t.Run("long zero byte array", func(t *testing.T) {
		fixture := fixtures.SequenceMap["long zero byte array"]
		want := bcat(b(0x40+0x19), []byte{0x1, 0x90}, bytes.Repeat(b(0x0), 400))
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, fixture, want, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, fixture, want, nil)
		})
	})
}
|
package main
import (
"io"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"gopkg.in/cheggaaa/pb.v1"
)
// fileRegExp matches single digits; every digit in a media URL is collected
// and concatenated to form the local file name.
var fileRegExp = regexp.MustCompile(`[0-9]`)
// download fetches one media URL and writes it to a local file named from
// the digits in the URL plus its extension. Concurrency is bounded by the
// package-level semaphore; failures are counted in the package-level fails
// struct; progress is reported on bar unless running silently.
func download(media string, wg *sync.WaitGroup, bar *pb.ProgressBar) {
	semaphore <- struct{}{}
	defer func() { <-semaphore }()
	defer wg.Done()
	if !silent {
		defer bar.Increment()
	}
	resp, err := http.Get(media)
	if err != nil {
		fails.Get++
		// resp is nil on error; the old code fell through and would
		// have panicked on resp.Body below.
		return
	}
	defer resp.Body.Close() // previously leaked
	filename := fileRegExp.FindAllString(media, -1) // find file id
	filename = append(filename, filepath.Ext(media)) // extract file extension
	fn := strings.Join(filename, "") // Add it all together
	file, err := os.Create(fn)
	if err != nil {
		panic(err)
	}
	defer file.Close() // previously leaked
	if _, err = io.Copy(file, resp.Body); err != nil {
		fails.Copy++
	}
}
|
package main
import "fmt"
// main demonstrates directional channel views. Both the send-only and the
// receive-only variables must alias the SAME underlying channel; the old
// code made chan3 a fresh empty channel, so the receive below deadlocked.
func main() {
	//var chan1 chan int // bidirectional (read/write)
	ch := make(chan int, 3) // shared backing channel
	var chan2 chan<- int    // send-only view
	chan2 = ch
	chan2 <- 2
	var chan3 <-chan int // receive-only view
	chan3 = ch
	num := <-chan3 // receives the 2 sent through chan2
	fmt.Println(num)
}
|
package tx
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/terra-money/terra.go/key"
"github.com/terra-money/terra.go/msg"
terraapp "github.com/terra-money/core/app"
)
// Test_Sign derives a key from a fixed mnemonic, builds a MsgExecuteContract
// transaction, and pins the exact signature bytes produced by both the
// legacy-amino-JSON and the direct sign modes. The expected JSON blobs are
// golden values; they change if the signing payload format changes.
func Test_Sign(t *testing.T) {
	mnemonic := "essence gallery exit illegal nasty luxury sport trouble measure benefit busy almost bulb fat shed today produce glide meadow require impact fruit omit weasel"
	privKeyBz, err := key.DerivePrivKeyBz(mnemonic, key.CreateHDPath(0, 0))
	assert.NoError(t, err)
	privKey, err := key.PrivKeyGen(privKeyBz)
	assert.NoError(t, err)
	addr := msg.AccAddress(privKey.PubKey().Address())
	// Address derived from the mnemonic above must be stable.
	assert.Equal(t, addr.String(), "terra1cevwjzwft3pjuf5nc32d9kyrvh5y7fp9havw7k")
	txBuilder := NewTxBuilder(terraapp.MakeEncodingConfig().TxConfig)
	// Withdraw 1000 uusd collateral from position 1 (sender == contract
	// address here purely for test purposes).
	err = txBuilder.SetMsgs(
		msg.NewMsgExecuteContract(
			addr,
			addr,
			[]byte("{\"withdraw\":{\"position_idx\":\"1\",\"collateral\":{\"info\":{\"native_token\":{\"denom\":\"uusd\"}},\"amount\":\"1000\"}}}"),
			msg.Coins{},
		),
	)
	require.NoError(t, err)
	txBuilder.SetFeeAmount(msg.Coins{})
	txBuilder.SetGasLimit(1000000)
	// amino version test
	err = txBuilder.Sign(SignModeLegacyAminoJSON, SignerData{
		ChainID:       "testnet",
		AccountNumber: 359,
		Sequence:      4,
	}, privKey, true)
	require.NoError(t, err)
	sigs, err := txBuilder.GetTx().GetSignaturesV2()
	require.NoError(t, err)
	bz, err := txBuilder.TxConfig.MarshalSignatureJSON(sigs)
	fmt.Println(string(bz))
	assert.NoError(t, err)
	assert.Equal(t, bz, []byte(`{"signatures":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AmADjpxwusAnJ7ahD7+trzovH32w+LaRGVZSZUOd3E3d"},"data":{"single":{"mode":"SIGN_MODE_LEGACY_AMINO_JSON","signature":"FUoV2W4aS8zm2AzmrduvZCw8QXuAZXz/hgp/aCr/jtwWi6oHpsMhhR+dUt1r0L29PAUJz69aMVvMfTQEecI0+w=="}},"sequence":"4"}]}`))
	// direct mode test
	err = txBuilder.Sign(SignModeDirect, SignerData{
		ChainID:       "testnet",
		AccountNumber: 359,
		Sequence:      4,
	}, privKey, true)
	require.NoError(t, err)
	sigs, err = txBuilder.GetTx().GetSignaturesV2()
	require.NoError(t, err)
	bz, err = txBuilder.TxConfig.MarshalSignatureJSON(sigs)
	assert.NoError(t, err)
	assert.Equal(t, bz, []byte(`{"signatures":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AmADjpxwusAnJ7ahD7+trzovH32w+LaRGVZSZUOd3E3d"},"data":{"single":{"mode":"SIGN_MODE_DIRECT","signature":"BllKHpAIgMHVvip11+QAHqlhGGfh6dIpFAdIqE+EZFIs4oWggjjua9g9bSZY9Sr6y+8Fjn2X7ziWUHT8zSbNdQ=="}},"sequence":"4"}]}`))
}
|
package g2util
import (
"database/sql/driver"
"fmt"
"time"
)
const (
	// TimeZone is the IANA zone name used for all local-time handling.
	TimeZone = "Asia/Shanghai"
	// Custom is the datetime layout used when (un)marshalling JSONTime.
	Custom = "2006-01-02 15:04:05"
	// DateLayout is the date-only layout.
	DateLayout = "2006-01-02"
)
/*func init() {
	// Set the process time zone to Shanghai.
	_ = SetTimeZone()
}*/
// TimeNowFunc is the clock source; override it in tests to freeze time.
var TimeNowFunc = time.Now
// TimeNow returns the current time from the configurable clock source.
func TimeNow() time.Time { return TimeNowFunc() }
// SetTimeZone sets the process-wide time.Local to the Asia/Shanghai zone.
// On lookup failure time.Local is left untouched and the error is returned.
func SetTimeZone() error {
	loc, err := time.LoadLocation(TimeZone)
	if err != nil {
		return err
	}
	time.Local = loc
	return nil
}
// JSONTime is a time.Time wrapper that (un)marshals using the Custom layout
// ("2006-01-02 15:04:05") in the local time zone.
type JSONTime time.Time
// Today returns today's date at midnight (local time).
func Today() *JSONTime { return JSONTime(TimeNow()).Addr().Date() }
// Now returns the current time as a *JSONTime.
func Now() *JSONTime { return NewJSONTimeOfTime(TimeNow()) }
// NewJSONTimeOfTime converts a time.Time to a *JSONTime.
func NewJSONTimeOfTime(t time.Time) *JSONTime { return JSONTime(t).Addr() }
// ToDatetime parses a "2006-01-02 15:04:05" string in the local zone.
func ToDatetime(in string) (JSONTime, error) {
	out, err := time.ParseInLocation(Custom, in, time.Local)
	return JSONTime(out), err
}
// Must2JSONTimeAddr is like ToDatetime but panics on a parse error.
func Must2JSONTimeAddr(in string) *JSONTime {
	j, err := ToDatetime(in)
	if err != nil {
		panic(err)
	}
	return &j
}
/*//FromDB ...
func (p *JSONTime) FromDB(data []byte) error {
timeStd, _ := time.ParseInLocation(Custom, string(data), time.Local)
*p = JSONTime(timeStd)
return nil
}
//ToDB ...
func (p *JSONTime) ToDB() (b []byte, err error) {
b = []byte(p.String())
return
}*/
// SetByTime overwrites the receiver with the given time.Time.
func (p *JSONTime) SetByTime(timeVal time.Time) {
	*p = JSONTime(timeVal)
}
// Time returns the wrapped value as a time.Time.
func (p *JSONTime) Time() time.Time {
	return p.Convert2Time()
}
// Date returns the receiver's date truncated to midnight (local time).
func (p *JSONTime) Date() *JSONTime {
	y, m, d := p.Time().Date()
	dt := time.Date(y, m, d, 0, 0, 0, 0, time.Local)
	t := JSONTime(dt)
	return &t
}
// Convert2Time converts the receiver back to a time.Time in the local zone.
func (p *JSONTime) Convert2Time() time.Time {
	return time.Time(*p).Local()
}
// Value implements driver.Valuer so a JSONTime can be written to MySQL.
// The zero time is stored as SQL NULL.
func (p *JSONTime) Value() (driver.Value, error) {
	var zeroTime time.Time
	var ti = p.Convert2Time()
	// Matching the zero time's UnixNano marks the value as unset.
	if ti.UnixNano() == zeroTime.UnixNano() {
		return nil, nil
	}
	return ti, nil
}
// Scan implements sql.Scanner; it accepts only time.Time values and rejects
// everything else with an error.
func (p *JSONTime) Scan(v interface{}) error {
	value, ok := v.(time.Time)
	if ok {
		*p = JSONTime(value)
		return nil
	}
	return fmt.Errorf("can not convert %v to timestamp", v)
}
// GobEncode implements the gob.GobEncoder interface.
func (p *JSONTime) GobEncode() ([]byte, error) {
	return p.Convert2Time().MarshalBinary()
}
// GobDecode implements the gob.GobDecoder interface.
func (p *JSONTime) GobDecode(data []byte) error {
	// Decode into a scratch time.Time first so the receiver is only
	// modified on success.
	s := p.Convert2Time()
	err := (&s).UnmarshalBinary(data)
	if err != nil {
		return err
	}
	*p = JSONTime(s)
	return nil
}
// MarshalJSON renders the time as a quoted "2006-01-02 15:04:05" string;
// the zero time marshals to an empty string.
func (p *JSONTime) MarshalJSON() ([]byte, error) {
	if time.Time(*p).IsZero() {
		return []byte(`""`), nil
	}
	data := make([]byte, 0)
	data = append(data, '"')
	data = p.Convert2Time().AppendFormat(data, Custom)
	data = append(data, '"')
	return data, nil
}
// UnmarshalJSON parses a quoted "2006-01-02 15:04:05" timestamp. Empty
// strings (as produced by MarshalJSON for zero times) and nulls decode to
// the zero time. Parse failures also yield the zero time; errors are
// deliberately swallowed to preserve the historical lenient behaviour.
// The old code fell through its error branch and assigned twice.
func (p *JSONTime) UnmarshalJSON(data []byte) error {
	s := string(data)
	if s == `""` || s == "null" {
		*p = JSONTime(time.Time{})
		return nil
	}
	local, err := time.ParseInLocation(`"`+Custom+`"`, s, time.Local)
	if err != nil {
		*p = JSONTime(time.Time{})
		return nil
	}
	*p = JSONTime(local)
	return nil
}
// String formats the time with the Custom layout.
func (p *JSONTime) String() string { return p.Convert2Time().Format(Custom) }
// StringFormat formats the time with the given layout.
func (p *JSONTime) StringFormat(layout string) string { return p.Convert2Time().Format(layout) }
// Addr returns a pointer to a copy of the receiver (value receiver on purpose).
func (p JSONTime) Addr() *JSONTime { return &p }
// Add returns a new *JSONTime shifted by the given duration.
func (p *JSONTime) Add(d time.Duration) *JSONTime { return NewJSONTimeOfTime(p.Time().Add(d)) }
// TimeExcWrap runs fn and returns its wall-clock duration.
func TimeExcWrap(fn func()) time.Duration {
	n := TimeNow()
	fn()
	return time.Since(n)
}
// TodayDate returns today's date formatted as "2006-01-02".
func TodayDate() string { return TimeNow().Format(DateLayout) }
// RetryDoTimes calls fn until it succeeds, sleeping intervalSecond seconds
// between attempts, and returns the last error (nil on success).
// NOTE(review): the counter is compared before it is incremented, so fn
// can run up to times+2 times for a given `times` — confirm whether callers
// rely on this off-by-one behaviour before changing it.
func RetryDoTimes(times, intervalSecond int64, fn func() error) error {
	var a int64
	var err error
	for {
		err = fn()
		if err == nil || a > times {
			break
		}
		a++
		time.Sleep(time.Second * time.Duration(intervalSecond))
	}
	return err
}
// RetryDo retries fn with a fixed cap of attempts (it gives up after the
// attempt counter exceeds 10), sleeping intervalSecond seconds between
// attempts, and returns the last error (nil on success).
func RetryDo(fn func() error, intervalSecond int64) error {
	var err error
	for attempt := 0; ; attempt++ {
		if err = fn(); err == nil || attempt > 10 {
			return err
		}
		time.Sleep(time.Duration(intervalSecond) * time.Second)
	}
}
|
package cmd
import (
"context"
"fmt"
"strings"
"github.com/chaosblade-io/chaosblade-spec-go/spec"
"github.com/spf13/cobra"
)
// DestroyCommand implements `blade destroy UID`, which reverts a previously
// created chaos experiment identified by its uid.
type DestroyCommand struct {
	baseCommand
	exp *expCommand
}
// Init wires up the cobra command definition for destroy.
func (dc *DestroyCommand) Init() {
	dc.command = &cobra.Command{
		Use:     "destroy UID",
		Short:   "Destroy a chaos experiment",
		Long:    "Destroy a chaos experiment by experiment uid which you can run status command to query",
		Args:    cobra.MinimumNArgs(1),
		Aliases: []string{"d"},
		Example: destroyExample(),
		RunE: func(cmd *cobra.Command, args []string) error {
			return dc.runDestroy(cmd, args)
		},
	}
}
// runDestroy reverts the experiment identified by args[0]: it loads the
// experiment record, resolves the executor that created it, re-executes it
// with the destroy flag set, and finally marks the record as Destroyed.
func (dc *DestroyCommand) runDestroy(cmd *cobra.Command, args []string) error {
	uid := args[0]
	model, err := GetDS().QueryExperimentModelByUid(uid)
	if err != nil {
		return spec.ReturnFail(spec.Code[spec.DatabaseError], err.Error())
	}
	if model == nil {
		return spec.Return(spec.Code[spec.DataNotFound])
	}
	if model.Status == "Destroyed" {
		// Already destroyed: report success idempotently.
		result := fmt.Sprintf("command: %s %s %s, destroy time: %s",
			model.Command, model.SubCommand, model.Flag, model.UpdateTime)
		cmd.Println(spec.ReturnSuccess(result).Print())
		return nil
	}
	var firstCommand = model.Command
	var actionCommand, actionTargetCommand string
	// SubCommand is "[target] action": the last token is the action and
	// the token before it (if any) is the target.
	subCommands := strings.Split(model.SubCommand, " ")
	subLength := len(subCommands)
	if subLength > 0 {
		if subLength > 1 {
			actionCommand = subCommands[subLength-1]
			actionTargetCommand = subCommands[subLength-2]
		} else {
			actionCommand = subCommands[0]
			actionTargetCommand = ""
		}
	}
	executor := dc.exp.getExecutor(firstCommand, actionTargetCommand, actionCommand)
	if executor == nil {
		return spec.ReturnFail(spec.Code[spec.ServerError],
			fmt.Sprintf("can't find executor for %s, %s", model.Command, model.SubCommand))
	}
	if actionTargetCommand == "" {
		actionTargetCommand = firstCommand
	}
	// convert commandModel to expModel
	expModel := spec.ConvertCommandsToExpModel(actionCommand, actionTargetCommand, model.Flag)
	// set destroy flag
	ctx := spec.SetDestroyFlag(context.Background(), uid)
	// execute
	response := executor.Exec(uid, ctx, expModel)
	if !response.Success {
		// The failed response doubles as the returned error.
		return response
	}
	// persist the new status, then report the result
	checkError(GetDS().UpdateExperimentModelByUid(uid, "Destroyed", ""))
	result := fmt.Sprintf("command: %s %s %s", model.Command, model.SubCommand, model.Flag)
	cmd.Println(spec.ReturnSuccess(result).Print())
	return nil
}
// destroyExample returns the usage example shown in the destroy command help.
func destroyExample() string {
	return "blade destroy 47cc0744f1bb"
}
|
package main
import (
"fmt"
)
// main demonstrates pointer basics: taking an address, dereferencing, and
// writing through a pointer.
func main() {
	x := 15
	a := &x
	fmt.Println(a)  // address of x
	fmt.Println(*a) // dereferenced value: 15
	*a = 5          // write through the pointer; x becomes 5
	fmt.Println(x)  // prints the new x value
	*a = *a * *a    // square the value through the pointer
	fmt.Println(a)  // same address as before — pointers don't move
	fmt.Println(*a) // new value: 25
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggfuncs
import (
"unsafe"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
// Precomputed memory footprints of the partial-result structs and the
// value4X wrappers below, reported to the memory tracker on allocation.
const (
	// DefPartialResult4FirstValueSize is the size of partialResult4FirstValue
	DefPartialResult4FirstValueSize = int64(unsafe.Sizeof(partialResult4FirstValue{}))
	// DefPartialResult4LastValueSize is the size of partialResult4LastValue
	DefPartialResult4LastValueSize = int64(unsafe.Sizeof(partialResult4LastValue{}))
	// DefPartialResult4NthValueSize is the size of partialResult4NthValue
	DefPartialResult4NthValueSize = int64(unsafe.Sizeof(partialResult4NthValue{}))
	// DefValue4IntSize is the size of value4Int
	DefValue4IntSize = int64(unsafe.Sizeof(value4Int{}))
	// DefValue4Float32Size is the size of value4Float32
	DefValue4Float32Size = int64(unsafe.Sizeof(value4Float32{}))
	// DefValue4Float64Size is the size of value4Float64
	DefValue4Float64Size = int64(unsafe.Sizeof(value4Float64{}))
	// DefValue4DecimalSize is the size of value4Decimal
	DefValue4DecimalSize = int64(unsafe.Sizeof(value4Decimal{}))
	// DefValue4TimeSize is the size of value4Time
	DefValue4TimeSize = int64(unsafe.Sizeof(value4Time{}))
	// DefValue4DurationSize is the size of value4Duration
	DefValue4DurationSize = int64(unsafe.Sizeof(value4Duration{}))
	// DefValue4StringSize is the size of value4String
	DefValue4StringSize = int64(unsafe.Sizeof(value4String{}))
	// DefValue4JSONSize is the size of value4JSON
	DefValue4JSONSize = int64(unsafe.Sizeof(value4JSON{}))
)
// valueEvaluator is used to evaluate values for `first_value`, `last_value`, `nth_value`,
// `lead` and `lag`.
type valueEvaluator interface {
	// evaluateRow evaluates the expression using row and stores the result inside.
	evaluateRow(ctx sessionctx.Context, expr expression.Expression, row chunk.Row) (memDelta int64, err error)
	// appendResult appends the result to chunk.
	appendResult(chk *chunk.Chunk, colIdx int)
}
// value4Int buffers a single int64 result (or NULL).
type value4Int struct {
	val    int64
	isNull bool
}
// evaluateRow stores expr evaluated on row; fixed-size, so memDelta is 0.
func (v *value4Int) evaluateRow(ctx sessionctx.Context, expr expression.Expression, row chunk.Row) (memDelta int64, err error) {
	v.val, v.isNull, err = expr.EvalInt(ctx, row)
	return 0, err
}
// appendResult writes the buffered value (or NULL) into column colIdx.
func (v *value4Int) appendResult(chk *chunk.Chunk, colIdx int) {
	if v.isNull {
		chk.AppendNull(colIdx)
	} else {
		chk.AppendInt64(colIdx, v.val)
	}
}
// value4Float32 buffers a single float32 result (or NULL).
type value4Float32 struct {
	val    float32
	isNull bool
}
// evaluateRow evaluates as float64 and narrows to float32 for storage.
func (v *value4Float32) evaluateRow(ctx sessionctx.Context, expr expression.Expression, row chunk.Row) (memDelta int64, err error) {
	var val float64
	val, v.isNull, err = expr.EvalReal(ctx, row)
	v.val = float32(val)
	return 0, err
}
func (v *value4Float32) appendResult(chk *chunk.Chunk, colIdx int) {
	if v.isNull {
		chk.AppendNull(colIdx)
	} else {
		chk.AppendFloat32(colIdx, v.val)
	}
}
// value4Decimal buffers a single decimal result (or NULL).
type value4Decimal struct {
	val    *types.MyDecimal
	isNull bool
}
func (v *value4Decimal) evaluateRow(ctx sessionctx.Context, expr expression.Expression, row chunk.Row) (memDelta int64, err error) {
	v.val, v.isNull, err = expr.EvalDecimal(ctx, row)
	return 0, err
}
func (v *value4Decimal) appendResult(chk *chunk.Chunk, colIdx int) {
	if v.isNull {
		chk.AppendNull(colIdx)
	} else {
		chk.AppendMyDecimal(colIdx, v.val)
	}
}
// value4Float64 buffers a single float64 result (or NULL).
type value4Float64 struct {
	val    float64
	isNull bool
}
func (v *value4Float64) evaluateRow(ctx sessionctx.Context, expr expression.Expression, row chunk.Row) (memDelta int64, err error) {
	v.val, v.isNull, err = expr.EvalReal(ctx, row)
	return 0, err
}
func (v *value4Float64) appendResult(chk *chunk.Chunk, colIdx int) {
	if v.isNull {
		chk.AppendNull(colIdx)
	} else {
		chk.AppendFloat64(colIdx, v.val)
	}
}
// value4String buffers a single string result (or NULL).
type value4String struct {
	val    string
	isNull bool
}
// evaluateRow reports the change in buffered string length as memDelta.
func (v *value4String) evaluateRow(ctx sessionctx.Context, expr expression.Expression, row chunk.Row) (memDelta int64, err error) {
	originalLength := len(v.val)
	v.val, v.isNull, err = expr.EvalString(ctx, row)
	return int64(len(v.val) - originalLength), err
}
func (v *value4String) appendResult(chk *chunk.Chunk, colIdx int) {
	if v.isNull {
		chk.AppendNull(colIdx)
	} else {
		chk.AppendString(colIdx, v.val)
	}
}
// value4Time buffers a single datetime/timestamp result (or NULL).
type value4Time struct {
	val    types.Time
	isNull bool
}
func (v *value4Time) evaluateRow(ctx sessionctx.Context, expr expression.Expression, row chunk.Row) (memDelta int64, err error) {
	v.val, v.isNull, err = expr.EvalTime(ctx, row)
	return 0, err
}
func (v *value4Time) appendResult(chk *chunk.Chunk, colIdx int) {
	if v.isNull {
		chk.AppendNull(colIdx)
	} else {
		chk.AppendTime(colIdx, v.val)
	}
}
// value4Duration buffers a single duration result (or NULL).
type value4Duration struct {
	val    types.Duration
	isNull bool
}
func (v *value4Duration) evaluateRow(ctx sessionctx.Context, expr expression.Expression, row chunk.Row) (memDelta int64, err error) {
	v.val, v.isNull, err = expr.EvalDuration(ctx, row)
	return 0, err
}
func (v *value4Duration) appendResult(chk *chunk.Chunk, colIdx int) {
	if v.isNull {
		chk.AppendNull(colIdx)
	} else {
		chk.AppendDuration(colIdx, v.val)
	}
}
// value4JSON buffers a single JSON result (or NULL).
type value4JSON struct {
	val    types.BinaryJSON
	isNull bool
}
// evaluateRow deep-copies the JSON value and reports the change in buffered
// payload length as memDelta.
func (v *value4JSON) evaluateRow(ctx sessionctx.Context, expr expression.Expression, row chunk.Row) (memDelta int64, err error) {
	originalLength := len(v.val.Value)
	v.val, v.isNull, err = expr.EvalJSON(ctx, row)
	v.val = v.val.Copy() // deep copy to avoid content change.
	return int64(len(v.val.Value) - originalLength), err
}
func (v *value4JSON) appendResult(chk *chunk.Chunk, colIdx int) {
	if v.isNull {
		chk.AppendNull(colIdx)
	} else {
		chk.AppendJSON(colIdx, v.val)
	}
}
// buildValueEvaluator picks the value4X wrapper matching the field type's
// evaluation type and returns it with its memory footprint. BIT columns are
// evaluated as strings. Returns (nil, 0) for unhandled evaluation types.
func buildValueEvaluator(tp *types.FieldType) (ve valueEvaluator, memDelta int64) {
	evalType := tp.EvalType()
	if tp.GetType() == mysql.TypeBit {
		evalType = types.ETString
	}
	switch evalType {
	case types.ETInt:
		return &value4Int{}, DefValue4IntSize
	case types.ETReal:
		// FLOAT and DOUBLE both evaluate as real but store differently.
		switch tp.GetType() {
		case mysql.TypeFloat:
			return &value4Float32{}, DefValue4Float32Size
		case mysql.TypeDouble:
			return &value4Float64{}, DefValue4Float64Size
		}
	case types.ETDecimal:
		return &value4Decimal{}, DefValue4DecimalSize
	case types.ETDatetime, types.ETTimestamp:
		return &value4Time{}, DefValue4TimeSize
	case types.ETDuration:
		return &value4Duration{}, DefValue4DurationSize
	case types.ETString:
		return &value4String{}, DefValue4StringSize
	case types.ETJson:
		return &value4JSON{}, DefValue4JSONSize
	}
	return nil, 0
}
// firstValue implements the FIRST_VALUE window function.
type firstValue struct {
	baseAggFunc
	tp *types.FieldType
}
// partialResult4FirstValue records whether a first value has been captured
// and the evaluator holding it.
type partialResult4FirstValue struct {
	gotFirstValue bool
	evaluator     valueEvaluator
}
// AllocPartialResult allocates the partial result and reports its size.
func (v *firstValue) AllocPartialResult() (pr PartialResult, memDelta int64) {
	ve, veMemDelta := buildValueEvaluator(v.tp)
	p := &partialResult4FirstValue{evaluator: ve}
	return PartialResult(p), DefPartialResult4FirstValueSize + veMemDelta
}
// ResetPartialResult clears the captured-value flag for reuse.
func (*firstValue) ResetPartialResult(pr PartialResult) {
	p := (*partialResult4FirstValue)(pr)
	p.gotFirstValue = false
}
// UpdatePartialResult captures the very first row of the group and ignores
// every subsequent batch.
func (v *firstValue) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
	p := (*partialResult4FirstValue)(pr)
	if p.gotFirstValue {
		return 0, nil
	}
	if len(rowsInGroup) > 0 {
		p.gotFirstValue = true
		memDelta, err = p.evaluator.evaluateRow(sctx, v.args[0], rowsInGroup[0])
		if err != nil {
			return 0, err
		}
	}
	return memDelta, nil
}
// AppendFinalResult2Chunk emits the captured value, or NULL for an empty group.
func (v *firstValue) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
	p := (*partialResult4FirstValue)(pr)
	if !p.gotFirstValue {
		chk.AppendNull(v.ordinal)
	} else {
		p.evaluator.appendResult(chk, v.ordinal)
	}
	return nil
}
// lastValue implements the LAST_VALUE window function.
type lastValue struct {
	baseAggFunc
	tp *types.FieldType
}
// partialResult4LastValue records whether any row has been seen and the
// evaluator holding the most recent value.
type partialResult4LastValue struct {
	gotLastValue bool
	evaluator    valueEvaluator
}
// AllocPartialResult allocates the partial result and reports its size.
func (v *lastValue) AllocPartialResult() (pr PartialResult, memDelta int64) {
	ve, veMemDelta := buildValueEvaluator(v.tp)
	// Bug fix: allocate partialResult4LastValue, not
	// partialResult4FirstValue. The old code only worked because the two
	// structs happened to share a layout when cast back through the
	// unsafe PartialResult pointer.
	p := &partialResult4LastValue{evaluator: ve}
	return PartialResult(p), DefPartialResult4LastValueSize + veMemDelta
}
// ResetPartialResult clears the seen-value flag for reuse.
func (*lastValue) ResetPartialResult(pr PartialResult) {
	p := (*partialResult4LastValue)(pr)
	p.gotLastValue = false
}
// UpdatePartialResult re-captures the final row of every batch, so after the
// last batch the evaluator holds the group's last value.
func (v *lastValue) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
	p := (*partialResult4LastValue)(pr)
	if len(rowsInGroup) > 0 {
		p.gotLastValue = true
		memDelta, err = p.evaluator.evaluateRow(sctx, v.args[0], rowsInGroup[len(rowsInGroup)-1])
		if err != nil {
			return 0, err
		}
	}
	return memDelta, nil
}
// AppendFinalResult2Chunk emits the captured value, or NULL for an empty group.
func (v *lastValue) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
	p := (*partialResult4LastValue)(pr)
	if !p.gotLastValue {
		chk.AppendNull(v.ordinal)
	} else {
		p.evaluator.appendResult(chk, v.ordinal)
	}
	return nil
}
// nthValue implements the NTH_VALUE window function; nth is 1-based.
type nthValue struct {
	baseAggFunc
	tp  *types.FieldType
	nth uint64
}
// partialResult4NthValue counts rows seen so far and holds the evaluator
// that captures the nth row when it arrives.
type partialResult4NthValue struct {
	seenRows  uint64
	evaluator valueEvaluator
}
// AllocPartialResult allocates the partial result and reports its size.
func (v *nthValue) AllocPartialResult() (pr PartialResult, memDelta int64) {
	ve, veMemDelta := buildValueEvaluator(v.tp)
	// Bug fix: allocate partialResult4NthValue, not
	// partialResult4FirstValue. The old code cast a struct whose first
	// field is a bool to one whose first field is a uint64 counter and
	// only behaved because the allocation was zero-initialized.
	p := &partialResult4NthValue{evaluator: ve}
	return PartialResult(p), DefPartialResult4NthValueSize + veMemDelta
}
// ResetPartialResult clears the row counter for reuse.
func (*nthValue) ResetPartialResult(pr PartialResult) {
	p := (*partialResult4NthValue)(pr)
	p.seenRows = 0
}
// UpdatePartialResult captures the nth row of the group if it falls inside
// this batch; nth == 0 always yields NULL per SQL semantics.
func (v *nthValue) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
	if v.nth == 0 {
		return 0, nil
	}
	p := (*partialResult4NthValue)(pr)
	numRows := uint64(len(rowsInGroup))
	// The nth row lies in this batch iff nth is past the rows already seen
	// but within seenRows+numRows.
	if v.nth > p.seenRows && v.nth-p.seenRows <= numRows {
		memDelta, err = p.evaluator.evaluateRow(sctx, v.args[0], rowsInGroup[v.nth-p.seenRows-1])
		if err != nil {
			return 0, err
		}
	}
	p.seenRows += numRows
	return memDelta, nil
}
// AppendFinalResult2Chunk emits the captured value, or NULL when the group
// had fewer than nth rows (or nth == 0).
func (v *nthValue) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
	p := (*partialResult4NthValue)(pr)
	if v.nth == 0 || p.seenRows < v.nth {
		chk.AppendNull(v.ordinal)
	} else {
		p.evaluator.appendResult(chk, v.ordinal)
	}
	return nil
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
"time"
"github.com/nicholasjackson/bench"
"github.com/nicholasjackson/bench/output"
"github.com/nicholasjackson/bench/util"
"github.com/nicholasjackson/building-microservices-in-go/chapter6/vanilla_http/entities"
)
// main runs the benchmark suite against GoMicroRequest and writes tabular
// stats, error logs and a plot to local files.
// NOTE(review): the meaning of the bench.New arguments (worker count,
// duration, ramp-up, timeout) is assumed from their values — confirm
// against the bench package documentation.
func main() {
	fmt.Println("Benchmarking application")
	b := bench.New(400, 300*time.Second, 90*time.Second, 5*time.Second)
	b.AddOutput(301*time.Second, os.Stdout, output.WriteTabularData)
	b.AddOutput(1*time.Second, util.NewFile("./output.txt"), output.WriteTabularData)
	b.AddOutput(1*time.Second, util.NewFile("./error.txt"), output.WriteErrorLogs)
	b.AddOutput(1*time.Second, util.NewFile("./output.png"), output.PlotData)
	b.RunBenchmarks(GoMicroRequest)
}
// GoMicroRequest is executed by benchmarks. It sends one request carrying a
// JSON-encoded HelloWorldRequest and returns an error for any transport
// failure or non-200 response.
func GoMicroRequest() error {
	request := entities.HelloWorldRequest{
		Name: "Nic",
	}
	data, _ := json.Marshal(request)
	req, err := http.NewRequest("GET", "http://www.public.b.prod-eu-west-1.noths.com", bytes.NewBuffer(data))
	if err != nil {
		// Previously ignored: a malformed request would have caused a
		// nil dereference in client.Do.
		return err
	}
	client := &http.Client{
		Transport: &http.Transport{
			MaxIdleConnsPerHost: 5,
		},
		Timeout: 5 * time.Second,
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("Failed with status: %v", resp.Status)
	}
	return nil
}
|
// Copyright (c) 2020 Hirotsuna Mizuno. All rights reserved.
// Use of this source code is governed by the MIT license that can be found in
// the LICENSE file.
package speedio
import (
"time"
)
// MeterConfig indicates the configuration parameter of bit rate measurement.
//
// Resolution is how often the bitrate is updated.
// Shorter resolutions increase measurement overhead and memory usage.
// Longer resolutions increase measurement delay.
// For example, with a 10s resolution, the bit rate is 0 for the first 10 seconds.
//
// Sample is the length of the most recent period for which the simple moving average bit rate is calculated.
// It must be an integral multiple of Resolution for accurate measurements.
// Also, it must be at least twice the Resolution.
// Longer sample periods increase memory usage for measurements.
type MeterConfig struct {
	Resolution time.Duration
	Sample time.Duration // must be an integral multiple of Resolution
}
// MinMeterResolution is the minimum time resolution to measure bit rate.
const MinMeterResolution time.Duration = time.Millisecond * 100
// DefaultMeterConfig is the default configuration for measurement
// with 500ms resolution and 3s sample duration.
// It means that the average bitrate for the last 3s is updated every 500ms.
var DefaultMeterConfig = &MeterConfig{
	Resolution: time.Millisecond * 500,
	Sample: time.Second * 3,
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package engine
import (
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network"
"github.com/Azure/go-autorest/autorest/to"
)
// CreatePublicIPAddressForNodePools returns public ipv4 address resource for node pool Load Balancer.
// The address is statically allocated and uses the template's loadBalancerSku variable.
func CreatePublicIPAddressForNodePools(name string) PublicIPAddressARM {
	return PublicIPAddressARM{
		ARMResource: ARMResource{
			APIVersion: "[variables('apiVersionNetwork')]",
		},
		PublicIPAddress: network.PublicIPAddress{
			Location: to.StringPtr("[variables('location')]"),
			Name: to.StringPtr("[variables('" + name + "')]"),
			PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
				PublicIPAllocationMethod: network.Static,
			},
			Sku: &network.PublicIPAddressSku{
				Name: "[variables('loadBalancerSku')]",
			},
			Type: to.StringPtr("Microsoft.Network/publicIPAddresses"),
		},
	}
}
// CreatePublicIPAddressForMaster returns public ipv4 address resource for master Load Balancer
// Includes optional DNS configuration for public clusters
func CreatePublicIPAddressForMaster(includeDNS bool) PublicIPAddressARM {
	var dnsSettings *network.PublicIPAddressDNSSettings
	if includeDNS {
		// Public clusters get a DNS label derived from the master FQDN prefix.
		dnsSettings = &network.PublicIPAddressDNSSettings{
			DomainNameLabel: to.StringPtr("[variables('masterFqdnPrefix')]"),
		}
	}
	return PublicIPAddressARM{
		ARMResource: ARMResource{
			APIVersion: "[variables('apiVersionNetwork')]",
		},
		PublicIPAddress: network.PublicIPAddress{
			Location: to.StringPtr("[variables('location')]"),
			Name: to.StringPtr("[variables('masterPublicIPAddressName')]"),
			PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
				DNSSettings: dnsSettings,
				PublicIPAllocationMethod: network.Static,
			},
			Sku: &network.PublicIPAddressSku{
				Name: "[variables('loadBalancerSku')]",
			},
			Type: to.StringPtr("Microsoft.Network/publicIPAddresses"),
		},
	}
}
// createAppGwPublicIPAddress returns the statically allocated, Standard-SKU
// public IP used by the Application Gateway.
func createAppGwPublicIPAddress() PublicIPAddressARM {
	return PublicIPAddressARM{
		ARMResource: ARMResource{
			APIVersion: "[variables('apiVersionNetwork')]",
		},
		PublicIPAddress: network.PublicIPAddress{
			Location: to.StringPtr("[variables('location')]"),
			Name: to.StringPtr("[variables('appGwPublicIPAddressName')]"),
			PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
				PublicIPAllocationMethod: network.Static,
			},
			Sku: &network.PublicIPAddressSku{
				Name: "Standard",
			},
			Type: to.StringPtr("Microsoft.Network/publicIPAddresses"),
		},
	}
}
// createJumpboxPublicIPAddress returns the dynamically allocated, Basic-SKU
// public IP for the jumpbox, reusing the master FQDN prefix as its DNS label.
func createJumpboxPublicIPAddress() PublicIPAddressARM {
	return PublicIPAddressARM{
		ARMResource: ARMResource{
			APIVersion: "[variables('apiVersionNetwork')]",
		},
		PublicIPAddress: network.PublicIPAddress{
			Location: to.StringPtr("[variables('location')]"),
			Name: to.StringPtr("[variables('jumpboxPublicIpAddressName')]"),
			PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
				DNSSettings: &network.PublicIPAddressDNSSettings{
					DomainNameLabel: to.StringPtr("[variables('masterFqdnPrefix')]"),
				},
				PublicIPAllocationMethod: network.Dynamic,
			},
			Sku: &network.PublicIPAddressSku{
				Name: network.PublicIPAddressSkuNameBasic,
			},
			Type: to.StringPtr("Microsoft.Network/publicIPAddresses"),
		},
	}
}
// CreateClusterPublicIPAddress returns public ipv4 address resource for cluster
// this public ip address is created and added to the loadbalancer that's created with
// fqdn as name. ARM does not allow creating a loadbalancer with only ipv6 FE which is
// why a ipv4 fe is created here and added to lb.
func CreateClusterPublicIPAddress() PublicIPAddressARM {
	return PublicIPAddressARM{
		ARMResource: ARMResource{
			APIVersion: "[variables('apiVersionNetwork')]",
		},
		PublicIPAddress: network.PublicIPAddress{
			Location: to.StringPtr("[variables('location')]"),
			Name: to.StringPtr("fee-ipv4"),
			PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
				PublicIPAllocationMethod: network.Static,
			},
			Sku: &network.PublicIPAddressSku{
				Name: "[variables('loadBalancerSku')]",
			},
			Type: to.StringPtr("Microsoft.Network/publicIPAddresses"),
		},
	}
}
|
package repository
import (
"backend/src/constants"
"backend/src/global"
"backend/src/module"
)
// GetEnableParentPrivilege loads all enabled non-leaf (parent) privilege
// nodes, ordered by parent_path then label, into allEnableParent.
func GetEnableParentPrivilege(allEnableParent interface{}) {
	global.DataBase.Where(&module.Privilege{IsForbidden: constants.PrivilegeEnable, IsLeaf: constants.NotLeaf}).Order("parent_path, label").Find(allEnableParent)
}
// GetEnableLeafPrivilege loads all enabled leaf privilege nodes, ordered by
// parent_path then label, into allEnableLeaf.
func GetEnableLeafPrivilege(allEnableLeaf interface{}) {
	global.DataBase.Where(&module.Privilege{IsForbidden: constants.PrivilegeEnable, IsLeaf: constants.IsLeaf}).Order("parent_path, label").Find(allEnableLeaf)
}
|
//
// Copyright (C) 2019-2021 vdaas.org vald team <vald@vdaas.org>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package rest provides vald REST client functions
package rest
// Option is a functional configuration option applied to a gatewayClient.
type Option func(*gatewayClient)
var (
	// defaultOptions are applied before user-supplied options; the local
	// gateway address is used unless overridden.
	defaultOptions = []Option{
		WithAddr("http://127.0.0.1:8080"),
	}
)
// WithAddr returns an Option that sets the gateway address.
// An empty addr is ignored, keeping the previously configured value.
func WithAddr(addr string) Option {
	return func(c *gatewayClient) {
		if len(addr) != 0 {
			c.addr = addr
		}
	}
}
|
package series
// Series is an ordered sequence of float64 samples, oldest first.
type Series []float64

// Values returns the underlying float64 slice.
func (s Series) Values() []float64 {
	return s
}

// Last returns the sample `position` steps back from the newest one
// (position 0 is the most recent sample).
func (s Series) Last(position int) float64 {
	idx := len(s) - 1 - position
	return s[idx]
}

// LastValues returns at most the `size` newest samples.
func (s Series) LastValues(size int) []float64 {
	total := len(s)
	if total <= size {
		return s
	}
	return s[total-size:]
}

// Crossover reports whether s crossed above ref on the newest sample:
// it is above now but was at or below on the previous sample.
func (s Series) Crossover(ref Series) bool {
	if s.Last(0) <= ref.Last(0) {
		return false
	}
	return s.Last(1) <= ref.Last(1)
}

// Crossunder reports whether s crossed below ref on the newest sample:
// it is at or below now but was above on the previous sample.
func (s Series) Crossunder(ref Series) bool {
	if s.Last(0) > ref.Last(0) {
		return false
	}
	return s.Last(1) > ref.Last(1)
}
|
package cloud
// Human-readable instance lifecycle status labels (Chinese display strings,
// not provider API state codes).
const (
	Pending      = "创建中"  // creating
	LaunchFailed = "创建失败" // creation failed
	Running      = "运行中"  // running
	Stopped      = "关机"   // powered off
	Starting     = "开机中"  // powering on
	Stopping     = "关机中"  // powering off
	Rebooting    = "重启中"  // rebooting
	ShutDown     = "停止销毁" // stopped, pending destruction
	Terminating  = "销毁中"  // being destroyed
	Unknow       = "未知"   // unknown (identifier misspelling is exported; renaming would break callers)
)
// Instance is a provider-agnostic description of a cloud virtual machine.
type Instance struct {
	Key          string   // lookup key — presumably account/provider scoped; TODO confirm against callers
	UUID         string   // provider-assigned unique identifier
	Name         string   // display name
	OS           string   // operating system description
	CPU          int      // vCPU count
	Memory       int      // memory size (unit not shown here — confirm MB vs GB at the call sites)
	PublicAddrs  []string // public IP addresses
	PrivateAddrs []string // private IP addresses
	Status       string   // one of the status label constants in this package
	CreatedTime  string   // creation timestamp, preformatted as a string
	ExpiredTime  string   // expiry timestamp, preformatted as a string
}
|
package goevent
import "fmt"
// EventNotDefined is an error indicating that the named event has not been
// defined.
type EventNotDefined struct {
	eventName string
}

// newEventNotDefined builds an EventNotDefined error for the given event name.
func newEventNotDefined(name string) *EventNotDefined {
	e := EventNotDefined{eventName: name}
	return &e
}

// Error implements the error interface.
func (e *EventNotDefined) Error() string {
	return fmt.Sprintf("%s event has not been defined yet.", e.eventName)
}

// EventName returns the name of the event that was not defined.
func (e *EventNotDefined) EventName() string {
	return e.eventName
}

// Compile-time assertion that *EventNotDefined implements error.
var _ error = newEventNotDefined("foo")
|
// Copyright 2020 Ant Group. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
package converter
import (
"context"
"strings"
"testing"
"github.com/dragonflyoss/image-service/contrib/nydusify/pkg/utils"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
)
// makePlatform builds an OCI platform descriptor from an "os/arch" string.
// An empty osArch yields empty OS/Architecture fields; when nydus is true
// the Nydus OS-feature flag is attached.
func makePlatform(osArch string, nydus bool) *ocispec.Platform {
	platform := &ocispec.Platform{}
	if osArch != "" {
		parts := strings.Split(osArch, "/")
		platform.OS = parts[0]
		platform.Architecture = parts[1]
	}
	if nydus {
		platform.OSFeatures = []string{utils.ManifestOSFeatureNydus}
	}
	return platform
}
// makeDesc builds a fixed-size manifest descriptor whose digest is derived
// deterministically from id.
func makeDesc(id string, platform *ocispec.Platform) ocispec.Descriptor {
	desc := ocispec.Descriptor{
		MediaType: ocispec.MediaTypeImageManifest,
		Digest:    digest.FromString("manifest-" + id),
		Size:      10,
	}
	desc.Platform = platform
	return desc
}
// TestManifest exercises manifestManager.makeManifestIndex: merging a nydus
// manifest descriptor into existing OCI manifest lists, with and without a
// separately specified OCI manifest descriptor. Descriptors without a
// platform are expected to be normalized to linux/amd64.
func TestManifest(t *testing.T) {
	mm := manifestManager{
		multiPlatform:  true,
		dockerV2Format: false,
	}
	nydusDesc := makeDesc("nydus", makePlatform("linux/amd64", true))
	// Merge with existing OCI manifests
	existDescs := []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("2", makePlatform("linux/ppc64le", false)),
	}
	index, err := mm.makeManifestIndex(context.Background(), existDescs, &nydusDesc, nil)
	assert.Nil(t, err)
	assert.Equal(t, []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("2", makePlatform("linux/ppc64le", false)),
		makeDesc("nydus", makePlatform("linux/amd64", true)),
	}, index.Manifests)
	// Existing descriptor with an empty-platform struct is normalized.
	existDescs = []ocispec.Descriptor{
		makeDesc("1", makePlatform("", false)),
	}
	index, err = mm.makeManifestIndex(context.Background(), existDescs, &nydusDesc, nil)
	assert.Nil(t, err)
	assert.Equal(t, []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("nydus", makePlatform("linux/amd64", true)),
	}, index.Manifests)
	// Existing descriptor with a nil platform is normalized the same way.
	existDescs = []ocispec.Descriptor{
		makeDesc("1", nil),
	}
	index, err = mm.makeManifestIndex(context.Background(), existDescs, &nydusDesc, nil)
	assert.Nil(t, err)
	assert.Equal(t, []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("nydus", makePlatform("linux/amd64", true)),
	}, index.Manifests)
	// Merge with specified OCI manifest
	ociDesc := makeDesc("1", makePlatform("linux/amd64", false))
	index, err = mm.makeManifestIndex(context.Background(), nil, &nydusDesc, &ociDesc)
	assert.Nil(t, err)
	assert.Equal(t, []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("nydus", makePlatform("linux/amd64", true)),
	}, index.Manifests)
	ociDesc = makeDesc("1", nil)
	index, err = mm.makeManifestIndex(context.Background(), nil, &nydusDesc, &ociDesc)
	assert.Nil(t, err)
	assert.Equal(t, []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("nydus", makePlatform("linux/amd64", true)),
	}, index.Manifests)
	ociDesc = makeDesc("1", makePlatform("", false))
	index, err = mm.makeManifestIndex(context.Background(), nil, &nydusDesc, &ociDesc)
	assert.Nil(t, err)
	assert.Equal(t, []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("nydus", makePlatform("linux/amd64", true)),
	}, index.Manifests)
	// Preferred to merge with existing OCI manifests, instead of specified OCI manifest
	ociDesc = makeDesc("3", makePlatform("linux/amd64", false))
	existDescs = []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("2", makePlatform("linux/ppc64le", false)),
	}
	index, err = mm.makeManifestIndex(context.Background(), existDescs, &nydusDesc, &ociDesc)
	assert.Nil(t, err)
	assert.Equal(t, []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("2", makePlatform("linux/ppc64le", false)),
		makeDesc("nydus", makePlatform("linux/amd64", true)),
	}, index.Manifests)
	// A pre-existing nydus descriptor in the list is replaced, not duplicated.
	ociDesc = makeDesc("3", makePlatform("linux/amd64", false))
	existDescs = []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("nydus", makePlatform("linux/amd64", true)),
		makeDesc("2", makePlatform("linux/ppc64le", false)),
	}
	index, err = mm.makeManifestIndex(context.Background(), existDescs, &nydusDesc, &ociDesc)
	assert.Nil(t, err)
	assert.Equal(t, []ocispec.Descriptor{
		makeDesc("1", makePlatform("linux/amd64", false)),
		makeDesc("2", makePlatform("linux/ppc64le", false)),
		makeDesc("nydus", makePlatform("linux/amd64", true)),
	}, index.Manifests)
}
|
// Copyright 2012 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//Wget reads one file from the argument and writes it on the standard output.
package main
import (
"io"
"log"
"net/http"
"os"
)
// wget fetches arg over HTTP and streams the response body to w.
// It returns any error from issuing the request or from copying the body.
//
// NOTE(review): non-2xx responses are still written to w, as before —
// confirm whether a status check is desired.
func wget(arg string, w io.Writer) error {
	resp, err := http.Get(arg)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Bug fix: the copy error was captured but the function returned nil,
	// silently truncating output on network failures.
	_, err = io.Copy(w, resp.Body)
	return err
}
// main fetches the URL given as the first argument and writes it to stdout.
func main() {
	if len(os.Args) < 2 {
		os.Exit(1)
	}
	err := wget(os.Args[1], os.Stdout)
	if err != nil {
		log.Fatalf("%v\n", err)
	}
}
|
package gofinancial
import (
"errors"
"time"
"github.com/razorpay/go-financial/enums/paymentperiod"
"github.com/razorpay/go-financial/enums/interesttype"
"github.com/razorpay/go-financial/enums/frequency"
)
// Config is used to store details used in generation of amortization table.
type Config struct {
	StartDate      time.Time // schedule start; its time of day is preserved for the first period's start
	EndDate        time.Time // schedule end (inclusive)
	Frequency      frequency.Type // instalment cadence: daily/weekly/monthly/annual
	AmountBorrowed int64
	InterestType   interesttype.Type
	Interest       int64 // scaled integer; see getInterestRatePerPeriodInDecimal for the decoding
	PaymentPeriod  paymentperiod.Type
	Round          bool // presumably rounds generated figures — TODO confirm at usage sites
	periods        int64       // derived
	startDates     []time.Time // derived
	endDates       []time.Time // derived
}
// setPeriodsAndDates derives the number of periods and the per-period
// start/end dates from StartDate, EndDate and Frequency.
// It returns an error when the span does not divide evenly into whole
// periods, or ErrInvalidFrequency for an unknown frequency.
func (c *Config) setPeriodsAndDates() error {
	// Work with midnight-truncated copies for day arithmetic; the original
	// StartDate (with its time of day) is still used as the first start date.
	sy, sm, sd := c.StartDate.Date()
	startDate := time.Date(sy, sm, sd, 0, 0, 0, 0, c.StartDate.Location())
	ey, em, ed := c.EndDate.Date()
	endDate := time.Date(ey, em, ed, 0, 0, 0, 0, c.EndDate.Location())
	days := int64(endDate.Sub(startDate).Hours()/24) + 1
	switch c.Frequency {
	case frequency.DAILY:
		c.periods = days
		c.fillDates(startDate, frequency.DAILY)
	case frequency.WEEKLY:
		if days%7 != 0 {
			return errors.New("uneven end date")
		}
		c.periods = days / 7
		c.fillDates(startDate, frequency.WEEKLY)
	case frequency.MONTHLY:
		// Month counting intentionally uses the original (untruncated) dates,
		// matching the previous behavior.
		months, err := getMonthsBetweenDates(c.StartDate, c.EndDate)
		if err != nil {
			return err
		}
		c.periods = int64(*months)
		c.fillDates(startDate, frequency.MONTHLY)
	case frequency.ANNUALLY:
		years, err := getYearsBetweenDates(startDate, endDate)
		if err != nil {
			return err
		}
		c.periods = int64(*years)
		c.fillDates(startDate, frequency.ANNUALLY)
	default:
		return ErrInvalidFrequency
	}
	return nil
}

// fillDates appends c.periods start/end dates beginning at base, stepping by
// freq. The four frequency branches previously duplicated this loop verbatim.
// The very first start date is the caller-supplied c.StartDate so its time of
// day is preserved; subsequent starts are midnight-based.
func (c *Config) fillDates(base time.Time, freq frequency.Type) {
	for i := 0; i < int(c.periods); i++ {
		var date time.Time
		switch freq {
		case frequency.DAILY:
			date = base.AddDate(0, 0, i)
		case frequency.WEEKLY:
			date = base.AddDate(0, 0, 7*i)
		case frequency.MONTHLY:
			date = base.AddDate(0, i, 0)
		case frequency.ANNUALLY:
			date = base.AddDate(i, 0, 0)
		}
		if i == 0 {
			c.startDates = append(c.startDates, c.StartDate)
		} else {
			c.startDates = append(c.startDates, date)
		}
		c.endDates = append(c.endDates, getEndDates(date, freq))
	}
}
// getMonthsBetweenDates counts whole months from start to end, where end is
// the last (inclusive) day of the final month. It returns ErrUnevenEndDate
// when the span is not an exact number of months.
func getMonthsBetweenDates(start time.Time, end time.Time) (*int, error) {
	months := 0
	cur := start
	for cur.Before(end) {
		cur = cur.AddDate(0, 1, 0)
		months++
	}
	if !cur.AddDate(0, 0, -1).Equal(end) {
		return nil, ErrUnevenEndDate
	}
	return &months, nil
}
// getYearsBetweenDates counts whole years from start to end, where end is
// the last (inclusive) day of the final year. It returns ErrUnevenEndDate
// when the span is not an exact number of years.
func getYearsBetweenDates(start time.Time, end time.Time) (*int, error) {
	years := 0
	cur := start
	for cur.Before(end) {
		cur = cur.AddDate(1, 0, 0)
		years++
	}
	if !cur.AddDate(0, 0, -1).Equal(end) {
		return nil, ErrUnevenEndDate
	}
	return &years, nil
}
// getEndDates returns the last instant (23:59:59) of the period that starts
// on date for the given frequency. An unknown frequency yields the zero
// time.Time, as before.
func getEndDates(date time.Time, freq frequency.Type) time.Time {
	switch freq {
	case frequency.DAILY:
		// the period ends on the same day
	case frequency.WEEKLY:
		date = date.AddDate(0, 0, 6)
	case frequency.MONTHLY:
		// two-step add kept deliberately: normalization can differ from a
		// single AddDate(0, 1, -1) near month ends
		date = date.AddDate(0, 1, 0).AddDate(0, 0, -1)
	case frequency.ANNUALLY:
		date = date.AddDate(1, 0, 0).AddDate(0, 0, -1)
	default:
		return time.Time{}
	}
	return time.Date(date.Year(), date.Month(), date.Day(), 23, 59, 59, 0, date.Location())
}
// getInterestRatePerPeriodInDecimal converts the stored scaled interest
// (divided twice by 100, i.e. presumably a percentage scaled by 100 —
// confirm against callers) into a per-period decimal rate.
// The division order is preserved for bit-identical float results.
func (c *Config) getInterestRatePerPeriodInDecimal() float64 {
	annualRateDecimal := float64(c.Interest) / 100 / 100
	return annualRateDecimal / float64(c.Frequency.Value())
}
|
package main
import "fmt"
// main runs an interactive (Chinese-language) console ledger: a menu loop
// that records income and expenses, keeps a running balance, and stores a
// tab-separated transaction log in memory.
//
// Bug fix: two occurrences of `fmt.Scanln(¬e)` were HTML-entity mojibake
// ("&not" + "e" rendered as "¬e") for `fmt.Scanln(&note)`; the original text
// did not compile.
func main() {
	var key = ""    // menu selection
	var loop = true // cleared when the user confirms exit
	var balance float64 = 0
	var money float64 = 0 // amount for the current transaction
	var flag = false      // true once at least one record exists
	var note = ""         // description of the current transaction
	var detail = "类型\t金额\t余额\t说明" // log header: type/amount/balance/note
	for {
		fmt.Println("收入支出登记")
		fmt.Println("1:查看明细")
		fmt.Println("2:登记收入")
		fmt.Println("3:登记支出")
		fmt.Println("4:退出")
		fmt.Println("请选择1-4")
		fmt.Scanln(&key)
		switch key {
		case "1": // show the ledger
			fmt.Println("1:查看明细")
			if flag {
				fmt.Println(detail)
			} else {
				fmt.Println("暂无记录,请登记一笔记录")
			}
		case "2": // record income
			fmt.Println("输入收入金额...")
			fmt.Scanln(&money)
			balance += money
			fmt.Println("输入收入说明...")
			fmt.Scanln(&note)
			detail += fmt.Sprintf("\n%v\t%v\t%v\t%v\t", "收入", money, balance, note)
			flag = true
		case "3": // record expense (rejected when it exceeds the balance)
			fmt.Println("输入支出金额...")
			fmt.Scanln(&money)
			if money > balance {
				fmt.Println("余额不足...")
				break
			}
			balance -= money
			fmt.Println("输入支出说明...")
			fmt.Scanln(&note)
			detail += fmt.Sprintf("\n%v\t%v\t%v\t%v\t", "支出", money, balance, note)
			flag = true
		case "4": // confirm and exit
			fmt.Println("你确定要退出吗?y/n")
			choice := ""
			for {
				fmt.Scanln(&choice)
				if choice == "y" || choice == "n" {
					break
				}
				fmt.Println("输入有误,请重新输入4?y/n")
			}
			if choice == "y" {
				loop = false
			}
		default:
			fmt.Println("3输入有误")
		}
		if !loop {
			break
		}
	}
	fmt.Println("退出收入支出登记的使用")
}
|
package primitives
import (
"encoding/xml"
"github.com/plandem/ooxml/ml"
)
// FontVAlignType is a type to encode the XSD ST_VerticalAlignRun enumeration.
type FontVAlignType ml.Property

// MarshalXML marshals FontVAlignType by delegating to the underlying ml.Property.
func (t *FontVAlignType) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return (*ml.Property)(t).MarshalXML(e, start)
}

// UnmarshalXML unmarshals FontVAlignType by delegating to the underlying ml.Property.
func (t *FontVAlignType) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	return (*ml.Property)(t).UnmarshalXML(d, start)
}
|
package apple1
import (
"github.com/KaiWalter/go6502/pkg/mc6821"
"github.com/veandco/go-sdl2/sdl"
)
// keyMap holds the Apple-1 byte emitted for one host key in each modifier
// state.
type keyMap struct {
	unmodified byte // byte sent with no modifier held
	shifted    byte // byte sent while Shift is held
	ctrl       byte // byte sent while Ctrl is held
}

var (
	// keyboardMapping translates SDL keycodes into Apple-1 keyboard bytes;
	// populated by initKeyboardMapping.
	keyboardMapping map[sdl.Keycode]keyMap
)
// initKeyboardMapping builds the SDL-keycode to Apple-1 byte table.
// The punctuation/digit shift pairs follow a German (DE) host layout.
func initKeyboardMapping() {
	// DE!
	keyboardMapping = map[sdl.Keycode]keyMap{
		0x08: {unmodified: 0x5F, shifted: 0x5F, ctrl: 0x5F}, // Apple1 keyboard had not backspace - hence _
		0x0D: {unmodified: 0x0D, shifted: 0x0D, ctrl: 0x0D}, // Return (CR)
		' ':  {unmodified: 0x20, shifted: 0x20, ctrl: 0x20},
		'.':  {unmodified: 0x2E, shifted: 0x3A, ctrl: 0x00},
		',':  {unmodified: 0x2C, shifted: 0x3B, ctrl: 0x00},
		'+':  {unmodified: 0x2B, shifted: 0x2A, ctrl: 0x00},
		'-':  {unmodified: 0x2D, shifted: 0x5F, ctrl: 0x00},
		'0':  {unmodified: 0x30, shifted: 0x3D, ctrl: 0x00},
		'1':  {unmodified: 0x31, shifted: 0x21, ctrl: 0x00},
		'2':  {unmodified: 0x32, shifted: 0x22, ctrl: 0x00},
		'3':  {unmodified: 0x33, shifted: 0xA7, ctrl: 0x00},
		'4':  {unmodified: 0x34, shifted: 0x24, ctrl: 0x00},
		'5':  {unmodified: 0x35, shifted: 0x25, ctrl: 0x00},
		'6':  {unmodified: 0x36, shifted: 0x26, ctrl: 0x00},
		'7':  {unmodified: 0x37, shifted: 0x2F, ctrl: 0x00},
		'8':  {unmodified: 0x38, shifted: 0x28, ctrl: 0x00},
		'9':  {unmodified: 0x39, shifted: 0x29, ctrl: 0x00},
	}
	// map characters @ A-Z: lowercase SDL codes 0x60-0x7A map to uppercase
	// ASCII (code - 0x20); the ctrl column maps to control codes (code - 0x60).
	for i := sdl.Keycode(0x60); i <= 0x7a; i++ {
		keyboardMapping[i] = keyMap{unmodified: byte(i - 0x20), shifted: byte(i - 0x20), ctrl: byte(i - 0x60)}
	}
}
// handleKeypressed translates one SDL key event into an Apple-1 keyboard
// byte and pulses the PIA CA1 strobe line around delivering it.
func handleKeypressed(keysym sdl.Keysym) {
	keyvalue, exists := keyboardMapping[keysym.Sym]
	if exists {
		value := keyvalue.unmodified
		// NOTE(review): matches exact modifier masks only (0x01 and 0x40);
		// combined or right-hand modifiers fall through to the unmodified
		// byte — confirm this is intended.
		switch keysym.Mod {
		case 0x01:
			value = keyvalue.shifted
		case 0x40:
			value = keyvalue.ctrl
		}
		// Only deliver printable/control bytes below 0x60; 0x00 entries in the
		// table mean "no mapping" and are dropped here.
		if value > 0x00 && value < 0x60 {
			piaCA1Channel <- mc6821.Fall            // bring keyboard strobe to low to force active transition
			keyboardInputChannelA <- (value | 0x80) // bit 7 is constantly set (+5V)
			piaCA1Channel <- mc6821.Rise            // send only pulse
			piaCA1Channel <- mc6821.Fall            // 20 micro secs are not worth emulating
		}
	}
}
|
package main
/**
309. 最佳买卖股票时机含冷冻期
给定一个整数数组,其中第 i 个元素代表了第 i 天的股票价格 。
设计一个算法计算出最大利润。在满足以下约束条件下,你可以尽可能地完成更多的交易(多次买卖一支股票):
- 你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。
- 卖出股票后,你无法在第二天买入股票 (即冷冻期为 1 天)。
示例:
```
输入: [1,2,3,0,2]
输出: 3
解释: 对应的交易状态为: [买入, 卖出, 冷冻期, 买入, 卖出]
```
*/
/**
State-machine DP for LeetCode 309 (best time to buy/sell stock with a
one-day cooldown after each sale).

Three states per day:
  hold — max profit while holding a share
  sold — max profit having sold a share today (tomorrow is cooldown)
  rest — max profit while idle and free to buy

The previous recurrences were incorrect (the file's own comment flagged it
as ERROR): e.g. for [1,2,3,0,2] it returned 2 instead of the expected 3.
*/
func MaxProfit(prices []int) int {
	n := len(prices)
	if n <= 1 {
		return 0
	}
	hold := -prices[0] // bought on day 0
	sold := 0          // selling is impossible on day 0; 0 is a harmless lower bound
	rest := 0
	for i := 1; i < n; i++ {
		prevSold := sold
		// Sell today what we were holding.
		sold = hold + prices[i]
		// Keep holding, or buy today coming out of a rest day (cooldown respected).
		if b := rest - prices[i]; b > hold {
			hold = b
		}
		// Rest today, coming from rest or from a sale made yesterday.
		if prevSold > rest {
			rest = prevSold
		}
	}
	// Ending the last day still holding is never optimal.
	if sold > rest {
		return sold
	}
	return rest
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
package rds
import (
"fmt"
"testing"
set "github.com/deckarep/golang-set"
xds_route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
"github.com/golang/mock/gomock"
proto "github.com/golang/protobuf/ptypes"
"github.com/google/uuid"
tassert "github.com/stretchr/testify/assert"
"github.com/openservicemesh/osm/pkg/catalog"
"github.com/openservicemesh/osm/pkg/certificate"
"github.com/openservicemesh/osm/pkg/configurator"
"github.com/openservicemesh/osm/pkg/constants"
"github.com/openservicemesh/osm/pkg/envoy"
"github.com/openservicemesh/osm/pkg/service"
"github.com/openservicemesh/osm/pkg/tests"
"github.com/openservicemesh/osm/pkg/trafficpolicy"
)
// TestNewResponse verifies the inbound RDS route configuration that
// newResponse builds from SMI traffic policies and ingress policies.
func TestNewResponse(t *testing.T) {
	assert := tassert.New(t)
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	mockCatalog := catalog.NewMockMeshCataloger(mockCtrl)
	mockConfigurator := configurator.NewMockConfigurator(mockCtrl)
	uuid := uuid.New().String()
	certCommonName := certificate.CommonName(fmt.Sprintf("%s.%s.%s.one.two.three.co.uk", uuid, "some-service", "some-namespace"))
	certSerialNumber := certificate.SerialNumber("123456")
	testProxy := envoy.NewProxy(certCommonName, certSerialNumber, nil)
	testInbound := []*trafficpolicy.InboundTrafficPolicy{
		{
			Name:      "bookstore-v1.default",
			Hostnames: tests.BookstoreV1Hostnames,
			Rules: []*trafficpolicy.Rule{
				{
					Route: trafficpolicy.RouteWeightedClusters{
						HTTPRouteMatch:   tests.BookstoreBuyHTTPRoute,
						WeightedClusters: set.NewSet(tests.BookstoreV1DefaultWeightedCluster),
					},
					AllowedServiceAccounts: set.NewSet(tests.BookstoreServiceAccount),
				},
				{
					Route: trafficpolicy.RouteWeightedClusters{
						HTTPRouteMatch:   tests.BookstoreSellHTTPRoute,
						WeightedClusters: set.NewSet(tests.BookstoreV1DefaultWeightedCluster),
					},
					AllowedServiceAccounts: set.NewSet(tests.BookstoreServiceAccount),
				},
			},
		},
	}
	testIngressInbound := []*trafficpolicy.InboundTrafficPolicy{
		{
			Name:      "bookstore-v1-default-bookstore-v1.default.svc.cluster.local",
			Hostnames: []string{"bookstore-v1.default.svc.cluster.local"},
			Rules: []*trafficpolicy.Rule{
				{
					Route: trafficpolicy.RouteWeightedClusters{
						HTTPRouteMatch: trafficpolicy.HTTPRouteMatch{
							PathRegex: tests.BookstoreBuyPath,
							Methods:   []string{constants.WildcardHTTPMethod},
						},
						WeightedClusters: set.NewSet(tests.BookstoreV1DefaultWeightedCluster),
					},
					AllowedServiceAccounts: set.NewSet(tests.BookstoreServiceAccount),
				},
			},
		},
		{
			Name:      "bookstore-v1.default|*",
			Hostnames: []string{"*"},
			Rules: []*trafficpolicy.Rule{
				{
					Route: trafficpolicy.RouteWeightedClusters{
						HTTPRouteMatch: trafficpolicy.HTTPRouteMatch{
							PathRegex: tests.BookstoreBuyPath,
							Methods:   []string{constants.WildcardHTTPMethod},
						},
						WeightedClusters: set.NewSet(tests.BookstoreV1DefaultWeightedCluster),
					},
					AllowedServiceAccounts: set.NewSet(tests.BookstoreServiceAccount),
				},
			},
		},
	}
	mockCatalog.EXPECT().ListTrafficPoliciesForServiceAccount(gomock.Any()).Return(testInbound, nil, nil).AnyTimes()
	mockCatalog.EXPECT().GetIngressPoliciesForService(gomock.Any(), gomock.Any()).Return(testIngressInbound, nil).AnyTimes()
	mockCatalog.EXPECT().GetServicesFromEnvoyCertificate(gomock.Any()).Return([]service.MeshService{tests.BookstoreV1Service}, nil).AnyTimes()
	mockConfigurator.EXPECT().IsPermissiveTrafficPolicyMode().Return(false).AnyTimes()
	actual, err := newResponse(mockCatalog, testProxy, nil, mockConfigurator, nil)
	assert.Nil(err)
	routeConfig := &xds_route.RouteConfiguration{}
	unmarshallErr := proto.UnmarshalAny(actual.GetResources()[0], routeConfig)
	// Bug fix: this previously tested `err` (already asserted nil above), so
	// unmarshalling failures were silently ignored.
	if unmarshallErr != nil {
		t.Fatal(unmarshallErr)
	}
	assert.Equal("RDS_Inbound", routeConfig.Name)
	assert.Equal(2, len(routeConfig.VirtualHosts))
	assert.Equal("inbound_virtual-host|bookstore-v1.default", routeConfig.VirtualHosts[0].Name)
	assert.Equal(tests.BookstoreV1Hostnames, routeConfig.VirtualHosts[0].Domains)
	assert.Equal(3, len(routeConfig.VirtualHosts[0].Routes))
	assert.Equal(tests.BookstoreBuyHTTPRoute.PathRegex, routeConfig.VirtualHosts[0].Routes[0].GetMatch().GetSafeRegex().Regex)
	assert.Equal(tests.BookstoreSellHTTPRoute.PathRegex, routeConfig.VirtualHosts[0].Routes[1].GetMatch().GetSafeRegex().Regex)
	assert.Equal(tests.BookstoreBuyHTTPRoute.PathRegex, routeConfig.VirtualHosts[0].Routes[2].GetMatch().GetSafeRegex().Regex)
	assert.Equal("inbound_virtual-host|bookstore-v1.default|*", routeConfig.VirtualHosts[1].Name)
	assert.Equal([]string{"*"}, routeConfig.VirtualHosts[1].Domains)
	assert.Equal(1, len(routeConfig.VirtualHosts[1].Routes))
	assert.Equal(tests.BookstoreBuyHTTPRoute.PathRegex, routeConfig.VirtualHosts[1].Routes[0].GetMatch().GetSafeRegex().Regex)
}
// TestNewResponseWithPermissiveMode verifies the inbound and outbound RDS
// route configurations built by newResponse when permissive traffic policy
// mode is enabled.
func TestNewResponseWithPermissiveMode(t *testing.T) {
	assert := tassert.New(t)
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	mockCatalog := catalog.NewMockMeshCataloger(mockCtrl)
	mockConfigurator := configurator.NewMockConfigurator(mockCtrl)
	uuid := uuid.New().String()
	certCommonName := certificate.CommonName(fmt.Sprintf("%s.%s.%s.one.two.three.co.uk", uuid, "some-service", "some-namespace"))
	certSerialNumber := certificate.SerialNumber("123456")
	testProxy := envoy.NewProxy(certCommonName, certSerialNumber, nil)
	testPermissiveInbound := []*trafficpolicy.InboundTrafficPolicy{
		{
			Name:      "bookstore-v1.default",
			Hostnames: tests.BookstoreV1Hostnames,
			Rules: []*trafficpolicy.Rule{
				{
					Route: trafficpolicy.RouteWeightedClusters{
						HTTPRouteMatch: trafficpolicy.HTTPRouteMatch{
							PathRegex: constants.RegexMatchAll,
							Methods:   []string{constants.WildcardHTTPMethod},
						},
						WeightedClusters: set.NewSet(tests.BookstoreV1DefaultWeightedCluster),
					},
					AllowedServiceAccounts: set.NewSet(tests.BookstoreServiceAccount),
				},
			},
		},
	}
	testOutbound := []*trafficpolicy.OutboundTrafficPolicy{
		{
			Name: "bookbuyer.default",
			Hostnames: []string{
				"bookbuyer.default",
				"bookbuyer.default.svc",
				"bookbuyer.default.svc.cluster",
				"bookbuyer.default.svc.cluster.local",
				"bookbuyer.default:8888",
				"bookbuyer.default.svc:8888",
				"bookbuyer.default.svc.cluster:8888",
				"bookbuyer.default.svc.cluster.local:8888",
			},
			Routes: []*trafficpolicy.RouteWeightedClusters{
				{
					HTTPRouteMatch: trafficpolicy.HTTPRouteMatch{
						PathRegex: constants.RegexMatchAll,
						Methods:   []string{constants.WildcardHTTPMethod},
					},
					WeightedClusters: set.NewSet(tests.BookstoreV1DefaultWeightedCluster),
				},
			},
		},
	}
	testIngressInbound := []*trafficpolicy.InboundTrafficPolicy{
		{
			Name:      "bookstore-v1-default-bookstore-v1.default.svc.cluster.local",
			Hostnames: []string{"bookstore-v1.default.svc.cluster.local"},
			Rules: []*trafficpolicy.Rule{
				{
					Route: trafficpolicy.RouteWeightedClusters{
						HTTPRouteMatch: trafficpolicy.HTTPRouteMatch{
							PathRegex: tests.BookstoreBuyPath,
							Methods:   []string{constants.WildcardHTTPMethod},
						},
						WeightedClusters: set.NewSet(tests.BookstoreV1DefaultWeightedCluster),
					},
					AllowedServiceAccounts: set.NewSet(tests.BookstoreServiceAccount),
				},
			},
		},
		{
			Name:      "bookstore-v1.default|*",
			Hostnames: []string{"*"},
			Rules: []*trafficpolicy.Rule{
				{
					Route: trafficpolicy.RouteWeightedClusters{
						HTTPRouteMatch: trafficpolicy.HTTPRouteMatch{
							PathRegex: tests.BookstoreBuyPath,
							Methods:   []string{constants.WildcardHTTPMethod},
						},
						WeightedClusters: set.NewSet(tests.BookstoreV1DefaultWeightedCluster),
					},
					AllowedServiceAccounts: set.NewSet(tests.BookstoreServiceAccount),
				},
			},
		},
	}
	mockCatalog.EXPECT().GetServicesFromEnvoyCertificate(gomock.Any()).Return([]service.MeshService{tests.BookstoreV1Service}, nil).AnyTimes()
	mockCatalog.EXPECT().ListPoliciesForPermissiveMode(gomock.Any()).Return(testPermissiveInbound, testOutbound, nil).AnyTimes()
	mockCatalog.EXPECT().GetIngressPoliciesForService(gomock.Any(), gomock.Any()).Return(testIngressInbound, nil).AnyTimes()
	mockConfigurator.EXPECT().IsPermissiveTrafficPolicyMode().Return(true).AnyTimes()
	actual, err := newResponse(mockCatalog, testProxy, nil, mockConfigurator, nil)
	assert.Nil(err)
	routeConfig := &xds_route.RouteConfiguration{}
	unmarshallErr := proto.UnmarshalAny(actual.GetResources()[0], routeConfig)
	// Bug fix: previously checked `err` (already asserted nil) instead of
	// unmarshallErr, silently ignoring unmarshalling failures.
	if unmarshallErr != nil {
		t.Fatal(unmarshallErr)
	}
	assert.Equal("RDS_Inbound", routeConfig.Name)
	assert.Equal(2, len(routeConfig.VirtualHosts))
	assert.Equal("inbound_virtual-host|bookstore-v1.default", routeConfig.VirtualHosts[0].Name)
	assert.Equal(tests.BookstoreV1Hostnames, routeConfig.VirtualHosts[0].Domains)
	assert.Equal(2, len(routeConfig.VirtualHosts[0].Routes))
	assert.Equal(constants.RegexMatchAll, routeConfig.VirtualHosts[0].Routes[0].GetMatch().GetSafeRegex().Regex)
	assert.Equal(tests.BookstoreBuyHTTPRoute.PathRegex, routeConfig.VirtualHosts[0].Routes[1].GetMatch().GetSafeRegex().Regex)
	assert.Equal("inbound_virtual-host|bookstore-v1.default|*", routeConfig.VirtualHosts[1].Name)
	assert.Equal([]string{"*"}, routeConfig.VirtualHosts[1].Domains)
	assert.Equal(1, len(routeConfig.VirtualHosts[1].Routes))
	assert.Equal(tests.BookstoreBuyHTTPRoute.PathRegex, routeConfig.VirtualHosts[1].Routes[0].GetMatch().GetSafeRegex().Regex)
	routeConfig = &xds_route.RouteConfiguration{}
	unmarshallErr = proto.UnmarshalAny(actual.GetResources()[1], routeConfig)
	// Bug fix: same wrong-variable check as above.
	if unmarshallErr != nil {
		t.Fatal(unmarshallErr)
	}
	assert.Equal("RDS_Outbound", routeConfig.Name)
	assert.Equal(1, len(routeConfig.VirtualHosts))
	assert.Equal("outbound_virtual-host|bookbuyer.default", routeConfig.VirtualHosts[0].Name)
	assert.Equal(1, len(routeConfig.VirtualHosts[0].Routes))
	assert.Equal(constants.RegexMatchAll, routeConfig.VirtualHosts[0].Routes[0].GetMatch().GetSafeRegex().Regex)
}
|
package main
import (
	"fmt"
	"html/template"
	"log"
	"net/http"
)
// index renders index.html to the response. template.Must panics if the
// file cannot be parsed.
func index(w http.ResponseWriter, r *http.Request) {
	t := template.Must(template.ParseFiles("index.html"))
	// Bug fix: the Execute error was silently dropped. Headers may already
	// have been written, so the failure is logged rather than re-sent.
	if err := t.Execute(w, nil); err != nil {
		log.Printf("rendering index.html: %v", err)
	}
}
// main wires up static-asset handlers and the index page, then serves HTTP
// on port 8001.
func main() {
	http.Handle("/css/", http.StripPrefix("/css/", http.FileServer(http.Dir("css/"))))
	http.Handle("/fonts/", http.StripPrefix("/fonts/", http.FileServer(http.Dir("fonts/"))))
	http.Handle("/js/", http.StripPrefix("/js/", http.FileServer(http.Dir("js/"))))
	http.HandleFunc("/", index)
	fmt.Printf("Servidor escuchando en: http://localhost:8001/")
	// Bug fix: ListenAndServe always returns a non-nil error; previously it
	// was discarded and the process exited silently.
	if err := http.ListenAndServe(":8001", nil); err != nil {
		log.Fatal(err)
	}
}
|
package write
import (
"os"
"fmt"
"encoding/binary"
//"bufio
"log"
)
// WriteChannel is a sink for key/value storage records; implementations
// return the number of bytes written for each record.
type WriteChannel interface {
	WritePair(k []byte, v []byte) int
	WriteDeleteMarker(k []byte) int
}
type DiskWriteChannel struct {
segment_file *os.File
}
func NewDiskWriteChannel(segment_file *os.File) *DiskWriteChannel {
return &DiskWriteChannel{segment_file}
}
/**
wireformat markers:
41: key value spit
42: tombstone
*/
func (c DiskWriteChannel) WritePair(k []byte, v []byte) int {
k_len := len(k)
v_len := len(v)
log.Printf("WritePair called with key len %d and value len %d", k_len, v_len)
wireformat := append(
append(
append(int2Bytes(uint32(k_len)),
k...),
append(int2Bytes(41),
int2Bytes(uint32(v_len))...)...
),
v...)
written_bytes, err := c.segment_file.Write(wireformat)
if err != nil {
fmt.Println(err)
panic(err)
} else {
fmt.Printf("wrote %d bytes\n", written_bytes)
}
return written_bytes
}
func (c DiskWriteChannel) WriteDeleteMarker(k []byte) int {
fmt.Println("WriteDeleteMarker")
k_len := len(k)
fmt.Println(k_len)
wireformat := append(append(int2Bytes(uint32(k_len)), k...), int2Bytes(42)...)
written_bytes, err := c.segment_file.Write(wireformat)
if err != nil {
fmt.Println(err)
panic(err)
} else {
fmt.Printf("wrote %d bytes\n", written_bytes)
}
return written_bytes
}
func int2Bytes(i uint32) []byte {
bs := make([]byte, 4)
binary.BigEndian.PutUint32(bs, i)
return bs
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/apigee/alpha/apigee_alpha_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/alpha"
)
// OrganizationServer implements the gRPC interface for Organization, using
// the converters in this file to map between proto and DCL representations.
type OrganizationServer struct{}
// ProtoToOrganizationRuntimeTypeEnum converts a OrganizationRuntimeTypeEnum enum from its proto representation.
func ProtoToApigeeAlphaOrganizationRuntimeTypeEnum(e alphapb.ApigeeAlphaOrganizationRuntimeTypeEnum) *alpha.OrganizationRuntimeTypeEnum {
	// 0 is the proto "unspecified" sentinel; map it to nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.ApigeeAlphaOrganizationRuntimeTypeEnum_name[int32(e)]; ok {
		// Strip the proto enum's type-name prefix to recover the bare value.
		e := alpha.OrganizationRuntimeTypeEnum(n[len("ApigeeAlphaOrganizationRuntimeTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToOrganizationSubscriptionTypeEnum converts a OrganizationSubscriptionTypeEnum enum from its proto representation.
func ProtoToApigeeAlphaOrganizationSubscriptionTypeEnum(e alphapb.ApigeeAlphaOrganizationSubscriptionTypeEnum) *alpha.OrganizationSubscriptionTypeEnum {
	// 0 is the proto "unspecified" sentinel; map it to nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.ApigeeAlphaOrganizationSubscriptionTypeEnum_name[int32(e)]; ok {
		// Strip the proto enum's type-name prefix to recover the bare value.
		e := alpha.OrganizationSubscriptionTypeEnum(n[len("ApigeeAlphaOrganizationSubscriptionTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToOrganizationBillingTypeEnum converts a OrganizationBillingTypeEnum enum from its proto representation.
func ProtoToApigeeAlphaOrganizationBillingTypeEnum(e alphapb.ApigeeAlphaOrganizationBillingTypeEnum) *alpha.OrganizationBillingTypeEnum {
	// 0 is the proto "unspecified" sentinel; map it to nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.ApigeeAlphaOrganizationBillingTypeEnum_name[int32(e)]; ok {
		// Strip the proto enum's type-name prefix to recover the bare value.
		e := alpha.OrganizationBillingTypeEnum(n[len("ApigeeAlphaOrganizationBillingTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToOrganizationStateEnum converts a OrganizationStateEnum enum from its proto representation.
func ProtoToApigeeAlphaOrganizationStateEnum(e alphapb.ApigeeAlphaOrganizationStateEnum) *alpha.OrganizationStateEnum {
	// 0 is the proto "unspecified" sentinel; map it to nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.ApigeeAlphaOrganizationStateEnum_name[int32(e)]; ok {
		// Strip the proto enum's type-name prefix to recover the bare value.
		e := alpha.OrganizationStateEnum(n[len("ApigeeAlphaOrganizationStateEnum"):])
		return &e
	}
	return nil
}
// ProtoToOrganizationAddonsConfig converts a OrganizationAddonsConfig object from its proto representation.
// A nil proto yields a nil object.
func ProtoToApigeeAlphaOrganizationAddonsConfig(p *alphapb.ApigeeAlphaOrganizationAddonsConfig) *alpha.OrganizationAddonsConfig {
	if p == nil {
		return nil
	}
	obj := &alpha.OrganizationAddonsConfig{
		AdvancedApiOpsConfig: ProtoToApigeeAlphaOrganizationAddonsConfigAdvancedApiOpsConfig(p.GetAdvancedApiOpsConfig()),
		MonetizationConfig:   ProtoToApigeeAlphaOrganizationAddonsConfigMonetizationConfig(p.GetMonetizationConfig()),
	}
	return obj
}
// ProtoToOrganizationAddonsConfigAdvancedApiOpsConfig converts a OrganizationAddonsConfigAdvancedApiOpsConfig object from its proto representation.
// A nil proto yields a nil object.
func ProtoToApigeeAlphaOrganizationAddonsConfigAdvancedApiOpsConfig(p *alphapb.ApigeeAlphaOrganizationAddonsConfigAdvancedApiOpsConfig) *alpha.OrganizationAddonsConfigAdvancedApiOpsConfig {
	if p == nil {
		return nil
	}
	obj := &alpha.OrganizationAddonsConfigAdvancedApiOpsConfig{
		Enabled: dcl.Bool(p.GetEnabled()),
	}
	return obj
}
// ProtoToOrganizationAddonsConfigMonetizationConfig converts a OrganizationAddonsConfigMonetizationConfig object from its proto representation.
// A nil proto yields a nil object.
func ProtoToApigeeAlphaOrganizationAddonsConfigMonetizationConfig(p *alphapb.ApigeeAlphaOrganizationAddonsConfigMonetizationConfig) *alpha.OrganizationAddonsConfigMonetizationConfig {
	if p == nil {
		return nil
	}
	obj := &alpha.OrganizationAddonsConfigMonetizationConfig{
		Enabled: dcl.Bool(p.GetEnabled()),
	}
	return obj
}
// ProtoToOrganization converts a Organization resource from its proto representation.
func ProtoToOrganization(p *alphapb.ApigeeAlphaOrganization) *alpha.Organization {
	obj := &alpha.Organization{
		Name:                             dcl.StringOrNil(p.GetName()),
		DisplayName:                      dcl.StringOrNil(p.GetDisplayName()),
		Description:                      dcl.StringOrNil(p.GetDescription()),
		CreatedAt:                        dcl.Int64OrNil(p.GetCreatedAt()),
		LastModifiedAt:                   dcl.Int64OrNil(p.GetLastModifiedAt()),
		ExpiresAt:                        dcl.Int64OrNil(p.GetExpiresAt()),
		AnalyticsRegion:                  dcl.StringOrNil(p.GetAnalyticsRegion()),
		AuthorizedNetwork:                dcl.StringOrNil(p.GetAuthorizedNetwork()),
		RuntimeType:                      ProtoToApigeeAlphaOrganizationRuntimeTypeEnum(p.GetRuntimeType()),
		SubscriptionType:                 ProtoToApigeeAlphaOrganizationSubscriptionTypeEnum(p.GetSubscriptionType()),
		BillingType:                      ProtoToApigeeAlphaOrganizationBillingTypeEnum(p.GetBillingType()),
		AddonsConfig:                     ProtoToApigeeAlphaOrganizationAddonsConfig(p.GetAddonsConfig()),
		CaCertificate:                    dcl.StringOrNil(p.GetCaCertificate()),
		RuntimeDatabaseEncryptionKeyName: dcl.StringOrNil(p.GetRuntimeDatabaseEncryptionKeyName()),
		ProjectId:                        dcl.StringOrNil(p.GetProjectId()),
		State:                            ProtoToApigeeAlphaOrganizationStateEnum(p.GetState()),
		Project:                          dcl.StringOrNil(p.GetProject()),
	}
	// Copy the repeated environments field element by element.
	for _, r := range p.GetEnvironments() {
		obj.Environments = append(obj.Environments, r)
	}
	return obj
}
// OrganizationRuntimeTypeEnumToProto converts a OrganizationRuntimeTypeEnum enum to its proto representation.
// A nil value (unset) encodes as the proto zero ("unspecified") value.
func ApigeeAlphaOrganizationRuntimeTypeEnumToProto(e *alpha.OrganizationRuntimeTypeEnum) alphapb.ApigeeAlphaOrganizationRuntimeTypeEnum {
	if e == nil {
		return alphapb.ApigeeAlphaOrganizationRuntimeTypeEnum(0)
	}
	if v, ok := alphapb.ApigeeAlphaOrganizationRuntimeTypeEnum_value["OrganizationRuntimeTypeEnum"+string(*e)]; ok {
		return alphapb.ApigeeAlphaOrganizationRuntimeTypeEnum(v)
	}
	return alphapb.ApigeeAlphaOrganizationRuntimeTypeEnum(0)
}
// ApigeeAlphaOrganizationSubscriptionTypeEnumToProto converts a
// OrganizationSubscriptionTypeEnum enum to its proto representation.
func ApigeeAlphaOrganizationSubscriptionTypeEnumToProto(e *alpha.OrganizationSubscriptionTypeEnum) alphapb.ApigeeAlphaOrganizationSubscriptionTypeEnum {
	if e == nil {
		return alphapb.ApigeeAlphaOrganizationSubscriptionTypeEnum(0)
	}
	v, ok := alphapb.ApigeeAlphaOrganizationSubscriptionTypeEnum_value["OrganizationSubscriptionTypeEnum"+string(*e)]
	if !ok {
		// Unknown values fall back to the zero (unspecified) proto enum.
		return alphapb.ApigeeAlphaOrganizationSubscriptionTypeEnum(0)
	}
	return alphapb.ApigeeAlphaOrganizationSubscriptionTypeEnum(v)
}
// ApigeeAlphaOrganizationBillingTypeEnumToProto converts a
// OrganizationBillingTypeEnum enum to its proto representation.
func ApigeeAlphaOrganizationBillingTypeEnumToProto(e *alpha.OrganizationBillingTypeEnum) alphapb.ApigeeAlphaOrganizationBillingTypeEnum {
	if e == nil {
		return alphapb.ApigeeAlphaOrganizationBillingTypeEnum(0)
	}
	v, ok := alphapb.ApigeeAlphaOrganizationBillingTypeEnum_value["OrganizationBillingTypeEnum"+string(*e)]
	if !ok {
		// Unknown values fall back to the zero (unspecified) proto enum.
		return alphapb.ApigeeAlphaOrganizationBillingTypeEnum(0)
	}
	return alphapb.ApigeeAlphaOrganizationBillingTypeEnum(v)
}
// ApigeeAlphaOrganizationStateEnumToProto converts a OrganizationStateEnum
// enum to its proto representation.
func ApigeeAlphaOrganizationStateEnumToProto(e *alpha.OrganizationStateEnum) alphapb.ApigeeAlphaOrganizationStateEnum {
	if e == nil {
		return alphapb.ApigeeAlphaOrganizationStateEnum(0)
	}
	v, ok := alphapb.ApigeeAlphaOrganizationStateEnum_value["OrganizationStateEnum"+string(*e)]
	if !ok {
		// Unknown values fall back to the zero (unspecified) proto enum.
		return alphapb.ApigeeAlphaOrganizationStateEnum(0)
	}
	return alphapb.ApigeeAlphaOrganizationStateEnum(v)
}
// ApigeeAlphaOrganizationAddonsConfigToProto converts a
// OrganizationAddonsConfig object to its proto representation.
func ApigeeAlphaOrganizationAddonsConfigToProto(o *alpha.OrganizationAddonsConfig) *alphapb.ApigeeAlphaOrganizationAddonsConfig {
	if o == nil {
		return nil
	}
	proto := &alphapb.ApigeeAlphaOrganizationAddonsConfig{}
	proto.SetAdvancedApiOpsConfig(ApigeeAlphaOrganizationAddonsConfigAdvancedApiOpsConfigToProto(o.AdvancedApiOpsConfig))
	proto.SetMonetizationConfig(ApigeeAlphaOrganizationAddonsConfigMonetizationConfigToProto(o.MonetizationConfig))
	return proto
}
// ApigeeAlphaOrganizationAddonsConfigAdvancedApiOpsConfigToProto converts a
// OrganizationAddonsConfigAdvancedApiOpsConfig object to its proto representation.
func ApigeeAlphaOrganizationAddonsConfigAdvancedApiOpsConfigToProto(o *alpha.OrganizationAddonsConfigAdvancedApiOpsConfig) *alphapb.ApigeeAlphaOrganizationAddonsConfigAdvancedApiOpsConfig {
	if o == nil {
		return nil
	}
	proto := &alphapb.ApigeeAlphaOrganizationAddonsConfigAdvancedApiOpsConfig{}
	proto.SetEnabled(dcl.ValueOrEmptyBool(o.Enabled))
	return proto
}
// ApigeeAlphaOrganizationAddonsConfigMonetizationConfigToProto converts a
// OrganizationAddonsConfigMonetizationConfig object to its proto representation.
func ApigeeAlphaOrganizationAddonsConfigMonetizationConfigToProto(o *alpha.OrganizationAddonsConfigMonetizationConfig) *alphapb.ApigeeAlphaOrganizationAddonsConfigMonetizationConfig {
	if o == nil {
		return nil
	}
	proto := &alphapb.ApigeeAlphaOrganizationAddonsConfigMonetizationConfig{}
	proto.SetEnabled(dcl.ValueOrEmptyBool(o.Enabled))
	return proto
}
// OrganizationToProto converts a Organization resource to its proto
// representation.
func OrganizationToProto(resource *alpha.Organization) *alphapb.ApigeeAlphaOrganization {
	proto := &alphapb.ApigeeAlphaOrganization{}
	proto.SetName(dcl.ValueOrEmptyString(resource.Name))
	proto.SetDisplayName(dcl.ValueOrEmptyString(resource.DisplayName))
	proto.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	proto.SetCreatedAt(dcl.ValueOrEmptyInt64(resource.CreatedAt))
	proto.SetLastModifiedAt(dcl.ValueOrEmptyInt64(resource.LastModifiedAt))
	proto.SetExpiresAt(dcl.ValueOrEmptyInt64(resource.ExpiresAt))
	proto.SetAnalyticsRegion(dcl.ValueOrEmptyString(resource.AnalyticsRegion))
	proto.SetAuthorizedNetwork(dcl.ValueOrEmptyString(resource.AuthorizedNetwork))
	proto.SetRuntimeType(ApigeeAlphaOrganizationRuntimeTypeEnumToProto(resource.RuntimeType))
	proto.SetSubscriptionType(ApigeeAlphaOrganizationSubscriptionTypeEnumToProto(resource.SubscriptionType))
	proto.SetBillingType(ApigeeAlphaOrganizationBillingTypeEnumToProto(resource.BillingType))
	proto.SetAddonsConfig(ApigeeAlphaOrganizationAddonsConfigToProto(resource.AddonsConfig))
	proto.SetCaCertificate(dcl.ValueOrEmptyString(resource.CaCertificate))
	proto.SetRuntimeDatabaseEncryptionKeyName(dcl.ValueOrEmptyString(resource.RuntimeDatabaseEncryptionKeyName))
	proto.SetProjectId(dcl.ValueOrEmptyString(resource.ProjectId))
	proto.SetState(ApigeeAlphaOrganizationStateEnumToProto(resource.State))
	proto.SetProject(dcl.ValueOrEmptyString(resource.Project))
	// Copy the repeated string field with the builtin copy.
	sEnvironments := make([]string, len(resource.Environments))
	copy(sEnvironments, resource.Environments)
	proto.SetEnvironments(sEnvironments)
	// Shallow-copy the properties map so the proto does not alias it.
	mProperties := make(map[string]string, len(resource.Properties))
	for k, v := range resource.Properties {
		mProperties[k] = v
	}
	proto.SetProperties(mProperties)
	return proto
}
// applyOrganization handles the gRPC request by passing it to the underlying
// Organization Apply() method.
func (s *OrganizationServer) applyOrganization(ctx context.Context, c *alpha.Client, request *alphapb.ApplyApigeeAlphaOrganizationRequest) (*alphapb.ApigeeAlphaOrganization, error) {
	res, err := c.ApplyOrganization(ctx, ProtoToOrganization(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return OrganizationToProto(res), nil
}
// ApplyApigeeAlphaOrganization handles the gRPC request by passing it to the
// underlying Organization Apply() method.
func (s *OrganizationServer) ApplyApigeeAlphaOrganization(ctx context.Context, request *alphapb.ApplyApigeeAlphaOrganizationRequest) (*alphapb.ApigeeAlphaOrganization, error) {
	client, err := createConfigOrganization(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyOrganization(ctx, client, request)
}
// DeleteApigeeAlphaOrganization handles the gRPC request by passing it to the
// underlying Organization Delete() method.
func (s *OrganizationServer) DeleteApigeeAlphaOrganization(ctx context.Context, request *alphapb.DeleteApigeeAlphaOrganizationRequest) (*emptypb.Empty, error) {
	client, err := createConfigOrganization(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	// An empty proto is always returned; the delete error (if any) rides along.
	return &emptypb.Empty{}, client.DeleteOrganization(ctx, ProtoToOrganization(request.GetResource()))
}
// ListApigeeAlphaOrganization handles the gRPC request by passing it to the
// underlying OrganizationList() method.
func (s *OrganizationServer) ListApigeeAlphaOrganization(ctx context.Context, request *alphapb.ListApigeeAlphaOrganizationRequest) (*alphapb.ListApigeeAlphaOrganizationResponse, error) {
	client, err := createConfigOrganization(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := client.ListOrganization(ctx)
	if err != nil {
		return nil, err
	}
	var protos []*alphapb.ApigeeAlphaOrganization
	for _, item := range resources.Items {
		protos = append(protos, OrganizationToProto(item))
	}
	response := &alphapb.ListApigeeAlphaOrganizationResponse{}
	response.SetItems(protos)
	return response, nil
}
// createConfigOrganization builds an alpha API client authenticated with the
// credentials in the given service-account file. The ctx parameter is kept
// for interface symmetry with the callers; it is not used here.
func createConfigOrganization(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	// Go convention: parameter names use mixedCaps, not snake_case.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
package domain
import (
"errors"
"time"
uuid "github.com/satori/go.uuid"
"golang.org/x/crypto/bcrypt"
)
// User represents an application account as stored in the database and
// serialized to JSON. The db tags map fields to database columns; the
// password is excluded from JSON output via the `json:"-"` tag.
type User struct {
	ID        uuid.UUID `db:"id" json:"id"`
	Name      string    `db:"name" json:"name"`
	Email     string    `db:"email" json:"email"`
	Password  string    `db:"password" json:"-"` // hashed password; never serialized to JSON
	CreatedAt time.Time `db:"created_at" json:"created_at"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}
// UserService abstracts the infrastructure operations needed to create a
// user: password hashing, email-uniqueness checks, and token generation.
type UserService interface {
	// HashPassword returns a hashed form of the plaintext password.
	HashPassword(password string) (string, error)
	// IsEmailAlreadyExists reports whether a user with the given email exists.
	IsEmailAlreadyExists(email string) (bool, error)
	// GenerateToken returns a new token (semantics defined by the implementation).
	GenerateToken() string
}
// NewUser builds a User with a hashed password after verifying that the
// email address is not already registered.
//
// It returns an error when password hashing fails, when the uniqueness
// lookup fails, or when the email is already taken.
func NewUser(service UserService, name, email, password string) (*User, error) {
	now := time.Now()
	pwd, err := service.HashPassword(password)
	if err != nil {
		return nil, err
	}
	isExists, err := service.IsEmailAlreadyExists(email)
	if err != nil {
		// BUG FIX: this error was previously ignored, so a failed lookup
		// (e.g. a database outage) let registration proceed unchecked.
		return nil, err
	}
	if isExists {
		return nil, errors.New("Email has already been taken")
	}
	return &User{
		ID:        uuid.NewV4(),
		Name:      name,
		Email:     email,
		Password:  pwd,
		CreatedAt: now,
		UpdatedAt: now,
	}, nil
}
// ChangePassword replaces the user's stored password with a bcrypt hash of
// the supplied plaintext. On hashing failure the stored password is left
// untouched and the error is returned.
func (u *User) ChangePassword(password string) error {
	hashed, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		return err
	}
	u.Password = string(hashed)
	return nil
}
|
package manifests
import (
"errors"
"fmt"
"os"
"os/exec"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
"github.com/openshift/assisted-service/models"
"github.com/openshift/installer/pkg/asset"
"github.com/openshift/installer/pkg/asset/mock"
)
// TestNMStateConfig_Generate exercises NMStateConfig.Generate against a table
// of agent-config/install-config combinations: no networkConfig at all, a mix
// of hosts with and without networkConfig, a fully static config, and invalid
// network YAML.
func TestNMStateConfig_Generate(t *testing.T) {
	cases := []struct {
		name string
		// dependencies are the parent assets handed to Generate.
		dependencies []asset.Asset
		// requiresNmstatectl marks cases that need the nmstatectl binary
		// (static-network validation shells out to it).
		requiresNmstatectl bool
		// expectedConfig is the set of manifests Generate should produce;
		// nil when none are expected.
		expectedConfig []*aiv1beta1.NMStateConfig
		// expectedError is a substring of the expected error, "" on success.
		expectedError string
	}{
		{
			name: "agent-config does not contain networkConfig",
			dependencies: []asset.Asset{
				getValidDHCPAgentConfigNoHosts(),
				getValidOptionalInstallConfig(),
			},
			requiresNmstatectl: false,
			expectedConfig:     nil,
			expectedError:      "",
		},
		{
			name: "valid dhcp agent config with some hosts without networkconfig",
			dependencies: []asset.Asset{
				getValidDHCPAgentConfigWithSomeHostsWithoutNetworkConfig(),
				getValidOptionalInstallConfig(),
			},
			requiresNmstatectl: true,
			expectedConfig: []*aiv1beta1.NMStateConfig{
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "NMStateConfig",
						APIVersion: "agent-install.openshift.io/v1beta1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprint(getNMStateConfigName(getValidOptionalInstallConfig()), "-0"),
						Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
						Labels:    getNMStateConfigLabels(getValidOptionalInstallConfig()),
					},
					Spec: aiv1beta1.NMStateConfigSpec{
						Interfaces: []*aiv1beta1.Interface{
							{
								Name:       "enp2t0",
								MacAddress: "98:af:65:a5:8d:02",
							},
						},
						NetConfig: aiv1beta1.NetConfig{
							Raw: unmarshalJSON([]byte(rawNMStateConfigNoIP)),
						},
					},
				},
			},
			expectedError: "",
		},
		{
			name: "valid config",
			dependencies: []asset.Asset{
				getValidAgentConfig(),
				getValidOptionalInstallConfig(),
			},
			requiresNmstatectl: true,
			expectedConfig: []*aiv1beta1.NMStateConfig{
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "NMStateConfig",
						APIVersion: "agent-install.openshift.io/v1beta1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprint(getNMStateConfigName(getValidOptionalInstallConfig()), "-0"),
						Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
						Labels:    getNMStateConfigLabels(getValidOptionalInstallConfig()),
					},
					Spec: aiv1beta1.NMStateConfigSpec{
						Interfaces: []*aiv1beta1.Interface{
							{
								Name:       "enp2s0",
								MacAddress: "98:af:65:a5:8d:01",
							},
							{
								Name:       "enp3s1",
								MacAddress: "28:d2:44:d2:b2:1a",
							},
						},
						NetConfig: aiv1beta1.NetConfig{
							Raw: unmarshalJSON([]byte(rawNMStateConfig)),
						},
					},
				},
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "NMStateConfig",
						APIVersion: "agent-install.openshift.io/v1beta1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprint(getNMStateConfigName(getValidOptionalInstallConfig()), "-1"),
						Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
						Labels:    getNMStateConfigLabels(getValidOptionalInstallConfig()),
					},
					Spec: aiv1beta1.NMStateConfigSpec{
						Interfaces: []*aiv1beta1.Interface{
							{
								Name:       "enp2t0",
								MacAddress: "98:af:65:a5:8d:02",
							},
						},
						NetConfig: aiv1beta1.NetConfig{
							Raw: unmarshalJSON([]byte(rawNMStateConfig)),
						},
					},
				},
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "NMStateConfig",
						APIVersion: "agent-install.openshift.io/v1beta1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprint(getNMStateConfigName(getValidOptionalInstallConfig()), "-2"),
						Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
						Labels:    getNMStateConfigLabels(getValidOptionalInstallConfig()),
					},
					Spec: aiv1beta1.NMStateConfigSpec{
						Interfaces: []*aiv1beta1.Interface{
							{
								Name:       "enp2u0",
								MacAddress: "98:af:65:a5:8d:03",
							},
						},
						NetConfig: aiv1beta1.NetConfig{
							Raw: unmarshalJSON([]byte(rawNMStateConfig)),
						},
					},
				},
			},
			expectedError: "",
		},
		{
			name: "invalid networkConfig",
			dependencies: []asset.Asset{
				getInValidAgentConfig(),
				getValidOptionalInstallConfig(),
			},
			requiresNmstatectl: true,
			expectedConfig:     nil,
			expectedError:      "failed to validate network yaml",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			parents := asset.Parents{}
			parents.Add(tc.dependencies...)

			asset := &NMStateConfig{}
			err := asset.Generate(parents)

			// Check if the test failed because nmstatectl is not available in CI
			if tc.requiresNmstatectl {
				_, execErr := exec.LookPath("nmstatectl")
				if execErr != nil {
					assert.ErrorContains(t, err, "executable file not found")
					t.Skip("No nmstatectl binary available")
				}
			}

			switch {
			case tc.expectedError != "":
				assert.ErrorContains(t, err, tc.expectedError)
			case len(tc.expectedConfig) == 0:
				assert.NoError(t, err)
				assert.Equal(t, tc.expectedConfig, asset.Config)
			default:
				assert.NoError(t, err)
				assert.Equal(t, tc.expectedConfig, asset.Config)
				assert.NotEmpty(t, asset.Files())

				configFile := asset.Files()[0]
				assert.Equal(t, "cluster-manifests/nmstateconfig.yaml", configFile.Filename)

				// Split up the file into multiple YAMLs if it contains NMStateConfig for more than one node
				yamlList, err := GetMultipleYamls[aiv1beta1.NMStateConfig](configFile.Data)
				assert.NoError(t, err)
				assert.Equal(t, len(tc.expectedConfig), len(yamlList))

				for i := range tc.expectedConfig {
					assert.Equal(t, *tc.expectedConfig[i], yamlList[i])
				}
				assert.Equal(t, len(tc.expectedConfig), len(asset.StaticNetworkConfig))
			}
		})
	}
}
// TestNMStateConfig_LoadedFromDisk verifies NMStateConfig.Load: parsing one
// or more NMStateConfig YAML documents fetched from disk, deriving the static
// network config, and surfacing fetch/decode/validation errors.
func TestNMStateConfig_LoadedFromDisk(t *testing.T) {
	cases := []struct {
		name string
		// data is the raw file content returned by the mocked file fetcher.
		data string
		// fetchError, when non-nil, is returned by the fetcher instead of data.
		fetchError error
		// expectedFound is the expected "found" result from Load.
		expectedFound bool
		// expectedError is a substring of the expected error, "" on success.
		expectedError string
		// requiresNmstatectl marks cases that need the nmstatectl binary.
		requiresNmstatectl bool
		// expectedConfig is the static network config Load should derive.
		expectedConfig []*models.HostStaticNetworkConfig
	}{
		{
			name: "valid-config-file",
			data: `
metadata:
  name: mynmstateconfig
  namespace: spoke-cluster
  labels:
    cluster0-nmstate-label-name: cluster0-nmstate-label-value
spec:
  config:
    interfaces:
      - name: eth0
        type: ethernet
        state: up
        mac-address: 52:54:01:aa:aa:a1
        ipv4:
          enabled: true
          address:
            - ip: 192.168.122.21
              prefix-length: 24
          dhcp: false
    dns-resolver:
      config:
        server:
          - 192.168.122.1
    routes:
      config:
        - destination: 0.0.0.0/0
          next-hop-address: 192.168.122.1
          next-hop-interface: eth0
          table-id: 254
  interfaces:
    - name: "eth0"
      macAddress: "52:54:01:aa:aa:a1"
    - name: "eth1"
      macAddress: "52:54:01:bb:bb:b1"`,
			requiresNmstatectl: true,
			expectedFound:      true,
			expectedConfig: []*models.HostStaticNetworkConfig{
				{
					MacInterfaceMap: models.MacInterfaceMap{
						{LogicalNicName: "eth0", MacAddress: "52:54:01:aa:aa:a1"},
						{LogicalNicName: "eth1", MacAddress: "52:54:01:bb:bb:b1"},
					},
					NetworkYaml: "dns-resolver:\n config:\n server:\n - 192.168.122.1\ninterfaces:\n- ipv4:\n address:\n - ip: 192.168.122.21\n prefix-length: 24\n dhcp: false\n enabled: true\n mac-address: 52:54:01:aa:aa:a1\n name: eth0\n state: up\n type: ethernet\nroutes:\n config:\n - destination: 0.0.0.0/0\n next-hop-address: 192.168.122.1\n next-hop-interface: eth0\n table-id: 254\n",
				},
			},
		},
		{
			name: "valid-config-multiple-yamls",
			data: `
metadata:
  name: mynmstateconfig
  namespace: spoke-cluster
  labels:
    cluster0-nmstate-label-name: cluster0-nmstate-label-value
spec:
  config:
    interfaces:
      - name: eth0
        type: ethernet
        state: up
        mac-address: 52:54:01:aa:aa:a1
        ipv4:
          enabled: true
          address:
            - ip: 192.168.122.21
              prefix-length: 24
  interfaces:
    - name: "eth0"
      macAddress: "52:54:01:aa:aa:a1"
---
metadata:
  name: mynmstateconfig-2
  namespace: spoke-cluster
  labels:
    cluster0-nmstate-label-name: cluster0-nmstate-label-value
spec:
  config:
    interfaces:
      - name: eth0
        type: ethernet
        state: up
        mac-address: 52:54:01:cc:cc:c1
        ipv4:
          enabled: true
          address:
            - ip: 192.168.122.22
              prefix-length: 24
  interfaces:
    - name: "eth0"
      macAddress: "52:54:01:cc:cc:c1"`,
			requiresNmstatectl: true,
			expectedFound:      true,
			expectedConfig: []*models.HostStaticNetworkConfig{
				{
					MacInterfaceMap: models.MacInterfaceMap{
						{LogicalNicName: "eth0", MacAddress: "52:54:01:aa:aa:a1"},
					},
					NetworkYaml: "interfaces:\n- ipv4:\n address:\n - ip: 192.168.122.21\n prefix-length: 24\n enabled: true\n mac-address: 52:54:01:aa:aa:a1\n name: eth0\n state: up\n type: ethernet\n",
				},
				{
					MacInterfaceMap: models.MacInterfaceMap{
						{LogicalNicName: "eth0", MacAddress: "52:54:01:cc:cc:c1"},
					},
					NetworkYaml: "interfaces:\n- ipv4:\n address:\n - ip: 192.168.122.22\n prefix-length: 24\n enabled: true\n mac-address: 52:54:01:cc:cc:c1\n name: eth0\n state: up\n type: ethernet\n",
				},
			},
		},
		{
			name: "invalid-interfaces",
			data: `
metadata:
  name: mynmstateconfig
  namespace: spoke-cluster
  labels:
    cluster0-nmstate-label-name: cluster0-nmstate-label-value
spec:
  interfaces:
    - name: "eth0"
      macAddress: "52:54:01:aa:aa:a1"
    - name: "eth0"
      macAddress: "52:54:01:bb:bb:b1"`,
			requiresNmstatectl: true,
			expectedError:      "staticNetwork configuration is not valid",
		},
		// This test case currently does not work for libnmstate 2.2.9,
		// due a regression that will be fixed in https://github.com/nmstate/nmstate/issues/2311
		// {
		// 	name: "invalid-address-for-type",
		// 	data: `
		// metadata:
		//   name: mynmstateconfig
		//   namespace: spoke-cluster
		//   labels:
		//     cluster0-nmstate-label-name: cluster0-nmstate-label-value
		// spec:
		//   config:
		//     interfaces:
		//       - name: eth0
		//         type: ethernet
		//         state: up
		//         mac-address: 52:54:01:aa:aa:a1
		//         ipv6:
		//           enabled: true
		//           address:
		//             - ip: 192.168.122.21
		//               prefix-length: 24
		//   interfaces:
		//     - name: "eth0"
		//       macAddress: "52:54:01:aa:aa:a1"`,
		// 	requiresNmstatectl: true,
		// 	expectedError:      "staticNetwork configuration is not valid",
		// },
		{
			name: "missing-label",
			data: `
metadata:
  name: mynmstateconfig
  namespace: spoke-cluster
spec:
  config:
    interfaces:
      - name: eth0
        type: ethernet
        state: up
        mac-address: 52:54:01:aa:aa:a1
        ipv4:
          enabled: true
          address:
            - ip: 192.168.122.21
              prefix-length: 24
  interfaces:
    - name: "eth0"
      macAddress: "52:54:01:aa:aa:a1"`,
			requiresNmstatectl: true,
			expectedError:      "invalid NMStateConfig configuration: ObjectMeta.Labels: Required value: mynmstateconfig does not have any label set",
		},
		{
			name:          "not-yaml",
			data:          `This is not a yaml file`,
			expectedError: "could not decode YAML for cluster-manifests/nmstateconfig.yaml: Error reading multiple YAMLs: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type v1beta1.NMStateConfig",
		},
		{
			name:       "file-not-found",
			fetchError: &os.PathError{Err: os.ErrNotExist},
		},
		{
			name:          "error-fetching-file",
			fetchError:    errors.New("fetch failed"),
			expectedError: "failed to load file cluster-manifests/nmstateconfig.yaml: fetch failed",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// nmstate may not be installed yet in CI so skip this test if not
			if tc.requiresNmstatectl {
				_, execErr := exec.LookPath("nmstatectl")
				if execErr != nil {
					t.Skip("No nmstatectl binary available")
				}
			}

			mockCtrl := gomock.NewController(t)
			defer mockCtrl.Finish()

			fileFetcher := mock.NewMockFileFetcher(mockCtrl)
			fileFetcher.EXPECT().FetchByName(nmStateConfigFilename).
				Return(
					&asset.File{
						Filename: nmStateConfigFilename,
						Data:     []byte(tc.data)},
					tc.fetchError,
				)

			asset := &NMStateConfig{}
			found, err := asset.Load(fileFetcher)
			assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
			if tc.expectedError != "" {
				assert.ErrorContains(t, err, tc.expectedError)
			} else {
				assert.NoError(t, err)
			}
			if tc.expectedFound {
				assert.Equal(t, tc.expectedConfig, asset.StaticNetworkConfig, "unexpected Config in NMStateConfig")
				assert.Equal(t, len(tc.expectedConfig), len(asset.Config))
				// Cross-check each parsed manifest against the derived static
				// network config entry for the same host.
				for i := 0; i < len(tc.expectedConfig); i++ {
					staticNetworkConfig := asset.StaticNetworkConfig[i]
					nmStateConfig := asset.Config[i]
					for n := 0; n < len(staticNetworkConfig.MacInterfaceMap); n++ {
						macInterfaceMap := staticNetworkConfig.MacInterfaceMap[n]
						iface := nmStateConfig.Spec.Interfaces[n]
						assert.Equal(t, macInterfaceMap.LogicalNicName, iface.Name)
						assert.Equal(t, macInterfaceMap.MacAddress, iface.MacAddress)
					}
					assert.YAMLEq(t, staticNetworkConfig.NetworkYaml, string(nmStateConfig.Spec.NetConfig.Raw))
				}
			}
		})
	}
}
// TestGetNodeZeroIP checks that GetNodeZeroIP selects the expected IP from a
// list of per-host network configs: hosts and interfaces are scanned in
// order, and (per the cases below) IPv4 is preferred over IPv6 within a host.
func TestGetNodeZeroIP(t *testing.T) {
	cases := []struct {
		name          string
		expectedIP    string
		expectedError string
		// configs holds one raw network-config YAML document per host.
		configs []string
	}{
		{
			name:          "no interfaces",
			expectedError: "no interface IPs set",
		},
		{
			name:       "first interface",
			expectedIP: "192.168.122.21",
			configs: []string{
				`
interfaces:
  - name: eth0
    type: ethernet
    ipv4:
      address:
        - ip: 192.168.122.21
  - name: eth1
    type: ethernet
    ipv4:
      address:
        - ip: 192.168.122.22
`,
			},
		},
		{
			name:       "second interface",
			expectedIP: "192.168.122.22",
			configs: []string{
				`
interfaces:
  - name: eth0
    type: ethernet
  - name: eth1
    type: ethernet
    ipv4:
      address:
        - ip: 192.168.122.22
`,
			},
		},
		{
			name:       "second host",
			expectedIP: "192.168.122.22",
			configs: []string{
				`
interfaces:
  - name: eth0
    type: ethernet
  - name: eth1
    type: ethernet
`,
				`
interfaces:
  - name: eth0
    type: ethernet
  - name: eth1
    type: ethernet
    ipv4:
      address:
        - ip: 192.168.122.22
`,
			},
		},
		{
			name:       "ipv4 first",
			expectedIP: "192.168.122.22",
			configs: []string{
				`
interfaces:
  - name: eth0
    type: ethernet
    ipv6:
      address:
        - ip: "2001:0db8::0001"
    ipv4:
      address:
        - ip: 192.168.122.22
`,
			},
		},
		{
			name:       "ipv6 host first",
			expectedIP: "2001:0db8::0001",
			configs: []string{
				`
interfaces:
  - name: eth0
    type: ethernet
    ipv6:
      address:
        - ip: "2001:0db8::0001"
`,
				`
interfaces:
  - name: eth0
    type: ethernet
    ipv4:
      address:
        - ip: 192.168.122.31
`,
			},
		},
		{
			name:       "ipv6 first",
			expectedIP: "2001:0db8::0001",
			configs: []string{
				`
interfaces:
  - name: eth0
    type: ethernet
    ipv6:
      address:
        - ip: "2001:0db8::0001"
  - name: eth1
    type: ethernet
    ipv4:
      address:
        - ip: 192.168.122.22
`,
			},
		},
		{
			name:       "ipv6",
			expectedIP: "2001:0db8::0001",
			configs: []string{
				`
interfaces:
  - name: eth0
    type: ethernet
    ipv6:
      address:
        - ip: "2001:0db8::0001"
`,
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Wrap each raw YAML document in an NMStateConfig manifest, one
			// per host, as GetNodeZeroIP expects.
			var configs []*aiv1beta1.NMStateConfig
			for _, hostRaw := range tc.configs {
				configs = append(configs, &aiv1beta1.NMStateConfig{
					Spec: aiv1beta1.NMStateConfigSpec{
						NetConfig: aiv1beta1.NetConfig{
							Raw: aiv1beta1.RawNetConfig(hostRaw),
						},
					},
				})
			}

			ip, err := GetNodeZeroIP(configs)
			if tc.expectedError == "" {
				assert.NoError(t, err)
				assert.Equal(t, tc.expectedIP, ip)
			} else {
				assert.ErrorContains(t, err, tc.expectedError)
			}
		})
	}
}
|
package coreos
import (
	"strings"

	"github.com/bernardolins/vandame/metadata"
)
// Config builds the CoreOs configuration for the machine with the given name
// from the cluster metadata.
func Config(name string, config metadata.Config) *CoreOs {
	c := &CoreOs{}
	c.Etcd.MachineName = name
	configureEtcd2(c, config)
	return c
}
// configureEtcd2 fills in the etcd2 section of the CoreOs configuration
// (cluster token, cluster state, and initial-cluster member list) from the
// cluster metadata.
func configureEtcd2(c *CoreOs, config metadata.Config) {
	c.Etcd.InitialClusterToken = config.GetClusterToken()
	c.Etcd.InitialClusterState = config.GetClusterState()
	setEtcd2InitialCluster(c, config.GetClusterNodes())
}
// setEtcd2InitialCluster renders the etcd2 initial-cluster string
// ("name=http://ip:2380,name2=http://ip2:2380,...") from the cluster node
// metadata and stores it on the CoreOs configuration.
func setEtcd2InitialCluster(coreos *CoreOs, nodes []metadata.Node) {
	// Collect one entry per node and join once, instead of repeated string
	// concatenation in the loop. (Parameter renamed from the misleading
	// "config" — it is the CoreOs struct, not the cluster metadata.)
	entries := make([]string, 0, len(nodes))
	for _, node := range nodes {
		entries = append(entries, node.GetNodeName()+"=http://"+node.GetNodeIp()+":2380")
	}
	coreos.Etcd.InitialCluster = strings.Join(entries, ",")
}
|
package main
import(
"../core"
// "strconv"
// "fmt"
)
// main opens (or creates) the blockchain backed by the local database and
// hands control to the command-line interface.
func main() {
	// Earlier experiments, kept commented out for reference:
	// bc:=core.NewBlockchain()
	// bc.SendData("Send 1 BTC to fox1");
	// bc.SendData("Send 1 ETH to fox2");
	// bc.Print()
	// getSumAndSub(1,2);
	// fmt.Println("hello start!")
	// bcc:=core.NewBlockchain() // create the blockchain
	// bcc.AddBlock("Send 1 BTC to fox1")
	// bcc.AddBlock("Send 2 BTC to fox1")
	// for _,block :=range bcc.Blocks{
	// 	fmt.Printf("previous block hash %x",block.PrevBlockHash)
	// 	fmt.Println("\n")
	// 	fmt.Printf("data %s",block.Data)
	// 	fmt.Println("\n")
	// 	fmt.Printf("current hash %x",block.Hash)
	// 	pow:=core.NewProofOfWork(block) // verify the proof of work
	// 	fmt.Printf("pow %s\n",strconv.FormatBool(pow.Validate()))
	// 	fmt.Println("\n")
	// 	fmt.Println("\n")
	// }
	block := core.NewBlockchain("我是一个地址") // create the blockchain (the argument is an address string)
	// fmt.Println("sss")
	// fmt.Printf("%v",block)
	defer block.DB.Close() // close the database when main returns
	cli := core.CLI{Blockchain: block} // build the CLI bound to this chain
	cli.Run() // start the command loop
}
// func getSumAndSub(n1 int,n2 int) (int,int){
// sum:=n1+n2
// sub:=n1-n2
// return sum,sub
// }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.