text
stringlengths 11
4.05M
|
|---|
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"reflect"
"strings"
"testing"
webhook "github.com/ihcsim/sidecar-injector"
"github.com/ihcsim/sidecar-injector/test"
"github.com/sirupsen/logrus"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
)
var testServer *WebhookServer
// TestMain wires the package-level test fixtures: it swaps the real k8s
// clientset constructor for a fake one, builds a Webhook seeded with the
// sidecar configmap fixture, and installs a WebhookServer with a discarded
// logger into the package-level testServer used by every test.
func TestMain(m *testing.M) {
	// mock out the k8s clientset constructor
	webhook.NewClient = test.NewFakeClient
	// create a webhook which uses its fake client to seed the sidecar configmap
	w, err := initWebhookWithConfigMap()
	if err != nil {
		panic(err)
	}
	// Silence log output during tests.
	log := logrus.New()
	log.SetOutput(ioutil.Discard)
	logger := logrus.NewEntry(log)
	testServer = &WebhookServer{nil, w, logger}
	os.Exit(m.Run())
}
// TestServe exercises WebhookServer.serve with three request bodies: an empty
// body, a valid admission-review request, and a valid request for a pod the
// webhook is configured to ignore. JSON fixtures are loaded from two
// directories above this package.
func TestServe(t *testing.T) {
	t.Run("With Empty HTTP Request Body", func(t *testing.T) {
		in := bytes.NewReader(nil)
		request := httptest.NewRequest(http.MethodGet, "/", in)
		recorder := httptest.NewRecorder()
		testServer.serve(recorder, request)
		if recorder.Code != http.StatusOK {
			t.Errorf("HTTP response status mismatch. Expected: %d. Actual: %d", http.StatusOK, recorder.Code)
		}
		// NOTE(review): this condition fires when the body IS equal to an empty
		// byte slice, while the message says the body was expected to be empty —
		// one of the two looks inverted. Confirm the intended behavior of
		// serve() on an empty request body and fix whichever side is wrong.
		if reflect.DeepEqual(recorder.Body.Bytes(), []byte("")) {
			t.Errorf("Content mismatch. Expected HTTP response body to be empty %v", recorder.Body.Bytes())
		}
	})
	t.Run("With Valid HTTP Request Body", func(t *testing.T) {
		body, err := test.FixtureHTTPRequestBody("http-request-body-valid.json", "../..")
		if err != nil {
			t.Fatal("Unexpected error: ", err)
		}
		in := bytes.NewReader(body)
		request := httptest.NewRequest(http.MethodGet, "/", in)
		recorder := httptest.NewRecorder()
		testServer.serve(recorder, request)
		if recorder.Code != http.StatusOK {
			t.Errorf("HTTP response status mismatch. Expected: %d. Actual: %d", http.StatusOK, recorder.Code)
		}
		// The response must decode to exactly the expected AdmissionReview.
		expected, err := test.FixtureAdmissionReview("admission-review-request-response.json", "../..")
		if err != nil {
			t.Fatal("Unexpected error: ", err)
		}
		var actual admissionv1beta1.AdmissionReview
		if err := json.Unmarshal(recorder.Body.Bytes(), &actual); err != nil {
			t.Fatal("Unexpected error: ", err)
		}
		if !reflect.DeepEqual(actual, *expected) {
			t.Errorf("Content mismatch\nExpected: %+v\nActual: %+v", *expected, actual)
		}
	})
	t.Run("With Valid HTTP Request Body (ignore pod)", func(t *testing.T) {
		body, err := test.FixtureHTTPRequestBody("http-request-body-valid-ignore-pod.json", "../..")
		if err != nil {
			t.Fatal("Unexpected error: ", err)
		}
		in := bytes.NewReader(body)
		request := httptest.NewRequest(http.MethodGet, "/", in)
		recorder := httptest.NewRecorder()
		testServer.serve(recorder, request)
		if recorder.Code != http.StatusOK {
			t.Errorf("HTTP response status mismatch. Expected: %d. Actual: %d", http.StatusOK, recorder.Code)
		}
		expected, err := test.FixtureAdmissionReview("admission-review-request-response-ignore-pod.json", "../..")
		if err != nil {
			t.Fatal("Unexpected error: ", err)
		}
		var actual admissionv1beta1.AdmissionReview
		if err := json.Unmarshal(recorder.Body.Bytes(), &actual); err != nil {
			t.Fatal("Unexpected error: ", err)
		}
		if !reflect.DeepEqual(actual, *expected) {
			t.Errorf("Content mismatch\nExpected: %s\nActual: %s", expected, actual)
		}
	})
}
// TestHandleRequestError verifies that handleRequestError writes both the
// supplied status code and the error's message into the HTTP response.
func TestHandleRequestError(t *testing.T) {
	recorder := httptest.NewRecorder()
	errMsg := "Some test error"

	testServer.handleRequestError(recorder, fmt.Errorf(errMsg), http.StatusInternalServerError)

	if got, want := recorder.Code, http.StatusInternalServerError; got != want {
		t.Errorf("HTTP response status mismatch. Expected: %d. Actual: %d", want, got)
	}
	if body := recorder.Body.String(); strings.TrimSpace(body) != errMsg {
		t.Errorf("HTTP response body mismatch. Expected: %q. Actual: %q", errMsg, body)
	}
}
// TestNewWebhookServer checks that NewWebhookServer loads a TLS key pair from
// disk and binds the server to the requested port.
func TestNewWebhookServer(t *testing.T) {
	// sample cert and key pem files from https://golang.org/src/crypto/tls/tls_test.go
	var (
		rsaCertPEM = `-----BEGIN CERTIFICATE-----
MIIB0zCCAX2gAwIBAgIJAI/M7BYjwB+uMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQwHhcNMTIwOTEyMjE1MjAyWhcNMTUwOTEyMjE1MjAyWjBF
MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANLJ
hPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wok/4xIA+ui35/MmNa
rtNuC+BdZ1tMuVCPFZcCAwEAAaNQME4wHQYDVR0OBBYEFJvKs8RfJaXTH08W+SGv
zQyKn0H8MB8GA1UdIwQYMBaAFJvKs8RfJaXTH08W+SGvzQyKn0H8MAwGA1UdEwQF
MAMBAf8wDQYJKoZIhvcNAQEFBQADQQBJlffJHybjDGxRMqaRmDhX0+6v02TUKZsW
r5QuVbpQhH6u+0UgcW0jp9QwpxoPTLTWGXEWBBBurxFwiCBhkQ+V
-----END CERTIFICATE-----
`
		rsaKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo
k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G
6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N
MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW
SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T
xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi
D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g==
-----END RSA PRIVATE KEY-----`
	)
	certFile, err := ioutil.TempFile("", "")
	if err != nil {
		t.Fatal("Unexpected error: ", err)
	}
	defer os.Remove(certFile.Name())
	if err := ioutil.WriteFile(certFile.Name(), []byte(rsaCertPEM), 0); err != nil {
		t.Fatal("Unexpected error: ", err)
	}
	keyFile, err := ioutil.TempFile("", "")
	if err != nil {
		t.Fatal("Unexpected error: ", err)
	}
	// BUG FIX: previously this deferred removal of certFile a second time,
	// leaking the key file on disk after every test run.
	defer os.Remove(keyFile.Name())
	if err := ioutil.WriteFile(keyFile.Name(), []byte(rsaKeyPEM), 0); err != nil {
		t.Fatal("Unexpected error: ", err)
	}
	port := "7070"
	server, err := NewWebhookServer(port, certFile.Name(), keyFile.Name())
	if err != nil {
		t.Fatal("Unexpected error: ", err)
	}
	if server.Addr != fmt.Sprintf(":%s", port) {
		t.Errorf("Expected server address to be :%q. Actual: %q", port, server.Addr)
	}
}
// initWebhookWithConfigMap builds a Webhook (whose client is the fake injected
// in TestMain) and seeds it with the sidecar configmap fixture so that
// injection tests have configuration to read. Returns the ready webhook or the
// first error encountered.
func initWebhookWithConfigMap() (*webhook.Webhook, error) {
	fixture, err := webhook.New()
	if err != nil {
		return nil, err
	}
	// seed the sidecar configmap with the fake client
	configMap, err := test.FixtureConfigMap("../..", "sidecar-configmap.json")
	if err != nil {
		return nil, err
	}
	if _, err := fixture.Client.CoreV1().ConfigMaps(test.DefaultNamespace).Create(configMap); err != nil {
		return nil, err
	}
	return fixture, nil
}
|
package mysqldb
import (
"os"
"path/filepath"
"strconv"
"testing"
"context"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// VCRecordTestSuite is the testify suite for verification-code (VC) record
// database operations. The db client is created once in SetupSuite.
type VCRecordTestSuite struct {
	suite.Suite
	db *DbClient
}
// SetupSuite creates the testing DB client from the env file once for the
// whole suite.
func (suite *VCRecordTestSuite) SetupSuite() {
	envFilepath := filepath.Join("testdata", "local.svc-jinmuid.env")
	// BUG FIX: the constructor error was previously discarded with `_`, so a
	// failed connection produced a nil db and confusing panics later. Fail
	// the suite immediately instead.
	db, err := newTestingDbClientFromEnvFile(envFilepath)
	suite.Require().NoError(err)
	suite.db = db
}
// TestSearchVcRecordCountsIn24hours checks that counting VC records sent to
// the test email in the last 24 hours succeeds.
func (suite *VCRecordTestSuite) TestSearchVcRecordCountsIn24hours() {
	t := suite.T()
	ctx := context.Background()
	_, err := suite.db.GetDB(ctx).SearchVcRecordCountsIn24hours(ctx, os.Getenv("X_TEST_EMAIL"))
	assert.NoError(t, err)
}
// TestSearchVcRecordCountsIn1Minute checks that counting VC records sent to
// the test email in the last minute succeeds.
func (suite *VCRecordTestSuite) TestSearchVcRecordCountsIn1Minute() {
	t := suite.T()
	ctx := context.Background()
	_, err := suite.db.GetDB(ctx).SearchVcRecordCountsIn1Minute(ctx, os.Getenv("X_TEST_EMAIL"))
	assert.NoError(t, err)
}
// TestSearchVcRecordEarliestTimeIn1Minute checks that finding the earliest VC
// record time within the last minute succeeds.
func (suite *VCRecordTestSuite) TestSearchVcRecordEarliestTimeIn1Minute() {
	t := suite.T()
	ctx := context.Background()
	_, err := suite.db.GetDB(ctx).SearchVcRecordEarliestTimeIn1Minute(ctx, os.Getenv("X_TEST_EMAIL"))
	assert.NoError(t, err)
}
// TestFindVcRecord checks that looking up a VC record by serial number, code,
// recipient, and usage (SignUp) succeeds.
func (suite *VCRecordTestSuite) TestFindVcRecord() {
	t := suite.T()
	ctx := context.Background()
	sn := os.Getenv("X_TEST_SN")
	vc := os.Getenv("X_TEST_VC")
	sendTo := os.Getenv("X_TEST_EMAIL")
	usage := SignUp
	_, err := suite.db.GetDB(ctx).FindVcRecord(ctx, sn, vc, sendTo, usage)
	assert.NoError(t, err)
}
// TestHasSnExpired checks that querying whether a serial number has expired
// succeeds.
func (suite *VCRecordTestSuite) TestHasSnExpired() {
	t := suite.T()
	ctx := context.Background()
	sn := os.Getenv("X_TEST_SN")
	vc := os.Getenv("X_TEST_VC")
	_, err := suite.db.GetDB(ctx).HasSnExpired(ctx, sn, vc)
	assert.NoError(t, err)
}
// TestModifyVcRecordStatus checks that updating the status of the VC record
// identified by X_TEST_RECORD_ID succeeds.
func (suite *VCRecordTestSuite) TestModifyVcRecordStatus() {
	t := suite.T()
	ctx := context.Background()
	// Atoi error intentionally ignored: an unset env var yields record id 0.
	recordID, _ := strconv.Atoi(os.Getenv("X_TEST_RECORD_ID"))
	err := suite.db.GetDB(ctx).ModifyVcRecordStatus(ctx, int32(recordID))
	assert.NoError(t, err)
}
// TestVerifyMVC checks that verifying a mobile verification code (MVC) by
// serial number, code, recipient, and nation code succeeds.
func (suite *VCRecordTestSuite) TestVerifyMVC() {
	t := suite.T()
	ctx := context.Background()
	sn := os.Getenv("X_TEST_SN")
	vc := os.Getenv("X_TEST_VC")
	sendTo := os.Getenv("X_TEST_EMAIL")
	nationCode := os.Getenv("X_TEST_NATION_CODE")
	_, err := suite.db.GetDB(ctx).VerifyMVC(ctx, sn, vc, sendTo, nationCode)
	assert.NoError(t, err)
}
// TestSearchVcRecord checks that searching for a VC record by serial number,
// code, recipient, and nation code succeeds.
func (suite *VCRecordTestSuite) TestSearchVcRecord() {
	t := suite.T()
	ctx := context.Background()
	sn := os.Getenv("X_TEST_SN")
	vc := os.Getenv("X_TEST_VC")
	sendTo := os.Getenv("X_TEST_EMAIL")
	nationCode := os.Getenv("X_TEST_NATION_CODE")
	_, err := suite.db.GetDB(ctx).SearchVcRecord(ctx, sn, vc, sendTo, nationCode)
	assert.NoError(t, err)
}
// TestFindLatestVcRecord checks that fetching the most recent VC record for a
// recipient and usage (SignUp) succeeds.
func (suite *VCRecordTestSuite) TestFindLatestVcRecord() {
	t := suite.T()
	ctx := context.Background()
	sendTo := os.Getenv("X_TEST_EMAIL")
	usage := SignUp
	_, err := suite.db.GetDB(ctx).FindLatestVcRecord(ctx, sendTo, usage)
	assert.NoError(t, err)
}
// TestSearchSpecificVcRecordCountsIn24hours checks that counting VC records of
// a specific usage sent in the last 24 hours succeeds.
func (suite *VCRecordTestSuite) TestSearchSpecificVcRecordCountsIn24hours() {
	t := suite.T()
	ctx := context.Background()
	sendTo := os.Getenv("X_TEST_EMAIL")
	usage := SignUp
	_, err := suite.db.GetDB(ctx).SearchSpecificVcRecordCountsIn24hours(ctx, sendTo, usage)
	assert.NoError(t, err)
}
// TestSearchSpecificVcRecordEarliestTimeIn24hours checks that finding the
// earliest VC record of a specific usage within 24 hours succeeds.
func (suite *VCRecordTestSuite) TestSearchSpecificVcRecordEarliestTimeIn24hours() {
	t := suite.T()
	ctx := context.Background()
	sendTo := os.Getenv("X_TEST_EMAIL")
	usage := SignUp
	_, err := suite.db.GetDB(ctx).SearchSpecificVcRecordEarliestTimeIn24hours(ctx, sendTo, usage)
	assert.NoError(t, err)
}
// TestSearchLatestPhoneVerificationCode checks that fetching the latest phone
// verification code for a recipient and nation code succeeds.
func (suite *VCRecordTestSuite) TestSearchLatestPhoneVerificationCode() {
	t := suite.T()
	ctx := context.Background()
	sendTo := os.Getenv("X_TEST_EMAIL")
	nationCode := os.Getenv("X_TEST_NATION_CODE")
	_, err := suite.db.GetDB(ctx).SearchLatestPhoneVerificationCode(ctx, sendTo, nationCode)
	assert.NoError(t, err)
}
// TestSearchLatestEmailVerificationCode checks that fetching the latest email
// verification code for a recipient succeeds.
func (suite *VCRecordTestSuite) TestSearchLatestEmailVerificationCode() {
	t := suite.T()
	ctx := context.Background()
	sendTo := os.Getenv("X_TEST_EMAIL")
	_, err := suite.db.GetDB(ctx).SearchLatestEmailVerificationCode(ctx, sendTo)
	assert.NoError(t, err)
}
// TestVerifyMVCBySecureEmail checks that verifying an MVC against the secure
// email address succeeds.
func (suite *VCRecordTestSuite) TestVerifyMVCBySecureEmail() {
	t := suite.T()
	ctx := context.Background()
	sn := os.Getenv("X_TEST_SN")
	vc := os.Getenv("X_TEST_VC")
	email := os.Getenv("X_TEST_EMAIL")
	_, err := suite.db.GetDB(ctx).VerifyMVCBySecureEmail(ctx, sn, vc, email)
	assert.NoError(t, err)
}
// TestModifyVcRecordStatusByEmail checks that updating a VC record's status,
// addressed by secure email plus code and serial number, succeeds.
func (suite *VCRecordTestSuite) TestModifyVcRecordStatusByEmail() {
	t := suite.T()
	ctx := context.Background()
	verificationCode := os.Getenv("X_TEST_VC")
	serialNumber := os.Getenv("X_TEST_SN")
	email := os.Getenv("X_TEST_EMAIL")
	err := suite.db.GetDB(ctx).ModifyVcRecordStatusByEmail(ctx, email, verificationCode, serialNumber)
	assert.NoError(t, err)
}
// TestSetVcAsUsed checks that marking a verification code as consumed
// succeeds.
func (suite *VCRecordTestSuite) TestSetVcAsUsed() {
	t := suite.T()
	ctx := context.Background()
	sn := os.Getenv("X_TEST_SN")
	vc := os.Getenv("X_TEST_VC")
	email := os.Getenv("X_TEST_EMAIL")
	nationCode := os.Getenv("X_TEST_NATION_CODE")
	err := suite.db.GetDB(ctx).SetVcAsUsed(ctx, sn, vc, email, nationCode)
	assert.NoError(t, err)
}
// TestVCRecordTestSuite is the `go test` entry point that runs the suite.
func TestVCRecordTestSuite(t *testing.T) {
	suite.Run(t, new(VCRecordTestSuite))
}
|
package lycamplus
import (
"encoding/json"
"fmt"
"github.com/lycam-dev/lycamplus-go-sdk/lycamplus/lib"
)
// User is the API client for user-related endpoints; it wraps the shared
// HTTP client from lib.
type User struct {
	client *lib.HTTPClient
}
// NewUser returns a User client backed by a freshly constructed lib.HTTPClient.
func NewUser() *User {
	return &User{client: lib.NewHTTPClient()}
}
// Create registers a new user by POSTing the request model (converted to a
// parameter map) to /{version}/users and decoding the JSON response.
func (u *User) Create(userRequestModel *UserRequestModel) (*UserResponseModel, error) {
	path := fmt.Sprintf("%s/%s/%s", lib.DefaultAPIURL, lib.DefaultAPIVersion, "users")
	params, err := lib.Struct2Map(userRequestModel)
	if err != nil {
		return nil, err
	}
	data, err := u.client.Post(path, params)
	if err != nil {
		return nil, err
	}
	// Pass the *UserResponseModel directly: the previous &response handed
	// json.Unmarshal an unnecessary **UserResponseModel.
	response := new(UserResponseModel)
	if err := json.Unmarshal(data, response); err != nil {
		return nil, err
	}
	return response, nil
}
// Assume exchanges a user's uuid for a token by POSTing to
// /{version}/users/{uuid}/assume and decoding the JSON response.
func (u *User) Assume(uuid string) (*TokenResponseModel, error) {
	path := fmt.Sprintf("%s/%s/%s/%s/%s", lib.DefaultAPIURL, lib.DefaultAPIVersion, "users", uuid, "assume")
	data, err := u.client.Post(path, nil)
	if err != nil {
		return nil, err
	}
	// Pass the *TokenResponseModel directly instead of a **TokenResponseModel.
	response := new(TokenResponseModel)
	if err := json.Unmarshal(data, response); err != nil {
		return nil, err
	}
	return response, nil
}
|
package core
import (
"bufio"
"errors"
"fmt"
"strings"
"syscall"
"golang.org/x/crypto/ssh/terminal"
"github.com/gookit/color"
"github.com/rs/zerolog/log"
)
// GetInputWrapper bundles a buffered reader used to collect interactive
// console input (plain strings and hidden passwords).
type GetInputWrapper struct {
	Scanner bufio.Reader
}
var (
	// errPasswordMismatch is returned by GetPassword when the confirmation
	// entry differs from the first one. Reworded to follow Go convention:
	// error strings are lowercase and unpunctuated.
	errPasswordMismatch = errors.New("the two passwords entered are not the same")
)
// GetPassword prompts with question and reads a password from the terminal
// without echo. When only4Decription is true (sic — name kept for signature
// stability; it means "only for decryption") the password is returned after a
// single read. Otherwise the user is asked to type it a second time and
// errPasswordMismatch is returned if the two entries differ.
func (t *GetInputWrapper) GetPassword(question string, only4Decription bool) (password string, err error) {
	fmt.Print(question)
	bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
	if err != nil {
		return "", err
	}
	if only4Decription {
		return string(bytePassword), nil
	}
	// ReadPassword swallows the user's newline; emit one before re-prompting.
	fmt.Println()
	passMsg := fmt.Sprintf("%s Please, insert the password again: ", color.Yellow.Sprint("==>"))
	fmt.Print(passMsg)
	bytePassword2, err := terminal.ReadPassword(int(syscall.Stdin))
	if err != nil {
		return "", err
	}
	if string(bytePassword) == string(bytePassword2) {
		return string(bytePassword), nil
	}
	log.Err(errPasswordMismatch).Msg("GetPassword")
	return "", errPasswordMismatch
}
// GetInputString prompts with question and reads one line from the wrapped
// scanner. When def is non-empty it is offered as a default and returned if
// the user just presses enter. The trailing line terminator is stripped.
func (t *GetInputWrapper) GetInputString(question string, def string) (text string, err error) {
	if def != "" {
		fmt.Print(question + "\n" + "press enter for default [" + def + "]\n")
	} else {
		fmt.Print(question + "\n")
	}
	text, err = t.Scanner.ReadString('\n')
	if err != nil {
		return "", err
	}
	// BUG FIX: strip "\r\n" in both branches. Previously the no-default path
	// only removed "\n", leaving a stray "\r" in the result on Windows.
	text = strings.Replace(text, "\r\n", "", -1)
	text = strings.Replace(text, "\n", "", -1)
	if text == "" && def != "" {
		text = def
	}
	return text, nil
}
|
// application entry
package main

import "blog/api"

// main delegates all startup (routing, server boot) to the api package.
func main() {
	api.Run()
}
|
package sleepy
import (
"fmt"
"github.com/lithdew/bytesutil"
"github.com/valyala/bytebufferpool"
)
var _ EndpointDispatcher = (*Channel)(nil)
// Channel pairs an Endpoint with reliability bookkeeping: a send window of
// in-flight packets, read/write/out queues, a spill-over queue used when the
// send window is full, and the oldest un-ACK'ed sequence number.
type Channel struct {
	endpoint *Endpoint
	window   *PacketBuffer
	// readQueue holds inbound raw packets awaiting processing in Update.
	readQueue chan []byte
	// writeQueue holds outbound payloads awaiting transmission in Update.
	writeQueue chan []byte
	// outQueue receives encoded packets ready to be put on the wire.
	outQueue chan []byte
	// queue buffers payloads that could not be sent because the send window
	// was exhausted; drained as ACKs advance oldestUnacked.
	queue         []*bytebufferpool.ByteBuffer
	oldestUnacked uint16
	// lastSent is the timestamp of the most recent transmission, used to
	// decide when to emit a keep-alive.
	lastSent float64
}
// NewChannel builds a Channel wired to a new Endpoint that dispatches back
// into the channel. Queue capacities come from the endpoint's config.
func NewChannel(config *Config) *Channel {
	channel := new(Channel)
	channel.endpoint = NewEndpoint(channel, config)
	channel.window = NewPacketBuffer(uint16(channel.endpoint.config.SentPacketBufferSize))
	channel.readQueue = make(chan []byte, channel.endpoint.config.RecvPacketBufferSize)
	channel.writeQueue = make(chan []byte, channel.endpoint.config.SentPacketBufferSize)
	channel.outQueue = make(chan []byte, channel.endpoint.config.SentPacketBufferSize)
	return channel
}
// Read enqueues an inbound packet for processing on the next Update call.
// Blocks if the read queue is full.
func (c *Channel) Read(buf []byte) {
	c.readQueue <- buf
}
// Write enqueues an outbound payload for transmission on the next Update
// call. Blocks if the write queue is full.
func (c *Channel) Write(buf []byte) {
	c.writeQueue <- buf
}
// Update drives the channel at the given timestamp: it drains queued inbound
// packets into the endpoint, moves queued writes into the send window (or the
// spill-over queue when the window is full), retransmits window packets that
// are unsent or un-ACK'ed for over 0.1s, and emits a keep-alive when nothing
// has been sent for 0.1s. Returns the first packet-receive error, if any.
func (c *Channel) Update(time float64) error {
	c.endpoint.Update(time)
Reading:
	// Drain all pending inbound packets without blocking.
	for {
		select {
		case b := <-c.readQueue:
			err := c.endpoint.ReadPacket(b)
			if err != nil {
				return fmt.Errorf("failed to receive packet: %w", err)
			}
		default:
			break Reading
		}
	}
Writing:
	// Drain all pending outbound payloads without blocking.
	for {
		select {
		case buf := <-c.writeQueue:
			// Send window full: copy the payload into a pooled buffer and
			// defer it until ACKs advance the window.
			if c.oldestUnacked+uint16(c.endpoint.config.RecvPacketBufferSize) == c.endpoint.seq {
				b := c.endpoint.pool.Get()
				b.B = bytesutil.ExtendSlice(b.B, len(buf))
				copy(b.B, buf)
				c.queue = append(c.queue, b)
				continue
			}
			c.endpoint.WritePacket(buf)
		default:
			break Writing
		}
	}
	// Write packets that have yet to be written, and also write packets that have yet to be ACK'ed after 0.1 seconds
	// from the moment we originally wrote them.
	max := c.oldestUnacked + uint16(c.endpoint.config.RecvPacketBufferSize)
	for seq := c.oldestUnacked; seqLTE(seq, max); seq++ {
		packet := c.window.Find(seq)
		if packet == nil {
			continue
		}
		if packet.written && time-packet.time < 0.1 {
			continue
		}
		packet.written = true
		packet.time = time
		c.lastSent = time
		c.outQueue <- packet.buf.B
	}
	// Keep-alive: an empty packet carries ACK information to the peer.
	if time-c.lastSent >= 0.1 {
		c.endpoint.WritePacket(nil)
	}
	return nil
}
// Out exposes the queue of encoded packets ready to be sent on the wire.
func (c *Channel) Out() <-chan []byte {
	return c.outQueue
}
// Transmit records an outgoing packet in the send window under seq, copying
// the payload into a pooled buffer so it can be retransmitted until ACK'ed.
func (c *Channel) Transmit(seq uint16, buf []byte) {
	b := c.endpoint.pool.Get()
	b.B = bytesutil.ExtendSlice(b.B, len(buf))
	copy(b.B, buf)
	packet := c.window.Insert(seq)
	packet.Reset()
	packet.buf = b
}
// Process handles a received payload. Currently a debug stub that prints the
// sequence number and content to stdout.
func (c *Channel) Process(seq uint16, data []byte) {
	fmt.Printf("[sequence number: %d, content: %q]\n", seq, string(data))
}
// ACK removes seq from the send window and recycles its buffer. If seq was
// the oldest un-ACK'ed packet, the window base advances to the next pending
// sequence (or to the window limit when none remain), and payloads parked in
// the spill-over queue are sent for each slot freed.
func (c *Channel) ACK(seq uint16) {
	packet := c.window.Find(seq)
	if packet == nil {
		return
	}
	c.window.Remove(seq)
	c.endpoint.pool.Put(packet.buf)
	if seq != c.oldestUnacked {
		return
	}
	// Find the next oldest un-ACK'ed packet sequence number.
	oldestUnacked := c.oldestUnacked
	updated, max := false, oldestUnacked+uint16(c.endpoint.config.RecvPacketBufferSize)
	for seq := oldestUnacked + 1; seqLTE(seq, max); seq++ {
		packet := c.window.Find(seq)
		if packet == nil {
			continue
		}
		c.oldestUnacked, updated = seq, true
		break
	}
	// If the oldest ACK was not updated, set the oldest ACK to be the latest sent packet sequence number.
	if !updated {
		c.oldestUnacked = max
	}
	// Send packets that were previously queued up due to the oldest un-ACK'ed packet.
	diff := c.oldestUnacked - oldestUnacked
	for i := uint16(0); len(c.queue) > 0 && i < diff; i++ {
		popped := c.queue[0]
		c.queue = c.queue[1:]
		c.endpoint.WritePacket(popped.B)
		c.endpoint.pool.Put(popped)
	}
}
|
package main
// TreeNode is a node of a binary tree.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

func main() {
}

// isSymmetric reports whether the tree rooted at root is a mirror of itself.
// A nil tree is symmetric by definition.
func isSymmetric(root *TreeNode) bool {
	if root == nil {
		return true
	}
	return check(root.Left, root.Right)
}

// check reports whether the subtrees rooted at treeLeft and treeRight are
// mirror images: equal values at the root and cross-mirrored children.
func check(treeLeft *TreeNode, treeRight *TreeNode) bool {
	switch {
	case treeLeft == nil && treeRight == nil:
		return true
	case treeLeft == nil || treeRight == nil:
		return false
	case treeLeft.Val != treeRight.Val:
		return false
	default:
		return check(treeLeft.Left, treeRight.Right) && check(treeLeft.Right, treeRight.Left)
	}
}
|
package reverse
import (
	"errors"
	"fmt"
	"strings"
)
// List is a singly linked list node holding one byte of payload.
type List struct {
	Val  byte
	Next *List
}
// Init builds a linked list whose nodes hold the bytes of s in order and
// returns its head. An empty (or nil) slice yields an error.
func Init(s []byte) (*List, error) {
	// Idiom: len(s) == 0 covers both nil and empty without the string
	// conversion/allocation of the previous `string(s) == ""` check.
	if len(s) == 0 {
		return nil, errors.New("list is empty")
	}
	head := &List{Val: s[0]}
	curr := head
	for _, b := range s[1:] {
		curr.Next = &List{Val: b}
		curr = curr.Next
	}
	return head, nil
}
// Print renders the list's byte values as a string, one character per node.
// Each byte is widened to a rune, matching the previous %c formatting for
// values above 0x7F.
func Print(list *List) string {
	// strings.Builder replaces the previous `result +=` loop, which copied
	// the whole string on every iteration (quadratic in list length).
	var b strings.Builder
	for curr := list; curr != nil; curr = curr.Next {
		b.WriteRune(rune(curr.Val))
	}
	return b.String()
}
// LinkedList reverses the list in place and returns the new head. A nil list
// yields an error.
func LinkedList(node *List) (*List, error) {
	if node == nil {
		return nil, errors.New("list is empty")
	}
	var prev *List
	for node != nil {
		// Tuple assignment: detach node, point it backwards, step forward.
		node.Next, prev, node = prev, node, node.Next
	}
	return prev, nil
}
// String reverses s in place (and returns it) by swapping symmetric byte
// pairs. An empty (or nil) slice yields an error. Note: operates on raw
// bytes, so multi-byte UTF-8 sequences are not kept intact.
func String(s []byte) ([]byte, error) {
	// Idiom: len check instead of allocating a string for comparison.
	if len(s) == 0 {
		return nil, errors.New("string is empty")
	}
	for i := 0; i < len(s)/2; i++ {
		s[i], s[len(s)-i-1] = s[len(s)-i-1], s[i]
	}
	return s, nil
}
|
package function
import _ "github.com/project-flogo/microgateway/internal/function/error"
|
package discovery
import "time"
// Resource is the common read-only view of any discovered cloud resource:
// identifier, display name, and (optional, hence pointer) creation time.
type Resource interface {
	ID() string
	Name() string
	CreationTime() *time.Time
}
// resource is the plain-data implementation of Resource, embedded by the
// concrete resource kinds below.
type resource struct {
	id           string
	name         string
	creationTime *time.Time
}
// ID returns the resource's identifier.
func (r *resource) ID() string {
	return r.id
}
// Name returns the resource's display name.
func (r *resource) Name() string {
	return r.name
}
// CreationTime returns when the resource was created; may be nil if unknown.
func (r *resource) CreationTime() *time.Time {
	return r.creationTime
}
// Storage is a Resource that additionally reports its at-rest encryption
// settings (nil when not encrypted or not known).
type Storage interface {
	Resource
	AtRestEncryption() *AtRestEncryption
}
// storage implements Storage by embedding resource and holding the at-rest
// encryption description.
type storage struct {
	resource
	atRestEncryption *AtRestEncryption
}
// AtRestEncryption returns the storage's at-rest encryption settings.
func (s *storage) AtRestEncryption() *AtRestEncryption {
	return s.atRestEncryption
}
// ObjectStorage is a Storage exposed over an HTTP endpoint.
// NOTE(review): Go initialism convention would name this HTTPEndpoint; kept
// as-is since renaming would break existing implementers/callers.
type ObjectStorage interface {
	Storage
	HttpEndpoint() *HttpEndpoint
}
// objectStorage implements ObjectStorage by embedding storage and holding the
// HTTP endpoint description.
type objectStorage struct {
	storage
	httpEndpoint *HttpEndpoint
}
// HttpEndpoint returns the endpoint the object storage is served from.
func (s *objectStorage) HttpEndpoint() *HttpEndpoint {
	return s.httpEndpoint
}
// BlockStorage marks a Storage as block-level (e.g. a disk volume); it adds
// no methods beyond Storage.
type BlockStorage interface {
	Storage
}
/*type blockStorage struct {
storage
}*/
// StorageDiscoverer enumerates the storage resources visible to a provider.
type StorageDiscoverer interface {
	List() ([]Storage, error)
}
|
package actions
import (
"errors"
"github.com/barrydev/api-3h-shop/src/factories"
"github.com/barrydev/api-3h-shop/src/model"
)
// GetProductItemById looks up a product item by id via the factories layer.
// It returns an error when the lookup fails or when no item exists.
func GetProductItemById(productItemId int64) (*model.ProductItem, error) {
	productItem, err := factories.FindProductItemById(productItemId)
	if err != nil {
		return nil, err
	}
	if productItem == nil {
		// Grammar fix in the error message ("does not exists" -> "does not exist").
		return nil, errors.New("product_item does not exist")
	}
	return productItem, nil
}
|
package storage
import (
"os"
"testing"
"github.com/inazo1115/toydb/lib/util"
)
// TestWriteAndRead tests that DiskManager can write the message to the file and
// read it.
func TestWriteAndRead(t *testing.T) {
	// Setup. Teardown is deferred so the temp file is removed even if an
	// assertion aborts the test early.
	dm := NewDiskManager()
	DataFile = "diskmanager_test_TestWriteAndRead.tmp"
	defer os.Remove(DataFile)
	expected := "this is the test message."
	// Write. (Typo fix: "faild" -> "failed"; include the error for context.)
	if err := dm.Write(0, []byte(expected)); err != nil {
		t.Errorf("Write failed: %v", err)
	}
	// Read.
	size, err := dm.GetBufferSize(0)
	if err != nil {
		t.Errorf("GetBufferSize failed: %v", err)
	}
	buf := make([]byte, size)
	if err = dm.Read(0, buf); err != nil {
		t.Errorf("Read failed: %v", err)
	}
	// Assert.
	actual := string(buf)
	util.Assert(t, actual, expected)
}
// TestGetFreePageID_0 tests that DiskManager returns first page id(0) when
// there is no pages on the disk and the buffer.
func TestGetFreePageID_0(t *testing.T) {
	// Setup. Teardown is deferred for robustness.
	dm := NewDiskManager()
	DataFile = "diskmanager_test_TestGetFreePageID_0.tmp"
	defer os.Remove(DataFile)
	expected := int64(0)
	// Get the page id; include the error in the failure message.
	pid, err := dm.GetFreePageID(make([]int64, 0))
	if err != nil {
		t.Errorf("GetFreePageID failed: %v", err)
	}
	// Assert.
	actual := pid
	util.Assert(t, actual, expected)
}
// TestGetFreePageID_1 tests that DiskManager returns next of the maximum page
// id when the page is on the cache.
func TestGetFreePageID_1(t *testing.T) {
	// Setup. Teardown is deferred for robustness.
	dm := NewDiskManager()
	DataFile = "diskmanager_test_TestGetFreePageID_1.tmp"
	defer os.Remove(DataFile)
	expected := int64(5)
	// Get the page id: with pages {0,2,4} cached, the next free id is 5.
	pagesOnCache := []int64{0, 2, 4}
	pid, err := dm.GetFreePageID(pagesOnCache)
	if err != nil {
		t.Errorf("GetFreePageID failed: %v", err)
	}
	// Assert.
	actual := pid
	util.Assert(t, actual, expected)
}
// TestGetFreePageID_2 tests that DiskManager returns next of the maximum page
// id when the page is on the disk.
func TestGetFreePageID_2(t *testing.T) {
	// Setup. Teardown is deferred for robustness.
	dm := NewDiskManager()
	DataFile = "diskmanager_test_TestGetFreePageID_2.tmp"
	defer os.Remove(DataFile)
	expected := int64(1)
	// Write page 0 so the next free id on disk is 1.
	// (Typo fix: "faild" -> "failed"; include the error for context.)
	if err := dm.Write(0, []byte("this is the test message.")); err != nil {
		t.Errorf("Write failed: %v", err)
	}
	// Get the page id.
	pid, err := dm.GetFreePageID(make([]int64, 0))
	if err != nil {
		t.Errorf("GetFreePageID failed: %v", err)
	}
	// Assert.
	actual := pid
	util.Assert(t, actual, expected)
}
// TestGetBufferSize_0 tests that DiskManager returns the buffer size. When the
// placement of the page is the last of file, the buffer size equals the page's
// contents size.
func TestGetBufferSize_0(t *testing.T) {
	// Setup. Teardown is deferred for robustness.
	dm := NewDiskManager()
	DataFile = "diskmanager_test_TestGetBufferSize_0.tmp"
	defer os.Remove(DataFile)
	message := "this is the test message."
	expected := int64(len(message))
	// Write. (Typo fix: "faild" -> "failed"; include the error for context.)
	if err := dm.Write(0, []byte(message)); err != nil {
		t.Errorf("Write failed: %v", err)
	}
	// Get the buffer size.
	size, err := dm.GetBufferSize(0)
	if err != nil {
		t.Errorf("GetBufferSize failed: %v", err)
	}
	// Assert.
	actual := size
	util.Assert(t, actual, expected)
}
// TestGetBufferSize_1 tests that DiskManager returns the buffer size. When the
// placement of the page is not the last of file, the buffer size is a precise
// size which equals the block size.
func TestGetBufferSize_1(t *testing.T) {
	// Setup. Teardown is deferred for robustness.
	dm := NewDiskManager()
	DataFile = "diskmanager_test_TestGetBufferSize_1.tmp"
	defer os.Remove(DataFile)
	message := "this is the test message."
	expected := int64(4096) // It's the block size.
	// Write twice so page 0 is no longer the last page in the file.
	// (Typo fix: "faild" -> "failed"; include the error for context.)
	if err := dm.Write(0, []byte(message)); err != nil {
		t.Errorf("Write failed: %v", err)
	}
	if err := dm.Write(1, []byte(message)); err != nil {
		t.Errorf("Write failed: %v", err)
	}
	// Get the buffer size.
	size, err := dm.GetBufferSize(0)
	if err != nil {
		t.Errorf("GetBufferSize failed: %v", err)
	}
	// Assert.
	actual := size
	util.Assert(t, actual, expected)
}
|
// Package tools is used to pin specific versions of external tools in this
// module's go.mod that we use for testing.
package tools
|
package main
import (
"bufio"
"fmt"
"os"
"strings"
"strconv"
)
func readLines(path string) ([]string, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, scanner.Err()
}
// main2 parses one comma-separated wire description (e.g. "R8,U5,L5,D3") and
// returns the coordinates visited while walking it from (x, y). Horizontal
// moves (R/L) go through add, vertical moves (U/D) through subtract; sub
// selects the negative direction.
func main2(wire string, x int, y int) []string {
	var nums []string
	for _, move := range strings.Split(wire, ",") {
		// BUG FIX: guard empty segments — indexing move[0] on "" panicked.
		if move == "" {
			continue
		}
		sub := false
		// Switch on the direction byte instead of the previous if/else chain
		// of single-character string conversions.
		switch move[0] {
		case 'L':
			sub = true
			fallthrough
		case 'R':
			nums, x, y = add(move, nums, x, y, sub)
		case 'D':
			sub = true
			fallthrough
		case 'U':
			nums, x, y = subtract(move, nums, x, y, sub)
		}
	}
	return nums
}
// add walks the horizontal move encoded in i (direction letter followed by a
// step count, e.g. "R75") starting at (x, y), appending each visited
// coordinate to nums as "x,y". sub selects the negative (left) direction.
// The grown slice and final coordinates are returned.
func add(i string, nums []string, x int, y int, sub bool) ([]string, int, int) {
	steps, err := strconv.Atoi(i[1:])
	if err != nil {
		// BUG FIX: bail out on a malformed count instead of walking with a
		// garbage step value.
		fmt.Println(err)
		return nums, x, y
	}
	// BUG FIXES: the loop previously ran `b <= steps` (one step too many),
	// and appended the raw ints x, y to a []string, which does not compile.
	for b := 0; b < steps; b++ {
		if sub {
			x--
		} else {
			x++
		}
		nums = append(nums, strconv.Itoa(x)+","+strconv.Itoa(y))
	}
	return nums, x, y
}
// subtract is meant to walk vertical ("U"/"D") moves, mirroring add for the
// y axis. It is currently an unimplemented stub that returns its inputs
// unchanged — TODO(review): implement before relying on vertical moves.
func subtract(i string, nums []string, x int, y int, sub bool) ([]string, int, int) {
	return nums, x, y
}
// main reads two wire descriptions from f.txt (one per line) and traces each
// from the origin. The traced paths are currently unused (intersection logic
// not yet implemented).
func main() {
	lines, err := readLines("f.txt")
	if err != nil {
		fmt.Println(err)
	}
	wire1 := string(lines[0])
	wire2 := string(lines[1])
	a, b := 0, 0
	x := main2(wire1, a, b)
	y := main2(wire2, a, b)
	_, _ = x, y
}
|
// Note by Leandro Motta Barros: The nice tests for OpenSimplex Noise were
// originally written by Owen Raccuglia. They kind of go in the same vein as
// the tests I did in my D (dlang) OpenSimples Nose implementation (see
// https://github.com/lmbarros/sbxs_dlang/blob/master/src/sbxs/noise/open_simplex_noise.d),
// but the credits of this code go to Owen (who clearly knows Go much better
// than I do, by the way), not to me, who just added a few simple benchmarks.
// Tests for OpenSimplex noise, based on the output of
// the Java implementation.
//
// All reference samples were rendered with the default seed (0). Each version
// of the noise function (2D, 3D and 4D) was run to output 2D samples slicing
// across two of the function's axes. There is one 2D slice, three 3D slices
// and 6 4D slices; the 3D slices each pin one axis to the value 3.8; 4D slices
// pin one axis (the first in the filename) to 3.8 and the second to 2.7. These
// values were chosen arbitrarily.
//
// Each sample is a 512x512 greyscale PNG; each pixel is 1/24 wide in the
// OpenSimplex's space -- i.e. pixel (24, 24) in the 2D noise sample was
// computed by evaluating the 2D noise at (1.0, 1.0) and converting from a [-1,
// +1] scale to [0, +1].
//
package opensimplex
import (
"compress/gzip"
"encoding/json"
"io"
"os"
"path"
"testing"
"github.com/lmbarros/sbxs_go_test/test/assert"
)
// loadSamples streams reference noise samples from the gzip'ed JSON fixture
// test_files/samples.json.gz over a channel. Each sample is a []float64 of
// coordinates followed by the expected noise value. The channel is closed at
// EOF; any read/decode failure panics (fixtures are required for the tests).
func loadSamples() <-chan []float64 {
	c := make(chan []float64)
	go func() {
		f, err := os.Open(path.Join("test_files", "samples.json.gz"))
		if err != nil {
			panic(err.Error())
		}
		defer f.Close()
		gz, err := gzip.NewReader(f)
		if err != nil {
			panic(err.Error())
		}
		dec := json.NewDecoder(gz)
		for {
			var sample []float64
			if err := dec.Decode(&sample); err == io.EOF {
				break
			} else if err != nil {
				panic(err.Error())
			} else {
				c <- sample
			}
		}
		close(c)
	}()
	return c
}
// Compares generated noise values with values generated with the reference Java
// implementation.
//
// Exact float equality is intentional here: the implementation is expected to
// be bit-for-bit compatible with the reference output at seed 0.
func TestSamplesMatch(t *testing.T) {
	samples := loadSamples()
	n := NewWithSeed(0)
	for s := range samples {
		var expected, actual float64
		// Sample length determines dimensionality: coords + 1 expected value.
		switch len(s) {
		case 3:
			expected = s[2]
			actual = n.Noise2D(s[0], s[1])
		case 4:
			expected = s[3]
			actual = n.Noise3D(s[0], s[1], s[2])
		case 5:
			expected = s[4]
			actual = n.Noise4D(s[0], s[1], s[2], s[3])
		default:
			t.Fatalf("Unexpected size sample: %d", len(s))
		}
		if expected != actual {
			t.Fatalf("Expected %v, got %v for %dD sample at %v",
				expected, actual, len(s)-1, s[:len(s)-1])
		}
	}
}
// Makes sure that the 1D noise behaves as if sampling 2D noise at y = 0.0. This
// test serves to allow me to try to optmize my "fake 1D" implementation while
// ensuring that I didn't mess things up.
func Test1DNoise(t *testing.T) {
	noise := New()
	// 0.09 step avoids landing only on lattice-aligned points.
	for x := -10.0; x < 10.0; x += 0.09 {
		assert.Equal(t, noise.Noise2D(x, 0.0), noise.Noise1D(x))
	}
}
// Benchmarks 1D noise generation
// NOTE(review): the result is discarded; consider sinking it into a package
// var to guard against dead-code elimination.
func Benchmark1D(b *testing.B) {
	noise := New()
	for i := 0; i < b.N; i++ {
		noise.Noise1D(float64(i))
	}
}
// Benchmarks 2D noise generation
// NOTE(review): result discarded; see Benchmark1D.
func Benchmark2D(b *testing.B) {
	noise := New()
	for i := 0; i < b.N; i++ {
		noise.Noise2D(float64(i), float64(i))
	}
}
// Benchmarks 3D noise generation
// NOTE(review): result discarded; see Benchmark1D.
func Benchmark3D(b *testing.B) {
	noise := New()
	for i := 0; i < b.N; i++ {
		noise.Noise3D(float64(i), float64(i), float64(i))
	}
}
// Benchmarks 4D noise generation
// NOTE(review): result discarded; see Benchmark1D.
func Benchmark4D(b *testing.B) {
	noise := New()
	for i := 0; i < b.N; i++ {
		noise.Noise4D(float64(i), float64(i), float64(i), float64(i))
	}
}
|
package resource
// Response implements api2go.Responder, carrying the payload and the HTTP
// status code to return.
type Response struct {
	Res  interface{}
	Code int
}
// Metadata returns additional metadata attached to every response; currently
// just a fixed author tag.
func (r Response) Metadata() map[string]interface{} {
	return map[string]interface{}{
		"author": "bhops",
	}
}
// Result returns the actual payload to serialize.
func (r Response) Result() interface{} {
	return r.Res
}
// StatusCode returns the HTTP status code for the response.
func (r Response) StatusCode() int {
	return r.Code
}
|
package kvs
import "github.com/stretchr/testify/mock"
// MockKVS is a testify/mock double for the KVS interface. The method bodies
// below follow the mockery-generated pattern: if a function was registered
// for a return slot it is invoked, otherwise the canned value is returned.
type MockKVS struct {
	mock.Mock
}
// Delete returns the mocked error configured for key.
func (_m *MockKVS) Delete(key string) error {
	ret := _m.Called(key)
	var r0 error
	if rf, ok := ret.Get(0).(func(string) error); ok {
		r0 = rf(key)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}
// Get returns the mocked (*Node, error) configured for key/options.
func (_m *MockKVS) Get(key string, options *GetOptions) (*Node, error) {
	ret := _m.Called(key, options)
	var r0 *Node
	if rf, ok := ret.Get(0).(func(string, *GetOptions) *Node); ok {
		r0 = rf(key, options)
	} else {
		// nil check avoids a panic when asserting a nil canned value to *Node.
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*Node)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(string, *GetOptions) error); ok {
		r1 = rf(key, options)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
// Mkdir returns the mocked error configured for dir.
func (_m *MockKVS) Mkdir(dir string) error {
	ret := _m.Called(dir)
	var r0 error
	if rf, ok := ret.Get(0).(func(string) error); ok {
		r0 = rf(dir)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}
// Rmdir returns the mocked error configured for dir.
func (_m *MockKVS) Rmdir(dir string) error {
	ret := _m.Called(dir)
	var r0 error
	if rf, ok := ret.Get(0).(func(string) error); ok {
		r0 = rf(dir)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}
// Set returns the mocked (*Node, error) configured for key/value/options.
func (_m *MockKVS) Set(key string, value string, options *SetOptions) (*Node, error) {
	ret := _m.Called(key, value, options)
	var r0 *Node
	if rf, ok := ret.Get(0).(func(string, string, *SetOptions) *Node); ok {
		r0 = rf(key, value, options)
	} else {
		// nil check avoids a panic when asserting a nil canned value to *Node.
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*Node)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(string, string, *SetOptions) error); ok {
		r1 = rf(key, value, options)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
|
package modules
import (
"html/template"
"reflect"
"strconv"
"github.com/sirupsen/logrus"
"github.com/fatih/structs"
"github.com/jinzhu/gorm"
"github.com/jinzhu/inflection"
"github.com/qor/admin"
"github.com/qor/qor"
"github.com/qor/roles"
)
// CircleQor wraps a QOR admin instance and provides helpers for
// registering resources with audit (CrudEvent) logging.
type CircleQor struct {
	QorAdmin *admin.Admin
}
// CrudEvent records an audit event for a QOR create/update/delete,
// deriving the action from the request's HTTP method and the resource
// id from the model's ID field.
func (m *CircleQor) CrudEvent(currentUserID uint, result interface{}, context *qor.Context, oldData string) {
	var actionName string
	switch context.Request.Method {
	case "POST", "PUT":
		// POST/PUT without a ResourceID is a create; with one, an update.
		if context.ResourceID == "" {
			actionName = CreateActionTypeName
		} else {
			actionName = UpdateActionTypeName
		}
	case "DELETE":
		actionName = DeleteActionTypeName
	default:
		logrus.
			WithField("method", context.Request.Method).
			WithField("resourceID", context.ResourceID).
			Warn("Unknown action")
	}
	// TODO: use context.ResourceID instead?
	var resourceID uint
	if field, ok := structs.New(result).FieldOk("ID"); ok {
		resourceID = field.Value().(uint)
	}
	event := &CrudEvent{
		ActionName:   actionName,
		ActionType:   actionName,
		ResourceID:   resourceID,
		ResourceName: structs.Name(result),
		CreatorID:    currentUserID,
		Where:        "QOR",
		UpdatedData:  ConvJsonData(result),
		OldData:      oldData,
	}
	if _, err := AddCrudEvent(event); err != nil {
		logrus.WithError(err).Error("")
	}
}
// AddResourceAndMenu registers value as a QOR admin resource under
// parentMenu with the given permission and priority, renames its menu
// entry to menuViewName, configures standard metas (booleans rendered
// as read-only checkboxes on the index page, creator/updater shown as
// user names, localized labels), hides audit columns from the edit/new
// forms, and installs save/delete handlers that record a CrudEvent.
// Returns the created resource.
func (m *CircleQor) AddResourceAndMenu(value interface{}, menuViewName string, parentMenu string, permission *roles.Permission, priority int) *admin.Resource {
	res := m.QorAdmin.AddResource(value, &admin.Config{Menu: []string{parentMenu}, Permission: permission, Priority: priority})
	// Non-singleton resources are registered under their pluralized name.
	menuName := res.Name
	if !res.Config.Singleton {
		menuName = inflection.Plural(res.Name)
	}
	menu := m.QorAdmin.GetMenu(menuName)
	menu.Name = menuViewName
	matas := res.GetMetas(nil)
	resStruct := structs.New(res.NewStruct())
	// Names of non-standard fields, appended to the index view below.
	appendAttr := []interface{}{}
	for _, mata := range matas {
		name := mata.GetName()
		switch name {
		case "ID", "CreatedAt", "CreatorID", "UpdaterID", "Name", "Description":
		default:
			appendAttr = append(appendAttr, name)
		}
		// Boolean fields: on the index listing (empty ResourceID in the
		// context) render a read-only checkbox; elsewhere return the raw
		// value so forms behave normally.
		if resStruct.Field(name).Kind() == reflect.Bool {
			res.Meta(&admin.Meta{Name: name, Setter: mata.GetSetter(), Valuer: func(result interface{}, context *qor.Context) interface{} {
				value := structs.New(result).Field(name).Value()
				if context.ResourceID == "" {
					if boolValue, ok := value.(bool); ok && boolValue {
						return template.HTML(`<input type="checkbox" checked="checked" readonly/>`)
					}
					return template.HTML(`<input type="checkbox" readonly/>`)
				}
				return value
			}})
		}
	}
	// Show the creator's user name ("작성자" = author) instead of the id.
	if meta := res.GetMeta("CreatorID"); meta != nil {
		res.Meta(&admin.Meta{Name: "CreatorID", Label: "작성자", Type: "readonly", Valuer: func(result interface{}, context *qor.Context) interface{} {
			return extractUserNameByField("CreatorID", result)
		}})
	}
	// Show the updater's user name ("최종수정자" = last modifier) and keep
	// the raw field out of the edit/new forms.
	if meta := res.GetMeta("UpdaterID"); meta != nil {
		res.EditAttrs("-UpdaterID")
		res.NewAttrs("-UpdaterID")
		res.Meta(&admin.Meta{Name: "UpdaterID", Label: "최종수정자", Type: "readonly", Valuer: func(result interface{}, context *qor.Context) interface{} {
			return extractUserNameByField("UpdaterID", result)
		}})
	}
	// Localized labels for common fields; timestamp fields are read-only.
	for _, meta := range []struct {
		FieldName string
		Label string
		Type string
	}{
		{"Description", "설명", ""},
		{"Name", "이름", ""},
		{"CreatedAt", "작성일", "readonly"},
		{"UpdatedAt", "수정일", "readonly"},
	} {
		if _, ok := resStruct.FieldOk(meta.FieldName); ok {
			res.Meta(&admin.Meta{Name: meta.FieldName, Label: meta.Label, Type: meta.Type})
		}
	}
	// Hide the audit columns from edit/new forms, but only when the model
	// carries the complete set of four.
	_, creatorIDOK := resStruct.FieldOk("CreatorID")
	_, updaterIDOK := resStruct.FieldOk("UpdaterID")
	_, createdAtIDOK := resStruct.FieldOk("CreatedAt")
	_, updatedAtIDOK := resStruct.FieldOk("UpdatedAt")
	if creatorIDOK && updaterIDOK && createdAtIDOK && updatedAtIDOK {
		res.EditAttrs("-CreatorID", "-CreatedAt", "-UpdaterID", "-UpdatedAt")
		res.NewAttrs("-CreatorID", "-CreatedAt", "-UpdaterID", "-UpdatedAt")
	}
	SetIndexAttrs(res, appendAttr...)
	// SaveHandler stamps creator/updater ids, snapshots the previous row
	// for the audit trail, saves, then records the CrudEvent asynchronously.
	res.SaveHandler = func(result interface{}, context *qor.Context) error {
		currentUserID := structs.New(context.CurrentUser).Field("ID").Value().(uint)
		if currentUserID > 0 {
			if context.ResourceID == "" {
				structs.New(result).Field("CreatorID").Set(currentUserID)
			}
			structs.New(result).Field("UpdaterID").Set(currentUserID)
		}
		// For updates, capture the stored row before it is overwritten.
		oldData := ""
		if context.ResourceID != "" {
			if resIDUint64, err := strconv.ParseUint(context.ResourceID, 10, 64); err == nil {
				oldModelItem := reflect.New(reflect.ValueOf(result).Elem().Type()).Interface()
				if err := GetItemByID(uint(resIDUint64), oldModelItem); err == nil {
					oldData = ConvJsonData(oldModelItem)
				}
			}
		}
		//https://github.com/qor/qor/blob/d696f1942afc36458ef5bc19710145ea6fa93e7e/resource/crud.go#L129
		if (context.GetDB().NewScope(result).PrimaryKeyZero() &&
			res.HasPermission(roles.Create, context)) || // has create permission
			res.HasPermission(roles.Update, context) { // has update permission
			if err := context.GetDB().Save(result).Error; err != nil {
				return err
			}
			go m.CrudEvent(currentUserID, result, context, oldData)
			return nil
		}
		return roles.ErrPermissionDenied
	}
	// DeleteHandler verifies the row exists, deletes it, and records the
	// event asynchronously.
	res.DeleteHandler = func(result interface{}, context *qor.Context) error {
		currentUserID := uint(0)
		if _, ok := result.(ModelItem); ok {
			currentUserID = structs.New(context.CurrentUser).Field("ID").Value().(uint)
		}
		if res.HasPermission(roles.Delete, context) {
			if primaryQuerySQL, primaryParams := res.ToPrimaryQueryParams(context.ResourceID, context); primaryQuerySQL != "" {
				if !context.GetDB().First(result, append([]interface{}{primaryQuerySQL}, primaryParams...)...).RecordNotFound() {
					if err := context.GetDB().Delete(result).Error; err != nil {
						return err
					}
					go m.CrudEvent(currentUserID, result, context, "")
					return nil
				}
			}
			return gorm.ErrRecordNotFound
		}
		return roles.ErrPermissionDenied
	}
	return res
}
// extractValueByField returns the value of the named exported field of
// result, or nil when result carries no such field.
//
// Fix: the previous implementation called structs.Field, which panics
// when the field does not exist, so its nil check could never trigger.
// FieldOk reports absence without panicking, matching how the rest of
// this file probes for optional fields.
func extractValueByField(fieldName string, result interface{}) interface{} {
	if field, ok := structs.New(result).FieldOk(fieldName); ok {
		return field.Value()
	}
	return nil
}
// extractUserNameByField resolves the user name for the user id stored
// in fieldName of result; it returns "-" whenever the id or name cannot
// be resolved.
func extractUserNameByField(fieldName string, result interface{}) string {
	userID, ok := extractValueByField(fieldName, result).(uint)
	if !ok {
		return "-"
	}
	name, err := GetValueByKeyOfTableName("users", "name", userID)
	if err != nil {
		return "-"
	}
	return name.(string)
}
// SetIndexAttrs sets the resource's index columns: ID and Name first,
// then the caller-supplied attributes, then the audit columns.
func SetIndexAttrs(res *admin.Resource, attr ...interface{}) {
	attrs := make([]interface{}, 0, len(attr)+6)
	attrs = append(attrs, "ID", "Name")
	attrs = append(attrs, attr...)
	attrs = append(attrs, "CreatorID", "CreatedAt", "UpdaterID", "UpdatedAt")
	res.IndexAttrs(attrs...)
}
|
package priorityqueue
import (
"github.com/sbromberger/gographs/heap"
)
// A PriorityQueue implements heap.Interface and holds Items.
type PriorityQueue []*heap.Item
// Len returns the number of items held in the queue.
func (pq PriorityQueue) Len() int { return len(pq) }
// IsEmpty reports whether the queue currently holds no items.
func (pq PriorityQueue) IsEmpty() bool { return pq.Len() == 0 }
// Less orders by ascending Priority, making this a min-heap: Pop yields
// the item with the LOWEST Priority.
// NOTE(review): the original comment claimed the opposite ("greater
// than here" for highest-first), but the code compares with < — the
// code is kept as-is and the comment corrected.
func (pq PriorityQueue) Less(i, j int) bool {
	return pq[i].Priority < pq[j].Priority
}
// Swap exchanges the items at positions i and j and keeps their
// heap-index fields consistent with their new slots.
func (pq PriorityQueue) Swap(i, j int) {
	pq[j], pq[i] = pq[i], pq[j]
	pq[i].Index, pq[j].Index = i, j
}
// Push appends item to the queue, recording its slot in Index.
func (pq *PriorityQueue) Push(item *heap.Item) {
	item.Index = len(*pq)
	*pq = append(*pq, item)
}
// Pop removes and returns the last item of the queue, marking it as no
// longer resident by setting Index to -1.
func (pq *PriorityQueue) Pop() *heap.Item {
	last := len(*pq) - 1
	item := (*pq)[last]
	item.Index = -1 // for safety
	*pq = (*pq)[:last]
	return item
}
// update modifies the Priority and Value of an Item already in the
// queue, then restores heap ordering with heap.Fix.
func (pq *PriorityQueue) update(item *heap.Item, Value uint32, Priority float32) {
	item.Value = Value
	item.Priority = Priority
	heap.Fix(pq, item.Index)
}
|
package main
import (
"fmt"
"runtime"
"time"
)
// main prints the current time once per second for eight ticks, then
// exits. Fix: the original never called Stop on the ticker, leaking its
// resources for the remainder of the process.
func main() {
	quit := make(chan bool)
	fmt.Println("当前时间:", time.Now())
	myTicker := time.NewTicker(time.Second) // periodic 1s ticker
	i := 0
	go func() {
		for {
			i++
			nowTime := <-myTicker.C
			fmt.Println("当前时间:", nowTime)
			if i == 8 {
				quit <- true
				// Terminate only this goroutine; main resumes at <-quit.
				runtime.Goexit()
			}
		}
	}()
	<-quit
	// Release the ticker's resources before exiting.
	myTicker.Stop()
}
|
package db
import (
"fmt"
"time"
"github.com/DynamoGraph/dbConn"
param "github.com/DynamoGraph/dygparam"
slog "github.com/DynamoGraph/syslog"
"github.com/DynamoGraph/util"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"github.com/aws/aws-sdk-go/service/dynamodb/expression"
)
// Equality enumerates the comparison operators accepted by the GSI
// query helpers below.
type Equality int
const (
	// logid prefixes every syslog entry emitted by this package.
	logid = "gqlDB: "
)
// Comparison operators (values start at 1 so the zero value is invalid).
const (
	EQ Equality = iota + 1
	LT
	GT
	GE
	LE
)
// api for GQL query functions
// NodeResult is one row returned by a GSI query: the node's UID, the
// sort key that matched, and the node's type name.
type NodeResult struct {
	PKey util.UID
	SortK string
	Ty string
}
type (
	// QResult is the full result set of a GSI query.
	QResult []NodeResult
	// AttrName names a node attribute; it is used as the value of the
	// partition key "P" in the index queries below.
	AttrName = string
)
var (
	// dynSrv is the package-wide DynamoDB client, created in init.
	dynSrv *dynamodb.DynamoDB
	err error
	//tynames []tyNames
	//tyShortNm map[string]string
)
// logerr writes e to the system log; when the optional panic_ flag is
// true it logs with the panic marker and then panics with e.
func logerr(e error, panic_ ...bool) {
	mustPanic := len(panic_) > 0 && panic_[0]
	if mustPanic {
		slog.Log(logid, e.Error(), true)
		panic(e)
	}
	slog.Log(logid, e.Error())
}
// syslog writes s to the system log under this package's log id.
func syslog(s string) {
	slog.Log(logid, s)
}
// init establishes the package-wide DynamoDB connection.
func init() {
	dynSrv = dbConn.New()
}
// GSIQueryN queries the P_N global secondary index for items whose
// partition key P equals attr and whose numeric sort key N satisfies
// the comparison op against lv.
func GSIQueryN(attr AttrName, lv float64, op Equality) (QResult, error) {
	var keyC expression.KeyConditionBuilder
	//
	// The partition-key condition is always P = attr; op selects the
	// sort-key comparison applied to N.
	//
	switch op {
	case EQ:
		keyC = expression.KeyAnd(expression.Key("P").Equal(expression.Value(attr)), expression.Key("N").Equal(expression.Value(lv)))
	case LT:
		keyC = expression.KeyAnd(expression.Key("P").Equal(expression.Value(attr)), expression.Key("N").LessThan(expression.Value(lv)))
	case GT:
		keyC = expression.KeyAnd(expression.Key("P").Equal(expression.Value(attr)), expression.Key("N").GreaterThan(expression.Value(lv)))
	case GE:
		keyC = expression.KeyAnd(expression.Key("P").Equal(expression.Value(attr)), expression.Key("N").GreaterThanEqual(expression.Value(lv)))
	case LE:
		keyC = expression.KeyAnd(expression.Key("P").Equal(expression.Value(attr)), expression.Key("N").LessThanEqual(expression.Value(lv)))
	default:
		// Fix: an unknown op previously fell through with a zero-value
		// key condition, surfacing later as an opaque builder error.
		return nil, newDBExprErr("GSIS", attr, "", fmt.Errorf("GSIQueryN: unsupported equality operator %v", op))
	}
	expr, err := expression.NewBuilder().WithKeyCondition(keyC).Build()
	if err != nil {
		return nil, newDBExprErr("GSIS", attr, "", err)
	}
	//
	input := &dynamodb.QueryInput{
		KeyConditionExpression:    expr.KeyCondition(),
		FilterExpression:          expr.Filter(),
		ExpressionAttributeNames:  expr.Names(),
		ExpressionAttributeValues: expr.Values(),
	}
	input = input.SetTableName(param.GraphTable).SetIndexName("P_N").SetReturnConsumedCapacity("TOTAL")
	//
	t0 := time.Now()
	result, err := dynSrv.Query(input)
	t1 := time.Now()
	if err != nil {
		return nil, newDBSysErr("GSIS", "Query", err)
	}
	// Fix: the original message said "index P_S" although this function
	// queries index P_N.
	syslog(fmt.Sprintf("GSIS:consumed capacity for Query index P_N, %s. ItemCount %d Duration: %s ", result.ConsumedCapacity, len(result.Items), t1.Sub(t0)))
	//
	if int(*result.Count) == 0 {
		return nil, newDBNoItemFound("GSIS", attr, "", "Query") //TODO add lv
	}
	//
	qresult := make(QResult, len(result.Items))
	err = dynamodbattribute.UnmarshalListOfMaps(result.Items, &qresult)
	if err != nil {
		return nil, newDBUnmarshalErr("GSIS", attr, "", "UnmarshalListOfMaps", err)
	}
	//
	return qresult, nil
}
// GSIQueryS queries the P_S global secondary index for items whose
// partition key P equals attr and whose string sort key S satisfies the
// comparison op against lv.
func GSIQueryS(attr AttrName, lv string, op Equality) (QResult, error) {
	// Partition key is always P = attr; op picks the sort-key comparison.
	pCond := expression.Key("P").Equal(expression.Value(attr))
	sKey := expression.Key("S")
	sVal := expression.Value(lv)
	var keyC expression.KeyConditionBuilder
	switch op {
	case EQ:
		keyC = expression.KeyAnd(pCond, sKey.Equal(sVal))
	case LT:
		keyC = expression.KeyAnd(pCond, sKey.LessThan(sVal))
	case GT:
		keyC = expression.KeyAnd(pCond, sKey.GreaterThan(sVal))
	case GE:
		keyC = expression.KeyAnd(pCond, sKey.GreaterThanEqual(sVal))
	case LE:
		keyC = expression.KeyAnd(pCond, sKey.LessThanEqual(sVal))
	}
	expr, err := expression.NewBuilder().WithKeyCondition(keyC).Build()
	if err != nil {
		return nil, newDBExprErr("GSIS", attr, "", err)
	}
	input := (&dynamodb.QueryInput{
		KeyConditionExpression:    expr.KeyCondition(),
		FilterExpression:          expr.Filter(),
		ExpressionAttributeNames:  expr.Names(),
		ExpressionAttributeValues: expr.Values(),
	}).SetTableName(param.GraphTable).SetIndexName("P_S").SetReturnConsumedCapacity("TOTAL")
	start := time.Now()
	result, err := dynSrv.Query(input)
	if err != nil {
		return nil, newDBSysErr("GSIS", "Query", err)
	}
	elapsed := time.Since(start)
	syslog(fmt.Sprintf("GSIS:consumed capacity for Query index P_S, %s. ItemCount %d Duration: %s ", result.ConsumedCapacity, len(result.Items), elapsed))
	if int(*result.Count) == 0 {
		return nil, newDBNoItemFound("GSIS", attr, lv, "Query")
	}
	qresult := make(QResult, len(result.Items))
	if err := dynamodbattribute.UnmarshalListOfMaps(result.Items, &qresult); err != nil {
		return nil, newDBUnmarshalErr("GSIS", attr, lv, "UnmarshalListOfMaps", err)
	}
	return qresult, nil
}
// GSIhasS returns every item on the P_S index whose partition key P
// equals attr; a nil result with nil error means nothing matched.
func GSIhasS(attr AttrName) (QResult, error) {
	syslog("GSIhasS: consumed capacity for Query ")
	keyC := expression.Key("P").Equal(expression.Value(attr))
	expr, err := expression.NewBuilder().WithKeyCondition(keyC).Build()
	if err != nil {
		return nil, newDBExprErr("GSIS", attr, "", err)
	}
	input := (&dynamodb.QueryInput{
		KeyConditionExpression:    expr.KeyCondition(),
		FilterExpression:          expr.Filter(),
		ExpressionAttributeNames:  expr.Names(),
		ExpressionAttributeValues: expr.Values(),
	}).SetTableName(param.GraphTable).SetIndexName("P_S").SetReturnConsumedCapacity("TOTAL")
	start := time.Now()
	result, err := dynSrv.Query(input)
	elapsed := time.Since(start)
	if err != nil {
		return nil, newDBSysErr("GSIhasS", "Query", err)
	}
	syslog(fmt.Sprintf("GSIhasS: consumed capacity for Query index P_S, %s. ItemCount %d Duration: %s ", result.ConsumedCapacity, len(result.Items), elapsed))
	if int(*result.Count) == 0 {
		return nil, nil
	}
	qresult := make(QResult, len(result.Items))
	if err := dynamodbattribute.UnmarshalListOfMaps(result.Items, &qresult); err != nil {
		return nil, newDBUnmarshalErr("GSIhasS", attr, "", "UnmarshalListOfMaps", err)
	}
	return qresult, nil
}
// GSIhasN returns every item on the P_N index whose partition key P
// equals attr; a nil result with nil error means nothing matched.
func GSIhasN(attr AttrName) (QResult, error) {
	syslog("GSIhasN: consumed capacity for Query ")
	var keyC expression.KeyConditionBuilder
	// Partition-key-only query: every item with P = attr on index P_N.
	keyC = expression.Key("P").Equal(expression.Value(attr))
	expr, err := expression.NewBuilder().WithKeyCondition(keyC).Build()
	if err != nil {
		return nil, newDBExprErr("GSIhasN", attr, "", err)
	}
	//
	input := &dynamodb.QueryInput{
		KeyConditionExpression:    expr.KeyCondition(),
		FilterExpression:          expr.Filter(),
		ExpressionAttributeNames:  expr.Names(),
		ExpressionAttributeValues: expr.Values(),
	}
	input = input.SetTableName(param.GraphTable).SetIndexName("P_N").SetReturnConsumedCapacity("TOTAL")
	//
	t0 := time.Now()
	result, err := dynSrv.Query(input)
	t1 := time.Now()
	if err != nil {
		return nil, newDBSysErr("GSIhasN", "Query", err)
	}
	// Fix: the original message was copied from GSIQueryS and claimed
	// "GSIS ... index P_S" although this function queries index P_N.
	syslog(fmt.Sprintf("GSIhasN: consumed capacity for Query index P_N, %s. ItemCount %d Duration: %s ", result.ConsumedCapacity, len(result.Items), t1.Sub(t0)))
	//
	if int(*result.Count) == 0 {
		return nil, nil
	}
	//
	qresult := make(QResult, len(result.Items))
	err = dynamodbattribute.UnmarshalListOfMaps(result.Items, &qresult)
	if err != nil {
		return nil, newDBUnmarshalErr("GSIhasN", attr, "", "UnmarshalListOfMaps", err)
	}
	//
	return qresult, nil
}
|
package main
import (
"math/rand"
)
// GenerateRandomDate returns a random (year, month, day) with year in
// [1, 2018], month in [1, 12], and day valid for that month and year.
func GenerateRandomDate() (int, int, int) {
	year := rand.Intn(2018) + 1
	month := rand.Intn(12) + 1
	daysInMonth := 31
	switch month {
	case 2:
		// Gregorian leap-year rule: divisible by 4, except century years
		// unless divisible by 400. Fix: the original tested only
		// year%400, so e.g. 2016 got 28 days and 1900 got 29.
		if year%4 == 0 && (year%100 != 0 || year%400 == 0) {
			daysInMonth = 29
		} else {
			daysInMonth = 28
		}
	case 4, 6, 9, 11:
		daysInMonth = 30
	}
	day := rand.Intn(daysInMonth) + 1
	return year, month, day
}
|
package stuff
import (
"fmt"
"log"
"os"
"github.com/boltdb/bolt"
// "github.com/mrityunjaygr8/go-pass/stuff"
)
// Item represents one stored credential: a site URL together with the
// username/password pair saved for it.
type Item struct {
	URL string
	Username string
	Password string
}
// AddItem stores item's username/password pair in a bucket named after
// its URL, creating the bucket if necessary, and returns any error from
// the transaction.
func (s *Store) AddItem(item Item) error {
	return s.db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte(item.URL))
		if err != nil {
			// Fix: returning the error rolls back the transaction and
			// lets the caller decide; the original called log.Fatal here,
			// killing the whole process on a recoverable failure.
			return err
		}
		return b.Put([]byte(item.Username), []byte(item.Password))
	})
}
// GetItem prints the password stored for username under URL; when
// either the URL bucket or the username entry is missing it prints a
// hint and exits the process with status 1.
func (s *Store) GetItem(URL, username string) error {
	return s.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(URL))
		if bucket == nil {
			fmt.Println("There are no saved credentials for this URL")
			fmt.Println("You can create new credentials using `go-pass add --URL url_name --username user_name --password pass_word`")
			os.Exit(1)
		}
		secret := bucket.Get([]byte(username))
		if secret != nil {
			fmt.Println(string(secret))
			return nil
		}
		fmt.Println("There are no saved credentials for this username on this URL")
		fmt.Println("You can create new credentials using `go-pass add --URL url_name --username user_name --password pass_word`")
		os.Exit(1)
		return nil
	})
}
// GetAllURLUsers prints the URL followed by every username stored for
// it; when the URL bucket is missing it prints a hint and exits with
// status 1.
func (s *Store) GetAllURLUsers(url string) error {
	return s.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(url))
		if bucket == nil {
			fmt.Println("There are no saved credentials for this URL")
			fmt.Println("You can create new credentials using `go-pass add --URL url_name --username user_name --password pass_word`")
			os.Exit(1)
		}
		fmt.Println(url)
		err := bucket.ForEach(func(user, _ []byte) error {
			fmt.Printf("|--%s\n", string(user))
			return nil
		})
		if err != nil {
			log.Fatal(err)
		}
		return nil
	})
}
// GetAllURLs prints the name of every bucket (site URL) that has
// credentials stored.
func (s *Store) GetAllURLs() error {
	return s.db.View(func(tx *bolt.Tx) error {
		err := tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
			fmt.Println(string(name))
			return nil
		})
		if err != nil {
			log.Fatal(err)
		}
		return nil
	})
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package common
import (
"net"
"regexp"
"github.com/pkg/errors"
)
// CidrFirstIP returns the first IP of the provided subnet.
func CidrFirstIP(cidr net.IP) net.IP {
for j := len(cidr) - 1; j >= 0; j-- {
cidr[j]++
if cidr[j] > 0 {
break
}
}
return cidr
}
// CidrStringFirstIP returns the first IP of the provided subnet string.
// Returns an error if the string cannot be parsed as CIDR notation.
func CidrStringFirstIP(ip string) (net.IP, error) {
	parsed, _, err := net.ParseCIDR(ip)
	if err != nil {
		return nil, err
	}
	return CidrFirstIP(parsed), nil
}
// IP4BroadcastAddress returns the broadcast address for the given IP subnet.
func IP4BroadcastAddress(n *net.IPNet) net.IP {
// see https://groups.google.com/d/msg/golang-nuts/IrfXFTUavXE/8YwzIOBwJf0J
ip4 := n.IP.To4()
if ip4 == nil {
return nil
}
last := make(net.IP, len(ip4))
copy(last, ip4)
for i := range ip4 {
last[i] |= ^n.Mask[i]
}
return last
}
// GetVNETSubnetIDComponents extracts the subscription, resource group,
// VNET name, and subnet name from an Azure vnetSubnetID resource path.
func GetVNETSubnetIDComponents(vnetSubnetID string) (string, string, string, string, error) {
	const pattern = `^\/subscriptions\/([^\/]*)\/resourceGroups\/([^\/]*)\/providers\/Microsoft.Network\/virtualNetworks\/([^\/]*)\/subnets\/([^\/]*)$`
	re, err := regexp.Compile(pattern)
	if err != nil {
		return "", "", "", "", err
	}
	parts := re.FindStringSubmatch(vnetSubnetID)
	// Expect the full match plus the four capture groups.
	if len(parts) != 5 {
		return "", "", "", "", errors.New("Unable to parse vnetSubnetID. Please use a vnetSubnetID with format /subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME")
	}
	return parts[1], parts[2], parts[3], parts[4], nil
}
|
package entity
import "time"
// Product is the persisted catalog item entity, serialized to JSON for
// the API and BSON for storage.
type Product struct {
	ID ID `json:"id" bson:"_id"`
	Version Version `json:"version" bson:"_V"`
	Name string `json:"name" bson:"name"`
	Description string `json:"description,omitempty" bson:"description,omitempty"`
	Slug string `json:"slug,omitempty" bson:"slug,omitempty"`
	Location string `json:"location,omitempty" bson:"location,omitempty"`
	Image string `json:"image,omitempty" bson:"image,omitempty"`
	Brand string `json:"brand,omitempty" bson:"brand,omitempty"`
	Category string `json:"category,omitempty" bson:"category,omitempty"`
	// NOTE(review): int8 caps Price at 127 — confirm the intended
	// unit/range before relying on this type.
	Price int8 `json:"price,omitempty" bson:"price,omitempty"`
	Status string `json:"status,omitempty" bson:"status,omitempty"`
	Seller string `json:"seller,omitempty" bson:"seller,omitempty"`
	CreatedAt time.Time `json:"createdAt" bson:"createdAt"`
}
// UpdateProduct carries the mutable subset of Product fields for
// partial updates; the structs ",omitempty" tags keep zero-valued
// fields out of the generated update map.
type UpdateProduct struct {
	Version Version `bson:"_V,omitempty"`
	Name string `bson:"name,omitempty" structs:",omitempty"`
	Description string `bson:"description,omitempty" structs:",omitempty"`
	Slug string `bson:"slug,omitempty" structs:",omitempty"`
	// Fix: Location previously kept a json tag and lacked the
	// structs:",omitempty" tag used by every sibling field, so an empty
	// Location was not omitted from structs-generated update maps.
	Location string `bson:"location,omitempty" structs:",omitempty"`
	Image string `bson:"image,omitempty" structs:",omitempty"`
	Brand string `bson:"brand,omitempty" structs:",omitempty"`
	Category string `bson:"category,omitempty" structs:",omitempty"`
	Price int8 `bson:"price,omitempty" structs:",omitempty"`
	Status string `bson:"status,omitempty" structs:",omitempty"`
}
// Validate checks required Product fields and returns human-readable
// error strings; an empty result means the product is valid.
// TODO: better validation https://medium.com/@apzuk3/input-validation-in-golang-bc24cdec1835
func (p *Product) Validate() []string {
	var errs []string
	if p.Name == "" {
		errs = append(errs, "Name : Name is required")
	}
	// Price must be at least 1. p.Price < 1 already covers the zero
	// value, so the original redundant (p.Price == 0) || ... is collapsed.
	if p.Price < 1 {
		errs = append(errs, "Price :- Provide Valid Price")
	}
	return errs
}
|
// Copyright 2020 Ross Light
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
package postgrestest
import (
"bytes"
"context"
"database/sql"
"fmt"
"os/exec"
"strings"
"testing"
"time"
)
// singleTestTime bounds how long any single test may run.
const singleTestTime = 30 * time.Second
// TestStart verifies that Start brings up a working server whose
// default database answers a trivial query.
func TestStart(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), singleTestTime)
	defer cancel()
	srv, err := Start(ctx)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(srv.Cleanup)
	db, err := sql.Open("postgres", srv.DefaultDatabase())
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	// One connection is enough and keeps the query path deterministic.
	db.SetMaxOpenConns(1)
	var result int
	if err := db.QueryRowContext(ctx, "SELECT 1;").Scan(&result); err != nil {
		t.Fatal("Test query:", err)
	}
	if result != 1 {
		t.Errorf("Query returned %d; want 1", result)
	}
}
// TestNewDatabase verifies that successive NewDatabase calls hand out
// distinct databases: creating the same table twice must not conflict.
func TestNewDatabase(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), singleTestTime)
	defer cancel()
	srv, err := Start(ctx)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(srv.Cleanup)
	const createTableStmt = `CREATE TABLE foo (id SERIAL PRIMARY KEY);`
	db1, err := srv.NewDatabase(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer db1.Close()
	_, err = db1.ExecContext(ctx, createTableStmt)
	if err != nil {
		t.Fatal("CREATE TABLE in database #1:", err)
	}
	db2, err := srv.NewDatabase(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer db2.Close()
	// If this fails, it likely means that the server is returning the same database.
	_, err = db2.ExecContext(ctx, createTableStmt)
	if err != nil {
		t.Fatal("CREATE TABLE in database #2:", err)
	}
}
// BenchmarkStart measures the cost of bringing up a fresh server per
// iteration; registered cleanups run after the benchmark completes.
func BenchmarkStart(b *testing.B) {
	ctx := context.Background()
	for i := 0; i < b.N; i++ {
		srv, err := Start(ctx)
		if err != nil {
			b.Fatal(err)
		}
		b.Cleanup(srv.Cleanup)
	}
}
// BenchmarkCreateDatabase measures per-database creation cost on a
// single running server; startup cost is excluded via ResetTimer.
func BenchmarkCreateDatabase(b *testing.B) {
	ctx := context.Background()
	srv, err := Start(ctx)
	if err != nil {
		b.Fatal(err)
	}
	b.Cleanup(srv.Cleanup)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := srv.CreateDatabase(ctx)
		if err != nil {
			b.Fatal(err)
		}
	}
}
// BenchmarkDocker benchmarks the equivalent workflow against a plain
// Docker "postgres" container as a baseline; skipped when Docker is
// not installed.
func BenchmarkDocker(b *testing.B) {
	dockerExe, err := exec.LookPath("docker")
	if err != nil {
		b.Skip("Could not find Docker:", err)
	}
	// Pull the image up front so pull time is not billed to a sub-benchmark.
	pullCmd := exec.Command(dockerExe, "pull", "postgres")
	pullOutput := new(bytes.Buffer)
	pullCmd.Stdout = pullOutput
	pullCmd.Stderr = pullOutput
	err = pullCmd.Run()
	b.Log(pullOutput)
	if err != nil {
		b.Fatal("docker pull:", err)
	}
	// Container start/connect cost per iteration.
	b.Run("Start", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			db, cleanup, err := startDocker(b, dockerExe)
			if err != nil {
				b.Fatal(err)
			}
			b.Cleanup(cleanup)
			db.Close()
		}
	})
	// Database creation cost on one running container.
	b.Run("CreateDatabase", func(b *testing.B) {
		db, cleanup, err := startDocker(b, dockerExe)
		if err != nil {
			b.Fatal(err)
		}
		b.Cleanup(cleanup)
		defer db.Close()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			dbName, err := randomString(16)
			if err != nil {
				b.Fatal(err)
			}
			_, err = db.Exec("CREATE DATABASE \"" + dbName + "\";")
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// logger is the minimal logging surface startDocker needs, letting it
// accept both *testing.T and *testing.B.
type logger interface {
	Log(...interface{})
}
// startDocker launches a disposable "postgres" container published on a
// free local TCP port and returns an open *sql.DB for it plus a cleanup
// function that stops the container.
func startDocker(l logger, dockerExe string) (db *sql.DB, cleanup func(), _ error) {
	port, err := findUnusedTCPPort()
	if err != nil {
		return nil, nil, err
	}
	c := exec.Command(dockerExe, "run",
		"--rm",
		"--detach",
		fmt.Sprintf("--publish=127.0.0.1:%d:5432", port),
		"--env=POSTGRES_PASSWORD=xyzzy",
		"postgres")
	imageID := new(strings.Builder)
	c.Stdout = imageID
	runLog := new(bytes.Buffer)
	c.Stderr = runLog
	if err := c.Run(); err != nil {
		l.Log(runLog)
		return nil, nil, err
	}
	cleanup = func() {
		stopLog := new(bytes.Buffer)
		c := exec.Command("docker", "stop", "--", strings.TrimSpace(imageID.String()))
		c.Stdout = stopLog
		c.Stderr = stopLog
		if err := c.Run(); err != nil {
			// Fix: the original logged the same error twice here.
			l.Log("docker stop:", err)
		}
	}
	dsn := fmt.Sprintf("postgres://postgres:xyzzy@localhost:%d/postgres?sslmode=disable", port)
	db, err = sql.Open("postgres", dsn)
	if err != nil {
		cleanup()
		return nil, nil, err
	}
	db.SetMaxOpenConns(1)
	// Wait for the server to accept connections, but bound the wait:
	// the original looped forever with no sleep if the container never
	// became ready.
	deadline := time.Now().Add(2 * time.Minute)
	for {
		err := db.Ping()
		if err == nil {
			return db, cleanup, nil
		}
		if time.Now().After(deadline) {
			db.Close()
			cleanup()
			return nil, nil, fmt.Errorf("postgres did not become ready: %v", err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}
|
package channels
import (
"fmt"
)
// BlockChanel demonstrates buffered-channel sends that complete without
// blocking, plus (commented out) operations on a nil channel that would
// block forever.
func BlockChanel(){
	ch1 := make(chan int,1)
	// A send is written as: the channel variable, the <- operator, then
	// the value to send, ideally separated by spaces.
	ch1 <- 1 // send into the buffered channel (capacity 1)
	//ch1 <- 2
	ch2 := make(chan int,1)
	ch2 <- 2
	// Example 3.
	var ch3 chan int
	//ch3 <- 1 // ch3 is nil, so this send would block forever!
	//<-ch3 // ch3 is nil, so this receive would block forever!
	_ = ch3
}
// UseChanel sends three values into a buffered channel and prints the
// first one received back from it.
func UseChanel(){
	ch := make(chan int, 3)
	for _, v := range []int{2, 1, 3} {
		ch <- v
	}
	first := <-ch // FIFO: this is the first value sent (2)
	fmt.Printf("The first element received from channel ch1:%v\n", first)
}
// UseChannelPanic shows a sender closing a channel and a receiver
// detecting the close and terminating cleanly.
func UseChannelPanic(){
	ch := make(chan int, 2)
	// Sender goroutine: push 0..9, then close the channel.
	go func(){
		for i := 0; i < 10; i++ {
			fmt.Printf("Sender:sending element:%v\n", i)
			ch <- i
		}
		fmt.Println("Sender:close the channel...")
		close(ch)
	}()
	// Receiver: range exits once the channel is closed and drained.
	for element := range ch {
		fmt.Printf("Receiver:received an element:%v\n", element)
	}
	fmt.Printf("Receiver:closed channel")
	fmt.Println("End.")
}
// UseChannelArray contrasts sending a slice over a channel (the
// receiver aliases the sender's backing array) with sending an array
// (the receiver gets an independent copy).
func UseChannelArray(){
	ch := make(chan []int,1)
	s1 := []int{1,2,3}
	ch <- s1
	s2 := <- ch
	s2[0] = 100 // also visible through s1 — slices share a backing array
	fmt.Println(s1,s2)
	ch2 := make(chan [3]int,1)
	s3 := [3]int{1,2,3}
	ch2 <- s3
	s4 := <- ch2
	s3[0] = 100 // s4 is unaffected — arrays are copied on send
	fmt.Println(s3,s4)
}
// sum adds the elements of s and sends the total on c.
func sum(s []int,c chan int){
	total := 0
	for _, v := range s {
		total += v
	}
	c <- total // deliver the partial sum
}
// GetSumArray splits a slice in half, sums the halves concurrently via
// sum, and prints both partial sums plus the total.
func GetSumArray(){
	nums := []int{1,4,5,-2,-7,9,12}
	results := make(chan int)
	mid := len(nums) / 2
	go sum(nums[:mid], results)
	go sum(nums[mid:], results)
	x, y := <-results, <-results // arrival order is not deterministic
	fmt.Println(x, y, x+y)
}
|
package storage
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// This is the latest schema version for the purpose of tests.
	// Bump it whenever a new migration is added.
	LatestVersion = 11
)
// TestShouldObtainCorrectUpMigrations checks that migrating up from 0
// to the latest version yields one migration per version in ascending
// order.
func TestShouldObtainCorrectUpMigrations(t *testing.T) {
	ver, err := latestMigrationVersion(providerSQLite)
	require.NoError(t, err)
	assert.Equal(t, LatestVersion, ver)
	migrations, err := loadMigrations(providerSQLite, 0, ver)
	require.NoError(t, err)
	assert.Len(t, migrations, ver)
	for i, migration := range migrations {
		assert.Equal(t, i+1, migration.Version)
	}
}
// TestShouldObtainCorrectDownMigrations checks that migrating down from
// the latest version to 0 yields one migration per version in
// descending order.
func TestShouldObtainCorrectDownMigrations(t *testing.T) {
	ver, err := latestMigrationVersion(providerSQLite)
	require.NoError(t, err)
	assert.Equal(t, LatestVersion, ver)
	migrations, err := loadMigrations(providerSQLite, ver, 0)
	require.NoError(t, err)
	assert.Len(t, migrations, ver)
	for i, migration := range migrations {
		assert.Equal(t, ver-i, migration.Version)
	}
}
// TestMigrationShouldGetSpecificMigrationIfAvaliable checks that asking
// for a single up or down step (8<->9) returns exactly one migration
// with the expected direction, version, and provider.
// NOTE(review): "Avaliable" is a typo for "Available"; renaming would
// change the reported test name, so it is left unchanged here.
func TestMigrationShouldGetSpecificMigrationIfAvaliable(t *testing.T) {
	upMigrationsPostgreSQL, err := loadMigrations(providerPostgres, 8, 9)
	require.NoError(t, err)
	require.Len(t, upMigrationsPostgreSQL, 1)
	assert.True(t, upMigrationsPostgreSQL[0].Up)
	assert.Equal(t, 9, upMigrationsPostgreSQL[0].Version)
	// PostgreSQL has a provider-specific up migration for version 9...
	assert.Equal(t, providerPostgres, upMigrationsPostgreSQL[0].Provider)
	upMigrationsSQLite, err := loadMigrations(providerSQLite, 8, 9)
	require.NoError(t, err)
	require.Len(t, upMigrationsSQLite, 1)
	assert.True(t, upMigrationsSQLite[0].Up)
	assert.Equal(t, 9, upMigrationsSQLite[0].Version)
	// ...while SQLite falls back to the provider-agnostic migration.
	assert.Equal(t, providerAll, upMigrationsSQLite[0].Provider)
	downMigrationsPostgreSQL, err := loadMigrations(providerPostgres, 9, 8)
	require.NoError(t, err)
	require.Len(t, downMigrationsPostgreSQL, 1)
	assert.False(t, downMigrationsPostgreSQL[0].Up)
	assert.Equal(t, 9, downMigrationsPostgreSQL[0].Version)
	assert.Equal(t, providerAll, downMigrationsPostgreSQL[0].Provider)
	downMigrationsSQLite, err := loadMigrations(providerSQLite, 9, 8)
	require.NoError(t, err)
	require.Len(t, downMigrationsSQLite, 1)
	assert.False(t, downMigrationsSQLite[0].Up)
	assert.Equal(t, 9, downMigrationsSQLite[0].Version)
	assert.Equal(t, providerAll, downMigrationsSQLite[0].Provider)
}
// TestMigrationShouldReturnErrorOnSame checks that requesting a
// migration whose target equals the current version fails and yields
// no migrations.
func TestMigrationShouldReturnErrorOnSame(t *testing.T) {
	migrations, err := loadMigrations(providerPostgres, 1, 1)
	assert.Nil(t, migrations)
	assert.EqualError(t, err, "current version is same as migration target, no action being taken")
}
// TestMigrationsShouldNotBeDuplicatedPostgres checks that the
// PostgreSQL migration set contains no duplicate versions in either
// direction.
func TestMigrationsShouldNotBeDuplicatedPostgres(t *testing.T) {
	migrations, err := loadMigrations(providerPostgres, 0, SchemaLatest)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(migrations))
	// Fix: the original used make([]int, len(migrations)), pre-filling
	// the "seen" slice with zero entries that every version was then
	// compared against; start empty and grow only by appending.
	previousUp := make([]int, 0, len(migrations))
	for _, migration := range migrations {
		assert.True(t, migration.Up)
		for _, v := range previousUp {
			assert.NotEqual(t, v, migration.Version)
		}
		previousUp = append(previousUp, migration.Version)
	}
	migrations, err = loadMigrations(providerPostgres, SchemaLatest, 0)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(migrations))
	previousDown := make([]int, 0, len(migrations))
	for _, migration := range migrations {
		assert.False(t, migration.Up)
		for _, v := range previousDown {
			assert.NotEqual(t, v, migration.Version)
		}
		previousDown = append(previousDown, migration.Version)
	}
}
// TestMigrationsShouldNotBeDuplicatedMySQL checks that the MySQL
// migration set contains no duplicate versions in either direction.
func TestMigrationsShouldNotBeDuplicatedMySQL(t *testing.T) {
	migrations, err := loadMigrations(providerMySQL, 0, SchemaLatest)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(migrations))
	// Fix: the original pre-sized the "seen" slice with make([]int,
	// len(migrations)), filling it with zeroes that every version was
	// compared against; start empty instead.
	previousUp := make([]int, 0, len(migrations))
	for _, migration := range migrations {
		assert.True(t, migration.Up)
		for _, v := range previousUp {
			assert.NotEqual(t, v, migration.Version)
		}
		previousUp = append(previousUp, migration.Version)
	}
	migrations, err = loadMigrations(providerMySQL, SchemaLatest, 0)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(migrations))
	previousDown := make([]int, 0, len(migrations))
	for _, migration := range migrations {
		assert.False(t, migration.Up)
		for _, v := range previousDown {
			assert.NotEqual(t, v, migration.Version)
		}
		previousDown = append(previousDown, migration.Version)
	}
}
// TestMigrationsShouldNotBeDuplicatedSQLite checks that the SQLite
// migration set contains no duplicate versions in either direction.
func TestMigrationsShouldNotBeDuplicatedSQLite(t *testing.T) {
	migrations, err := loadMigrations(providerSQLite, 0, SchemaLatest)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(migrations))
	// Fix: the original pre-sized the "seen" slice with make([]int,
	// len(migrations)), filling it with zeroes that every version was
	// compared against; start empty instead.
	previousUp := make([]int, 0, len(migrations))
	for _, migration := range migrations {
		assert.True(t, migration.Up)
		for _, v := range previousUp {
			assert.NotEqual(t, v, migration.Version)
		}
		previousUp = append(previousUp, migration.Version)
	}
	migrations, err = loadMigrations(providerSQLite, SchemaLatest, 0)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(migrations))
	previousDown := make([]int, 0, len(migrations))
	for _, migration := range migrations {
		assert.False(t, migration.Up)
		for _, v := range previousDown {
			assert.NotEqual(t, v, migration.Version)
		}
		previousDown = append(previousDown, migration.Version)
	}
}
|
package classfile
// ConstantInfo is implemented by every constant-pool entry type;
// readInfo parses the entry's payload from the class file (the leading
// tag byte has already been consumed by readConstantInfo).
type ConstantInfo interface {
	readInfo(reader *ClassReader)
}
// ConstantPool
/*
The constant pool occupies a large part of a class file. It stores all kinds
of constants: numeric and string literals, class and interface names, field
and method names, and so on.

Because the number of constants is not fixed, the pool begins with a u2
value, constant_pool_count, giving the pool capacity. Every entry in the
pool is a table whose leading tag byte identifies its type:

	Table type                  Tag  Description
	CONSTANT_Utf8                1   UTF-8 encoded Unicode string
	CONSTANT_Integer             3   int literal
	CONSTANT_Float               4   float literal
	CONSTANT_Long                5   long literal
	CONSTANT_Double              6   double literal
	CONSTANT_Class               7   symbolic reference to a class or interface
	CONSTANT_String              8   reference to a String literal
	CONSTANT_Fieldref            9   symbolic reference to a field
	CONSTANT_Methodref          10   symbolic reference to a class method
	CONSTANT_InterfaceMethodref 11   symbolic reference to an interface method
	CONSTANT_NameAndType        12   partial symbolic reference to a field or method
	CONSTANT_MethodHandle       15   reference to a method handle
	CONSTANT_MethodType         16   reference to a method type
	CONSTANT_Dynamic            17   reference to a dynamically-computed constant
	CONSTANT_InvokeDynamic      18   reference to a dynamic call site
	...

Constants fall into two groups: literals (numbers and strings) and symbolic
references (class and interface names, field and method information). Apart
from the literals, every constant points, directly or indirectly, at a
CONSTANT_Utf8_info entry through an index.
*/
type ConstantPool []ConstantInfo
// readConstantPool reads the entire constant pool from the class file.
//
// Layout:
//	u2      constant_pool_count;
//	cp_info constant_pool[constant_pool_count-1];
func readConstantPool(reader *ClassReader) ConstantPool {
	// The pool is preceded by its entry count (type u2).
	cpCount := int(reader.readUint16())
	cp := make([]ConstantInfo, cpCount) // allocate the table
	for i := 1; i < cpCount; i++ { // NOTE: valid indices start at 1; slot 0 is reserved/unused
		cp[i] = readConstantInfo(reader, cp)
		switch cp[i].(type) {
		case *ConstantLongInfo, *ConstantDoubleInfo:
			i++ // long and double constants occupy two consecutive slots
		}
	}
	return cp
}
// readConstantInfo decodes one constant-pool entry. Each entry starts with a
// u1 tag that selects the concrete type; the entry then decodes its own
// payload from the reader.
func readConstantInfo(reader *ClassReader, cp ConstantPool) ConstantInfo {
	info := newConstantInfo(reader.readUint8(), cp)
	info.readInfo(reader)
	return info
}
// newConstantInfo returns an empty constant-info value for the given tag
// (cases ordered by tag value). Entries that later resolve other pool
// entries keep a reference to the pool. An unrecognised tag indicates a
// malformed class file and panics.
func newConstantInfo(tag uint8, cp ConstantPool) ConstantInfo {
	switch tag {
	case CONSTANT_Utf8:
		return &ConstantUtf8Info{}
	case CONSTANT_Integer:
		return &ConstantIntegerInfo{}
	case CONSTANT_Float:
		return &ConstantFloatInfo{}
	case CONSTANT_Long:
		return &ConstantLongInfo{}
	case CONSTANT_Double:
		return &ConstantDoubleInfo{}
	case CONSTANT_Class:
		return &ConstantClassInfo{cp: cp}
	case CONSTANT_String:
		return &ConstantStringInfo{cp: cp}
	case CONSTANT_Fieldref:
		return &ConstantFieldrefInfo{ConstantMemberrefInfo{cp: cp}}
	case CONSTANT_Methodref:
		return &ConstantMethodrefInfo{ConstantMemberrefInfo{cp: cp}}
	case CONSTANT_InterfaceMethodref:
		return &ConstantInterfaceMethodrefInfo{ConstantMemberrefInfo{cp: cp}}
	case CONSTANT_NameAndType:
		return &ConstantNameAndTypeInfo{}
	case CONSTANT_MethodHandle:
		return &ConstantMethodHandleInfo{}
	case CONSTANT_MethodType:
		return &ConstantMethodTypeInfo{}
	case CONSTANT_InvokeDynamic:
		return &ConstantInvokeDynamicInfo{}
	default:
		panic("java.lang.ClassFormatError: constant pool tag!")
	}
}
// getConstantInfo returns the constant at index, panicking on an empty slot
// (slot 0 and the slot following a long/double entry are never valid).
func (self ConstantPool) getConstantInfo(index uint16) ConstantInfo {
	info := self[index]
	if info == nil {
		panic("Invalid constant pool index!")
	}
	return info
}
// getNameAndType resolves the CONSTANT_NameAndType_info entry at index and
// returns the referenced name and type-descriptor strings.
func (self ConstantPool) getNameAndType(index uint16) (string, string) {
	ntInfo := self.getConstantInfo(index).(*ConstantNameAndTypeInfo)
	name := self.getUtf8(ntInfo.nameIndex)
	descriptor := self.getUtf8(ntInfo.descriptorIndex)
	return name, descriptor
}
// getClassName resolves the CONSTANT_Class_info entry at index to its name.
func (self ConstantPool) getClassName(index uint16) string {
	info := self.getConstantInfo(index).(*ConstantClassInfo)
	return self.getUtf8(info.nameIndex)
}
// getUtf8 returns the string held by the CONSTANT_Utf8_info entry at index.
func (self ConstantPool) getUtf8(index uint16) string {
	return self.getConstantInfo(index).(*ConstantUtf8Info).str
}
|
package server
import (
"FPproject/Frontend/models"
"encoding/json"
"net/http"
"strconv"
)
// cart renders the shopping-cart page and, on POST, handles cart mutations:
// submitting the order (clears the cart), deleting an item, or editing an
// item's quantity. All mutations redirect back to /cart (PRG pattern).
func cart(w http.ResponseWriter, r *http.Request) {
	var tpldata []interface{}
	var cart []models.CartItem
	data, status := newRequest(r, http.MethodGet, "/allci", nil)
	if status != 200 {
		tpl.ExecuteTemplate(w, "err.html", nil)
		return
	}
	json.Unmarshal(data, &cart)
	// Fetch the food record behind every cart item.
	var foods []models.Food
	for _, v := range cart {
		var food models.Food
		fooddata, _ := newRequest(r, http.MethodGet, "/food/"+v.ID, nil)
		json.Unmarshal(fooddata, &food)
		foods = append(foods, food)
	}
	// User health data drives the calorie summary.
	var uh models.UserHealth
	data, _ = newRequest(r, http.MethodGet, "/uh", nil)
	json.Unmarshal(data, &uh)
	calData := tCal(cart, foods, uh)
	tpldata = append(tpldata, cart, foods, calData)
	if r.Method == http.MethodPost {
		if r.FormValue("submit") == "order" {
			// Placing the order deletes all cart items.
			_, status := newRequest(r, http.MethodDelete, "/ci", nil)
			if status != 200 {
				tpl.ExecuteTemplate(w, "err.html", nil)
				return
			}
		}
		if r.FormValue("delete") != "" {
			id := r.FormValue("delete")
			_, status := newRequest(r, http.MethodDelete, "/ci/"+id, nil)
			if status != 200 {
				tpl.ExecuteTemplate(w, "err.html", nil)
				return
			}
		}
		if r.FormValue("edit") != "" {
			id := r.FormValue("edit")
			qty, _ := strconv.Atoi(r.FormValue(id))
			// Renamed from `new`, which shadowed the predeclared identifier.
			item := models.CartItem{
				ID:  id,
				Qty: qty,
			}
			_, status := newRequest(r, http.MethodPut, "/ci", item)
			if status != 200 {
				tpl.ExecuteTemplate(w, "err.html", nil)
				return
			}
		}
		http.Redirect(w, r, "/cart", http.StatusSeeOther)
		return
	}
	tpl.ExecuteTemplate(w, "cart.html", tpldata)
}
// res renders a single merchant's page (address, menu, distance/calorie
// estimate) and, on POST, adds the chosen food with its quantity to the cart.
func res(w http.ResponseWriter, r *http.Request) {
	id := r.URL.Query().Get("id")
	var tpldata []interface{}
	name := r.URL.Query().Get("name")
	var add models.Address
	data, _ := newRequest(r, http.MethodGet, "/mercadd/"+id, nil)
	json.Unmarshal(data, &add)
	var foods []models.Food
	data, _ = newRequest(r, http.MethodGet, "/allfood/"+id, nil)
	json.Unmarshal(data, &foods)
	// The user's own address is needed to estimate travel distance.
	var uadd models.Address
	udata, _ := newRequest(r, http.MethodGet, "/add", nil)
	json.Unmarshal(udata, &uadd)
	dist, cal := distCal(uadd.Postal, add.Postal)
	dc := map[string]float32{
		"distance": dist,
		"cal":      cal,
	}
	tpldata = append(tpldata, name, add, foods, dc)
	if r.Method == http.MethodPost {
		id := r.FormValue("add")
		qty, _ := strconv.Atoi(r.FormValue(id))
		// Renamed from `new`, which shadowed the predeclared identifier.
		item := models.CartItem{
			ID:  id,
			Qty: qty,
		}
		_, status := newRequest(r, http.MethodPost, "/ci", item)
		if status != 200 {
			tpl.ExecuteTemplate(w, "err.html", nil)
			return
		}
		http.Redirect(w, r, "/cart", http.StatusSeeOther)
		//http.Redirect(w, r, r.Header.Get("Referer"), http.StatusSeeOther)
		return
	}
	tpl.ExecuteTemplate(w, "res.html", tpldata)
}
// browse lists all merchants and, when one is selected via POST, redirects
// to that merchant's page.
func browse(w http.ResponseWriter, r *http.Request) {
	var mercs []models.User
	data, _ := newRequest(r, http.MethodGet, "/merc", nil)
	json.Unmarshal(data, &mercs)
	if r.Method != http.MethodPost {
		tpl.ExecuteTemplate(w, "browse.html", mercs)
		return
	}
	target := "/browse/res?id=" + r.FormValue("id") + "&name=" + r.FormValue("name")
	http.Redirect(w, r, target, http.StatusSeeOther)
}
|
package main
import (
"fmt"
"os"
"strconv"
)
// splitNumbers partitions numbers around ref: values less than or equal to
// ref go into lessThanRef, strictly greater values into moreThanRef.
func splitNumbers(numbers []int, ref int) (lessThanRef, moreThanRef []int) {
	for _, value := range numbers {
		switch {
		case value <= ref:
			lessThanRef = append(lessThanRef, value)
		default:
			moreThanRef = append(moreThanRef, value)
		}
	}
	return lessThanRef, moreThanRef
}

// quicksort returns a sorted copy of numbers without mutating the caller's
// slice. The middle element is chosen as the pivot.
func quicksort(numbers []int) []int {
	if len(numbers) <= 1 {
		return numbers
	}
	// Operate on a copy so the input slice stays untouched.
	work := make([]int, len(numbers))
	copy(work, numbers)
	pivotIdx := len(work) / 2
	pivot := work[pivotIdx]
	work = append(work[:pivotIdx], work[pivotIdx+1:]...) // drop the pivot
	smaller, larger := splitNumbers(work, pivot)
	sorted := append(quicksort(smaller), pivot)
	return append(sorted, quicksort(larger)...)
}
// main parses the command-line arguments as integers and prints them sorted.
func main() {
	args := os.Args[1:]
	if len(args) == 0 {
		fmt.Println("You should pass [numbers].")
		os.Exit(1)
	}
	numbers := make([]int, len(args))
	for i, arg := range args {
		value, err := strconv.Atoi(arg)
		if err != nil {
			fmt.Printf("%s isn't a valid number!\n", arg)
			os.Exit(1)
		}
		numbers[i] = value
	}
	fmt.Println(quicksort(numbers))
}
|
package blockattributes
import (
"fmt"
"regexp"
"strings"
"github.com/srackham/go-rimu/v11/internal/expansion"
"github.com/srackham/go-rimu/v11/internal/options"
"github.com/srackham/go-rimu/v11/internal/spans"
"github.com/srackham/go-rimu/v11/internal/utils/stringlist"
)
// Package-level Block Attributes state: accumulated by Parse and consumed
// (then cleared) by Inject.
var (
	Classes string // Space separated HTML class names.
	Id string // HTML element id.
	Css string // HTML CSS styles.
	Attributes string // Other HTML element attributes.
	Options expansion.Options
)

var ids stringlist.StringList // List of allocated HTML ids.
// init establishes the package's default state at load time.
func init() {
	Init()
}
// Init resets all block-attribute state to its zero values.
func Init() {
	Classes, Id, Css, Attributes = "", "", "", ""
	Options = expansion.Options{}
	ids = nil
}
// Parse matches text against the Block Attributes syntax and, on a match,
// merges the parsed values into the package-level attribute state.
// It returns false when text is not a Block Attributes line.
//
// Capture groups: class names = $1, id = $2, css-properties = $3,
// html-attributes = $4, block-options = $5.
func Parse(text string) bool {
	// Expand macros first so attribute values may come from macro invocations.
	text = spans.ReplaceInline(text, expansion.Options{Macros: true})
	m := regexp.MustCompile(`^\\?\.((?:\s*[a-zA-Z][\w\-]*)+)*(?:\s*)?(#[a-zA-Z][\w\-]*\s*)?(?:\s*)?(?:"(.+?)")?(?:\s*)?(\[.+])?(?:\s*)?([+-][ \w+-]+)?$`).FindStringSubmatch(text)
	if m == nil {
		return false
	}
	for i, v := range m {
		m[i] = strings.TrimSpace(v)
	}
	if !options.SkipBlockAttributes() {
		if m[1] != "" { // HTML element class names: appended, space separated.
			if Classes != "" {
				Classes += " "
			}
			Classes += m[1]
		}
		if m[2] != "" { // HTML element id: leading '#' stripped.
			Id = m[2][1:]
		}
		if m[3] != "" { // CSS properties: appended, semicolon separated.
			if Css != "" && !strings.HasSuffix(Css, ";") {
				Css += ";"
			}
			if Css != "" {
				Css += " "
			}
			Css += m[3]
		}
		if m[4] != "" && !options.IsSafeModeNz() { // HTML attributes: brackets stripped; suppressed in safe mode.
			if Attributes != "" {
				Attributes += " "
			}
			Attributes += strings.TrimSpace(m[4][1 : len(m[4])-1])
		}
		if m[5] != "" { // Block options such as "+macros".
			Options.Merge(expansion.Parse(m[5]))
		}
	}
	return true
}
// Inject HTML attributes into the HTML `tag` and return result.
// Consume HTML attributes unless the `tag` argument is blank.
//
// Classes and Css are merged into existing class=/style= attributes when the
// tag already has them; Id is rejected (via the error callback) when a
// duplicate id is detected. On return all pending attribute state is cleared.
func Inject(tag string) string {
	if tag == "" {
		return tag
	}
	attrs := ""
	if Classes != "" {
		m := regexp.MustCompile(`(?i)^<[^>]*class="`).FindStringIndex(tag)
		if m != nil {
			// Inject class names into first existing class attribute in first tag.
			before := tag[:m[1]]
			after := tag[m[1]:]
			tag = before + Classes + " " + after
		} else {
			attrs = "class=\"" + Classes + "\""
		}
	}
	if Id != "" {
		Id = strings.ToLower(Id)
		has_id := regexp.MustCompile(`(?i)^<[^<]*id=".*?"`).MatchString(tag)
		// An id already present on the tag, or one previously allocated,
		// is a duplicate; report it instead of emitting invalid HTML.
		if has_id || ids.IndexOf(Id) >= 0 {
			options.ErrorCallback("duplicate 'id' attribute: " + Id)
		} else {
			ids.Push(Id)
		}
		if !has_id {
			attrs += " id=\"" + Id + "\""
		}
	}
	if Css != "" {
		m := regexp.MustCompile(`(?i)^<[^<]*style="(.*?)"`).FindStringSubmatchIndex(tag)
		if m != nil {
			// Inject CSS styles into first existing style attribute in first tag.
			before := tag[:m[2]]
			after := tag[m[3]:]
			css := tag[m[2]:m[3]]
			css = strings.TrimSpace(css)
			if !strings.HasSuffix(css, ";") {
				css += ";"
			}
			tag = before + css + " " + Css + after
		} else {
			attrs += " style=\"" + Css + "\""
		}
	}
	if Attributes != "" {
		attrs += " " + Attributes
	}
	attrs = strings.TrimLeft(attrs, " \n")
	if attrs != "" {
		m := regexp.MustCompile(`(?i)^(<[a-z]+|<h[1-6])(?:[ >])`).FindStringSubmatch(tag) // Match start tag.
		if m != nil {
			// Insert the collected attributes right after the tag name.
			before := m[1]
			after := tag[len(m[1]):]
			tag = before + " " + attrs + after
		}
	}
	// Consume the attributes.
	Classes = ""
	Id = ""
	Css = ""
	Attributes = ""
	return tag
}
// Slugify converts text into a unique HTML id slug: runs of non-alphanumeric
// characters become single dashes, leading/trailing dashes are trimmed, the
// result is lower-cased, and a numeric suffix disambiguates slugs that are
// already allocated in ids.
func Slugify(text string) string {
	slug := regexp.MustCompile(`\W+`).ReplaceAllString(text, "-")
	slug = regexp.MustCompile(`-+`).ReplaceAllString(slug, "-")
	slug = strings.ToLower(strings.Trim(slug, "-"))
	if slug == "" {
		slug = "x" // fallback when text has no usable characters
	}
	// Another element already has this id: append "-2", "-3", ...
	if ids.IndexOf(slug) > -1 {
		suffix := 2
		for ids.IndexOf(slug+"-"+fmt.Sprint(suffix)) > -1 {
			suffix++
		}
		slug = slug + "-" + fmt.Sprint(suffix)
	}
	return slug
}
|
package functions
// notImplementedError reports that dynamic Go function execution is not yet
// supported for the requested package/function pair.
type notImplementedError struct {
	pkg, fn string
}

func (e *notImplementedError) Error() string {
	return "ExecuteGoFunction: not implemented (package " + e.pkg + ", function " + e.fn + ")"
}

// ExecuteGoFunction is intended to invoke the named function from the named
// package with the given inputs. The original declaration used the Go
// keyword `package` as a parameter name (which does not compile) and had no
// return statement despite declaring result values; until the execution
// logic exists, the stub returns an explicit not-implemented error.
func ExecuteGoFunction(pkgName, funcName string, inputs []interface{}) (interface{}, error) {
	return nil, &notImplementedError{pkg: pkgName, fn: funcName}
}
|
package buildinfo
// Banner and link constants used by the CLI greeting.
const (
	// Graffiti is the ASCII-art banner printed by the CLI.
	Graffiti = " .__ __ .__ ___. __ \n ____ | |__ _____ ___.__._/ |_|__| _____ ____ \\_ |__ _____/ |_ \n / _ \\| | \\ / < | |\\ __\\ |/ \\_/ __ \\ ______ | __ \\ / _ \\ __\\\n( <_> ) Y \\ Y Y \\___ | | | | | Y Y \\ ___/ /_____/ | \\_\\ ( <_> ) | \n \\____/|___| /__|_| / ____| |__| |__|__|_| /\\___ > |___ /\\____/|__| \n \\/ \\/\\/ \\/ \\/ \\/ "
	// GreetingCLI is the printf format for version/build-time/link output.
	GreetingCLI = "\nversion: %s \nbuild time: %s\ntg: %s\ngithub: %s\n"
	// GithubBloopURL is the project's GitHub repository URL.
	GithubBloopURL = "https://github.com/robotomize/ohmytime-bot.git"
	// TgBloopURL is the project's Telegram bot URL.
	TgBloopURL = "https://t.me/ohmytimebot"
)

// Build metadata with development defaults.
// NOTE(review): these look intended to be overridden at build time via
// `-ldflags -X` — confirm against the build scripts.
var (
	BuildTag = "v0.0.0"
	Name = "ohmytime-bot"
	Time = ""
)
// buildinfo exposes the package-level build variables through methods.
type buildinfo struct{}

// Tag returns the build tag/version string (BuildTag).
func (buildinfo) Tag() string {
	return BuildTag
}

// Name returns the application name.
func (buildinfo) Name() string {
	return Name
}

// Time returns the recorded build-time string (may be empty).
func (buildinfo) Time() string {
	return Time
}

// Info is the shared accessor instance.
var Info buildinfo
|
package tun
import (
"github.com/SUCHMOKUO/falcon-tun/tcpip"
"log"
)
// PacketHandler processes one inbound IPv4 packet for a given IP protocol.
type PacketHandler = func(*TUN, tcpip.IPv4Packet)

// PacketHandlers maps an IP protocol number to its handler.
type PacketHandlers = map[tcpip.IPProtocol]PacketHandler

// register packet handlers.
var packetHandlers = PacketHandlers{
	// TCP packet handler: diverts TCP flows through the local relay using a
	// port-keyed NAT table, rewriting the headers in place and writing the
	// packet back to the TUN device.
	tcpip.TCP: func(tun *TUN, ipv4Packet tcpip.IPv4Packet) {
		relayAddr := tun.tcpRelay.addr
		nat := tun.tcpRelay.NAT4
		tcpPacket := tcpip.TCPPacket(ipv4Packet.Payload())
		srcPort := tcpPacket.SourcePort()
		dstPort := tcpPacket.DestinationPort()
		srcIP := ipv4Packet.SourceIP()
		dstIP := ipv4Packet.DestinationIP()
		if srcIP.Equal(relayAddr.IP) && int(srcPort) == relayAddr.Port {
			// it's from relay: restore the original endpoints recorded in
			// the NAT table (keyed by the NAT port in the destination port).
			realSrcPort, realDstIP, realDstPort := nat.GetRecord(dstPort)
			ipv4Packet.SetSourceIP(tun.IP)
			ipv4Packet.SetDestinationIP(realDstIP)
			tcpPacket.SetSourcePort(realSrcPort)
			tcpPacket.SetDestinationPort(realDstPort)
		} else {
			// it's from others: remember the real endpoints under a fresh
			// NAT port, then send it to relay.
			natPort := nat.AddRecord(srcPort, dstIP, dstPort)
			ipv4Packet.SetSourceIP(dstIP)
			ipv4Packet.SetDestinationIP(relayAddr.IP)
			tcpPacket.SetSourcePort(natPort)
			tcpPacket.SetDestinationPort(uint16(relayAddr.Port))
		}
		// Checksums must be recomputed after header mutation; the TCP
		// checksum covers the IPv4 pseudo-header sum.
		tcpPacket.ResetChecksum(ipv4Packet.PseudoSum())
		ipv4Packet.ResetChecksum()
		_, err := tun.ifce.Write(ipv4Packet)
		if err != nil {
			log.Println("write tun error:", err)
		}
	},
	// ICMP packet handler: answers echo requests locally by swapping the
	// endpoints and turning the request into an echo reply; everything else
	// is dropped.
	tcpip.ICMP: func(tun *TUN, ipv4Packet tcpip.IPv4Packet) {
		icmpPacket := tcpip.ICMPPacket(ipv4Packet.Payload())
		if icmpPacket.Type() != tcpip.ICMPRequest || icmpPacket.Code() != 0 {
			return
		}
		log.Printf("ping: %v -> %v\n", ipv4Packet.SourceIP(), ipv4Packet.DestinationIP())
		// forge a reply.
		icmpPacket.SetType(tcpip.ICMPEcho)
		srcIP := ipv4Packet.SourceIP()
		dstIP := ipv4Packet.DestinationIP()
		ipv4Packet.SetSourceIP(dstIP)
		ipv4Packet.SetDestinationIP(srcIP)
		icmpPacket.ResetChecksum()
		ipv4Packet.ResetChecksum()
		_, err := tun.ifce.Write(ipv4Packet)
		if err != nil {
			log.Println("write tun error:", err)
		}
	},
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common_test
import (
"context"
"testing"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/store/mockstore"
tmock "github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
// newTableInfo parses createTableSql, builds mocked table metadata with the
// given tableID (state forced to public), and persists both a database (id
// dbID) and the table into kvStore's meta layer so the auto-ID allocators
// under test can find them.
func newTableInfo(t *testing.T,
	dbID, tableID int64,
	createTableSql string, kvStore kv.Storage,
) *model.TableInfo {
	p := parser.New()
	se := tmock.NewContext()
	node, err := p.ParseOneStmt(createTableSql, "utf8mb4", "utf8mb4_bin")
	require.NoError(t, err)
	tableInfo, err := ddl.MockTableInfo(se, node.(*ast.CreateTableStmt), tableID)
	require.NoError(t, err)
	tableInfo.State = model.StatePublic
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnLightning)
	err = kv.RunInNewTxn(ctx, kvStore, false, func(ctx context.Context, txn kv.Transaction) error {
		m := meta.NewMeta(txn)
		// The database may already exist from a previous test case.
		if err := m.CreateDatabase(&model.DBInfo{ID: dbID}); err != nil && !errors.ErrorEqual(err, meta.ErrDBExists) {
			return err
		}
		return m.CreateTableOrView(dbID, tableInfo)
	})
	require.NoError(t, err)
	return tableInfo
}
// TestAllocGlobalAutoID exercises GetGlobalAutoIDAlloc / RebaseGlobalAutoID /
// AllocGlobalAutoID across table definitions that produce different sets of
// auto-ID allocators (none, row-ID, auto-increment, auto-random).
func TestAllocGlobalAutoID(t *testing.T) {
	storePath := t.TempDir()
	kvStore, err := mockstore.NewMockStore(mockstore.WithPath(storePath))
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, kvStore.Close())
	})
	cases := []struct {
		tableID int64
		createTableSQL string
		expectErrStr string
		expectAllocatorTypes []autoid.AllocatorType
	}{
		// autoID, autoIncrID = false, false
		{
			tableID: 11,
			createTableSQL: "create table t11 (a int primary key clustered)",
			expectErrStr: "has no auto ID",
			expectAllocatorTypes: nil,
		},
		{
			tableID: 12,
			createTableSQL: "create table t12 (a int primary key clustered) AUTO_ID_CACHE 1",
			expectErrStr: "has no auto ID",
			expectAllocatorTypes: nil,
		},
		// autoID, autoIncrID = true, false
		{
			tableID: 21,
			createTableSQL: "create table t21 (a int)",
			expectErrStr: "",
			expectAllocatorTypes: []autoid.AllocatorType{autoid.RowIDAllocType},
		},
		{
			tableID: 22,
			createTableSQL: "create table t22 (a int) AUTO_ID_CACHE 1",
			expectErrStr: "",
			expectAllocatorTypes: []autoid.AllocatorType{autoid.RowIDAllocType},
		},
		// autoID, autoIncrID = false, true
		{
			tableID: 31,
			createTableSQL: "create table t31 (a int primary key clustered auto_increment)",
			expectErrStr: "",
			expectAllocatorTypes: []autoid.AllocatorType{autoid.RowIDAllocType},
		},
		{
			tableID: 32,
			createTableSQL: "create table t32 (a int primary key clustered auto_increment) AUTO_ID_CACHE 1",
			expectErrStr: "",
			expectAllocatorTypes: []autoid.AllocatorType{autoid.AutoIncrementType, autoid.RowIDAllocType},
		},
		// autoID, autoIncrID = true, true
		{
			tableID: 41,
			createTableSQL: "create table t41 (a int primary key nonclustered auto_increment)",
			expectErrStr: "",
			expectAllocatorTypes: []autoid.AllocatorType{autoid.RowIDAllocType},
		},
		{
			tableID: 42,
			createTableSQL: "create table t42 (a int primary key nonclustered auto_increment) AUTO_ID_CACHE 1",
			expectErrStr: "",
			expectAllocatorTypes: []autoid.AllocatorType{autoid.AutoIncrementType, autoid.RowIDAllocType},
		},
		// autoRandomID
		{
			tableID: 51,
			createTableSQL: "create table t51 (a bigint primary key auto_random)",
			expectErrStr: "",
			expectAllocatorTypes: []autoid.AllocatorType{autoid.AutoRandomType},
		},
	}
	ctx := context.Background()
	for _, c := range cases {
		ti := newTableInfo(t, 1, c.tableID, c.createTableSQL, kvStore)
		allocators, err := common.GetGlobalAutoIDAlloc(kvStore, 1, ti)
		if c.expectErrStr == "" {
			require.NoError(t, err, c.tableID)
			// Rebase to 123, then allocate 100 IDs: (123, 223].
			require.NoError(t, common.RebaseGlobalAutoID(ctx, 123, kvStore, 1, ti))
			base, idMax, err := common.AllocGlobalAutoID(ctx, 100, kvStore, 1, ti)
			require.NoError(t, err, c.tableID)
			require.Equal(t, int64(123), base, c.tableID)
			require.Equal(t, int64(223), idMax, c.tableID)
			// all allocators are rebased and allocated
			for _, alloc := range allocators {
				base2, max2, err := alloc.Alloc(ctx, 100, 1, 1)
				require.NoError(t, err, c.tableID)
				require.Equal(t, int64(223), base2, c.tableID)
				require.Equal(t, int64(323), max2, c.tableID)
			}
		} else {
			require.ErrorContains(t, err, c.expectErrStr, c.tableID)
		}
		// On the error path `allocators` is nil, so this collects nil and
		// matches the nil expectation.
		var allocatorTypes []autoid.AllocatorType
		for _, alloc := range allocators {
			allocatorTypes = append(allocatorTypes, alloc.GetType())
		}
		require.Equal(t, c.expectAllocatorTypes, allocatorTypes, c.tableID)
	}
}
|
package model
import (
"fmt"
//"io"
"time"
)
const (
	// VERSION is the generator's version stamped into output headers.
	VERSION = "v0.1.0"
	// TIMESTAMP_FMT is the Go reference-time layout used for the stamp.
	TIMESTAMP_FMT string = "2006-01-02 15:04:05.000"
)

// GenerateVersion returns the banner line embedded at the top of generated
// files, containing the generator version and the current timestamp.
func GenerateVersion() string {
	stamp := time.Now().Format(TIMESTAMP_FMT)
	return fmt.Sprintf("---------------- generated by abnf %s %s ----------------", VERSION, stamp)
}
|
package helpers
import (
"fmt"
"io"
"log"
"mime/multipart"
"net/http"
"strings"
)
const tfeReqBodyString = `{ "password": "%s" }`
// TfeBackup creates a backup of a TFE instance using the provided access details
// TfeBackup creates a backup of a TFE instance using the provided access details.
// The snapshot returned by the backup API is streamed into out; pwd is the
// encryption password sent in the request body.
func TfeBackup(host, token, pwd string, out io.Writer) error {
	var body = strings.NewReader(fmt.Sprintf(tfeReqBodyString, pwd))
	var url = fmt.Sprintf("https://%s/_backup/api/v1/backup", strings.TrimSuffix(host, "/"))
	log.Printf("making request to %q", url)
	var req, err = http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		return fmt.Errorf("error constructing http request: %v", err)
	}
	req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
	var resp *http.Response
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("error making http request: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode > 299 {
		return fmt.Errorf("http request returned: %s", resp.Status)
	}
	// Stream the snapshot directly to out to keep memory usage constant.
	written, err := io.Copy(out, resp.Body)
	if err != nil {
		return fmt.Errorf("error saving backup: %v", err)
	}
	log.Printf("saved backup file, size %d bytes\n", written)
	return nil
}
// TfeRestore restores a backup to a TFE instance using the provided access details
func TfeRestore(host, token, pwd string, in io.Reader) error {
var url = fmt.Sprintf("https://%s/_backup/api/v1/restore", strings.TrimSuffix(host, "/"))
log.Printf("making request to %q", url)
// using io.Pipe so that we don't load the file into memory to execute the upload
r, w := io.Pipe()
m := multipart.NewWriter(w)
go func() {
defer w.Close()
defer m.Close()
partConfig, err := m.CreateFormField("config")
if err != nil {
return
}
cr := strings.NewReader(fmt.Sprintf(tfeReqBodyString, pwd))
if _, err := io.Copy(partConfig, cr); err != nil {
return
}
partSnapshot, err := m.CreateFormField("snapshot")
if err != nil {
return
}
if _, err := io.Copy(partSnapshot, in); err != nil {
return
}
}()
var req, err = http.NewRequest(http.MethodPost, url, r)
if err != nil {
return fmt.Errorf("error constructing http request: %v", err)
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
req.Header.Set("Content-Type", m.FormDataContentType())
var resp *http.Response
resp, err = http.DefaultClient.Do(req)
if err != nil {
return fmt.Errorf("error making http request: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode > 299 {
return fmt.Errorf("http request returned: %s", resp.Status)
}
if err != nil {
return fmt.Errorf("error restoring backup: %v", err)
}
return nil
}
|
package main
import (
"log"
"sort"
d "github.com/dosko64/distance"
)
// main demonstrates sorting a fixed set of points by their haversine
// distance from a reference point, farthest first.
func main() {
	reference := d.New(1.5, 1.5)
	points := []d.Point{
		d.New(4, 4),
		d.New(3, 3),
		d.New(4, 2),
	}
	log.Println(points)
	points = sortByDistance(reference, points, true)
	log.Println(points)
}
// sortByDistance sorts pp in place by haversine distance from p — ascending,
// or descending when reverse is true — and returns the slice.
func sortByDistance(p d.Point, pp []d.Point, reverse bool) []d.Point {
	distanceTo := func(pt d.Point) float64 {
		return d.HaversineDistance(pt.Lat(), pt.Lng(), p.Lat(), p.Lng())
	}
	sort.Slice(pp, func(i, j int) bool {
		di, dj := distanceTo(pp[i]), distanceTo(pp[j])
		if reverse {
			return di > dj
		}
		return di < dj
	})
	return pp
}
|
package nsdownload_test
import (
"testing"
"download/nsdownload"
"fmt"
)
// Test_NationStatGetRoot verifies that the downloader returns a non-empty set
// of root entries. (Previously this test only printed the count and could
// never fail.)
func Test_NationStatGetRoot(t *testing.T) {
	d := nsdownload.NewNationStatDownloader()
	res := d.GetRoot()
	fmt.Println(len(res))
	if len(res) == 0 {
		t.Error("Cannot get root")
	}
}
// Test_NationStatGetChild checks that child entries can be fetched at each
// of the three hierarchy levels.
func Test_NationStatGetChild(t *testing.T) {
	d := nsdownload.NewNationStatDownloader()
	cases := []struct {
		code  string
		level int
	}{
		{"A01", 1},
		{"A0101", 2},
		{"A010101", 3},
	}
	for _, c := range cases {
		if result := d.GetChild(c.code, c.level); len(result) == 0 {
			t.Errorf("Cannot get level %d", c.level)
		}
	}
}
// Test_NationStatGetPeriod checks that at least one reporting period exists.
func Test_NationStatGetPeriod(t *testing.T) {
	downloader := nsdownload.NewNationStatDownloader()
	if periods := downloader.GetPeriod(); len(periods) == 0 {
		t.Error("Cannot get period")
	}
}
// Test_NationStatGetData checks that data can be fetched for a known code.
func Test_NationStatGetData(t *testing.T) {
	d := nsdownload.NewNationStatDownloader()
	codes := []string{"A01010101"}
	result := d.GetData(codes, "200101", "-1")
	if len(result) == 0 {
		// Previously t.Error(result) printed the (empty) result, producing
		// a useless failure message.
		t.Error("Cannot get data")
	}
}
|
package main
import (
"bufio"
"fmt"
"io"
"net"
"os"
"strings"
)
//等待连接,并打印通信数据
func pp(con net.Conn) {
reader := bufio.NewReader(os.Stdin)
for {
//3.读取客户端发来消息
tmp := make([]byte, 128)
n, err := con.Read(tmp)
if err == io.EOF {
break
}
if err != nil {
fmt.Printf("read message wrong,err:%v\n", err)
return
}
//4.打印客户端消息
rmes := string(tmp[:n])
fmt.Printf("%v\n", rmes)
//发送数据
fmt.Print("请输入消息:")
mes, _ := reader.ReadString('\n')
mes = strings.TrimSpace(mes)
mes = strings.ToLower(mes)
if mes == "exit" {
break
} else {
con.Write([]byte(mes))
}
}
}
// main listens on 127.0.0.1:20000 and hands every accepted connection to pp
// in its own goroutine. The listener is now released when main returns
// (previously it was never closed).
func main() {
	// 1. Start the TCP server on the local port.
	listener, err := net.Listen("tcp", "127.0.0.1:20000")
	if err != nil {
		fmt.Printf("open tcp err:%v\n", err)
		return
	}
	defer listener.Close()
	for {
		// 2. Wait for an incoming connection.
		con, err := listener.Accept()
		if err != nil {
			fmt.Printf("receive message wrong,err:%v\n", err)
			return
		}
		go pp(con)
	}
}
|
package golinal
import (
"github.com/stretchr/testify/suite";
"testing"
)
//****************
// Global Matrices
//****************

// Shared fixtures used across the suites below. NOTE(review): these are
// package-level and mutable — tests that modify them (e.g. via in-place Add)
// would leak state between suites; confirm each suite treats them as
// read-only.
var NonsquareMatrix = NewMatrix([]float64{1}, []float64{-7})
var NonsquareMatrix2 = NewMatrix([]float64{3, 4})
var ThreeIdentity = NewMatrix([]float64{1, 0, 0}, []float64{0, 1, 0}, []float64{0, 0, 1})
var RandFourMatrix = NewMatrix (
	[]float64{0.223548, 7.51484, 7.94393, 7.95676},
	[]float64{9.44692, -2.05097, -3.59421, -7.9301},
	[]float64{-5.90911, -9.56427, -6.67171, -8.09466},
	[]float64{-9.43214, 6.42982, 7.37722, 3.8219})
var RandMatrix = NewMatrix(
	[]float64{2.99875, -0.722266, -0.237451, -1.11405, -2.127, 8.88714, -1.65288, -5.27189, -5.92509, -6.02403},
	[]float64{3.26164, 5.86218, 2.81815, -2.06958, 0.366388, -0.271817, -3.51731, 3.22294, -4.71693, -8.95407},
	[]float64{6.53936, 0.653704, 5.51595, 8.75519, 4.50956, -2.18589, 1.44052, -7.2319, -6.35739, 9.8645},
	[]float64{-7.58145, 8.1194, -7.58264, 9.88342, 1.48929, -3.66263, 1.87859, 8.37529, 0.772604, -6.30053},
	[]float64{-9.39711, -6.49522, 1.94943, 9.03285, 3.40668, -7.61823, -7.2272, 4.2087, -8.91554, 4.15006},
	[]float64{-2.80846, 5.17557, 9.52006, -9.47033, 5.67815, 6.8402, 0.818774, -6.92541, 8.22727, 5.83063},
	[]float64{0.627448, -3.28157, 5.50732, 9.96776, -7.11846, -7.3921, -6.67718, -0.621603, -7.81631, -2.35664},
	[]float64{-3.93259, -2.36996, 4.96011, 7.4244, -6.90362, -1.08256, 5.21275, -8.66966, -8.88118, -3.51107},
	[]float64{8.40158, -6.03886, 7.62759, -2.43493, 0.36834, 9.46535, -4.76645, 7.49544, -7.4696, -7.31169},
	[]float64{-5.5276, -5.28959, -2.90794, -0.984676, 7.66101, -4.13466, -6.00181, 1.37331, -9.62865, 1.92439})

// Nil pointer for Matrix
var nilMatrixP *Matrix
//************************
// Constructor Test Suite
//************************
// ConstructorsTestSuite exercises the two Matrix constructors: BlankMatrix
// (zero-filled by dimensions) and NewMatrix (built from row slices).
type ConstructorsTestSuite struct {
	suite.Suite
	EmptyMatrix,
	MatrixFromSlices *Matrix
}
// SetupTest builds a fresh 3x3 identity from row slices and a blank 2x3
// matrix before every test in the suite.
func (suite *ConstructorsTestSuite) SetupTest() {
	suite.MatrixFromSlices = NewMatrix([]float64{1, 0, 0}, []float64{0, 1, 0}, []float64{0, 0, 1})
	suite.EmptyMatrix = BlankMatrix(2, 3)
}
// TestConstructors checks the reported dimensions of both constructors.
// testify's Equal signature is (expected, actual); the original calls had
// the arguments reversed, which garbles failure messages.
func (suite *ConstructorsTestSuite) TestConstructors() {
	suite.Equal(2, suite.EmptyMatrix.NumRows(), "They should be equal")
	suite.Equal(3, suite.EmptyMatrix.NumCols(), "They Should be equal")
	suite.Equal(3, suite.MatrixFromSlices.NumRows(), "They should be equal")
	suite.Equal(3, suite.MatrixFromSlices.NumCols(), "They Should be equal")
}
//********************************
// Addition of Matrices Test Suite
//********************************
// This data structure is a test suite
// that tests different cases for adding Matrices.
// This data structure is a test suite
// that tests different cases for adding Matrices:
// mismatched dimensions, self-addition, and a plain 2x2 sum.
type AdditionTestSuite struct {
	suite.Suite
	DiffDimMatrix1,
	DiffDimMatrix2 *Matrix
	AddToItselfMatrix,
	CopyOfItselfMatrix,
	ResultAddToItself *Matrix
	SquareMatrix1,
	SquareMatrix2,
	ResultMatrix *Matrix
}
// Initializes all matrices to be tested
// in the addition test sweet.
// Initializes all matrices to be tested in the addition test suite:
// two incompatible shapes, a matrix plus an independent copy and its
// expected doubled result, and a simple 2x2 pair with its expected sum.
func (suite *AdditionTestSuite) SetupTest() {
	suite.DiffDimMatrix1 = BlankMatrix(10, 2)
	suite.DiffDimMatrix2 = BlankMatrix(1, 5)
	suite.AddToItselfMatrix = NewMatrix([]float64{1, 0, 0}, []float64{0, 1, 0}, []float64{0, 0, 1})
	// Shares the global ThreeIdentity pointer — used only for comparison.
	suite.CopyOfItselfMatrix = ThreeIdentity
	suite.ResultAddToItself = NewMatrix([]float64{2, 0, 0}, []float64{0, 2, 0}, []float64{0, 0, 2})
	suite.SquareMatrix1 = NewMatrix([]float64{1, 6}, []float64{5, -7})
	suite.SquareMatrix2 = NewMatrix([]float64{1.05, -10}, []float64{-103, 4})
	suite.ResultMatrix = NewMatrix([]float64{2.05, -4}, []float64{-98, -3})
}
// Different Dimension Addition Test
// Adding two matrices of different dimensions should
// raise and Error
// TestDifferentDimAddition: adding matrices with mismatched dimensions must
// produce a non-nil error.
func (suite *AdditionTestSuite) TestDifferentDimAddition() {
	addErr := suite.DiffDimMatrix1.Add(suite.DiffDimMatrix2)
	suite.NotEqual(nil, addErr)
}
// The addition method uses pointers as receivers
// to avoid the inefficiency of copying the Matrix arguments
// This tests to make sure that we don't modify the original
// arguments when we add a Matrix to itself
// The addition method uses pointers as receivers
// to avoid the inefficiency of copying the Matrix arguments.
// This tests to make sure that we don't modify the original
// arguments when we add a Matrix to itself.
func (suite *AdditionTestSuite) TestAddToItself() {
	// Copy the Matrix before we add it to itself.
	// Test this copy against the one that added to itself
	// after adding: the receiver must have changed (doubled) while the
	// independent copy stays an identity.
	err := suite.AddToItselfMatrix.Add(suite.AddToItselfMatrix)
	suite.Equal(err, nil, "They should be equal")
	suite.NotEqual(suite.CopyOfItselfMatrix, suite.AddToItselfMatrix, "They should not be equal")
	suite.Equal(suite.ResultAddToItself, suite.AddToItselfMatrix, "They should be equal")
}
// TestSimpleAdd adds two 2x2 matrices and checks the receiver holds the sum.
func (suite *AdditionTestSuite) TestSimpleAdd() {
	addErr := suite.SquareMatrix1.Add(suite.SquareMatrix2)
	suite.Equal(addErr, nil, "They should be equal")
	suite.Equal(suite.ResultMatrix, suite.SquareMatrix1, "They should be equal")
}
//**************************************
// Multiplication of Matrices Test Suite
//**************************************
// MultiplicationTestSuite covers matrix multiplication: incompatible
// row/column counts, identity products, and a precomputed 10x10 square.
type MultiplicationTestSuite struct {
	suite.Suite
	MismatchRowCol1,
	MismatchRowCol2,
	SquaredRandMatrix *Matrix
}
// SetupTest prepares a shape-incompatible pair and the expected value of
// RandMatrix * RandMatrix (precomputed externally).
func (suite *MultiplicationTestSuite) SetupTest() {
	suite.MismatchRowCol1 = NewMatrix([]float64{1, 6, 3}, []float64{5, -7, 3})
	suite.MismatchRowCol2 = NonsquareMatrix
	suite.SquaredRandMatrix = NewMatrix(
		[]float64{11.7719, 129.774, 21.9211, -153.57, 33.6721, 95.1895, 65.75, -102.2, 240.369, 90.2049},
		[]float64{55.3243, 93.0136, 31.7845, 3.5077, -63.0053, 39.0486, 87.8007, -105.999, 48.3232, -17.1332},
		[]float64{-123.397, 31.8222, -152.806, 154.054, 140.097, -139.273, -99.3154, 62.8844, -122.848, 47.4363},
		[]float64{-114.917, 102.382, -47.9481, 156.305, -135.274, -115.198, 43.1011, 158.743, -15.1189, -230.882},
		[]float64{-234.646, 27.4006, -238.697, 210.004, 59.1005, -249.836, 115.081, 64.1364, 33.6, 122.233},
		[]float64{134.585, -106.579, 231.721, -100.351, 184.561, 46.2524, -168.852, -10.9457, -132.449, 161.053},
		[]float64{-15.112, 155.872, -231.474, 108.712, 1.80904, -52.6207, 175.303, -1.27922, 125.886, 15.0193},
		[]float64{6.69661, 167.354, -130.272, 86.6587, 3.83158, -125.846, 68.3412, 18.3789, 197.724, 88.4347},
		[]float64{-11.0144, 72.0043, 107.567, -6.93323, -10.8936, 117.14, 169.24, -331.361, 99.8063, 176.223},
		[]float64{-206.458, -43.6637, -151.877, 62.1927, 42.2995, -180.585, 44.5284, 8.04939, 61.2176, 149.294})
}
// TestMultiplication checks error behaviour for incompatible shapes and
// verifies identity products and a precomputed square. The assertion message
// on err2 previously read "They should be equal" even though the assertion
// is NotEqual — fixed to match the check.
func (suite *MultiplicationTestSuite) TestMultiplication() {
	mult1, err1 := suite.MismatchRowCol1.Multiply(suite.MismatchRowCol2)
	mult2, err2 := suite.MismatchRowCol2.Multiply(suite.MismatchRowCol1)
	mult3, err3 := ThreeIdentity.Multiply(ThreeIdentity)
	mult4, err4 := RandMatrix.Multiply(Identity(10))
	mult5, err5 := RandMatrix.Multiply(RandMatrix)
	suite.Equal(mult1, nilMatrixP, "They should be equal")
	suite.NotEqual(err1, nil, "They should not be equal")
	suite.Equal(mult2, nilMatrixP, "They should be equal")
	suite.NotEqual(err2, nil, "They should not be equal")
	suite.Equal(mult3, ThreeIdentity, "They should be equal")
	suite.Equal(err3, nil, "They should be equal")
	suite.Equal(RandMatrix, mult4, "They should be equal")
	suite.Equal(err4, nil, "They should be equal")
	suite.Equal(suite.SquaredRandMatrix, mult5, "They should be equal")
	suite.Equal(err5, nil, "They should be equal")
}
//*****************************
// LUP Decomposition Test Suite
//*****************************
// LUPDecompTestSuite verifies LUP decomposition (A = P*L*U with partial
// pivoting) against two hand-checked examples (3x3 and 4x4).
type LUPDecompTestSuite struct {
	suite.Suite
	ThreeMatrix,
	FourMatrix *Matrix
	LThree,
	UThree,
	PThree *Matrix
	LFour,
	UFour,
	PFour *Matrix
}
// SetupTest defines the input matrices and their expected L, U and P
// factors. The 4x4 case is the standard textbook LUP example for
// [[11,9,24,2],[1,5,2,6],[3,17,18,1],[2,5,7,1]]. The previous UFour was a
// copy-paste duplicate of LFour (a unit lower-triangular matrix can never
// be a valid U factor here); it is replaced with the correct upper factor.
func (suite *LUPDecompTestSuite) SetupTest() {
	suite.ThreeMatrix = NewMatrix(
		[]float64{1, 3, 5},
		[]float64{2, 4, 7},
		[]float64{1, 1, 0})
	suite.LThree = NewMatrix(
		[]float64{1.00000, 0.00000, 0.00000},
		[]float64{0.50000, 1.00000, 0.00000},
		[]float64{0.50000, -1.00000, 1.00000})
	suite.UThree = NewMatrix(
		[]float64{2.00000, 4.00000, 7.00000},
		[]float64{0.00000, 1.00000, 1.50000},
		[]float64{0.00000, 0.00000, -2.00000})
	suite.PThree = NewMatrix(
		[]float64{0, 1, 0},
		[]float64{1, 0, 0},
		[]float64{0, 0, 1})
	suite.FourMatrix = NewMatrix(
		[]float64{11, 9, 24, 2},
		[]float64{ 1, 5, 2, 6},
		[]float64{ 3, 17, 18, 1},
		[]float64{ 2, 5, 7, 1})
	suite.LFour = NewMatrix(
		[]float64{1.00000, 0.00000, 0.00000, 0.00000},
		[]float64{0.27273, 1.00000, 0.00000, 0.00000},
		[]float64{0.09091, 0.28750, 1.00000, 0.00000},
		[]float64{0.18182, 0.23125, 0.00360, 1.00000})
	suite.UFour = NewMatrix(
		[]float64{11.00000, 9.00000, 24.00000, 2.00000},
		[]float64{0.00000, 14.54545, 11.45455, 0.45455},
		[]float64{0.00000, 0.00000, -3.47500, 5.68750},
		[]float64{0.00000, 0.00000, 0.00000, 0.51079})
	suite.PFour = NewMatrix(
		[]float64{1, 0, 0, 0},
		[]float64{0, 0, 1, 0},
		[]float64{0, 1, 0, 0},
		[]float64{0, 0, 0, 1})
}
// TestLUP decomposes both fixture matrices and compares every factor
// against the precomputed expectations from SetupTest.
func (suite *LUPDecompTestSuite) TestLUP() {
	L1, U1, P1, err1 := suite.ThreeMatrix.LUP()
	L2, U2, P2, err2 := suite.FourMatrix.LUP()
	suite.Equal(suite.LThree, L1, "They should be equal")
	suite.Equal(suite.UThree, U1, "They should be equal")
	suite.Equal(suite.PThree, P1, "They should be equal")
	// NoError is the idiomatic testify check; Equal(nil, err) is fragile
	// against typed-nil errors.
	suite.NoError(err1, "There should be no error")
	suite.Equal(suite.LFour, L2, "They should be equal")
	suite.Equal(suite.UFour, U2, "They should be equal")
	suite.Equal(suite.PFour, P2, "They should be equal")
	suite.NoError(err2, "There should be no error")
}
//*******************************
// Inverse of Matrices Test Suite
//*******************************
// InverseTestSuite exercises Matrix.Inverse for singular, identity and
// dense random inputs.
type InverseTestSuite struct {
	suite.Suite
	// ZeroDeterminantMatrix is singular, so inversion must fail;
	// TenIdentity is the 10x10 identity.
	ZeroDeterminantMatrix,
	TenIdentity *Matrix
	// RandMatrixInverse is the precomputed inverse of the package-level
	// RandMatrix fixture.
	RandMatrixInverse *Matrix
}
// SetupTest seeds a singular 2x2 matrix, the 10x10 identity, and the
// precomputed inverse of the package-level RandMatrix fixture.
func (suite *InverseTestSuite) SetupTest() {
	// Rows are linearly dependent, so the determinant is zero.
	suite.ZeroDeterminantMatrix = NewMatrix([]float64{2, -2}, []float64{-2, 2})
	suite.TenIdentity = Identity(10)
	suite.RandMatrixInverse = NewMatrix(
		[]float64{-0.0460854, -0.0705092, 0.0326931, 0.045097, -0.164044, 0.0123061, 0.104152, -0.0198876, 0.0493236, 0.102875},
		[]float64{0.141227, 0.325278, 0.0262483, -0.179546, 0.419574, -0.121419, -0.306189, -0.0357916, -0.162755, -0.362409},
		[]float64{-0.0715951, -0.0268494, 0.0000621608, 0.029943, -0.0600711, 0.0617097, 0.0618292, 0.0345928, 0.0558978, 0.0424593},
		[]float64{-0.0430982, -0.202563, 0.0235125, 0.164038, -0.235156, 0.0869331, 0.207031, -0.00181176, 0.0909108, 0.178495},
		[]float64{-0.170888, -0.341571, 0.0211468, 0.281072, -0.546889, 0.193541, 0.36861, 0.0233994, 0.187385, 0.486657},
		[]float64{0.133272, 0.0910483, 0.00621968, -0.0741133, 0.234174, -0.0504633, -0.16888, -0.0265523, -0.0423938, -0.202145},
		[]float64{-0.0699641, 0.0156878, 0.0117219, -0.00723369, -0.00487834, -0.029769, -0.0838612, 0.0756502, 0.0214575, -0.0122182},
		[]float64{0.0507206, 0.222235, -0.00168086, -0.150038, 0.356268, -0.126108, -0.272279, -0.0189743, -0.0668899, -0.298225},
		[]float64{-0.133813, -0.329467, -0.0227785, 0.228099, -0.457624, 0.170425, 0.362524,-0.00890932, 0.141323, 0.346878},
		[]float64{0.168294, 0.34461, 0.031145, -0.287184, 0.592122, -0.189706, -0.426741, -0.0494601, -0.190007, -0.486913})
}
// TestInverse checks that inversion fails for non-square and singular
// matrices, is the identity for the identity, and matches the
// precomputed inverse of RandMatrix.
func (suite *InverseTestSuite) TestInverse() {
	inverse1, err1 := NonsquareMatrix.Inverse()
	inverse2, err2 := suite.ZeroDeterminantMatrix.Inverse()
	inverse3, err3 := suite.TenIdentity.Inverse()
	inverse4, err4 := RandMatrix.Inverse()
	// Fixed "sqaure" typo in the failure message.
	suite.Equal(nilMatrixP, inverse1, "Non square Matrix should be nil")
	suite.Error(err1, "There should be an error")
	suite.Equal(nilMatrixP, inverse2, "Zero determinant should be nil")
	suite.Error(err2, "There should be an error")
	suite.Equal(suite.TenIdentity, inverse3, "Identity inverse is itself")
	suite.NoError(err3, "There should be no error")
	// Fixed copy-pasted message: this case is about RandMatrix, not the
	// identity.
	suite.Equal(suite.RandMatrixInverse, inverse4, "RandMatrix inverse should match the precomputed one")
	suite.NoError(err4, "There should be no error")
}
//****************************************************
// Eigenvalues and Determinants of Matrices Test Suite
//****************************************************
// EigValDeterminantTestSuite exercises Determinant and Eigenvalues on
// triangular fixtures and the package-level RandMatrix.
type EigValDeterminantTestSuite struct {
	suite.Suite
	// Upper-triangular fixtures whose eigenvalues are their diagonals.
	// NOTE(review): the RandMatrix field is never assigned in SetupTest;
	// the test methods reference the package-level RandMatrix instead, so
	// this field appears to be dead — confirm and consider removing.
	Uppertriangular1,
	Uppertriangular2,
	RandMatrix *Matrix
	// Expected eigenvalue lists, in the order the implementation emits them.
	IdentityEigenVals,
	Upper1EigenVals,
	Upper2EigenVals,
	RandEigenVals []complex128
}
// SetupTest seeds two upper-triangular 5x5 fixtures (their eigenvalues
// are the diagonal entries) plus expected eigenvalue lists.
//
// Fixed malformed formatting that split "[]float64" across two lines.
func (suite *EigValDeterminantTestSuite) SetupTest() {
	suite.IdentityEigenVals = []complex128{1 + 0i, 1 + 0i, 1 + 0i}
	suite.Uppertriangular1 = NewMatrix(
		[]float64{5, 10, 9, 3, 4},
		[]float64{0, 4, -6, 7.234, -3},
		[]float64{0, 0, 3, 13098.38, 239},
		[]float64{0, 0, 0, 2, -70},
		[]float64{0, 0, 0, 0, 1})
	suite.Upper1EigenVals = []complex128{5 + 0i, 4 + 0i, 3 + 0i, 2 + 0i, 1 + 0i}
	suite.Uppertriangular2 = NewMatrix(
		[]float64{1, 10, 9, 3, 4},
		[]float64{0, 3, -6, 7.234, -3},
		[]float64{0, 0, 4, 13098.38, 239},
		[]float64{0, 0, 0, 5, -70},
		[]float64{0, 0, 0, 0, 2})
	suite.Upper2EigenVals = []complex128{1 + 0i, 3 + 0i, 4 + 0i, 5 + 0i, 2 + 0i}
	suite.RandEigenVals = []complex128{18.5377 + 11.1238i, 18.5377 - 11.1238i, -13.4118 + 9.58048i, -13.4118 - 9.58048i, -13.5186 + 0i, 0.557657 + 12.1091i, 0.557657 - 12.1091i, 9.66163 + 0i, 7.42975 + 0i, -1.32488 + 0i}
}
// TestDeterminant: non-square input errors, identity is 1, triangular
// matrices multiply their diagonals (5*4*3*2*1 = 1*3*4*5*2 = 120), and
// the singular RandFourMatrix yields 0.
//
// Assertions normalized to testify's (expected, actual) order and the
// Error/NoError idiom.
func (suite *EigValDeterminantTestSuite) TestDeterminant() {
	det1, err1 := NonsquareMatrix.Determinant()
	det2, err2 := ThreeIdentity.Determinant()
	det3, err3 := suite.Uppertriangular1.Determinant()
	det4, err4 := suite.Uppertriangular2.Determinant()
	det5, err5 := RandMatrix.Determinant()
	det6, err6 := RandFourMatrix.Determinant()
	suite.Equal(0.0, det1, "There should be no determinant")
	suite.Error(err1, "There should be an error")
	suite.Equal(1.0, det2, "They should be equal")
	suite.NoError(err2, "There should be no error")
	suite.Equal(120.0, det3, "They should be equal")
	suite.NoError(err3, "There should be no error")
	suite.Equal(120.0, det4, "They should be equal")
	suite.NoError(err4, "There should be no error")
	suite.Equal(2.39872e+10, det5, "They should be equal")
	suite.NoError(err5, "There should be no error")
	suite.Equal(0.0, det6, "They should be equal")
	suite.NoError(err6, "There should be no error")
}
// TestEigenvalues: non-square input errors and yields no eigenvalues;
// otherwise the returned list matches the precomputed expectations.
func (suite *EigValDeterminantTestSuite) TestEigenvalues() {
	eval1, err1 := NonsquareMatrix.Eigenvalues()
	eval2, err2 := ThreeIdentity.Eigenvalues()
	eval3, err3 := suite.Uppertriangular1.Eigenvalues()
	eval4, err4 := suite.Uppertriangular2.Eigenvalues()
	eval5, err5 := RandMatrix.Eigenvalues()
	// BUG fix: Equal(eval1, nil) fails for a typed-nil []complex128
	// because DeepEqual does not treat it as untyped nil; Nil handles
	// typed nils correctly.
	suite.Nil(eval1, "There should be no eigenvalues")
	suite.Error(err1, "There should be an error")
	suite.Equal(suite.IdentityEigenVals, eval2, "They should be equal")
	suite.NoError(err2, "There should be no error")
	suite.Equal(suite.Upper1EigenVals, eval3, "They should be equal")
	suite.NoError(err3, "There should be no error")
	suite.Equal(suite.Upper2EigenVals, eval4, "They should be equal")
	suite.NoError(err4, "There should be no error")
	suite.Equal(suite.RandEigenVals, eval5, "They should be equal")
	suite.NoError(err5, "There should be no error")
}
// TestAll runs every test suite in this file under "go test".
//
// NOTE(review): InverseTestSuite is commented out below — confirm
// whether that is intentional before re-enabling it.
func TestAll(t *testing.T) {
	suite.Run(t, new(ConstructorsTestSuite))
	suite.Run(t, new(AdditionTestSuite))
	suite.Run(t, new(MultiplicationTestSuite))
	suite.Run(t, new(LUPDecompTestSuite))
	//suite.Run(t, new(InverseTestSuite))
	suite.Run(t, new(EigValDeterminantTestSuite))
}
|
package main
import "fmt"
// user holds a name and age; all fields are comparable, so two user
// values can be compared directly with ==.
type user struct {
	name string
	age  int
}
// person is a distinct struct type; even though it shares a field with
// user, values of different struct types cannot be compared with ==.
// NOTE(review): only referenced from commented-out code in main.
type person struct {
	name string
}
// main demonstrates that struct values of the same type are comparable
// with ==, which compares field by field
// (u1 == u2 ⇔ u1.name == u2.name && u1.age == u2.age).
func main() {
	var u1 user
	var u2 user
	u1.name = "abc"
	if u1 == u2 { // you can compare same type of struct
		fmt.Println("true")
	} else {
		// Fixed inconsistent capitalization: was "False" vs "true".
		fmt.Println("false")
	}
}
|
package zset
// ZSet is a sorted set: a hash map for O(1) member lookup paired with a
// skip list that keeps members ordered by score.
type ZSet struct {
	dict      map[string]*zSkipListNode // member -> its skip-list node
	zSkipList *zSkipList                // score-ordered index of the same nodes
}
// NewZSet returns an empty sorted set.
func NewZSet() *ZSet {
	set := &ZSet{
		dict:      make(map[string]*zSkipListNode),
		zSkipList: NewZSkipList(),
	}
	return set
}
// Add inserts key with score only when the key does not already exist
// (NX semantics); an existing member is left untouched.
func (z *ZSet) Add(key string, score float64) {
	mode := ZADD_NX
	z.ZAdd(key, score, &mode, nil)
}
// Update changes the score of an existing key only (XX semantics); a
// missing member is not inserted.
func (z *ZSet) Update(key string, score float64) {
	mode := ZADD_XX
	z.ZAdd(key, score, &mode, nil)
}
// ZAdd adds key with the given score or updates an existing entry,
// driven by the ZADD_* bits passed in *flags:
//   - ZADD_NX: only add; an existing key is left untouched (ZADD_NOP).
//   - ZADD_XX: only update; a missing key is not added (ZADD_NOP).
//   - ZADD_INCR: treat score as a delta added to the current score.
// On return *flags is replaced with the outcome bits (ZADD_ADDED,
// ZADD_UPDATED or ZADD_NOP) and, when newScore is non-nil, *newScore
// receives the resulting score.
// NOTE(review): every path returns 1 — confirm the return value is
// intentionally a constant.
func (z *ZSet) ZAdd(key string, score float64, flags *int, newScore *float64) int {
	// Check vars
	incr := (*flags & ZADD_INCR) != 0
	nx := (*flags & ZADD_NX) != 0
	xx := (*flags & ZADD_XX) != 0
	*flags = 0 // reset so callers read back only the outcome bits
	var curScore float64
	var zNode *zSkipListNode
	zNode, ok := z.dict[key]
	if ok && zNode != nil {
		if nx {
			// Key exists and NX forbids updates.
			*flags |= ZADD_NOP
			return 1
		}
		curScore = zNode.score
		//incr
		if incr {
			score += curScore
			if newScore != nil {
				*newScore = score
			}
		}
		// remove and re-insert when score changed.
		if score != curScore {
			node := z.zSkipList.Update(key, curScore, score)
			z.dict[key] = node
			*flags |= ZADD_UPDATED
		}
		return 1
	} else if !xx {
		// Key absent and XX not set: insert a fresh node.
		node := z.zSkipList.InsertNode(key, score)
		z.dict[key] = node
		*flags |= ZADD_ADDED
		if newScore != nil {
			*newScore = score
		}
		return 1
	} else {
		// Key absent but XX requires an existing entry.
		*flags |= ZADD_NOP
		return 1
	}
}
// Del removes key from the set, returning a pointer to its score, or
// nil when the key was not present.
func (z *ZSet) Del(key string) *float64 {
	zNode, ok := z.dict[key]
	if !ok {
		return nil
	}
	delete(z.dict, key)
	z.zSkipList.Delete(key, zNode.score)
	return &zNode.score
}
// Rank returns key's rank by ascending score as reported by the skip
// list, or -1 when the key is absent.
func (z *ZSet) Rank(key string) int {
	zNode, ok := z.dict[key]
	if !ok {
		return -1
	}
	return z.zSkipList.Rank(key, zNode.score)
}
// RevRank returns key's rank counted from the opposite end of the
// ordering, or -1 when the key is absent.
func (z *ZSet) RevRank(key string) int {
	rank := z.Rank(key)
	if rank == -1 {
		return -1
	}
	return z.zSkipList.length - rank - 1
}
// IncrBy adds increment to key's score (inserting the key when it is
// missing) and returns the resulting score.
func (z *ZSet) IncrBy(key string, increment float64) (score float64) {
	mode := ZADD_INCR
	z.ZAdd(key, increment, &mode, &score)
	return score
}
// Range returns the key/score pairs whose ranks fall between start and
// end, as produced by the underlying skip list.
// NOTE(review): start >= end returns nil — confirm the intended
// bound inclusivity against zSkipList.Range.
func (z *ZSet) Range(start, end int) (res []RangeResp) {
	if start >= end {
		return
	}
	list := z.zSkipList.Range(start, end)
	for _, l := range list {
		res = append(res, RangeResp{Key: l.key, Score: l.score})
	}
	return
}
// RangeResp is one member/score pair returned by Range.
type RangeResp struct {
	Key   string
	Score float64
}
|
package main
import (
"fmt"
"math"
)
// shape is anything that can report its surface area.
// NOTE(review): not referenced by the visible code — main calls the
// concrete types directly.
type shape interface {
	area() float64
}
// triangle is described by its base length and height.
type triangle struct {
	baseLength float64
	height     float64
}
// square is described by the length of one side.
type square struct {
	sideLength float64
}
// main prints the areas of a sample triangle and square.
func main() {
	tri := triangle{baseLength: 4, height: 2}
	fmt.Printf("Triangle area is %.2f\n", tri.area())
	sq := square{sideLength: 2}
	fmt.Printf("Square area is %.2f\n", sq.area())
}
// area returns the triangle's area: half of base times height.
func (t triangle) area() float64 {
	halfBase := t.baseLength / 2
	return halfBase * t.height
}
// area returns the square's area: the side length squared.
func (s square) area() float64 {
	side := s.sideLength
	return math.Pow(side, 2)
}
|
package protocol
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"strings"
)
// v2Service is a client for a v2-protocol package feed rooted at
// resourceUrl.
type v2Service struct {
	resourceUrl string
}
// NewV2Client constructs a v2Service client rooted at url.
func NewV2Client(url string) v2Service {
	svc := v2Service{resourceUrl: url}
	return svc
}
// IsValid reports whether the configured URL looks like a v2 endpoint.
// NOTE(review): the only check is that the URL does not end in ".json"
// — presumably to exclude v3-style service-index URLs; confirm.
func (svc v2Service) IsValid() bool {
	if strings.HasSuffix(svc.resourceUrl, ".json") {
		return false
	}
	return true
}
// GetServiceVersion reports the protocol version this client speaks,
// which is always 2.
func (svc v2Service) GetServiceVersion() int {
	const protocolVersion = 2
	return protocolVersion
}
// GetPackageData queries the feed for every version of the package id
// and assembles them into a Package with its version map populated.
func (svc v2Service) GetPackageData(id string) (Package, error) {
	var pkg Package
	var feed v2feed
	searchURL := fmt.Sprintf(svc.getSearchUrlFormat(), id)
	if err := xmlRequest(searchURL, &feed); err != nil {
		return pkg, err
	}
	for _, entry := range feed.Entries {
		// The package id is taken from the first entry that has one.
		if pkg.Id == "" {
			pkg.Id = entry.Properties.Id
		}
		pkg.Versions = append(pkg.Versions, Version{
			Version:     entry.Properties.Version,
			DownloadUrl: entry.Content.DownloadUrl,
		})
	}
	pkg.VersionMap = makeVersionMap(pkg.Versions)
	return pkg, nil
}
// DownloadPackage streams the package payload for the given version.
func (svc v2Service) DownloadPackage(version Version) (io.Reader, error) {
	url := version.DownloadUrl
	return downloadData(url)
}
// GetVersion looks up a single version of a package.
// NOTE(review): unimplemented stub — it always returns the zero
// Version with a nil error, so callers cannot distinguish success
// from "not implemented". Confirm before relying on it.
func (svc v2Service) GetVersion(id, version string) (Version, error) {
	return Version{}, nil
}
// GetNuspec downloads the package payload and extracts its nuspec
// manifest.
func (svc v2Service) GetNuspec(pkg Package, version Version) (*Nuspec, error) {
	body, err := svc.DownloadPackage(version)
	if err != nil {
		return nil, err
	}
	return getNuspec(pkg.Id, version.Version, body)
}
// private helpers
// getSearchUrlFormat returns a printf-style format string for the v2
// FindPackagesById query; the single %s verb is the package id.
// NOTE(review): assumes resourceUrl ends with a path separator —
// confirm against getResourceUrl's output.
func (svc v2Service) getSearchUrlFormat() string {
	const query = "FindPackagesById()?id='%s'"
	return svc.resourceUrl + query
}
// getDownloadUrl builds the download URL for a package version.
// NOTE(review): unimplemented stub — always returns "".
func (svc v2Service) getDownloadUrl(id, version string) string {
	return ""
}
// getResourceUrl fetches the v2 service document at url and returns
// the collection resource URL it advertises.
func getResourceUrl(url string) (string, error) {
	var service v2serviceResponse
	if err := xmlRequest(url, &service); err != nil {
		return "", err
	}
	href := service.Workspace.Collection.Href
	if href == "" {
		return "", errors.New("invalid response, probably not a v2 service")
	}
	return url + href, nil
}
// xmlRequest GETs url and XML-decodes the response body into out,
// which must be a pointer.
func xmlRequest(url string, out interface{}) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Fail loudly on HTTP errors rather than trying to decode an error
	// page as a feed document.
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return fmt.Errorf("unexpected HTTP status %s from %s", resp.Status, url)
	}
	// Decode directly into out: the previous code passed &out (a
	// *interface{}) and relied on encoding/xml unwrapping the extra
	// level of indirection.
	if err := xml.NewDecoder(resp.Body).Decode(out); err != nil {
		return err
	}
	return nil
}
|
package api
// decodeCity maps an upper-case Russian city name to its internal
// numeric city code. Unknown cities yield the empty string.
//
// Fixed: the named return value was never used — the old code
// reassigned the city parameter instead, which obscured intent.
func decodeCity(city string) (cityCode string) {
	switch city {
	case "МОСКВА":
		return "2000000"
	case "САНКТ-ПЕТЕРБУРГ":
		return "2004001"
	case "ОРСК":
		return "2040480"
	default:
		return ""
	}
}
|
package main
// Education is a ledger record of one person's educational credential,
// together with its on-chain change history.
type Education struct {
	ObjectType     string        `json:"docType"`
	Name           string        `json:"Name"`           // full name
	Gender         string        `json:"Gender"`         // gender
	Nation         string        `json:"Nation"`         // ethnicity
	EntityID       string        `json:"EntityID"`       // national ID number
	Place          string        `json:"Place"`          // place of origin
	BirthDay       string        `json:"BirthDay"`       // date of birth
	EnrollDate     string        `json:"EnrollDate"`     // enrollment date
	GraduationDate string        `json:"GraduationDate"` // graduation/completion date
	SchoolName     string        `json:"SchoolName"`     // school name
	Major          string        `json:"Major"`          // major / field of study
	QuaType        string        `json:"QuaType"`        // qualification category
	Length         string        `json:"Length"`         // length of schooling
	Mode           string        `json:"Mode"`           // study mode (e.g. full/part time)
	Level          string        `json:"Level"`          // education level
	Graduation     string        `json:"Graduation"`     // graduation/completion status
	CertNo         string        `json:"CertNo"`         // certificate number
	Photo          string        `json:"Photo"`          // photo
	Historys       []HistoryItem // change history of this education record
}
// HistoryItem pairs a transaction id with the Education state recorded
// by that transaction.
type HistoryItem struct {
	TxId      string
	Education Education
}
|
package service
import (
"CloudRestaurant/dao"
"CloudRestaurant/model"
)
// FoodCategoryService exposes food-category queries to the handler
// layer; it is stateless and delegates to the DAO layer.
type FoodCategoryService struct {
}
// Categories returns all food categories from the persistence layer.
func (fcs *FoodCategoryService) Categories() ([]model.FoodCategory, error) {
	// Delegate the actual database query to the DAO layer.
	categoryDao := dao.NewFoodCategoryDao()
	return categoryDao.QueryCategories()
}
|
package rakuten
import (
"context"
"fmt"
)
// TravelVacantHotelSearchParams are the query parameters for the
// Rakuten Travel VacantHotelSearch API, encoded via `url` tags.
//
// NOTE(review): several exported field names are misspelled (Latitue,
// Longtitude, the Infrant* group) — renaming them would break callers,
// so they are left as-is. Also confirm the wire name in the
// `url:"longtitude"` tag against the Rakuten API documentation.
type TravelVacantHotelSearchParams struct {
	LargeClassCode      string  `url:"largeClassCode,omitempty"`
	MiddleClassCode     string  `url:"middleClassCode,omitempty"`
	SmallClassCode      string  `url:"smallClassCode,omitempty"`
	DetailClassCode     string  `url:"detailClassCode,omitempty"`
	HotelNo             int     `url:"hotelNo,omitempty"`
	CheckDate           string  `url:"checkDate,omitempty"`
	CheckOutDate        string  `url:"checkOutDate,omitempty"`
	AdultNum            int     `url:"adultNum,omitempty"`
	UpClassNum          int     `url:"upClassNum,omitempty"`
	LowClassNum         int     `url:"lowClassNum,omitempty"`
	InfrantWithMBNum    int     `url:"infrantWithMBNum,omitempty"`
	InfrantWithMNum     int     `url:"infrantWithMNum,omitempty"`
	InfrantWithBNum     int     `url:"infrantWithBNum,omitempty"`
	InfrantWithoutMBNum int     `url:"infrantWithoutMBNum,omitempty"`
	RoomNum             int     `url:"roomNum,omitempty"`
	MaxCharge           int     `url:"maxCharge,omitempty"`
	Latitue             float64 `url:"latitude,omitempty"`
	Longtitude          float64 `url:"longtitude,omitempty"`
	SearchRadius        int     `url:"searchRadius,omitempty"`
	SqueezeCondition    string  `url:"squeezeCondition,omitempty"`
	Carrier             int     `url:"carrier,omitempty"`
	DatumType           int     `url:"datumType,omitempty"`
	Hits                int     `url:"hits,omitempty"`
	Page                int     `url:"page,omitempty"`
	SearchPattern       int     `url:"searchPattern,omitempty"`
	HotelThumbnailSize  int     `url:"hotelThumbnailSize,omitempty"`
	ResponseType        string  `url:"responseType,omitempty"`
	Sort                string  `url:"sort,omitempty"`
	AllReturnFlag       int     `url:"allReturnFlag,omitempty"`
}
// TravelVacantHotelSearchResponse is the decoded JSON payload of the
// VacantHotelSearch API: paging metadata plus, per hotel, the basic
// hotel information and the available room/plan entries with their
// daily charges.
type TravelVacantHotelSearchResponse struct {
	PagingInfo struct {
		RecordCount int `json:"recordCount"`
		PageCount   int `json:"pageCount"`
		Page        int `json:"page"`
		First       int `json:"first"`
		Last        int `json:"last"`
	} `json:"pagingInfo"`
	Hotels []struct {
		Hotel []struct {
			HotelBasicInfo struct {
				HotelNo             int     `json:"hotelNo"`
				HotelName           string  `json:"hotelName"`
				HotelInformationURL string  `json:"hotelInformationUrl"`
				PlanListURL         string  `json:"planListUrl"`
				DpPlanListURL       string  `json:"dpPlanListUrl"`
				ReviewURL           string  `json:"reviewUrl"`
				HotelKanaName       string  `json:"hotelKanaName"`
				HotelSpecial        string  `json:"hotelSpecial"`
				HotelMinCharge      int     `json:"hotelMinCharge"`
				Latitude            float64 `json:"latitude"`
				Longitude           float64 `json:"longitude"`
				PostalCode          string  `json:"postalCode"`
				Address1            string  `json:"address1"`
				Address2            string  `json:"address2"`
				TelephoneNo         string  `json:"telephoneNo"`
				FaxNo               string  `json:"faxNo"`
				Access              string  `json:"access"`
				ParkingInformation  string  `json:"parkingInformation"`
				NearestStation      string  `json:"nearestStation"`
				HotelImageURL       string  `json:"hotelImageUrl"`
				HotelThumbnailURL   string  `json:"hotelThumbnailUrl"`
				RoomImageURL        string  `json:"roomImageUrl"`
				RoomThumbnailURL    string  `json:"roomThumbnailUrl"`
				HotelMapImageURL    string  `json:"hotelMapImageUrl"`
				ReviewCount         int     `json:"reviewCount"`
				ReviewAverage       float64 `json:"reviewAverage"`
				UserReview          string  `json:"userReview"`
			} `json:"hotelBasicInfo,omitempty"`
			RoomInfo []struct {
				RoomBasicInfo struct {
					RoomClass           string `json:"roomClass"`
					RoomName            string `json:"roomName"`
					PlanID              int    `json:"planId"`
					PlanName            string `json:"planName"`
					PointRate           int    `json:"pointRate"`
					WithDinnerFlag      int    `json:"withDinnerFlag"`
					DinnerSelectFlag    int    `json:"dinnerSelectFlag"`
					WithBreakfastFlag   int    `json:"withBreakfastFlag"`
					BreakfastSelectFlag int    `json:"breakfastSelectFlag"`
					Payment             string `json:"payment"`
					ReserveURL          string `json:"reserveUrl"`
					SalesformFlag       int    `json:"salesformFlag"`
				} `json:"roomBasicInfo,omitempty"`
				DailyCharge struct {
					StayDate      string `json:"stayDate"`
					RakutenCharge int    `json:"rakutenCharge"`
					Total         int    `json:"total"`
					ChargeFlag    int    `json:"chargeFlag"`
				} `json:"dailyCharge,omitempty"`
			} `json:"roomInfo,omitempty"`
		} `json:"hotel"`
	} `json:"hotels"`
}
// VacantHotelSearch calls the Travel VacantHotelSearch endpoint; opt is
// encoded into the query string by NewRequest via its url tags.
// NOTE(review): fmt.Sprintf with a constant format (staticcheck S1039)
// is kept only because fmt is this file's sole use of the import.
func (s *TravelService) VacantHotelSearch(ctx context.Context, opt *TravelVacantHotelSearchParams) (*TravelVacantHotelSearchResponse, *Response, error) {
	suffix := fmt.Sprintf("Travel/VacantHotelSearch/20170426?")
	req, err := s.client.NewRequest("GET", suffix, opt, nil)
	if err != nil {
		return nil, nil, err
	}
	out := &TravelVacantHotelSearchResponse{}
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	return out, resp, nil
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates a basic switch on an integer, then two helpers
// that use the time package: one prints the current weekday and the
// other reports how close the coming Saturday is.
// (Previous comment mentioned "sunday"; the code works with Saturday.)
func main() {
	num := 2
	switch num {
	case 1:
		fmt.Println("One")
	case 2:
		fmt.Println("Two")
	default:
		fmt.Println("None")
	}
	demoTime()
	findSaturday()
}
// demoTime prints today's day of the week.
func demoTime() {
	weekday := time.Now().Weekday()
	fmt.Println(weekday)
}
// findSaturday reports whether Saturday is today, tomorrow, or the day
// after tomorrow, based on the current weekday.
func findSaturday() {
	today := time.Now().Weekday()
	switch time.Saturday {
	case today + 0:
		fmt.Println("Today is Saturday")
	case today + 1:
		// BUG fix: these messages previously said "Monday" even though
		// the switch matches Saturday.
		fmt.Println("Tomorrow is Saturday")
	case today + 2:
		fmt.Println("Day after tomorrow is Saturday")
	default:
		// Fixed grammar (was "So far way").
		fmt.Println("Too far away")
	}
}
|
package cache
import (
"bytes"
"encoding/json"
"strings"
"sync"
eventpkg "github.com/serverless/event-gateway/event"
"github.com/serverless/event-gateway/libkv"
"go.uber.org/zap"
)
// eventTypeCache is a mutex-guarded local cache of Event Type
// definitions keyed by (space, name), kept in sync through the
// Modified/Deleted callbacks.
type eventTypeCache struct {
	sync.RWMutex
	cache map[libkv.EventTypeKey]*eventpkg.Type
	log   *zap.Logger
}
// newEventTypeCache returns an empty cache that logs through log.
func newEventTypeCache(log *zap.Logger) *eventTypeCache {
	c := &eventTypeCache{
		cache: make(map[libkv.EventTypeKey]*eventpkg.Type),
		log:   log,
	}
	return c
}
// Modified handles a create/update notification: v holds the
// JSON-serialized Event Type and k its "space/name" key. Malformed
// payloads are logged and dropped without touching the cache.
func (c *eventTypeCache) Modified(k string, v []byte) {
	eventType := &eventpkg.Type{}
	err := json.NewDecoder(bytes.NewReader(v)).Decode(eventType)
	if err != nil {
		c.log.Error("Could not deserialize Event Type state.", zap.Error(err), zap.String("key", k), zap.String("value", string(v)))
		return
	}
	c.log.Debug("Event Type local cache received value update.", zap.String("key", k), zap.Object("value", eventType))
	c.Lock()
	defer c.Unlock()
	// NOTE(review): assumes k contains a "/" — a bare key would panic on
	// segments[1]; confirm the watcher guarantees the key shape.
	segments := strings.Split(k, "/")
	c.cache[libkv.EventTypeKey{Space: segments[0], Name: eventpkg.TypeName(segments[1])}] = eventType
}
// Deleted evicts the Event Type identified by the "space/name" key k;
// the value v is unused.
func (c *eventTypeCache) Deleted(k string, v []byte) {
	c.Lock()
	defer c.Unlock()
	parts := strings.Split(k, "/")
	key := libkv.EventTypeKey{Space: parts[0], Name: eventpkg.TypeName(parts[1])}
	delete(c.cache, key)
}
|
package geometry
import (
"math"
)
// Shift the Line by the Vector.
func Shift(l Line, v Vector) Line {
return MustLine(NewLineFromPoints(
Point{X: l.a.X + v.I, Y: l.a.Y + v.J},
Point{X: l.b.X + v.I, Y: l.b.Y + v.J},
))
}
// ShortestVector returns the shortest Vector from Lines a to b.
//
// Non-parallel Lines intersect, so the result then has length 0;
// otherwise it is measured along a perpendicular common to both.
func ShortestVector(a, b Line) Vector {
	if !AreParallel(a, b) {
		return Vector{I: 0, J: 0}
	}
	perp := Perpendicular(a)
	from := MustPoint(Intersection(a, perp))
	to := MustPoint(Intersection(b, perp))
	return Vector{I: to.X - from.X, J: to.Y - from.Y}
}
// Scale a Vector to a new one of the same direction but given length.
//
// Returns ErrNoVector if v has length 0, since the scaled Vector's
// direction can't be determined.
func Scale(v Vector, l Number) (Vector, error) {
	current := Length(v)
	if IsZero(current) {
		return Vector{}, ErrNoVector
	}
	scaled := Vector{I: l * v.I / current, J: l * v.J / current}
	return scaled, nil
}
// Length of Vector v (its Euclidean norm).
func Length(v Vector) Number {
	hyp := math.Hypot(float64(v.I), float64(v.J))
	return Number(hyp)
}
// Distance between Points a and b.
func Distance(a, b Point) Number {
	ab := Vector{I: b.X - a.X, J: b.Y - a.Y}
	return Length(ab)
}
// AngleBetween Lines a and b so that it is the shortest Angle to rotate a
// counter-clockwise to be parallel to b.
func AngleBetween(a, b Line) Angle {
	if AreParallel(a, b) {
		return 0
	}
	// Direction angles of each line from the positive x-axis.
	radsa := math.Atan2(float64(dy(a)), float64(dx(a)))
	radsb := math.Atan2(float64(dy(b)), float64(dx(b)))
	rads := math.Mod(radsa-radsb, 2*math.Pi)
	// Validate the candidate by actually rotating a copy of a; if it
	// still isn't parallel to b, take the complementary angle.
	// NOTE(review): math.Mod keeps the sign of its dividend, so rads can
	// be negative here — presumably the complement handles that; confirm.
	i := MustPoint(Intersection(a, b))
	if !AreParallel(Rotate(a, i, Angle(rads)), b) {
		rads = 2*math.Pi - rads
	}
	return Angle(rads)
}
// Perpendicular Line to Line l through any Point on l.
func Perpendicular(l Line) Line {
	through := l.a
	return PerpendicularThroughPoint(l, through)
}
// PerpendicularThroughPoint returns a perpendicular Line to Line l that
// passes through Point p (slope components swapped and one negated).
func PerpendicularThroughPoint(l Line, p Point) Line {
	m, n := -dx(l), dy(l)
	return MustLine(NewLineFromPointAndSlope(p, m, n))
}
// Intersection returns the Point where Lines a and b intersect.
//
// Returns ErrNoIntersection if the Lines are parallel, since the
// intersection won't exist if the Lines aren't the same or occurs at
// infinitely many Points if the Lines are the same.
// (Previous comment named a nonexistent ErrParallel.)
func Intersection(a, b Line) (Point, error) {
	if AreParallel(a, b) {
		return Point{}, ErrNoIntersection
	}
	// Cramer's-rule style solution assembled from 2x2 determinants.
	m, n := det(a.a.X, a.a.Y, a.b.X, a.b.Y), det(b.a.X, b.a.Y, b.b.X, b.b.Y)
	xn := det(m, a.a.X-a.b.X, n, b.a.X-b.b.X)
	yn := det(m, a.a.Y-a.b.Y, n, b.a.Y-b.b.Y)
	d := det(a.a.X-a.b.X, a.a.Y-a.b.Y, b.a.X-b.b.X, b.a.Y-b.b.Y)
	return Point{X: xn / d, Y: yn / d}, nil
}
// RotateAroundOrigin rotates Line l counter-clockwise by Angle rads
// around the origin.
func RotateAroundOrigin(l Line, rads Angle) Line {
	origin := Point{X: 0, Y: 0}
	return Rotate(l, origin, rads)
}
// Rotate Line l counter-clockwise around Point p by Angle rads.
// (Fixed "clockise" typo.)
func Rotate(l Line, p Point, rads Angle) Line {
	// Translate so p is the origin, rotate, then translate back.
	ax, ay, bx, by := l.a.X-p.X, l.a.Y-p.Y, l.b.X-p.X, l.b.Y-p.Y
	cos := Number(math.Cos(float64(rads)))
	sin := Number(math.Sin(float64(rads)))
	// det(x, y, sin, cos) = x*cos - y*sin — the standard 2-D rotation.
	a := Point{
		X: det(ax, ay, sin, cos) + p.X,
		Y: det(ay, -ax, sin, cos) + p.Y,
	}
	b := Point{
		X: det(bx, by, sin, cos) + p.X,
		Y: det(by, -bx, sin, cos) + p.Y,
	}
	return MustLine(NewLineFromPoints(a, b))
}
// dx returns the x-difference of the Points on the Line (a minus b).
func dx(l Line) Number {
	return l.a.X - l.b.X
}
// dy returns the y-difference of the Points on the Line (a minus b).
func dy(l Line) Number {
	return l.a.Y - l.b.Y
}
// det is the determinant of the 2x2 matrix with a and b in the first
// row and c and d in the second row: a*d - b*c.
func det(a, b, c, d Number) Number {
	return a*d - b*c
}
// dot is the dot-product of Vectors a and b.
// NOTE(review): not referenced by any code visible in this section.
func dot(a, b Vector) Number {
	return a.I*b.I + a.J*b.J
}
// StandardCoefficients for the Line's equation and the value the
// equation is equal to.
//
// Line-equation is ax + by = c.
func StandardCoefficients(l Line) (Number, Number, Number) {
	a, b := dy(l), -dx(l)
	c := a*l.a.X + b*l.a.Y
	return a, b, c
}
|
package utils
import (
	"strings"
	"unicode"
	"unicode/utf8"
)
// UnTitle returns src with its first rune lower-cased and the rest
// unchanged.
//
// BUG fix: the old code lowered the first *byte* via string(src[0]),
// which re-encodes any byte >= 0x80 as a fresh rune and corrupts
// multi-byte UTF-8 leading characters. Decoding the full first rune
// keeps non-ASCII input intact; ASCII behavior is unchanged.
func UnTitle(src string) string {
	if src == "" {
		return ""
	}
	r, size := utf8.DecodeRuneInString(src)
	return string(unicode.ToLower(r)) + src[size:]
}
// UpTitle upper-cases the whole of src.
// NOTE(review): unlike UnTitle, which touches only the first character,
// this converts every character — confirm callers expect the asymmetry.
func UpTitle(src string) string {
	if len(src) == 0 {
		return ""
	}
	return strings.ToUpper(src)
}
|
package services
import (
"bytes"
"goChat/Server/inMemoryDatabase"
"goChat/Server/models"
"net/http"
"net/http/httptest"
"testing"
)
func TestNewAuthService(t *testing.T) {
repo := getInMemoryUserRepo()
authService := NewAuthService(repo)
//userId, _ := createTestUser(repo)
//user, _ := repo.GetUserByID(userId)
if authService == nil {
t.Errorf("failed to initialize auth service")
}
}
// TestAuthService_AuthenticateHandler exercises the login handler with
// a valid credential set (expects 200) and a wrong user name
// (expects 401).
func TestAuthService_AuthenticateHandler(t *testing.T) {
	repo := getInMemoryUserRepo()
	authService := NewAuthService(repo)
	createTestUser(repo)
	jsonCredential := []byte(`{"userName":"siva@gochat.com","password":"hello"}`)
	req, err := http.NewRequest("GET", "/login", bytes.NewBuffer(jsonCredential))
	if err != nil {
		t.Fatal(err)
	}
	resRec := httptest.NewRecorder()
	handler := http.HandlerFunc(authService.AuthenticateHandler)
	handler.ServeHTTP(resRec, req)
	if status := resRec.Code; status != http.StatusOK {
		t.Errorf("handler returned unexpected result")
	}
	// An unknown user name must be rejected.
	jsonCredential = []byte(`{"userName":"siva1@gochat.com","password":"hello"}`)
	req, err = http.NewRequest("GET", "/login", bytes.NewBuffer(jsonCredential))
	if err != nil {
		// BUG fix: this error was previously ignored.
		t.Fatal(err)
	}
	resRec = httptest.NewRecorder()
	handler = http.HandlerFunc(authService.AuthenticateHandler)
	handler.ServeHTTP(resRec, req)
	if status := resRec.Code; status != http.StatusUnauthorized {
		t.Errorf("handler returned unexpected result for negative user")
	}
}
// createTestUser seeds the repository with a fixed user and returns
// the new user's id.
func createTestUser(repo *inMemoryDatabase.UserRepository) (string, error) {
	u := models.User{
		Email:          "siva@gochat.com",
		FirstName:      "siva",
		LastName:       "siva",
		PasswordHashed: "hello",
		NickName:       "siva",
	}
	return repo.Create(u)
}
// getInMemoryUserRepo returns a fresh in-memory user repository.
func getInMemoryUserRepo() *inMemoryDatabase.UserRepository {
	repo := inMemoryDatabase.NewUserRepository()
	return repo
}
|
package main
import (
"fmt"
"log"
)
type account struct{ ID string }
// newAccount constructs an account with the given ID.
func newAccount(ID string) *account {
	a := account{ID}
	return &a
}
// checkAccount verifies that the supplied ID matches the account's ID,
// logging the attempt and returning an error on mismatch.
func (a *account) checkAccount(ID string) error {
	log.Printf("checking account with %s id\n", ID)
	if ID == a.ID {
		log.Printf("account verified")
		return nil
	}
	return fmt.Errorf("account ID not verified")
}
|
package gonba
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
const baseAddress = "https://stats.nba.com/stats/"
const baseAddressV2 = "http://data.nba.com/data/5s/json/cms/noseason/"
const baseAddressV3 = "http://data.nba.net/prod/"
// A Client is required for api calls.
type Client struct {
	baseURL    string       // default base address for requests
	httpClient *http.Client // shared client with timeout and pooled transport
}
// NewClient builds a Client for the stats API with a 10-second timeout
// and a connection-pooling transport.
func NewClient() *Client {
	httpClient := &http.Client{
		Timeout:   10 * time.Second,
		Transport: buildCustomTransport(),
	}
	return &Client{baseURL: baseAddress, httpClient: httpClient}
}
// makeRequest performs the request and JSON-decodes the body into
// schema, returning the HTTP status code.
// NOTE(review): the json.Unmarshal error is silently discarded — on a
// malformed body, schema is left partially filled while the status may
// still read 200. Confirm callers rely on status alone before changing.
func (c *Client) makeRequest(endpoint string, params map[string]string, schema interface{}) int {
	body, status := c.makeRequestWithoutJson(endpoint, params)
	json.Unmarshal(body, schema)
	return status
}
// makeRequestWithoutJson GETs the endpoint against the selected base
// URL with params encoded as the query string, and returns the raw
// body plus HTTP status code.
//
// The reserved param "version" ("2" or "3") switches to an alternate
// base URL and is then deleted — note this mutates the caller's map.
// NOTE(review): a transport error yields (nil, 404) — a sentinel, not
// a real status — and the NewRequest/ReadAll errors are ignored.
func (c *Client) makeRequestWithoutJson(endpoint string, params map[string]string) ([]byte, int) {
	requestUrl := c.baseURL + endpoint
	if val, ok := params["version"]; ok {
		if val == "2" {
			requestUrl = baseAddressV2 + endpoint
		} else if val == "3" {
			requestUrl = baseAddressV3 + endpoint
		}
		delete(params, "version")
	}
	request, _ := http.NewRequest("GET", requestUrl, nil)
	request.Header.Set("Content-Type", "application/json")
	query := request.URL.Query()
	for key, value := range params {
		query.Add(key, value)
	}
	request.URL.RawQuery = query.Encode()
	//fmt.Println(request.URL)
	response, err := c.httpClient.Do(request)
	if err != nil {
		fmt.Printf("Error when making request %v\n", err.Error())
		return nil, 404
	}
	defer response.Body.Close()
	body, _ := ioutil.ReadAll(response.Body)
	return body, response.StatusCode
}
func buildCustomTransport() *http.Transport {
defaultRoundTripper := http.DefaultTransport
defaultTransportPointer, success := defaultRoundTripper.(*http.Transport)
if !success {
panic(fmt.Sprintf("defaultRoundTripper not an *http.Transport"))
}
defaultTransport := *defaultTransportPointer
defaultTransport.MaxIdleConns = 100
defaultTransport.MaxIdleConnsPerHost = 100
return &defaultTransport
}
|
package types
import (
types2 "github.com/Secured-Finance/dione/blockchain/types"
)
// PrePrepareMessage wraps a proposed block.
// NOTE(review): the names suggest PBFT-style consensus phases
// (pre-prepare / prepare / commit) — confirm against the consensus
// implementation.
type PrePrepareMessage struct {
	Block *types2.Block
}

// PrepareMessage carries a block hash together with a signature over it.
type PrepareMessage struct {
	Blockhash []byte
	Signature []byte
}

// CommitMessage has the same shape as PrepareMessage.
type CommitMessage PrepareMessage
|
package main
import (
"fmt"
"strings"
)
// appendbit sets or clears the bit at position idx in data, counting
// bits MSB-first within each byte.
func appendbit(data []byte, idx int, on bool) {
	bytePos := uint(idx / 8)
	mask := byte(1) << (7 - uint(idx%8))
	if on {
		data[bytePos] |= mask
	} else {
		data[bytePos] &^= mask
	}
}
// parseinput converts a '#'/'.' pot string into a packed bitmap,
// surrounding it with padbytes empty bytes in front and postpad empty
// bytes behind to leave room for growth in later generations.
func parseinput(input string, padbytes int, postpad int) []byte {
	const emptybyte = "........"
	padded := strings.Repeat(emptybyte, padbytes) + input + strings.Repeat(emptybyte, postpad)
	data := make([]byte, len(padded)/8+1)
	for i, c := range padded {
		// Bits start cleared, so setting on '#' and clearing on '.'
		// produces the same bitmap as the original switch.
		appendbit(data, i, c == '#')
	}
	return data
}
// itergroupins applies one generation of the 5-bit rules to data,
// scanning from index "from". It returns the byte index just before
// the first non-zero byte (a resume hint for the next generation) and
// the new pot bitmap, grown by one byte when the last byte was used.
// NOTE(review): bytes before the first non-zero byte are skipped, so
// leftward growth beyond one byte per generation relies on the
// caller's padding — confirm.
func itergroupins(data []byte, rules map[byte]bool, from int) (int, []byte) {
	result := make([]byte, len(data))
	firstdata := 0
	datastarted := false
	last := 0
	for i := from; i < len(data); i++ {
		b := data[i]
		//skip all the leading pad zeros added for negative growth.
		if b != 0 && !datastarted {
			datastarted = true
			firstdata = i - 1
		}
		if datastarted && (b != 0 || data[i-1] != 0) {
			last = i
			// Combine the previous and current bytes into a 16-bit window.
			p := uint16(data[i-1])
			c := uint16(data[i])
			t := (p << 8) + c
			//got 16 bits, move 5 bit window across, isolate patterns
			for x := 0; x < 8; x++ {
				m := t & 0xF800 // top 5 bits of the sliding window
				s := m >> 11
				q := byte(s)
				//we're comparing a 5 bit window, starting at byte i-1, but setting the middle bit in the output.
				//so we need set the index at +2
				appendbit(result, ((i-1)*8)+x+2, rules[q])
				t = t << 1
			}
		}
	}
	// Grow the bitmap when the final byte was part of the live region.
	if last+1 == len(data) {
		result = append(result, 0)
	}
	return firstdata, result
}
// evalpattern reports whether a 5-bit pattern yields a plant.
// NOTE(review): this always returns true and appears to be an unused
// stub — rule lookup goes through the map built by processrules.
func evalpattern(p byte) bool {
	return true
}
// sumpots totals the pot numbers of every set bit in data, where pot 0
// sits offset bytes into the bitmap (bits are MSB-first per byte).
func sumpots(data []byte, offset int) int64 {
	var total int64
	base := int64(-offset * 8)
	for i, b := range data {
		for bit := uint(0); bit < 8; bit++ {
			if b&(0x80>>bit) != 0 {
				total += base + int64(i*8) + int64(bit)
			}
		}
	}
	return total
}
// prettyprintpots prints the bitmap as one line of '#' (plant) and '.'
// (empty) characters.
func prettyprintpots(data []byte) {
	var sb strings.Builder
	for _, b := range data {
		for bit := uint(0); bit < 8; bit++ {
			if b&(0x80>>bit) != 0 {
				sb.WriteByte('#')
			} else {
				sb.WriteByte('.')
			}
		}
	}
	fmt.Println(sb.String())
}
// processrules parses "XXXXX => Y" rule lines and returns the set of
// 5-bit patterns (packed into the high bits of a byte after a "..."
// prefix) that produce a plant; only "=> #" rules are recorded.
func processrules(input string) map[byte]bool {
	rules := map[byte]bool{}
	for _, raw := range strings.Split(input, "\n") {
		line := strings.TrimSpace(raw)
		if line == "" {
			continue
		}
		line = "..." + line
		// After the prefix, index 12 holds the rule's output character.
		if line[12] != '#' {
			continue
		}
		key := []byte{0}
		for i, r := range line[:8] {
			appendbit(key, i, r == '#')
		}
		rules[key[0]] = true
	}
	return rules
}
// twentyiters runs 20 generations of the rules over data and prints
// the resulting sum of live pot numbers.
func twentyiters(data []byte, offset int, rules map[byte]bool) {
	start := 0
	for gen := 0; gen < 20; gen++ {
		start, data = itergroupins(data, rules, start)
	}
	fmt.Println(sumpots(data, offset))
}
// main runs the pot simulation on the test input: parse, run 20
// generations, then hand off to main2 (defined elsewhere in this file)
// for the second part. The commented-out lines switch to the real
// puzzle input.
func main() {
	//real input.
	//input := "..##.#######...##.###...#..#.#.#..#.##.#.##....####..........#..#.######..####.#.#..###.##..##..#..#"
	//rules := processrules(rules())
	//test input
	input := "#..#.#..##......###...###"
	rules := processrules(testrules())
	//offset to zero bit byte / padding bytes each end.
	offset := 2
	data := parseinput(input, offset, offset)
	twentyiters(data, offset, rules)
	main2(data, offset, rules)
}
// testrules returns the example rule set as a raw multi-line string;
// each line is "pattern => output" and is parsed by processrules.
func testrules() string {
	return `
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
`
}
|
package watcher_test
import (
"errors"
"os"
"time"
"github.com/cloudfoundry-incubator/runtime-schema/bbs/fake_bbs"
"github.com/cloudfoundry-incubator/runtime-schema/models"
"github.com/cloudfoundry/gibson"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pivotal-golang/lager/lagertest"
"github.com/tedsuo/ifrit"
"github.com/cloudfoundry-incubator/route-emitter/nats_emitter/fake_nats_emitter"
"github.com/cloudfoundry-incubator/route-emitter/routing_table"
"github.com/cloudfoundry-incubator/route-emitter/routing_table/fake_routing_table"
. "github.com/cloudfoundry-incubator/route-emitter/watcher"
)
// Watcher suite: drives desired/actual LRP change events from a fake BBS
// through a real Watcher process and asserts that the routing table is
// updated and the table's messages are emitted over NATS.
var _ = Describe("Watcher", func() {
	var (
		bbs                 *fake_bbs.FakeRouteEmitterBBS
		table               *fake_routing_table.FakeRoutingTable
		emitter             *fake_nats_emitter.FakeNATSEmitter
		watcher             *Watcher
		process             ifrit.Process
		dummyMessagesToEmit routing_table.MessagesToEmit
	)
	BeforeEach(func() {
		bbs = fake_bbs.NewFakeRouteEmitterBBS()
		table = &fake_routing_table.FakeRoutingTable{}
		emitter = &fake_nats_emitter.FakeNATSEmitter{}
		logger := lagertest.NewTestLogger("test")
		// canned messages the fake table returns from every mutation; each
		// It() only checks that whatever the table returns gets emitted.
		dummyContainer := routing_table.Container{Host: "1.1.1.1", Port: 11}
		dummyMessage := routing_table.RegistryMessageFor(dummyContainer, "foo.com", "bar.com")
		dummyMessagesToEmit = routing_table.MessagesToEmit{
			RegistrationMessages: []gibson.RegistryMessage{dummyMessage},
		}
		watcher = NewWatcher(bbs, table, emitter, logger)
		// start the watcher as an ifrit process; AfterEach tears it down
		process = ifrit.Envoke(watcher)
	})
	AfterEach(func() {
		process.Signal(os.Interrupt)
		Eventually(process.Wait()).Should(Receive())
	})
	Describe("Desired LRP changes", func() {
		Context("when a create/update (includes an after) change arrives", func() {
			BeforeEach(func() {
				desiredChange := models.DesiredLRPChange{
					Before: nil,
					After: &models.DesiredLRP{
						Domain:      "tests",
						ProcessGuid: "pg",
						Routes:      []string{"route-1", "route-2"},
					},
				}
				table.SetRoutesReturns(dummyMessagesToEmit)
				bbs.DesiredLRPChangeChan <- desiredChange
			})
			It("should set the routes on the table", func() {
				Eventually(table.SetRoutesCallCount).Should(Equal(1))
				processGuid, routes := table.SetRoutesArgsForCall(0)
				Ω(processGuid).Should(Equal("pg"))
				Ω(routes).Should(Equal([]string{"route-1", "route-2"}))
			})
			It("should emit whatever the table tells it to emit", func() {
				Eventually(emitter.EmitCallCount).Should(Equal(1))
				Ω(emitter.EmitArgsForCall(0)).Should(Equal(dummyMessagesToEmit))
			})
		})
		Context("when the change is a delete (no after)", func() {
			BeforeEach(func() {
				desiredChange := models.DesiredLRPChange{
					Before: &models.DesiredLRP{
						Domain:      "tests",
						ProcessGuid: "pg",
						Routes:      []string{"route-1"},
					},
					After: nil,
				}
				table.RemoveRoutesReturns(dummyMessagesToEmit)
				bbs.DesiredLRPChangeChan <- desiredChange
			})
			It("should remove the routes from the table", func() {
				Eventually(table.RemoveRoutesCallCount).Should(Equal(1))
				processGuid := table.RemoveRoutesArgsForCall(0)
				Ω(processGuid).Should(Equal("pg"))
			})
			It("should emit whatever the table tells it to emit", func() {
				Eventually(emitter.EmitCallCount).Should(Equal(1))
				Ω(emitter.EmitArgsForCall(0)).Should(Equal(dummyMessagesToEmit))
			})
		})
		Context("when watching for change fails", func() {
			var errorTime time.Time
			BeforeEach(func() {
				// inject a watch error, then queue a change that can only be
				// observed after the watcher re-establishes the watch
				errorTime = time.Now()
				bbs.SendWatchForDesiredLRPChangesError(errors.New("bbs watch failed"))
				desiredChange := models.DesiredLRPChange{
					Before: nil,
					After: &models.DesiredLRP{
						Domain:      "tests",
						ProcessGuid: "pg",
						Routes:      []string{"route-1", "route-2"},
					},
				}
				bbs.DesiredLRPChangeChan <- desiredChange
			})
			It("should retry after 3 seconds", func() {
				Eventually(table.SetRoutesCallCount, 5).Should(Equal(1))
				Ω(time.Since(errorTime)).Should(BeNumerically("~", 3*time.Second, 200*time.Millisecond))
			})
			It("should be possible to SIGINT the route emitter", func() {
				process.Signal(os.Interrupt)
				Eventually(process.Wait()).Should(Receive())
			})
		})
	})
	Describe("Actual LRP changes", func() {
		Context("when a create/update (includes an after) change arrives", func() {
			BeforeEach(func() {
				actualChange := models.ActualLRPChange{
					Before: nil,
					After: &models.ActualLRP{
						ProcessGuid: "pg",
						Host:        "1.1.1.1",
						State:       models.ActualLRPStateRunning,
						Ports: []models.PortMapping{
							{ContainerPort: 8080, HostPort: 11},
						},
					},
				}
				table.AddOrUpdateContainerReturns(dummyMessagesToEmit)
				bbs.ActualLRPChangeChan <- actualChange
			})
			It("should add/update the container on the table", func() {
				Eventually(table.AddOrUpdateContainerCallCount).Should(Equal(1))
				processGuid, container := table.AddOrUpdateContainerArgsForCall(0)
				Ω(processGuid).Should(Equal("pg"))
				Ω(container).Should(Equal(routing_table.Container{Host: "1.1.1.1", Port: 11}))
			})
			It("should emit whatever the table tells it to emit", func() {
				Eventually(emitter.EmitCallCount).Should(Equal(1))
				Ω(emitter.EmitArgsForCall(0)).Should(Equal(dummyMessagesToEmit))
			})
		})
		Context("when watching for change fails", func() {
			var errorTime time.Time
			BeforeEach(func() {
				errorTime = time.Now()
				bbs.SendWatchForActualLRPChangesError(errors.New("bbs watch failed"))
				actualChange := models.ActualLRPChange{
					Before: nil,
					After: &models.ActualLRP{
						ProcessGuid: "pg",
						Host:        "1.1.1.1",
						State:       models.ActualLRPStateRunning,
						Ports: []models.PortMapping{
							{ContainerPort: 8080, HostPort: 11},
						},
					},
				}
				table.AddOrUpdateContainerReturns(dummyMessagesToEmit)
				bbs.ActualLRPChangeChan <- actualChange
			})
			It("should retry after 3 seconds", func() {
				Eventually(emitter.EmitCallCount, 5).Should(Equal(1))
				Ω(time.Since(errorTime)).Should(BeNumerically("~", 3*time.Second, 200*time.Millisecond))
			})
			It("should be possible to SIGINT the route emitter", func() {
				process.Signal(os.Interrupt)
				Eventually(process.Wait()).Should(Receive())
			})
		})
		Context("when the change is a delete (no after)", func() {
			BeforeEach(func() {
				actualChange := models.ActualLRPChange{
					Before: &models.ActualLRP{
						ProcessGuid: "pg",
						Host:        "1.1.1.1",
						State:       models.ActualLRPStateRunning,
						Ports: []models.PortMapping{
							{ContainerPort: 8080, HostPort: 11},
						},
					},
					After: nil,
				}
				table.RemoveContainerReturns(dummyMessagesToEmit)
				bbs.ActualLRPChangeChan <- actualChange
			})
			It("should remove the container from the table", func() {
				Eventually(table.RemoveContainerCallCount).Should(Equal(1))
				processGuid, container := table.RemoveContainerArgsForCall(0)
				Ω(processGuid).Should(Equal("pg"))
				Ω(container).Should(Equal(routing_table.Container{Host: "1.1.1.1", Port: 11}))
			})
			It("should emit whatever the table tells it to emit", func() {
				Eventually(emitter.EmitCallCount).Should(Equal(1))
				Ω(emitter.EmitArgsForCall(0)).Should(Equal(dummyMessagesToEmit))
			})
		})
	})
})
|
package config
import (
"errors"
"reflect"
"testing"
"github.com/go-kratos/kratos/v2/log"
"github.com/stretchr/testify/assert"
)
const (
	// _testJSON is the fixture served by testJsonSource: server/data/endpoints
	// sections matching testConfigStruct below.
	_testJSON = `
{
    "server":{
        "http":{
            "addr":"0.0.0.0",
            "port":80,
            "timeout":0.5,
            "enable_ssl":true
        },
        "grpc":{
            "addr":"0.0.0.0",
            "port":10080,
            "timeout":0.2
        }
    },
    "data":{
        "database":{
            "driver":"mysql",
            "source":"root:root@tcp(127.0.0.1:3306)/karta_id?parseTime=true"
        }
    },
    "endpoints":[
        "www.aaa.com",
        "www.bbb.org"
    ]
}`
)
// testConfigStruct mirrors the shape of _testJSON for Scan round-trip checks.
type testConfigStruct struct {
	Server struct {
		Http struct {
			Addr      string  `json:"addr"`
			Port      int     `json:"port"`
			Timeout   float64 `json:"timeout"`
			EnableSSL bool    `json:"enable_ssl"`
		} `json:"http"`
		GRpc struct {
			Addr    string  `json:"addr"`
			Port    int     `json:"port"`
			Timeout float64 `json:"timeout"`
		} `json:"grpc"`
	} `json:"server"`
	Data struct {
		Database struct {
			Driver string `json:"driver"`
			Source string `json:"source"`
		} `json:"database"`
	} `json:"data"`
	Endpoints []string `json:"endpoints"`
}
// testJsonSource is an in-memory Source serving a fixed JSON document.
// The sig and err channels let a test trigger watcher reloads and errors.
type testJsonSource struct {
	data string
	sig  chan struct{} // fires a "changed" event in the watcher
	err  chan struct{} // fires an error from the watcher
}
// newTestJsonSource builds a testJsonSource around data with fresh
// signal and error channels.
func newTestJsonSource(data string) *testJsonSource {
	src := new(testJsonSource)
	src.data = data
	src.sig = make(chan struct{})
	src.err = make(chan struct{})
	return src
}
// Load returns the source's JSON document as a single "json"-formatted
// key/value entry; it never fails.
func (p *testJsonSource) Load() ([]*KeyValue, error) {
	return []*KeyValue{
		{
			Key:    "json",
			Value:  []byte(p.data),
			Format: "json",
		},
	}, nil
}
// Watch returns a watcher wired to this source's sig/err channels so the
// test can drive reload and error events on demand.
func (p *testJsonSource) Watch() (Watcher, error) {
	return newTestWatcher(p.sig, p.err), nil
}
// testWatcher blocks in Next until one of its channels fires: sig yields a
// nil reload, err yields an error, exit (closed by Stop) ends the watch.
type testWatcher struct {
	sig  chan struct{}
	err  chan struct{}
	exit chan struct{}
}
// newTestWatcher wraps the given signal channels in a testWatcher with
// its own exit channel.
func newTestWatcher(sig, err chan struct{}) Watcher {
	w := &testWatcher{
		sig:  sig,
		err:  err,
		exit: make(chan struct{}),
	}
	return w
}
// Next blocks until the test pushes a signal (nil reload), pushes an
// error, or Stop closes the exit channel (nil, ending the watch loop).
func (w *testWatcher) Next() ([]*KeyValue, error) {
	select {
	case <-w.exit:
		return nil, nil
	case <-w.err:
		return nil, errors.New("error")
	case <-w.sig:
		return nil, nil
	}
}
// Stop closes the exit channel, unblocking any pending Next call.
func (w *testWatcher) Stop() error {
	close(w.exit)
	return nil
}
// TestConfig exercises the full config lifecycle against the in-memory
// JSON source: Close on a fresh instance, Load, typed Value lookup,
// Watch with one reload signal and one error pushed through the fake
// source's channels, then Scan into a typed struct.
func TestConfig(t *testing.T) {
	var (
		err            error
		httpAddr       = "0.0.0.0"
		httpTimeout    = 0.5
		grpcPort       = 10080
		enableSSL      = true
		endpoint1      = "www.aaa.com"
		databaseDriver = "mysql"
	)
	// New + immediate Close must not error.
	c := New(
		WithSource(newTestJsonSource(_testJSON)),
		WithDecoder(defaultDecoder),
		WithResolver(defaultResolver),
		WithLogger(log.DefaultLogger),
	)
	err = c.Close()
	assert.Nil(t, err)
	// build a second config by hand so its internals can be driven directly
	jSource := newTestJsonSource(_testJSON)
	opts := options{
		sources:  []Source{jSource},
		decoder:  defaultDecoder,
		resolver: defaultResolver,
		logger:   log.DefaultLogger,
	}
	cf := &config{}
	cf.opts = opts
	cf.reader = newReader(opts)
	err = cf.Load()
	assert.Nil(t, err)
	val, err := cf.Value("data.database.driver").String()
	assert.Nil(t, err)
	assert.Equal(t, databaseDriver, val)
	err = cf.Watch("endpoints", func(key string, value Value) {
	})
	assert.Nil(t, err)
	// these sends block until the watch goroutine consumes them, so the
	// reload (sig) is fully processed before the error (err) is delivered
	jSource.sig <- struct{}{}
	jSource.err <- struct{}{}
	var testConf testConfigStruct
	err = cf.Scan(&testConf)
	assert.Nil(t, err)
	assert.Equal(t, httpAddr, testConf.Server.Http.Addr)
	assert.Equal(t, httpTimeout, testConf.Server.Http.Timeout)
	assert.Equal(t, enableSSL, testConf.Server.Http.EnableSSL)
	assert.Equal(t, grpcPort, testConf.Server.GRpc.Port)
	assert.Equal(t, endpoint1, testConf.Endpoints[0])
	assert.Equal(t, 2, len(testConf.Endpoints))
}
// TestDefaultResolver verifies ${KEY}, ${KEY:default}, and plain-$ handling
// in defaultResolver against an in-memory value tree. Note defaultResolver
// mutates `data` in place; it is re-run (idempotently) inside every subtest.
func TestDefaultResolver(t *testing.T) {
	var (
		portString = "8080"
		countInt   = 10
		enableBool = true
		rateFloat  = 0.9
	)
	data := map[string]interface{}{
		"foo": map[string]interface{}{
			"bar": map[string]interface{}{
				"notexist": "${NOTEXIST:100}",
				"port":     "${PORT:8081}",
				"count":    "${COUNT:0}",
				"enable":   "${ENABLE:false}",
				"rate":     "${RATE}",
				"empty":    "${EMPTY:foobar}",
				"url":      "${URL:http://example.com}",
				"array": []interface{}{
					"${PORT}",
					map[string]interface{}{"foobar": "${NOTEXIST:8081}"},
				},
				"value1": "${test.value}",
				"value2": "$PORT",
				"value3": "$PORT:default",
			},
		},
		"test": map[string]interface{}{
			"value": "foobar",
		},
		"PORT":   "8080",
		"COUNT":  "10",
		"ENABLE": "true",
		"RATE":   "0.9",
		"EMPTY":  "",
	}
	tests := []struct {
		name   string
		path   string
		expect interface{}
	}{
		{
			name:   "test not exist int env with default",
			path:   "foo.bar.notexist",
			expect: 100,
		},
		{
			name:   "test string with default",
			path:   "foo.bar.port",
			expect: portString,
		},
		{
			name:   "test int with default",
			path:   "foo.bar.count",
			expect: countInt,
		},
		{
			name:   "test bool with default",
			path:   "foo.bar.enable",
			expect: enableBool,
		},
		{
			name:   "test float without default",
			path:   "foo.bar.rate",
			expect: rateFloat,
		},
		{
			name:   "test empty value with default",
			path:   "foo.bar.empty",
			expect: "",
		},
		{
			name:   "test url with default",
			path:   "foo.bar.url",
			expect: "http://example.com",
		},
		{
			name:   "test array",
			path:   "foo.bar.array",
			expect: []interface{}{portString, map[string]interface{}{"foobar": "8081"}},
		},
		{
			name:   "test ${test.value}",
			path:   "foo.bar.value1",
			expect: "foobar",
		},
		{
			name:   "test $value",
			path:   "foo.bar.value2",
			expect: portString,
		},
		{
			name:   "test $value:default",
			path:   "foo.bar.value3",
			expect: portString + ":default",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			err := defaultResolver(data)
			assert.NoError(t, err)
			rd := reader{
				values: data,
			}
			if v, ok := rd.Value(test.path); ok {
				var actual interface{}
				// dispatch on the expected value's Go type to pick the
				// matching typed accessor
				switch test.expect.(type) {
				case int:
					if actual, err = v.Int(); err == nil {
						assert.Equal(t, test.expect, int(actual.(int64)), "int value should be equal")
					}
				case string:
					if actual, err = v.String(); err == nil {
						assert.Equal(t, test.expect, actual, "string value should be equal")
					}
				case bool:
					if actual, err = v.Bool(); err == nil {
						assert.Equal(t, test.expect, actual, "bool value should be equal")
					}
				case float64:
					if actual, err = v.Float(); err == nil {
						assert.Equal(t, test.expect, actual, "float64 value should be equal")
					}
				default:
					actual = v.Load()
					if !reflect.DeepEqual(test.expect, actual) {
						// fixed typo: "actural" -> "actual"
						t.Logf("expect: %#v, actual: %#v", test.expect, actual)
						t.Fail()
					}
				}
				if err != nil {
					t.Error(err)
				}
			} else {
				t.Error("value path not found")
			}
		})
	}
}
|
package main
import (
"github.com/gin-gonic/gin"
"github.com/pepelazz/golangLearning/uploadImage/uploadImage"
"log"
"net/http"
)
// main starts the image-upload web server on port 3083: static assets for
// the vue client, upload/resize/list endpoints, and an SPA fallback for
// unknown routes.
func main() {
	r := gin.New()
	// disable CORS restrictions
	r.Use(LiberalCORS)
	r.Static("/stat-img", "./image")
	r.Static("/static", "./webClient/dist")
	r.Static("/statics", "./webClient/dist/statics")
	r.StaticFile("/", "./webClient/dist/index.html")
	r.POST("/upload_image", uploadImage.SimpleUploadImage)
	r.POST("/upload_image_resize", uploadImage.ResizeUploadImage)
	r.POST("/get_all_image", uploadImage.GetAllImage)
	// for any unknown URL serve the static index so the vuejs app can
	// handle client-side routing
	r.NoRoute(func(c *gin.Context) {
		http.ServeFile(c.Writer, c.Request, "./webClient/dist/index.html")
	})
	err := r.Run(":3083")
	if err != nil {
		log.Fatalf("run webserver: %s", err)
	}
}
// LiberalCORS is a very allowing CORS middleware: it opens the origin to
// everyone and short-circuits OPTIONS preflights with 200, echoing any
// requested headers back as allowed.
func LiberalCORS(c *gin.Context) {
	c.Header("Access-Control-Allow-Origin", "*")
	if c.Request.Method != "OPTIONS" {
		return
	}
	if requested := c.Request.Header["Access-Control-Request-Headers"]; len(requested) > 0 {
		c.Header("Access-Control-Allow-Headers", requested[0])
	}
	c.AbortWithStatus(http.StatusOK)
}
|
package netio
import (
"errors"
"io"
"log"
"net"
"reflect"
"github.com/Dliv3/Venom/global"
"github.com/Dliv3/Venom/utils"
)
// WritePacket serializes packet — a struct whose fields are fixed-width
// unsigned ints, strings, byte slices, or 2/4/32-byte arrays — onto
// output, one field at a time in declaration order. Fields of any other
// type abort serialization with an error.
func WritePacket(output io.Writer, packet interface{}) error {
	t := reflect.TypeOf(packet)
	v := reflect.ValueOf(packet)
	if k := t.Kind(); k != reflect.Struct {
		return errors.New("second param is not struct")
	}
	count := t.NumField()
	for i := 0; i < count; i++ {
		val := v.Field(i).Interface()
		// type switch picks the fixed-width encoding for each field
		switch value := val.(type) {
		case uint16:
			_, err := Write(output, utils.Uint16ToBytes(value))
			if err != nil {
				return err
			}
		case uint32:
			_, err := Write(output, utils.Uint32ToBytes(value))
			if err != nil {
				return err
			}
		case uint64:
			_, err := Write(output, utils.Uint64ToBytes(value))
			if err != nil {
				return err
			}
		case string:
			_, err := Write(output, []byte(value))
			if err != nil {
				return err
			}
		case []byte:
			_, err := Write(output, value)
			if err != nil {
				return err
			}
		case [2]byte:
			_, err := Write(output, value[0:])
			if err != nil {
				return err
			}
		case [4]byte:
			_, err := Write(output, value[0:])
			if err != nil {
				return err
			}
		case [32]byte:
			_, err := Write(output, value[0:])
			if err != nil {
				return err
			}
		default:
			return errors.New("type unsupport")
		}
	}
	return nil
}
// ReadPacket read packet from node.Conn.
// Packet data starts from the packet separator: the stream is scanned for
// the protocol separator first, then each struct field is decoded in
// declaration order using its Go type to pick the wire width.
func ReadPacket(input io.Reader, packet interface{}) error {
	v := reflect.ValueOf(packet)
	t := reflect.TypeOf(packet)
	if v.Kind() == reflect.Ptr && !v.Elem().CanSet() {
		return errors.New("packet is not a reflect. Ptr or elem can not be setted")
	}
	v = v.Elem()
	t = t.Elem()
	count := t.NumField()
	for i := 0; i < count; i++ {
		val := v.Field(i).Interface()
		f := v.FieldByName(t.Field(i).Name)
		// type switch on the field's current value
		switch val.(type) {
		case string:
			// String fields are reserved for the protocol separator: only
			// the separator may be a string, and it must be the struct's
			// first field. Before parsing the body, scan the stream until
			// the separator is found.
			if i == 0 {
				separator, err := readUntilSeparator(input, global.PROTOCOL_SEPARATOR)
				if err != nil {
					return err
				}
				f.SetString(separator)
			}
		case uint16:
			var buf [2]byte
			_, err := Read(input, buf[0:])
			if err != nil {
				return err
			}
			f.SetUint(uint64(utils.BytesToUint16(buf[0:])))
		case uint32:
			var buf [4]byte
			_, err := Read(input, buf[0:])
			if err != nil {
				return err
			}
			f.SetUint(uint64(utils.BytesToUint32(buf[0:])))
		case uint64:
			var buf [8]byte
			_, err := Read(input, buf[0:])
			if err != nil {
				return err
			}
			f.SetUint(uint64(utils.BytesToUint64(buf[0:])))
		case []byte:
			// A variable-length field must be preceded (in struct order) by
			// a sibling length field named <Field>Len — eg HashID's length
			// comes from HashIDLen.
			temp := v.FieldByName(t.Field(i).Name + "Len")
			// the length field must be uint16, uint32 or uint64
			var length uint64
			switch lengthTemp := temp.Interface().(type) {
			case uint64:
				length = lengthTemp
			case uint32:
				length = uint64(lengthTemp)
			case uint16:
				length = uint64(lengthTemp)
			}
			// zero length means there is no payload to read
			if length != 0 {
				// NOTE(review): an oversized length returns nil (success)
				// without consuming the payload, leaving the stream
				// misaligned — confirm this is intended.
				if length > global.MAX_PACKET_SIZE {
					return nil
				}
				buf := make([]byte, length)
				_, err := Read(input, buf[0:])
				if err != nil {
					return err
				}
				f.SetBytes(buf)
			}
		case [2]byte:
			var buf [2]byte
			_, err := Read(input, buf[0:])
			if err != nil {
				return err
			}
			f.Set(reflect.ValueOf(buf))
		case [4]byte:
			var buf [4]byte
			_, err := Read(input, buf[0:])
			if err != nil {
				return err
			}
			f.Set(reflect.ValueOf(buf))
		case [32]byte:
			var buf [32]byte
			_, err := Read(input, buf[0:])
			if err != nil {
				return err
			}
			// how to assign to an array-typed field via reflect
			f.Set(reflect.ValueOf(buf))
		default:
			return errors.New("type unsupport")
		}
	}
	return nil
}
func Read(input io.Reader, buffer []byte) (int, error) {
n, err := io.ReadFull(input, buffer)
if err != nil {
// log.Println("[-]Read Error: ", err)
}
return n, err
}
func Write(output io.Writer, buffer []byte) (int, error) {
if len(buffer) > 0 {
n, err := output.Write(buffer)
if err != nil {
// log.Println("[-]Write Error: ", err)
}
return n, err
}
return 0, nil
}
// readUntilSeparator consumes input one byte at a time, running a KMP
// matcher until the separator is fully matched; if found, it returns
// PROTOCOL_SEPARATOR (the matched pattern). It only returns otherwise
// on a read error — there is no length limit on the scan.
func readUntilSeparator(input io.Reader, separator string) (string, error) {
	kmp, _ := utils.NewKMP(separator)
	i := 0
	var one [1]byte
	for {
		_, err := Read(input, one[0:])
		if err != nil {
			return "", err
		}
		if kmp.Pattern[i] == one[0] {
			// matched the final pattern byte: done
			if i == kmp.Size-1 {
				return kmp.Pattern, nil
			}
			i++
			continue
		}
		// mismatch: fall back along the KMP prefix table
		if kmp.Prefix[i] > -1 {
			i = kmp.Prefix[i]
		} else {
			i = 0
		}
	}
}
// NetCopy streams bytes from input to output until read error or EOF,
// closing input on return. Write errors are ignored.
// NOTE(review): log.Fatalln on a non-EOF read error terminates the entire
// process, and the named error result is always nil (the loop-local err
// shadows it) — confirm both are intended.
func NetCopy(input, output net.Conn) (err error) {
	defer input.Close()
	buf := make([]byte, global.MAX_PACKET_SIZE)
	for {
		count, err := input.Read(buf)
		if err != nil {
			// flush any final bytes that arrived together with EOF
			if err == io.EOF && count > 0 {
				output.Write(buf[:count])
			}
			if err != io.EOF {
				log.Fatalln("[-]Read error:", err)
			}
			break
		}
		if count > 0 {
			output.Write(buf[:count])
		}
	}
	return
}
|
// Package startf implements dataset transformations using the starlark programming dialect
// For more info on starlark check github.com/google/starlark
package startf
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"github.com/qri-io/dataset"
"github.com/qri-io/dataset/dsfs"
"github.com/qri-io/qfs"
"github.com/qri-io/qri/p2p"
"github.com/qri-io/qri/repo"
"github.com/qri-io/starlib"
skyctx "github.com/qri-io/startf/context"
skyds "github.com/qri-io/startf/ds"
skyqri "github.com/qri-io/startf/qri"
"go.starlark.net/resolve"
"go.starlark.net/starlark"
)
// ExecOpts defines options for execution. Construct with DefaultExecOpts
// then apply functional options (AddQriNodeOpt, SetOutWriter, ...).
type ExecOpts struct {
	Node             *p2p.QriNode               // supply a QriNode to make the 'qri' module available in starlark
	AllowFloat       bool                       // allow floating-point numbers
	AllowSet         bool                       // allow set data type
	AllowLambda      bool                       // allow lambda expressions
	AllowNestedDef   bool                       // allow nested def statements
	Secrets          map[string]interface{}     // passed-in secrets (eg: API keys)
	Globals          starlark.StringDict        // global values to pass for script execution
	MutateFieldCheck func(path ...string) error // func that errors if field specified by path is mutated
	OutWriter        io.Writer                  // provide a writer to record script "stdout" to
	ModuleLoader     ModuleLoader               // starlark module loader function
}
// AddQriNodeOpt adds a qri node to execution options.
func AddQriNodeOpt(node *p2p.QriNode) func(o *ExecOpts) {
	return func(opts *ExecOpts) { opts.Node = node }
}
// AddMutateFieldCheck provides a checkFunc to ExecScript.
func AddMutateFieldCheck(check func(path ...string) error) func(o *ExecOpts) {
	return func(opts *ExecOpts) { opts.MutateFieldCheck = check }
}
// SetOutWriter provides a writer to record the "stderr" diagnostic output
// of the transform script. A nil writer is ignored, keeping the default.
func SetOutWriter(w io.Writer) func(o *ExecOpts) {
	return func(opts *ExecOpts) {
		if w == nil {
			return
		}
		opts.OutWriter = w
	}
}
// DefaultExecOpts applies default options to an ExecOpts pointer:
// permissive language features, empty globals, discarded output, and the
// standard module loader.
func DefaultExecOpts(o *ExecOpts) {
	o.AllowFloat, o.AllowSet, o.AllowLambda = true, true, true
	o.Globals = starlark.StringDict{}
	o.OutWriter = ioutil.Discard
	o.ModuleLoader = DefaultModuleLoader
}
// transform carries the state of a single script execution: the datasets
// being read and written, output streams, and module-loading hooks.
type transform struct {
	node      *p2p.QriNode
	next      *dataset.Dataset // dataset the script may mutate
	prev      *dataset.Dataset // read-only previous version
	skyqri    *skyqri.Module
	checkFunc func(path ...string) error // errors when a guarded field is mutated
	globals   starlark.StringDict        // script globals after ExecFile

	bodyFile qfs.File
	stderr   io.Writer // script print/diagnostic output

	moduleLoader ModuleLoader

	download starlark.Iterable
}
// ModuleLoader is a function that can load starlark modules.
type ModuleLoader func(thread *starlark.Thread, module string) (starlark.StringDict, error)

// DefaultModuleLoader is the loader ExecScript will use unless configured
// otherwise; it delegates to the starlib standard-library loader.
var DefaultModuleLoader = func(thread *starlark.Thread, module string) (dict starlark.StringDict, err error) {
	return starlib.Loader(thread, module)
}
// ExecScript executes a transformation against a starlark script file. The next dataset pointer
// may be modified, while the prev dataset point is read-only. At a bare minimum this function
// will set transformation details, but starlark scripts can modify many parts of the dataset
// pointer, including meta, structure, and transform. opts may provide more ways for output to
// be produced from this function.
func ExecScript(next, prev *dataset.Dataset, opts ...func(o *ExecOpts)) error {
	var err error
	if next.Transform == nil || next.Transform.ScriptFile() == nil {
		return fmt.Errorf("no script to execute")
	}

	o := &ExecOpts{}
	DefaultExecOpts(o)
	for _, opt := range opts {
		opt(o)
	}

	// hoist execution settings to resolve package settings
	resolve.AllowFloat = o.AllowFloat
	resolve.AllowSet = o.AllowSet
	resolve.AllowLambda = o.AllowLambda
	resolve.AllowNestedDef = o.AllowNestedDef

	// add error func to starlark environment
	// NOTE(review): starlark.Universe is process-global shared state —
	// confirm ExecScript is never invoked concurrently.
	starlark.Universe["error"] = starlark.NewBuiltin("error", Error)
	for key, val := range o.Globals {
		starlark.Universe[key] = val
	}

	// set transform details
	next.Transform.Syntax = "starlark"
	next.Transform.SyntaxVersion = Version

	script := next.Transform.ScriptFile()
	// "tee" the script reader to avoid losing script data: as starlark.ExecFile
	// reads, data is copied to buf, which is re-set as the transform script below
	buf := &bytes.Buffer{}
	tr := io.TeeReader(script, buf)
	pipeScript := qfs.NewMemfileReader(script.FileName(), tr)

	t := &transform{
		node:         o.Node,
		next:         next,
		prev:         prev,
		skyqri:       skyqri.NewModule(o.Node),
		checkFunc:    o.MutateFieldCheck,
		stderr:       o.OutWriter,
		moduleLoader: o.ModuleLoader,
	}
	if o.Node != nil {
		// if node localstreams exists, write to both localstreams and output buffer
		t.stderr = io.MultiWriter(o.OutWriter, o.Node.LocalStreams.ErrOut)
	}

	ctx := skyctx.NewContext(next.Transform.Config, o.Secrets)

	thread := &starlark.Thread{
		Load: t.ModuleLoader,
		Print: func(thread *starlark.Thread, msg string) {
			// note we're ignoring a returned error here
			_, _ = t.stderr.Write([]byte(msg))
		},
	}

	// execute the transformation
	t.globals, err = starlark.ExecFile(thread, pipeScript.FileName(), pipeScript, t.locals())
	if err != nil {
		if evalErr, ok := err.(*starlark.EvalError); ok {
			// fixed constant format string: the backtrace must not be
			// interpreted as printf verbs (go vet printf)
			return fmt.Errorf("%s", evalErr.Backtrace())
		}
		return err
	}

	funcs, err := t.specialFuncs()
	if err != nil {
		return err
	}

	// run optional hooks (eg download), recording each result in the context
	for name, fn := range funcs {
		val, err := fn(t, thread, ctx)
		if err != nil {
			if evalErr, ok := err.(*starlark.EvalError); ok {
				return fmt.Errorf("%s", evalErr.Backtrace())
			}
			return err
		}
		ctx.SetResult(name, val)
	}

	err = callTransformFunc(t, thread, ctx)
	if evalErr, ok := err.(*starlark.EvalError); ok {
		return fmt.Errorf("%s", evalErr.Backtrace())
	}

	// restore consumed script file
	next.Transform.SetScriptFile(qfs.NewMemfileBytes("transform.star", buf.Bytes()))
	return err
}
// Error halts program execution with an error. It is installed in the
// starlark universe as the builtin `error(msg)` by ExecScript.
func Error(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
	var msg starlark.Value
	if err := starlark.UnpackPositionalArgs("error", args, kwargs, 1, &msg); err != nil {
		return nil, err
	}
	return nil, fmt.Errorf("transform error: %s", msg)
}
// ErrNotDefined is for when a starlark value is not defined or does not
// exist; globalFunc returns it and callers treat it as "skip".
var ErrNotDefined = fmt.Errorf("not defined")
// globalFunc looks up name among the script's globals, returning
// ErrNotDefined when absent and an error when the value is not a
// starlark function.
func (t *transform) globalFunc(name string) (fn *starlark.Function, err error) {
	val, found := t.globals[name]
	if !found {
		return nil, ErrNotDefined
	}
	if val.Type() != "function" {
		return nil, fmt.Errorf("'%s' is not a function", name)
	}
	return val.(*starlark.Function), nil
}
// confirmIterable asserts that x is a starlark iterable, erroring when a
// script hook returned unstructured data.
func confirmIterable(x starlark.Value) (starlark.Iterable, error) {
	if iter, ok := x.(starlark.Iterable); ok {
		return iter, nil
	}
	return nil, fmt.Errorf("did not return structured data")
}
// specialFuncs reports which optional script hooks (currently only
// "download") the script actually defines, mapping each defined name to
// its Go-side caller.
func (t *transform) specialFuncs() (map[string]specialFunc, error) {
	candidates := map[string]specialFunc{
		"download": callDownloadFunc,
	}
	defined := map[string]specialFunc{}
	for name, fn := range candidates {
		_, err := t.globalFunc(name)
		if err == ErrNotDefined {
			// hook not present in the script: simply skip it
			continue
		}
		if err != nil {
			return nil, err
		}
		defined[name] = fn
	}
	return defined, nil
}
// specialFunc is the signature shared by optional script hooks (eg download).
type specialFunc func(t *transform, thread *starlark.Thread, ctx *skyctx.Context) (result starlark.Value, err error)

// callDownloadFunc invokes the script's optional download() function with
// network access temporarily enabled via httpGuard. Returns None when the
// script defines no download function.
func callDownloadFunc(t *transform, thread *starlark.Thread, ctx *skyctx.Context) (result starlark.Value, err error) {
	httpGuard.EnableNtwk()
	defer httpGuard.DisableNtwk()
	t.print("📡 running download...\n")

	var download *starlark.Function
	if download, err = t.globalFunc("download"); err != nil {
		if err == ErrNotDefined {
			return starlark.None, nil
		}
		return starlark.None, err
	}
	return starlark.Call(thread, download, starlark.Tuple{ctx.Struct()}, nil)
}
// callTransformFunc invokes the script's optional transform(ds, ctx)
// function against a dataset wrapper whose mutations target t.next and
// are guarded by t.checkFunc. A script without a transform function is
// not an error.
func callTransformFunc(t *transform, thread *starlark.Thread, ctx *skyctx.Context) (err error) {
	var transform *starlark.Function
	if transform, err = t.globalFunc("transform"); err != nil {
		if err == ErrNotDefined {
			return nil
		}
		return err
	}
	t.print("🤖 running transform...\n")

	d := skyds.NewDataset(t.prev, t.checkFunc)
	d.SetMutable(t.next)
	if _, err = starlark.Call(thread, transform, starlark.Tuple{d.Methods(), ctx.Struct()}, nil); err != nil {
		return err
	}
	return nil
}
// setSpinnerMsg updates the node's CLI spinner text; a no-op when no node
// is configured.
func (t *transform) setSpinnerMsg(msg string) {
	if t.node != nil {
		t.node.LocalStreams.SpinnerMsg(msg)
	}
}
// print writes msg to the transform's stderr writer unconditionally,
// ignoring any write error. (The writer may be a MultiWriter including
// node local streams; see ExecScript.)
func (t *transform) print(msg string) {
	t.stderr.Write([]byte(msg))
}
// locals returns the predeclared values made available to the script —
// currently just the load_dataset builtin.
func (t *transform) locals() starlark.StringDict {
	return starlark.StringDict{
		"load_dataset": starlark.NewBuiltin("load_dataset", t.LoadDataset),
	}
}
// ModuleLoader sums all loading assets to resolve a module name during
// transform execution: the qri module is served from t.skyqri when
// available, everything else falls through to the configured loader.
func (t *transform) ModuleLoader(thread *starlark.Thread, module string) (dict starlark.StringDict, err error) {
	if module == skyqri.ModuleName && t.skyqri != nil {
		return t.skyqri.Namespace(), nil
	}
	if t.moduleLoader == nil {
		return nil, fmt.Errorf("couldn't load module: %s", module)
	}
	return t.moduleLoader(thread, module)
}
// LoadDataset is the starlark `load_dataset` builtin: it resolves the
// given dataset reference string and hands the dataset's method struct
// back to the script.
func (t *transform) LoadDataset(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
	var refstr starlark.String
	if err := starlark.UnpackArgs("load_dataset", args, kwargs, "ref", &refstr); err != nil {
		return starlark.None, err
	}

	ds, err := t.loadDataset(refstr.GoString())
	if err != nil {
		return starlark.None, err
	}
	return skyds.NewDataset(ds, nil).Methods(), nil
}
// loadDataset resolves refstr against the node's repo, loads the dataset,
// opens its body file if needed, and records the dependency under
// next.Transform.Resources so provenance is preserved.
// NOTE(review): assumes t.next.Transform is non-nil — true when invoked
// via ExecScript (which checks it); confirm for any other callers.
func (t *transform) loadDataset(refstr string) (*dataset.Dataset, error) {
	if t.node == nil {
		return nil, fmt.Errorf("no qri node available to load dataset: %s", refstr)
	}

	ref, err := repo.ParseDatasetRef(refstr)
	if err != nil {
		return nil, err
	}
	if err := repo.CanonicalizeDatasetRef(t.node.Repo, &ref); err != nil {
		return nil, err
	}
	t.node.LocalStreams.PrintErr(fmt.Sprintf("load: %s\n", ref.String()))
	ds, err := dsfs.LoadDataset(t.node.Repo.Store(), ref.Path)
	if err != nil {
		return nil, err
	}
	if ds.BodyFile() == nil {
		if err = ds.OpenBodyFile(t.node.Repo.Filesystem()); err != nil {
			return nil, err
		}
	}

	if t.next.Transform.Resources == nil {
		t.next.Transform.Resources = map[string]*dataset.TransformResource{}
	}
	// keyed by content path, valued by the human-readable reference
	t.next.Transform.Resources[ref.Path] = &dataset.TransformResource{Path: ref.String()}

	return ds, nil
}
|
// This package provides ID masking using hash ids.
//
// Hash IDs are a string representation of numerical incrementing IDs,
// obfuscating the integer value. For more information see
// http://hashids.org/go/
package hashseq
import (
"database/sql/driver"
"encoding/json"
"fmt"
hashid "github.com/speps/go-hashids"
)
// HashData holds the hashid configuration (alphabet, minimum encoded
// length, salt) shared by every encode/decode helper in this package.
var HashData = &hashid.HashIDData{
	Alphabet:  hashid.DefaultAlphabet,
	MinLength: 4,
	Salt:      "",
}
// SetSalt sets the package-wide salt to use for ID obfuscation.
// NOTE(review): mutates shared state; call once at startup before any
// concurrent encoding or decoding.
func SetSalt(salt string) {
	HashData.Salt = salt
}
// Id wraps an int64 database identifier that renders as an obfuscated
// hashid string in JSON while storing as a plain integer in SQL.
type Id struct {
	Int64 int64
}
// Int returns the identifier as a plain int.
func (id *Id) Int() int {
	return int(id.Int64)
}
// String returns the hashid as an obfuscated string, or "" when encoding
// fails (the error is swallowed).
func (id *Id) String() string {
	str, err := hashid.NewWithData(HashData).Encode([]int{id.Int()})
	if err != nil {
		return ""
	}
	return str
}
// MarshalJSON returns the hashid as a JSON string, fulfilling the
// json.Marshaler interface. Note the value receiver: both Id and *Id
// marshal through this method.
func (id Id) MarshalJSON() ([]byte, error) {
	str, err := hashid.NewWithData(HashData).Encode([]int{id.Int()})
	if err != nil {
		return nil, err
	}
	return json.Marshal(str)
}
// UnmarshalJSON decodes a JSON hashid string into the integer ID,
// fulfilling the json.Unmarshaler interface.
func (id *Id) UnmarshalJSON(data []byte) error {
	// data is the raw JSON value including surrounding quotation marks;
	// unquote it first so the quotes are not fed to the hashid decoder
	// (they are not in the hashid alphabet and corrupt the decode).
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	decoded, err := DecodeString(s)
	if err != nil {
		return err
	}
	id.Int64 = decoded
	return nil
}
// Decode decodes a hashid byte slice into the underlying int64 id.
func Decode(hashid []byte) (id int64, err error) {
	return DecodeString(string(hashid))
}
// MustDecodeString decodes a hashid string, panicking on failure. Use
// only where an invalid hashid is a programmer error (eg fixtures).
func MustDecodeString(hashid string) int64 {
	i, err := DecodeString(hashid)
	if err != nil {
		panic(err.Error())
	}
	// i is already int64; the previous redundant conversion was removed
	return i
}
// DecodeString decodes a hashid string into the underlying int64 id,
// erroring unless the hashid decodes to exactly one integer.
func DecodeString(h string) (id int64, err error) {
	ints := hashid.NewWithData(HashData).Decode(h)
	if len(ints) != 1 {
		err = fmt.Errorf("Unexpected hashid value")
		return
	}
	return int64(ints[0]), nil
}
// Scan implements database/sql.Scanner: an int64 column value is stored
// directly, NULL leaves the id untouched, and any other driver type is
// rejected.
func (id *Id) Scan(value interface{}) error {
	switch v := value.(type) {
	case int64:
		id.Int64 = v
		return nil
	case nil:
		return nil
	default:
		return fmt.Errorf("Invalid format: can't convert %T into id.Id", value)
	}
}
// Value returns the raw int64 for database storage, fulfilling the
// driver.Valuer interface (called when saving the ID to a database).
func (id Id) Value() (driver.Value, error) {
	return id.Int64, nil
}
|
package model
// Node is one element of the export tree built from sheet data.
type Node struct {
	*FieldDescriptor

	StructRoot bool // dummy node marking a struct root

	// values of the various supported kinds
	Value     string
	EnumValue int32
	Raw       []byte
	IsEmpty   bool

	Child []*Node // traversal visits value children first, then keys

	// suggest ignoring: for non-repeated plain fields this is true when
	// the original sheet cell was left blank during export
	SugguestIgnore bool
}
// AddValue appends a child node holding the given raw value and returns it.
func (self *Node) AddValue(value string) *Node {
	child := &Node{Value: value}
	self.Child = append(self.Child, child)
	return child
}
// AddKey appends a child node bound to the given field descriptor and
// returns it.
func (self *Node) AddKey(def *FieldDescriptor) *Node {
	child := &Node{FieldDescriptor: def}
	self.Child = append(self.Child, child)
	return child
}
|
// Copyright 2020 Paul Greenberg greenpau@outlook.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package netskope
import (
"encoding/json"
"fmt"
//"go.uber.org/zap"
"strconv"
)
// ClientEndpointResponse is response from clients API endpoint.
type ClientEndpointResponse struct {
	// NOTE(review): Status is unmarshalled from the "success" key — confirm
	// the API reports it as a string rather than a bool.
	Status          string            `json:"success,omitempty"`
	Message         string            `json:"msg,omitempty"`
	ClientEndpoints []*ClientEndpoint `json:"data,omitempty"`
}
// ClientEndpoint is a compute endpoint.
type ClientEndpoint struct {
	ID               string   `json:"_id,omitempty"`
	DeviceID         string   `json:"device_id,omitempty"`
	InstallTimestamp float64  `json:"client_install_time,omitempty"`
	Version          string   `json:"client_version,omitempty"`
	Users            []*User  `json:"users,omitempty"`
	HostInfo         *Host    `json:"host_info,omitempty"`
	LastEvent        *Event   `json:"last_event,omitempty"`
}
// GetClientEndpoints returns a list of ClientEndpoint instances, paging
// through the clients API with limit/skip parameters until a short page
// signals the end of the collection. opts may carry "page_size" (int).
// NOTE(review): a non-int "page_size" value panics on the type assertion.
func (c *Client) GetClientEndpoints(opts map[string]interface{}) ([]*ClientEndpoint, error) {
	endpoints := []*ClientEndpoint{}
	pageSize := 1000
	offset := 0
	if v, exists := opts["page_size"]; exists {
		pageSize = v.(int)
	}
	for {
		params := map[string]string{
			"limit": strconv.Itoa(pageSize),
			"skip":  strconv.Itoa(offset),
		}
		b, err := c.callAPI("GET", "clients", params)
		if err != nil {
			return endpoints, err
		}
		resp := &ClientEndpointResponse{}
		if err := json.Unmarshal(b, &resp); err != nil {
			return endpoints, fmt.Errorf("failed unmarshalling response: %s", err)
		}
		// append the whole page at once instead of element-by-element
		endpoints = append(endpoints, resp.ClientEndpoints...)
		offset += len(resp.ClientEndpoints)
		// a short (or empty) page means there is nothing left to fetch
		if len(resp.ClientEndpoints) < pageSize {
			break
		}
	}
	return endpoints, nil
}
// ToJSONString serializes ClientEndpoint to a string.
func (c *ClientEndpoint) ToJSONString() (string, error) {
	encoded, err := json.Marshal(c)
	if err != nil {
		return "", fmt.Errorf("failed converting to json: %s", err)
	}
	return string(encoded), nil
}
// UnmarshalJSON unpacks byte array into ClientEndpoint.
//
// The API wraps each endpoint's fields in an "attributes" object, which this
// custom unmarshaler flattens into the struct fields. The original used
// unchecked type assertions that panicked on malformed or unexpectedly-typed
// input; every assertion is now validated and reported as an error instead.
func (c *ClientEndpoint) UnmarshalJSON(b []byte) error {
	var m map[string]interface{}
	if len(b) < 10 {
		return fmt.Errorf("invalid ClientEndpoint data: %s", b)
	}
	if err := json.Unmarshal(b, &m); err != nil {
		return fmt.Errorf("failed to unpack ClientEndpoint")
	}
	rawAttrs, exists := m["attributes"]
	if !exists {
		return fmt.Errorf("failed to unpack ClientEndpoint, attributes not found")
	}
	attrs, ok := rawAttrs.(map[string]interface{})
	if !ok {
		return fmt.Errorf("failed to unpack ClientEndpoint, attributes is not an object")
	}
	for k, v := range attrs {
		switch k {
		case "_id":
			s, ok := v.(string)
			if !ok {
				return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute is not a string", k)
			}
			c.ID = s
		case "device_id":
			s, ok := v.(string)
			if !ok {
				return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute is not a string", k)
			}
			c.DeviceID = s
		case "client_install_time":
			// JSON numbers decode to float64 by default.
			f, ok := v.(float64)
			if !ok {
				return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute is not a number", k)
			}
			c.InstallTimestamp = f
		case "client_version":
			s, ok := v.(string)
			if !ok {
				return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute is not a string", k)
			}
			c.Version = s
		case "users":
			list, ok := v.([]interface{})
			if !ok {
				return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute is not a list", k)
			}
			for _, u := range list {
				um, ok := u.(map[string]interface{})
				if !ok {
					return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute entry is not an object", k)
				}
				usr := &User{}
				if err := usr.load(um); err != nil {
					return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute error: %s", k, err)
				}
				c.Users = append(c.Users, usr)
			}
		case "host_info":
			hm, ok := v.(map[string]interface{})
			if !ok {
				return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute is not an object", k)
			}
			hostInfo := &Host{}
			if err := hostInfo.load(hm); err != nil {
				return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute error: %s", k, err)
			}
			c.HostInfo = hostInfo
		case "last_event":
			em, ok := v.(map[string]interface{})
			if !ok {
				return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute is not an object", k)
			}
			lastEvent := &Event{}
			if err := lastEvent.load(em); err != nil {
				return fmt.Errorf("failed to unpack ClientEndpoint, %s attribute error: %s", k, err)
			}
			c.LastEvent = lastEvent
		default:
			return fmt.Errorf("failed to unpack ClientEndpoint, unsupported attribute: %s, %v", k, v)
		}
	}
	return nil
}
|
package templates
import (
"fmt"
th "html/template"
"os"
"path"
"path/filepath"
"reflect"
"strings"
tt "text/template"
)
const (
	envPrefix  = "AUTHELIA_"
	envXPrefix = "X_AUTHELIA_"
)

// IMPORTANT: This is a copy of github.com/authelia/authelia/internal/configuration's secretSuffixes except all uppercase.
// Make sure you update these at the same time.
var envSecretSuffixes = []string{
	"KEY", "SECRET", "PASSWORD", "TOKEN", "CERTIFICATE_CHAIN",
}

// isSecretEnvKey reports whether key names an Authelia secret environment
// variable: it must carry one of the Authelia env prefixes and end with a
// known secret suffix. The comparison is case-insensitive.
func isSecretEnvKey(key string) (isSecretEnvKey bool) {
	upper := strings.ToUpper(key)
	hasPrefix := strings.HasPrefix(upper, envPrefix) || strings.HasPrefix(upper, envXPrefix)
	if !hasPrefix {
		return false
	}
	for _, suffix := range envSecretSuffixes {
		if strings.HasSuffix(upper, strings.ToUpper(suffix)) {
			return true
		}
	}
	return false
}
func fileExists(path string) (exists bool) {
info, err := os.Stat(path)
return err == nil && !info.IsDir()
}
// readTemplate loads the template identified by name+ext. When overridePath
// is set and contains a matching file, that override is returned (embed ==
// false); otherwise the template is read from the embedded filesystem under
// src/<category>/ (embed == true).
func readTemplate(name, ext, category, overridePath string) (tPath string, embed bool, data []byte, err error) {
	if overridePath != "" {
		tPath = filepath.Join(overridePath, name+ext)
		if fileExists(tPath) {
			data, err = os.ReadFile(tPath)
			if err != nil {
				return tPath, false, nil, fmt.Errorf("failed to read template override at path '%s': %w", tPath, err)
			}
			return tPath, false, data, nil
		}
	}

	// Fall back to the embedded copy (slash-separated path, hence path.Join).
	tPath = path.Join("src", category, name+ext)
	data, err = embedFS.ReadFile(tPath)
	if err != nil {
		return tPath, true, nil, fmt.Errorf("failed to read embedded template '%s': %w", tPath, err)
	}
	return tPath, true, data, nil
}
// parseTextTemplate parses data as a text/template named name+extText,
// wrapping any parse error with whether the source was embedded or an
// override file.
func parseTextTemplate(name, tPath string, embed bool, data []byte) (t *tt.Template, err error) {
	t, err = tt.New(name + extText).Funcs(FuncMap()).Parse(string(data))
	if err == nil {
		return t, nil
	}
	if embed {
		return nil, fmt.Errorf("failed to parse embedded template '%s': %w", tPath, err)
	}
	return nil, fmt.Errorf("failed to parse template override at path '%s': %w", tPath, err)
}
// parseHTMLTemplate parses data as an html/template named name+extHTML,
// wrapping any parse error with whether the source was embedded or an
// override file.
func parseHTMLTemplate(name, tPath string, embed bool, data []byte) (t *th.Template, err error) {
	t, err = th.New(name + extHTML).Funcs(FuncMap()).Parse(string(data))
	if err == nil {
		return t, nil
	}
	if embed {
		return nil, fmt.Errorf("failed to parse embedded template '%s': %w", tPath, err)
	}
	return nil, fmt.Errorf("failed to parse template override at path '%s': %w", tPath, err)
}
// loadEmailTemplate builds an EmailTemplate by loading and parsing both the
// plain-text and HTML variants of the named notification template,
// honouring overrides from overridePath.
func loadEmailTemplate(name, overridePath string) (t *EmailTemplate, err error) {
	t = &EmailTemplate{}

	tpath, embed, data, err := readTemplate(name, extText, TemplateCategoryNotifications, overridePath)
	if err != nil {
		return nil, err
	}
	if t.Text, err = parseTextTemplate(name, tpath, embed, data); err != nil {
		return nil, err
	}

	if tpath, embed, data, err = readTemplate(name, extHTML, TemplateCategoryNotifications, overridePath); err != nil {
		return nil, err
	}
	if t.HTML, err = parseHTMLTemplate(name, tpath, embed, data); err != nil {
		return nil, err
	}

	return t, nil
}
// strval coerces an arbitrary value to a string: strings pass through,
// byte slices are converted, fmt.Stringer implementations use String(),
// and anything else falls back to fmt's default %v formatting.
func strval(v any) string {
	if s, ok := v.(string); ok {
		return s
	}
	if b, ok := v.([]byte); ok {
		return string(b)
	}
	if s, ok := v.(fmt.Stringer); ok {
		return s.String()
	}
	return fmt.Sprintf("%v", v)
}
// strslice coerces v into a []string. []string passes through; []any and any
// reflected array/slice are converted element-wise (skipping nils) via
// strval; a nil scalar yields an empty slice; any other scalar becomes a
// single-element slice.
func strslice(v any) []string {
	switch typed := v.(type) {
	case []string:
		return typed
	case []any:
		out := make([]string, 0, len(typed))
		for _, item := range typed {
			if item != nil {
				out = append(out, strval(item))
			}
		}
		return out
	}

	// Fall back to reflection for other slice/array kinds.
	rv := reflect.ValueOf(v)
	if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
		n := rv.Len()
		out := make([]string, 0, n)
		for i := 0; i < n; i++ {
			if item := rv.Index(i).Interface(); item != nil {
				out = append(out, strval(item))
			}
		}
		return out
	}

	if v == nil {
		return []string{}
	}
	return []string{strval(v)}
}
|
package main
import (
"github.com/gin-gonic/gin"
"github.com/linjinglan/gittest/src/common/route"
)
// main wires the application's routes into a fresh gin engine and serves
// HTTP on port 8000.
func main() {
	r := gin.New()
	r = route.PathRoute(r)
	// Run blocks for the lifetime of the server and only returns on a
	// listen/serve error, which the original silently discarded.
	if err := r.Run(":8000"); err != nil {
		panic(err)
	}
}
|
package command
import (
"context"
"fmt"
"github.com/romantomjak/b2/b2"
)
// listBuckets prints the name of every bucket in the authenticated account,
// one per line with a trailing slash, and returns a process exit code
// (0 on success, 1 on any failure).
func (c *ListCommand) listBuckets() int {
	client, err := c.Client()
	if err != nil {
		c.ui.Error(fmt.Sprintf("Error: %v", err))
		return 1
	}

	req := &b2.BucketListRequest{AccountID: client.Session.AccountID}
	buckets, _, err := client.Bucket.List(context.TODO(), req)
	if err != nil {
		c.ui.Error(fmt.Sprintf("Error: %v", err))
		return 1
	}

	for _, b := range buckets {
		c.ui.Output(b.Name + "/")
	}
	return 0
}
|
package image
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestReleaseImageList verifies that releaseImageList renders the expected
// JSON description for a variety of release pull specs and architectures.
func TestReleaseImageList(t *testing.T) {
	testCases := []struct {
		name     string
		pullSpec string
		arch     string
		result   string
	}{
		{
			name:     "4.10rc",
			pullSpec: "quay.io/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64",
			arch:     "x86_64",
			result:   "[{\"openshift_version\":\"4.10\",\"cpu_architecture\":\"x86_64\",\"url\":\"quay.io/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64\",\"version\":\"4.10.0-rc.1\"}]",
		},
		{
			name:     "pull-spec-includes-port-number",
			pullSpec: "quay.io:433/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64",
			arch:     "x86_64",
			result:   "[{\"openshift_version\":\"4.10\",\"cpu_architecture\":\"x86_64\",\"url\":\"quay.io:433/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64\",\"version\":\"4.10.0-rc.1\"}]",
		},
		{
			name:     "arm",
			pullSpec: "quay.io/openshift-release-dev/ocp-release:4.10.0-rc.1-aarch64",
			arch:     "aarch64",
			result:   "[{\"openshift_version\":\"4.10\",\"cpu_architecture\":\"aarch64\",\"url\":\"quay.io/openshift-release-dev/ocp-release:4.10.0-rc.1-aarch64\",\"version\":\"4.10.0-rc.1\"}]",
		},
		{
			name:     "4.11ci",
			pullSpec: "registry.ci.openshift.org/ocp/release:4.11.0-0.ci-2022-05-16-202609",
			arch:     "x86_64",
			result:   "[{\"openshift_version\":\"4.11\",\"cpu_architecture\":\"x86_64\",\"url\":\"registry.ci.openshift.org/ocp/release:4.11.0-0.ci-2022-05-16-202609\",\"version\":\"4.11.0-0.ci-2022-05-16-202609\"}]",
		},
		{
			name:     "CI-ephemeral",
			pullSpec: "registry.build04.ci.openshift.org/ci-op-m7rfgytz/release@sha256:ebb203f24ee060d61bdb466696a9c20b3841f9929badf9b81fc99cbedc2a679e",
			arch:     "x86_64",
			result:   "[{\"openshift_version\":\"was not built correctly\",\"cpu_architecture\":\"x86_64\",\"url\":\"registry.build04.ci.openshift.org/ci-op-m7rfgytz/release@sha256:ebb203f24ee060d61bdb466696a9c20b3841f9929badf9b81fc99cbedc2a679e\",\"version\":\"was not built correctly\"}]",
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			output, err := releaseImageList(tc.pullSpec, tc.arch)
			if !assert.NoError(t, err) {
				return
			}
			assert.Equal(t, tc.result, output)
		})
	}
}
// TestReleaseImageListErrors verifies that malformed pull specs are rejected.
func TestReleaseImageListErrors(t *testing.T) {
	badPullSpecs := []string{
		"",
		"quay.io/openshift-release-dev/ocp-release-4.10",
		"quay.io/openshift-release-dev/ocp-release:4",
	}
	for _, pullSpec := range badPullSpecs {
		pullSpec := pullSpec
		t.Run(pullSpec, func(t *testing.T) {
			_, err := releaseImageList(pullSpec, "x86_64")
			assert.Error(t, err)
		})
	}
}
|
package nntp
import (
"fmt"
//"math/rand"
"sort"
"strings"
)
// implements the algorithm from http://www.jwz.org/doc/threading.html
// Tree-structured wrapper around ParsedArticle.
type Container struct {
	Article *ParsedArticle // underlying Article (nil for a "dummy" container known only by message ID)
	Parent, Child, Next *Container // link structure (threaded tree): parent, first child, next sibling
	Id MessageId // its Article's ID (makes sense if we don't have this article)
	Secondary *Container // next container in breadth-first traversal
}
// Result of depth-first walking a Container tree and noting the
// depths.
type DepthContainer struct {
	Cont *Container // the visited container
	D int // depth at which Cont was visited (0 for roots)
}
// idTable maps message IDs to their (possibly dummy) containers.
type idTable map[MessageId]*Container
// subjectTable maps a subject line to the thread root chosen for it (step 5).
type subjectTable map[string]*Container
// id_table is working state for a single Thread() run; it is re-created at
// the start of each run and released (set to nil) after the root set is built.
var id_table idTable
// Prefixes that typically mean a follow up. For technical
// reasons, this can't be a constant.
var _BAD_PREFIXES = []string{"re: ", "aw: "}
// Threads articles according to the algorithm from
// http://www.jwz.org/doc/threading.html; see also
// https://raw.github.com/kedorlaomer/loread/master/threading.txt
//
// It returns the root set: containers without a parent after linking.
// Fixes over the original:
//   - step 4's pruning loop started with `repeat := false`, so `for repeat`
//     never executed; it now runs until a pass makes no changes.
//   - step 5B's table condition dereferenced old.Article while a disjunct
//     allowed old.Article to be nil, panicking; the condition now follows
//     the jwz preference rules nil-safely.
func Thread(articles []ParsedArticle) map[*Container]bool {
	id_table = make(idTable)
	// 1
	for _, message := range articles {
		// 1A: find/create this message's container and attach the article.
		container := containerById(message.Id)
		if container.Article == nil {
			container.Article = new(ParsedArticle)
			*container.Article = message
		}
		// 1B: link adjacent References parent→child where that is cycle-safe.
		for i := 0; i < len(message.References)-1; i++ {
			container1 := containerById(message.References[i])
			container2 := containerById(message.References[i+1])
			if container1.Parent == nil && container2.Parent == nil &&
				mayLink(container1, container2) {
				container2.Parent = container1
			}
		}
		// 1C: parent this message under its last reference, if allowed.
		if l := len(message.References); l > 0 {
			if last := containerById(message.References[l-1]); mayLink(container, last) {
				container.Parent = last
			}
		} else {
			container.Parent = nil
		}
	}
	// we „forgot" to set Child and Next links
	// Child links
	for _, container := range id_table {
		if parent := container.Parent; parent != nil && parent.Child == nil {
			parent.Child = container
		}
	}
	// Next links
	for _, container := range id_table {
		if parent := container.Parent; parent != nil && parent.Child != container {
			otherChild := parent.Child
			for otherChild.Next != nil && otherChild != container {
				otherChild = otherChild.Next
			}
			if otherChild != container {
				otherChild.Next = container
			}
		}
	}
	// 2: the root set is every article's topmost ancestor.
	rootSet := make(map[*Container]bool)
	for _, message := range articles {
		container := containerById(message.Id)
		for container.Parent != nil {
			container = container.Parent
		}
		rootSet[container] = true
	}
	// 3
	id_table = nil
	// 4: prune empty containers.
	//
	// we use WalkContainers as replacement for recursion
	//
	// Bug fix: this was `repeat := false`, making the loop below dead code.
	// Start true so the body runs, and keep passing until nothing changes.
	repeat := true
	for repeat {
		repeat = false
		ch := make(chan *DepthContainer)
		go WalkContainers(rootSet, ch)
		for d := range ch {
			container := d.Cont
			// 4A: drop childless dummies entirely.
			if container.Article == nil {
				if container.Child == nil && container.Next == nil {
					delete(rootSet, container)
					repeat = true
					// delete from parent's child list, if existing
					deleteFromParentsList(container)
				}
			}
			// 4B: splice out dummies that do have children.
			if container.Article == nil && container.Child != nil {
				// remove this container
				repeat = true
				delete(rootSet, container)
				// promote single child to root set
				if container.Child.Next == nil {
					rootSet[container.Child] = true
				} else if container.Parent != nil {
					// promote non-single child to non-root
					parent := container.Parent
					last := parent.Child
					for last.Next != nil {
						last = last.Next
					}
					last.Next = container.Child
				}
			}
		}
	}
	// 5
	// A
	subject_table := make(subjectTable)
	// B: pick a representative root per subject. Preference: first seen,
	// then dummy over non-dummy, then non-follow-up over follow-up.
	for this := range rootSet {
		subject := findSubject(this)
		if subject != "" {
			old, ok := subject_table[subject]
			// Bug fix: the original condition could dereference a nil
			// old.Article; all dereferences are now guarded.
			if !ok ||
				(this.Article == nil && old.Article != nil) ||
				(this.Article != nil && old.Article != nil &&
					isFollowup(old.Article.Subject) && !isFollowup(this.Article.Subject)) {
				subject_table[subject] = this
			}
		}
	}
	// C
	////for this := range rootSet {
	////	subject := findSubject(this)
	////	that, ok := subject_table[subject]
	////	if !ok || this == that {
	////		continue
	////	}
	////	// (a)
	////	// both are dummies
	////	if this.Article == nil && that.Article == nil {
	////		// append this' children to that's children
	////		last := that.Child
	////		for last.Next != nil {
	////			last = last.Next
	////		}
	////		last.Next = this.Child
	////		// and delete this
	////		delete(rootSet, this)
	////		subject_table[subject] = that
	////	} else if ((this.Article == nil) && (that.Article != nil)) ||
	////		((this.Article != nil) && (that.Article == nil)) {
	////		// (b)
	////		// one is empty, another one is not
	////		if this.Article == nil {
	////			this, that = that, this
	////		}
	////		// that is empty, this isn't
	////		subject_table[subject] = that
	////		makeToChildOf(this, that)
	////	} else if that.Article != nil && !isFollowup(that.Article.Subject) &&
	////		this.Article != nil && isFollowup(this.Article.Subject) {
	////		// (c)
	////		// that is a follow-up, this isn't
	////		makeToChildOf(this, that)
	////		subject_table[subject] = that
	////	} else if that.Article != nil && isFollowup(that.Article.Subject) &&
	////		this.Article != nil && !isFollowup(this.Article.Subject) {
	////		// (d)
	////		// misordered
	////		makeToChildOf(that, this)
	////	} else {
	////		// (e)
	////		// otherwise
	////		newId := fmt.Sprintf("id%s@random.id", rand.Int())
	////		container := &Container{
	////			Id: MessageId(newId),
	////		}
	////		// container
	////		//    ↓
	////		// this→⋯→last→that
	////		this.Parent = container
	////		that.Parent = container
	////		container.Child = this
	////		last := this
	////		for last.Next != nil {
	////			last = last.Next
	////		}
	////		last.Next = that
	////	}
	////}
	// 6 (nothing)
	// 7: order every sibling list chronologically.
	ch := make(chan *DepthContainer)
	go WalkContainers(rootSet, ch)
	for container := range ch {
		sortSiblings(container.Cont)
	}
	// the algorithm ends here; we need additional work
	// add Secondary links according to depth-first traversal
	ch = make(chan *DepthContainer)
	go WalkContainers(rootSet, ch)
	var first *DepthContainer
	for first = range ch {
		if first != nil && first.Cont != nil {
			break
		}
	}
	for second := range ch {
		if second == nil || second.Cont == nil || second.Cont.Article != nil {
			first.Cont.Secondary = second.Cont
			first = second
		}
	}
	return rootSet
}
// id_table[id] may be nil, but we want an empty container
// instead.
//
// containerById returns the Container registered for id, lazily creating
// and registering an empty (dummy) one when none exists yet.
func containerById(id MessageId) *Container {
	rv := id_table[id]
	if rv == nil {
		rv = &Container{Id: id}
		id_table[id] = rv
	}
	return rv
}
// reachableUpwards reports whether c2 is an ancestor of c1, i.e. whether c2
// can be reached from c1 by following Parent links.
func (c1 *Container) reachableUpwards(c2 *Container) bool {
	for c := c1.Parent; c != nil; c = c.Parent {
		if c == c2 {
			return true
		}
	}
	return false
}
// mayLink reports whether linking c1 and c2 is safe: they must be distinct
// and neither may already be an ancestor of the other (which would create a
// cycle in the thread tree).
func mayLink(c1, c2 *Container) bool {
	if c1 == c2 {
		return false
	}
	return !c1.reachableUpwards(c2) && !c2.reachableUpwards(c1)
}
// printContainers displays (for debugging) the link structure of container
// and each of its following siblings.
func printContainers(container *Container) {
	for c := container; c != nil; c = c.Next {
		printContainersRek(c, 0)
	}
}
// printContainersRek prints c indented by depth bullet characters, then
// recursively prints its children one level deeper.
func printContainersRek(c *Container, depth int) {
	if c == nil {
		return
	}
	// depth 0 prints no indentation at all.
	fmt.Print(strings.Repeat("•", depth))
	if c.Article == nil {
		fmt.Printf("<<empty container>> (%s)\n", c.Id)
	} else {
		fmt.Printf("%s (%s)\n", c.Article.Subject, c.Id)
	}
	for child := c.Child; child != nil; child = child.Next {
		printContainersRek(child, depth+1)
	}
}
// WalkContainers streams every container in containers (and all of their
// siblings and descendants) together with its depth to ch, then closes ch.
func WalkContainers(containers map[*Container]bool, ch chan<- *DepthContainer) {
	defer close(ch)
	for root := range containers {
		for c := root; c != nil; c = c.Next {
			walkContainersRek(c, ch, 0)
		}
	}
}
// walkContainersRek is the recursive kernel of WalkContainers: it emits
// container at the given depth, then recurses over its children at depth+1.
func walkContainersRek(container *Container, ch chan<- *DepthContainer, depth int) {
	if container == nil {
		return
	}
	ch <- &DepthContainer{Cont: container, D: depth}
	for child := container.Child; child != nil; child = child.Next {
		walkContainersRek(child, ch, depth+1)
	}
}
// stripPrefixes removes leading follow-up markers (_BAD_PREFIXES, compared
// case-insensitively) from subj, repeating until no marker remains.
func stripPrefixes(subj string) string {
	for changed := true; changed; {
		changed = false
		for _, prefix := range _BAD_PREFIXES {
			if strings.HasPrefix(strings.ToLower(subj), prefix) {
				subj = subj[len(prefix):]
				changed = true
			}
		}
	}
	return subj
}
// isFollowup reports whether subj begins (case-insensitively) with one of
// the typical follow-up prefixes such as "re: " or "aw: ".
func isFollowup(subj string) bool {
	lowered := strings.ToLower(subj)
	for _, prefix := range _BAD_PREFIXES {
		if strings.HasPrefix(lowered, prefix) {
			return true
		}
	}
	return false
}
// findSubject uses the procedure described in step 5B of the jwz threading
// algorithm to determine c's subject: the article's own subject when one is
// attached, otherwise the subject of the first child's article, otherwise "".
//
// The original had an unreachable trailing `return ""` and an else-branch
// after a returning if; both are cleaned up with early returns.
func findSubject(c *Container) string {
	if c.Article != nil {
		return c.Article.Subject
	}
	if c.Child == nil || c.Child.Article == nil {
		return ""
	}
	// An empty child subject naturally falls through to "".
	return c.Child.Article.Subject
}
// containers adapts []*Container to sort.Interface, ordering by the
// underlying articles' dates.
type containers []*Container

// Less orders containers by article date; a nil container (or one without an
// article) sorts before everything else.
func (c containers) Less(i, j int) bool {
	switch {
	case c[i] == nil || c[i].Article == nil:
		return true
	case c[j] == nil || c[j].Article == nil:
		return false
	default:
		return c[i].Article.Date.Before(c[j].Article.Date)
	}
}

// Swap exchanges the elements at positions i and j.
func (c containers) Swap(i, j int) { c[i], c[j] = c[j], c[i] }

// Len reports the number of containers.
func (c containers) Len() int { return len(c) }
// sortSiblings reorders c's direct children (c.Child, c.Child.Next, …)
// chronologically by their articles' date headers, rebuilding the Next links.
func sortSiblings(c *Container) {
	if c.Child == nil {
		return
	}
	var siblings []*Container
	for s := c.Child; s != nil; s = s.Next {
		siblings = append(siblings, s)
	}
	if len(siblings) < 2 {
		return
	}
	sort.Sort(containers(siblings))
	// Re-thread the sorted slice back into the linked list.
	c.Child = siblings[0]
	prev := siblings[0]
	for _, s := range siblings[1:] {
		prev.Next = s
		prev = s
	}
	// terminate list
	prev.Next = nil
}
// deleteFromParentsList unlinks container from its parent's child list,
// if it has a parent.
//
// Bug fixes: the original truncated the sibling list at the removal point
// (parent.Child = nil, or find.Next = nil), silently discarding every
// sibling that followed container; it also panicked if container was not
// actually present in the list. The list is now spliced with container.Next
// and the walk is nil-safe.
func deleteFromParentsList(container *Container) {
	parent := container.Parent
	if parent == nil {
		return
	}
	if parent.Child == container {
		parent.Child = container.Next
		return
	}
	find := parent.Child
	for find != nil && find.Next != container {
		find = find.Next
	}
	if find != nil {
		find.Next = container.Next
	}
}
// makeToChildOf appends this (and all of its following siblings) to that's
// child list and repoints their Parent links at that:
//
//	this         that           that
//	 ↓            ↓       ⇒      ↓
//	 …        c→⋯→last        c→⋯→last→this→…
//
// Bug fix: the original dereferenced that.Child unconditionally and crashed
// when that had no children yet; that case now links this in directly.
func makeToChildOf(this, that *Container) {
	if that.Child == nil {
		that.Child = this
	} else {
		last := that.Child
		for last.Next != nil {
			last = last.Next
		}
		last.Next = this
	}
	// relink this' siblings' Parent links to that
	for c := this; c != nil; c = c.Next {
		c.Parent = that
	}
}
|
package main
import (
"context"
"fmt"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/fsnotify/fsnotify"
"github.com/karlkfi/kubexit/pkg/kubernetes"
"github.com/karlkfi/kubexit/pkg/log"
"github.com/karlkfi/kubexit/pkg/supervisor"
"github.com/karlkfi/kubexit/pkg/tombstone"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/watch"
)
// main is kubexit's entrypoint: it reads KUBEXIT_* configuration from the
// environment, optionally waits for birth dependencies to become ready,
// supervises the wrapped child command, and records birth/death tombstones
// in the shared graveyard so sibling containers can coordinate.
//
// Bug fixes over the original:
//   - ts.RecordDeath was called twice (once before the ignore-code
//     adjustment, with its error discarded); it is now recorded exactly
//     once, with the final exit code.
//   - the namespace log line wrongly said "KUBEXIT_POD_NAME parsed".
func main() {
	var err error

	ctx := log.WithLogger(context.Background(), log.L)

	args := os.Args[1:]
	if len(args) == 0 {
		log.G(ctx).Error("no arguments found")
		os.Exit(2)
	}

	name := os.Getenv("KUBEXIT_NAME")
	if name == "" {
		log.G(ctx).Error("missing env var: KUBEXIT_NAME")
		os.Exit(2)
	}
	// add field to the context logger to differentiate when pod container logs are intermingled
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("container_name", name))
	log.G(ctx).Info("KUBEXIT_NAME parsed")

	graveyard := os.Getenv("KUBEXIT_GRAVEYARD")
	if graveyard == "" {
		graveyard = "/graveyard"
	} else {
		graveyard = strings.TrimRight(graveyard, "/")
		graveyard = filepath.Clean(graveyard)
	}
	ts := &tombstone.Tombstone{
		Graveyard: graveyard,
		Name:      name,
	}
	log.G(ctx).
		WithField("graveyard", graveyard).
		WithField("tombstone", ts.Path()).
		Info("KUBEXIT_GRAVEYARD parsed")

	birthDepsStr := os.Getenv("KUBEXIT_BIRTH_DEPS")
	var birthDeps []string
	if birthDepsStr != "" {
		birthDeps = strings.Split(birthDepsStr, ",")
	}
	log.G(ctx).WithField("birth_deps", birthDeps).Info("KUBEXIT_BIRTH_DEPS parsed")

	deathDepsStr := os.Getenv("KUBEXIT_DEATH_DEPS")
	var deathDeps []string
	if deathDepsStr != "" {
		deathDeps = strings.Split(deathDepsStr, ",")
	}
	log.G(ctx).WithField("death_deps", deathDeps).Info("KUBEXIT_DEATH_DEPS parsed")

	birthTimeout := 30 * time.Second
	birthTimeoutStr := os.Getenv("KUBEXIT_BIRTH_TIMEOUT")
	if birthTimeoutStr != "" {
		birthTimeout, err = time.ParseDuration(birthTimeoutStr)
		if err != nil {
			log.G(ctx).Errorf("failed to parse birth timeout: %v", err)
			os.Exit(2)
		}
	}
	log.G(ctx).WithField("birth_timeout", birthTimeout).Info("KUBEXIT_BIRTH_TIMEOUT parsed")

	gracePeriod := 30 * time.Second
	gracePeriodStr := os.Getenv("KUBEXIT_GRACE_PERIOD")
	if gracePeriodStr != "" {
		gracePeriod, err = time.ParseDuration(gracePeriodStr)
		if err != nil {
			log.G(ctx).Errorf("failed to parse grace period: %v", err)
			os.Exit(2)
		}
	}
	log.G(ctx).WithField("grace_period", gracePeriod).Info("KUBEXIT_GRACE_PERIOD parsed")

	// Pod name and namespace are only required when birth deps are watched.
	podName := os.Getenv("KUBEXIT_POD_NAME")
	if podName == "" && len(birthDeps) > 0 {
		log.G(ctx).Error("missing env var: KUBEXIT_POD_NAME")
		os.Exit(2)
	}
	log.G(ctx).WithField("pod_name", podName).Info("KUBEXIT_POD_NAME parsed")

	namespace := os.Getenv("KUBEXIT_NAMESPACE")
	if namespace == "" && len(birthDeps) > 0 {
		log.G(ctx).Error("missing env var: KUBEXIT_NAMESPACE")
		os.Exit(2)
	}
	log.G(ctx).WithField("namespace", namespace).Info("KUBEXIT_NAMESPACE parsed")

	child := supervisor.New(ctx, args[0], args[1:]...)

	// watch for death deps early, so they can interrupt waiting for birth deps
	var deathDepTerminated bool
	if len(deathDeps) > 0 {
		ctx, stopGraveyardWatcher := context.WithCancel(ctx)
		// stop graveyard watchers on exit, if not sooner
		defer stopGraveyardWatcher()

		log.G(ctx).Info("Watching graveyard...")
		err = tombstone.Watch(ctx, graveyard, onDeathOfAny(deathDeps, func() error {
			// mark process as killed by a death dependency
			deathDepTerminated = true
			stopGraveyardWatcher()
			// trigger graceful shutdown
			// Error & exit if not started.
			// ShutdownWithTimeout doesn't block until timeout
			err := child.ShutdownWithTimeout(gracePeriod)
			if err != nil {
				return fmt.Errorf("failed to shutdown: %v", err)
			}
			return nil
		}))
		if err != nil {
			fatal(ctx, child, ts, fmt.Errorf("failed to watch graveyard: %v", err))
		}
	}

	if len(birthDeps) > 0 {
		err = waitForBirthDeps(ctx, birthDeps, namespace, podName, birthTimeout)
		if err != nil {
			fatal(ctx, child, ts, err)
		}
	}

	err = child.Start()
	if err != nil {
		fatal(ctx, child, ts, err)
	}

	err = ts.RecordBirth(ctx)
	if err != nil {
		fatal(ctx, child, ts, err)
	}

	code := waitForChildExit(ctx, child)

	// Optionally report success when a death dependency killed the child.
	ignoreCodeOnDeathDeps := strings.ToLower(os.Getenv("KUBEXIT_IGNORE_CODE_ON_DEATH_DEPS")) == "true"
	if deathDepTerminated && ignoreCodeOnDeathDeps && code != 0 {
		log.G(ctx).WithField("original_exit_code", code).Info("got a non-zero exit code but exitting with code 0")
		code = 0
	}

	// Record death exactly once, with the final exit code.
	err = ts.RecordDeath(ctx, code)
	if err != nil {
		log.G(ctx).Error(err)
		os.Exit(1)
	}

	os.Exit(code)
}
// waitForBirthDeps blocks until every container named in birthDeps reports
// ready on the watched pod, the timeout expires, or SIGTERM arrives.
func waitForBirthDeps(ctx context.Context, birthDeps []string, namespace, podName string, timeout time.Duration) error {
	// Cancel context on SIGTERM to trigger graceful exit.
	ctx = withCancelOnSignal(ctx, syscall.SIGTERM)
	ctx, stopPodWatcher := context.WithTimeout(ctx, timeout)
	// Stop the pod watcher on exit, if not sooner.
	defer stopPodWatcher()

	log.G(ctx).Info("watching pod updates...")
	if err := kubernetes.WatchPod(ctx, namespace, podName, onReadyOfAll(ctx, birthDeps, stopPodWatcher)); err != nil {
		return fmt.Errorf("failed to watch pod: %v", err)
	}

	// Block until the ready-callback cancels the context, the timeout fires,
	// or the parent context is canceled.
	<-ctx.Done()
	switch err := ctx.Err(); {
	case err == context.DeadlineExceeded:
		return fmt.Errorf("timed out waiting for birth deps to be ready: %s", timeout)
	case err != nil && err != context.Canceled:
		// ignore canceled. shouldn't be other errors, but just in case...
		return fmt.Errorf("waiting for birth deps to be ready: %v", err)
	}

	log.G(ctx).WithField("birth_deps", birthDeps).Info("all birth deps ready")
	return nil
}
// withCancelOnSignal returns a child context that is canceled when any of
// the specified signals is received, and cleans up its signal registration
// once ctx itself is done.
//
// Bug fix: the original's goroutine did not return after handling ctx.Done():
// because a done channel stays closed, the select could choose that case
// again and panic on the second close(sigCh). The goroutine now exits
// immediately after cleanup.
func withCancelOnSignal(ctx context.Context, signals ...os.Signal) context.Context {
	ctx, cancel := context.WithCancel(ctx)
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, signals...)

	go func() {
		for {
			select {
			case s, ok := <-sigCh:
				if !ok {
					return
				}
				log.G(ctx).WithField("signal", s).Info("received shutdown signal")
				cancel()
			case <-ctx.Done():
				// Stop receiving signals, release the channel, and exit so
				// close(sigCh) can never run twice.
				signal.Reset()
				close(sigCh)
				return
			}
		}
	}()

	return ctx
}
// waitForChildExit blocks until the supervised child terminates and returns
// its exit code; -1 indicates a failure that was not a normal process exit.
func waitForChildExit(ctx context.Context, child *supervisor.Supervisor) int {
	code := 0
	err := child.Wait()
	if err != nil {
		code = -1
		if exitErr, ok := err.(*exec.ExitError); ok {
			code = exitErr.ProcessState.ExitCode()
		}
	}
	log.G(ctx).
		WithField("exit_code", code).
		WithField("error", err).
		Info("child exited")
	return code
}
// fatal logs a terminal error, shuts the child down (which may or may not be
// running), records its death in the tombstone, and exits with status 1.
// It never returns.
func fatal(ctx context.Context, child *supervisor.Supervisor, ts *tombstone.Tombstone, err error) {
	log.G(ctx).Error(err)

	// ShutdownNow is skipped internally when the child was never started.
	if shutdownErr := child.ShutdownNow(); shutdownErr != nil {
		log.G(ctx).Errorf("failed to shutdown child process: %v", shutdownErr)
		os.Exit(1)
	}

	// Wait for shutdown...
	// TODO: timeout in case the process is a zombie?
	code := waitForChildExit(ctx, child)

	// Attempt to record death, if possible — another process may be waiting
	// on the tombstone.
	if recordErr := ts.RecordDeath(ctx, code); recordErr != nil {
		log.G(ctx).Errorf("failed to record death of child process: %v", recordErr)
		os.Exit(1)
	}

	os.Exit(1)
}
// onReadyOfAll returns an EventHandler that executes the callback when all of
// the birthDeps containers are ready.
//
// Cleanup: the original built a birthDepSet map that was never consulted
// (the readiness check iterates birthDeps directly); the dead code is removed.
func onReadyOfAll(ctx context.Context, birthDeps []string, callback func()) kubernetes.EventHandler {
	return func(event watch.Event) {
		log.G(ctx).WithField("event_type", event.Type).Info("recieved pod watch event")
		// ignore Deleted (Watch will auto-stop on delete)
		if event.Type == watch.Deleted {
			return
		}

		pod, ok := event.Object.(*corev1.Pod)
		if !ok {
			log.G(ctx).WithField("object", event.Object).Warn("recieved unexpected non-pod object type")
			return
		}

		// Collect the names of all currently-ready containers.
		readyContainers := map[string]struct{}{}
		for _, status := range pod.Status.ContainerStatuses {
			if status.Ready {
				readyContainers[status.Name] = struct{}{}
			}
		}

		// Fire the callback only once every birth dep is ready.
		for _, name := range birthDeps {
			if _, ok := readyContainers[name]; !ok {
				// at least one birth dep is not ready
				return
			}
		}
		callback()
	}
}
// onDeathOfAny returns an EventHandler that executes the callback when the
// tombstone of any process named in deathDeps records a death.
func onDeathOfAny(deathDeps []string, callback func() error) tombstone.EventHandler {
	watched := make(map[string]struct{}, len(deathDeps))
	for _, dep := range deathDeps {
		watched[dep] = struct{}{}
	}

	return func(ctx context.Context, event fsnotify.Event) error {
		// Only creations and writes can signal a tombstone update.
		isCreate := event.Op&fsnotify.Create == fsnotify.Create
		isWrite := event.Op&fsnotify.Write == fsnotify.Write
		if !isCreate && !isWrite {
			return nil
		}

		graveyard := filepath.Dir(event.Name)
		name := filepath.Base(event.Name)
		log.G(ctx).WithField("tombstone", name).Info("recieved tombstone watch event")
		if _, ok := watched[name]; !ok {
			// Not one of our death dependencies.
			return nil
		}

		ts, err := tombstone.Read(graveyard, name)
		if err != nil {
			log.G(ctx).WithField("tombstone", name).Errorf("failed to read tombstone: %v", err)
			return nil
		}
		if ts.Died == nil {
			// Birth recorded, but the process is still alive.
			return nil
		}

		log.G(ctx).
			WithField("tombstone", name).
			WithField("tombstone_content", ts).
			Errorf("recieved new death event")
		return callback()
	}
}
|
package api
import (
"encoding/json"
"github.com/asaskevich/govalidator"
"github.com/fasthttp/router"
"github.com/polundrra/shortlink/internal/service"
"github.com/valyala/fasthttp"
"log"
"regexp"
"strings"
)
// LinkApi exposes HTTP handlers backed by the short-link service.
type LinkApi struct {
	service service.Service
}

// New constructs a LinkApi around the given service.
func New(service service.Service) LinkApi {
	return LinkApi{service: service}
}
// Router builds the fasthttp request handler with the API's two routes
// registered: short-link creation and short-code redirection.
func (l *LinkApi) Router() fasthttp.RequestHandler {
	mux := router.New()
	mux.POST("/get-short-link", l.getShortLink)
	mux.GET("/{code}", l.redirect)
	return mux.Handler
}
// request is the JSON body accepted by getShortLink.
type request struct {
	Url string // the long URL to shorten
	CustomEnd string // optional caller-chosen short-code ending
}
// response is the JSON body returned by getShortLink.
type response struct {
	Url string // the resulting short link
}
// getShortLink handles POST /get-short-link: it parses and validates the
// JSON request body, asks the service for a short link (honouring an
// optional custom ending), and writes the result back as JSON.
//
// Bug fix: the original validated req.CustomEnd before trimming it but then
// passed the trimmed value to the service, so a value with incidental
// whitespace (e.g. " abc ") was rejected even though "abc" would be used.
// Validation now runs on the trimmed value. Status-code calls are also
// unified on ctx.SetStatusCode.
func (l *LinkApi) getShortLink(ctx *fasthttp.RequestCtx) {
	req := request{}
	if err := json.Unmarshal(ctx.Request.Body(), &req); err != nil {
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		ctx.Write([]byte("error unmarshal request body" + err.Error()))
		return
	}

	// Trim before validating so the value checked is the value used.
	customEnd := strings.TrimSpace(req.CustomEnd)
	if customEnd != "" && !validateEnding(customEnd) {
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		ctx.WriteString("invalid custom link")
		return
	}

	url := strings.TrimSpace(req.Url)
	if !govalidator.IsURL(url) {
		log.Println(url)
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		ctx.Write([]byte("invalid URL"))
		return
	}

	shortLink, err := l.service.CreateShortLink(ctx, url, customEnd)
	if err != nil {
		// A conflicting custom code is the caller's fault; everything else
		// is an internal failure.
		if err == service.ErrCodeConflict {
			ctx.SetStatusCode(fasthttp.StatusBadRequest)
		} else {
			ctx.SetStatusCode(fasthttp.StatusInternalServerError)
		}
		ctx.Write([]byte(err.Error()))
		return
	}

	resp, err := json.Marshal(&response{shortLink})
	if err != nil {
		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
		ctx.Write([]byte(err.Error()))
		return
	}
	if _, err := ctx.Write(resp); err != nil {
		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
		ctx.Write([]byte(err.Error()))
		return
	}
}
// redirect handles GET /{code}: it resolves the short code to its original
// long URL and answers with a 308 permanent redirect, or 404 when the code
// is unknown.
func (l *LinkApi) redirect(ctx *fasthttp.RequestCtx) {
	shortLink := ctx.UserValue("code").(string)
	url, err := l.service.GetLongLink(ctx, shortLink)
	switch {
	case err == service.ErrLongLinkNotFound:
		ctx.SetStatusCode(fasthttp.StatusNotFound)
	case err != nil:
		ctx.Response.SetStatusCode(fasthttp.StatusInternalServerError)
		ctx.Write([]byte(err.Error()))
	default:
		ctx.Response.SetStatusCode(fasthttp.StatusPermanentRedirect)
		ctx.Response.Header.Set("Location", url)
	}
}
// endingRegex matches 1-32 characters drawn from [a-zA-Z0-9-_]. Compiled
// once at package scope rather than on every call (the original recompiled
// the pattern per request).
var endingRegex = regexp.MustCompile("^[a-zA-Z0-9-_]{1,32}$")

// validateEnding reports whether ending is an acceptable custom short-code.
func validateEnding(ending string) bool {
	return endingRegex.MatchString(ending)
}
|
package tyme
import (
"fmt"
"time"
)
// LocalDate represents a year, month and day without a Location,
// e.g. January 2, 2006.
type LocalDate struct {
	month LocalMonth
	date  int
}

// NewLocalDate returns a LocalDate for the given year, month and day.
func NewLocalDate(year int, month time.Month, date int) LocalDate {
	return LocalDate{month: NewLocalMonth(year, month), date: date}
}

// Year returns the year component.
func (d *LocalDate) Year() int { return d.month.Year() }

// Month returns the month component.
func (d *LocalDate) Month() time.Month { return d.month.Month() }

// Date returns the day-of-month component.
func (d *LocalDate) Date() int { return d.date }

// String renders the date in "yyyy-mm-dd" format.
func (d LocalDate) String() string {
	return fmt.Sprintf("%04d-%02d-%02d", d.Year(), int(d.Month()), d.Date())
}
|
// Copyright (c) 2021 Alexey Khan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package main
import (
"fmt"
"log"
"strings"
"github.com/jedib0t/go-pretty/v6/table"
"github.com/jedib0t/go-pretty/v6/text"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
	// Application metadata rendered in the header shields.
	appVersion   = "0.1.0"
	appCopyright = "alexeykhan"
	appLicense   = "MIT"

	// appViewWidth is the target terminal width, in columns, for all views.
	appViewWidth = 64

	// Accepted values of the "detailed" command option.
	commandOptionEmpty           = ""
	commandOptionDetailedYearly  = "Y"
	commandOptionDetailedMonthly = "M"

	// Russian section headers used in command help output.
	commandOptions       = " Параметры и опции команды:"
	commandUsageExamples = "\n Примеры использования:\n"

	// Russian table column headers and footer label.
	tableColumnYear                = "Год"
	tableColumnMonth               = "Месяц"
	tableColumnExpenses            = "Расходы"
	tableColumnInflationInitial    = "Исходная ценность"
	tableColumnInflationEquivalent = "Эквивалент ценности"
	tableColumnInvestments         = "Вложения"
	tableColumnInterestIncome      = "Проценты"
	tableColumnTotalSavings        = "Накопления"
	tableFooterTotal               = "ИТОГО"
)
// logo holds the ASCII-art application banner, one raw-string element per
// line; it is centered and colorized by printHeader.
var logo = [...]string{
	` ______ ______ ______ ______ ______ ________ `,
	` / \ / \ / \ / | / \ / |`,
	`/$$$$$$ |/$$$$$$ |/$$$$$$ |$$$$$$/ /$$$$$$ |$$$$$$$$/ `,
	`$$ |__$$ |$$ \__$$/ $$ \__$$/ $$ | $$ \__$$/ $$ | `,
	`$$ $$ |$$ \ $$ \ $$ | $$ \ $$ | `,
	`$$$$$$$$ | $$$$$$ | $$$$$$ | $$ | $$$$$$ | $$ | `,
	`$$ | $$ |/ \__$$ |/ \__$$ | _$$ |_ / \__$$ | $$ | `,
	`$$ | $$ |$$ $$/ $$ $$/ / $$ |$$ $$/ $$ | `,
	`$$/ $$/ $$$$$$/ $$$$$$/ $$$$$$/ $$$$$$/ $$/ `,
}
// yearsDuration formats a number of years with the grammatically correct
// Russian plural form: 1 год, 2-4 года, 5-20 лет, 21 год, 22 года, 25 лет...
// The original implementation produced "года" for every count ending in 1
// (e.g. "1 года", "21 года"), which is incorrect Russian; this follows the
// standard CLDR plural rule for Russian.
func yearsDuration(years uint8) string {
	n := int(years)
	info := "лет"
	switch {
	case n%100 >= 11 && n%100 <= 14:
		// Teens always take "лет" (11 лет, 12 лет, ...): keep the default.
	case n%10 == 1:
		info = "год"
	case n%10 >= 2 && n%10 <= 4:
		info = "года"
	}
	return fmt.Sprintf("%d %s", years, info)
}
// savingsTable simulates a recurring-investment savings plan: every month a
// fixed payment is contributed and interest accrues at interest percent per
// year, compounded monthly. It returns the rendered table together with the
// accumulated personal investments, interest income and total savings.
// When detailed is true the table has one row per month; otherwise one row
// per year. A final extra iteration credits interest only and emits the
// "ИТОГО" footer row.
func savingsTable(payment, interest float64, years uint8, detailed bool) (rendered string, personalInvestments, interestIncome, totalSavings float64) {
	firstColumn := tableColumnYear
	if detailed {
		firstColumn = tableColumnMonth
	}
	t := getTableWriter(
		firstColumn,
		tableColumnInvestments,
		tableColumnInterestIncome,
		tableColumnTotalSavings)
	var index interface{}
	periods := 12 * int(years)
	// Convert the annual percentage rate into a monthly fraction.
	periodRate := interest * 0.01 / 12
	for i := 0; i <= periods; i++ {
		// NOTE: this local deliberately shadows the rate parameter "interest";
		// it is this month's accrued interest amount.
		interest := totalSavings * periodRate
		interestIncome += interest
		if i == periods {
			// Final pass: credit interest only (no payment) and draw the
			// separator before the footer row.
			totalSavings += interest
			t.AppendSeparator()
		} else {
			totalSavings += interest + payment
			personalInvestments += payment
		}
		index = i + 1
		if !detailed {
			// Yearly view: label rows by completed year instead of month.
			index = (i + 1) / 12
		}
		if i == periods {
			index = tableFooterTotal
		}
		// Emit a row every month (detailed), at each year boundary, or for
		// the final total.
		if detailed || (i+1 >= 12 && (i+1)%12 == 0 || i == periods) {
			t.AppendRow(table.Row{
				index,
				fmt.Sprintf("%.2f", personalInvestments),
				fmt.Sprintf("%.2f", interestIncome),
				fmt.Sprintf("%.2f", totalSavings),
			})
		}
	}
	return t.Render(), personalInvestments, interestIncome, totalSavings
}
// commandOverview renders a command overview: a bold upper-cased title, the
// soft-wrapped description and the list of usage examples, all in white.
func commandOverview(title, about string, examples []string) string {
	var (
		bold  = text.Colors{text.Bold, text.FgHiWhite}
		plain = text.Colors{text.FgHiWhite}
		b     strings.Builder
	)
	b.WriteString(bold.Sprintf(" %s", text.FormatUpper.Apply(title)))
	b.WriteString("\n\n")
	for _, line := range strings.Split(text.WrapSoft(about, appViewWidth-2), "\n") {
		b.WriteString(plain.Sprintf(" %s\n", strings.TrimSpace(line)))
	}
	b.WriteString(bold.Sprint(commandUsageExamples))
	for _, example := range examples {
		b.WriteString(plain.Sprintf(" $ %s\n", strings.TrimSpace(example)))
	}
	return b.String()
}
// wrapUsage soft-wraps usage text to at most max-indent columns and prefixes
// every continuation line with indent spaces so wrapped lines stay aligned
// under the first one. The result always ends with a newline.
func wrapUsage(usage string, max, indent int) string {
	var b strings.Builder
	pad := strings.Repeat(" ", indent)
	for i, line := range strings.Split(text.WrapSoft(usage, max-indent), "\n") {
		if i > 0 {
			b.WriteString(pad)
		}
		b.WriteString(line)
		b.WriteByte('\n')
	}
	return b.String()
}
// unquoteUsage extracts a back-quoted variable name from a flag's usage
// string and strips the back quotes from the usage text, mirroring the
// behavior of the standard flag.UnquoteUsage. When no back-quoted name is
// present, the flag's value type is mapped to a friendlier display name.
func unquoteUsage(flag *pflag.Flag) (name string, usage string) {
	usage = flag.Usage
	if open := strings.IndexByte(usage, '`'); open >= 0 {
		if width := strings.IndexByte(usage[open+1:], '`'); width >= 0 {
			closing := open + 1 + width
			name = usage[open+1 : closing]
			usage = usage[:open] + name + usage[closing+1:]
			return name, usage
		}
		// Only one back quote found: fall through and use the type name.
	}
	name = flag.Value.Type()
	switch name {
	case "float64":
		name = "float"
	case "int64":
		name = "int"
	case "uint64":
		name = "uint"
	case "stringSlice":
		name = "[]string"
	case "intSlice":
		name = "[]int"
	case "uintSlice":
		name = "[]uint"
	case "boolSlice":
		name = "[]bool"
	}
	return
}
// printHeader prints the centered green ASCII-art logo followed by the
// version, license and copyright shields.
func printHeader() {
	var (
		logoColor = text.Colors{text.Bold, text.FgHiGreen}
		label     = text.Colors{text.FgHiWhite, text.BgGreen}
		value     = text.Colors{text.Bold, text.FgHiGreen}
	)
	shields := []string{
		label.Sprint(" version ") + value.Sprintf(" %s ", appVersion),
		label.Sprint(" license ") + value.Sprintf(" %s ", appLicense),
		label.Sprint(" copyright ") + value.Sprintf(" %s ", appCopyright),
	}
	var banner strings.Builder
	for _, line := range logo {
		banner.WriteString(text.AlignCenter.Apply(logoColor.Sprint(line), appViewWidth))
		banner.WriteByte('\n')
	}
	fmt.Printf("\n%s\n %s\n\n", banner.String(), strings.Join(shields, " "))
}
// printDescriptor prints a command's usage example followed by an aligned,
// white-colored listing of its local, non-hidden flags.
func printDescriptor(cmd *cobra.Command) {
	var (
		maxLen int
		flagLines []string
	)
	fmt.Println(cmd.Example)
	cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {
		if flag.Hidden {
			return
		}
		line := ""
		// Prefer the "-s, --long" form when a usable shorthand exists.
		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
			line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
		} else {
			line = fmt.Sprintf(" --%s", flag.Name)
		}
		varType, usage := unquoteUsage(flag)
		line += " " + varType
		// A NUL byte marks where the flag spec ends and the usage text
		// begins; the printing loop below replaces it with padding so all
		// usage columns line up.
		line += "\x00"
		if len(line) > maxLen {
			maxLen = len(line)
		}
		line += usage
		flagLines = append(flagLines, line)
	})
	if len(flagLines) > 0 {
		fmt.Println(text.Colors{text.Bold, text.FgHiWhite}.Sprint(commandOptions))
		for _, line := range flagLines {
			// Pad each flag spec out to the longest one, then wrap the usage
			// text so continuation lines align under the first usage column.
			sIdx := strings.Index(line, "\x00")
			spacing := strings.Repeat(" ", maxLen-sIdx)
			concatenated := line[:sIdx] + spacing + " " + wrapUsage(line[sIdx+1:], appViewWidth, maxLen+1)
			fmt.Print(text.Colors{text.FgHiWhite}.Sprint(concatenated))
		}
	}
	fmt.Println()
}
// getTableWriter builds a table.Writer preconfigured with the application's
// box-drawing style, white color scheme and per-column width/alignment
// settings, using the given strings as the header row.
// NOTE(review): configs are declared for every known column name; go-pretty
// appears to match them to headers by Name, so only the columns actually
// passed in take effect — confirm against the library docs.
func getTableWriter(columns ...string) table.Writer {
	var tableRow []interface{}
	for _, col := range columns {
		tableRow = append(tableRow, col)
	}
	// Widths are derived from the fixed view width so that either the
	// three-column money layout or the two-column inflation layout fills it.
	yearColumnWidth := 6
	moneyColumnMaxWidth := (appViewWidth - yearColumnWidth - 17) / 3
	inflationColumnMaxWidth := (appViewWidth - yearColumnWidth - 14) / 2
	t := table.NewWriter()
	t.SetAllowedRowLength(appViewWidth)
	t.AppendHeader(tableRow)
	t.SetStyle(table.Style{
		Name: "Assist",
		Box: table.BoxStyle{
			BottomLeft: " ┗",
			BottomRight: "┛",
			BottomSeparator: "━┻",
			Left: " ┃",
			LeftSeparator: " ┣",
			MiddleHorizontal: "━",
			MiddleSeparator: "━╋",
			MiddleVertical: " ┃",
			PaddingLeft: " ",
			PaddingRight: " ",
			Right: "┃",
			RightSeparator: "┫",
			TopLeft: " ┏",
			TopRight: "┓",
			TopSeparator: "━┳",
		},
		Color: table.ColorOptions{
			Footer: text.Colors{text.FgHiWhite},
			Header: text.Colors{text.FgHiWhite},
			Row: text.Colors{text.FgHiWhite},
			RowAlternate: text.Colors{text.FgHiWhite},
		},
		Format: table.FormatOptions{
			Footer: text.FormatUpper,
			Header: text.FormatUpper,
			Row: text.FormatDefault,
		},
		Options: table.Options{
			DrawBorder: true,
			SeparateColumns: true,
			SeparateFooter: true,
			SeparateHeader: true,
			SeparateRows: false,
		},
	})
	t.SetColumnConfigs([]table.ColumnConfig{
		{
			Name: tableColumnYear,
			Align: text.AlignCenter,
			AlignFooter: text.AlignLeft,
			AlignHeader: text.AlignCenter,
			WidthMin: yearColumnWidth,
			WidthMax: yearColumnWidth,
		},
		{
			Name: tableColumnMonth,
			Align: text.AlignCenter,
			AlignFooter: text.AlignLeft,
			AlignHeader: text.AlignCenter,
			WidthMin: yearColumnWidth,
			WidthMax: yearColumnWidth,
		},
		{
			Name: tableColumnExpenses,
			Align: text.AlignCenter,
			AlignFooter: text.AlignLeft,
			AlignHeader: text.AlignCenter,
			WidthMin: moneyColumnMaxWidth,
			WidthMax: moneyColumnMaxWidth,
		},
		{
			Name: tableColumnInvestments,
			Align: text.AlignCenter,
			AlignFooter: text.AlignLeft,
			AlignHeader: text.AlignCenter,
			WidthMin: moneyColumnMaxWidth,
			WidthMax: moneyColumnMaxWidth,
		},
		{
			Name: tableColumnInterestIncome,
			Align: text.AlignCenter,
			AlignFooter: text.AlignLeft,
			AlignHeader: text.AlignCenter,
			WidthMin: moneyColumnMaxWidth,
			WidthMax: moneyColumnMaxWidth,
		},
		{
			Name: tableColumnTotalSavings,
			Align: text.AlignCenter,
			AlignFooter: text.AlignLeft,
			AlignHeader: text.AlignCenter,
			WidthMin: moneyColumnMaxWidth,
			WidthMax: moneyColumnMaxWidth,
		},
		{
			Name: tableColumnInflationInitial,
			Align: text.AlignCenter,
			AlignFooter: text.AlignLeft,
			AlignHeader: text.AlignCenter,
			WidthMin: inflationColumnMaxWidth,
			WidthMax: inflationColumnMaxWidth,
		},
		{
			Name: tableColumnInflationEquivalent,
			Align: text.AlignCenter,
			AlignFooter: text.AlignLeft,
			AlignHeader: text.AlignCenter,
			WidthMin: inflationColumnMaxWidth,
			WidthMax: inflationColumnMaxWidth,
		},
	})
	return t
}
// validateDetailedOption checks that value is one of the supported "detailed"
// option values: empty, yearly ("Y") or monthly ("M"). A non-nil error lists
// the available options.
func validateDetailedOption(value string) error {
	switch value {
	case commandOptionEmpty, commandOptionDetailedYearly, commandOptionDetailedMonthly:
		return nil
	}
	return fmt.Errorf("invalid argument value: detailed = %q; available options: %q, %q",
		value, commandOptionDetailedYearly, commandOptionDetailedMonthly)
}
// getTaskOverview renders a task block: a bold upper-cased title followed by
// the soft-wrapped overview text, indented and colored white.
func getTaskOverview(title, overview string) string {
	var (
		bold  = text.Colors{text.Bold, text.FgHiWhite}
		plain = text.Colors{text.FgHiWhite}
		b     strings.Builder
	)
	b.WriteString(bold.Sprintf(" %s", text.FormatUpper.Apply(title)))
	b.WriteString("\n\n")
	for _, line := range strings.Split(text.WrapSoft(overview, appViewWidth-2), "\n") {
		b.WriteString(plain.Sprintf(" %s\n", strings.TrimSpace(line)))
	}
	return b.String()
}
// getFloat64 reads the named float64 flag from cmd, printing the command help
// and terminating the process via log.Fatal when the flag cannot be read.
func getFloat64(cmd *cobra.Command, name string) float64 {
	value, err := cmd.Flags().GetFloat64(name)
	if err == nil {
		return value
	}
	_ = cmd.Help()
	log.Fatal(err)
	return 0 // unreachable: log.Fatal exits the process
}
// getUint8 reads the named uint8 flag from cmd, printing the command help
// and terminating the process via log.Fatal when the flag cannot be read.
func getUint8(cmd *cobra.Command, name string) uint8 {
	value, err := cmd.Flags().GetUint8(name)
	if err == nil {
		return value
	}
	_ = cmd.Help()
	log.Fatal(err)
	return 0 // unreachable: log.Fatal exits the process
}
// getString reads the named string flag from cmd, printing the command help
// and terminating the process via log.Fatal when the flag cannot be read.
func getString(cmd *cobra.Command, name string) string {
	value, err := cmd.Flags().GetString(name)
	if err == nil {
		return value
	}
	_ = cmd.Help()
	log.Fatal(err)
	return "" // unreachable: log.Fatal exits the process
}
// getBool reads the named bool flag from cmd, printing the command help
// and terminating the process via log.Fatal when the flag cannot be read.
func getBool(cmd *cobra.Command, name string) bool {
	value, err := cmd.Flags().GetBool(name)
	if err == nil {
		return value
	}
	_ = cmd.Help()
	log.Fatal(err)
	return false // unreachable: log.Fatal exits the process
}
|
// ˅
package main
import (
"fmt"
"os"
"strconv"
"strings"
)
// ˄
// Context analyzes the syntax: it holds the tokenized input text and a
// cursor over the current token.
type Context struct {
	// ˅
	// ˄
	nodes        []string // whitespace-separated tokens of the input text
	currentIndex int      // index into nodes of the current token
	// ˅
	// ˄
}
// NewContext tokenizes text on whitespace and returns a Context positioned
// at the first token.
func NewContext(text string) *Context {
	// ˅
	tokens := strings.Fields(text)
	return &Context{nodes: tokens, currentIndex: 0}
	// ˄
}
// NextToken advances the cursor and returns the new current token, or ""
// when the input is exhausted.
func (c *Context) NextToken() string {
	// ˅
	c.currentIndex++
	if c.currentIndex >= len(c.nodes) {
		return ""
	}
	return c.nodes[c.currentIndex]
	// ˄
}
// GetToken returns the current token without advancing the cursor.
// NOTE(review): panics if the cursor has run past the last token — callers
// appear to rely on NextToken's "" sentinel before reaching that state.
func (self *Context) GetToken() string {
	// ˅
	return self.nodes[self.currentIndex]
	// ˄
}
// SlideToken consumes the current token, which must equal the expected
// token; otherwise it prints a warning and terminates the process.
func (c *Context) SlideToken(token string) {
	// ˅
	current := c.GetToken()
	if current != token {
		fmt.Println("WARNING: " + token + " is expected but " + current + " was found.")
		os.Exit(1)
	}
	c.NextToken()
	// ˄
}
// GetNumber parses the current token as a decimal integer; on parse failure
// it prints a warning and terminates the process.
func (c *Context) GetNumber() int {
	// ˅
	token := c.GetToken()
	value, err := strconv.Atoi(token)
	if err != nil {
		fmt.Println("WARNING: " + token)
		os.Exit(1)
	}
	return value
	// ˄
}
// ˅
// ˄
|
//go:build !linux
package main
import "syscall"
// sigInfo is the status-reporting signal used on non-Linux builds.
// NOTE(review): syscall.SIGINFO exists only on BSD-derived platforms
// (darwin, freebsd, ...); despite the `!linux` build constraint this will
// not compile on Windows — confirm the intended platform set.
var sigInfo = syscall.SIGINFO
|
package swag
// Version is the current release version string of swag.
const Version = "v2.3.1"
|
package main
import (
"fmt"
"strconv"
"strings"
)
// main validates a hard-coded "HH:MM" time string: it prints the split
// parts, then "false" and returns early on any invalid component, or "true"
// when the string is a valid 24-hour clock time.
func main() {
	timeStr := "09:56"
	timeArray := strings.Split(timeStr, ":")
	fmt.Println(timeArray)
	// Guard against inputs that do not split into exactly hour and minute.
	if len(timeArray) != 2 {
		fmt.Println("false")
		return
	}
	// Atoi errors were previously discarded; non-numeric parts now fail too.
	hour, err := strconv.Atoi(timeArray[0])
	if err != nil || hour < 0 || hour > 23 {
		fmt.Println("false")
		return
	}
	minute, err := strconv.Atoi(timeArray[1])
	if err != nil || minute < 0 || minute > 59 {
		fmt.Println("false")
		return
	}
	fmt.Println(true)
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alpha
import (
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
func DCLOSPolicyAssignmentSchema() *dcl.Schema {
return &dcl.Schema{
Info: &dcl.Info{
Title: "OSConfig/OSPolicyAssignment",
Description: "Represents an OSPolicyAssignment resource.",
StructName: "OSPolicyAssignment",
Reference: &dcl.Link{
Text: "API documentation",
URL: "https://cloud.google.com/compute/docs/osconfig/rest/v1/projects.locations.osPolicyAssignments",
},
},
Paths: &dcl.Paths{
Get: &dcl.Path{
Description: "The function used to get information about a OSPolicyAssignment",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "oSPolicyAssignment",
Required: true,
Description: "A full instance of a OSPolicyAssignment",
},
},
},
Apply: &dcl.Path{
Description: "The function used to apply information about a OSPolicyAssignment",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "oSPolicyAssignment",
Required: true,
Description: "A full instance of a OSPolicyAssignment",
},
},
},
Delete: &dcl.Path{
Description: "The function used to delete a OSPolicyAssignment",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "oSPolicyAssignment",
Required: true,
Description: "A full instance of a OSPolicyAssignment",
},
},
},
DeleteAll: &dcl.Path{
Description: "The function used to delete all OSPolicyAssignment",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "project",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
dcl.PathParameters{
Name: "location",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
},
},
List: &dcl.Path{
Description: "The function used to list information about many OSPolicyAssignment",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "project",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
dcl.PathParameters{
Name: "location",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
},
},
},
Components: &dcl.Components{
Schemas: map[string]*dcl.Component{
"OSPolicyAssignment": &dcl.Component{
Title: "OSPolicyAssignment",
ID: "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}",
UsesStateHint: true,
ParentContainer: "project",
HasCreate: true,
SchemaProperty: dcl.Property{
Type: "object",
Required: []string{
"name",
"osPolicies",
"instanceFilter",
"rollout",
"project",
"location",
},
Properties: map[string]*dcl.Property{
"baseline": &dcl.Property{
Type: "boolean",
GoName: "Baseline",
ReadOnly: true,
Description: "Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.",
Immutable: true,
},
"deleted": &dcl.Property{
Type: "boolean",
GoName: "Deleted",
ReadOnly: true,
Description: "Output only. Indicates that this revision deletes the OS policy assignment.",
Immutable: true,
},
"description": &dcl.Property{
Type: "string",
GoName: "Description",
Description: "OS policy assignment description. Length of the description is limited to 1024 characters.",
},
"etag": &dcl.Property{
Type: "string",
GoName: "Etag",
ReadOnly: true,
Description: "The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.",
Immutable: true,
},
"instanceFilter": &dcl.Property{
Type: "object",
GoName: "InstanceFilter",
GoType: "OSPolicyAssignmentInstanceFilter",
Description: "Required. Filter to select VMs.",
Properties: map[string]*dcl.Property{
"all": &dcl.Property{
Type: "boolean",
GoName: "All",
Description: "Target all VMs in the project. If true, no other criteria is permitted.",
SendEmpty: true,
},
"exclusionLabels": &dcl.Property{
Type: "array",
GoName: "ExclusionLabels",
Description: "List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "object",
GoType: "OSPolicyAssignmentInstanceFilterExclusionLabels",
Properties: map[string]*dcl.Property{
"labels": &dcl.Property{
Type: "object",
AdditionalProperties: &dcl.Property{
Type: "string",
},
GoName: "Labels",
Description: "Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.",
},
},
},
},
"inclusionLabels": &dcl.Property{
Type: "array",
GoName: "InclusionLabels",
Description: "List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "object",
GoType: "OSPolicyAssignmentInstanceFilterInclusionLabels",
Properties: map[string]*dcl.Property{
"labels": &dcl.Property{
Type: "object",
AdditionalProperties: &dcl.Property{
Type: "string",
},
GoName: "Labels",
Description: "Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.",
},
},
},
},
"inventories": &dcl.Property{
Type: "array",
GoName: "Inventories",
Description: "List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "object",
GoType: "OSPolicyAssignmentInstanceFilterInventories",
Required: []string{
"osShortName",
},
Properties: map[string]*dcl.Property{
"osShortName": &dcl.Property{
Type: "string",
GoName: "OSShortName",
Description: "Required. The OS short name",
},
"osVersion": &dcl.Property{
Type: "string",
GoName: "OSVersion",
Description: "The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.",
},
},
},
},
},
},
"location": &dcl.Property{
Type: "string",
GoName: "Location",
Description: "The location for the resource",
Immutable: true,
},
"name": &dcl.Property{
Type: "string",
GoName: "Name",
Description: "Resource name.",
Immutable: true,
},
"osPolicies": &dcl.Property{
Type: "array",
GoName: "OSPolicies",
Description: "Required. List of OS policies to be applied to the VMs.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "object",
GoType: "OSPolicyAssignmentOSPolicies",
Required: []string{
"id",
"mode",
"resourceGroups",
},
Properties: map[string]*dcl.Property{
"allowNoResourceGroupMatch": &dcl.Property{
Type: "boolean",
GoName: "AllowNoResourceGroupMatch",
Description: "This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.",
},
"description": &dcl.Property{
Type: "string",
GoName: "Description",
Description: "Policy description. Length of the description is limited to 1024 characters.",
},
"id": &dcl.Property{
Type: "string",
GoName: "Id",
Description: "Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.",
},
"mode": &dcl.Property{
Type: "string",
GoName: "Mode",
GoType: "OSPolicyAssignmentOSPoliciesModeEnum",
Description: "Required. Policy mode Possible values: MODE_UNSPECIFIED, VALIDATION, ENFORCEMENT",
Enum: []string{
"MODE_UNSPECIFIED",
"VALIDATION",
"ENFORCEMENT",
},
},
"resourceGroups": &dcl.Property{
Type: "array",
GoName: "ResourceGroups",
Description: "Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "object",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroups",
Required: []string{
"resources",
},
Properties: map[string]*dcl.Property{
"inventoryFilters": &dcl.Property{
Type: "array",
GoName: "InventoryFilters",
Description: "List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "object",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters",
Required: []string{
"osShortName",
},
Properties: map[string]*dcl.Property{
"osShortName": &dcl.Property{
Type: "string",
GoName: "OSShortName",
Description: "Required. The OS short name",
},
"osVersion": &dcl.Property{
Type: "string",
GoName: "OSVersion",
Description: "The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.",
},
},
},
},
"resources": &dcl.Property{
Type: "array",
GoName: "Resources",
Description: "Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "object",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResources",
Required: []string{
"id",
},
Properties: map[string]*dcl.Property{
"exec": &dcl.Property{
Type: "object",
GoName: "Exec",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec",
Description: "Exec resource",
Conflicts: []string{
"pkg",
"repository",
"file",
},
Required: []string{
"validate",
},
Properties: map[string]*dcl.Property{
"enforce": &dcl.Property{
Type: "object",
GoName: "Enforce",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce",
Description: "What to run to bring this resource into the desired state. An exit code of 100 indicates \"success\", any other exit code indicates a failure running enforce.",
Required: []string{
"interpreter",
},
Properties: map[string]*dcl.Property{
"args": &dcl.Property{
Type: "array",
GoName: "Args",
Description: "Optional arguments to pass to the source during execution.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"file": &dcl.Property{
Type: "object",
GoName: "File",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile",
Description: "A remote or local file.",
Conflicts: []string{
"script",
},
Properties: map[string]*dcl.Property{
"allowInsecure": &dcl.Property{
Type: "boolean",
GoName: "AllowInsecure",
Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.",
},
"gcs": &dcl.Property{
Type: "object",
GoName: "Gcs",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs",
Description: "A Cloud Storage object.",
Conflicts: []string{
"remote",
"localPath",
},
Required: []string{
"bucket",
"object",
},
Properties: map[string]*dcl.Property{
"bucket": &dcl.Property{
Type: "string",
GoName: "Bucket",
Description: "Required. Bucket of the Cloud Storage object.",
},
"generation": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Generation",
Description: "Generation number of the Cloud Storage object.",
},
"object": &dcl.Property{
Type: "string",
GoName: "Object",
Description: "Required. Name of the Cloud Storage object.",
},
},
},
"localPath": &dcl.Property{
Type: "string",
GoName: "LocalPath",
Description: "A local path within the VM to use.",
Conflicts: []string{
"remote",
"gcs",
},
},
"remote": &dcl.Property{
Type: "object",
GoName: "Remote",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote",
Description: "A generic remote file.",
Conflicts: []string{
"gcs",
"localPath",
},
Required: []string{
"uri",
},
Properties: map[string]*dcl.Property{
"sha256Checksum": &dcl.Property{
Type: "string",
GoName: "Sha256Checksum",
Description: "SHA256 checksum of the remote file.",
},
"uri": &dcl.Property{
Type: "string",
GoName: "Uri",
Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.",
},
},
},
},
},
"interpreter": &dcl.Property{
Type: "string",
GoName: "Interpreter",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum",
Description: "Required. The script interpreter to use. Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL",
Enum: []string{
"INTERPRETER_UNSPECIFIED",
"NONE",
"SHELL",
"POWERSHELL",
},
},
"outputFilePath": &dcl.Property{
Type: "string",
GoName: "OutputFilePath",
Description: "Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.",
},
"script": &dcl.Property{
Type: "string",
GoName: "Script",
Description: "An inline script. The size of the script is limited to 1024 characters.",
Conflicts: []string{
"file",
},
},
},
},
"validate": &dcl.Property{
Type: "object",
GoName: "Validate",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate",
Description: "Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates \"in desired state\", and exit code of 101 indicates \"not in desired state\". Any other exit code indicates a failure running validate.",
Required: []string{
"interpreter",
},
Properties: map[string]*dcl.Property{
"args": &dcl.Property{
Type: "array",
GoName: "Args",
Description: "Optional arguments to pass to the source during execution.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"file": &dcl.Property{
Type: "object",
GoName: "File",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile",
Description: "A remote or local file.",
Conflicts: []string{
"script",
},
Properties: map[string]*dcl.Property{
"allowInsecure": &dcl.Property{
Type: "boolean",
GoName: "AllowInsecure",
Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.",
},
"gcs": &dcl.Property{
Type: "object",
GoName: "Gcs",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs",
Description: "A Cloud Storage object.",
Conflicts: []string{
"remote",
"localPath",
},
Required: []string{
"bucket",
"object",
},
Properties: map[string]*dcl.Property{
"bucket": &dcl.Property{
Type: "string",
GoName: "Bucket",
Description: "Required. Bucket of the Cloud Storage object.",
},
"generation": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Generation",
Description: "Generation number of the Cloud Storage object.",
},
"object": &dcl.Property{
Type: "string",
GoName: "Object",
Description: "Required. Name of the Cloud Storage object.",
},
},
},
"localPath": &dcl.Property{
Type: "string",
GoName: "LocalPath",
Description: "A local path within the VM to use.",
Conflicts: []string{
"remote",
"gcs",
},
},
"remote": &dcl.Property{
Type: "object",
GoName: "Remote",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote",
Description: "A generic remote file.",
Conflicts: []string{
"gcs",
"localPath",
},
Required: []string{
"uri",
},
Properties: map[string]*dcl.Property{
"sha256Checksum": &dcl.Property{
Type: "string",
GoName: "Sha256Checksum",
Description: "SHA256 checksum of the remote file.",
},
"uri": &dcl.Property{
Type: "string",
GoName: "Uri",
Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.",
},
},
},
},
},
"interpreter": &dcl.Property{
Type: "string",
GoName: "Interpreter",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum",
Description: "Required. The script interpreter to use. Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL",
Enum: []string{
"INTERPRETER_UNSPECIFIED",
"NONE",
"SHELL",
"POWERSHELL",
},
},
"outputFilePath": &dcl.Property{
Type: "string",
GoName: "OutputFilePath",
Description: "Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.",
},
"script": &dcl.Property{
Type: "string",
GoName: "Script",
Description: "An inline script. The size of the script is limited to 1024 characters.",
Conflicts: []string{
"file",
},
},
},
},
},
},
"file": &dcl.Property{
Type: "object",
GoName: "File",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile",
Description: "File resource",
Conflicts: []string{
"pkg",
"repository",
"exec",
},
Required: []string{
"path",
"state",
},
Properties: map[string]*dcl.Property{
"content": &dcl.Property{
Type: "string",
GoName: "Content",
Description: "A a file with this content. The size of the content is limited to 1024 characters.",
Conflicts: []string{
"file",
},
},
"file": &dcl.Property{
Type: "object",
GoName: "File",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile",
Description: "A remote or local source.",
Conflicts: []string{
"content",
},
Properties: map[string]*dcl.Property{
"allowInsecure": &dcl.Property{
Type: "boolean",
GoName: "AllowInsecure",
Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.",
},
"gcs": &dcl.Property{
Type: "object",
GoName: "Gcs",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs",
Description: "A Cloud Storage object.",
Conflicts: []string{
"remote",
"localPath",
},
Required: []string{
"bucket",
"object",
},
Properties: map[string]*dcl.Property{
"bucket": &dcl.Property{
Type: "string",
GoName: "Bucket",
Description: "Required. Bucket of the Cloud Storage object.",
},
"generation": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Generation",
Description: "Generation number of the Cloud Storage object.",
},
"object": &dcl.Property{
Type: "string",
GoName: "Object",
Description: "Required. Name of the Cloud Storage object.",
},
},
},
"localPath": &dcl.Property{
Type: "string",
GoName: "LocalPath",
Description: "A local path within the VM to use.",
Conflicts: []string{
"remote",
"gcs",
},
},
"remote": &dcl.Property{
Type: "object",
GoName: "Remote",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote",
Description: "A generic remote file.",
Conflicts: []string{
"gcs",
"localPath",
},
Required: []string{
"uri",
},
Properties: map[string]*dcl.Property{
"sha256Checksum": &dcl.Property{
Type: "string",
GoName: "Sha256Checksum",
Description: "SHA256 checksum of the remote file.",
},
"uri": &dcl.Property{
Type: "string",
GoName: "Uri",
Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.",
},
},
},
},
},
"path": &dcl.Property{
Type: "string",
GoName: "Path",
Description: "Required. The absolute path of the file within the VM.",
},
"permissions": &dcl.Property{
Type: "string",
GoName: "Permissions",
ReadOnly: true,
Description: "Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4",
},
"state": &dcl.Property{
Type: "string",
GoName: "State",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum",
Description: "Required. Desired state of the file. Possible values: OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED, COMPLIANT, NON_COMPLIANT, UNKNOWN, NO_OS_POLICIES_APPLICABLE",
Enum: []string{
"OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED",
"COMPLIANT",
"NON_COMPLIANT",
"UNKNOWN",
"NO_OS_POLICIES_APPLICABLE",
},
},
},
},
"id": &dcl.Property{
Type: "string",
GoName: "Id",
Description: "Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.",
},
"pkg": &dcl.Property{
Type: "object",
GoName: "Pkg",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg",
Description: "Package resource",
Conflicts: []string{
"repository",
"exec",
"file",
},
Required: []string{
"desiredState",
},
Properties: map[string]*dcl.Property{
"apt": &dcl.Property{
Type: "object",
GoName: "Apt",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt",
Description: "A package managed by Apt.",
Conflicts: []string{
"deb",
"yum",
"zypper",
"rpm",
"googet",
"msi",
},
Required: []string{
"name",
},
Properties: map[string]*dcl.Property{
"name": &dcl.Property{
Type: "string",
GoName: "Name",
Description: "Required. Package name.",
},
},
},
"deb": &dcl.Property{
Type: "object",
GoName: "Deb",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb",
Description: "A deb package file.",
Conflicts: []string{
"apt",
"yum",
"zypper",
"rpm",
"googet",
"msi",
},
Required: []string{
"source",
},
Properties: map[string]*dcl.Property{
"pullDeps": &dcl.Property{
Type: "boolean",
GoName: "PullDeps",
Description: "Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`",
},
"source": &dcl.Property{
Type: "object",
GoName: "Source",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource",
Description: "Required. A deb package.",
Properties: map[string]*dcl.Property{
"allowInsecure": &dcl.Property{
Type: "boolean",
GoName: "AllowInsecure",
Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.",
},
"gcs": &dcl.Property{
Type: "object",
GoName: "Gcs",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs",
Description: "A Cloud Storage object.",
Conflicts: []string{
"remote",
"localPath",
},
Required: []string{
"bucket",
"object",
},
Properties: map[string]*dcl.Property{
"bucket": &dcl.Property{
Type: "string",
GoName: "Bucket",
Description: "Required. Bucket of the Cloud Storage object.",
},
"generation": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Generation",
Description: "Generation number of the Cloud Storage object.",
},
"object": &dcl.Property{
Type: "string",
GoName: "Object",
Description: "Required. Name of the Cloud Storage object.",
},
},
},
"localPath": &dcl.Property{
Type: "string",
GoName: "LocalPath",
Description: "A local path within the VM to use.",
Conflicts: []string{
"remote",
"gcs",
},
},
"remote": &dcl.Property{
Type: "object",
GoName: "Remote",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote",
Description: "A generic remote file.",
Conflicts: []string{
"gcs",
"localPath",
},
Required: []string{
"uri",
},
Properties: map[string]*dcl.Property{
"sha256Checksum": &dcl.Property{
Type: "string",
GoName: "Sha256Checksum",
Description: "SHA256 checksum of the remote file.",
},
"uri": &dcl.Property{
Type: "string",
GoName: "Uri",
Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.",
},
},
},
},
},
},
},
"desiredState": &dcl.Property{
Type: "string",
GoName: "DesiredState",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum",
Description: "Required. The desired state the agent should maintain for this package. Possible values: DESIRED_STATE_UNSPECIFIED, INSTALLED, REMOVED",
Enum: []string{
"DESIRED_STATE_UNSPECIFIED",
"INSTALLED",
"REMOVED",
},
},
"googet": &dcl.Property{
Type: "object",
GoName: "Googet",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget",
Description: "A package managed by GooGet.",
Conflicts: []string{
"apt",
"deb",
"yum",
"zypper",
"rpm",
"msi",
},
Required: []string{
"name",
},
Properties: map[string]*dcl.Property{
"name": &dcl.Property{
Type: "string",
GoName: "Name",
Description: "Required. Package name.",
},
},
},
"msi": &dcl.Property{
Type: "object",
GoName: "Msi",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi",
Description: "An MSI package.",
Conflicts: []string{
"apt",
"deb",
"yum",
"zypper",
"rpm",
"googet",
},
Required: []string{
"source",
},
Properties: map[string]*dcl.Property{
"properties": &dcl.Property{
Type: "array",
GoName: "Properties",
Description: "Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"source": &dcl.Property{
Type: "object",
GoName: "Source",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource",
Description: "Required. The MSI package.",
Properties: map[string]*dcl.Property{
"allowInsecure": &dcl.Property{
Type: "boolean",
GoName: "AllowInsecure",
Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.",
},
"gcs": &dcl.Property{
Type: "object",
GoName: "Gcs",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs",
Description: "A Cloud Storage object.",
Conflicts: []string{
"remote",
"localPath",
},
Required: []string{
"bucket",
"object",
},
Properties: map[string]*dcl.Property{
"bucket": &dcl.Property{
Type: "string",
GoName: "Bucket",
Description: "Required. Bucket of the Cloud Storage object.",
},
"generation": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Generation",
Description: "Generation number of the Cloud Storage object.",
},
"object": &dcl.Property{
Type: "string",
GoName: "Object",
Description: "Required. Name of the Cloud Storage object.",
},
},
},
"localPath": &dcl.Property{
Type: "string",
GoName: "LocalPath",
Description: "A local path within the VM to use.",
Conflicts: []string{
"remote",
"gcs",
},
},
"remote": &dcl.Property{
Type: "object",
GoName: "Remote",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote",
Description: "A generic remote file.",
Conflicts: []string{
"gcs",
"localPath",
},
Required: []string{
"uri",
},
Properties: map[string]*dcl.Property{
"sha256Checksum": &dcl.Property{
Type: "string",
GoName: "Sha256Checksum",
Description: "SHA256 checksum of the remote file.",
},
"uri": &dcl.Property{
Type: "string",
GoName: "Uri",
Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.",
},
},
},
},
},
},
},
"rpm": &dcl.Property{
Type: "object",
GoName: "Rpm",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm",
Description: "An rpm package file.",
Conflicts: []string{
"apt",
"deb",
"yum",
"zypper",
"googet",
"msi",
},
Required: []string{
"source",
},
Properties: map[string]*dcl.Property{
"pullDeps": &dcl.Property{
Type: "boolean",
GoName: "PullDeps",
Description: "Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`",
},
"source": &dcl.Property{
Type: "object",
GoName: "Source",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource",
Description: "Required. An rpm package.",
Properties: map[string]*dcl.Property{
"allowInsecure": &dcl.Property{
Type: "boolean",
GoName: "AllowInsecure",
Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.",
},
"gcs": &dcl.Property{
Type: "object",
GoName: "Gcs",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs",
Description: "A Cloud Storage object.",
Conflicts: []string{
"remote",
"localPath",
},
Required: []string{
"bucket",
"object",
},
Properties: map[string]*dcl.Property{
"bucket": &dcl.Property{
Type: "string",
GoName: "Bucket",
Description: "Required. Bucket of the Cloud Storage object.",
},
"generation": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Generation",
Description: "Generation number of the Cloud Storage object.",
},
"object": &dcl.Property{
Type: "string",
GoName: "Object",
Description: "Required. Name of the Cloud Storage object.",
},
},
},
"localPath": &dcl.Property{
Type: "string",
GoName: "LocalPath",
Description: "A local path within the VM to use.",
Conflicts: []string{
"remote",
"gcs",
},
},
"remote": &dcl.Property{
Type: "object",
GoName: "Remote",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote",
Description: "A generic remote file.",
Conflicts: []string{
"gcs",
"localPath",
},
Required: []string{
"uri",
},
Properties: map[string]*dcl.Property{
"sha256Checksum": &dcl.Property{
Type: "string",
GoName: "Sha256Checksum",
Description: "SHA256 checksum of the remote file.",
},
"uri": &dcl.Property{
Type: "string",
GoName: "Uri",
Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.",
},
},
},
},
},
},
},
"yum": &dcl.Property{
Type: "object",
GoName: "Yum",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum",
Description: "A package managed by YUM.",
Conflicts: []string{
"apt",
"deb",
"zypper",
"rpm",
"googet",
"msi",
},
Required: []string{
"name",
},
Properties: map[string]*dcl.Property{
"name": &dcl.Property{
Type: "string",
GoName: "Name",
Description: "Required. Package name.",
},
},
},
"zypper": &dcl.Property{
Type: "object",
GoName: "Zypper",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper",
Description: "A package managed by Zypper.",
Conflicts: []string{
"apt",
"deb",
"yum",
"rpm",
"googet",
"msi",
},
Required: []string{
"name",
},
Properties: map[string]*dcl.Property{
"name": &dcl.Property{
Type: "string",
GoName: "Name",
Description: "Required. Package name.",
},
},
},
},
},
"repository": &dcl.Property{
Type: "object",
GoName: "Repository",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository",
Description: "Package repository resource",
Conflicts: []string{
"pkg",
"exec",
"file",
},
Properties: map[string]*dcl.Property{
"apt": &dcl.Property{
Type: "object",
GoName: "Apt",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt",
Description: "An Apt Repository.",
Conflicts: []string{
"yum",
"zypper",
"goo",
},
Required: []string{
"archiveType",
"uri",
"distribution",
"components",
},
Properties: map[string]*dcl.Property{
"archiveType": &dcl.Property{
Type: "string",
GoName: "ArchiveType",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum",
Description: "Required. Type of archive files in this repository. Possible values: ARCHIVE_TYPE_UNSPECIFIED, DEB, DEB_SRC",
Enum: []string{
"ARCHIVE_TYPE_UNSPECIFIED",
"DEB",
"DEB_SRC",
},
},
"components": &dcl.Property{
Type: "array",
GoName: "Components",
Description: "Required. List of components for this repository. Must contain at least one item.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"distribution": &dcl.Property{
Type: "string",
GoName: "Distribution",
Description: "Required. Distribution of this repository.",
},
"gpgKey": &dcl.Property{
Type: "string",
GoName: "GpgKey",
Description: "URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.",
},
"uri": &dcl.Property{
Type: "string",
GoName: "Uri",
Description: "Required. URI for this repository.",
},
},
},
"goo": &dcl.Property{
Type: "object",
GoName: "Goo",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo",
Description: "A Goo Repository.",
Conflicts: []string{
"apt",
"yum",
"zypper",
},
Required: []string{
"name",
"url",
},
Properties: map[string]*dcl.Property{
"name": &dcl.Property{
Type: "string",
GoName: "Name",
Description: "Required. The name of the repository.",
},
"url": &dcl.Property{
Type: "string",
GoName: "Url",
Description: "Required. The url of the repository.",
},
},
},
"yum": &dcl.Property{
Type: "object",
GoName: "Yum",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum",
Description: "A Yum Repository.",
Conflicts: []string{
"apt",
"zypper",
"goo",
},
Required: []string{
"id",
"baseUrl",
},
Properties: map[string]*dcl.Property{
"baseUrl": &dcl.Property{
Type: "string",
GoName: "BaseUrl",
Description: "Required. The location of the repository directory.",
},
"displayName": &dcl.Property{
Type: "string",
GoName: "DisplayName",
Description: "The display name of the repository.",
},
"gpgKeys": &dcl.Property{
Type: "array",
GoName: "GpgKeys",
Description: "URIs of GPG keys.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"id": &dcl.Property{
Type: "string",
GoName: "Id",
Description: "Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.",
},
},
},
"zypper": &dcl.Property{
Type: "object",
GoName: "Zypper",
GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper",
Description: "A Zypper Repository.",
Conflicts: []string{
"apt",
"yum",
"goo",
},
Required: []string{
"id",
"baseUrl",
},
Properties: map[string]*dcl.Property{
"baseUrl": &dcl.Property{
Type: "string",
GoName: "BaseUrl",
Description: "Required. The location of the repository directory.",
},
"displayName": &dcl.Property{
Type: "string",
GoName: "DisplayName",
Description: "The display name of the repository.",
},
"gpgKeys": &dcl.Property{
Type: "array",
GoName: "GpgKeys",
Description: "URIs of GPG keys.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"id": &dcl.Property{
Type: "string",
GoName: "Id",
Description: "Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.",
},
},
},
},
},
},
},
},
},
},
},
},
},
},
"project": &dcl.Property{
Type: "string",
GoName: "Project",
Description: "The project for the resource",
Immutable: true,
ResourceReferences: []*dcl.PropertyResourceReference{
&dcl.PropertyResourceReference{
Resource: "Cloudresourcemanager/Project",
Field: "name",
Parent: true,
},
},
},
"reconciling": &dcl.Property{
Type: "boolean",
GoName: "Reconciling",
ReadOnly: true,
Description: "Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING",
Immutable: true,
},
"revisionCreateTime": &dcl.Property{
Type: "string",
Format: "date-time",
GoName: "RevisionCreateTime",
ReadOnly: true,
Description: "Output only. The timestamp that the revision was created.",
Immutable: true,
},
"revisionId": &dcl.Property{
Type: "string",
GoName: "RevisionId",
ReadOnly: true,
Description: "Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment",
Immutable: true,
},
"rollout": &dcl.Property{
Type: "object",
GoName: "Rollout",
GoType: "OSPolicyAssignmentRollout",
Description: "Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.",
Required: []string{
"disruptionBudget",
"minWaitDuration",
},
Properties: map[string]*dcl.Property{
"disruptionBudget": &dcl.Property{
Type: "object",
GoName: "DisruptionBudget",
GoType: "OSPolicyAssignmentRolloutDisruptionBudget",
Description: "Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.",
Properties: map[string]*dcl.Property{
"fixed": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Fixed",
Description: "Specifies a fixed value.",
Conflicts: []string{
"percent",
},
},
"percent": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Percent",
Description: "Specifies the relative value defined as a percentage, which will be multiplied by a reference value.",
Conflicts: []string{
"fixed",
},
},
},
},
"minWaitDuration": &dcl.Property{
Type: "string",
GoName: "MinWaitDuration",
Description: "Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.",
},
},
},
"rolloutState": &dcl.Property{
Type: "string",
GoName: "RolloutState",
GoType: "OSPolicyAssignmentRolloutStateEnum",
ReadOnly: true,
Description: "Output only. OS policy assignment rollout state Possible values: ROLLOUT_STATE_UNSPECIFIED, IN_PROGRESS, CANCELLING, CANCELLED, SUCCEEDED",
Immutable: true,
Enum: []string{
"ROLLOUT_STATE_UNSPECIFIED",
"IN_PROGRESS",
"CANCELLING",
"CANCELLED",
"SUCCEEDED",
},
},
"skipAwaitRollout": &dcl.Property{
Type: "boolean",
GoName: "SkipAwaitRollout",
Description: "Set to true to skip awaiting rollout during resource creation and update.",
Unreadable: true,
},
"uid": &dcl.Property{
Type: "string",
GoName: "Uid",
ReadOnly: true,
Description: "Output only. Server generated unique id for the OS policy assignment resource.",
Immutable: true,
},
},
},
},
},
},
}
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package date_test
import (
"testing"
"time"
"github.com/fxtlabs/date"
)
// TestParseISO checks that date.ParseISO accepts ordinary and ISO 8601
// expanded-year date strings, then rejects a range of malformed inputs.
func TestParseISO(t *testing.T) {
	cases := []struct {
		value string
		year  int
		month time.Month
		day   int
	}{
		{"1969-12-31", 1969, time.December, 31},
		{"+1970-01-01", 1970, time.January, 1},
		{"+01970-01-02", 1970, time.January, 2},
		{"2000-02-28", 2000, time.February, 28},
		{"+2000-02-29", 2000, time.February, 29},
		{"+02000-03-01", 2000, time.March, 1},
		{"+002004-02-28", 2004, time.February, 28},
		{"2004-02-29", 2004, time.February, 29},
		{"2004-03-01", 2004, time.March, 1},
		{"0000-01-01", 0, time.January, 1},
		{"+0001-02-03", 1, time.February, 3},
		{"+00019-03-04", 19, time.March, 4},
		{"0100-04-05", 100, time.April, 5},
		{"2000-05-06", 2000, time.May, 6},
		{"+30000-06-07", 30000, time.June, 7},
		{"+400000-07-08", 400000, time.July, 8},
		{"+5000000-08-09", 5000000, time.August, 9},
		{"-0001-09-11", -1, time.September, 11},
		{"-0019-10-12", -19, time.October, 12},
		{"-00100-11-13", -100, time.November, 13},
		{"-02000-12-14", -2000, time.December, 14},
		{"-30000-02-15", -30000, time.February, 15},
		{"-0400000-05-16", -400000, time.May, 16},
		{"-5000000-09-17", -5000000, time.September, 17},
	}
	for _, c := range cases {
		d, err := date.ParseISO(c.value)
		if err != nil {
			t.Errorf("ParseISO(%v) == %v", c.value, err)
			// d is the zero value here; checking its fields would just add
			// a second, misleading failure for the same input.
			continue
		}
		year, month, day := d.Date()
		if year != c.year || month != c.month || day != c.day {
			t.Errorf("ParseISO(%v) == %v, want (%v, %v, %v)", c.value, d, c.year, c.month, c.day)
		}
	}
	badCases := []string{
		"1234-05",
		"1234-5-6",
		"1234-05-6",
		"1234-5-06",
		"12340506",
		"1234/05/06",
		"1234-0A-06",
		"1234-05-0B",
		"1234-05-06trailing",
		"padding1234-05-06",
		"1-02-03",
		"10-11-12",
		"100-02-03",
		"+1-02-03",
		"+10-11-12",
		"+100-02-03",
		"-123-05-06",
	}
	for _, c := range badCases {
		d, err := date.ParseISO(c)
		if err == nil {
			t.Errorf("ParseISO(%v) == %v", c, d)
		}
	}
}
// TestParse checks date.Parse against several common layouts, and verifies
// that the plain ISO8601 layout does not accept expanded-year forms.
func TestParse(t *testing.T) {
	// Test ability to parse a few common date formats
	cases := []struct {
		layout string
		value  string
		year   int
		month  time.Month
		day    int
	}{
		{date.ISO8601, "1969-12-31", 1969, time.December, 31},
		{date.ISO8601B, "19700101", 1970, time.January, 1},
		{date.RFC822, "29-Feb-00", 2000, time.February, 29},
		{date.RFC822W, "Mon, 01-Mar-04", 2004, time.March, 1},
		{date.RFC850, "Wednesday, 12-Aug-15", 2015, time.August, 12},
		{date.RFC1123, "05 Dec 1928", 1928, time.December, 5},
		{date.RFC1123W, "Mon, 05 Dec 1928", 1928, time.December, 5},
		{date.RFC3339, "2345-06-07", 2345, time.June, 7},
	}
	for _, c := range cases {
		d, err := date.Parse(c.layout, c.value)
		if err != nil {
			t.Errorf("Parse(%v) == %v", c.value, err)
			// d is the zero value on error; skip the field checks to avoid
			// a second misleading failure for the same input.
			continue
		}
		year, month, day := d.Date()
		if year != c.year || month != c.month || day != c.day {
			t.Errorf("Parse(%v) == %v, want (%v, %v, %v)", c.value, d, c.year, c.month, c.day)
		}
	}
	// Test inability to parse ISO 8601 expanded year format
	badCases := []string{
		"+1234-05-06",
		"+12345-06-07",
		"-1234-05-06",
		"-12345-06-07",
	}
	for _, c := range badCases {
		d, err := date.Parse(date.ISO8601, c)
		if err == nil {
			t.Errorf("Parse(%v) == %v", c, d)
		}
	}
}
// TestFormatISO round-trips expanded-year dates: each string is parsed with
// ParseISO and re-formatted with FormatISO(n), which must reproduce the
// original input exactly.
func TestFormatISO(t *testing.T) {
	cases := []struct {
		value string
		n     int
	}{
		{"-5000-02-03", 4},
		{"-05000-02-03", 5},
		{"-005000-02-03", 6},
		{"+0000-01-01", 4},
		{"+00000-01-01", 5},
		{"+1000-01-01", 4},
		{"+01000-01-01", 5},
		{"+1970-01-01", 4},
		{"+001999-12-31", 6},
		{"+999999-12-31", 6},
	}
	for _, tc := range cases {
		parsed, err := date.ParseISO(tc.value)
		if err != nil {
			t.Errorf("FormatISO(%v) cannot parse input: %v", tc.value, err)
			continue
		}
		got := parsed.FormatISO(tc.n)
		if got != tc.value {
			t.Errorf("FormatISO(%v) == %v, want %v", tc, got, tc.value)
		}
	}
}
|
package main
import (
"log"
"os"
"github.com/gin-gonic/gin"
"supertimemachine/service"
"github.com/gin-contrib/cors"
"flag"
"gopkg.in/mgo.v2"
"time"
)
// main reads configuration from the environment, wires up the Gin router
// (optionally with CORS), connects to MongoDB, and starts the HTTP server.
func main() {
	// all this initialization logic should be set somewhere else.
	port := os.Getenv("PORT")
	mongoUrl := os.Getenv("MONGO_URL")
	mongoUser := os.Getenv("MONGO_USER")
	mongoPassword := os.Getenv("MONGO_PASSWORD")
	mongoDatabase := os.Getenv("MONGO_DATABASE")
	if port == "" {
		log.Fatal("$PORT must be set")
	}
	if mongoUrl == "" {
		log.Fatal("MONGO_URL must be set!")
	}
	if mongoUser == "" {
		log.Fatal("MONGO_USER must be set!")
	}
	if mongoPassword == "" {
		log.Fatal("MONGO_PASSWORD must be set!")
	}
	if mongoDatabase == "" {
		log.Fatal("MONGO_DATABASE must be set!")
	}
	allowCors := flag.String("allowCORS", "", "Allow CORS")
	flag.Parse()
	router := gin.New()
	if *allowCors != "" {
		log.Println("Allow CORS from: ", *allowCors)
		config := cors.DefaultConfig()
		// The default config does not allow PATCH, which /task/:id uses.
		config.AllowMethods = append(config.AllowMethods, "PATCH")
		config.AllowOrigins = []string{*allowCors}
		router.Use(cors.New(config))
	}
	router.Use(gin.Logger())
	router.LoadHTMLGlob("templates/*.tmpl.html")
	router.Static("/static", "../elm-client/dist/static")
	router.GET("/", func(c *gin.Context) {
		c.File("../elm-client/dist/index.html")
	})
	router.GET("/task/", service.GetAllTasksHandler)
	router.GET("/task/:id", service.GetTaskHandler)
	router.PATCH("/task/:id", service.TaskPatchHandler)
	router.POST("/task/", service.AddNewTaskHandler)
	dialInfo := mgo.DialInfo{
		Username: mongoUser,
		Password: mongoPassword,
		Addrs:    []string{mongoUrl},
		Database: mongoDatabase,
		Timeout:  10 * time.Second,
	}
	session, err := mgo.DialWithInfo(&dialInfo)
	if err != nil {
		log.Fatal("Cannot Dial Mongo: ", err)
	}
	session.SetMode(mgo.Monotonic, true)
	defer session.Close()
	service.Session = session
	log.Println("Started the mongo connection! ")
	// Run blocks until the server stops; previously its error was dropped.
	if err := router.Run(":" + port); err != nil {
		log.Fatal("HTTP server error: ", err)
	}
}
|
package loggermw
import (
"net/http"
"net/http/httptest"
"testing"
)
// TestWriteHeader verifies that customResponseWriter records the status
// code it forwards via WriteHeader.
func TestWriteHeader(t *testing.T) {
	recorder := httptest.NewRecorder()
	writer := customResponseWriter{
		ResponseWriter: recorder,
		status:         0,
		size:           0,
	}
	writer.WriteHeader(http.StatusOK)
	if writer.status != http.StatusOK {
		t.Errorf("Expected: %d, Got: %d", http.StatusOK, writer.status)
	}
}
// TestWrite verifies that customResponseWriter counts the bytes written
// through it. Unlike the previous version, it also checks Write's own
// return values instead of discarding them.
func TestWrite(t *testing.T) {
	rec := httptest.NewRecorder()
	crw := customResponseWriter{
		ResponseWriter: rec,
		status:         0,
		size:           0,
	}
	buffer := []byte{'a', 'b', 'c'}
	n, err := crw.Write(buffer)
	if err != nil {
		t.Fatalf("Write returned unexpected error: %v", err)
	}
	if n != len(buffer) {
		t.Errorf("Expected: %d, Got: %d", len(buffer), n)
	}
	if crw.size != len(buffer) {
		t.Errorf("Expected: %d, Got: %d", len(buffer), crw.size)
	}
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"time"
"github.com/utilitywarehouse/go-pubsub"
"github.com/utilitywarehouse/go-pubsub/amqp"
)
// main publishes one demo message to a local AMQP broker, then consumes
// from the same topic for two seconds, printing each received message.
func main() {
	sink, err := amqp.NewMessageSink(amqp.MessageSinkConfig{
		Address: "amqp://localhost:5672/",
		Topic:   "demo-topic",
	})
	if err != nil {
		log.Fatal(err)
	}
	// Check the publish result; the error was previously discarded.
	if err := sink.PutMessage(MyMessage{
		CustomerID: "customer-01",
		Message:    fmt.Sprintf("hello. it is currently %v", time.Now()),
	}); err != nil {
		log.Fatal(err)
	}
	cons := amqp.NewMessageSource(amqp.MessageSourceConfig{
		Address:       "amqp://localhost:5672/",
		ConsumerGroup: "demo-group",
		Topic:         "demo-topic",
	})
	// consume messages for 2 seconds
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	// Releasing the context's resources fixes the `go vet` lostcancel warning.
	defer cancel()
	handler := func(m pubsub.ConsumerMessage) error {
		fmt.Printf("message is: %s\n", m.Data)
		return nil
	}
	onError := func(m pubsub.ConsumerMessage, e error) error {
		panic("unexpected error")
	}
	if err := cons.ConsumeMessages(ctx, handler, onError); err != nil {
		log.Fatal(err)
	}
}
// MyMessage is the demo payload published to and consumed from the broker.
type MyMessage struct {
	CustomerID string
	Message    string
}
// Marshal encodes the message as JSON; this is the wire format used when
// the message is handed to the sink's PutMessage.
func (m MyMessage) Marshal() ([]byte, error) {
	return json.Marshal(m)
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addon
import (
"testing"
"github.com/stretchr/testify/assert"
"helm.sh/helm/v3/pkg/chart/loader"
)
// files is the in-memory addon fixture used by the MemoryReader tests:
// a metadata.yaml describing a test addon plus a CUE parameter file under
// /resources/. The raw contents are intentionally left untouched.
var files = []*loader.BufferedFile{
	{
		Name: "metadata.yaml",
		Data: []byte(`name: test-helm-addon
version: 1.0.0
description: This is a addon for test when install addon from helm repo
icon: https://www.terraform.io/assets/images/logo-text-8c3ba8a6.svg
url: https://terraform.io/
tags: []
deployTo:
  control_plane: true
  runtime_cluster: false
dependencies: []
invisible: false`),
	},
	{
		Name: "/resources/parameter.cue",
		Data: []byte(`parameter: {
	// test wrong parameter
	example: *"default"
}`),
	},
}
// TestMemoryReader exercises MemoryReader's listing and file-reading paths
// over the in-memory fixture.
func TestMemoryReader(t *testing.T) {
	m := MemoryReader{
		Name:  "fluxcd",
		Files: files,
	}
	meta, err := m.ListAddonMeta()
	assert.NoError(t, err)
	// testify's assert.Equal takes (t, expected, actual); the arguments were
	// previously swapped, which garbles the failure message on mismatch.
	assert.Equal(t, 2, len(meta["fluxcd"].Items))
	metaFile, err := m.ReadFile("metadata.yaml")
	assert.NoError(t, err)
	assert.NotEmpty(t, metaFile)
	parameterData, err := m.ReadFile("/resources/parameter.cue")
	assert.NoError(t, err)
	assert.NotEmpty(t, parameterData)
}
|
package ir
import (
"errors"
"fmt"
)
// InstrCode identifies the operation an Instr performs.
type InstrCode int

// Opcodes for IR instructions. The groups below cover, in order: integer
// arithmetic and bitwise operations, comparison, calls, memory access,
// casts, addressing (GEP), and control flow.
const (
	Add InstrCode = iota
	Sub
	Mul
	UDiv
	SDiv
	URem
	SRem
	And
	Or
	Xor
	Shl
	AShr
	LShr
	Not
	ICmp
	Call
	Alloc
	Store
	Load
	Bitcast
	SExt
	ZExt
	Trunc
	GEP
	Br
	Brif
	Ret
	Unreachable
)
// instrCodeNameMap maps each opcode to the mnemonic returned by
// InstrCode.String. A composite literal replaces the previous init()
// function: the table is pure data, so building it at var-initialization
// time avoids an init side effect while preserving identical contents.
var instrCodeNameMap = map[InstrCode]string{
	Add:         "add",
	Sub:         "sub",
	Mul:         "mul",
	UDiv:        "udiv",
	SDiv:        "sdiv",
	URem:        "urem",
	SRem:        "srem",
	And:         "and",
	Or:          "or",
	Xor:         "xor",
	Shl:         "shl",
	AShr:        "ashr",
	LShr:        "lshr",
	Not:         "not",
	ICmp:        "icmp",
	Call:        "call",
	Alloc:       "alloc",
	Store:       "store",
	Load:        "load",
	Bitcast:     "bitcast",
	SExt:        "sext",
	ZExt:        "zext",
	Trunc:       "trunc",
	GEP:         "gep",
	Br:          "br",
	Brif:        "brif",
	Ret:         "ret",
	Unreachable: "unreachable",
}
// String returns the mnemonic for the opcode, or the empty string for a
// code with no entry in the name table.
func (v InstrCode) String() string {
	if name, ok := instrCodeNameMap[v]; ok {
		return name
	}
	return ""
}
// Instr is a single IR instruction: an opcode, its operand values, and
// optional metadata that only some opcodes use (see field comments).
type Instr struct {
	code InstrCode
	name string // result name (presumably the printed value name — confirm against the printer)
	operands []Value
	block *Block // owning basic block (NOTE(review): appears to be set on insertion — confirm)
	intPredicate IntPredicate // used by icmp
	typ Type // used by sext, zext, trunc, alloc, bitcast
	function *Function // used by call
}
// newInstr builds an Instr with the given opcode and operands. All other
// fields are left at their zero values for the caller to populate.
func newInstr(code InstrCode, operands ...Value) *Instr {
	instr := new(Instr)
	instr.code = code
	instr.operands = operands
	return instr
}
// instrCheckMismatchedTypes returns nil when typ1 and typ2 are equal, and
// an instruction error describing the mismatch otherwise.
func instrCheckMismatchedTypes(i *Instr, typ1, typ2 Type) error {
	if typ1.Equals(typ2) {
		return nil
	}
	return newInstrErr(fmt.Sprintf("Mismatched types: `%s` and `%s`", typ1.String(), typ2.String()), i)
}
// instrCheckTypesAreInt returns an instruction error for the first type in
// typs that is not an *IntType, or nil when all of them are.
func instrCheckTypesAreInt(i *Instr, typs ...Type) error {
	for _, t := range typs {
		if _, isInt := t.(*IntType); isInt {
			continue
		}
		return newInstrErr(fmt.Sprintf("Type `%s` is not an int", t.String()), i)
	}
	return nil
}
// instrCheckTypeFirstClass returns nil when typ is a first-class type, and
// an instruction error otherwise.
func instrCheckTypeFirstClass(i *Instr, typ Type) error {
	if typ.IsFirstClass() {
		return nil
	}
	return newInstrErr(fmt.Sprintf("Type `%s` is invalid", typ), i)
}
// instrCheckTypesArePointer returns an instruction error for the first type
// in typs that is not a *PointerType, or nil when all of them are.
func instrCheckTypesArePointer(i *Instr, typs ...Type) error {
	for _, t := range typs {
		if _, isPtr := t.(*PointerType); isPtr {
			continue
		}
		return newInstrErr(fmt.Sprintf("Type `%s` is not a pointer", t.String()), i)
	}
	return nil
}
// assert panics when cond is false. It guards internal invariants in the
// validator; a failure indicates a programmer bug, not a user error.
func assert(cond bool) {
	if cond {
		return
	}
	panic("assertion failed")
}
// validate checks that the instruction's operands and auxiliary data are
// consistent with its opcode. It returns a descriptive error for malformed
// instructions; operand-count violations panic via assert, since they
// indicate a builder bug rather than invalid input.
func (v *Instr) validate() error {
	switch v.code {
	case Add, Sub, Mul, UDiv, SDiv, URem, SRem, And, Or, Xor, Shl, AShr, LShr:
		assert(len(v.operands) == 2)
		if err := instrCheckTypesAreInt(v, v.operands[0].Type(), v.operands[1].Type()); err != nil {
			return err
		}
		if err := instrCheckMismatchedTypes(v, v.operands[0].Type(), v.operands[1].Type()); err != nil {
			return err
		}
	case Not:
		assert(len(v.operands) == 1)
		if err := instrCheckTypesAreInt(v, v.operands[0].Type()); err != nil {
			return err
		}
	case ICmp:
		assert(len(v.operands) == 2)
		if err := instrCheckTypesAreInt(v, v.operands[0].Type(), v.operands[1].Type()); err != nil {
			return err
		}
	case Alloc:
		assert(len(v.operands) == 0)
		if err := instrCheckTypeFirstClass(v, v.typ); err != nil {
			return err
		}
	case Store:
		assert(len(v.operands) == 2)
		if err := instrCheckTypeFirstClass(v, v.operands[1].Type()); err != nil {
			return err
		}
		if ptr, ok := v.operands[0].Type().(*PointerType); ok {
			if !ptr.element.Equals(v.operands[1].Type()) {
				return errors.New("Type of location for store must be pointer to type of value")
			}
		} else {
			return fmt.Errorf("Type `%s` is not a pointer", v.operands[0].Type().String())
		}
	case Load:
		assert(len(v.operands) == 1)
		if _, ok := v.operands[0].Type().(*PointerType); !ok {
			return fmt.Errorf("Type `%s` is not a pointer", v.operands[0].Type().String())
		}
	case Bitcast:
		assert(len(v.operands) == 1)
		// Bitcast only converts between pointer types here.
		if err := instrCheckTypesArePointer(v, v.operands[0].Type(), v.typ); err != nil {
			return err
		}
	case ZExt, SExt:
		assert(len(v.operands) == 1)
		if err := instrCheckTypesAreInt(v, v.operands[0].Type(), v.typ); err != nil {
			return err
		}
		// Assertions are safe: the int check above guarantees both types.
		origType := v.operands[0].Type().(*IntType)
		toType := v.typ.(*IntType)
		if origType.Size() >= toType.Size() {
			return errors.New("Must extend int to larger int")
		}
	case Trunc:
		assert(len(v.operands) == 1)
		if err := instrCheckTypesAreInt(v, v.operands[0].Type(), v.typ); err != nil {
			return err
		}
		origType := v.operands[0].Type().(*IntType)
		toType := v.typ.(*IntType)
		if origType.Size() <= toType.Size() {
			return errors.New("Must truncate int to smaller int")
		}
	case Call:
		argLen := len(v.operands)
		parLen := len(v.function.typ.parameters)
		if argLen != parLen {
			// Extra arguments are allowed only for variadic callees.
			if !(argLen > parLen && v.function.typ.IsVariadic()) {
				return newInstrErr(fmt.Sprintf("Function expects %d arguments, have %d", parLen, argLen), v)
			}
		}
		// Check declared parameters only; variadic extras are unchecked.
		for i, par := range v.function.typ.parameters {
			if err := instrCheckMismatchedTypes(v, par, v.operands[i].Type()); err != nil {
				return err
			}
		}
	case Br:
		assert(len(v.operands) == 1)
	case Brif:
		assert(len(v.operands) == 3)
		if err := instrCheckTypesAreInt(v, v.operands[0].Type()); err != nil {
			return err
		}
		// Discarding the assertion's ok is safe: the int check above passed.
		if intType, _ := v.operands[0].Type().(*IntType); intType.size != 1 {
			return newInstrErr("Condition to brif must be of type i1", v)
		}
	case Ret:
		assert(len(v.operands) == 1)
		// A void return value needs no further checking.
		if v.operands[0].Type().IsVoid() {
			return nil
		}
		if err := instrCheckTypeFirstClass(v, v.operands[0].Type()); err != nil {
			return err
		}
		funcRetType := v.Block().function.typ.returnType
		if !funcRetType.Equals(v.operands[0].Type()) {
			return newInstrErr(fmt.Sprintf("Cannot return value of type `%s` from function `%s` with return type `%s`", v.operands[0].Type().String(), v.Block().function.Name(), funcRetType), v)
		}
	case Unreachable:
		assert(len(v.operands) == 0)
	case GEP:
		typ := v.operands[0].Type()
		for i, index := range v.operands[1:] {
			if _, ok := index.Type().(*IntType); !ok {
				return errors.New("expected int type in GEP, found something else")
			}
			if i == 0 {
				// The first index steps through the base pointer.
				if ptr, ok := v.operands[0].Type().(*PointerType); ok {
					typ = ptr.Element()
				} else {
					return fmt.Errorf("Type `%s` is not a pointer", v.operands[0].Type().String())
				}
			} else {
				// Subsequent indices may only step into arrays.
				switch typ.(type) {
				case *ArrayType:
					typ = typ.(*ArrayType).Element()
				default:
					return errors.New("invalid arg to GEP")
				}
			}
		}
	default:
		panic("unimplemented opcode")
	}
	return nil
}
// IsTerminating reports whether the instruction ends a basic block
// (unreachable, ret, br, brif).
func (v Instr) IsTerminating() bool {
	switch v.code {
	case Unreachable, Ret, Br, Brif:
		return true
	}
	return false
}
// HasSideEffects reports whether the instruction has effects beyond its
// result value. Terminators are excluded by definition here; among the
// rest, only call and store count as side-effecting.
func (v Instr) HasSideEffects() bool {
	if v.IsTerminating() {
		return false
	}
	switch v.code {
	case Call, Store:
		return true
	}
	return false
}
// string renders the instruction in textual IR form, e.g.
// "%x = add i32 %a, %b" or "%p = bitcast i8* %q to i32*". Void-typed
// instructions omit the "%name = " prefix.
func (v Instr) string() string {
	var out string
	if !v.Type().IsVoid() {
		out = v.Identifier() + " = "
	}
	out += v.code.String()
	last := len(v.operands) - 1
	for i, arg := range v.operands {
		out += " " + arg.Type().String() + " " + arg.Identifier()
		if i != last {
			out += ","
		}
	}
	// Casts spell out their destination type after "to"; alloc appends
	// the allocated type directly.
	switch v.code {
	case Bitcast, ZExt, SExt, Trunc:
		out += " to " + v.typ.String()
	case Alloc:
		out += " " + v.typ.String()
	}
	return out
}
// Code returns the instruction's opcode.
func (v *Instr) Code() InstrCode {
	return v.code
}
// Type returns the type of the value produced by the instruction.
// Terminators and stores yield void; load/GEP derive their type from the
// pointed-to element; GEP returns nil on an invalid index chain.
func (v Instr) Type() Type {
	if v.IsTerminating() {
		return NewVoidType()
	}
	switch v.code {
	case Add, Sub, Mul, UDiv, SDiv, URem, SRem, And, Or, Xor, Shl, AShr, LShr, Not:
		// Arithmetic/bitwise results share the operand type.
		return v.operands[0].Type()
	case ICmp:
		return NewIntType(1)
	case Load:
		ptr, ok := v.operands[0].Type().(*PointerType)
		if !ok {
			return nil
		}
		return ptr.element
	case Alloc:
		return NewPointerType(v.typ)
	case Store:
		return NewVoidType()
	case Call:
		return v.function.typ.ReturnType()
	case SExt, ZExt, Trunc, Bitcast:
		return v.typ
	case GEP:
		// Walk the index chain through pointer/array element types.
		typ := v.operands[0].Type()
		for range v.operands[1:] {
			switch t := typ.(type) {
			case *PointerType:
				typ = t.Element()
			case *ArrayType:
				typ = t.Element()
			default:
				return nil
			}
		}
		return NewPointerType(typ)
	default:
		panic("unimplemented instrcode")
	}
}
// IntPredicate returns the comparison predicate of the instruction.
// Only applicable to icmp instructions; calling it on any other opcode
// is a programmer error and panics.
func (v Instr) IntPredicate() IntPredicate {
	if v.code != ICmp {
		panic("Called IntPredicate() on a non-icmp instruction")
	}
	return v.intPredicate
}
// Function returns the callee of the instruction. Only applicable to call
// instructions; calling it on any other opcode is a programmer error and
// panics.
func (v Instr) Function() *Function {
	if v.code != Call {
		panic("Called Function() on a non-call instruction")
	}
	return v.function
}
// SetName assigns the instruction's local name.
func (v *Instr) SetName(name string) { v.name = name }

// Name returns the bare local name (without the "%" sigil).
func (v Instr) Name() string { return v.name }

// Identifier returns the printable SSA identifier, "%" + name.
func (v Instr) Identifier() string { return "%" + v.name }

// IsGlobal reports whether the value is a global; instructions never are.
func (v Instr) IsGlobal() bool { return false }

// IsLit reports whether the value is a literal; instructions never are.
func (v Instr) IsLit() bool { return false }

// Operands returns the instruction's operand values.
func (v Instr) Operands() []Value { return v.operands }

// setBlock records the basic block that owns this instruction.
func (v *Instr) setBlock(block *Block) { v.block = block }

// Block returns the basic block that owns this instruction.
func (v Instr) Block() *Block { return v.block }
|
/*
Copyright 2018 The NSQ-Operator Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package prometheus registers nsq-operator version information as
// Prometheus metrics.
package prometheus
import (
"github.com/andyxning/nsq-operator/pkg/version"
"github.com/prometheus/client_golang/prometheus"
)
// init registers an nsq-operator version-info gauge with the default
// Prometheus registry. The gauge's value is the constant 1; the build and
// runtime details are carried entirely in its labels.
func init() {
	labels := []string{"major", "minor", "gitVersion", "gitCommit", "gitTreeState", "buildDate", "goVersion", "compiler", "platform"}
	gauge := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "nsq_operator_version_info",
			Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which NSQ-Operator was built, and platform on which it is running.",
		},
		labels,
	)
	info := version.Get()
	gauge.WithLabelValues(
		info.Major, info.Minor, info.GitVersion,
		info.GitCommit, info.GitTreeState, info.BuildDate,
		info.GoVersion, info.Compiler, info.Platform,
	).Set(1)
	prometheus.MustRegister(gauge)
}
|
package main
import (
"fmt"
"time"
)
// fareTable maps pickup location -> drop-off location -> fare, replacing
// the original duplicated nested switch statements.
var fareTable = map[string]map[string]int{
	"Aluu":     {"Alakahia": 1000, "Choba": 500, "Mgbuoba": 2000, "Rumola": 2500, "Rumosi": 1000, "Rumokoro": 1500},
	"Alakahia": {"Aluu": 1000, "Choba": 500, "Mgbuoba": 1500, "Rumola": 2000, "Rumosi": 500, "Rumokoro": 1000},
	"Choba":    {"Alakahia": 500, "Aluu": 500, "Mgbuoba": 1500, "Rumola": 2000, "Rumosi": 1000, "Rumokoro": 1500},
	"Mgbuoba":  {"Alakahia": 1500, "Aluu": 2000, "Choba": 1500, "Rumola": 1500, "Rumosi": 500, "Rumokoro": 1000},
	"Rumokoro": {"Alakahia": 1500, "Aluu": 1500, "Choba": 1500, "Mgbuoba": 1000, "Rumola": 1000, "Rumosi": 500},
	"Rumola":   {"Alakahia": 2000, "Aluu": 2500, "Choba": 2000, "Mgbuoba": 1500, "Rumosi": 500, "Rumokoro": 1000},
	"Rumosi":   {"Alakahia": 1000, "Aluu": 1500, "Choba": 1500, "Mgbuoba": 2000, "Rumokoro": 1000, "Rumola": 500},
}

// fareBetween looks up the fare from pickup to drop. ok is false when
// either location is unknown or the pair has no route.
func fareBetween(pickup, drop string) (fare int, ok bool) {
	drops, ok := fareTable[pickup]
	if !ok {
		return 0, false
	}
	fare, ok = drops[drop]
	return fare, ok
}

// main drives the interactive Cabbie ride flow: pick-up/drop-off prompts,
// fare lookup, a short "arrival" animation, payment, and tip.
//
// Fixes over the original: gofmt-illegal "fmt. Println" calls; the
// tick/boom select loop previously fell into its default case, slept 10s,
// and broke out before either channel ever fired (and the boom case
// returned early, skipping payment entirely); the underpayment loop read
// the corrected amount but then overwrote it ("Correct = Change"),
// discarding the user's input.
func main() {
	fmt.Println("Welcome to Cabbie")
	fmt.Printf("\n")
	fmt.Println("Our Operational areas include: \n Alakahia \n Aluu \n Choba \n Rumosi \n Rumola \n Mgbuoba")
	// Get users name.
	var name string
	fmt.Println("Enter your name: ")
	fmt.Scanf("%s", &name)
	fmt.Printf("\n")
	// Get users current location.
	fmt.Println("Hello " + name + " please choose your pick up location.")
	var Location string
	fmt.Scanf("%s", &Location)
	fmt.Printf("\n")
	fmt.Println("Your pickup point is " + Location + ", Choose your drop off location.")
	// Get users drop off location.
	var Drop string
	fmt.Scanf("%s", &Drop)
	fmt.Printf("\n")

	fare, ok := fareBetween(Location, Drop)
	if !ok {
		fmt.Println("Location false")
		fmt.Println("Bye Laters")
		return
	}

	fmt.Println("Your fare from " + Location + " to " + Drop + " is: ")
	fmt.Print(fare)
	fmt.Printf("\n")

	// Print an "anticipating arrival" line every 100ms until the cab
	// "arrives" after 500ms.
	tick := time.Tick(100 * time.Millisecond)
	boom := time.After(500 * time.Millisecond)
arrival:
	for {
		select {
		case <-tick:
			fmt.Println("Anticipating Arrival.............................")
		case <-boom:
			break arrival
		}
	}
	fmt.Println("We have arrived")
	fmt.Printf("\n")

	var Change int
	var Fare int
	var Correct int
	fmt.Println("Enter your Fare")
	fmt.Scanf("%d", &Fare)
	Change = Fare - fare
	if Fare > fare {
		fmt.Println("Your Change is:")
		fmt.Print(Change)
		fmt.Printf("\n")
	} else if Fare < fare {
		// Re-prompt for a sufficient payment; after 4 failed attempts the
		// rider is "reported" and the ride ends.
		for attempts := 1; ; attempts++ {
			fmt.Println("Your fare is incorrect, Please input correct amount")
			fmt.Println("Enter correct fare")
			fmt.Scanf("%d", &Correct)
			if Correct >= fare {
				Fare = Correct
				Change = Fare - fare
				break
			}
			if attempts%4 == 0 {
				fmt.Println("You have been reported to the police")
				return
			}
		}
	}

	var tip int
	fmt.Println("Enter your tip: ")
	fmt.Scanf("%d", &tip)
	fmt.Printf("\n")
	if tip > fare {
		fmt.Println("Gracias Mucho")
		fmt.Printf("\n")
	} else if tip < 100 {
		fmt.Println("You are a Stingy ass bastard")
		fmt.Printf("\n")
	}
	fmt.Printf("\n")
	fmt.Println("THANKS FOR USING CABBIE")
}
|
package file
import (
"testing"
"bldy.build/build/workspace/testws"
"bldy.build/build/label"
)
// TestPath checks that file.New resolves a file label, relative to its
// package and workspace directory, to the expected absolute path.
func TestPath(t *testing.T) {
	tests := []struct {
		name string
		file string
		pkg  string
		wd   string
		path string
	}{
		{
			name: "fileAtRoot",
			file: "a.c",
			pkg:  "//:",
			wd:   "/home/x/src/awesomeproject/",
			path: "/home/x/src/awesomeproject/a.c",
		},
		{
			name: "fileAbsolute",
			file: "//:a.c",
			pkg:  "//:",
			wd:   "/home/x/src/awesomeproject/",
			path: "/home/x/src/awesomeproject/a.c",
		},
		{
			name: "fileInPkg",
			file: "a.c",
			pkg:  "//b:",
			wd:   "/home/x/src/awesomeproject/",
			path: "/home/x/src/awesomeproject/b/a.c",
		},
		{
			name: "fileInDir",
			file: "b/a.c",
			pkg:  "//.:",
			wd:   "/home/x/src/awesomeproject/",
			path: "/home/x/src/awesomeproject/b/a.c",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			file, _ := label.Parse(test.file) // these aren't the errors we are testing for
			pkg, _ := label.Parse(test.pkg)   //
			f := New(file, pkg, &testws.TestWS{WD: test.wd})
			if expected, got := test.path, f.Path(); expected != got {
				// t.Errorf marks the failure and records the message in one
				// call (the original used t.Logf + t.Fail).
				t.Errorf("was expecting %q got %q instead", expected, got)
			}
		})
	}
}
|
package storage
var (
	// impl holds the registered storage Manager implementation; it is nil
	// until RegisterImplementor is called.
	impl Manager
)
// Implementor returns the storage manage service implementor.
// It returns nil when no implementor has been registered yet.
func Implementor() Manager {
	return impl
}
// RegisterImplementor registers the storage manage service implementor.
// A later call replaces any previously registered implementor; callers
// are expected to register once during startup.
func RegisterImplementor(mgr Manager) {
	impl = mgr
}
// Manager creates and retrieves per-component storage instances.
type Manager interface {
	// New initializes storage for the named component.
	New(component string)
	// Get returns the Storage previously created for the named component.
	Get(component string) Storage
}
|
package main
import (
	"io"
	"log"
	"os"
	"path"
	"path/filepath"

	"gatewayManager/controllers"
	"gatewayManager/models"
	"github.com/gin-gonic/gin"
)
// main configures file-based gin logging, connects the database, serves
// the SPA assets for unmatched routes, registers the gateway/device REST
// endpoints, and listens on :8080.
func main() {
	gin.DisableConsoleColor()
	// Logging to a file. The original discarded the error; a failed create
	// would have crashed gin with a nil writer later.
	f, err := os.Create("app.log")
	if err != nil {
		log.Fatalf("create log file: %v", err)
	}
	gin.DefaultWriter = io.MultiWriter(f)
	r := gin.Default()
	models.ConnectDataBase()
	// Unmatched routes serve the single-page app: extensionless paths get
	// index.html, anything else is served from the static html directory.
	r.NoRoute(func(c *gin.Context) {
		dir, file := path.Split(c.Request.RequestURI)
		ext := filepath.Ext(file)
		if file == "" || ext == "" {
			c.File("../html/index.html")
		} else {
			c.File("../html" + path.Join(dir, file))
		}
	})
	r.GET("/api/gateways", controllers.FindGateways)
	r.POST("/api/gateways", controllers.CreateGateway)
	r.GET("/api/gateways/:id", controllers.FindGateway)
	r.DELETE("/api/gateways/:id", controllers.DeleteGateway)
	r.GET("/api/devices", controllers.FindDevices)
	r.POST("/api/devices", controllers.CreateDevice)
	r.GET("/api/devices/:id", controllers.FindDevice)
	r.DELETE("/api/devices/:id", controllers.DeleteDevice)
	// r.Run's error (e.g. port already in use) was previously ignored.
	if err := r.Run(":8080"); err != nil {
		log.Fatalf("server exited: %v", err)
	}
}
|
package hystrix
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestOptionsAreSet verifies that every functional option passed to
// NewClient is reflected on the constructed client.
func TestOptionsAreSet(t *testing.T) {
	c := NewClient(
		WithHTTPTimeout(10*time.Second),
		WithCommandName("test"),
		WithHystrixTimeout(1100),
		WithMaxConcurrentRequests(10),
		WithErrorPercentThreshold(30),
		WithSleepWindow(5),
		WithRequestVolumeThreshold(5),
	)
	assert.Equal(t, 10*time.Second, c.timeout)
	assert.Equal(t, "test", c.hystrixCommandName)
	// NOTE(review): the bare 1100 is stored as time.Duration(1100), i.e.
	// 1100 nanoseconds — confirm callers intend to pass a time.Duration.
	assert.Equal(t, time.Duration(1100), c.hystrixTimeout)
	assert.Equal(t, 10, c.maxConcurrentRequests)
	assert.Equal(t, 30, c.errorPercentThreshold)
	assert.Equal(t, 5, c.sleepWindow)
	assert.Equal(t, 5, c.requestVolumeThreshold)
}
// TestOptionsHaveDefaults verifies the client's default settings when only
// the command name is supplied.
func TestOptionsHaveDefaults(t *testing.T) {
	c := NewClient(WithCommandName("test-defaults"))
	assert.Equal(t, 30*time.Second, c.timeout)
	assert.Equal(t, "test-defaults", c.hystrixCommandName)
	assert.Equal(t, 30*time.Second, c.hystrixTimeout)
	assert.Equal(t, 100, c.maxConcurrentRequests)
	assert.Equal(t, 25, c.errorPercentThreshold)
	assert.Equal(t, 10, c.sleepWindow)
	assert.Equal(t, 10, c.requestVolumeThreshold)
}
|
package validate
import (
"fmt"
"net"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestClusterName exercises ClusterName with RFC-1123-style label cases:
// lowercase alphanumerics and dashes, dot-separated labels, and a 54-char
// maximum (see maxSizeName below).
func TestClusterName(t *testing.T) {
	maxSizeName := strings.Repeat("123456789.", 5) + "1234"
	cases := []struct {
		name        string
		clusterName string
		valid       bool
	}{
		{"empty", "", false},
		{"only whitespace", " ", false},
		{"single lowercase", "a", true},
		{"single uppercase", "A", false},
		{"contains whitespace", "abc D", false},
		{"single number", "1", true},
		{"single dot", ".", false},
		{"ends with dot", "a.", false},
		{"starts with dot", ".a", false},
		{"multiple labels", "a.a", true},
		{"starts with dash", "-a", false},
		{"ends with dash", "a-", false},
		{"label starts with dash", "a.-a", false},
		{"label ends with dash", "a-.a", false},
		{"invalid percent", "a%a", false},
		{"only non-ascii", "日本語", false},
		{"contains non-ascii", "a日本語a", false},
		{"max size", maxSizeName, true},
		{"too long", maxSizeName + "a", false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := ClusterName(tc.clusterName)
			if tc.valid {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
			}
		})
	}
}
// TestOnPremClusterName checks OnPremClusterName; unlike ClusterName, a
// dotted name is rejected here (single-label names only).
func TestOnPremClusterName(t *testing.T) {
	cases := []struct {
		name        string
		clusterName string
		valid       bool
	}{
		{"single lowercase", "a", true},
		{"has a dot", "a.a", false},
		{"valid name", "abcde", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := OnPremClusterName(tc.clusterName)
			if tc.valid {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
			}
		})
	}
}
// TestClusterName1035 exercises ClusterName1035. Unlike ClusterName, a
// leading digit is rejected ("single number" case), matching RFC-1035
// label rules where names must start with a letter.
func TestClusterName1035(t *testing.T) {
	maxSizeName := "a" + strings.Repeat("123456789.", 5) + "123"
	cases := []struct {
		name        string
		clusterName string
		valid       bool
	}{
		{"empty", "", false},
		{"only whitespace", " ", false},
		{"single lowercase", "a", true},
		{"single uppercase", "A", false},
		{"contains whitespace", "abc D", false},
		{"single number", "1", false},
		{"single dot", ".", false},
		{"ends with dot", "a.", false},
		{"starts with dot", ".a", false},
		{"multiple labels", "a.a", true},
		{"starts with dash", "-a", false},
		{"ends with dash", "a-", false},
		{"label starts with dash", "a.-a", false},
		{"label ends with dash", "a-.a", false},
		{"invalid percent", "a%a", false},
		{"only non-ascii", "日本語", false},
		{"contains non-ascii", "a日本語a", false},
		{"max size", maxSizeName, true},
		{"too long", maxSizeName + "a", false},
		{"URLs", "https://hello.openshift.org", false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := ClusterName1035(tc.clusterName)
			if tc.valid {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
			}
		})
	}
}
// TestVCenter checks that vCenter endpoints (hostnames or IPs) are
// accepted or rejected.
// NOTE(review): despite the name, this calls Host, not a VCenter-specific
// validator — presumably vCenter addresses are validated via Host; confirm.
func TestVCenter(t *testing.T) {
	cases := []struct {
		name        string
		clusterName string
		valid       bool
	}{
		{"empty", "", false},
		{"only whitespace", " ", false},
		{"single lowercase", "a", true},
		{"single uppercase", "A", false},
		{"contains whitespace", "abc D", false},
		{"single number", "1", true},
		{"single dot", ".", false},
		{"ends with dot", "a.", false},
		{"starts with dot", ".a", false},
		{"multiple labels", "a.a", true},
		{"starts with dash", "-a", false},
		{"ends with dash", "a-", false},
		{"label starts with dash", "a.-a", false},
		{"label ends with dash", "a-.a", false},
		{"invalid percent", "a%a", false},
		{"only non-ascii", "日本語", false},
		{"contains non-ascii", "a日本語a", false},
		{"URLs", "https://hello.openshift.org", false},
		{"IP", "192.168.1.1", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := Host(tc.clusterName)
			if tc.valid {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
			}
		})
	}
}
// TestSubnetCIDR checks SubnetCIDR error messages: the unspecified address,
// and CIDRs whose IP has host bits set (not the network address).
// The IPv4-mapped IPv6 cases rely on net.ParseCIDR normalizing
// ::ffff:1.2.3.4/116 to the dotted-quad 1.2.3.4/20 form.
func TestSubnetCIDR(t *testing.T) {
	cases := []struct {
		cidr   string
		expErr string
	}{
		{"0.0.0.0/32", "address must be specified"},
		{"1.2.3.4/0", "invalid network address. got 1.2.3.4/0, expecting 0.0.0.0/0"},
		{"1.2.3.4/1", "invalid network address. got 1.2.3.4/1, expecting 0.0.0.0/1"},
		{"1.2.3.4/31", ""},
		{"1.2.3.4/32", ""},
		{"0:0:0:0:0:1:102:304/116", "invalid network address. got ::1:102:304/116, expecting ::1:102:0/116"},
		{"0:0:0:0:0:ffff:102:304/116", "invalid network address. got 1.2.3.4/20, expecting 1.2.0.0/20"},
		{"255.255.255.255/1", "invalid network address. got 255.255.255.255/1, expecting 128.0.0.0/1"},
		{"255.255.255.255/32", ""},
	}
	for _, tc := range cases {
		t.Run(tc.cidr, func(t *testing.T) {
			ip, cidr, err := net.ParseCIDR(tc.cidr)
			if err != nil {
				t.Fatalf("could not parse cidr: %v", err)
			}
			// Re-attach the original (possibly non-network) IP so SubnetCIDR
			// can detect host bits; cidr.IP alone would already be masked.
			err = SubnetCIDR(&net.IPNet{IP: ip, Mask: cidr.Mask})
			if tc.expErr != "" {
				assert.EqualError(t, err, tc.expErr)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
// TestDomainName_AcceptingTrailingDot checks DomainName with
// acceptTrailingDot=true: fully-qualified names ending in "." are valid.
// (The original table listed {".abc.com", false} twice, producing a
// duplicate subtest; the redundant row is removed.)
func TestDomainName_AcceptingTrailingDot(t *testing.T) {
	cases := []struct {
		domain string
		valid  bool
	}{
		{"", false},
		{" ", false},
		{"a", true},
		{".", false},
		{"日本語", false},
		{"日本語.com", false},
		{"abc.日本語.com", false},
		{"a日本語a.com", false},
		{"abc", true},
		{"ABC", false},
		{"ABC123", false},
		{"ABC123.COM123", false},
		{"1", true},
		{"0.0", true},
		{"1.2.3.4", true},
		{"1.2.3.4.", true},
		{"abc.", true},
		{"abc.com", true},
		{"abc.com.", true},
		{"a.b.c.d.e.f", true},
		{".abc", false},
		{".abc.com", false},
	}
	for _, tc := range cases {
		t.Run(tc.domain, func(t *testing.T) {
			err := DomainName(tc.domain, true)
			if tc.valid {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
			}
		})
	}
}
// TestDomainName_RejectingTrailingDot checks DomainName with
// acceptTrailingDot=false: names ending in "." are rejected.
// (The original table listed {".abc.com", false} twice, producing a
// duplicate subtest; the redundant row is removed.)
func TestDomainName_RejectingTrailingDot(t *testing.T) {
	cases := []struct {
		domain string
		valid  bool
	}{
		{"", false},
		{" ", false},
		{"a", true},
		{".", false},
		{"日本語", false},
		{"日本語.com", false},
		{"abc.日本語.com", false},
		{"a日本語a.com", false},
		{"abc", true},
		{"ABC", false},
		{"ABC123", false},
		{"ABC123.COM123", false},
		{"1", true},
		{"0.0", true},
		{"1.2.3.4", true},
		{"1.2.3.4.", false},
		{"abc.", false},
		{"abc.com", true},
		{"abc.com.", false},
		{"a.b.c.d.e.f", true},
		{".abc", false},
		{".abc.com", false},
	}
	for _, tc := range cases {
		t.Run(tc.domain, func(t *testing.T) {
			err := DomainName(tc.domain, false)
			if tc.valid {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
			}
		})
	}
}
// TestNoProxyDomainName checks NoProxyDomainName. Unlike DomainName, a
// leading dot is valid here (".abc", ".abc.com") — the noProxy convention
// for matching a domain and all its subdomains.
func TestNoProxyDomainName(t *testing.T) {
	cases := []struct {
		domain string
		valid  bool
	}{
		{"", false},
		{" ", false},
		{"a", true},
		{".", false},
		{"日本語", false},
		{"日本語.com", false},
		{"abc.日本語.com", false},
		{"a日本語a.com", false},
		{"abc", true},
		{"ABC", false},
		{"ABC123", false},
		{"ABC123.COM123", false},
		{"1", true},
		{"0.0", true},
		{"1.2.3.4", true},
		{"1.2.3.4.", true},
		{"abc.", true},
		{"abc.com", true},
		{"abc.com.", true},
		{"a.b.c.d.e.f", true},
		{".abc", true},
		{".abc.com", true},
	}
	for _, tc := range cases {
		t.Run(tc.domain, func(t *testing.T) {
			err := NoProxyDomainName(tc.domain)
			if tc.valid {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
			}
		})
	}
}
// TestDoCIDRsOverlap checks DoCIDRsOverlap on adjacent, disjoint, nested,
// and catch-all (0.0.0.0/0) CIDR pairs.
func TestDoCIDRsOverlap(t *testing.T) {
	cases := []struct {
		a       string
		b       string
		overlap bool
	}{
		{
			a:       "192.168.0.0/30",
			b:       "192.168.0.3/30",
			overlap: true,
		},
		{
			a:       "192.168.0.0/30",
			b:       "192.168.0.4/30",
			overlap: false,
		},
		{
			a:       "192.168.0.0/29",
			b:       "192.168.0.4/30",
			overlap: true,
		},
		{
			a:       "0.0.0.0/0",
			b:       "192.168.0.0/24",
			overlap: true,
		},
	}
	for _, tc := range cases {
		t.Run(fmt.Sprintf("%s %s", tc.a, tc.b), func(t *testing.T) {
			_, a, err := net.ParseCIDR(tc.a)
			if err != nil {
				t.Fatalf("could not parse cidr %q: %v", tc.a, err)
			}
			_, b, err := net.ParseCIDR(tc.b)
			if err != nil {
				t.Fatalf("could not parse cidr %q: %v", tc.b, err)
			}
			actual := DoCIDRsOverlap(a, b)
			assert.Equal(t, tc.overlap, actual)
		})
	}
}
// TestImagePullSecret checks ImagePullSecret: a pull secret must be a JSON
// object with a non-empty "auths" map, and every entry must contain either
// an "auth" or a "credsStore" field (extra fields are allowed).
func TestImagePullSecret(t *testing.T) {
	cases := []struct {
		name   string
		secret string
		valid  bool
	}{
		{
			name:   "single entry with auth",
			secret: `{"auths":{"example.com":{"auth":"authorization value"}}}`,
			valid:  true,
		},
		{
			name:   "single entry with credsStore",
			secret: `{"auths":{"example.com":{"credsStore":"creds store value"}}}`,
			valid:  true,
		},
		{
			name:   "empty",
			secret: `{}`,
			valid:  false,
		},
		{
			name:   "no auths",
			secret: `{"not-auths":{"example.com":{"auth":"authorization value"}}}`,
			valid:  false,
		},
		{
			name:   "no auth or credsStore",
			secret: `{"auths":{"example.com":{"unrequired-field":"value"}}}`,
			valid:  false,
		},
		{
			name:   "additional fields",
			secret: `{"auths":{"example.com":{"auth":"authorization value","other-field":"other field value"}}}`,
			valid:  true,
		},
		{
			name:   "no entries",
			secret: `{"auths":{}}`,
			valid:  false,
		},
		{
			name:   "multiple valid entries",
			secret: `{"auths":{"example.com":{"auth":"authorization value"},"other-example.com":{"auth":"other auth value"}}}`,
			valid:  true,
		},
		{
			name:   "mix of valid and invalid entries",
			secret: `{"auths":{"example.com":{"auth":"authorization value"},"other-example.com":{"unrequired-field":"value"}}}`,
			valid:  false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := ImagePullSecret(tc.secret)
			if tc.valid {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
			}
		})
	}
}
// invalidFormatCertificate carries the same DER payload as
// validCACertificate but with bogus "-----INVALID FORMAT-----" PEM
// markers, so PEM decoding finds no CERTIFICATE block.
const invalidFormatCertificate = `-----INVALID FORMAT-----
MIIF2zCCA8OgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgYExCzAJBgNVBAYTAlVT
MRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEQMA4GA1UEBwwHUmFsZWlnaDEUMBIG
A1UECgwLUmVkIEhhdCBJbmMxHzAdBgNVBAsMFk9wZW5TaGlmdCBJbnN0YWxsIFRl
c3QxEDAOBgNVBAMMB1Jvb3QgQ0EwHhcNMTkwNzIyMjAwNzUxWhcNMjkwNzE5MjAw
NzUxWjB3MQswCQYDVQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFDAS
BgNVBAoMC1JlZCBIYXQgSW5jMR8wHQYDVQQLDBZPcGVuU2hpZnQgSW5zdGFsbCBU
ZXN0MRgwFgYDVQQDDA9JbnRlcm1lZGlhdGUgQ0EwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDZhc69vEq9XyG+vcOW4rPx9aYJgn7NFXaE88xrKajFyu2v
kD5Mz7geQV/RQKp1RMvj/1JCW5Npw8QwoPXNGQ8M+d+ajGgSkUZNVBQRXiR/hpfK
ohox9gJRsOVCAvhyE15iZHkEVFFcchiWbsTM9QllLsiiI0qZ/QpkUmJmDyXUV4Hq
hoAGXsojp0xaEQhrl+Hayiwao7qZkbKFCbNIDFU++ZDNT41qqDwcYmbkBJgYoGdS
IAk4Mjf7+rLJPXWNYtYB3g1cuN4pH8FkFT9zocNr0xrsx2itY4gvXgIe/vzts8aw
sHx1h2HcZK7iJEHs25QGrsZhiADeb0i5pN1kaPqpY0qgQUCIaqZAtMMeHXQ0k3PB
xTz8vk0388oFLaJFuI0P9Q6CRf5+4rc9O201aUIuue3Y4IS6zAcd8yL5d5vxvCiN
Dbl7YenBS4C9xSEEiVZwN7AtIdKFq5pGrlptmhVbGFW1CLQNsVWpetCY12Sh9FOq
2IBaAup+XgRgO4kHs3t7euVaS2viH3MplPsOUim8NZPZBdZkTtS3W9SynBDriy1d
KtrYgz0zrgEAa82mq4INaR+7Utct97zhKa1zM47KlHgkauiTPkUcqVhoNWxdM5tI
nSWym/9pPHUmzt8v/F8COA/8Xv+db2QX14S3fStI+8mp084RWuevtbh5WcoypQID
AQABo2YwZDAdBgNVHQ4EFgQUPUqJPYDZeUXbBlR0xXA/F+DYYagwHwYDVR0jBBgw
FoAUjWflPh3KYZ5o3BP3Po4v2ZBshVkwEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNV
HQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAH665ntrBhyf+MPFnkY+1VUr
VrfRlP4SccoujdLB/sUKqydYsED+mDJ+V8uFOgoi7PHqwvsRS+yR/bB0bNNYSfKY
slCMQA3sJ7SNDPBsec955ehYPNdquhem+oICzgFaQwL9ULDG87fKZjmaKO25dIYX
ttLqn+0b0GjpfQRuZ3NpAnCTWevodc5A3aYQm6vYeCyeIHGPpmtLE6oPRFib7wtD
n4DFVM57F34ClnnF4m8jq9HoTcM1Y3qOFyslK/4FRyx3HXbEVsm5L289l0AS866U
WEVM9DCqpFNLTwRk0mn4mspNcRxTDUTiHAxMhKxHGgbPcFzCJXqZzkW56bDcAGA5
sQr+MOfa1P/K7pVcFtOAhsBi5ff1G4t1G1+amqXEDalL+qKRGFugGVf+poyb2C3g
sfxkPBp9jPPMgMzXULQglwU4IUm8GtBb9Lh6AFPvt78XAWvNvHLP1Rf8JNZ9prx5
N9RzIKSWKm6CVEjSDvQ42j4OpW0eecHAoluZFMrykVl+KmapWUwQF6v0xz1RJdQ+
q3vGJ6shhiFd6y0ygxPwMaEjhhpbRy4tK9iDBj5yRpo+HE5X+FQSN6NHOYWMeDoZ
uzd86/huEH5qIAL4unM9YFTzJ4CFOC8EJMDW6ul0uKjOwGPP3R1Vss6sC7kR0gXI
rLWYdt40z0pjcR3FDVzh
-----INVALID FORMAT-----
`

// validCACertificate is a well-formed intermediate-CA certificate
// (basicConstraints CA:TRUE) used as a positive trust-bundle fixture.
const validCACertificate = `-----BEGIN CERTIFICATE-----
MIIF2zCCA8OgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgYExCzAJBgNVBAYTAlVT
MRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEQMA4GA1UEBwwHUmFsZWlnaDEUMBIG
A1UECgwLUmVkIEhhdCBJbmMxHzAdBgNVBAsMFk9wZW5TaGlmdCBJbnN0YWxsIFRl
c3QxEDAOBgNVBAMMB1Jvb3QgQ0EwHhcNMTkwNzIyMjAwNzUxWhcNMjkwNzE5MjAw
NzUxWjB3MQswCQYDVQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFDAS
BgNVBAoMC1JlZCBIYXQgSW5jMR8wHQYDVQQLDBZPcGVuU2hpZnQgSW5zdGFsbCBU
ZXN0MRgwFgYDVQQDDA9JbnRlcm1lZGlhdGUgQ0EwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDZhc69vEq9XyG+vcOW4rPx9aYJgn7NFXaE88xrKajFyu2v
kD5Mz7geQV/RQKp1RMvj/1JCW5Npw8QwoPXNGQ8M+d+ajGgSkUZNVBQRXiR/hpfK
ohox9gJRsOVCAvhyE15iZHkEVFFcchiWbsTM9QllLsiiI0qZ/QpkUmJmDyXUV4Hq
hoAGXsojp0xaEQhrl+Hayiwao7qZkbKFCbNIDFU++ZDNT41qqDwcYmbkBJgYoGdS
IAk4Mjf7+rLJPXWNYtYB3g1cuN4pH8FkFT9zocNr0xrsx2itY4gvXgIe/vzts8aw
sHx1h2HcZK7iJEHs25QGrsZhiADeb0i5pN1kaPqpY0qgQUCIaqZAtMMeHXQ0k3PB
xTz8vk0388oFLaJFuI0P9Q6CRf5+4rc9O201aUIuue3Y4IS6zAcd8yL5d5vxvCiN
Dbl7YenBS4C9xSEEiVZwN7AtIdKFq5pGrlptmhVbGFW1CLQNsVWpetCY12Sh9FOq
2IBaAup+XgRgO4kHs3t7euVaS2viH3MplPsOUim8NZPZBdZkTtS3W9SynBDriy1d
KtrYgz0zrgEAa82mq4INaR+7Utct97zhKa1zM47KlHgkauiTPkUcqVhoNWxdM5tI
nSWym/9pPHUmzt8v/F8COA/8Xv+db2QX14S3fStI+8mp084RWuevtbh5WcoypQID
AQABo2YwZDAdBgNVHQ4EFgQUPUqJPYDZeUXbBlR0xXA/F+DYYagwHwYDVR0jBBgw
FoAUjWflPh3KYZ5o3BP3Po4v2ZBshVkwEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNV
HQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAH665ntrBhyf+MPFnkY+1VUr
VrfRlP4SccoujdLB/sUKqydYsED+mDJ+V8uFOgoi7PHqwvsRS+yR/bB0bNNYSfKY
slCMQA3sJ7SNDPBsec955ehYPNdquhem+oICzgFaQwL9ULDG87fKZjmaKO25dIYX
ttLqn+0b0GjpfQRuZ3NpAnCTWevodc5A3aYQm6vYeCyeIHGPpmtLE6oPRFib7wtD
n4DFVM57F34ClnnF4m8jq9HoTcM1Y3qOFyslK/4FRyx3HXbEVsm5L289l0AS866U
WEVM9DCqpFNLTwRk0mn4mspNcRxTDUTiHAxMhKxHGgbPcFzCJXqZzkW56bDcAGA5
sQr+MOfa1P/K7pVcFtOAhsBi5ff1G4t1G1+amqXEDalL+qKRGFugGVf+poyb2C3g
sfxkPBp9jPPMgMzXULQglwU4IUm8GtBb9Lh6AFPvt78XAWvNvHLP1Rf8JNZ9prx5
N9RzIKSWKm6CVEjSDvQ42j4OpW0eecHAoluZFMrykVl+KmapWUwQF6v0xz1RJdQ+
q3vGJ6shhiFd6y0ygxPwMaEjhhpbRy4tK9iDBj5yRpo+HE5X+FQSN6NHOYWMeDoZ
uzd86/huEH5qIAL4unM9YFTzJ4CFOC8EJMDW6ul0uKjOwGPP3R1Vss6sC7kR0gXI
rLWYdt40z0pjcR3FDVzh
-----END CERTIFICATE-----
`

// validCertificate is a well-formed leaf (server) certificate for
// registry.example.com, used as a positive fixture.
const validCertificate = `-----BEGIN CERTIFICATE-----
MIIF0TCCA7mgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwdzELMAkGA1UEBhMCVVMx
FzAVBgNVBAgMDk5vcnRoIENhcm9saW5hMRQwEgYDVQQKDAtSZWQgSGF0IEluYzEf
MB0GA1UECwwWT3BlblNoaWZ0IEluc3RhbGwgVGVzdDEYMBYGA1UEAwwPSW50ZXJt
ZWRpYXRlIENBMB4XDTE5MDcyMjIwMTMwMloXDTIwMDczMTIwMTMwMlowgY4xCzAJ
BgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEQMA4GA1UEBwwHUmFs
ZWlnaDEUMBIGA1UECgwLUmVkIEhhdCBJbmMxHzAdBgNVBAsMFk9wZW5TaGlmdCBJ
bnN0YWxsIFRlc3QxHTAbBgNVBAMMFHJlZ2lzdHJ5LmV4YW1wbGUuY29tMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyCT3n3zYL7PkLtnzBU9WUyZBz1Q+
SXUP739DjT+xmRunE1ViD2wfkVIhTHowlw6B7+23tSRQngEu5i4+lglzqouYY5jE
sqWUXaPMa5FeeDstI6LIUxqk9/2yWRBrrdJlVWor57F310aTzkYtkmkCJTDy3k9R
Le8jma8fnchaVpttbHgN/F+CiS+OV8u9PtALGuJ4DfHy2hM4pxhiKMxFpYOaxBuq
41Y0ts8CXyEiVWwZB7+fFrAjog8uJuhAdya1rAWvSDo+GQr2CDY2/PJcAVHO1n9F
h1LkjqIOd4OOqOy9gIYDc5bvZvWGuzeCN8icrdH8KM53witq5yhZHRL4EQIDAQAB
o4IBTTCCAUkwCQYDVR0TBAIwADARBglghkgBhvhCAQEEBAMCBkAwMwYJYIZIAYb4
QgENBCYWJE9wZW5TU0wgR2VuZXJhdGVkIFNlcnZlciBDZXJ0aWZpY2F0ZTAdBgNV
HQ4EFgQUxzJ/lMs83RIoGheipNbag+SZr4wwga8GA1UdIwSBpzCBpIAUPUqJPYDZ
eUXbBlR0xXA/F+DYYaihgYekgYQwgYExCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5O
b3J0aCBDYXJvbGluYTEQMA4GA1UEBwwHUmFsZWlnaDEUMBIGA1UECgwLUmVkIEhh
dCBJbmMxHzAdBgNVBAsMFk9wZW5TaGlmdCBJbnN0YWxsIFRlc3QxEDAOBgNVBAMM
B1Jvb3QgQ0GCAhAAMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcD
ATANBgkqhkiG9w0BAQsFAAOCAgEALXbZyIAXZPBBe+uWBscUFpmXvfQR9CQRbnYi
X4453dLKZqEsWNVcUQihrfaNyHCrkG7dSgT64JWWJcyXLXe9TfVR9FLGjzt4p0P2
V9T+PFjp3JN+Elh6XDeNisZ7fHzYs2yYnugZELdWkLOcUwkvUHhSQ5aSWYFrngn7
J3mT3GS3WSpLUvVQDn3RBDbS0ossnF1tq9n6Nhs4Xvhdso6gEZU9SeztbnSK9N/k
zWLV5PjgwpevJ17jzpxm7ZIAlcp31i4SIircJtGwgUS3cJZXPPWMdK72qnLQjFMF
BNEc11EBilMK8tn/K4Dn06BBJMRtOCkq0KhopeZX0HmtQE29z6hy9fcTBklCwLXQ
NMSOKemXsOiGTwghosa0xw0H2e9R8z9KTX5xBGgHbHbWu7e/oDVY9+XTnfQ0ZqFi
aBa/U/WWLMQQDNvQQGsllxBHC+pOpDD8YhycPmbpsFfhNo58U9VQ6mqtX3o7j5nP
imNTY4B5RmZUILe+C0XhON6VL5RCa+s6YngIUcfeylTSB8BTeVBxIAInubKzrgZM
4ThJWLbaiTkRqaT/viDfxsmgzJsrDm3ZWzYXwF/a5o6NHK4lqCYf/1nvbjgE5PAm
69R88P32rKeiRJ8AoC4N/5YR++NkB11gsW9ooU2nV90owi6eMhQ6+qGLTrq0mTtv
CNA1OOo=
-----END CERTIFICATE-----
`

// invalidStringWithCertificate is validCertificate followed by trailing
// junk after the END marker; the bundle as a whole must be rejected.
const invalidStringWithCertificate = `-----BEGIN CERTIFICATE-----
MIIF0TCCA7mgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwdzELMAkGA1UEBhMCVVMx
FzAVBgNVBAgMDk5vcnRoIENhcm9saW5hMRQwEgYDVQQKDAtSZWQgSGF0IEluYzEf
MB0GA1UECwwWT3BlblNoaWZ0IEluc3RhbGwgVGVzdDEYMBYGA1UEAwwPSW50ZXJt
ZWRpYXRlIENBMB4XDTE5MDcyMjIwMTMwMloXDTIwMDczMTIwMTMwMlowgY4xCzAJ
BgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEQMA4GA1UEBwwHUmFs
ZWlnaDEUMBIGA1UECgwLUmVkIEhhdCBJbmMxHzAdBgNVBAsMFk9wZW5TaGlmdCBJ
bnN0YWxsIFRlc3QxHTAbBgNVBAMMFHJlZ2lzdHJ5LmV4YW1wbGUuY29tMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyCT3n3zYL7PkLtnzBU9WUyZBz1Q+
SXUP739DjT+xmRunE1ViD2wfkVIhTHowlw6B7+23tSRQngEu5i4+lglzqouYY5jE
sqWUXaPMa5FeeDstI6LIUxqk9/2yWRBrrdJlVWor57F310aTzkYtkmkCJTDy3k9R
Le8jma8fnchaVpttbHgN/F+CiS+OV8u9PtALGuJ4DfHy2hM4pxhiKMxFpYOaxBuq
41Y0ts8CXyEiVWwZB7+fFrAjog8uJuhAdya1rAWvSDo+GQr2CDY2/PJcAVHO1n9F
h1LkjqIOd4OOqOy9gIYDc5bvZvWGuzeCN8icrdH8KM53witq5yhZHRL4EQIDAQAB
o4IBTTCCAUkwCQYDVR0TBAIwADARBglghkgBhvhCAQEEBAMCBkAwMwYJYIZIAYb4
QgENBCYWJE9wZW5TU0wgR2VuZXJhdGVkIFNlcnZlciBDZXJ0aWZpY2F0ZTAdBgNV
HQ4EFgQUxzJ/lMs83RIoGheipNbag+SZr4wwga8GA1UdIwSBpzCBpIAUPUqJPYDZ
eUXbBlR0xXA/F+DYYaihgYekgYQwgYExCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5O
b3J0aCBDYXJvbGluYTEQMA4GA1UEBwwHUmFsZWlnaDEUMBIGA1UECgwLUmVkIEhh
dCBJbmMxHzAdBgNVBAsMFk9wZW5TaGlmdCBJbnN0YWxsIFRlc3QxEDAOBgNVBAMM
B1Jvb3QgQ0GCAhAAMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcD
ATANBgkqhkiG9w0BAQsFAAOCAgEALXbZyIAXZPBBe+uWBscUFpmXvfQR9CQRbnYi
X4453dLKZqEsWNVcUQihrfaNyHCrkG7dSgT64JWWJcyXLXe9TfVR9FLGjzt4p0P2
V9T+PFjp3JN+Elh6XDeNisZ7fHzYs2yYnugZELdWkLOcUwkvUHhSQ5aSWYFrngn7
J3mT3GS3WSpLUvVQDn3RBDbS0ossnF1tq9n6Nhs4Xvhdso6gEZU9SeztbnSK9N/k
zWLV5PjgwpevJ17jzpxm7ZIAlcp31i4SIircJtGwgUS3cJZXPPWMdK72qnLQjFMF
BNEc11EBilMK8tn/K4Dn06BBJMRtOCkq0KhopeZX0HmtQE29z6hy9fcTBklCwLXQ
NMSOKemXsOiGTwghosa0xw0H2e9R8z9KTX5xBGgHbHbWu7e/oDVY9+XTnfQ0ZqFi
aBa/U/WWLMQQDNvQQGsllxBHC+pOpDD8YhycPmbpsFfhNo58U9VQ6mqtX3o7j5nP
imNTY4B5RmZUILe+C0XhON6VL5RCa+s6YngIUcfeylTSB8BTeVBxIAInubKzrgZM
4ThJWLbaiTkRqaT/viDfxsmgzJsrDm3ZWzYXwF/a5o6NHK4lqCYf/1nvbjgE5PAm
69R88P32rKeiRJ8AoC4N/5YR++NkB11gsW9ooU2nV90owi6eMhQ6+qGLTrq0mTtv
CNA1OOo=
-----END CERTIFICATE-----
Invalid data here
`
// TestAdditionalTrustBundle verifies that CABundle accepts well-formed PEM
// certificate bundles and rejects malformed or polluted input.
func TestAdditionalTrustBundle(t *testing.T) {
	tests := []struct {
		name        string
		certificate string
		valid       bool
	}{
		{name: "valid ca certificate", certificate: validCACertificate, valid: true},
		{name: "valid certificate", certificate: validCertificate, valid: true},
		{name: "invalid format", certificate: invalidFormatCertificate, valid: false},
		// NOTE(review): this case reuses invalidFormatCertificate, making it a
		// duplicate of "invalid format" — presumably a distinct invalid-certificate
		// fixture was intended here; confirm against the fixture constants.
		{name: "invalid certificate", certificate: invalidFormatCertificate, valid: false},
		{name: "valid certificate with extra invalid string", certificate: invalidStringWithCertificate, valid: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := CABundle(tt.certificate)
			if !tt.valid {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
		})
	}
}
// TestSSHPublicKey verifies SSHPublicKey against well-formed authorized_keys
// entries (with and without a trailing comment) and clearly broken ones.
func TestSSHPublicKey(t *testing.T) {
	tests := []struct {
		name  string
		key   string
		valid bool
	}{
		{
			name:  "valid",
			key:   "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q==",
			valid: true,
		},
		{
			name:  "valid with email",
			key:   "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== name@example.com",
			valid: true,
		},
		{
			// NOTE(review): despite the name, this expects success. That is
			// consistent with ssh.ParseAuthorizedKey, which skips the leading
			// key-type field and parses the base64 blob on its own — confirm
			// this leniency is intended by the validator.
			name:  "invalid format",
			key:   "bad-format AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q==",
			valid: true,
		},
		{
			name:  "invalid key",
			key:   "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL",
			valid: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := SSHPublicKey(tt.key)
			if !tt.valid {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
		})
	}
}
// TestURI verifies that URI accepts absolute URIs and rejects values that
// lack a scheme.
func TestURI(t *testing.T) {
	tests := []struct {
		name  string
		uri   string
		valid bool
	}{
		{name: "valid", uri: "https://example.com", valid: true},
		{name: "missing scheme", uri: "example.com", valid: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := URI(tt.uri)
			if !tt.valid {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
		})
	}
}
// TestMAC verifies MAC address validation: a unicast MAC passes, while
// multicast, InfiniBand-length, and garbage inputs produce errors whose
// message matches the expected pattern.
func TestMAC(t *testing.T) {
	tests := []struct {
		name     string
		addr     string
		expected string // empty means the address must validate cleanly
	}{
		{name: "valid_mac", addr: "7A:CE:E3:29:35:6F"},
		{name: "invalid_multicast", addr: "7D:CE:E3:29:35:6F", expected: "expected unicast mac address"},
		{name: "invalid_infiniband", addr: "00-00-00-00-fe-80-00-00-00-00-00-00-02-00-5e-10-00-00-00-01", expected: "invalid MAC address"},
		{name: "invalid_mac", addr: "this is a bad mac", expected: "invalid MAC address"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := MAC(tt.addr)
			if tt.expected != "" {
				assert.Regexp(t, tt.expected, err)
				return
			}
			assert.NoError(t, err)
		})
	}
}
|
package main
import (
	"fmt"
	"log"
	"os"
	"time"

	// Blank-imported: nothing in this file references util directly.
	// Kept to preserve the original dependency — TODO confirm whether it
	// is needed for side effects; otherwise drop it.
	_ "github.com/sanguohot/medichain/util"

	"github.com/urfave/cli"
)
// InitApp builds the medichain command-line application and runs it with
// the process arguments. The default action prints the program name,
// version, and usage line.
func InitApp() error {
	app := cli.NewApp()
	app.Name = "medichain"
	app.Usage = "command line for medichain!"
	app.Version = "1.0.1"
	app.Compiled = time.Now()
	app.Commands = nil
	app.Authors = []cli.Author{
		{
			Name:  "Sanguohot",
			Email: "hw535431@163.com",
		},
	}
	// Default action: report the binary identity and usage.
	app.Action = func(c *cli.Context) error {
		fmt.Printf("%s-%s", app.Name, app.Version)
		fmt.Printf("\n%s", app.Usage)
		return nil
	}
	return app.Run(os.Args)
}
// main runs the CLI and terminates the process with a non-zero exit code
// if the application returns an error.
func main() {
	// The original called zap.Logger.Fatal, but zap is never imported in
	// this file, so it did not compile; the standard-library logger gives
	// the same log-and-exit behavior.
	if err := InitApp(); err != nil {
		log.Fatal(err)
	}
}
|
package monitors
import (
"reflect"
"testing"
"time"
"github.com/janwiemers/up/models"
)
// TestPopulateDefaults checks that populateApplicationDefaults fills in the
// protocol, expectation, and interval defaults while leaving any fields the
// caller already set untouched.
func TestPopulateDefaults(t *testing.T) {
	cases := []struct {
		name string
		in   models.Application
		want models.Application
	}{
		{
			name: "Populate all defaults",
			in:   models.Application{Name: "test"},
			want: models.Application{
				Name:        "test",
				Protocol:    "http",
				Expectation: "200",
				Interval:    5 * time.Minute,
			},
		},
		{
			name: "Populate some defaults",
			in: models.Application{
				Name:     "test",
				Protocol: "dns",
			},
			want: models.Application{
				Name:        "test",
				Protocol:    "dns",
				Expectation: "200",
				Interval:    5 * time.Minute,
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := populateApplicationDefaults(tc.in); !reflect.DeepEqual(got, tc.want) {
				t.Errorf("populateApplicationDefaults() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package service
// Service is the minimal contract for a component that can be started by
// the application.
type Service interface {
	// Start begins the service's work. Whether Start blocks or returns
	// immediately is not visible from this file — confirm with the
	// implementations.
	Start()
}
|
package oauth2state
import (
"fmt"
"sync"
)
// MemStateStore is an in-memory implementation of StateStorer that
// can be used safely by concurrent goroutines in a single server instance
type MemStateStore struct {
	states         map[string]string // state value -> associated URL
	mutex          sync.RWMutex      // guards states
	valueGenerator ValueGenerator    // source of random state values
}
// NewMemStateStore creates a new memory store for state values, backed by
// an empty map and the cryptographic value generator.
func NewMemStateStore() *MemStateStore {
	store := &MemStateStore{}
	store.states = make(map[string]string)
	store.valueGenerator = CryptoValueGenerator{}
	return store
}
// NewState creates a new random state value and associates the given URL with that value
func (s *MemStateStore) NewState(url string) (string, error) {
	// Generate the value first, then record it; the value is returned to
	// the caller even when Add reports an error, matching Add's contract.
	value := s.valueGenerator.String()
	if err := s.Add(value, url); err != nil {
		return value, err
	}
	return value, nil
}
// Add a new state/url combination to the store. An empty state is
// rejected; an existing entry for state is overwritten.
func (s *MemStateStore) Add(state, url string) error {
	// Validate before taking the lock — no shared state is touched yet.
	// Error string lowercased per Go convention (staticcheck ST1005).
	if state == "" {
		return fmt.Errorf("state argument not provided")
	}
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.states[state] = url
	return nil
}
// Contains checks if the given state value exists in the store.
// An empty state is rejected with an error.
func (s *MemStateStore) Contains(state string) (bool, error) {
	// Validate before taking the lock — no shared state is touched yet.
	// Error string lowercased per Go convention (staticcheck ST1005).
	if state == "" {
		return false, fmt.Errorf("state argument not provided")
	}
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	_, ok := s.states[state]
	return ok, nil
}
// URL retrieves the URL that is associated with a given state value.
// It returns an error for an empty state or for a state that was never
// added (or has been deleted).
func (s *MemStateStore) URL(state string) (string, error) {
	// Validate before taking the lock — no shared state is touched yet.
	// Error string lowercased per Go convention (staticcheck ST1005).
	if state == "" {
		return "", fmt.Errorf("state argument not provided")
	}
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	url, ok := s.states[state]
	if !ok {
		return "", fmt.Errorf("could not find state value %q in memory store", state)
	}
	return url, nil
}
// Delete the given state value from the store. Deleting a state that is
// not present is a no-op; an empty state is rejected with an error.
func (s *MemStateStore) Delete(state string) error {
	// Validate before taking the lock — no shared state is touched yet.
	// Error string lowercased per Go convention (staticcheck ST1005).
	if state == "" {
		return fmt.Errorf("state argument not provided")
	}
	s.mutex.Lock()
	defer s.mutex.Unlock()
	delete(s.states, state)
	return nil
}
// SetValueGenerator changes the default random number generator. Use only in tests.
// Not synchronized — call before the store is shared across goroutines.
func (s *MemStateStore) SetValueGenerator(generator ValueGenerator) {
	s.valueGenerator = generator
}
|
package main
import (
	"html/template"
	"log"
	"net/http"
)
// Product holds the data rendered into the index template.
type Product struct {
	Name  string // product display name
	Price int    // price (currency unit not specified in this file)
}
// main registers the HTTP handlers and serves on port 8080.
func main() {
	// Parse the index template once at startup; Must panics if it fails.
	var templates = template.Must(template.ParseFiles("index.html"))
	// Home page: render the index template with Product data.
	http.HandleFunc("/index", func(w http.ResponseWriter, r *http.Request) {
		// myProduct supplies the data for the index template.
		myProduct := Product{"นมสด", 500}
		// Report template-execution failures instead of silently
		// dropping them (the error was previously ignored).
		if err := templates.ExecuteTemplate(w, "index.html", myProduct); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
	// Login page.
	http.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "login.html")
	})
	// Signup page.
	http.HandleFunc("/signup", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "signup.html")
	})
	// Serve a text file.
	http.HandleFunc("/file", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "file.txt")
	})
	// Listen on port 8080. ListenAndServe only returns on failure, so the
	// error (previously discarded) is now fatal.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package req
import (
"github.com/DanielRenne/mangosNode/rep"
"log"
"testing"
)
// url is the local endpoint used to wire the reply and request nodes together.
const url = "tcp://127.0.0.1:600"

// tGlobal exposes the current *testing.T to the message handlers, which run
// on goroutines outside the test function.
var tGlobal *testing.T

// messages and messages2 report handler outcomes back to TestReq:
// messages carries the request-handler result, messages2 the reply-handler result.
var messages chan string
var messages2 chan string
// TestReq wires a rep (reply) node to a req (request) node over a local TCP
// socket and performs one request/reply round trip. The unbuffered channel
// receives at the end synchronize the test with the handler goroutines, so
// their order matters.
func TestReq(t *testing.T) {
	// The handlers run outside this function, so they report failures
	// through the package-level tGlobal and the two result channels.
	tGlobal = t
	messages = make(chan string)
	messages2 = make(chan string)
	var replyNode rep.Node
	var requestNode Node
	// Start the reply side first so the request side has something to
	// connect to. NOTE(review): port 600 is in the privileged range —
	// confirm the test environment is allowed to bind it.
	err := replyNode.Listen(url, 2, handleRequests)
	if err != nil {
		t.Errorf("Error starting listen reply node at req_test.TestReq: %v", err.Error())
		return
	}
	err = requestNode.Connect(url, handleReply)
	if err != nil {
		t.Errorf("Error connecting request node at req_test.TestReq: %v", err.Error())
		return
	}
	err = requestNode.Request([]byte("MyRequest"))
	if err != nil {
		t.Errorf("Error sending request at req_test.TestReq: %v", err.Error())
		return
	}
	// Block until both handlers have reported; this also keeps the test
	// alive until the round trip completes.
	msg := <-messages
	log.Println(msg)
	msg2 := <-messages2
	log.Println(msg2)
}
// handleRequests is the reply-node callback: it verifies the incoming
// request payload, reports the outcome on the messages channel, and
// answers with "MyReply".
func handleRequests(node *rep.Node, msg []byte) {
	if string(msg) != "MyRequest" {
		tGlobal.Errorf("Failed to match the reply response message at req_test.handleRequests")
		messages <- "Handle Requests Failed"
		return
	}
	messages <- "Handle Requests Passed"
	if err := node.Reply([]byte("MyReply")); err != nil {
		tGlobal.Errorf("Error sending reply message at req_test.handleRequests: %v", err.Error())
	}
}
// handleReply is the request-node callback: it checks the reply payload
// and reports the outcome on the messages2 channel.
func handleReply(node *Node, msg []byte) {
	if string(msg) == "MyReply" {
		messages2 <- "Handle Reply Passed"
		return
	}
	tGlobal.Errorf("Failed to match the reply response message at req_test.handleReply")
	messages2 <- "Handle Reply Failed"
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.