text stringlengths 11 4.05M |
|---|
package logging
import (
"go.uber.org/zap"
"time"
)
// LogZap demonstrates structured logging with zap's production logger.
//
// NOTE(review): the error from zap.NewProduction is discarded; if construction
// failed, logger would be nil and the calls below would panic — consider
// handling it. GetError is defined elsewhere in this package.
func LogZap() {
	logger, _ := zap.NewProduction()
	// Flush buffered entries on return; Sync's error is deliberately ignored.
	defer logger.Sync()
	logger.Info("failed to fetch URL",
		zap.Int("attempt", 3),
		zap.Duration("backoff", time.Second),
		zap.Error(GetError()),
	)
}
|
package client
import (
"sync"
"time"
"github.com/MagalixCorp/magalix-agent/v3/proto"
"github.com/MagalixTechnologies/core/logger"
)
// PipeSender is the transport used by Pipe to deliver a packet: it sends in
// under the given kind and decodes any reply into out.
type PipeSender interface {
	Send(kind proto.PacketKind, in interface{}, out interface{}) error
}

// Pipe is a buffered packet dispatcher. Packets are queued in storage and
// drained by worker goroutines (started via Start), which sleep on cond and
// are woken by Send's Broadcast.
type Pipe struct {
	cond *sync.Cond
	sender PipeSender
	storage PipeStore
}
// NewPipe creates a new pipe that delivers packets through sender, backed by
// the default packet store.
func NewPipe(sender PipeSender) *Pipe {
	p := &Pipe{
		cond:    sync.NewCond(&sync.Mutex{}),
		storage: NewDefaultPipeStore(),
	}
	p.sender = sender
	return p
}
// Send enqueues a packet for asynchronous delivery, stamping it with the
// current time, and wakes a worker. It returns the value reported by the
// store's Add. Note pack is received by value, so the stored pointer refers
// to this function's private copy.
func (p *Pipe) Send(pack Package) int {
	pack.time = time.Now()
	n := p.storage.Add(&pack)
	p.cond.Broadcast()
	return n
}
// Start launches the given number of worker goroutines that drain the pipe.
func (p *Pipe) Start(workers int) {
	for n := workers; n > 0; n-- {
		p.start()
	}
}
// start launches a single worker goroutine. The worker pops packets from the
// store under cond's lock, sleeping on cond when the store is empty, and
// sends each packet outside the lock. A failed send re-queues the packet.
// NOTE(review): the goroutine loops forever — there is no shutdown signal.
func (p *Pipe) start() {
	go func() {
		for {
			p.cond.L.Lock()
			pack := p.storage.Pop()
			if pack == nil {
				// Empty store: wait for Send's Broadcast, then retry.
				p.cond.Wait()
				p.cond.L.Unlock()
				continue
			}
			p.cond.L.Unlock()
			logFields := logger.With(
				"kind", pack.Kind,
				"diff", time.Since(pack.time),
				"remaining", p.storage.Len(),
			)
			logFields.Debugf("sending packet %s ....", pack.Kind.String())
			// Send outside the lock so slow transports don't block other workers.
			err := p.sender.Send(pack.Kind, pack.Data, nil)
			if err != nil {
				// Re-queue for a later retry by this or another worker.
				p.storage.Add(pack)
				logFields.Errorw("error sending packet", "error", err, "remaining", p.storage.Len())
			} else {
				logFields.Debugw("completed sending packet", "remaining", p.storage.Len())
			}
		}
	}()
}
// Len gets the number of pending packages currently buffered in the store.
func (p *Pipe) Len() int {
	return p.storage.Len()
}
|
package template
// Aircon is the JSON template for an air conditioner: its on/off operation
// and a map of named operating modes.
type Aircon struct {
	Operation *Action `json:"operation"`
	Modes map[string]*AirconMode `json:"modes"`
}

// AirconMode groups the per-mode adjustable actions; absent actions are
// omitted from the JSON.
type AirconMode struct {
	Temp *Action `json:"temp,omitempty"`
	Humid *Action `json:"humid,omitempty"`
	Fan *Action `json:"fan,omitempty"`
	HorizontalVane *Action `json:"horizontal_vane,omitempty"`
	VerticalVane *Action `json:"vertical_vane,omitempty"`
}
|
package main
import (
"fmt"
"myProject/videoWater/deal/config"
"myProject/videoWater/deal/factory"
)
func main() {
Run()
}
func Run() {
con := config.ReadConfig()
fmt.Println(con)
factory.DoFactory(con)
} |
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
im_type "github.com/kyf/6ryim/6ryim_http/im_type"
)
// init registers the /sendmsg HTTP handler (handlers and response are defined
// elsewhere in this package). The handler requires a non-empty "msg"
// parameter, kicks off asynchronous processing, and acknowledges immediately.
func init() {
	handlers["/sendmsg"] = func(w http.ResponseWriter, r *http.Request, params url.Values, logger *log.Logger) {
		msg := params.Get("msg")
		var result string
		if len(msg) == 0 {
			result = "msg is empty!"
			response(w, result)
			return
		}
		// Process in the background so the HTTP response is not delayed.
		go processMessage(msg, logger)
		result1 := make(map[string]string)
		result1["response"] = "processMessage"
		response(w, result1)
	}
}
// processMessage validates that msg is well-formed JSON for an
// im_type.Message and forwards the raw payload to the websocket service's
// message/receive endpoint, logging the reply. All failures are logged and
// swallowed because this runs in a fire-and-forget goroutine.
func processMessage(msg string, logger *log.Logger) {
	var message im_type.Message
	err := json.Unmarshal([]byte(msg), &message)
	if err != nil {
		logger.Printf("processMessage json.Unmarshal err:%v", err)
		return
	}
	var param url.Values = make(url.Values)
	param.Set("msg", msg)
	res, err := http.PostForm(fmt.Sprintf("%smessage/receive", WS_SERVICE_URL), param)
	if err != nil {
		logger.Printf("processMessage.postform err:%v", err)
		return
	}
	// Fix: the response body was never closed, leaking the underlying
	// connection on every successful POST.
	defer res.Body.Close()
	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		logger.Printf("processMessage readall err:%v", err)
		return
	}
	logger.Printf("[processMessage]response is %s", string(data))
}
|
/*
* @lc app=leetcode.cn id=1576 lang=golang
*
* [1576] 替换所有的问号
*/
// @lc code=start
package main
// modifyString replaces every '?' in s with a lowercase letter such that no
// replacement equals either neighbor. Scanning left to right, each '?' has at
// most two constraints, so trying only 'a', 'b', 'c' always succeeds and
// picks exactly the first admissible letter of the alphabet — the same choice
// a full a-z scan would make.
func modifyString(s string) string {
	out := []byte(s)
	for i, c := range out {
		if c != '?' {
			continue
		}
		for letter := byte('a'); letter <= 'c'; letter++ {
			clashLeft := i > 0 && out[i-1] == letter
			clashRight := i+1 < len(out) && out[i+1] == letter
			if !clashLeft && !clashRight {
				out[i] = letter
				break
			}
		}
	}
	return string(out)
}
//
// func main() {
// fmt.Println(modifyString("???zs???"))
// }
// @lc code=end
|
package collection
import (
"errors"
)
/*
ArrayDeque is a double-ended queue backed by a slice used as a ring buffer
with head/tail cursors. nil values are not allowed. It is intended to serve
as a queue, deque, or stack.

NOTE(review): the masked ring arithmetic below assumes len(elements) is a
power of two — an invariant the current AddLast/AddFirst implementations do
not maintain (they grow via append).
*/
const (
	// MinInitialCapacity is the minimum initial capacity.
	// NOTE(review): not referenced by any method in this file.
	MinInitialCapacity int = 8
)
type ArrayDeque struct {
	elements []interface{}
	head int
	tail int
}
// Size returns the number of stored elements, derived from the ring cursors.
// NOTE(review): only correct while len(elements) is a power of two and the
// cursors respect ring invariants — the current Add* methods break both.
func (a *ArrayDeque) Size() int {
	return (a.tail - a.head) & (len(a.elements) - 1)
}
// IsEmpty reports whether the deque holds no elements (cursors coincide).
func (a *ArrayDeque) IsEmpty() bool {
	return a.tail == a.head
}
// Clear removes all elements, nilling each occupied cell so its value can be
// garbage-collected, and resets both cursors to zero.
func (a *ArrayDeque) Clear() {
	h := a.head
	t := a.tail
	if h != t { // clear all cells
		a.head, a.tail = 0, 0
		mask := cap(a.elements) - 1
		i := h
		// Walk the ring from the old head to the old tail, wrapping via the
		// power-of-two mask.
		for {
			a.elements[i] = nil
			i = (i + 1) & mask
			if i == t {
				break
			}
		}
	}
}
// Add inserts e at the tail of the deque. It mirrors AddLast but reports the
// outcome as an (error, ok) pair.
func (a *ArrayDeque) Add(e interface{}) (error, bool) {
	if err := a.AddLast(e); err != nil {
		return err, false
	}
	return nil, true
}
// AddLast inserts e at the end of the deque; nil values are rejected.
//
// NOTE(review): this grows the slice with append while also advancing tail,
// which conflicts with the masked ring arithmetic used by Size/Poll* once
// the slice length stops being a power of two.
func (a *ArrayDeque) AddLast(e interface{}) error {
	// Fix: reject nil before touching the backing store, so an invalid call
	// has no side effects (previously the slice was allocated first).
	if e == nil {
		return errors.New("NullPointer Error")
	}
	if a.elements == nil {
		a.elements = make([]interface{}, 0, 16)
	}
	// Append at the end; append doubles the backing array as needed.
	a.tail = a.tail + 1
	a.elements = append(a.elements, e)
	return nil
}
// AddFirst inserts e at the front of the deque; nil values are rejected.
//
// NOTE(review): this method both writes e into the ring slot at
// (head)&(len-1) AND appends e at the end of the slice, so the element is
// stored twice and the tail-side state is corrupted. One of the two writes
// (most plausibly the append) should be removed once the intended ring
// invariant is settled; left untouched here pending that decision.
func (a *ArrayDeque) AddFirst(e interface{}) error {
	if a.elements == nil {
		a.elements = make([]interface{}, 16, 16)
	}
	if e == nil {
		return errors.New("NullPointer Error")
	}
	// Move head back one slot (wrapping via the mask) and store e there.
	a.head = a.head - 1
	a.elements[(a.head)&(len(a.elements)-1)] = e
	a.elements = append(a.elements, e)
	return nil
}
// doubleCapacity grows the backing array to twice its current length,
// copying elements head..end first, then 0..head, so the ring is linearized
// to start at index 0. Fails when the doubled size overflows int.
// NOTE(review): not currently called by any method in this file.
func (a *ArrayDeque) doubleCapacity() error {
	p := a.head
	n := len(a.elements)
	r := n - p // number of elements to the right of p
	newCapacity := n << 1
	if newCapacity < 0 {
		return errors.New("Sorry, deque too big")
	}
	newArray := make([]interface{}, newCapacity, newCapacity)
	// Copy [p:n) to the start of the new array, then [0:p) right after it.
	a.arrayCopy(a.elements, p, newArray, 0, r)
	a.arrayCopy(a.elements, 0, newArray, r, p)
	a.elements = newArray
	a.head = 0
	a.tail = n
	return nil
}
// arrayCopy copies length elements from src starting at srcPos into dest
// starting at destPos (the java.lang.System.arraycopy signature).
func (a *ArrayDeque) arrayCopy(src []interface{}, srcPos int, dest []interface{}, destPos int, length int) {
	for off := 0; off < length; off++ {
		dest[destPos+off] = src[srcPos+off]
	}
}
// OfferFirst inserts the specified element at the front of this deque.
// NOTE(review): unimplemented stub — always reports failure.
func (a *ArrayDeque) OfferFirst(e interface{}) bool {
	return false
}

// OfferLast inserts the specified element at the end of this deque.
// NOTE(review): unimplemented stub — always reports failure.
func (a *ArrayDeque) OfferLast(e interface{}) bool {
	return false
}
// RemoveFirst removes and returns the head element, or a NoSuchElement error
// when the deque is empty.
func (a *ArrayDeque) RemoveFirst() (error, interface{}) {
	if e := a.PollFirst(); e != nil {
		return nil, e
	}
	return errors.New("NoSuchElement"), nil
}
// RemoveLast removes and returns the tail element, or a NoSuchElement error
// when the deque is empty.
func (a *ArrayDeque) RemoveLast() (error, interface{}) {
	if e := a.PollLast(); e != nil {
		return nil, e
	}
	return errors.New("NoSuchElement"), nil
}
// PollFirst removes and returns the head element, or nil when the deque is
// empty. Unlike RemoveFirst it does not report an error.
func (a *ArrayDeque) PollFirst() interface{} {
	// Fix: indexing a nil/empty backing slice panicked on a fresh deque.
	if len(a.elements) == 0 {
		return nil
	}
	// NOTE(review): h is not masked here, unlike the advance below — confirm
	// head always stays within [0, len) before this point.
	h := a.head
	result := a.elements[h]
	// Element is nil if deque empty
	if result == nil {
		return nil
	}
	a.elements[h] = nil
	a.head = (h + 1) & (cap(a.elements) - 1)
	return result
}
// PollLast removes and returns the tail element, or nil when the deque is
// empty. Unlike RemoveLast it does not report an error.
func (a *ArrayDeque) PollLast() interface{} {
	// Fix: with an empty backing slice the mask below is -1 and the index
	// expression panicked.
	if len(a.elements) == 0 {
		return nil
	}
	// NOTE(review): the mask uses cap but indexing is bounds-checked against
	// len — confirm cap == len is maintained wherever this is reached.
	t := (a.tail - 1) & (cap(a.elements) - 1)
	result := a.elements[t]
	if result == nil {
		return nil
	}
	a.elements[t] = nil
	a.tail = t
	return result
}
// GetFirst returns (without removing) the head element, or a NoSuchElement
// error when the deque is empty.
func (a *ArrayDeque) GetFirst() (error, interface{}) {
	// Fix: indexing a nil/empty backing slice panicked on a fresh deque.
	if len(a.elements) == 0 {
		return errors.New("NoSuchElement"), nil
	}
	result := a.elements[a.head]
	if result == nil {
		return errors.New("NoSuchElement"), nil
	}
	return nil, result
}
// GetLast returns (without removing) the tail element, or a NoSuchElement
// error when the deque is empty.
func (a *ArrayDeque) GetLast() (error, interface{}) {
	// Fix: with an empty backing slice the mask below is -1 and the index
	// expression panicked.
	if len(a.elements) == 0 {
		return errors.New("NoSuchElement"), nil
	}
	result := a.elements[(a.tail-1)&(cap(a.elements)-1)]
	if result == nil {
		return errors.New("NoSuchElement"), nil
	}
	return nil, result
}
// PeekFirst returns the head element without removing it.
// NOTE(review): unimplemented stub — always nil.
func (a *ArrayDeque) PeekFirst() interface{} {
	return nil
}

// PeekLast returns the tail element without removing it.
// NOTE(review): unimplemented stub — always nil.
func (a *ArrayDeque) PeekLast() interface{} {
	return nil
}

// Push pushes an element onto the stack represented by this deque.
// NOTE(review): unimplemented stub — no-op.
func (a *ArrayDeque) Push(e interface{}) {
}

// Element retrieves the head of the queue represented by this deque.
// NOTE(review): unimplemented stub — always nil.
func (a *ArrayDeque) Element() interface{} {
	return nil
}

// Peek retrieves, but does not remove, the head of this deque.
// NOTE(review): unimplemented stub — always nil.
func (a *ArrayDeque) Peek() interface{} {
	return nil
}

// Offer inserts the element at the tail of this deque.
// NOTE(review): unimplemented stub — always reports failure.
func (a *ArrayDeque) Offer(e interface{}) bool {
	return false
}

// Poll retrieves and removes the head of this deque.
// NOTE(review): unimplemented stub — always nil.
func (a *ArrayDeque) Poll() interface{} {
	return nil
}

// Pop pops an element from the stack represented by this deque.
// NOTE(review): unimplemented stub — always nil.
func (a *ArrayDeque) Pop() interface{} {
	return nil
}

// Remove retrieves and removes the head of this deque.
// NOTE(review): unimplemented stub — always nil.
func (a *ArrayDeque) Remove() interface{} {
	return nil
}
|
package main
import "fmt"
// main demonstrates Go zero values: a freshly allocated *int dereferences to
// 0, and a map assignment/lookup round-trips the stored value.
func main() {
	n := new(int)
	fmt.Println(*n)
	counts := make(map[string]int)
	counts["abc"] = 10
	fmt.Println(counts["abc"])
}
|
package v1
import (
rapiv1 "github.com/peteabre/ocp-client-go/pkg/route/api/v1"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
)
// OcpV1Interface is the set of resource getters exposed by the v1 client.
type OcpV1Interface interface {
	RoutesGetter
}

// OcpV1Client talks to the OpenShift v1 API over an embedded REST client
// (configured for the "/oapi" path by setConfigDefaults).
type OcpV1Client struct {
	restClient rest.Interface
}
// Routes returns a RouteInterface scoped to the given namespace.
func (c *OcpV1Client) Routes(namespace string) RouteInterface {
	return newRoutes(c.restClient, namespace)
}
// NewForConfig creates a new OcpV1Client for the given config. The caller's
// config is copied and normalized with setConfigDefaults before use, so the
// original is never mutated.
func NewForConfig(c *rest.Config) (*OcpV1Client, error) {
	cfg := *c
	if err := setConfigDefaults(&cfg); err != nil {
		return nil, err
	}
	restClient, err := rest.RESTClientFor(&cfg)
	if err != nil {
		return nil, err
	}
	return &OcpV1Client{restClient: restClient}, nil
}
// setConfigDefaults fills in the group version, API path, serializer, and
// user agent required to talk to the OpenShift v1 route API.
func setConfigDefaults(config *rest.Config) error {
	//TODO: Change this to a more common one
	gv := rapiv1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/oapi"
	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}
|
package base
import (
"io"
"time"
"encoding/hex"
"crypto/rand"
"encoding/base64"
"crypto/sha256"
)
// Hash returns the hex-encoded SHA-256 digest of s mixed with the current
// wall-clock time, so repeated calls with the same input yield different
// digests (a time-salted token rather than a reproducible hash).
// NOTE(review): if a deterministic hash of s was intended, the time.Now()
// write is a bug — confirm against callers.
func Hash(s string) string {
	h := sha256.New()
	h.Write([]byte(s))
	io.WriteString(h, time.Now().String())
	return hex.EncodeToString(h.Sum(nil))
}
// RandomString returns 64 cryptographically random bytes encoded as a
// URL-safe base64 string (88 characters), or "" if the system entropy source
// cannot be read.
func RandomString() string {
	raw := make([]byte, 64)
	_, err := io.ReadFull(rand.Reader, raw)
	if err != nil {
		return ""
	}
	return base64.URLEncoding.EncodeToString(raw)
}
package main
import (
"fmt"
"log"
"math/rand"
"sort"
"github.com/hodgesds/perf-utils"
"golang.org/x/sys/unix"
)
// randInt64s returns a slice of n pseudo-random int64 values.
func randInt64s(n int) []int64 {
	out := make([]int64, n)
	for i := range out {
		out[i] = rand.Int63()
	}
	return out
}
// randInt64Ps returns a slice of n pointers, each to a distinct pseudo-random
// int64 value.
func randInt64Ps(n int) []*int64 {
	out := make([]*int64, n)
	for i := range out {
		v := rand.Int63()
		out[i] = &v
	}
	return out
}
// compareInts benchmarks L1 data-cache behavior of sorting value slices
// ([]int64) versus pointer slices ([]*int64) for each requested size,
// printing the perf counter readings for cache read hits and misses.
func compareInts(entries []int) {
	for _, entry := range entries {
		int64s := randInt64s(entry)
		profileValue, err := perf.L1Data(
			unix.PERF_COUNT_HW_CACHE_OP_READ,
			unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS,
			func() error {
				sort.SliceStable(int64s, func(i, j int) bool {
					return int64s[i] < int64s[j]
				})
				return nil
			})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("L1 Data Read Hit sort []int64 size %d: %+v\n", entry, profileValue)
		// Fresh unsorted data for the miss measurement.
		int64s = randInt64s(entry)
		profileValue, err = perf.L1Data(
			unix.PERF_COUNT_HW_CACHE_OP_READ,
			unix.PERF_COUNT_HW_CACHE_RESULT_MISS,
			func() error {
				sort.SliceStable(int64s, func(i, j int) bool {
					return int64s[i] < int64s[j]
				})
				return nil
			})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("L1 Data Read Miss sort []int64 size %d: %+v\n", entry, profileValue)
		int64Ps := randInt64Ps(entry)
		profileValue, err = perf.L1Data(
			unix.PERF_COUNT_HW_CACHE_OP_READ,
			unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS,
			func() error {
				sort.SliceStable(int64Ps, func(i, j int) bool {
					return *int64Ps[i] < *int64Ps[j]
				})
				return nil
			})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("L1 Data Read Hit sort []*int64 size %d: %+v\n", entry, profileValue)
		int64Ps = randInt64Ps(entry)
		profileValue, err = perf.L1Data(
			unix.PERF_COUNT_HW_CACHE_OP_READ,
			unix.PERF_COUNT_HW_CACHE_RESULT_MISS,
			func() error {
				// Fix: this measurement previously sorted int64s while
				// comparing through int64Ps — a copy-paste bug that measured
				// the wrong slice (and could index int64Ps out of range).
				sort.SliceStable(int64Ps, func(i, j int) bool {
					return *int64Ps[i] < *int64Ps[j]
				})
				return nil
			})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("L1 Data Read Miss sort []*int64 size %d: %+v\n", entry, profileValue)
		println("")
	}
}
|
package tester
import (
"github.com/zeuxisoo/go-zenwords/pkg/keywords"
)
// CreateRPC will prepare all related data for RPC testing by loading the word
// list from the repository-relative path ../../words.txt.
// NOTE(review): the constructed Keywords value is discarded — confirm
// NewKeywords has the intended side effects.
func CreateRPC() {
	keywords.NewKeywords("../../words.txt")
}
|
package main
import (
"flag"
"fmt"
"github.com/gerald1248/timeline"
"os"
"path/filepath"
"sync"
)
// main either starts the TLS timeline server (no positional args) or
// validates the given JSON files concurrently, printing each result message
// and exiting with the sum of the per-file result codes.
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: ./%s [<JSON file> [<JSON file>]]\n", filepath.Base(os.Args[0]))
		flag.PrintDefaults()
		os.Exit(0)
	}
	certificate := flag.String("c", "cert.pem", "TLS server certificate")
	key := flag.String("k", "key.pem", "TLS server key")
	hostname := flag.String("n", "localhost", "Hostname")
	port := flag.Int("p", 8443, "listen on port")
	flag.Parse()
	args := flag.Args()
	if len(args) == 0 {
		serve(*certificate, *key, *hostname, *port)
		return
	}
	// One result per input file arrives on ch, in completion order.
	ch := make(chan timeline.ShortResult)
	for _, input := range args {
		go timeline.ProcessFile(input, ch)
	}
	var mu sync.Mutex
	var code int
	// NOTE(review): only this goroutine touches code, so the mutex is
	// unnecessary; kept unchanged in this documentation-only pass.
	for range args {
		result := <-ch
		mu.Lock()
		code += result.Code
		mu.Unlock()
		fmt.Println(result.Message)
	}
	os.Exit(code)
}
|
package coin
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"incognito-chain/common"
"incognito-chain/common/base58"
"incognito-chain/key/wallet"
"incognito-chain/privacy/key"
"incognito-chain/privacy/operation"
)
const (
	// MaxSizeInfoCoin bounds the length of a coin's Info field.
	MaxSizeInfoCoin = 255
	// JsonMarshalFlag is 34, the ASCII code of '"' — presumably used when
	// handling JSON-quoted byte strings; confirm at call sites.
	JsonMarshalFlag = 34
	CoinVersion1 = 1
	CoinVersion2 = 2
	// TxRandomGroupSize is the serialized byte size of a TxRandom group.
	TxRandomGroupSize = 68
)
// Re-exported Pedersen commitment generator indices from the operation
// package, so local callers need not import it directly.
const (
	PedersenPrivateKeyIndex = operation.PedersenPrivateKeyIndex
	PedersenValueIndex = operation.PedersenValueIndex
	PedersenSndIndex = operation.PedersenSndIndex
	PedersenShardIDIndex = operation.PedersenShardIDIndex
	PedersenRandomnessIndex = operation.PedersenRandomnessIndex
)
// getMin returns the smaller of a and b.
func getMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// parseScalarForSetBytes reads a length-prefixed scalar from *coinBytes at
// *offset: one length byte, then that many payload bytes. A zero length
// yields a nil scalar (field absent). offset is advanced past everything
// consumed; on error it may have advanced past the length byte only.
func parseScalarForSetBytes(coinBytes *[]byte, offset *int) (*operation.Scalar, error) {
	b := *coinBytes
	if *offset >= len(b) {
		return nil, errors.New("Offset is larger than len(bytes), cannot parse scalar")
	}
	var sc *operation.Scalar = nil
	lenField := b[*offset]
	*offset += 1
	if lenField != 0 {
		if *offset+int(lenField) > len(b) {
			return nil, errors.New("Offset+curLen is larger than len(bytes), cannot parse scalar for set bytes")
		}
		data := b[*offset : *offset+int(lenField)]
		sc = new(operation.Scalar).FromBytesS(data)
		*offset += int(lenField)
	}
	return sc, nil
}
// parsePointForSetBytes reads a length-prefixed curve point from *coinBytes
// at *offset: one length byte, then that many payload bytes. A zero length
// yields a nil point (field absent). offset is advanced past everything
// consumed; on error it may have advanced past the length byte only.
func parsePointForSetBytes(coinBytes *[]byte, offset *int) (*operation.Point, error) {
	b := *coinBytes
	if *offset >= len(b) {
		return nil, errors.New("Offset is larger than len(bytes), cannot parse point")
	}
	var point *operation.Point = nil
	var err error
	lenField := b[*offset]
	*offset += 1
	if lenField != 0 {
		if *offset+int(lenField) > len(b) {
			return nil, errors.New("Offset+curLen is larger than len(bytes), cannot parse point for set bytes")
		}
		data := b[*offset : *offset+int(lenField)]
		// Unlike scalar parsing, point deserialization can fail (invalid
		// encoding), so the error is propagated.
		point, err = new(operation.Point).FromBytesS(data)
		if err != nil {
			return nil, err
		}
		*offset += int(lenField)
	}
	return point, nil
}
// parseInfoForSetBytes reads a length-prefixed info blob from *coinBytes at
// *offset: one length byte followed by that many payload bytes. A zero
// length yields an empty slice. offset is advanced past everything consumed.
func parseInfoForSetBytes(coinBytes *[]byte, offset *int) ([]byte, error) {
	b := *coinBytes
	if *offset >= len(b) {
		return []byte{}, errors.New("Offset is larger than len(bytes), cannot parse info")
	}
	n := int(b[*offset])
	*offset++
	if n == 0 {
		return []byte{}, nil
	}
	if *offset+n > len(b) {
		return []byte{}, errors.New("Offset+curLen is larger than len(bytes), cannot parse info for set bytes")
	}
	out := make([]byte, n)
	copy(out, b[*offset:*offset+n])
	*offset += n
	return out, nil
}
// CreatePaymentInfosFromPlainCoinsAndAddress collapses the values of all
// input coins into a single PaymentInfo addressed to paymentAddress,
// carrying message as the payment's metadata.
func CreatePaymentInfosFromPlainCoinsAndAddress(c []PlainCoin, paymentAddress key.PaymentAddress, message []byte) []*key.PaymentInfo {
	var total uint64
	for _, pc := range c {
		total += pc.GetValue()
	}
	return []*key.PaymentInfo{key.InitPaymentInfo(paymentAddress, total, message)}
}
// NewCoinFromAmountAndTxRandomBytes builds a CoinV2 carrying the given
// amount for publicKey, reusing a pre-built TxRandom. Fresh randomness is
// sampled, the value commitment is recomputed, and the shared random is set
// to nil (the TxRandom is supplied by the caller rather than derived here).
func NewCoinFromAmountAndTxRandomBytes(amount uint64, publicKey *operation.Point, txRandom *TxRandom, info []byte) *CoinV2 {
	c := new(CoinV2).Init()
	c.SetPublicKey(publicKey)
	c.SetAmount(new(operation.Scalar).FromUint64(amount))
	c.SetRandomness(operation.RandomScalar())
	c.SetTxRandom(txRandom)
	c.SetCommitment(operation.PedCom.CommitAtIndex(c.GetAmount(), c.GetRandomness(), operation.PedersenValueIndex))
	c.SetSharedRandom(nil)
	c.SetInfo(info)
	return c
}
// SenderSeal retains the sender-side secrets of a newly minted coin: the
// shared randomness r and the one-time-address derivation index.
type SenderSeal struct {
	r operation.Scalar
	txRandomIndex uint32
}

// GetR returns the seal's shared-randomness scalar.
func (s SenderSeal) GetR() *operation.Scalar { return &s.r }

// GetIndex returns the OTA derivation index recorded in the seal.
func (s SenderSeal) GetIndex() uint32 { return s.txRandomIndex }
// MarshalJSON encodes the seal as a JSON hex string: the scalar bytes
// followed by the 4-byte index.
func (s SenderSeal) MarshalJSON() ([]byte, error) {
	raw := s.r.ToBytesS()
	raw = append(raw, common.Uint32ToBytes(s.txRandomIndex)...)
	return json.Marshal(hex.EncodeToString(raw))
}
// UnmarshalJSON decodes a seal previously produced by MarshalJSON: a JSON
// hex string of Ed25519KeySize scalar bytes followed by a 4-byte index.
func (s *SenderSeal) UnmarshalJSON(src []byte) error {
	var temp string
	// Fix: the unmarshal error was previously discarded, letting malformed
	// JSON fall through to the hex decoder with an empty string.
	if err := json.Unmarshal(src, &temp); err != nil {
		return err
	}
	raw, err := hex.DecodeString(temp)
	if err != nil {
		return err
	}
	if len(raw) == operation.Ed25519KeySize+4 {
		sc := &operation.Scalar{}
		sc.FromBytesS(raw[:operation.Ed25519KeySize])
		ind, _ := common.BytesToUint32(raw[operation.Ed25519KeySize:])
		*s = SenderSeal{
			r: *sc,
			txRandomIndex: ind,
		}
		return nil
	}
	return errors.New("Error unmarshalling sender seal : unexpected length")
}
// NewCoinFromPaymentInfo mints a CoinV2 for the payment described by p.
// For ordinary recipients it derives a one-time address by incrementing an
// index until the derived public key lands on the receiver's shard with the
// requested sender shard and privacy type, and returns a SenderSeal so the
// sender can later prove/derive the same OTA. Burning addresses get the raw
// public key and a nil seal.
// NOTE(review): the derivation loop has no iteration bound — confirm
// DeriveShardInfoFromCoin's value space guarantees termination.
func NewCoinFromPaymentInfo(p *CoinParams) (*CoinV2, *SenderSeal, error) {
	receiverPublicKey, err := new(operation.Point).FromBytesS(p.PaymentAddress.Pk)
	if err != nil {
		errStr := fmt.Sprintf("Cannot parse outputCoinV2 from PaymentInfo when parseByte PublicKey, error %v ", err)
		return nil, nil, errors.New(errStr)
	}
	receiverPublicKeyBytes := receiverPublicKey.ToBytesS()
	targetShardID := common.GetShardIDFromLastByte(receiverPublicKeyBytes[len(receiverPublicKeyBytes)-1])
	c := new(CoinV2).Init()
	// Amount, Randomness, SharedRandom are transparency until we call concealData
	c.SetAmount(new(operation.Scalar).FromUint64(p.Amount))
	c.SetRandomness(operation.RandomScalar())
	c.SetSharedRandom(operation.RandomScalar()) // shared randomness for creating one-time-address
	c.SetSharedConcealRandom(operation.RandomScalar()) //shared randomness for concealing amount and blinding asset tag
	c.SetInfo(p.Message)
	c.SetCommitment(operation.PedCom.CommitAtIndex(c.GetAmount(), c.GetRandomness(), operation.PedersenValueIndex))
	// If this is going to burning address then dont need to create ota
	if wallet.IsPublicKeyBurningAddress(p.PaymentAddress.Pk) {
		publicKey, err := new(operation.Point).FromBytesS(p.PaymentAddress.Pk)
		if err != nil {
			panic("Something is wrong with info.paymentAddress.pk, burning address should be a valid point")
		}
		c.SetPublicKey(publicKey)
		return c, nil, nil
	}
	// Increase index until have the right shardID
	index := uint32(0)
	publicOTA := p.PaymentAddress.GetOTAPublicKey()
	publicSpend := p.PaymentAddress.GetPublicSpend()
	rK := new(operation.Point).ScalarMult(publicOTA, c.GetSharedRandom())
	for {
		index += 1
		// Get publickey
		hash := operation.HashToScalar(append(rK.ToBytesS(), common.Uint32ToBytes(index)...))
		HrKG := new(operation.Point).ScalarMultBase(hash)
		publicKey := new(operation.Point).Add(HrKG, publicSpend)
		c.SetPublicKey(publicKey)
		senderShardID, recvShardID, coinPrivacyType, _ := DeriveShardInfoFromCoin(publicKey.ToBytesS())
		if recvShardID == int(targetShardID) && senderShardID == p.SenderShardID && coinPrivacyType == p.CoinPrivacyType {
			otaRandomPoint := new(operation.Point).ScalarMultBase(c.GetSharedRandom())
			concealRandomPoint := new(operation.Point).ScalarMultBase(c.GetSharedConcealRandom())
			c.SetTxRandomDetail(concealRandomPoint, otaRandomPoint, index)
			break
		}
	}
	seal := SenderSeal{
		r: *c.GetSharedRandom(),
		txRandomIndex: index,
	}
	return c, &seal, nil
}
// CoinV2ArrayToCoinArray upcasts a slice of *CoinV2 into a slice of the Coin
// interface.
func CoinV2ArrayToCoinArray(coinArray []*CoinV2) []Coin {
	res := make([]Coin, 0, len(coinArray))
	for _, c := range coinArray {
		res = append(res, c)
	}
	return res
}
// ParseOTAInfoToString encodes a one-time-address public key and its
// TxRandom as base58check strings (version byte common.ZeroByte).
func ParseOTAInfoToString(pubKey *operation.Point, txRandom *TxRandom) (string, string) {
	return base58.Base58Check{}.Encode(pubKey.ToBytesS(), common.ZeroByte), base58.Base58Check{}.Encode(txRandom.Bytes(), common.ZeroByte)
}
// ParseOTAInfoFromString is the inverse of ParseOTAInfoToString: it decodes
// the base58check public-key and TxRandom strings back into typed values,
// rejecting inputs with an unexpected version byte.
func ParseOTAInfoFromString(pubKeyStr, txRandomStr string) (*operation.Point, *TxRandom, error) {
	rawPK, ver, err := base58.Base58Check{}.Decode(pubKeyStr)
	if err != nil || ver != common.ZeroByte {
		return nil, nil, errors.New("ParseOTAInfoFromString Cannot decode base58check string")
	}
	pubKey, err := new(operation.Point).FromBytesS(rawPK)
	if err != nil {
		return nil, nil, errors.New("ParseOTAInfoFromString Cannot set Point from bytes")
	}
	rawTxr, ver, err := base58.Base58Check{}.Decode(txRandomStr)
	if err != nil || ver != common.ZeroByte {
		return nil, nil, errors.New("ParseOTAInfoFromString Cannot decode base58check string")
	}
	txRandom := new(TxRandom)
	if err := txRandom.SetBytes(rawTxr); err != nil {
		return nil, nil, errors.New("ParseOTAInfoFromString Cannot set txRandom from bytes")
	}
	return pubKey, txRandom, nil
}
const (
	// PrivacyTypeTransfer marks a coin created by a transfer.
	PrivacyTypeTransfer = iota
	// PrivacyTypeMint marks a coin created by minting.
	PrivacyTypeMint
)
// DeriveShardInfoFromCoin returns the sender origin & receiver shard of a coin based on the
// PublicKey on that coin (encoded inside its last byte).
// The low 7 bits pack (privacyType, senderShard, receiverShard) in mixed
// radix base numShards. Does not support MaxShardNumber > 8.
func DeriveShardInfoFromCoin(coinPubKey []byte) (int, int, int, error) {
	numShards := common.MaxShardNumber
	n := int(coinPubKey[len(coinPubKey)-1]) % 128 // use 7 bits
	receiverShardID := n % numShards
	n /= numShards
	senderShardID := n % numShards
	coinPrivacyType := n / numShards
	if coinPrivacyType > PrivacyTypeMint {
		return -1, -1, -1, fmt.Errorf("coin %x has unsupported PrivacyType %d", coinPubKey, coinPrivacyType)
	}
	return senderShardID, receiverShardID, coinPrivacyType, nil
}
// CoinParams contains the necessary data to create a new coin: the payment
// details plus the desired sender shard and privacy type to encode into the
// coin's one-time address.
type CoinParams struct {
	key.PaymentInfo
	SenderShardID int
	CoinPrivacyType int
}
// From initializes the CoinParam using input data (PaymentInfo must not be nil);
// shard ID and privacy type are reduced by modulo into their valid ranges.
// NOTE(review): the receiver p is unused (a fresh value is returned), and a
// negative sid/cptype stays negative after % in Go — confirm callers never
// pass negatives.
func (p *CoinParams) From(inf *key.PaymentInfo, sid, cptype int) *CoinParams {
	return &CoinParams{
		PaymentInfo: *inf,
		SenderShardID: sid % common.MaxShardNumber,
		CoinPrivacyType: cptype % (PrivacyTypeMint + 1),
	}
}
// FromPaymentInfo initializes the CoinParam using a PaymentInfo (must not be
// nil). The sender shard is derived from the receiver public key's last
// byte, and the privacy type defaults to transfer.
func (p *CoinParams) FromPaymentInfo(inf *key.PaymentInfo) *CoinParams {
	pk := inf.PaymentAddress.Pk
	lastByte := pk[len(pk)-1]
	return &CoinParams{
		PaymentInfo: *inf,
		SenderShardID: int(common.GetShardIDFromLastByte(lastByte)),
		CoinPrivacyType: PrivacyTypeTransfer,
	}
}
|
package service
import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// NewKubernetesClientset builds a Kubernetes clientset from the in-cluster
// service-account configuration. It panics on any failure, so it is only
// suitable for startup paths where the pod cannot run without the API.
func NewKubernetesClientset() *kubernetes.Clientset {
	//!!!!!!! dev !!!!!!!!!!
	//kubeconfig := flag.String("kubeconfig", "./config", "absolute path to the kubeconfig file")
	//flag.Parse()
	//config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	//!!!!!!! prod !!!!!!!!!!
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err.Error())
	}
	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	return clientset
}
|
package httpserver
import (
"github.com/labstack/echo"
"strconv"
"net/http"
"Barracks/rank"
//"github.com/labstack/echo/middleware"
)
// StartServer runs an Echo HTTP server on the given port, exposing the live
// ranking (rankInfo) and the frozen ranking snapshot (rankInfoFreeze):
//   GET /api/:contestName/ranking     — full frozen ranking for the contest
//   GET /api/acceptedCnts/:userId     — live user summary with frozen rank
//   GET /api/problemStatuses/:userId  — live per-problem status for a user
// It blocks until the server exits and logs the terminal error fatally.
func StartServer(rankInfo *rank.RankInfo, rankInfoFreeze *rank.RankInfo, port uint) {
	e := echo.New()
	//e.Use(middleware.Logger())
	e.GET("/api/:contestName/ranking", func(ctx echo.Context) error {
		// 404 unless the requested contest matches the frozen snapshot.
		if rankInfoFreeze.RankData.ContestInfo.Name != ctx.Param("contestName") {
			return ctx.NoContent(http.StatusNotFound)
		}
		//r := rankInfoFreeze.GetRanking()
		//if r == nil {
		//	return ctx.NoContent(http.StatusNotFound)
		//}
		var r []rank.UserRankSummary
		for k := range rankInfoFreeze.RankData.UserMap {
			r = append(r, *rankInfoFreeze.GetUserSummary(uint(k), 0))
		}
		return ctx.JSON(http.StatusOK, r)
	})
	e.GET("/api/acceptedCnts/:userId", func(ctx echo.Context) error {
		userId, err := strconv.Atoi(ctx.Param("userId"))
		if err != nil {
			return ctx.NoContent(http.StatusNotFound)
		}
		r := rankInfo.GetUserSummary(uint(userId), 0)
		if r == nil {
			return ctx.NoContent(http.StatusNotFound)
		}
		// NOTE(review): the frozen summary is not nil-checked like the live
		// one above — a user present live but absent from the freeze would
		// panic here; confirm the two maps always share the same keys.
		r.Rank = rankInfoFreeze.GetUserSummary(uint(userId), 0).Rank
		return ctx.JSON(http.StatusOK, r)
	})
	e.GET("/api/problemStatuses/:userId", func(ctx echo.Context) error {
		userId, err := strconv.Atoi(ctx.Param("userId"))
		if err != nil {
			return ctx.NoContent(http.StatusNotFound)
		}
		r := rankInfo.GetUserProblemStatusSummary(uint(userId))
		if r == nil {
			return ctx.NoContent(http.StatusNotFound)
		}
		return ctx.JSON(http.StatusOK, r)
	})
	e.Logger.Fatal(e.Start(":"+strconv.Itoa(int(port))))
}
|
package notion
import "time"
// IngredientReport is a single dated measurement for one ingredient.
type IngredientReport struct {
	IngredientID string `json:"ingredient_id,omitempty"`
	Value float64 `json:"value"`
	Date time.Time `json:"date"`
}

// BatchIngredientReport groups several reports for one ingredient.
type BatchIngredientReport struct {
	IngredientID string `json:"ingredient_id"`
	Reports []IngredientReport `json:"reports"`
}

// IngredientReportResponse is the API reply for a report submission: an
// overall status plus any per-item error messages.
type IngredientReportResponse struct {
	Errors []string `json:"errors"`
	Status string `json:"status"`
}
|
package listen
import (
"golib/comm"
"net"
"regexp"
"strings"
"time"
"github.com/astaxie/beego/logs"
)
// cmdHanderAdapters maps adapter names to their registered command handlers
// (see RegisterHander).
var cmdHanderAdapters = make(map[string]CmdHander)
// Listen accepts TCP connections on the given port and parses one command
// per connection. Connections are handled serially: a slow client blocks the
// accept loop until its 3-second read deadline fires.
func Listen(port string) {
	// Listen on the requested TCP port.
	listener, err := net.Listen("tcp", ":"+port)
	if err != nil {
		logs.Error("Start TCP listener failed.", err.Error())
		return
	}
	defer listener.Close()
	for {
		conn, err := listener.Accept() //
		if err != nil {
			logs.Error("Accept listener faild.", err.Error())
			continue
		}
		// Handle the connection inline: serial command parsing by design.
		handleConn(conn)
	}
}
// handleConn reads from conn until a complete command (CMD::...::CMD per the
// comm prefix/suffix) arrives or the 3-second read deadline fires, then
// dispatches it to the adapter named by the first parameter. Exactly one
// command is served per connection; the result ("SUCC" or an error text) is
// written back before the connection closes.
func handleConn(conn net.Conn) {
	logs.Debug("Start to handle a tcp conn")
	defer conn.Close()
	// Fix: the pattern is loop-invariant; it was previously re-compiled on
	// every received chunk inside the read loop.
	reg := regexp.MustCompile(`^` + comm.CmdPrefix + `(.*)` + comm.CmdSuffix + `$`)
	var cmdString = ""
	for {
		// Refresh the 3-second read deadline for each chunk.
		conn.SetReadDeadline(time.Now().Add(time.Second * 3))
		buffer := make([]byte, 2048)
		n, err := conn.Read(buffer)
		// A read error also covers deadline expiry for commands that never
		// complete.
		if err != nil {
			conn.Write([]byte(err.Error() + " " + cmdString))
			logs.Error("Read tcp command faild,", err.Error(), " ", cmdString)
			break
		}
		cmdString += string(buffer[:n])
		// Check whether the accumulated buffer now forms a full command.
		cmdmatch := reg.FindStringSubmatch(cmdString)
		if len(cmdmatch) < 2 {
			// Incomplete: keep reading until the pattern matches or we time out.
			logs.Info("match tcp cmd failed,go on: ", cmdString)
			continue
		}
		adaperName := strings.Split(cmdmatch[1], comm.ParamSep)[0]
		adapter, ok := cmdHanderAdapters[adaperName]
		// No adapter registered under this name: report and stop.
		if !ok {
			conn.Write([]byte("The cmd hander adapter(" + adaperName + ") does not exsit"))
			logs.Error("The cmd hander adapter(", adaperName, ") does not exsit")
			break
		}
		if adapter.CmdParse(cmdmatch[1]) {
			conn.Write([]byte("SUCC"))
			logs.Info("Parse cmd ", cmdmatch[1], " succ.")
			// Run the adapter asynchronously; the connection closes now.
			go adapter.Start()
			break
		} else {
			conn.Write([]byte(adapter.CmdParseMsg()))
			logs.Error("Parse cmd ", cmdmatch[1], " failed,", adapter.CmdParseMsg())
			break
		}
	}
}
// RegisterHander registers a command handler adapter under name; duplicate
// registrations are logged and ignored.
func RegisterHander(name string, hander CmdHander) {
	if cmdHanderAdapters == nil {
		logs.Error("CmdHander: Register adapter is nil")
		return
	}
	if _, dup := cmdHanderAdapters[name]; dup {
		logs.Error("CmdHander: Register called twice for adapter " + name)
		return
	}
	cmdHanderAdapters[name] = hander
}
|
package server
import (
"context"
"encoding/binary"
"log"
"math"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
spb "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/summary_go_proto"
tpb "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_go_proto"
dtpb "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/types_go_proto"
"github.com/wchargin/tensorboard-data-server/io/logdir"
"github.com/wchargin/tensorboard-data-server/io/run"
"github.com/wchargin/tensorboard-data-server/mem"
dppb "github.com/wchargin/tensorboard-data-server/proto/data_provider_proto"
)
// Server implements the TensorBoardDataProviderServer interface.
// Data is served from the wrapped logdir.Loader; RPCs not implemented here
// fall through to the embedded Unimplemented stub.
type Server struct {
	dppb.UnimplementedTensorBoardDataProviderServer
	ll *logdir.Loader
}
const (
	// blobBatchSizeBytes controls the pagination behavior of the ReadBlob
	// RPC. Only this many bytes will be sent per frame in the response
	// stream. Chosen as 8 MiB, which is reasonably small but exceeds the
	// default gRPC response size limit of 4 MiB. (Clients need to handle
	// larger responses from RPCs like ReadScalars, so this helps catch the
	// problem earlier.)
	blobBatchSizeBytes = 1024 * 1024 * 8
)
// NewServer creates an RPC server wrapper around a *logdir.Loader.
func NewServer(ll *logdir.Loader) *Server {
	return &Server{ll: ll}
}
// ListRuns handles the ListRuns RPC, reporting every run known to the
// loader; each run's ID doubles as its display name.
func (s *Server) ListRuns(ctx context.Context, req *dppb.ListRunsRequest) (*dppb.ListRunsResponse, error) {
	res := new(dppb.ListRunsResponse)
	// Fix: fetch the run set once. The previous version called s.ll.Runs()
	// a second time inside the loop, which could observe a different
	// snapshot than the one the result slice was sized for.
	runs := s.ll.Runs()
	res.Runs = make([]*dppb.Run, 0, len(runs))
	for run := range runs {
		res.Runs = append(res.Runs, &dppb.Run{Id: run, Name: run})
	}
	return res, nil
}
// ListScalars handles the ListScalars RPC. For every run/tag that passes the
// run, tag, and plugin filters and holds scalar-class data, it reports
// time-series metadata (max step and wall time) — see ReadScalars for values.
func (s *Server) ListScalars(ctx context.Context, req *dppb.ListScalarsRequest) (*dppb.ListScalarsResponse, error) {
	res := new(dppb.ListScalarsResponse)
	runFilter, tagFilter := filters(req.RunTagFilter)
	for run, acc := range s.ll.Runs() {
		if !matchesFilter(runFilter, run) {
			continue
		}
		var tags []*dppb.ListScalarsResponse_TagEntry
		for tag, md := range acc.List() {
			if md == nil || md.DataClass != spb.DataClass_DATA_CLASS_SCALAR {
				continue
			}
			if md.PluginData.GetPluginName() != req.PluginFilter.GetPluginName() {
				continue
			}
			if !matchesFilter(tagFilter, tag) {
				continue
			}
			sample := acc.Sample(tag)
			if len(sample) == 0 {
				// shouldn't happen, but don't panic
				continue
			}
			last := sample[len(sample)-1]
			e := &dppb.ListScalarsResponse_TagEntry{
				TagName: tag,
				TimeSeries: &dppb.ScalarTimeSeries{
					MaxStep: int64(last.EventStep),
					MaxWallTime: maxWallTime(sample),
					SummaryMetadata: md,
				},
			}
			tags = append(tags, e)
		}
		// Only emit a run entry when at least one tag survived filtering.
		if tags != nil {
			e := &dppb.ListScalarsResponse_RunEntry{
				RunName: run,
				Tags: tags,
			}
			res.Runs = append(res.Runs, e)
		}
	}
	return res, nil
}
// ReadScalars handles the ReadScalars RPC, returning up to
// downsample.num_points (step, wall time, value) triples per matching
// run/tag of scalar-class data.
func (s *Server) ReadScalars(ctx context.Context, req *dppb.ReadScalarsRequest) (*dppb.ReadScalarsResponse, error) {
	res := new(dppb.ReadScalarsResponse)
	runFilter, tagFilter := filters(req.RunTagFilter)
	numPoints := int(req.Downsample.GetNumPoints())
	if numPoints < 0 {
		return nil, status.Errorf(codes.InvalidArgument, "downsample.num_points: want non-negative, got %v", numPoints)
	}
	for run, acc := range s.ll.Runs() {
		if !matchesFilter(runFilter, run) {
			continue
		}
		var tags []*dppb.ReadScalarsResponse_TagEntry
		for tag, md := range acc.List() {
			if md == nil || md.DataClass != spb.DataClass_DATA_CLASS_SCALAR {
				continue
			}
			if md.PluginData.GetPluginName() != req.PluginFilter.GetPluginName() {
				continue
			}
			if !matchesFilter(tagFilter, tag) {
				continue
			}
			sample := downsampleValueData(acc.Sample(tag), numPoints)
			// Columnar layout: parallel step/wallTime/value arrays.
			data := dppb.ScalarData{
				Step: make([]int64, len(sample)),
				WallTime: make([]float64, len(sample)),
				Value: make([]float64, len(sample)),
			}
			for i, x := range sample {
				data.Step[i] = int64(x.EventStep)
				data.WallTime[i] = x.EventWallTime
				data.Value[i] = scalarValue(x.Value.GetTensor())
			}
			e := &dppb.ReadScalarsResponse_TagEntry{
				TagName: tag,
				Data: &data,
			}
			tags = append(tags, e)
		}
		if tags != nil {
			e := &dppb.ReadScalarsResponse_RunEntry{
				RunName: run,
				Tags: tags,
			}
			res.Runs = append(res.Runs, e)
		}
	}
	return res, nil
}
// ListTensors handles the ListTensors RPC. Identical in structure to
// ListScalars but for tensor-class time series.
func (s *Server) ListTensors(ctx context.Context, req *dppb.ListTensorsRequest) (*dppb.ListTensorsResponse, error) {
	res := new(dppb.ListTensorsResponse)
	runFilter, tagFilter := filters(req.RunTagFilter)
	for run, acc := range s.ll.Runs() {
		if !matchesFilter(runFilter, run) {
			continue
		}
		var tags []*dppb.ListTensorsResponse_TagEntry
		for tag, md := range acc.List() {
			if md == nil || md.DataClass != spb.DataClass_DATA_CLASS_TENSOR {
				continue
			}
			if md.PluginData.GetPluginName() != req.PluginFilter.GetPluginName() {
				continue
			}
			if !matchesFilter(tagFilter, tag) {
				continue
			}
			sample := acc.Sample(tag)
			if len(sample) == 0 {
				// shouldn't happen, but don't panic
				continue
			}
			last := sample[len(sample)-1]
			e := &dppb.ListTensorsResponse_TagEntry{
				TagName: tag,
				TimeSeries: &dppb.TensorTimeSeries{
					MaxStep: int64(last.EventStep),
					MaxWallTime: maxWallTime(sample),
					SummaryMetadata: md,
				},
			}
			tags = append(tags, e)
		}
		if tags != nil {
			e := &dppb.ListTensorsResponse_RunEntry{
				RunName: run,
				Tags: tags,
			}
			res.Runs = append(res.Runs, e)
		}
	}
	return res, nil
}
// ReadTensors handles the ReadTensors RPC. Identical in structure to
// ReadScalars but values are returned as raw TensorProtos.
func (s *Server) ReadTensors(ctx context.Context, req *dppb.ReadTensorsRequest) (*dppb.ReadTensorsResponse, error) {
	res := new(dppb.ReadTensorsResponse)
	runFilter, tagFilter := filters(req.RunTagFilter)
	numPoints := int(req.Downsample.GetNumPoints())
	if numPoints < 0 {
		return nil, status.Errorf(codes.InvalidArgument, "downsample.num_points: want non-negative, got %v", numPoints)
	}
	for run, acc := range s.ll.Runs() {
		if !matchesFilter(runFilter, run) {
			continue
		}
		var tags []*dppb.ReadTensorsResponse_TagEntry
		for tag, md := range acc.List() {
			if md == nil || md.DataClass != spb.DataClass_DATA_CLASS_TENSOR {
				continue
			}
			if md.PluginData.GetPluginName() != req.PluginFilter.GetPluginName() {
				continue
			}
			if !matchesFilter(tagFilter, tag) {
				continue
			}
			sample := downsampleValueData(acc.Sample(tag), numPoints)
			data := dppb.TensorData{
				Step: make([]int64, len(sample)),
				WallTime: make([]float64, len(sample)),
				Value: make([]*tpb.TensorProto, len(sample)),
			}
			for i, x := range sample {
				data.Step[i] = int64(x.EventStep)
				data.WallTime[i] = x.EventWallTime
				data.Value[i] = x.Value.GetTensor()
			}
			e := &dppb.ReadTensorsResponse_TagEntry{
				TagName: tag,
				Data: &data,
			}
			tags = append(tags, e)
		}
		if tags != nil {
			e := &dppb.ReadTensorsResponse_RunEntry{
				RunName: run,
				Tags: tags,
			}
			res.Runs = append(res.Runs, e)
		}
	}
	return res, nil
}
// ListBlobSequences handles the ListBlobSequences RPC. For each run/tag time
// series of data class BLOB_SEQUENCE that passes the run/tag and plugin
// filters, it reports the latest step, max wall time, max sequence length,
// and summary metadata.
func (s *Server) ListBlobSequences(ctx context.Context, req *dppb.ListBlobSequencesRequest) (*dppb.ListBlobSequencesResponse, error) {
	res := new(dppb.ListBlobSequencesResponse)
	runFilter, tagFilter := filters(req.RunTagFilter)
	for run, acc := range s.ll.Runs() {
		if !matchesFilter(runFilter, run) {
			continue
		}
		var tags []*dppb.ListBlobSequencesResponse_TagEntry
		for tag, md := range acc.List() {
			// Only blob-sequence-class series for the requested plugin.
			if md == nil || md.DataClass != spb.DataClass_DATA_CLASS_BLOB_SEQUENCE {
				continue
			}
			if md.PluginData.GetPluginName() != req.PluginFilter.GetPluginName() {
				continue
			}
			if !matchesFilter(tagFilter, tag) {
				continue
			}
			sample := acc.Sample(tag)
			if len(sample) == 0 {
				// shouldn't happen, but don't panic
				continue
			}
			// Samples are kept in step order, so the last one has MaxStep.
			last := sample[len(sample)-1]
			e := &dppb.ListBlobSequencesResponse_TagEntry{
				TagName: tag,
				TimeSeries: &dppb.BlobSequenceTimeSeries{
					MaxStep:         int64(last.EventStep),
					MaxWallTime:     maxWallTime(sample),
					MaxLength:       maxLength(sample),
					SummaryMetadata: md,
				},
			}
			tags = append(tags, e)
		}
		// Runs with no matching tags are omitted from the response entirely.
		if tags != nil {
			e := &dppb.ListBlobSequencesResponse_RunEntry{
				RunName: run,
				Tags:    tags,
			}
			res.Runs = append(res.Runs, e)
		}
	}
	return res, nil
}
// ReadBlobSequences handles the ReadBlobSequences RPC. For each matching
// blob-sequence time series it returns step/wall-time/blob-reference
// triples; the actual blob bytes are fetched separately via ReadBlob using
// the returned keys.
//
// NOTE(review): numPoints is validated here but not yet applied to the
// sample (see the TODO below) — responses are currently not downsampled.
func (s *Server) ReadBlobSequences(ctx context.Context, req *dppb.ReadBlobSequencesRequest) (*dppb.ReadBlobSequencesResponse, error) {
	res := new(dppb.ReadBlobSequencesResponse)
	runFilter, tagFilter := filters(req.RunTagFilter)
	numPoints := int(req.Downsample.GetNumPoints())
	if numPoints < 0 {
		return nil, status.Errorf(codes.InvalidArgument, "downsample.num_points: want non-negative, got %v", numPoints)
	}
	for run, acc := range s.ll.Runs() {
		if !matchesFilter(runFilter, run) {
			continue
		}
		var tags []*dppb.ReadBlobSequencesResponse_TagEntry
		for tag, md := range acc.List() {
			// Only blob-sequence-class series for the requested plugin.
			if md == nil || md.DataClass != spb.DataClass_DATA_CLASS_BLOB_SEQUENCE {
				continue
			}
			if md.PluginData.GetPluginName() != req.PluginFilter.GetPluginName() {
				continue
			}
			if !matchesFilter(tagFilter, tag) {
				continue
			}
			sample := acc.Sample(tag)
			data := dppb.BlobSequenceData{
				Step:     make([]int64, len(sample)),
				WallTime: make([]float64, len(sample)),
				Values:   make([]*dppb.BlobReferenceSequence, len(sample)),
			}
			// TODO(@wchargin): Re-downsample.
			for i, x := range sample {
				data.Step[i] = int64(x.EventStep)
				data.WallTime[i] = x.EventWallTime
				data.Values[i] = blobSequenceValues(req.ExperimentId, run, tag, x.EventStep, x.Value.GetTensor())
			}
			e := &dppb.ReadBlobSequencesResponse_TagEntry{
				TagName: tag,
				Data:    &data,
			}
			tags = append(tags, e)
		}
		// Runs with no matching tags are omitted from the response entirely.
		if tags != nil {
			e := &dppb.ReadBlobSequencesResponse_RunEntry{
				RunName: run,
				Tags:    tags,
			}
			res.Runs = append(res.Runs, e)
		}
	}
	return res, nil
}
// ReadBlob handles the ReadBlob RPC. It decodes the opaque blob key, locates
// the referenced datum in the in-memory sample, and streams the blob back to
// the client in frames of at most blobBatchSizeBytes.
//
// Fixes over the original: stream.Send errors are now propagated (they were
// silently dropped, hiding client disconnects), and a negative blob index is
// rejected instead of panicking on the slice access.
func (s *Server) ReadBlob(req *dppb.ReadBlobRequest, stream dppb.TensorBoardDataProvider_ReadBlobServer) error {
	bk, err := decodeBlobKey(req.BlobKey)
	if err != nil {
		return status.Errorf(codes.InvalidArgument, "invalid blob key %q: %v", req.BlobKey, err)
	}
	_ = bk.eid // experiment ID is encoded in the key but unused for lookup
	var data []run.ValueDatum
	if run := s.ll.Run(bk.run); run != nil {
		data = run.Sample(bk.tag)
	}
	if data == nil {
		return status.Errorf(codes.NotFound, "experiment %q has no time series for run %q, tag %q", bk.eid, bk.run, bk.tag)
	}
	// Find the datum at the requested step, if it is still resident.
	var tensor *tpb.TensorProto
	for _, d := range data {
		if d.EventStep == bk.step {
			tensor = d.Value.GetTensor()
			break
		}
	}
	if tensor == nil {
		return status.Errorf(codes.NotFound, "time series for experiment %q, run %q, tag %q has no step %d; it may have been evicted from memory", bk.eid, bk.run, bk.tag, bk.step)
	}
	blobs := tensor.StringVal
	if bk.index < 0 || bk.index >= int64(len(blobs)) {
		return status.Errorf(codes.NotFound, "time series for experiment %q, run %q, tag %q at step %d has no index %d (only %d items)", bk.eid, bk.run, bk.tag, bk.step, bk.index, len(blobs))
	}
	blob := blobs[bk.index]
	// Stream the blob in bounded frames.
	for len(blob) > blobBatchSizeBytes {
		frame := blob[:blobBatchSizeBytes]
		if err := stream.Send(&dppb.ReadBlobResponse{Data: frame}); err != nil {
			return err
		}
		blob = blob[blobBatchSizeBytes:]
	}
	if len(blob) > 0 {
		if err := stream.Send(&dppb.ReadBlobResponse{Data: blob}); err != nil {
			return err
		}
	}
	return nil
}
// A stringFilter restricts a set of strings. A nil filter admits every
// string; a non-nil filter admits exactly the strings in the slice that it
// points to (so a pointer to an empty slice admits nothing).
type stringFilter *[]string

// matchesFilter reports whether x is admitted by f.
func matchesFilter(f stringFilter, x string) bool {
	if f == nil {
		return true // no filter: everything matches
	}
	allowed := *f
	for i := range allowed {
		if allowed[i] == x {
			return true
		}
	}
	return false
}
// filters extracts two stringFilters from a *RunTagFilter, which may be nil.
// The returned filters may point into rtf. A nil result for either filter
// means "match everything" (see matchesFilter).
func filters(rtf *dppb.RunTagFilter) (runs stringFilter, tags stringFilter) {
	if rf := rtf.GetRuns(); rf != nil {
		runs = &rf.Runs
	}
	if tf := rtf.GetTags(); tf != nil {
		tags = &tf.Tags
	}
	return
}
// scalarValue gets the scalar data point associated with the given tensor,
// whose summary's time series should be DATA_CLASS_SCALAR. The value may
// live in the typed value fields (FloatVal/DoubleVal) or be packed into
// TensorContent as little-endian bytes.
//
// Fix over the original: a DT_FLOAT/DT_DOUBLE tensor with neither a typed
// value nor enough TensorContent bytes previously panicked inside
// binary.LittleEndian; it now logs and returns NaN like other bad inputs.
func scalarValue(tensor *tpb.TensorProto) float64 {
	switch tensor.Dtype {
	case dtpb.DataType_DT_FLOAT:
		if len(tensor.FloatVal) > 0 {
			return float64(tensor.FloatVal[0])
		}
		if len(tensor.TensorContent) < 4 {
			log.Printf("scalar DT_FLOAT tensor has no FloatVal and only %d TensorContent bytes", len(tensor.TensorContent))
			return math.NaN()
		}
		return float64(math.Float32frombits(binary.LittleEndian.Uint32(tensor.TensorContent)))
	case dtpb.DataType_DT_DOUBLE:
		if len(tensor.DoubleVal) > 0 {
			return tensor.DoubleVal[0]
		}
		if len(tensor.TensorContent) < 8 {
			log.Printf("scalar DT_DOUBLE tensor has no DoubleVal and only %d TensorContent bytes", len(tensor.TensorContent))
			return math.NaN()
		}
		return math.Float64frombits(binary.LittleEndian.Uint64(tensor.TensorContent))
	default:
		log.Printf("bad scalar dtype %v", tensor.Dtype)
		return math.NaN()
	}
}
// blobSequenceValues builds the blob-reference sequence for one data point
// of a DATA_CLASS_BLOB_SEQUENCE time series. Each element of the (rank >= 1)
// tensor becomes one BlobReference whose key encodes the coordinates needed
// to read the blob back via ReadBlob.
//
// Fixes over the original: the doc comment was a copy-paste of scalarValue's,
// and a tensor with a missing or rank-0 shape caused a panic on Dim[0]; such
// malformed data now yields an empty sequence.
func blobSequenceValues(eid string, run string, tag string, step mem.Step, tensor *tpb.TensorProto) *dppb.BlobReferenceSequence {
	dims := tensor.GetTensorShape().GetDim()
	if len(dims) == 0 {
		// Malformed shape; return an empty sequence rather than panicking.
		return &dppb.BlobReferenceSequence{}
	}
	n := dims[0].GetSize()
	refs := make([]*dppb.BlobReference, n)
	bk := blobKey{
		eid:  eid,
		run:  run,
		tag:  tag,
		step: step,
	}
	for i := int64(0); i < n; i++ {
		bk.index = i
		refs[i] = &dppb.BlobReference{BlobKey: string(bk.encode())}
	}
	return &dppb.BlobReferenceSequence{BlobRefs: refs}
}
// maxWallTime returns the largest event wall time among the given data
// points, or -Inf when ds is empty.
func maxWallTime(ds []run.ValueDatum) float64 {
	best := math.Inf(-1)
	for i := range ds {
		if t := ds[i].EventWallTime; t > best {
			best = t
		}
	}
	return best
}
// maxLength gets the maximum length (leading dimension) of any of the given
// data points, whose summaries' time series should be
// DATA_CLASS_BLOB_SEQUENCE. Returns -1 when ds is empty.
//
// Fix over the original: data with a missing tensor/shape or rank-0 shape
// previously caused a nil-dereference or index-out-of-range panic via the
// direct TensorShape.Dim[0].Size access; such points are now skipped.
func maxLength(ds []run.ValueDatum) int64 {
	result := int64(-1)
	for _, d := range ds {
		dims := d.Value.GetTensor().GetTensorShape().GetDim()
		if len(dims) == 0 {
			continue
		}
		if length := dims[0].GetSize(); length > result {
			result = length
		}
	}
	return result
}
// downsampleValueData returns a copy of sample reduced to at most k points,
// delegating the choice of source indices to the shared downsample helper.
func downsampleValueData(sample []run.ValueDatum, k int) []run.ValueDatum {
	n := k
	if len(sample) < n {
		n = len(sample)
	}
	out := make([]run.ValueDatum, n)
	downsample(len(sample), n, func(src, dst int) {
		out[dst] = sample[src]
	})
	return out
}
|
package mysql
import (
"bufio"
"context"
"database/sql"
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"sync"
"github.com/fperf/fperf"
// use the mysql driver
_ "github.com/go-sql-driver/mysql"
)
// Placeholders that may appear in the benchmark SQL text; each occurrence is
// substituted with a generated value before the statement is executed.
const seqPlaceHolder = "__seq_int__"
const randPlaceHolder = "__rand_int__"
const randRangePlaceHolder = "__rand_range__" // 0 to rmax

// Generators backing the placeholders above. randRange is replaced in New
// when the -rmax flag is set to a positive value.
var seq = seqCreater(0)
var random = randCreater(10000000000000000)
var randRange = randCreater(0)
// seqCreater returns a goroutine-safe generator that yields consecutive
// integers starting at begin, formatted as decimal strings left-padded with
// zeros to 16 characters. Values whose decimal form is already 16 or more
// characters long are returned unpadded.
func seqCreater(begin int64) func() string {
	const width = 16
	next := begin
	var mu sync.Mutex
	return func() string {
		// Reserve the next value under the lock; format outside it.
		mu.Lock()
		cur := next
		next++
		mu.Unlock()
		s := strconv.FormatInt(cur, 10)
		if pad := width - len(s); pad > 0 {
			return strings.Repeat("0", pad) + s
		}
		return s
	}
}
// randCreater returns a generator of uniformly random integers in [0, max),
// formatted as decimal strings left-padded with zeros to 16 characters
// (longer values are returned unpadded).
//
// Fix over the original: rand.Int63n panics when its bound is <= 0, and the
// package-level randRange generator is created with max == 0 until the
// -rmax flag is applied — so using __rand_range__ without -rmax crashed.
// Generators with max <= 0 now always yield a (padded) "0" instead.
func randCreater(max int64) func() string {
	const width = 16
	return func() string {
		var v int64
		if max > 0 {
			v = rand.Int63n(max)
		}
		s := strconv.FormatInt(v, 10)
		if pad := width - len(s); pad > 0 {
			return strings.Repeat("0", pad) + s
		}
		return s
	}
}
// replaceSeq substitutes every occurrence of the sequence placeholder with
// the next value from the shared sequence generator. Note that all
// occurrences within one call receive the same value.
func replaceSeq(s string) string {
	return strings.Replace(s, seqPlaceHolder, seq(), -1)
}
// replaceRand substitutes every occurrence of the random-int placeholder
// with one freshly generated random value (same value for all occurrences
// within a single call).
func replaceRand(s string) string {
	return strings.Replace(s, randPlaceHolder, random(), -1)
}
// replaceRandRange substitutes every occurrence of the ranged-random
// placeholder with one value from the randRange generator (bounded by the
// -rmax flag).
func replaceRandRange(s string) string {
	return strings.Replace(s, randRangePlaceHolder, randRange(), -1)
}
// replace expands all supported placeholders in s. The Contains pre-checks
// matter: they avoid consuming generated values (and advancing the shared
// sequence counter) when a placeholder is absent.
//
// Idiom fix: uses strings.Contains instead of the original
// strings.Index(...) >= 0 (flagged by staticcheck S1003-style checks).
func replace(s string) string {
	if strings.Contains(s, seqPlaceHolder) {
		s = replaceSeq(s)
	}
	if strings.Contains(s, randPlaceHolder) {
		s = replaceRand(s)
	}
	if strings.Contains(s, randRangePlaceHolder) {
		s = replaceRandRange(s)
	}
	return s
}
// Kind distinguishes how a SQL statement must be driven through
// database/sql: via Query (returns rows) or Exec (returns no rows).
type Kind string

const (
	// Query statements return result sets (SELECT, SHOW).
	Query = Kind("query")
	// Exec statements return no rows (INSERT, DELETE, UPDATE, CREATE, DROP).
	Exec = Kind("exec")
)
// Statement pairs a SQL string (possibly containing placeholders) with the
// execution kind derived from its leading keyword in New.
type Statement struct {
	sql  string // raw SQL text; placeholders expanded per request
	kind Kind   // Query or Exec
}
// options holds the command-line flags for the mysql benchmark client.
type options struct {
	isolation int   // transaction isolation level (sql.IsolationLevel value)
	readonly  bool  // run transactions read-only
	stdin     bool  // also read SQL statements from stdin
	rmax      int64 // upper bound (exclusive) for __rand_range__
}
// Client is the fperf mysql benchmark client: a lazily-connected pool plus
// the parsed list of statements executed on every Request.
type Client struct {
	options
	db *sql.DB      // connection pool, set by Dial
	s  []*Statement // statements run (in order) per benchmark request
}
// loadStdin reads newline-delimited SQL from standard input and splits each
// line on ';' into individual statements. A trailing empty fragment (from a
// line ending in ';') is dropped; interior empty fragments are kept, as
// before.
//
// Fixes over the original: scanner errors (e.g. an over-long line) are no
// longer silently swallowed, and the impossible len(clauses) == 0 branch is
// gone (strings.Split always returns at least one element).
func loadStdin() []string {
	var sqls []string
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		clauses := strings.Split(scanner.Text(), ";")
		if clauses[len(clauses)-1] == "" {
			clauses = clauses[:len(clauses)-1]
		}
		sqls = append(sqls, clauses...)
	}
	if err := scanner.Err(); err != nil {
		// Best effort: report truncated input instead of ignoring it.
		fmt.Fprintln(os.Stderr, "error reading sqls from stdin:", err)
	}
	return sqls
}
// New constructs the mysql benchmark client from command-line flags. SQL
// statements come from the first positional argument (semicolon separated)
// and/or from stdin with -stdin, and are classified as Query or Exec by
// their leading keyword; anything unrecognized aborts the process.
//
// Fixes over the original: empty statements (from ";;", blank stdin lines,
// or surrounding whitespace) previously panicked on tokens[0] and are now
// skipped, and the "unkown sql" message typo is corrected.
func New(flag *fperf.FlagSet) fperf.Client {
	c := &Client{}
	flag.Usage = func() {
		fmt.Printf("Usage: mysql <sqls...>\n use __rand_int__ or __seq_int__ to generate random or sequence keys\n")
	}
	flag.IntVar(&c.options.isolation, "isolation", 0, "isolation level")
	flag.BoolVar(&c.options.readonly, "readonly", false, "readonly transaction")
	flag.BoolVar(&c.options.stdin, "stdin", false, "read sqls from stdin")
	flag.Int64Var(&c.options.rmax, "rmax", 0, "max value of __rand_range__")
	flag.Parse()
	if c.options.rmax > 0 {
		// Rebuild the ranged generator with the user-supplied bound.
		randRange = randCreater(c.options.rmax)
	}
	args := flag.Args()
	if len(args) == 0 && !c.options.stdin {
		flag.Usage()
		os.Exit(0)
	}
	var sqls []string
	if len(args) > 0 {
		sqls = strings.Split(strings.TrimSpace(args[0]), ";")
		if sqls[len(sqls)-1] == "" {
			sqls = sqls[0 : len(sqls)-1]
		}
	}
	if c.options.stdin {
		sqls = append(sqls, loadStdin()...)
	}
	for _, sql := range sqls {
		tokens := strings.Fields(sql)
		if len(tokens) == 0 {
			// Skip empty statements; the original panicked on tokens[0].
			continue
		}
		switch strings.ToLower(tokens[0]) {
		case "select", "show":
			c.s = append(c.s, &Statement{sql: sql, kind: Query})
		case "insert", "delete", "update", "create", "drop":
			c.s = append(c.s, &Statement{sql: sql, kind: Exec})
		default:
			fmt.Println("unknown sql:", sql)
			os.Exit(-1)
		}
	}
	return c
}
// Dial prepares the database handle for the given DSN. Note that sql.Open
// only validates the DSN and creates a lazy connection pool; no network I/O
// happens until the first statement runs.
func (c *Client) Dial(addr string) error {
	db, err := sql.Open("mysql", addr)
	if err != nil {
		return err
	}
	c.db = db
	return nil
}
// Request runs one benchmark iteration: every configured statement, with
// placeholders expanded, inside a single transaction.
//
// Fixes over the original: the -isolation and -readonly flags are now
// actually applied to the transaction (they were parsed but BeginTx was
// called with nil options), and rows.Err() is checked after draining each
// result set so iteration errors are no longer silently dropped.
func (c *Client) Request() error {
	ctx := context.Background()
	txn, err := c.db.BeginTx(ctx, &sql.TxOptions{
		Isolation: sql.IsolationLevel(c.options.isolation),
		ReadOnly:  c.options.readonly,
	})
	if err != nil {
		return err
	}
	// Rollback is a harmless no-op once Commit has succeeded.
	defer txn.Rollback()
	for _, s := range c.s {
		stmt := replace(s.sql)
		switch s.kind {
		case Query:
			rows, err := txn.QueryContext(ctx, stmt)
			if err != nil {
				return err
			}
			// Drain the result set; the driver requires it before the next
			// statement can run on this connection.
			for rows.Next() {
			}
			err = rows.Err()
			rows.Close()
			if err != nil {
				return err
			}
		case Exec:
			if _, err := txn.ExecContext(ctx, stmt); err != nil {
				return err
			}
		}
	}
	return txn.Commit()
}
// init registers this client with the fperf framework under the name
// "mysql" so it can be selected from the command line.
func init() {
	//rand.Seed(time.Now().UnixNano())
	fperf.Register("mysql", New, "mysql performance benchmark")
}
|
package main
import (
"fmt"
"reflect"
"testing"
"github.com/shopspring/decimal"
)
// testDouble checks that double always yields a float64 for assorted
// numeric strings.
//
// NOTE(review): the lowercase name means `go test` will never run this
// function; it should be renamed TestDouble to be picked up by the testing
// framework.
func testDouble(t *testing.T) {
	values := []string{"23.5", "45.7", "67.9", "98.1", "23.5", "12.00", "13.000332"}
	for _, num := range values {
		val := double(num)
		// reflect is used to inspect the dynamic type of double's result.
		if reflect.TypeOf(val).String() != "float64" {
			t.Error("Invalid Data Type, Must Always Be float64")
		}
	}
}
// testHandleErr exercises HandleErr with the error result of parsing a
// decimal string.
//
// NOTE(review): the lowercase name means `go test` will never run this
// (should be TestHandleErr). Also note the first Println executes before
// the error check, so price is printed even when parsing failed.
func testHandleErr(t *testing.T) {
	price, err := decimal.NewFromString("136.02")
	fmt.Println(price)
	if err != nil {
		HandleErr(err)
	} else {
		fmt.Println(price)
	}
}
|
/**
* (C) Copyright IBM Corp. 2021.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* IBM OpenAPI SDK Code Generator Version: 3.24.0-fac1d4cc-20210108-162022
*/
// Package ibmclouddatabasesv5 : Operations and models for the IbmCloudDatabasesV5 service
package ibmclouddatabasesv5
import (
"context"
"encoding/json"
"fmt"
common "github.com/IBM/experimental-go-sdk/common"
"github.com/IBM/go-sdk-core/v4/core"
"github.com/go-openapi/strfmt"
"net/http"
"reflect"
"time"
)
// IbmCloudDatabasesV5 : The IBM Cloud Databases API enables interaction between applications and Cloud Databases
// database deployments.
//
// Access to the API requires an IAM Bearer Token or an IAM API Key to be presented through bearer authentication.
//
// Deployment IDs are CRNs on the IBM Cloud Databases v5 API platform. No lookup or translation the Compose style UUIDs
// is needed. The Deployment ID is a traditional UUID on the Compose v5 API platform.
//
// When you use CRNs, remember to URL escape the CRN value as they can include the forward-slash (/) character.
//
// Version: 5.0.0
type IbmCloudDatabasesV5 struct {
	// Service is the underlying base service used to build and issue all
	// HTTP requests for this client.
	Service *core.BaseService
}
// DefaultServiceName is the default key used to find external configuration information.
const DefaultServiceName = "ibm_cloud_databases"

// IbmCloudDatabasesV5Options : Service options
type IbmCloudDatabasesV5Options struct {
	// ServiceName is the external-configuration key; defaults to
	// DefaultServiceName when empty.
	ServiceName string
	// URL overrides the configured service endpoint when non-empty.
	URL string
	// Authenticator supplies request credentials; when nil, the
	// external-config constructor resolves one from the environment.
	Authenticator core.Authenticator
}
// NewIbmCloudDatabasesV5UsingExternalConfig : constructs an instance of IbmCloudDatabasesV5 with passed in options and external configuration.
// Missing fields (service name, authenticator, URL) are resolved from the
// external configuration identified by options.ServiceName.
func NewIbmCloudDatabasesV5UsingExternalConfig(options *IbmCloudDatabasesV5Options) (ibmCloudDatabases *IbmCloudDatabasesV5, err error) {
	if options.ServiceName == "" {
		options.ServiceName = DefaultServiceName
	}
	if options.Authenticator == nil {
		options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName)
		if err != nil {
			return
		}
	}
	ibmCloudDatabases, err = NewIbmCloudDatabasesV5(options)
	if err != nil {
		return
	}
	err = ibmCloudDatabases.Service.ConfigureService(options.ServiceName)
	if err != nil {
		return
	}
	// An explicit URL in options takes precedence over the configured one.
	if options.URL != "" {
		err = ibmCloudDatabases.Service.SetServiceURL(options.URL)
	}
	return
}
// NewIbmCloudDatabasesV5 : constructs an instance of IbmCloudDatabasesV5 with passed in options.
// Unlike the external-config variant, nothing is read from the environment;
// the caller must supply the authenticator (and optionally the URL).
func NewIbmCloudDatabasesV5(options *IbmCloudDatabasesV5Options) (service *IbmCloudDatabasesV5, err error) {
	serviceOptions := &core.ServiceOptions{
		Authenticator: options.Authenticator,
	}
	baseService, err := core.NewBaseService(serviceOptions)
	if err != nil {
		return
	}
	if options.URL != "" {
		err = baseService.SetServiceURL(options.URL)
		if err != nil {
			return
		}
	}
	service = &IbmCloudDatabasesV5{
		Service: baseService,
	}
	return
}
// GetServiceURLForRegion returns the service URL to be used for the
// specified region. This service exposes no regional endpoints, so an error
// is returned regardless of the region argument.
func GetServiceURLForRegion(region string) (string, error) {
	err := fmt.Errorf("service does not support regional URLs")
	return "", err
}
// Clone makes a copy of "ibmCloudDatabases" suitable for processing requests.
// The clone gets its own BaseService, so per-instance settings (headers,
// retries, URL) do not leak between clones. A nil receiver yields nil.
func (ibmCloudDatabases *IbmCloudDatabasesV5) Clone() *IbmCloudDatabasesV5 {
	if core.IsNil(ibmCloudDatabases) {
		return nil
	}
	clone := *ibmCloudDatabases
	clone.Service = ibmCloudDatabases.Service.Clone()
	return &clone
}
// SetServiceURL sets the service URL, delegating validation to the
// underlying base service.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetServiceURL(url string) error {
	return ibmCloudDatabases.Service.SetServiceURL(url)
}
// GetServiceURL returns the service URL currently configured on the
// underlying base service.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetServiceURL() string {
	return ibmCloudDatabases.Service.GetServiceURL()
}
// SetDefaultHeaders sets HTTP headers to be sent in every request made
// through this service instance.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetDefaultHeaders(headers http.Header) {
	ibmCloudDatabases.Service.SetDefaultHeaders(headers)
}
// SetEnableGzipCompression sets the service's EnableGzipCompression field,
// controlling whether request bodies are gzip-compressed.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetEnableGzipCompression(enableGzip bool) {
	ibmCloudDatabases.Service.SetEnableGzipCompression(enableGzip)
}
// GetEnableGzipCompression returns the service's EnableGzipCompression
// field (whether request bodies are gzip-compressed).
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetEnableGzipCompression() bool {
	return ibmCloudDatabases.Service.GetEnableGzipCompression()
}
// EnableRetries enables automatic retries for requests invoked for this service instance.
// If either parameter is specified as 0, then a default value is used instead.
func (ibmCloudDatabases *IbmCloudDatabasesV5) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
	ibmCloudDatabases.Service.EnableRetries(maxRetries, maxRetryInterval)
}
// DisableRetries disables automatic retries for requests invoked for this
// service instance.
func (ibmCloudDatabases *IbmCloudDatabasesV5) DisableRetries() {
	ibmCloudDatabases.Service.DisableRetries()
}
// GetDeployables : Get all deployable databases
// Returns a list of all the types and associated major versions of database deployments that can be provisioned.
// Delegates to GetDeployablesWithContext with context.Background().
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeployables(getDeployablesOptions *GetDeployablesOptions) (result *GetDeployablesResponse, response *core.DetailedResponse, err error) {
	return ibmCloudDatabases.GetDeployablesWithContext(context.Background(), getDeployablesOptions)
}
// GetDeployablesWithContext is an alternate form of the GetDeployables method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeployablesWithContext(ctx context.Context, getDeployablesOptions *GetDeployablesOptions) (result *GetDeployablesResponse, response *core.DetailedResponse, err error) {
	err = core.ValidateStruct(getDeployablesOptions, "getDeployablesOptions")
	if err != nil {
		return
	}
	// Build the GET /deployables request.
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployables`, nil)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getDeployablesOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetDeployables")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Issue the request and unmarshal the JSON body into the typed result.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetDeployablesResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetRegions : Get all deployable regions
// Returns a list of all the regions that deployments can be provisioned into from the current region. Used to determine
// region availability for read-only replicas.
// Delegates to GetRegionsWithContext with context.Background().
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetRegions(getRegionsOptions *GetRegionsOptions) (result *GetRegionsResponse, response *core.DetailedResponse, err error) {
	return ibmCloudDatabases.GetRegionsWithContext(context.Background(), getRegionsOptions)
}
// GetRegionsWithContext is an alternate form of the GetRegions method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetRegionsWithContext(ctx context.Context, getRegionsOptions *GetRegionsOptions) (result *GetRegionsResponse, response *core.DetailedResponse, err error) {
	err = core.ValidateStruct(getRegionsOptions, "getRegionsOptions")
	if err != nil {
		return
	}
	// Build the GET /regions request.
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/regions`, nil)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getRegionsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetRegions")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Issue the request and unmarshal the JSON body into the typed result.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetRegionsResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetDeploymentInfo : Get deployment information
// Gets the full data that is associated with a deployment. This data includes the ID, name, database type, and version.
// Delegates to GetDeploymentInfoWithContext with context.Background().
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeploymentInfo(getDeploymentInfoOptions *GetDeploymentInfoOptions) (result *GetDeploymentInfoResponse, response *core.DetailedResponse, err error) {
	return ibmCloudDatabases.GetDeploymentInfoWithContext(context.Background(), getDeploymentInfoOptions)
}
// GetDeploymentInfoWithContext is an alternate form of the GetDeploymentInfo method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeploymentInfoWithContext(ctx context.Context, getDeploymentInfoOptions *GetDeploymentInfoOptions) (result *GetDeploymentInfoResponse, response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(getDeploymentInfoOptions, "getDeploymentInfoOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getDeploymentInfoOptions, "getDeploymentInfoOptions")
	if err != nil {
		return
	}
	// The deployment ID (a CRN) is substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *getDeploymentInfoOptions.ID,
	}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getDeploymentInfoOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetDeploymentInfo")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Issue the request and unmarshal the JSON body into the typed result.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetDeploymentInfoResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// CreateDatabaseUser : Creates a user based on user type
// Creates a user in the database that can access the database through a connection.
// Delegates to CreateDatabaseUserWithContext with context.Background().
func (ibmCloudDatabases *IbmCloudDatabasesV5) CreateDatabaseUser(createDatabaseUserOptions *CreateDatabaseUserOptions) (result *CreateDatabaseUserResponse, response *core.DetailedResponse, err error) {
	return ibmCloudDatabases.CreateDatabaseUserWithContext(context.Background(), createDatabaseUserOptions)
}
// CreateDatabaseUserWithContext is an alternate form of the CreateDatabaseUser method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) CreateDatabaseUserWithContext(ctx context.Context, createDatabaseUserOptions *CreateDatabaseUserOptions) (result *CreateDatabaseUserResponse, response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(createDatabaseUserOptions, "createDatabaseUserOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(createDatabaseUserOptions, "createDatabaseUserOptions")
	if err != nil {
		return
	}
	// Deployment ID and user type are substituted into the URL template.
	pathParamsMap := map[string]string{
		"id":        *createDatabaseUserOptions.ID,
		"user_type": *createDatabaseUserOptions.UserType,
	}
	builder := core.NewRequestBuilder(core.POST)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/users/{user_type}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range createDatabaseUserOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "CreateDatabaseUser")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	builder.AddHeader("Content-Type", "application/json")
	// The request body carries the user definition, when supplied.
	body := make(map[string]interface{})
	if createDatabaseUserOptions.User != nil {
		body["user"] = createDatabaseUserOptions.User
	}
	_, err = builder.SetBodyContentJSON(body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Issue the request and unmarshal the JSON body into the typed result.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateDatabaseUserResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// ChangeUserPassword : Set specified user's password
// Sets the password of a specified user.
// Delegates to ChangeUserPasswordWithContext with context.Background().
func (ibmCloudDatabases *IbmCloudDatabasesV5) ChangeUserPassword(changeUserPasswordOptions *ChangeUserPasswordOptions) (result *ChangeUserPasswordResponse, response *core.DetailedResponse, err error) {
	return ibmCloudDatabases.ChangeUserPasswordWithContext(context.Background(), changeUserPasswordOptions)
}
// ChangeUserPasswordWithContext is an alternate form of the ChangeUserPassword method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) ChangeUserPasswordWithContext(ctx context.Context, changeUserPasswordOptions *ChangeUserPasswordOptions) (result *ChangeUserPasswordResponse, response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(changeUserPasswordOptions, "changeUserPasswordOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(changeUserPasswordOptions, "changeUserPasswordOptions")
	if err != nil {
		return
	}
	// Deployment ID, user type, and username identify the target user.
	pathParamsMap := map[string]string{
		"id":        *changeUserPasswordOptions.ID,
		"user_type": *changeUserPasswordOptions.UserType,
		"username":  *changeUserPasswordOptions.Username,
	}
	builder := core.NewRequestBuilder(core.PATCH)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/users/{user_type}/{username}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range changeUserPasswordOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "ChangeUserPassword")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	builder.AddHeader("Content-Type", "application/json")
	// The request body carries the user object holding the new password.
	body := make(map[string]interface{})
	if changeUserPasswordOptions.User != nil {
		body["user"] = changeUserPasswordOptions.User
	}
	_, err = builder.SetBodyContentJSON(body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Issue the request and unmarshal the JSON body into the typed result.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalChangeUserPasswordResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// DeleteDatabaseUser : Deletes a user based on user type
// Removes a user from the deployment.
// Delegates to DeleteDatabaseUserWithContext with context.Background().
func (ibmCloudDatabases *IbmCloudDatabasesV5) DeleteDatabaseUser(deleteDatabaseUserOptions *DeleteDatabaseUserOptions) (result *DeleteDatabaseUserResponse, response *core.DetailedResponse, err error) {
	return ibmCloudDatabases.DeleteDatabaseUserWithContext(context.Background(), deleteDatabaseUserOptions)
}
// DeleteDatabaseUserWithContext is an alternate form of the DeleteDatabaseUser method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) DeleteDatabaseUserWithContext(ctx context.Context, deleteDatabaseUserOptions *DeleteDatabaseUserOptions) (result *DeleteDatabaseUserResponse, response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(deleteDatabaseUserOptions, "deleteDatabaseUserOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(deleteDatabaseUserOptions, "deleteDatabaseUserOptions")
	if err != nil {
		return
	}
	// Deployment ID, user type, and username identify the target user.
	pathParamsMap := map[string]string{
		"id":        *deleteDatabaseUserOptions.ID,
		"user_type": *deleteDatabaseUserOptions.UserType,
		"username":  *deleteDatabaseUserOptions.Username,
	}
	builder := core.NewRequestBuilder(core.DELETE)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/users/{user_type}/{username}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range deleteDatabaseUserOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "DeleteDatabaseUser")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Issue the request and unmarshal the JSON body into the typed result.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteDatabaseUserResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetUser : Discover user name and password information for a deployment for a user with an endpoint type
// Only for Redis v5 and prior: Discover connection information for a deployment for a user with an endpoint type.
// Delegates to GetUserWithContext with context.Background().
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetUser(getUserOptions *GetUserOptions) (result *Task, response *core.DetailedResponse, err error) {
	return ibmCloudDatabases.GetUserWithContext(context.Background(), getUserOptions)
}
// GetUserWithContext is an alternate form of the GetUser method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetUserWithContext(ctx context.Context, getUserOptions *GetUserOptions) (result *Task, response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(getUserOptions, "getUserOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getUserOptions, "getUserOptions")
	if err != nil {
		return
	}
	pathParamsMap := map[string]string{
		"id":            *getUserOptions.ID,
		"user_id":       *getUserOptions.UserID,
		"endpoint_type": *getUserOptions.EndpointType,
	}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	// NOTE(review): "endpoint_type" is populated in pathParamsMap above, but
	// the URL template below has no {endpoint_type} segment (note the
	// trailing slash) — confirm against the service spec whether the path
	// should end in /{endpoint_type}.
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/users/{user_id}/`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getUserOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetUser")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Issue the request and unmarshal the JSON body into the typed result.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTask)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// SetDatabaseConfiguration : Change your database configuration
// Change your database configuration. Available for PostgreSQL, EnterpriseDB, and Redis ONLY.
//
// SetDatabaseConfiguration is a convenience wrapper that delegates to SetDatabaseConfigurationWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetDatabaseConfiguration(setDatabaseConfigurationOptions *SetDatabaseConfigurationOptions) (result *SetDatabaseConfigurationResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.SetDatabaseConfigurationWithContext(context.Background(), setDatabaseConfigurationOptions)
	return
}
// SetDatabaseConfigurationWithContext is an alternate form of the SetDatabaseConfiguration method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetDatabaseConfigurationWithContext(ctx context.Context, setDatabaseConfigurationOptions *SetDatabaseConfigurationOptions) (result *SetDatabaseConfigurationResponse, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(setDatabaseConfigurationOptions, "setDatabaseConfigurationOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(setDatabaseConfigurationOptions, "setDatabaseConfigurationOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *setDatabaseConfigurationOptions.ID,
	}
	// PATCH /deployments/{id}/configuration
	builder := core.NewRequestBuilder(core.PATCH)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/configuration`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range setDatabaseConfigurationOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "SetDatabaseConfiguration")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	builder.AddHeader("Content-Type", "application/json")
	// Only a non-nil configuration is serialized into the JSON request body.
	body := make(map[string]interface{})
	if setDatabaseConfigurationOptions.Configuration != nil {
		body["configuration"] = setDatabaseConfigurationOptions.Configuration
	}
	_, err = builder.SetBodyContentJSON(body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSetDatabaseConfigurationResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetDatabaseConfigurationSchema : Get the schema of the database configuration
// Get the schema of the database configuration. Available for PostgreSQL and Redis ONLY.
//
// GetDatabaseConfigurationSchema is a convenience wrapper that delegates to GetDatabaseConfigurationSchemaWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDatabaseConfigurationSchema(getDatabaseConfigurationSchemaOptions *GetDatabaseConfigurationSchemaOptions) (result *GetDatabaseConfigurationSchemaResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetDatabaseConfigurationSchemaWithContext(context.Background(), getDatabaseConfigurationSchemaOptions)
	return
}
// GetDatabaseConfigurationSchemaWithContext is an alternate form of the GetDatabaseConfigurationSchema method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDatabaseConfigurationSchemaWithContext(ctx context.Context, getDatabaseConfigurationSchemaOptions *GetDatabaseConfigurationSchemaOptions) (result *GetDatabaseConfigurationSchemaResponse, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getDatabaseConfigurationSchemaOptions, "getDatabaseConfigurationSchemaOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getDatabaseConfigurationSchemaOptions, "getDatabaseConfigurationSchemaOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *getDatabaseConfigurationSchemaOptions.ID,
	}
	// GET /deployments/{id}/configuration/schema
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/configuration/schema`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getDatabaseConfigurationSchemaOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetDatabaseConfigurationSchema")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetDatabaseConfigurationSchemaResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetRemotes : Get read-only replica information
// Get the read-only replicas associated with a deployment. Available for PostgreSQL and EnterpriseDB ONLY.
//
// GetRemotes is a convenience wrapper that delegates to GetRemotesWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetRemotes(getRemotesOptions *GetRemotesOptions) (result *GetRemotesResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetRemotesWithContext(context.Background(), getRemotesOptions)
	return
}
// GetRemotesWithContext is an alternate form of the GetRemotes method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetRemotesWithContext(ctx context.Context, getRemotesOptions *GetRemotesOptions) (result *GetRemotesResponse, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getRemotesOptions, "getRemotesOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getRemotesOptions, "getRemotesOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *getRemotesOptions.ID,
	}
	// GET /deployments/{id}/remotes
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/remotes`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getRemotesOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetRemotes")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetRemotesResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// SetRemotes : Modify read-only replication on a deployment
// Promote a read-only remote replica to leader by calling with leader set to an empty string. Available for PostgreSQL
// and EnterpriseDB ONLY.
//
// SetRemotes is a convenience wrapper that delegates to SetRemotesWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetRemotes(setRemotesOptions *SetRemotesOptions) (result *SetRemotesResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.SetRemotesWithContext(context.Background(), setRemotesOptions)
	return
}
// SetRemotesWithContext is an alternate form of the SetRemotes method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetRemotesWithContext(ctx context.Context, setRemotesOptions *SetRemotesOptions) (result *SetRemotesResponse, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(setRemotesOptions, "setRemotesOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(setRemotesOptions, "setRemotesOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *setRemotesOptions.ID,
	}
	// PATCH /deployments/{id}/remotes
	builder := core.NewRequestBuilder(core.PATCH)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/remotes`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range setRemotesOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "SetRemotes")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	builder.AddHeader("Content-Type", "application/json")
	// Only non-nil optional fields are serialized into the JSON request body.
	body := make(map[string]interface{})
	if setRemotesOptions.Remotes != nil {
		body["remotes"] = setRemotesOptions.Remotes
	}
	if setRemotesOptions.SkipInitialBackup != nil {
		body["skip_initial_backup"] = setRemotesOptions.SkipInitialBackup
	}
	_, err = builder.SetBodyContentJSON(body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSetRemotesResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetRemotesSchema : Resync read-only replica
// Reinitialize a read-only replica. Available for PostgreSQL and EnterpriseDB ONLY.
//
// GetRemotesSchema is a convenience wrapper that delegates to GetRemotesSchemaWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetRemotesSchema(getRemotesSchemaOptions *GetRemotesSchemaOptions) (result *GetRemotesSchemaResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetRemotesSchemaWithContext(context.Background(), getRemotesSchemaOptions)
	return
}
// GetRemotesSchemaWithContext is an alternate form of the GetRemotesSchema method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetRemotesSchemaWithContext(ctx context.Context, getRemotesSchemaOptions *GetRemotesSchemaOptions) (result *GetRemotesSchemaResponse, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getRemotesSchemaOptions, "getRemotesSchemaOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getRemotesSchemaOptions, "getRemotesSchemaOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *getRemotesSchemaOptions.ID,
	}
	// NOTE: despite the "Schema" name, this is a POST to the resync endpoint
	// (/deployments/{id}/remotes/resync), matching the method's doc comment.
	builder := core.NewRequestBuilder(core.POST)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/remotes/resync`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getRemotesSchemaOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetRemotesSchema")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetRemotesSchemaResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// SetPromotion : Promote read-only replica to a full deployment
// Promote a read-only replica or upgrade and promote a read-only replica. Available for PostgreSQL and EnterpriseDB
// ONLY.
//
// SetPromotion is a convenience wrapper that delegates to SetPromotionWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetPromotion(setPromotionOptions *SetPromotionOptions) (result *SetPromotionResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.SetPromotionWithContext(context.Background(), setPromotionOptions)
	return
}
// SetPromotionWithContext is an alternate form of the SetPromotion method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetPromotionWithContext(ctx context.Context, setPromotionOptions *SetPromotionOptions) (result *SetPromotionResponse, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(setPromotionOptions, "setPromotionOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(setPromotionOptions, "setPromotionOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *setPromotionOptions.ID,
	}
	// POST /deployments/{id}/remotes/promotion
	builder := core.NewRequestBuilder(core.POST)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/remotes/promotion`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range setPromotionOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "SetPromotion")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	builder.AddHeader("Content-Type", "application/json")
	// FIX: the body key was previously "Promotion" (capitalized), which is
	// inconsistent with every other request body key in this service (all
	// lowercase snake_case, e.g. "remotes", "skip_initial_backup"); use the
	// lowercase "promotion" key the service expects.
	body := make(map[string]interface{})
	if setPromotionOptions.Promotion != nil {
		body["promotion"] = setPromotionOptions.Promotion
	}
	_, err = builder.SetBodyContentJSON(body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSetPromotionResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetDeploymentTasks : Get currently running tasks on a deployment
// Obtain a list of tasks currently running or recently run on a deployment. Tasks are ephemeral. Records of successful
// tasks are shown for 24-48 hours, and unsuccessful tasks are shown for 7-8 days.
//
// GetDeploymentTasks is a convenience wrapper that delegates to GetDeploymentTasksWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeploymentTasks(getDeploymentTasksOptions *GetDeploymentTasksOptions) (result *Tasks, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetDeploymentTasksWithContext(context.Background(), getDeploymentTasksOptions)
	return
}
// GetDeploymentTasksWithContext is an alternate form of the GetDeploymentTasks method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeploymentTasksWithContext(ctx context.Context, getDeploymentTasksOptions *GetDeploymentTasksOptions) (result *Tasks, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getDeploymentTasksOptions, "getDeploymentTasksOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getDeploymentTasksOptions, "getDeploymentTasksOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *getDeploymentTasksOptions.ID,
	}
	// GET /deployments/{id}/tasks
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/tasks`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getDeploymentTasksOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetDeploymentTasks")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed Tasks model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTasks)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetTasks : Get information about a task
// Get information about a task and its status. Tasks themselves are persistent so old tasks can be consulted as well as
// running tasks.
//
// GetTasks is a convenience wrapper that delegates to GetTasksWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetTasks(getTasksOptions *GetTasksOptions) (result *GetTasksResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetTasksWithContext(context.Background(), getTasksOptions)
	return
}
// GetTasksWithContext is an alternate form of the GetTasks method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetTasksWithContext(ctx context.Context, getTasksOptions *GetTasksOptions) (result *GetTasksResponse, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getTasksOptions, "getTasksOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getTasksOptions, "getTasksOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below; note the ID here
	// is a task ID, not a deployment ID.
	pathParamsMap := map[string]string{
		"id": *getTasksOptions.ID,
	}
	// GET /tasks/{id}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/tasks/{id}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getTasksOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetTasks")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetTasksResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetBackupInfo : Get information about a backup
// Get information about a backup, such as creation date.
//
// GetBackupInfo is a convenience wrapper that delegates to GetBackupInfoWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetBackupInfo(getBackupInfoOptions *GetBackupInfoOptions) (result *GetBackupInfoResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetBackupInfoWithContext(context.Background(), getBackupInfoOptions)
	return
}
// GetBackupInfoWithContext is an alternate form of the GetBackupInfo method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetBackupInfoWithContext(ctx context.Context, getBackupInfoOptions *GetBackupInfoOptions) (result *GetBackupInfoResponse, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getBackupInfoOptions, "getBackupInfoOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getBackupInfoOptions, "getBackupInfoOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"backup_id": *getBackupInfoOptions.BackupID,
	}
	// GET /backups/{backup_id}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/backups/{backup_id}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getBackupInfoOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetBackupInfo")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetBackupInfoResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetDeploymentBackups : Get currently available backups from a deployment
// Get details of all currently available backups from a deployment.
//
// GetDeploymentBackups is a convenience wrapper that delegates to GetDeploymentBackupsWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeploymentBackups(getDeploymentBackupsOptions *GetDeploymentBackupsOptions) (result *Backups, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetDeploymentBackupsWithContext(context.Background(), getDeploymentBackupsOptions)
	return
}
// GetDeploymentBackupsWithContext is an alternate form of the GetDeploymentBackups method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeploymentBackupsWithContext(ctx context.Context, getDeploymentBackupsOptions *GetDeploymentBackupsOptions) (result *Backups, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getDeploymentBackupsOptions, "getDeploymentBackupsOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getDeploymentBackupsOptions, "getDeploymentBackupsOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *getDeploymentBackupsOptions.ID,
	}
	// GET /deployments/{id}/backups
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/backups`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getDeploymentBackupsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetDeploymentBackups")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed Backups model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalBackups)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// StartOndemandBackup : Initiate an on-demand backup
// Signal the platform to create an on-demand backup for the specified deployment. The returned task can be polled to
// track progress of the backup as it takes place.
//
// StartOndemandBackup is a convenience wrapper that delegates to StartOndemandBackupWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) StartOndemandBackup(startOndemandBackupOptions *StartOndemandBackupOptions) (result *StartOndemandBackupResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.StartOndemandBackupWithContext(context.Background(), startOndemandBackupOptions)
	return
}
// StartOndemandBackupWithContext is an alternate form of the StartOndemandBackup method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) StartOndemandBackupWithContext(ctx context.Context, startOndemandBackupOptions *StartOndemandBackupOptions) (result *StartOndemandBackupResponse, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(startOndemandBackupOptions, "startOndemandBackupOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(startOndemandBackupOptions, "startOndemandBackupOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *startOndemandBackupOptions.ID,
	}
	// POST /deployments/{id}/backups (no request body; the POST itself triggers the backup).
	builder := core.NewRequestBuilder(core.POST)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/backups`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range startOndemandBackupOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "StartOndemandBackup")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalStartOndemandBackupResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetPITRdata : Get earliest point-in-time-recovery timestamp
// Returns the earliest available time for point-in-time-recovery in ISO8601 UTC format. PostgreSQL and EnterpriseDB
// only.
//
// GetPITRdata is a convenience wrapper that delegates to GetPITRdataWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetPITRdata(getPITRdataOptions *GetPITRdataOptions) (result *PointInTimeRecoveryData, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetPITRdataWithContext(context.Background(), getPITRdataOptions)
	return
}
// GetPITRdataWithContext is an alternate form of the GetPITRdata method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetPITRdataWithContext(ctx context.Context, getPITRdataOptions *GetPITRdataOptions) (result *PointInTimeRecoveryData, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getPITRdataOptions, "getPITRdataOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getPITRdataOptions, "getPITRdataOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id": *getPITRdataOptions.ID,
	}
	// GET /deployments/{id}/point_in_time_recovery_data
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/point_in_time_recovery_data`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getPITRdataOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetPITRdata")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed response model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPointInTimeRecoveryData)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetConnection : Discover connection information for a deployment for a user with an endpoint type
// Discover connection information for a deployment for a user with an endpoint type.
//
// GetConnection is a convenience wrapper that delegates to GetConnectionWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetConnection(getConnectionOptions *GetConnectionOptions) (result *Connection, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetConnectionWithContext(context.Background(), getConnectionOptions)
	return
}
// GetConnectionWithContext is an alternate form of the GetConnection method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetConnectionWithContext(ctx context.Context, getConnectionOptions *GetConnectionOptions) (result *Connection, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getConnectionOptions, "getConnectionOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getConnectionOptions, "getConnectionOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id":            *getConnectionOptions.ID,
		"user_type":     *getConnectionOptions.UserType,
		"user_id":       *getConnectionOptions.UserID,
		"endpoint_type": *getConnectionOptions.EndpointType,
	}
	// GET /deployments/{id}/users/{user_type}/{user_id}/connections/{endpoint_type}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/users/{user_type}/{user_id}/connections/{endpoint_type}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getConnectionOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetConnection")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	// Optional certificate_root is passed as a query parameter, not in the path.
	if getConnectionOptions.CertificateRoot != nil {
		builder.AddQuery("certificate_root", fmt.Sprint(*getConnectionOptions.CertificateRoot))
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed Connection model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalConnection)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// CompleteConnection : Discover connection information for a deployment for a user with substitutions and an endpoint type
// Discover connection information for a deployment for a user. Behaves the same as the GET method but substitutes the
// provided password parameter into the returned connection information.
//
// CompleteConnection is a convenience wrapper that delegates to CompleteConnectionWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) CompleteConnection(completeConnectionOptions *CompleteConnectionOptions) (result *Connection, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.CompleteConnectionWithContext(context.Background(), completeConnectionOptions)
	return
}
// CompleteConnectionWithContext is an alternate form of the CompleteConnection method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) CompleteConnectionWithContext(ctx context.Context, completeConnectionOptions *CompleteConnectionOptions) (result *Connection, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(completeConnectionOptions, "completeConnectionOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(completeConnectionOptions, "completeConnectionOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"id":            *completeConnectionOptions.ID,
		"user_type":     *completeConnectionOptions.UserType,
		"user_id":       *completeConnectionOptions.UserID,
		"endpoint_type": *completeConnectionOptions.EndpointType,
	}
	// POST to the same path as GetConnection; the body carries the substitution inputs.
	builder := core.NewRequestBuilder(core.POST)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/users/{user_type}/{user_id}/connections/{endpoint_type}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range completeConnectionOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "CompleteConnection")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	builder.AddHeader("Content-Type", "application/json")
	// Only non-nil optional fields are serialized into the JSON request body.
	body := make(map[string]interface{})
	if completeConnectionOptions.Password != nil {
		body["password"] = completeConnectionOptions.Password
	}
	if completeConnectionOptions.CertificateRoot != nil {
		body["certificate_root"] = completeConnectionOptions.CertificateRoot
	}
	_, err = builder.SetBodyContentJSON(body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed Connection model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalConnection)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// GetConnectionDeprecated : Discover connection information for a deployment for a user
// Discover connection information for a deployment for a user.
//
// GetConnectionDeprecated is a convenience wrapper that delegates to GetConnectionDeprecatedWithContext with a background context.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetConnectionDeprecated(getConnectionDeprecatedOptions *GetConnectionDeprecatedOptions) (result *Connection, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetConnectionDeprecatedWithContext(context.Background(), getConnectionDeprecatedOptions)
	return
}
// GetConnectionDeprecatedWithContext is an alternate form of the GetConnectionDeprecated method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetConnectionDeprecatedWithContext(ctx context.Context, getConnectionDeprecatedOptions *GetConnectionDeprecatedOptions) (result *Connection, response *core.DetailedResponse, err error) {
	// Guard against a nil options struct, then validate required fields.
	err = core.ValidateNotNil(getConnectionDeprecatedOptions, "getConnectionDeprecatedOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getConnectionDeprecatedOptions, "getConnectionDeprecatedOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below (no endpoint_type
	// segment — this is the older, deprecated form of the connections endpoint).
	pathParamsMap := map[string]string{
		"id":        *getConnectionDeprecatedOptions.ID,
		"user_type": *getConnectionDeprecatedOptions.UserType,
		"user_id":   *getConnectionDeprecatedOptions.UserID,
	}
	// GET /deployments/{id}/users/{user_type}/{user_id}/connections
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/users/{user_type}/{user_id}/connections`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getConnectionDeprecatedOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetConnectionDeprecated")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	// Optional certificate_root is passed as a query parameter, not in the path.
	if getConnectionDeprecatedOptions.CertificateRoot != nil {
		builder.AddQuery("certificate_root", fmt.Sprint(*getConnectionDeprecatedOptions.CertificateRoot))
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode into raw JSON first, then into the typed Connection model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalConnection)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// CompleteConnectionDeprecated : Discover connection information for a deployment for a user with substitutions
// Behaves the same as GetConnectionDeprecated but substitutes the given password parameter
// into the returned connection information. Uses a background context; call
// CompleteConnectionDeprecatedWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) CompleteConnectionDeprecated(completeConnectionDeprecatedOptions *CompleteConnectionDeprecatedOptions) (result *Connection, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.CompleteConnectionDeprecatedWithContext(context.Background(), completeConnectionDeprecatedOptions)
	return result, response, err
}
// CompleteConnectionDeprecatedWithContext is an alternate form of the CompleteConnectionDeprecated
// method that honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) CompleteConnectionDeprecatedWithContext(ctx context.Context, completeConnectionDeprecatedOptions *CompleteConnectionDeprecatedOptions) (result *Connection, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(completeConnectionDeprecatedOptions, "completeConnectionDeprecatedOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(completeConnectionDeprecatedOptions, "completeConnectionDeprecatedOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.POST).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/users/{user_type}/{user_id}/connections`, map[string]string{
		"id":        *completeConnectionDeprecatedOptions.ID,
		"user_type": *completeConnectionDeprecatedOptions.UserType,
		"user_id":   *completeConnectionDeprecatedOptions.UserID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range completeConnectionDeprecatedOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "CompleteConnectionDeprecated") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")
	reqBuilder.AddHeader("Content-Type", "application/json")

	// Only fields the caller actually set are serialized into the body.
	body := map[string]interface{}{}
	if completeConnectionDeprecatedOptions.Password != nil {
		body["password"] = completeConnectionDeprecatedOptions.Password
	}
	if completeConnectionDeprecatedOptions.CertificateRoot != nil {
		body["certificate_root"] = completeConnectionDeprecatedOptions.CertificateRoot
	}
	if _, err = reqBuilder.SetBodyContentJSON(body); err != nil {
		return result, response, err
	}

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalConnection); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// GetDeploymentScalingGroups : Get currently available scaling groups from a deployment
// Scaling groups represent the various resources that are allocated to a deployment; this
// retrieves all of the groups for a particular deployment. Uses a background context; call
// GetDeploymentScalingGroupsWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeploymentScalingGroups(getDeploymentScalingGroupsOptions *GetDeploymentScalingGroupsOptions) (result *Groups, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetDeploymentScalingGroupsWithContext(context.Background(), getDeploymentScalingGroupsOptions)
	return result, response, err
}
// GetDeploymentScalingGroupsWithContext is an alternate form of the GetDeploymentScalingGroups
// method that honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDeploymentScalingGroupsWithContext(ctx context.Context, getDeploymentScalingGroupsOptions *GetDeploymentScalingGroupsOptions) (result *Groups, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(getDeploymentScalingGroupsOptions, "getDeploymentScalingGroupsOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(getDeploymentScalingGroupsOptions, "getDeploymentScalingGroupsOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.GET).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/groups`, map[string]string{
		"id": *getDeploymentScalingGroupsOptions.ID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range getDeploymentScalingGroupsOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetDeploymentScalingGroups") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGroups); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// GetDefaultScalingGroups : Get default scaling groups for a new deployment
// Each database type has a set of default scaling groups for new deployments; this
// returns them for a particular database type. Uses a background context; call
// GetDefaultScalingGroupsWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDefaultScalingGroups(getDefaultScalingGroupsOptions *GetDefaultScalingGroupsOptions) (result *Groups, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetDefaultScalingGroupsWithContext(context.Background(), getDefaultScalingGroupsOptions)
	return result, response, err
}
// GetDefaultScalingGroupsWithContext is an alternate form of the GetDefaultScalingGroups
// method that honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetDefaultScalingGroupsWithContext(ctx context.Context, getDefaultScalingGroupsOptions *GetDefaultScalingGroupsOptions) (result *Groups, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(getDefaultScalingGroupsOptions, "getDefaultScalingGroupsOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(getDefaultScalingGroupsOptions, "getDefaultScalingGroupsOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.GET).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployables/{type}/groups`, map[string]string{
		"type": *getDefaultScalingGroupsOptions.Type,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range getDefaultScalingGroupsOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetDefaultScalingGroups") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGroups); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// SetDeploymentScalingGroup : Set scaling values on a specified group
// Set scaling value on a specified group. Can only be performed on is_adjustable=true
// groups. Values set are for the group as a whole and resources are distributed amongst
// the group; values must be greater than or equal to the minimum size and a multiple of
// the step size. Uses a background context; call SetDeploymentScalingGroupWithContext to
// control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetDeploymentScalingGroup(setDeploymentScalingGroupOptions *SetDeploymentScalingGroupOptions) (result *SetDeploymentScalingGroupResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.SetDeploymentScalingGroupWithContext(context.Background(), setDeploymentScalingGroupOptions)
	return result, response, err
}
// SetDeploymentScalingGroupWithContext is an alternate form of the SetDeploymentScalingGroup
// method that honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetDeploymentScalingGroupWithContext(ctx context.Context, setDeploymentScalingGroupOptions *SetDeploymentScalingGroupOptions) (result *SetDeploymentScalingGroupResponse, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(setDeploymentScalingGroupOptions, "setDeploymentScalingGroupOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(setDeploymentScalingGroupOptions, "setDeploymentScalingGroupOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.PATCH).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/groups/{group_id}`, map[string]string{
		"id":       *setDeploymentScalingGroupOptions.ID,
		"group_id": *setDeploymentScalingGroupOptions.GroupID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range setDeploymentScalingGroupOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "SetDeploymentScalingGroup") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")
	reqBuilder.AddHeader("Content-Type", "application/json")

	// The request model is serialized directly as the JSON body.
	if _, err = reqBuilder.SetBodyContentJSON(setDeploymentScalingGroupOptions.SetDeploymentScalingGroupRequest); err != nil {
		return result, response, err
	}

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSetDeploymentScalingGroupResponse); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// GetAutoscalingConditions : Get the autoscaling configuration from a deployment
// The autoscaling configuration represents the various conditions that control
// autoscaling for a deployment; this retrieves all of them for a particular deployment.
// Uses a background context; call GetAutoscalingConditionsWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetAutoscalingConditions(getAutoscalingConditionsOptions *GetAutoscalingConditionsOptions) (result *GetAutoscalingConditionsResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetAutoscalingConditionsWithContext(context.Background(), getAutoscalingConditionsOptions)
	return result, response, err
}
// GetAutoscalingConditionsWithContext is an alternate form of the GetAutoscalingConditions
// method that honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetAutoscalingConditionsWithContext(ctx context.Context, getAutoscalingConditionsOptions *GetAutoscalingConditionsOptions) (result *GetAutoscalingConditionsResponse, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(getAutoscalingConditionsOptions, "getAutoscalingConditionsOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(getAutoscalingConditionsOptions, "getAutoscalingConditionsOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.GET).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/groups/{group_id}/autoscaling`, map[string]string{
		"id":       *getAutoscalingConditionsOptions.ID,
		"group_id": *getAutoscalingConditionsOptions.GroupID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range getAutoscalingConditionsOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetAutoscalingConditions") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetAutoscalingConditionsResponse); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// SetAutoscalingConditions : Set the autoscaling configuration from a deployment
// Enable, disable, or set the conditions for autoscaling on your deployment. Memory,
// disk, and CPU (if available) can be set separately and are not all required. Uses a
// background context; call SetAutoscalingConditionsWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetAutoscalingConditions(setAutoscalingConditionsOptions *SetAutoscalingConditionsOptions) (result *SetAutoscalingConditionsResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.SetAutoscalingConditionsWithContext(context.Background(), setAutoscalingConditionsOptions)
	return result, response, err
}
// SetAutoscalingConditionsWithContext is an alternate form of the SetAutoscalingConditions
// method that honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) SetAutoscalingConditionsWithContext(ctx context.Context, setAutoscalingConditionsOptions *SetAutoscalingConditionsOptions) (result *SetAutoscalingConditionsResponse, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(setAutoscalingConditionsOptions, "setAutoscalingConditionsOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(setAutoscalingConditionsOptions, "setAutoscalingConditionsOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.PATCH).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/groups/{group_id}/autoscaling`, map[string]string{
		"id":       *setAutoscalingConditionsOptions.ID,
		"group_id": *setAutoscalingConditionsOptions.GroupID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range setAutoscalingConditionsOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "SetAutoscalingConditions") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")
	reqBuilder.AddHeader("Content-Type", "application/json")

	// Only fields the caller actually set are serialized into the body.
	body := map[string]interface{}{}
	if setAutoscalingConditionsOptions.Autoscaling != nil {
		body["autoscaling"] = setAutoscalingConditionsOptions.Autoscaling
	}
	if _, err = reqBuilder.SetBodyContentJSON(body); err != nil {
		return result, response, err
	}

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSetAutoscalingConditionsResponse); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// KillConnections : Kill connections to a PostgreSQL or EnterpriseDB deployment
// Closes all the connections on a deployment. Available for PostgreSQL and EnterpriseDB
// ONLY. Uses a background context; call KillConnectionsWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) KillConnections(killConnectionsOptions *KillConnectionsOptions) (result *KillConnectionsResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.KillConnectionsWithContext(context.Background(), killConnectionsOptions)
	return result, response, err
}
// KillConnectionsWithContext is an alternate form of the KillConnections method that
// honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) KillConnectionsWithContext(ctx context.Context, killConnectionsOptions *KillConnectionsOptions) (result *KillConnectionsResponse, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(killConnectionsOptions, "killConnectionsOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(killConnectionsOptions, "killConnectionsOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.DELETE).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/management/database_connections`, map[string]string{
		"id": *killConnectionsOptions.ID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range killConnectionsOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "KillConnections") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKillConnectionsResponse); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// FileSync : Sync files uploaded to Elasticsearch deployment
// Starts a task that writes files to disk. Available for Elasticsearch ONLY. Uses a
// background context; call FileSyncWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) FileSync(fileSyncOptions *FileSyncOptions) (result *FileSyncResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.FileSyncWithContext(context.Background(), fileSyncOptions)
	return result, response, err
}
// FileSyncWithContext is an alternate form of the FileSync method that honors the
// supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) FileSyncWithContext(ctx context.Context, fileSyncOptions *FileSyncOptions) (result *FileSyncResponse, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(fileSyncOptions, "fileSyncOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(fileSyncOptions, "fileSyncOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.POST).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/elasticsearch/file_syncs`, map[string]string{
		"id": *fileSyncOptions.ID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range fileSyncOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "FileSync") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFileSyncResponse); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// CreateLogicalReplicationSlot : Create a new logical replication slot
// Creates a new logical replication slot on the specified database. For use with
// PostgreSQL, EnterpriseDB, and wal2json only. Uses a background context; call
// CreateLogicalReplicationSlotWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) CreateLogicalReplicationSlot(createLogicalReplicationSlotOptions *CreateLogicalReplicationSlotOptions) (result *CreateLogicalReplicationSlotResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.CreateLogicalReplicationSlotWithContext(context.Background(), createLogicalReplicationSlotOptions)
	return result, response, err
}
// CreateLogicalReplicationSlotWithContext is an alternate form of the CreateLogicalReplicationSlot
// method that honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) CreateLogicalReplicationSlotWithContext(ctx context.Context, createLogicalReplicationSlotOptions *CreateLogicalReplicationSlotOptions) (result *CreateLogicalReplicationSlotResponse, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(createLogicalReplicationSlotOptions, "createLogicalReplicationSlotOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(createLogicalReplicationSlotOptions, "createLogicalReplicationSlotOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.POST).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/postgresql/logical_replication_slots`, map[string]string{
		"id": *createLogicalReplicationSlotOptions.ID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range createLogicalReplicationSlotOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "CreateLogicalReplicationSlot") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")
	reqBuilder.AddHeader("Content-Type", "application/json")

	// Only fields the caller actually set are serialized into the body.
	body := map[string]interface{}{}
	if createLogicalReplicationSlotOptions.LogicalReplicationSlot != nil {
		body["logical_replication_slot"] = createLogicalReplicationSlotOptions.LogicalReplicationSlot
	}
	if _, err = reqBuilder.SetBodyContentJSON(body); err != nil {
		return result, response, err
	}

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateLogicalReplicationSlotResponse); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// DeleteLogicalReplicationSlot : Delete a logical replication slot
// Deletes a logical replication slot from a database. For use with PostgreSQL,
// EnterpriseDB, and wal2json only. Uses a background context; call
// DeleteLogicalReplicationSlotWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) DeleteLogicalReplicationSlot(deleteLogicalReplicationSlotOptions *DeleteLogicalReplicationSlotOptions) (result *DeleteLogicalReplicationSlotResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.DeleteLogicalReplicationSlotWithContext(context.Background(), deleteLogicalReplicationSlotOptions)
	return result, response, err
}
// DeleteLogicalReplicationSlotWithContext is an alternate form of the DeleteLogicalReplicationSlot
// method that honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) DeleteLogicalReplicationSlotWithContext(ctx context.Context, deleteLogicalReplicationSlotOptions *DeleteLogicalReplicationSlotOptions) (result *DeleteLogicalReplicationSlotResponse, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(deleteLogicalReplicationSlotOptions, "deleteLogicalReplicationSlotOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(deleteLogicalReplicationSlotOptions, "deleteLogicalReplicationSlotOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.DELETE).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/postgresql/logical_replication_slots/{name}`, map[string]string{
		"id":   *deleteLogicalReplicationSlotOptions.ID,
		"name": *deleteLogicalReplicationSlotOptions.Name,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range deleteLogicalReplicationSlotOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "DeleteLogicalReplicationSlot") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteLogicalReplicationSlotResponse); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// GetWhitelist : Retrieve the allowlisted addresses and ranges for a deployment
// Retrieve the allowlisted addresses and ranges for a deployment. Uses a background
// context; call GetWhitelistWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetWhitelist(getWhitelistOptions *GetWhitelistOptions) (result *Whitelist, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.GetWhitelistWithContext(context.Background(), getWhitelistOptions)
	return result, response, err
}
// GetWhitelistWithContext is an alternate form of the GetWhitelist method that honors
// the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) GetWhitelistWithContext(ctx context.Context, getWhitelistOptions *GetWhitelistOptions) (result *Whitelist, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(getWhitelistOptions, "getWhitelistOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(getWhitelistOptions, "getWhitelistOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.GET).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/whitelists/ip_addresses`, map[string]string{
		"id": *getWhitelistOptions.ID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range getWhitelistOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "GetWhitelist") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWhitelist); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// ReplaceWhitelist : Replace the allowlist for a deployment
// Replace the allowlist for a deployment. This action overwrites all existing entries,
// so when you modify the allowlist via a GET/update/PUT, provide the GET response's ETag
// header value in this endpoint's If-Match header to ensure that changes made by other
// clients are not accidentally overwritten. Uses a background context; call
// ReplaceWhitelistWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) ReplaceWhitelist(replaceWhitelistOptions *ReplaceWhitelistOptions) (result *ReplaceWhitelistResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.ReplaceWhitelistWithContext(context.Background(), replaceWhitelistOptions)
	return result, response, err
}
// ReplaceWhitelistWithContext is an alternate form of the ReplaceWhitelist method that
// honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) ReplaceWhitelistWithContext(ctx context.Context, replaceWhitelistOptions *ReplaceWhitelistOptions) (result *ReplaceWhitelistResponse, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(replaceWhitelistOptions, "replaceWhitelistOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(replaceWhitelistOptions, "replaceWhitelistOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.PUT).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/whitelists/ip_addresses`, map[string]string{
		"id": *replaceWhitelistOptions.ID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range replaceWhitelistOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "ReplaceWhitelist") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")
	reqBuilder.AddHeader("Content-Type", "application/json")
	// Optional optimistic-locking header: only sent when the caller supplied an ETag.
	if replaceWhitelistOptions.IfMatch != nil {
		reqBuilder.AddHeader("If-Match", fmt.Sprint(*replaceWhitelistOptions.IfMatch))
	}

	// Only fields the caller actually set are serialized into the body.
	body := map[string]interface{}{}
	if replaceWhitelistOptions.IpAddresses != nil {
		body["ip_addresses"] = replaceWhitelistOptions.IpAddresses
	}
	if _, err = reqBuilder.SetBodyContentJSON(body); err != nil {
		return result, response, err
	}

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReplaceWhitelistResponse); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// AddWhitelistEntry : Add an address or range to the allowlist for a deployment
// Add an address or range to the allowlist for a deployment. Uses a background context;
// call AddWhitelistEntryWithContext to control cancellation.
func (ibmCloudDatabases *IbmCloudDatabasesV5) AddWhitelistEntry(addWhitelistEntryOptions *AddWhitelistEntryOptions) (result *AddWhitelistEntryResponse, response *core.DetailedResponse, err error) {
	result, response, err = ibmCloudDatabases.AddWhitelistEntryWithContext(context.Background(), addWhitelistEntryOptions)
	return result, response, err
}
// AddWhitelistEntryWithContext is an alternate form of the AddWhitelistEntry method that
// honors the supplied context for cancellation and deadlines.
func (ibmCloudDatabases *IbmCloudDatabasesV5) AddWhitelistEntryWithContext(ctx context.Context, addWhitelistEntryOptions *AddWhitelistEntryOptions) (result *AddWhitelistEntryResponse, response *core.DetailedResponse, err error) {
	// Validate the options object before dereferencing any of its fields.
	if err = core.ValidateNotNil(addWhitelistEntryOptions, "addWhitelistEntryOptions cannot be nil"); err != nil {
		return result, response, err
	}
	if err = core.ValidateStruct(addWhitelistEntryOptions, "addWhitelistEntryOptions"); err != nil {
		return result, response, err
	}

	// Assemble the request URL from the service endpoint and path parameters.
	reqBuilder := core.NewRequestBuilder(core.POST).WithContext(ctx)
	reqBuilder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	if _, err = reqBuilder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/whitelists/ip_addresses`, map[string]string{
		"id": *addWhitelistEntryOptions.ID,
	}); err != nil {
		return result, response, err
	}

	// Caller-supplied headers first, then the standard SDK analytics headers.
	for name, value := range addWhitelistEntryOptions.Headers {
		reqBuilder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_databases", "V5", "AddWhitelistEntry") {
		reqBuilder.AddHeader(name, value)
	}
	reqBuilder.AddHeader("Accept", "application/json")
	reqBuilder.AddHeader("Content-Type", "application/json")

	// Only fields the caller actually set are serialized into the body.
	body := map[string]interface{}{}
	if addWhitelistEntryOptions.IpAddress != nil {
		body["ip_address"] = addWhitelistEntryOptions.IpAddress
	}
	if _, err = reqBuilder.SetBodyContentJSON(body); err != nil {
		return result, response, err
	}

	request, err := reqBuilder.Build()
	if err != nil {
		return result, response, err
	}

	// Send the request and decode the JSON payload into the result model.
	var rawResponse map[string]json.RawMessage
	if response, err = ibmCloudDatabases.Service.Request(request, &rawResponse); err != nil {
		return result, response, err
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddWhitelistEntryResponse); err != nil {
		return result, response, err
	}
	response.Result = result
	return result, response, err
}
// DeleteWhitelistEntry : Delete an address or range from the allowlist of a deployment
// Delete an address or range from the allowlist of a deployment.
func (ibmCloudDatabases *IbmCloudDatabasesV5) DeleteWhitelistEntry(deleteWhitelistEntryOptions *DeleteWhitelistEntryOptions) (result *DeleteWhitelistEntryResponse, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	result, response, err = ibmCloudDatabases.DeleteWhitelistEntryWithContext(context.Background(), deleteWhitelistEntryOptions)
	return
}
// DeleteWhitelistEntryWithContext is an alternate form of the DeleteWhitelistEntry method which supports a Context parameter
func (ibmCloudDatabases *IbmCloudDatabasesV5) DeleteWhitelistEntryWithContext(ctx context.Context, deleteWhitelistEntryOptions *DeleteWhitelistEntryOptions) (result *DeleteWhitelistEntryResponse, response *core.DetailedResponse, err error) {
	// Reject a nil options struct, then validate its required fields.
	err = core.ValidateNotNil(deleteWhitelistEntryOptions, "deleteWhitelistEntryOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(deleteWhitelistEntryOptions, "deleteWhitelistEntryOptions")
	if err != nil {
		return
	}
	// Both the deployment ID and the address/range being removed are path
	// parameters of the DELETE endpoint.
	pathParamsMap := map[string]string{
		"id": *deleteWhitelistEntryOptions.ID,
		"ipaddress": *deleteWhitelistEntryOptions.Ipaddress,
	}
	builder := core.NewRequestBuilder(core.DELETE)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudDatabases.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudDatabases.Service.Options.URL, `/deployments/{id}/whitelists/ip_addresses/{ipaddress}`, pathParamsMap)
	if err != nil {
		return
	}
	// Headers: caller-supplied first, then SDK analytics headers, then Accept.
	// No Content-Type header: this request carries no body.
	for headerName, headerValue := range deleteWhitelistEntryOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_databases", "V5", "DeleteWhitelistEntry")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "application/json")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Issue the request, decode the raw JSON into the typed response model,
	// and attach the model to the detailed response.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudDatabases.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteWhitelistEntryResponse)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// APasswordSettingUser : APasswordSettingUser struct
type APasswordSettingUser struct {
	Password *string `json:"password,omitempty"`
}
// UnmarshalAPasswordSettingUser unmarshals an instance of APasswordSettingUser from the specified map of raw messages.
func UnmarshalAPasswordSettingUser(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(APasswordSettingUser)
	if err = core.UnmarshalPrimitive(m, "password", &model.Password); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AddWhitelistEntryOptions : The AddWhitelistEntry options.
type AddWhitelistEntryOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	IpAddress *WhitelistEntry
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewAddWhitelistEntryOptions : Instantiate AddWhitelistEntryOptions
func (*IbmCloudDatabasesV5) NewAddWhitelistEntryOptions(id string) *AddWhitelistEntryOptions {
	// Only the required deployment ID is set here; everything else is
	// populated through the fluent setters.
	opts := new(AddWhitelistEntryOptions)
	opts.ID = core.StringPtr(id)
	return opts
}
// SetID : Allow user to set ID
func (opts *AddWhitelistEntryOptions) SetID(id string) *AddWhitelistEntryOptions {
	opts.ID = core.StringPtr(id)
	return opts
}
// SetIpAddress : Allow user to set IpAddress
func (opts *AddWhitelistEntryOptions) SetIpAddress(ipAddress *WhitelistEntry) *AddWhitelistEntryOptions {
	opts.IpAddress = ipAddress
	return opts
}
// SetHeaders : Allow user to set Headers
func (opts *AddWhitelistEntryOptions) SetHeaders(param map[string]string) *AddWhitelistEntryOptions {
	opts.Headers = param
	return opts
}
// AddWhitelistEntryResponse : AddWhitelistEntryResponse struct
type AddWhitelistEntryResponse struct {
	Task *Task `json:"task,omitempty"`
}
// UnmarshalAddWhitelistEntryResponse unmarshals an instance of AddWhitelistEntryResponse from the specified map of raw messages.
func UnmarshalAddWhitelistEntryResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AddWhitelistEntryResponse)
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingCPUGroupCPU : AutoscalingCPUGroupCPU struct
type AutoscalingCPUGroupCPU struct {
	Scalers interface{} `json:"scalers,omitempty"`
	Rate *AutoscalingCPUGroupCPURate `json:"rate,omitempty"`
}
// UnmarshalAutoscalingCPUGroupCPU unmarshals an instance of AutoscalingCPUGroupCPU from the specified map of raw messages.
func UnmarshalAutoscalingCPUGroupCPU(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingCPUGroupCPU)
	if err = core.UnmarshalPrimitive(m, "scalers", &model.Scalers); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "rate", &model.Rate, UnmarshalAutoscalingCPUGroupCPURate); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingCPUGroupCPURate : AutoscalingCPUGroupCPURate struct
type AutoscalingCPUGroupCPURate struct {
	IncreasePercent *float64 `json:"increase_percent,omitempty"`
	PeriodSeconds *int64 `json:"period_seconds,omitempty"`
	LimitCountPerMember *int64 `json:"limit_count_per_member,omitempty"`
	Units *string `json:"units,omitempty"`
}
// UnmarshalAutoscalingCPUGroupCPURate unmarshals an instance of AutoscalingCPUGroupCPURate from the specified map of raw messages.
func UnmarshalAutoscalingCPUGroupCPURate(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingCPUGroupCPURate)
	if err = core.UnmarshalPrimitive(m, "increase_percent", &model.IncreasePercent); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "period_seconds", &model.PeriodSeconds); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "limit_count_per_member", &model.LimitCountPerMember); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "units", &model.Units); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingDiskGroupDisk : AutoscalingDiskGroupDisk struct
type AutoscalingDiskGroupDisk struct {
	Scalers *AutoscalingDiskGroupDiskScalers `json:"scalers,omitempty"`
	Rate *AutoscalingDiskGroupDiskRate `json:"rate,omitempty"`
}
// UnmarshalAutoscalingDiskGroupDisk unmarshals an instance of AutoscalingDiskGroupDisk from the specified map of raw messages.
func UnmarshalAutoscalingDiskGroupDisk(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingDiskGroupDisk)
	if err = core.UnmarshalModel(m, "scalers", &model.Scalers, UnmarshalAutoscalingDiskGroupDiskScalers); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "rate", &model.Rate, UnmarshalAutoscalingDiskGroupDiskRate); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingDiskGroupDiskRate : AutoscalingDiskGroupDiskRate struct
type AutoscalingDiskGroupDiskRate struct {
	IncreasePercent *float64 `json:"increase_percent,omitempty"`
	PeriodSeconds *int64 `json:"period_seconds,omitempty"`
	LimitMbPerMember *float64 `json:"limit_mb_per_member,omitempty"`
	Units *string `json:"units,omitempty"`
}
// UnmarshalAutoscalingDiskGroupDiskRate unmarshals an instance of AutoscalingDiskGroupDiskRate from the specified map of raw messages.
func UnmarshalAutoscalingDiskGroupDiskRate(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingDiskGroupDiskRate)
	if err = core.UnmarshalPrimitive(m, "increase_percent", &model.IncreasePercent); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "period_seconds", &model.PeriodSeconds); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "limit_mb_per_member", &model.LimitMbPerMember); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "units", &model.Units); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingDiskGroupDiskScalers : AutoscalingDiskGroupDiskScalers struct
type AutoscalingDiskGroupDiskScalers struct {
	Capacity *AutoscalingDiskGroupDiskScalersCapacity `json:"capacity,omitempty"`
	IoUtilization *AutoscalingDiskGroupDiskScalersIoUtilization `json:"io_utilization,omitempty"`
}
// UnmarshalAutoscalingDiskGroupDiskScalers unmarshals an instance of AutoscalingDiskGroupDiskScalers from the specified map of raw messages.
func UnmarshalAutoscalingDiskGroupDiskScalers(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingDiskGroupDiskScalers)
	if err = core.UnmarshalModel(m, "capacity", &model.Capacity, UnmarshalAutoscalingDiskGroupDiskScalersCapacity); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "io_utilization", &model.IoUtilization, UnmarshalAutoscalingDiskGroupDiskScalersIoUtilization); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingDiskGroupDiskScalersCapacity : AutoscalingDiskGroupDiskScalersCapacity struct
type AutoscalingDiskGroupDiskScalersCapacity struct {
	Enabled *bool `json:"enabled,omitempty"`
	FreeSpaceLessThanPercent *int64 `json:"free_space_less_than_percent,omitempty"`
}
// UnmarshalAutoscalingDiskGroupDiskScalersCapacity unmarshals an instance of AutoscalingDiskGroupDiskScalersCapacity from the specified map of raw messages.
func UnmarshalAutoscalingDiskGroupDiskScalersCapacity(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingDiskGroupDiskScalersCapacity)
	if err = core.UnmarshalPrimitive(m, "enabled", &model.Enabled); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "free_space_less_than_percent", &model.FreeSpaceLessThanPercent); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingDiskGroupDiskScalersIoUtilization : AutoscalingDiskGroupDiskScalersIoUtilization struct
type AutoscalingDiskGroupDiskScalersIoUtilization struct {
	Enabled *bool `json:"enabled,omitempty"`
	OverPeriod *string `json:"over_period,omitempty"`
	AbovePercent *int64 `json:"above_percent,omitempty"`
}
// UnmarshalAutoscalingDiskGroupDiskScalersIoUtilization unmarshals an instance of AutoscalingDiskGroupDiskScalersIoUtilization from the specified map of raw messages.
func UnmarshalAutoscalingDiskGroupDiskScalersIoUtilization(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingDiskGroupDiskScalersIoUtilization)
	if err = core.UnmarshalPrimitive(m, "enabled", &model.Enabled); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "over_period", &model.OverPeriod); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "above_percent", &model.AbovePercent); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingGroup : AutoscalingGroup struct
type AutoscalingGroup struct {
	Disk *AutoscalingDiskGroupDisk `json:"disk,omitempty"`
	Memory *AutoscalingMemoryGroupMemory `json:"memory,omitempty"`
	Cpu *AutoscalingCPUGroupCPU `json:"cpu,omitempty"`
}
// UnmarshalAutoscalingGroup unmarshals an instance of AutoscalingGroup from the specified map of raw messages.
func UnmarshalAutoscalingGroup(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingGroup)
	if err = core.UnmarshalModel(m, "disk", &model.Disk, UnmarshalAutoscalingDiskGroupDisk); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "memory", &model.Memory, UnmarshalAutoscalingMemoryGroupMemory); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "cpu", &model.Cpu, UnmarshalAutoscalingCPUGroupCPU); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingMemoryGroupMemory : AutoscalingMemoryGroupMemory struct
type AutoscalingMemoryGroupMemory struct {
	Scalers *AutoscalingMemoryGroupMemoryScalers `json:"scalers,omitempty"`
	Rate *AutoscalingMemoryGroupMemoryRate `json:"rate,omitempty"`
}
// UnmarshalAutoscalingMemoryGroupMemory unmarshals an instance of AutoscalingMemoryGroupMemory from the specified map of raw messages.
func UnmarshalAutoscalingMemoryGroupMemory(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingMemoryGroupMemory)
	if err = core.UnmarshalModel(m, "scalers", &model.Scalers, UnmarshalAutoscalingMemoryGroupMemoryScalers); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "rate", &model.Rate, UnmarshalAutoscalingMemoryGroupMemoryRate); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingMemoryGroupMemoryRate : AutoscalingMemoryGroupMemoryRate struct
type AutoscalingMemoryGroupMemoryRate struct {
	IncreasePercent *float64 `json:"increase_percent,omitempty"`
	PeriodSeconds *int64 `json:"period_seconds,omitempty"`
	LimitMbPerMember *float64 `json:"limit_mb_per_member,omitempty"`
	Units *string `json:"units,omitempty"`
}
// UnmarshalAutoscalingMemoryGroupMemoryRate unmarshals an instance of AutoscalingMemoryGroupMemoryRate from the specified map of raw messages.
func UnmarshalAutoscalingMemoryGroupMemoryRate(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingMemoryGroupMemoryRate)
	if err = core.UnmarshalPrimitive(m, "increase_percent", &model.IncreasePercent); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "period_seconds", &model.PeriodSeconds); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "limit_mb_per_member", &model.LimitMbPerMember); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "units", &model.Units); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingMemoryGroupMemoryScalers : AutoscalingMemoryGroupMemoryScalers struct
type AutoscalingMemoryGroupMemoryScalers struct {
	IoUtilization *AutoscalingMemoryGroupMemoryScalersIoUtilization `json:"io_utilization,omitempty"`
}
// UnmarshalAutoscalingMemoryGroupMemoryScalers unmarshals an instance of AutoscalingMemoryGroupMemoryScalers from the specified map of raw messages.
func UnmarshalAutoscalingMemoryGroupMemoryScalers(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingMemoryGroupMemoryScalers)
	if err = core.UnmarshalModel(m, "io_utilization", &model.IoUtilization, UnmarshalAutoscalingMemoryGroupMemoryScalersIoUtilization); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingMemoryGroupMemoryScalersIoUtilization : AutoscalingMemoryGroupMemoryScalersIoUtilization struct
type AutoscalingMemoryGroupMemoryScalersIoUtilization struct {
	Enabled *bool `json:"enabled,omitempty"`
	OverPeriod *string `json:"over_period,omitempty"`
	AbovePercent *int64 `json:"above_percent,omitempty"`
}
// UnmarshalAutoscalingMemoryGroupMemoryScalersIoUtilization unmarshals an instance of AutoscalingMemoryGroupMemoryScalersIoUtilization from the specified map of raw messages.
func UnmarshalAutoscalingMemoryGroupMemoryScalersIoUtilization(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingMemoryGroupMemoryScalersIoUtilization)
	if err = core.UnmarshalPrimitive(m, "enabled", &model.Enabled); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "over_period", &model.OverPeriod); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "above_percent", &model.AbovePercent); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// AutoscalingSetGroup : AutoscalingSetGroup struct
// Models which "extend" this model:
// - AutoscalingSetGroupAutoscalingDiskGroup
// - AutoscalingSetGroupAutoscalingMemoryGroup
// - AutoscalingSetGroupAutoscalingCPUGroup
type AutoscalingSetGroup struct {
	Disk *AutoscalingDiskGroupDisk `json:"disk,omitempty"`
	Memory *AutoscalingMemoryGroupMemory `json:"memory,omitempty"`
	Cpu *AutoscalingCPUGroupCPU `json:"cpu,omitempty"`
}
func (*AutoscalingSetGroup) isaAutoscalingSetGroup() bool {
	return true
}
type AutoscalingSetGroupIntf interface {
	isaAutoscalingSetGroup() bool
}
// UnmarshalAutoscalingSetGroup unmarshals an instance of AutoscalingSetGroup from the specified map of raw messages.
func UnmarshalAutoscalingSetGroup(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(AutoscalingSetGroup)
	if err = core.UnmarshalModel(m, "disk", &model.Disk, UnmarshalAutoscalingDiskGroupDisk); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "memory", &model.Memory, UnmarshalAutoscalingMemoryGroupMemory); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "cpu", &model.Cpu, UnmarshalAutoscalingCPUGroupCPU); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// Backup : Backup struct
type Backup struct {
	// ID of this backup.
	ID *string `json:"id,omitempty"`
	// ID of the deployment this backup relates to.
	DeploymentID *string `json:"deployment_id,omitempty"`
	// The type of backup.
	Type *string `json:"type,omitempty"`
	// The status of this backup.
	Status *string `json:"status,omitempty"`
	// Is this backup available to download?.
	IsDownloadable *bool `json:"is_downloadable,omitempty"`
	// Can this backup be used to restore an instance?.
	IsRestorable *bool `json:"is_restorable,omitempty"`
	// Date and time when this backup was created.
	CreatedAt *strfmt.DateTime `json:"created_at,omitempty"`
}
// Constants associated with the Backup.Type property.
// The type of backup.
const (
	Backup_Type_OnDemand = "on_demand"
	Backup_Type_Scheduled = "scheduled"
)
// Constants associated with the Backup.Status property.
// The status of this backup.
const (
	Backup_Status_Completed = "completed"
	Backup_Status_Failed = "failed"
	Backup_Status_Running = "running"
)
// UnmarshalBackup unmarshals an instance of Backup from the specified map of raw messages.
func UnmarshalBackup(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(Backup)
	if err = core.UnmarshalPrimitive(m, "id", &model.ID); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "deployment_id", &model.DeploymentID); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "type", &model.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "status", &model.Status); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "is_downloadable", &model.IsDownloadable); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "is_restorable", &model.IsRestorable); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "created_at", &model.CreatedAt); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// Backups : Backups struct
type Backups struct {
	Backups []Backup `json:"backups,omitempty"`
}
// UnmarshalBackups unmarshals an instance of Backups from the specified map of raw messages.
func UnmarshalBackups(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(Backups)
	if err = core.UnmarshalModel(m, "backups", &model.Backups, UnmarshalBackup); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ChangeUserPasswordOptions : The ChangeUserPassword options.
type ChangeUserPasswordOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	// User type.
	UserType *string `validate:"required,ne="`
	// User ID.
	Username *string `validate:"required,ne="`
	User *APasswordSettingUser
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewChangeUserPasswordOptions : Instantiate ChangeUserPasswordOptions
func (*IbmCloudDatabasesV5) NewChangeUserPasswordOptions(id string, userType string, username string) *ChangeUserPasswordOptions {
	// Only the required path parameters are set here; the user payload and
	// extra headers are populated through the fluent setters.
	opts := new(ChangeUserPasswordOptions)
	opts.ID = core.StringPtr(id)
	opts.UserType = core.StringPtr(userType)
	opts.Username = core.StringPtr(username)
	return opts
}
// SetID : Allow user to set ID
func (opts *ChangeUserPasswordOptions) SetID(id string) *ChangeUserPasswordOptions {
	opts.ID = core.StringPtr(id)
	return opts
}
// SetUserType : Allow user to set UserType
func (opts *ChangeUserPasswordOptions) SetUserType(userType string) *ChangeUserPasswordOptions {
	opts.UserType = core.StringPtr(userType)
	return opts
}
// SetUsername : Allow user to set Username
func (opts *ChangeUserPasswordOptions) SetUsername(username string) *ChangeUserPasswordOptions {
	opts.Username = core.StringPtr(username)
	return opts
}
// SetUser : Allow user to set User
func (opts *ChangeUserPasswordOptions) SetUser(user *APasswordSettingUser) *ChangeUserPasswordOptions {
	opts.User = user
	return opts
}
// SetHeaders : Allow user to set Headers
func (opts *ChangeUserPasswordOptions) SetHeaders(param map[string]string) *ChangeUserPasswordOptions {
	opts.Headers = param
	return opts
}
// ChangeUserPasswordResponse : ChangeUserPasswordResponse struct
type ChangeUserPasswordResponse struct {
	Task *Task `json:"task,omitempty"`
}
// UnmarshalChangeUserPasswordResponse unmarshals an instance of ChangeUserPasswordResponse from the specified map of raw messages.
func UnmarshalChangeUserPasswordResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ChangeUserPasswordResponse)
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ChoicePropertySchema : Choice Property Schema.
type ChoicePropertySchema struct {
	// Whether the setting is customer-configurable.
	CustomerConfigurable *bool `json:"customer_configurable,omitempty"`
	// The default value of the setting.
	Default *int64 `json:"default,omitempty"`
	// The description of the default value.
	DefaultDescription *string `json:"default_description,omitempty"`
	// The description of the setting.
	Description *string `json:"description,omitempty"`
	// The type of this setting (e.g., string, integer).
	Kind *string `json:"kind,omitempty"`
	// Whether or not changing this setting will restart the database.
	RequiresRestart *bool `json:"requires_restart,omitempty"`
	// The valid choices for this setting.
	Choices []string `json:"choices,omitempty"`
}
// UnmarshalChoicePropertySchema unmarshals an instance of ChoicePropertySchema from the specified map of raw messages.
func UnmarshalChoicePropertySchema(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ChoicePropertySchema)
	if err = core.UnmarshalPrimitive(m, "customer_configurable", &model.CustomerConfigurable); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "default", &model.Default); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "default_description", &model.DefaultDescription); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "description", &model.Description); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "kind", &model.Kind); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "requires_restart", &model.RequiresRestart); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "choices", &model.Choices); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// CompleteConnectionDeprecatedOptions : The CompleteConnectionDeprecated options.
type CompleteConnectionDeprecatedOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	// User type.
	UserType *string `validate:"required,ne="`
	// User ID.
	UserID *string `validate:"required,ne="`
	// Password to be substituted into the response.
	Password *string
	// Optional certificate root path to prepend certificate names. Certificates would be stored in this directory for use
	// by other commands.
	CertificateRoot *string
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewCompleteConnectionDeprecatedOptions : Instantiate CompleteConnectionDeprecatedOptions
func (*IbmCloudDatabasesV5) NewCompleteConnectionDeprecatedOptions(id string, userType string, userID string) *CompleteConnectionDeprecatedOptions {
	// Only the required path parameters are set here; optional fields are
	// populated through the fluent setters.
	opts := new(CompleteConnectionDeprecatedOptions)
	opts.ID = core.StringPtr(id)
	opts.UserType = core.StringPtr(userType)
	opts.UserID = core.StringPtr(userID)
	return opts
}
// SetID : Allow user to set ID
func (opts *CompleteConnectionDeprecatedOptions) SetID(id string) *CompleteConnectionDeprecatedOptions {
	opts.ID = core.StringPtr(id)
	return opts
}
// SetUserType : Allow user to set UserType
func (opts *CompleteConnectionDeprecatedOptions) SetUserType(userType string) *CompleteConnectionDeprecatedOptions {
	opts.UserType = core.StringPtr(userType)
	return opts
}
// SetUserID : Allow user to set UserID
func (opts *CompleteConnectionDeprecatedOptions) SetUserID(userID string) *CompleteConnectionDeprecatedOptions {
	opts.UserID = core.StringPtr(userID)
	return opts
}
// SetPassword : Allow user to set Password
func (opts *CompleteConnectionDeprecatedOptions) SetPassword(password string) *CompleteConnectionDeprecatedOptions {
	opts.Password = core.StringPtr(password)
	return opts
}
// SetCertificateRoot : Allow user to set CertificateRoot
func (opts *CompleteConnectionDeprecatedOptions) SetCertificateRoot(certificateRoot string) *CompleteConnectionDeprecatedOptions {
	opts.CertificateRoot = core.StringPtr(certificateRoot)
	return opts
}
// SetHeaders : Allow user to set Headers
func (opts *CompleteConnectionDeprecatedOptions) SetHeaders(param map[string]string) *CompleteConnectionDeprecatedOptions {
	opts.Headers = param
	return opts
}
// CompleteConnectionOptions : The CompleteConnection options.
type CompleteConnectionOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	// User type of `database` is the only currently supported value.
	UserType *string `validate:"required,ne="`
	// User ID.
	UserID *string `validate:"required,ne="`
	// Endpoint Type. The select endpoint must be enabled on the deployment before its connection information can be
	// fetched.
	EndpointType *string `validate:"required,ne="`
	// Password to be substituted into the response.
	Password *string
	// Optional certificate root path to prepend certificate names. Certificates would be stored in this directory for use
	// by other commands.
	CertificateRoot *string
	// Allows users to set headers on API requests
	Headers map[string]string
}
// Constants associated with the CompleteConnectionOptions.EndpointType property.
// Endpoint Type. The select endpoint must be enabled on the deployment before its connection information can be
// fetched.
const (
	CompleteConnectionOptions_EndpointType_Private = "private"
	CompleteConnectionOptions_EndpointType_Public = "public"
)
// NewCompleteConnectionOptions : Instantiate CompleteConnectionOptions
func (*IbmCloudDatabasesV5) NewCompleteConnectionOptions(id string, userType string, userID string, endpointType string) *CompleteConnectionOptions {
	// Only the required path parameters are set here; optional fields are
	// populated through the fluent setters.
	opts := new(CompleteConnectionOptions)
	opts.ID = core.StringPtr(id)
	opts.UserType = core.StringPtr(userType)
	opts.UserID = core.StringPtr(userID)
	opts.EndpointType = core.StringPtr(endpointType)
	return opts
}
// SetID : Allow user to set ID
func (opts *CompleteConnectionOptions) SetID(id string) *CompleteConnectionOptions {
	opts.ID = core.StringPtr(id)
	return opts
}
// SetUserType : Allow user to set UserType
func (opts *CompleteConnectionOptions) SetUserType(userType string) *CompleteConnectionOptions {
	opts.UserType = core.StringPtr(userType)
	return opts
}
// SetUserID : Allow user to set UserID
func (opts *CompleteConnectionOptions) SetUserID(userID string) *CompleteConnectionOptions {
	opts.UserID = core.StringPtr(userID)
	return opts
}
// SetEndpointType : Allow user to set EndpointType
func (opts *CompleteConnectionOptions) SetEndpointType(endpointType string) *CompleteConnectionOptions {
	opts.EndpointType = core.StringPtr(endpointType)
	return opts
}
// SetPassword : Allow user to set Password
func (opts *CompleteConnectionOptions) SetPassword(password string) *CompleteConnectionOptions {
	opts.Password = core.StringPtr(password)
	return opts
}
// SetCertificateRoot : Allow user to set CertificateRoot
func (opts *CompleteConnectionOptions) SetCertificateRoot(certificateRoot string) *CompleteConnectionOptions {
	opts.CertificateRoot = core.StringPtr(certificateRoot)
	return opts
}
// SetHeaders : Allow user to set Headers
func (opts *CompleteConnectionOptions) SetHeaders(param map[string]string) *CompleteConnectionOptions {
	opts.Headers = param
	return opts
}
// ConfigurationSchema : Database Configuration Schema.
type ConfigurationSchema struct {
	Schema ConfigurationSchemaSchemaIntf `json:"schema" validate:"required"`
}
// UnmarshalConfigurationSchema unmarshals an instance of ConfigurationSchema from the specified map of raw messages.
func UnmarshalConfigurationSchema(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ConfigurationSchema)
	if err = core.UnmarshalModel(m, "schema", &model.Schema, UnmarshalConfigurationSchemaSchema); err != nil {
		return
	}
	// Store the populated model through the caller's result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ConfigurationSchemaSchema : ConfigurationSchemaSchema struct
// Models which "extend" this model:
// - ConfigurationSchemaSchemaPGConfigurationSchema
// - ConfigurationSchemaSchemaRedisConfigurationSchema
//
// NOTE(review): this base struct is the union of the fields of its two
// "extending" schemas; any given response presumably populates only the
// subset relevant to its database type — confirm against the API.
type ConfigurationSchemaSchema struct {
	// Integer Property Schema.
	MaxConnections *IntegerPropertySchema `json:"max_connections,omitempty"`
	// Integer Property Schema.
	MaxPreparedConnections *IntegerPropertySchema `json:"max_prepared_connections,omitempty"`
	// Integer Property Schema.
	BackupRetentionPeriod *IntegerPropertySchema `json:"backup_retention_period,omitempty"`
	// Integer Property Schema.
	DeadlockTimeout *IntegerPropertySchema `json:"deadlock_timeout,omitempty"`
	// Integer Property Schema.
	EffectiveIoConcurrency *IntegerPropertySchema `json:"effective_io_concurrency,omitempty"`
	// Integer Property Schema.
	MaxReplicationSlots *IntegerPropertySchema `json:"max_replication_slots,omitempty"`
	// Integer Property Schema.
	MaxWalSenders *IntegerPropertySchema `json:"max_wal_senders,omitempty"`
	// Integer Property Schema.
	SharedBuffers *IntegerPropertySchema `json:"shared_buffers,omitempty"`
	// Choice Property Schema.
	SynchronousCommit *ChoicePropertySchema `json:"synchronous_commit,omitempty"`
	// Choice Property Schema.
	WalLevel *ChoicePropertySchema `json:"wal_level,omitempty"`
	// Integer Property Schema.
	ArchiveTimeout *IntegerPropertySchema `json:"archive_timeout,omitempty"`
	// Integer Property Schema.
	LogMinDurationStatement *IntegerPropertySchema `json:"log_min_duration_statement,omitempty"`
	// Integer Property Schema.
	MaxmemoryRedis *IntegerPropertySchema `json:"maxmemory-redis,omitempty"`
	// Choice Property Schema.
	MaxmemoryPolicy *ChoicePropertySchema `json:"maxmemory-policy,omitempty"`
	// Choice Property Schema.
	Appendonly *ChoicePropertySchema `json:"appendonly,omitempty"`
	// Integer Property Schema.
	MaxmemorySamples *IntegerPropertySchema `json:"maxmemory-samples,omitempty"`
	// Choice Property Schema.
	StopWritesOnBgsaveError *ChoicePropertySchema `json:"stop-writes-on-bgsave-error,omitempty"`
}
// isaConfigurationSchemaSchema marks this type as satisfying ConfigurationSchemaSchemaIntf.
func (*ConfigurationSchemaSchema) isaConfigurationSchemaSchema() bool {
	return true
}
// ConfigurationSchemaSchemaIntf is implemented by ConfigurationSchemaSchema and its variants.
type ConfigurationSchemaSchemaIntf interface {
	isaConfigurationSchemaSchema() bool
}
// UnmarshalConfigurationSchemaSchema unmarshals an instance of ConfigurationSchemaSchema from the specified map of raw messages.
// Absent keys leave the corresponding field nil; the first unmarshal error aborts and is returned.
func UnmarshalConfigurationSchemaSchema(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ConfigurationSchemaSchema)
	if err = core.UnmarshalModel(m, "max_connections", &model.MaxConnections, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "max_prepared_connections", &model.MaxPreparedConnections, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "backup_retention_period", &model.BackupRetentionPeriod, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "deadlock_timeout", &model.DeadlockTimeout, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "effective_io_concurrency", &model.EffectiveIoConcurrency, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "max_replication_slots", &model.MaxReplicationSlots, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "max_wal_senders", &model.MaxWalSenders, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "shared_buffers", &model.SharedBuffers, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "synchronous_commit", &model.SynchronousCommit, UnmarshalChoicePropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "wal_level", &model.WalLevel, UnmarshalChoicePropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "archive_timeout", &model.ArchiveTimeout, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "log_min_duration_statement", &model.LogMinDurationStatement, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "maxmemory-redis", &model.MaxmemoryRedis, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "maxmemory-policy", &model.MaxmemoryPolicy, UnmarshalChoicePropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "appendonly", &model.Appendonly, UnmarshalChoicePropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "maxmemory-samples", &model.MaxmemorySamples, UnmarshalIntegerPropertySchema); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "stop-writes-on-bgsave-error", &model.StopWritesOnBgsaveError, UnmarshalChoicePropertySchema); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// Connection : Connection struct
type Connection struct {
	Connection ConnectionConnectionIntf `json:"connection" validate:"required"`
}

// UnmarshalConnection unmarshals an instance of Connection from the specified map of raw messages.
func UnmarshalConnection(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(Connection)
	if err = core.UnmarshalModel(m, "connection", &model.Connection, UnmarshalConnectionConnection); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ConnectionCLI : CLI Connection.
type ConnectionCLI struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`

	Composed []string `json:"composed,omitempty"`

	// A map of environment variables for a CLI connection.
	Environment map[string]string `json:"environment,omitempty"`

	// The name of the executable the CLI should run.
	Bin *string `json:"bin,omitempty"`

	// Sets of arguments to call the executable with. The outer array corresponds to a possible way to call the CLI; the
	// inner array is the set of arguments to use with that call.
	Arguments [][]string `json:"arguments,omitempty"`

	Certificate *ConnectionCLICertificate `json:"certificate,omitempty"`
}

// UnmarshalConnectionCLI unmarshals an instance of ConnectionCLI from the specified map of raw messages.
func UnmarshalConnectionCLI(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ConnectionCLI)
	if err = core.UnmarshalPrimitive(m, "type", &model.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &model.Composed); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "environment", &model.Environment); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "bin", &model.Bin); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "arguments", &model.Arguments); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &model.Certificate, UnmarshalConnectionCLICertificate); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ConnectionCLICertificate : ConnectionCLICertificate struct
type ConnectionCLICertificate struct {
	// Name associated with the certificate.
	Name *string `json:"name,omitempty"`

	// Base64 encoded version of the certificate.
	CertificateBase64 *string `json:"certificate_base64,omitempty"`
}

// UnmarshalConnectionCLICertificate unmarshals an instance of ConnectionCLICertificate from the specified map of raw messages.
func UnmarshalConnectionCLICertificate(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ConnectionCLICertificate)
	if err = core.UnmarshalPrimitive(m, "name", &model.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "certificate_base64", &model.CertificateBase64); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ConnectionConnection : ConnectionConnection struct
// Models which "extend" this model:
// - ConnectionConnectionPostgreSQLConnection
// - ConnectionConnectionRedisConnection
// - ConnectionConnectionElasticsearchConnection
// - ConnectionConnectionRabbitMQConnection
// - ConnectionConnectionEtcdConnection
// - ConnectionConnectionMongoDBConnection
//
// Only the fields relevant to the deployment's database type are expected to be
// populated (one protocol-specific URI plus, for some types, a CLI description).
type ConnectionConnection struct {
	// Connection information for drivers and libraries.
	Postgres *PostgreSQLConnectionURI `json:"postgres,omitempty"`
	// Connection information for psql.
	Cli *ConnectionCLI `json:"cli,omitempty"`
	// Connection information for drivers and libraries.
	Rediss *RedisConnectionURI `json:"rediss,omitempty"`
	// Elasticsearch Connection information for drivers and libraries.
	Https *ElasticsearchConnectionHTTPS `json:"https,omitempty"`
	// RabbitMQ Connection information for AMQPS drivers and libraries.
	Amqps *RabbitMQConnectionAMQPS `json:"amqps,omitempty"`
	// RabbitMQ Connection information for MQTTS drivers and libraries.
	Mqtts *RabbitMQConnectionMQTTS `json:"mqtts,omitempty"`
	// RabbitMQ Connection information for STOMP drivers and libraries.
	StompSsl *RabbitMQConnectionStompSSL `json:"stomp_ssl,omitempty"`
	// GRPC(etcd3) Connection information for drivers and libraries.
	Grpc *GRPCConnectionURI `json:"grpc,omitempty"`
	// MongoDB Connection information for drivers and libraries.
	Mongodb *MongoDBConnectionURI `json:"mongodb,omitempty"`
}
// isaConnectionConnection marks this type as satisfying ConnectionConnectionIntf.
func (*ConnectionConnection) isaConnectionConnection() bool {
	return true
}
// ConnectionConnectionIntf is the interface implemented by ConnectionConnection and
// the models that "extend" it (see the list on ConnectionConnection).
type ConnectionConnectionIntf interface {
	isaConnectionConnection() bool
}
// UnmarshalConnectionConnection unmarshals an instance of ConnectionConnection from the specified map of raw messages.
// Absent keys leave the corresponding field nil; the first unmarshal error aborts and is returned.
func UnmarshalConnectionConnection(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ConnectionConnection)
	if err = core.UnmarshalModel(m, "postgres", &model.Postgres, UnmarshalPostgreSQLConnectionURI); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "cli", &model.Cli, UnmarshalConnectionCLI); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "rediss", &model.Rediss, UnmarshalRedisConnectionURI); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "https", &model.Https, UnmarshalElasticsearchConnectionHTTPS); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "amqps", &model.Amqps, UnmarshalRabbitMQConnectionAMQPS); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "mqtts", &model.Mqtts, UnmarshalRabbitMQConnectionMQTTS); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "stomp_ssl", &model.StompSsl, UnmarshalRabbitMQConnectionStompSSL); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "grpc", &model.Grpc, UnmarshalGRPCConnectionURI); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "mongodb", &model.Mongodb, UnmarshalMongoDBConnectionURI); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// CreateDatabaseUserOptions : The CreateDatabaseUser options.
type CreateDatabaseUserOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// User type.
	UserType *string `validate:"required,ne="`

	User *CreateDatabaseUserRequestUser

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewCreateDatabaseUserOptions : Instantiate CreateDatabaseUserOptions
func (*IbmCloudDatabasesV5) NewCreateDatabaseUserOptions(id string, userType string) *CreateDatabaseUserOptions {
	opts := &CreateDatabaseUserOptions{
		ID:       core.StringPtr(id),
		UserType: core.StringPtr(userType),
	}
	return opts
}

// SetID : Allow user to set ID
func (opts *CreateDatabaseUserOptions) SetID(id string) *CreateDatabaseUserOptions {
	opts.ID = core.StringPtr(id)
	return opts
}

// SetUserType : Allow user to set UserType
func (opts *CreateDatabaseUserOptions) SetUserType(userType string) *CreateDatabaseUserOptions {
	opts.UserType = core.StringPtr(userType)
	return opts
}

// SetUser : Allow user to set User
func (opts *CreateDatabaseUserOptions) SetUser(user *CreateDatabaseUserRequestUser) *CreateDatabaseUserOptions {
	opts.User = user
	return opts
}

// SetHeaders : Allow user to set Headers
func (opts *CreateDatabaseUserOptions) SetHeaders(param map[string]string) *CreateDatabaseUserOptions {
	opts.Headers = param
	return opts
}
// CreateDatabaseUserRequestUser : CreateDatabaseUserRequestUser struct
type CreateDatabaseUserRequestUser struct {
	// User type for new user.
	UserType *string `json:"user_type,omitempty"`

	// Username for new user.
	Username *string `json:"username,omitempty"`

	// Password for new user.
	Password *string `json:"password,omitempty"`
}

// UnmarshalCreateDatabaseUserRequestUser unmarshals an instance of CreateDatabaseUserRequestUser from the specified map of raw messages.
func UnmarshalCreateDatabaseUserRequestUser(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(CreateDatabaseUserRequestUser)
	if err = core.UnmarshalPrimitive(m, "user_type", &model.UserType); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "username", &model.Username); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "password", &model.Password); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// CreateDatabaseUserResponse : CreateDatabaseUserResponse struct
type CreateDatabaseUserResponse struct {
	Task *Task `json:"task,omitempty"`
}

// UnmarshalCreateDatabaseUserResponse unmarshals an instance of CreateDatabaseUserResponse from the specified map of raw messages.
func UnmarshalCreateDatabaseUserResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(CreateDatabaseUserResponse)
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// CreateLogicalReplicationSlotOptions : The CreateLogicalReplicationSlot options.
type CreateLogicalReplicationSlotOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	LogicalReplicationSlot *LogicalReplicationSlotLogicalReplicationSlot

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewCreateLogicalReplicationSlotOptions : Instantiate CreateLogicalReplicationSlotOptions
func (*IbmCloudDatabasesV5) NewCreateLogicalReplicationSlotOptions(id string) *CreateLogicalReplicationSlotOptions {
	opts := &CreateLogicalReplicationSlotOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set ID
func (opts *CreateLogicalReplicationSlotOptions) SetID(id string) *CreateLogicalReplicationSlotOptions {
	opts.ID = core.StringPtr(id)
	return opts
}

// SetLogicalReplicationSlot : Allow user to set LogicalReplicationSlot
func (opts *CreateLogicalReplicationSlotOptions) SetLogicalReplicationSlot(logicalReplicationSlot *LogicalReplicationSlotLogicalReplicationSlot) *CreateLogicalReplicationSlotOptions {
	opts.LogicalReplicationSlot = logicalReplicationSlot
	return opts
}

// SetHeaders : Allow user to set Headers
func (opts *CreateLogicalReplicationSlotOptions) SetHeaders(param map[string]string) *CreateLogicalReplicationSlotOptions {
	opts.Headers = param
	return opts
}
// CreateLogicalReplicationSlotResponse : CreateLogicalReplicationSlotResponse struct
type CreateLogicalReplicationSlotResponse struct {
	Task *Task `json:"task,omitempty"`
}

// UnmarshalCreateLogicalReplicationSlotResponse unmarshals an instance of CreateLogicalReplicationSlotResponse from the specified map of raw messages.
func UnmarshalCreateLogicalReplicationSlotResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(CreateLogicalReplicationSlotResponse)
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// DeleteDatabaseUserOptions : The DeleteDatabaseUser options.
type DeleteDatabaseUserOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// User type.
	UserType *string `validate:"required,ne="`

	// Username.
	Username *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewDeleteDatabaseUserOptions : Instantiate DeleteDatabaseUserOptions
func (*IbmCloudDatabasesV5) NewDeleteDatabaseUserOptions(id string, userType string, username string) *DeleteDatabaseUserOptions {
	opts := &DeleteDatabaseUserOptions{
		ID:       core.StringPtr(id),
		UserType: core.StringPtr(userType),
		Username: core.StringPtr(username),
	}
	return opts
}

// SetID : Allow user to set ID
func (opts *DeleteDatabaseUserOptions) SetID(id string) *DeleteDatabaseUserOptions {
	opts.ID = core.StringPtr(id)
	return opts
}

// SetUserType : Allow user to set UserType
func (opts *DeleteDatabaseUserOptions) SetUserType(userType string) *DeleteDatabaseUserOptions {
	opts.UserType = core.StringPtr(userType)
	return opts
}

// SetUsername : Allow user to set Username
func (opts *DeleteDatabaseUserOptions) SetUsername(username string) *DeleteDatabaseUserOptions {
	opts.Username = core.StringPtr(username)
	return opts
}

// SetHeaders : Allow user to set Headers
func (opts *DeleteDatabaseUserOptions) SetHeaders(param map[string]string) *DeleteDatabaseUserOptions {
	opts.Headers = param
	return opts
}
// DeleteDatabaseUserResponse : DeleteDatabaseUserResponse struct
type DeleteDatabaseUserResponse struct {
	Task *Task `json:"task,omitempty"`
}

// UnmarshalDeleteDatabaseUserResponse unmarshals an instance of DeleteDatabaseUserResponse from the specified map of raw messages.
func UnmarshalDeleteDatabaseUserResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(DeleteDatabaseUserResponse)
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// DeleteLogicalReplicationSlotOptions : The DeleteLogicalReplicationSlot options.
type DeleteLogicalReplicationSlotOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Name of the logical replication slot.
	Name *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewDeleteLogicalReplicationSlotOptions : Instantiate DeleteLogicalReplicationSlotOptions
func (*IbmCloudDatabasesV5) NewDeleteLogicalReplicationSlotOptions(id string, name string) *DeleteLogicalReplicationSlotOptions {
	opts := &DeleteLogicalReplicationSlotOptions{
		ID:   core.StringPtr(id),
		Name: core.StringPtr(name),
	}
	return opts
}

// SetID : Allow user to set ID
func (opts *DeleteLogicalReplicationSlotOptions) SetID(id string) *DeleteLogicalReplicationSlotOptions {
	opts.ID = core.StringPtr(id)
	return opts
}

// SetName : Allow user to set Name
func (opts *DeleteLogicalReplicationSlotOptions) SetName(name string) *DeleteLogicalReplicationSlotOptions {
	opts.Name = core.StringPtr(name)
	return opts
}

// SetHeaders : Allow user to set Headers
func (opts *DeleteLogicalReplicationSlotOptions) SetHeaders(param map[string]string) *DeleteLogicalReplicationSlotOptions {
	opts.Headers = param
	return opts
}
// DeleteLogicalReplicationSlotResponse : DeleteLogicalReplicationSlotResponse struct
type DeleteLogicalReplicationSlotResponse struct {
	Task *Task `json:"task,omitempty"`
}

// UnmarshalDeleteLogicalReplicationSlotResponse unmarshals an instance of DeleteLogicalReplicationSlotResponse from the specified map of raw messages.
func UnmarshalDeleteLogicalReplicationSlotResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(DeleteLogicalReplicationSlotResponse)
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// DeleteWhitelistEntryOptions : The DeleteWhitelistEntry options.
type DeleteWhitelistEntryOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// An IPv4 address or a CIDR range (netmasked IPv4 address).
	Ipaddress *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewDeleteWhitelistEntryOptions : Instantiate DeleteWhitelistEntryOptions
func (*IbmCloudDatabasesV5) NewDeleteWhitelistEntryOptions(id string, ipaddress string) *DeleteWhitelistEntryOptions {
	opts := &DeleteWhitelistEntryOptions{
		ID:        core.StringPtr(id),
		Ipaddress: core.StringPtr(ipaddress),
	}
	return opts
}

// SetID : Allow user to set ID
func (opts *DeleteWhitelistEntryOptions) SetID(id string) *DeleteWhitelistEntryOptions {
	opts.ID = core.StringPtr(id)
	return opts
}

// SetIpaddress : Allow user to set Ipaddress
func (opts *DeleteWhitelistEntryOptions) SetIpaddress(ipaddress string) *DeleteWhitelistEntryOptions {
	opts.Ipaddress = core.StringPtr(ipaddress)
	return opts
}

// SetHeaders : Allow user to set Headers
func (opts *DeleteWhitelistEntryOptions) SetHeaders(param map[string]string) *DeleteWhitelistEntryOptions {
	opts.Headers = param
	return opts
}
// DeleteWhitelistEntryResponse : DeleteWhitelistEntryResponse struct
type DeleteWhitelistEntryResponse struct {
	Task *Task `json:"task,omitempty"`
}

// UnmarshalDeleteWhitelistEntryResponse unmarshals an instance of DeleteWhitelistEntryResponse from the specified map of raw messages.
func UnmarshalDeleteWhitelistEntryResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(DeleteWhitelistEntryResponse)
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// Deployables : Deployable databases with their version information.
type Deployables struct {
	// Deployment type - typically the name of the database.
	Type *string `json:"type,omitempty"`

	// An array of versions of the database, their status, preferredness, and transitions.
	Versions []DeployablesVersionsItem `json:"versions,omitempty"`
}

// UnmarshalDeployables unmarshals an instance of Deployables from the specified map of raw messages.
func UnmarshalDeployables(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(Deployables)
	if err = core.UnmarshalPrimitive(m, "type", &model.Type); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "versions", &model.Versions, UnmarshalDeployablesVersionsItem); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// DeployablesVersionsItem : DeployablesVersionsItem struct
type DeployablesVersionsItem struct {
	// The version number.
	Version *string `json:"version,omitempty"`

	// The status of this version: To be finalized.
	Status *string `json:"status,omitempty"`

	// Should this version be preferred over others?.
	IsPreferred *bool `json:"is_preferred,omitempty"`

	// versions that this version can be upgraded to.
	Transitions []DeployablesVersionsItemTransitionsItem `json:"transitions,omitempty"`
}

// Constants associated with the DeployablesVersionsItem.Status property.
// The status of this version: To be finalized.
const (
	DeployablesVersionsItem_Status_Beta = "beta"
	DeployablesVersionsItem_Status_Deprecated = "deprecated"
	DeployablesVersionsItem_Status_Stable = "stable"
)

// UnmarshalDeployablesVersionsItem unmarshals an instance of DeployablesVersionsItem from the specified map of raw messages.
func UnmarshalDeployablesVersionsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(DeployablesVersionsItem)
	if err = core.UnmarshalPrimitive(m, "version", &model.Version); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "status", &model.Status); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "is_preferred", &model.IsPreferred); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "transitions", &model.Transitions, UnmarshalDeployablesVersionsItemTransitionsItem); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// DeployablesVersionsItemTransitionsItem : DeployablesVersionsItemTransitionsItem struct
type DeployablesVersionsItemTransitionsItem struct {
	// The database type.
	Application *string `json:"application,omitempty"`

	// Method of going from from_version to to_version.
	Method *string `json:"method,omitempty"`

	// The version the transition is from.
	FromVersion *string `json:"from_version,omitempty"`

	// The version the transition is to.
	ToVersion *string `json:"to_version,omitempty"`
}

// UnmarshalDeployablesVersionsItemTransitionsItem unmarshals an instance of DeployablesVersionsItemTransitionsItem from the specified map of raw messages.
func UnmarshalDeployablesVersionsItemTransitionsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(DeployablesVersionsItemTransitionsItem)
	if err = core.UnmarshalPrimitive(m, "application", &model.Application); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "method", &model.Method); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "from_version", &model.FromVersion); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "to_version", &model.ToVersion); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// Deployment : Deployment struct
type Deployment struct {
	// ID of this deployment.
	ID *string `json:"id,omitempty"`
	// Readable name of this deployment.
	Name *string `json:"name,omitempty"`
	// Database type within this deployment.
	Type *string `json:"type,omitempty"`
	// Platform-specific options for this deployment.
	PlatformOptions interface{} `json:"platform_options,omitempty"`
	// Version number of the database.
	Version *string `json:"version,omitempty"`
	// Login name of administration level user.
	// NOTE(review): single string despite the plural field name — matches the
	// "admin_usernames" API property; confirm against the service schema.
	AdminUsernames *string `json:"admin_usernames,omitempty"`
	// Whether access to this deployment is enabled from the public internet. This property can be modified by updating
	// this service instance through the Resource Controller API.
	EnablePublicEndpoints *bool `json:"enable_public_endpoints,omitempty"`
	// Whether access to this deployment is enabled from IBM Cloud via the IBM Cloud backbone network. This property can be
	// modified by updating this service instance through the Resource Controller API.
	EnablePrivateEndpoints *bool `json:"enable_private_endpoints,omitempty"`
}
// UnmarshalDeployment unmarshals an instance of Deployment from the specified map of raw messages.
// Absent keys leave the corresponding field nil; the first unmarshal error aborts and is returned.
func UnmarshalDeployment(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(Deployment)
	if err = core.UnmarshalPrimitive(m, "id", &model.ID); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "name", &model.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "type", &model.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "platform_options", &model.PlatformOptions); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "version", &model.Version); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "admin_usernames", &model.AdminUsernames); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "enable_public_endpoints", &model.EnablePublicEndpoints); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "enable_private_endpoints", &model.EnablePrivateEndpoints); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ElasticsearchConnectionHTTPS : ElasticsearchConnectionHTTPS struct
type ElasticsearchConnectionHTTPS struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`

	Composed []string `json:"composed,omitempty"`

	// Scheme/protocol for URI connection.
	Scheme *string `json:"scheme,omitempty"`

	Hosts []ElasticsearchConnectionHTTPSHostsItem `json:"hosts,omitempty"`

	// Path for URI connection.
	Path *string `json:"path,omitempty"`

	// Query options to add to the URI connection.
	QueryOptions interface{} `json:"query_options,omitempty"`

	Authentication *ElasticsearchConnectionHTTPSAuthentication `json:"authentication,omitempty"`

	Certificate *ElasticsearchConnectionHTTPSCertificate `json:"certificate,omitempty"`
}

// UnmarshalElasticsearchConnectionHTTPS unmarshals an instance of ElasticsearchConnectionHTTPS from the specified map of raw messages.
func UnmarshalElasticsearchConnectionHTTPS(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ElasticsearchConnectionHTTPS)
	if err = core.UnmarshalPrimitive(m, "type", &model.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &model.Composed); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "scheme", &model.Scheme); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "hosts", &model.Hosts, UnmarshalElasticsearchConnectionHTTPSHostsItem); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "path", &model.Path); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "query_options", &model.QueryOptions); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "authentication", &model.Authentication, UnmarshalElasticsearchConnectionHTTPSAuthentication); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &model.Certificate, UnmarshalElasticsearchConnectionHTTPSCertificate); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ElasticsearchConnectionHTTPSAuthentication : ElasticsearchConnectionHTTPSAuthentication struct
type ElasticsearchConnectionHTTPSAuthentication struct {
	// Authentication method for this credential.
	Method *string `json:"method,omitempty"`

	// Username part of credential.
	Username *string `json:"username,omitempty"`

	// Password part of credential.
	Password *string `json:"password,omitempty"`
}

// UnmarshalElasticsearchConnectionHTTPSAuthentication unmarshals an instance of ElasticsearchConnectionHTTPSAuthentication from the specified map of raw messages.
func UnmarshalElasticsearchConnectionHTTPSAuthentication(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ElasticsearchConnectionHTTPSAuthentication)
	if err = core.UnmarshalPrimitive(m, "method", &model.Method); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "username", &model.Username); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "password", &model.Password); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ElasticsearchConnectionHTTPSCertificate : ElasticsearchConnectionHTTPSCertificate struct
type ElasticsearchConnectionHTTPSCertificate struct {
	// Name associated with the certificate.
	Name *string `json:"name,omitempty"`

	// Base64 encoded version of the certificate.
	CertificateBase64 *string `json:"certificate_base64,omitempty"`
}

// UnmarshalElasticsearchConnectionHTTPSCertificate unmarshals an instance of ElasticsearchConnectionHTTPSCertificate from the specified map of raw messages.
func UnmarshalElasticsearchConnectionHTTPSCertificate(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ElasticsearchConnectionHTTPSCertificate)
	if err = core.UnmarshalPrimitive(m, "name", &model.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "certificate_base64", &model.CertificateBase64); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ElasticsearchConnectionHTTPSHostsItem : ElasticsearchConnectionHTTPSHostsItem struct
type ElasticsearchConnectionHTTPSHostsItem struct {
	// Hostname for connection.
	Hostname *string `json:"hostname,omitempty"`

	// Port number for connection.
	Port *int64 `json:"port,omitempty"`
}

// UnmarshalElasticsearchConnectionHTTPSHostsItem unmarshals an instance of ElasticsearchConnectionHTTPSHostsItem from the specified map of raw messages.
func UnmarshalElasticsearchConnectionHTTPSHostsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ElasticsearchConnectionHTTPSHostsItem)
	if err = core.UnmarshalPrimitive(m, "hostname", &model.Hostname); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "port", &model.Port); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// FileSyncOptions : The FileSync options.
type FileSyncOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewFileSyncOptions : Instantiate FileSyncOptions
func (*IbmCloudDatabasesV5) NewFileSyncOptions(id string) *FileSyncOptions {
	opts := &FileSyncOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set ID
func (opts *FileSyncOptions) SetID(id string) *FileSyncOptions {
	opts.ID = core.StringPtr(id)
	return opts
}

// SetHeaders : Allow user to set Headers
func (opts *FileSyncOptions) SetHeaders(param map[string]string) *FileSyncOptions {
	opts.Headers = param
	return opts
}
// FileSyncResponse : FileSyncResponse struct
type FileSyncResponse struct {
	Task *Task `json:"task,omitempty"`
}

// UnmarshalFileSyncResponse unmarshals an instance of FileSyncResponse from the specified map of raw messages.
func UnmarshalFileSyncResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(FileSyncResponse)
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// GRPCConnectionURI : GRPCConnectionURI struct
type GRPCConnectionURI struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`

	Composed []string `json:"composed,omitempty"`

	// Scheme/protocol for URI connection.
	Scheme *string `json:"scheme,omitempty"`

	Hosts []GRPCConnectionURIHostsItem `json:"hosts,omitempty"`

	// Path for URI connection.
	Path *string `json:"path,omitempty"`

	// Query options to add to the URI connection.
	QueryOptions interface{} `json:"query_options,omitempty"`

	Authentication *GRPCConnectionURIAuthentication `json:"authentication,omitempty"`

	Certificate *GRPCConnectionURICertificate `json:"certificate,omitempty"`
}

// UnmarshalGRPCConnectionURI unmarshals an instance of GRPCConnectionURI from the specified map of raw messages.
func UnmarshalGRPCConnectionURI(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(GRPCConnectionURI)
	if err = core.UnmarshalPrimitive(m, "type", &model.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &model.Composed); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "scheme", &model.Scheme); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "hosts", &model.Hosts, UnmarshalGRPCConnectionURIHostsItem); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "path", &model.Path); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "query_options", &model.QueryOptions); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "authentication", &model.Authentication, UnmarshalGRPCConnectionURIAuthentication); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &model.Certificate, UnmarshalGRPCConnectionURICertificate); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// GRPCConnectionURIAuthentication : GRPCConnectionURIAuthentication struct
type GRPCConnectionURIAuthentication struct {
// Authentication method for this credential.
Method *string `json:"method,omitempty"`
// Username part of credential.
Username *string `json:"username,omitempty"`
// Password part of credential.
Password *string `json:"password,omitempty"`
}
// UnmarshalGRPCConnectionURIAuthentication unmarshals an instance of GRPCConnectionURIAuthentication from the specified map of raw messages.
func UnmarshalGRPCConnectionURIAuthentication(m map[string]json.RawMessage, result interface{}) (err error) {
obj := new(GRPCConnectionURIAuthentication)
err = core.UnmarshalPrimitive(m, "method", &obj.Method)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "username", &obj.Username)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "password", &obj.Password)
if err != nil {
return
}
reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
return
}
// GRPCConnectionURICertificate : GRPCConnectionURICertificate struct
type GRPCConnectionURICertificate struct {
// Name associated with the certificate.
Name *string `json:"name,omitempty"`
// Base64 encoded version of the certificate.
CertificateBase64 *string `json:"certificate_base64,omitempty"`
}
// UnmarshalGRPCConnectionURICertificate unmarshals an instance of GRPCConnectionURICertificate from the specified map of raw messages.
func UnmarshalGRPCConnectionURICertificate(m map[string]json.RawMessage, result interface{}) (err error) {
obj := new(GRPCConnectionURICertificate)
err = core.UnmarshalPrimitive(m, "name", &obj.Name)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "certificate_base64", &obj.CertificateBase64)
if err != nil {
return
}
reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
return
}
// GRPCConnectionURIHostsItem : GRPCConnectionURIHostsItem struct
type GRPCConnectionURIHostsItem struct {
// Hostname for connection.
Hostname *string `json:"hostname,omitempty"`
// Port number for connection.
Port *int64 `json:"port,omitempty"`
}
// UnmarshalGRPCConnectionURIHostsItem unmarshals an instance of GRPCConnectionURIHostsItem from the specified map of raw messages.
func UnmarshalGRPCConnectionURIHostsItem(m map[string]json.RawMessage, result interface{}) (err error) {
obj := new(GRPCConnectionURIHostsItem)
err = core.UnmarshalPrimitive(m, "hostname", &obj.Hostname)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "port", &obj.Port)
if err != nil {
return
}
reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
return
}
// GetAutoscalingConditionsOptions : The GetAutoscalingConditions options.
type GetAutoscalingConditionsOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Group ID.
	GroupID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetAutoscalingConditionsOptions : Instantiate GetAutoscalingConditionsOptions
func (*IbmCloudDatabasesV5) NewGetAutoscalingConditionsOptions(id string, groupID string) *GetAutoscalingConditionsOptions {
	opts := &GetAutoscalingConditionsOptions{
		ID:      core.StringPtr(id),
		GroupID: core.StringPtr(groupID),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetAutoscalingConditionsOptions) SetID(id string) *GetAutoscalingConditionsOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetGroupID : Allow user to set GroupID
func (o *GetAutoscalingConditionsOptions) SetGroupID(groupID string) *GetAutoscalingConditionsOptions {
	o.GroupID = core.StringPtr(groupID)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetAutoscalingConditionsOptions) SetHeaders(param map[string]string) *GetAutoscalingConditionsOptions {
	o.Headers = param
	return o
}

// GetAutoscalingConditionsResponse : GetAutoscalingConditionsResponse struct
type GetAutoscalingConditionsResponse struct {
	Autoscaling *AutoscalingGroup `json:"autoscaling,omitempty"`
}

// UnmarshalGetAutoscalingConditionsResponse unmarshals an instance of GetAutoscalingConditionsResponse from the specified map of raw messages.
func UnmarshalGetAutoscalingConditionsResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := &GetAutoscalingConditionsResponse{}
	if err = core.UnmarshalModel(m, "autoscaling", &model.Autoscaling, UnmarshalAutoscalingGroup); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}

// GetBackupInfoOptions : The GetBackupInfo options.
type GetBackupInfoOptions struct {
	// Backup ID.
	BackupID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetBackupInfoOptions : Instantiate GetBackupInfoOptions
func (*IbmCloudDatabasesV5) NewGetBackupInfoOptions(backupID string) *GetBackupInfoOptions {
	opts := &GetBackupInfoOptions{
		BackupID: core.StringPtr(backupID),
	}
	return opts
}

// SetBackupID : Allow user to set BackupID
func (o *GetBackupInfoOptions) SetBackupID(backupID string) *GetBackupInfoOptions {
	o.BackupID = core.StringPtr(backupID)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetBackupInfoOptions) SetHeaders(param map[string]string) *GetBackupInfoOptions {
	o.Headers = param
	return o
}

// GetBackupInfoResponse : GetBackupInfoResponse struct
type GetBackupInfoResponse struct {
	Backup *Backup `json:"backup,omitempty"`
}

// UnmarshalGetBackupInfoResponse unmarshals an instance of GetBackupInfoResponse from the specified map of raw messages.
func UnmarshalGetBackupInfoResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := &GetBackupInfoResponse{}
	if err = core.UnmarshalModel(m, "backup", &model.Backup, UnmarshalBackup); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// GetConnectionDeprecatedOptions : The GetConnectionDeprecated options.
type GetConnectionDeprecatedOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// User type.
	UserType *string `validate:"required,ne="`

	// User ID.
	UserID *string `validate:"required,ne="`

	// Optional certificate root path to prepend certificate names. Certificates would be stored in this directory for use
	// by other commands.
	CertificateRoot *string

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetConnectionDeprecatedOptions : Instantiate GetConnectionDeprecatedOptions
func (*IbmCloudDatabasesV5) NewGetConnectionDeprecatedOptions(id string, userType string, userID string) *GetConnectionDeprecatedOptions {
	opts := &GetConnectionDeprecatedOptions{
		ID:       core.StringPtr(id),
		UserType: core.StringPtr(userType),
		UserID:   core.StringPtr(userID),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetConnectionDeprecatedOptions) SetID(id string) *GetConnectionDeprecatedOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetUserType : Allow user to set UserType
func (o *GetConnectionDeprecatedOptions) SetUserType(userType string) *GetConnectionDeprecatedOptions {
	o.UserType = core.StringPtr(userType)
	return o
}

// SetUserID : Allow user to set UserID
func (o *GetConnectionDeprecatedOptions) SetUserID(userID string) *GetConnectionDeprecatedOptions {
	o.UserID = core.StringPtr(userID)
	return o
}

// SetCertificateRoot : Allow user to set CertificateRoot
func (o *GetConnectionDeprecatedOptions) SetCertificateRoot(certificateRoot string) *GetConnectionDeprecatedOptions {
	o.CertificateRoot = core.StringPtr(certificateRoot)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetConnectionDeprecatedOptions) SetHeaders(param map[string]string) *GetConnectionDeprecatedOptions {
	o.Headers = param
	return o
}

// GetConnectionOptions : The GetConnection options.
type GetConnectionOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// User type.
	UserType *string `validate:"required,ne="`

	// User ID.
	UserID *string `validate:"required,ne="`

	// Endpoint Type. The endpoint must be enabled on the deployment before its connection information can be fetched.
	EndpointType *string `validate:"required,ne="`

	// Optional certificate root path to prepend certificate names. Certificates would be stored in this directory for use
	// by other commands.
	CertificateRoot *string

	// Allows users to set headers on API requests
	Headers map[string]string
}

// Constants associated with the GetConnectionOptions.EndpointType property.
// Endpoint Type. The endpoint must be enabled on the deployment before its connection information can be fetched.
const (
	GetConnectionOptions_EndpointType_Private = "private"
	GetConnectionOptions_EndpointType_Public  = "public"
)

// NewGetConnectionOptions : Instantiate GetConnectionOptions
func (*IbmCloudDatabasesV5) NewGetConnectionOptions(id string, userType string, userID string, endpointType string) *GetConnectionOptions {
	opts := &GetConnectionOptions{
		ID:           core.StringPtr(id),
		UserType:     core.StringPtr(userType),
		UserID:       core.StringPtr(userID),
		EndpointType: core.StringPtr(endpointType),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetConnectionOptions) SetID(id string) *GetConnectionOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetUserType : Allow user to set UserType
func (o *GetConnectionOptions) SetUserType(userType string) *GetConnectionOptions {
	o.UserType = core.StringPtr(userType)
	return o
}

// SetUserID : Allow user to set UserID
func (o *GetConnectionOptions) SetUserID(userID string) *GetConnectionOptions {
	o.UserID = core.StringPtr(userID)
	return o
}

// SetEndpointType : Allow user to set EndpointType
func (o *GetConnectionOptions) SetEndpointType(endpointType string) *GetConnectionOptions {
	o.EndpointType = core.StringPtr(endpointType)
	return o
}

// SetCertificateRoot : Allow user to set CertificateRoot
func (o *GetConnectionOptions) SetCertificateRoot(certificateRoot string) *GetConnectionOptions {
	o.CertificateRoot = core.StringPtr(certificateRoot)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetConnectionOptions) SetHeaders(param map[string]string) *GetConnectionOptions {
	o.Headers = param
	return o
}
// GetDatabaseConfigurationSchemaOptions : The GetDatabaseConfigurationSchema options.
type GetDatabaseConfigurationSchemaOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetDatabaseConfigurationSchemaOptions : Instantiate GetDatabaseConfigurationSchemaOptions
func (*IbmCloudDatabasesV5) NewGetDatabaseConfigurationSchemaOptions(id string) *GetDatabaseConfigurationSchemaOptions {
	opts := &GetDatabaseConfigurationSchemaOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetDatabaseConfigurationSchemaOptions) SetID(id string) *GetDatabaseConfigurationSchemaOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetDatabaseConfigurationSchemaOptions) SetHeaders(param map[string]string) *GetDatabaseConfigurationSchemaOptions {
	o.Headers = param
	return o
}

// GetDatabaseConfigurationSchemaResponse : GetDatabaseConfigurationSchemaResponse struct
type GetDatabaseConfigurationSchemaResponse struct {
	// Database Configuration Schema.
	Schema *ConfigurationSchema `json:"schema,omitempty"`
}

// UnmarshalGetDatabaseConfigurationSchemaResponse unmarshals an instance of GetDatabaseConfigurationSchemaResponse from the specified map of raw messages.
func UnmarshalGetDatabaseConfigurationSchemaResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := &GetDatabaseConfigurationSchemaResponse{}
	if err = core.UnmarshalModel(m, "schema", &model.Schema, UnmarshalConfigurationSchema); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}

// GetDefaultScalingGroupsOptions : The GetDefaultScalingGroups options.
type GetDefaultScalingGroupsOptions struct {
	// Database type name.
	Type *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// Constants associated with the GetDefaultScalingGroupsOptions.Type property.
// Database type name.
const (
	GetDefaultScalingGroupsOptions_Type_Etcd       = "etcd"
	GetDefaultScalingGroupsOptions_Type_Postgresql = "postgresql"
)

// NewGetDefaultScalingGroupsOptions : Instantiate GetDefaultScalingGroupsOptions
func (*IbmCloudDatabasesV5) NewGetDefaultScalingGroupsOptions(typeVar string) *GetDefaultScalingGroupsOptions {
	opts := &GetDefaultScalingGroupsOptions{
		Type: core.StringPtr(typeVar),
	}
	return opts
}

// SetType : Allow user to set Type
func (o *GetDefaultScalingGroupsOptions) SetType(typeVar string) *GetDefaultScalingGroupsOptions {
	o.Type = core.StringPtr(typeVar)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetDefaultScalingGroupsOptions) SetHeaders(param map[string]string) *GetDefaultScalingGroupsOptions {
	o.Headers = param
	return o
}

// GetDeployablesOptions : The GetDeployables options.
type GetDeployablesOptions struct {
	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetDeployablesOptions : Instantiate GetDeployablesOptions
func (*IbmCloudDatabasesV5) NewGetDeployablesOptions() *GetDeployablesOptions {
	return &GetDeployablesOptions{}
}

// SetHeaders : Allow user to set request Headers
func (o *GetDeployablesOptions) SetHeaders(param map[string]string) *GetDeployablesOptions {
	o.Headers = param
	return o
}

// GetDeployablesResponse : GetDeployablesResponse struct
type GetDeployablesResponse struct {
	Deployables []Deployables `json:"deployables,omitempty"`
}

// UnmarshalGetDeployablesResponse unmarshals an instance of GetDeployablesResponse from the specified map of raw messages.
func UnmarshalGetDeployablesResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := &GetDeployablesResponse{}
	if err = core.UnmarshalModel(m, "deployables", &model.Deployables, UnmarshalDeployables); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// GetDeploymentBackupsOptions : The GetDeploymentBackups options.
type GetDeploymentBackupsOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetDeploymentBackupsOptions : Instantiate GetDeploymentBackupsOptions
func (*IbmCloudDatabasesV5) NewGetDeploymentBackupsOptions(id string) *GetDeploymentBackupsOptions {
	opts := &GetDeploymentBackupsOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetDeploymentBackupsOptions) SetID(id string) *GetDeploymentBackupsOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetDeploymentBackupsOptions) SetHeaders(param map[string]string) *GetDeploymentBackupsOptions {
	o.Headers = param
	return o
}

// GetDeploymentInfoOptions : The GetDeploymentInfo options.
type GetDeploymentInfoOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetDeploymentInfoOptions : Instantiate GetDeploymentInfoOptions
func (*IbmCloudDatabasesV5) NewGetDeploymentInfoOptions(id string) *GetDeploymentInfoOptions {
	opts := &GetDeploymentInfoOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetDeploymentInfoOptions) SetID(id string) *GetDeploymentInfoOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetDeploymentInfoOptions) SetHeaders(param map[string]string) *GetDeploymentInfoOptions {
	o.Headers = param
	return o
}

// GetDeploymentInfoResponse : GetDeploymentInfoResponse struct
type GetDeploymentInfoResponse struct {
	Deployment *Deployment `json:"deployment,omitempty"`
}

// UnmarshalGetDeploymentInfoResponse unmarshals an instance of GetDeploymentInfoResponse from the specified map of raw messages.
func UnmarshalGetDeploymentInfoResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := &GetDeploymentInfoResponse{}
	if err = core.UnmarshalModel(m, "deployment", &model.Deployment, UnmarshalDeployment); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}

// GetDeploymentScalingGroupsOptions : The GetDeploymentScalingGroups options.
type GetDeploymentScalingGroupsOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetDeploymentScalingGroupsOptions : Instantiate GetDeploymentScalingGroupsOptions
func (*IbmCloudDatabasesV5) NewGetDeploymentScalingGroupsOptions(id string) *GetDeploymentScalingGroupsOptions {
	opts := &GetDeploymentScalingGroupsOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetDeploymentScalingGroupsOptions) SetID(id string) *GetDeploymentScalingGroupsOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetDeploymentScalingGroupsOptions) SetHeaders(param map[string]string) *GetDeploymentScalingGroupsOptions {
	o.Headers = param
	return o
}

// GetDeploymentTasksOptions : The GetDeploymentTasks options.
type GetDeploymentTasksOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetDeploymentTasksOptions : Instantiate GetDeploymentTasksOptions
func (*IbmCloudDatabasesV5) NewGetDeploymentTasksOptions(id string) *GetDeploymentTasksOptions {
	opts := &GetDeploymentTasksOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetDeploymentTasksOptions) SetID(id string) *GetDeploymentTasksOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetDeploymentTasksOptions) SetHeaders(param map[string]string) *GetDeploymentTasksOptions {
	o.Headers = param
	return o
}

// GetPITRdataOptions : The GetPITRdata options.
type GetPITRdataOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetPITRdataOptions : Instantiate GetPITRdataOptions
func (*IbmCloudDatabasesV5) NewGetPITRdataOptions(id string) *GetPITRdataOptions {
	opts := &GetPITRdataOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetPITRdataOptions) SetID(id string) *GetPITRdataOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetPITRdataOptions) SetHeaders(param map[string]string) *GetPITRdataOptions {
	o.Headers = param
	return o
}
// GetRegionsOptions : The GetRegions options.
type GetRegionsOptions struct {
	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetRegionsOptions : Instantiate GetRegionsOptions
func (*IbmCloudDatabasesV5) NewGetRegionsOptions() *GetRegionsOptions {
	return &GetRegionsOptions{}
}

// SetHeaders : Allow user to set request Headers
func (o *GetRegionsOptions) SetHeaders(param map[string]string) *GetRegionsOptions {
	o.Headers = param
	return o
}

// GetRegionsResponse : GetRegionsResponse struct
type GetRegionsResponse struct {
	// An array of region ids.
	Regions []string `json:"regions,omitempty"`
}

// UnmarshalGetRegionsResponse unmarshals an instance of GetRegionsResponse from the specified map of raw messages.
func UnmarshalGetRegionsResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := &GetRegionsResponse{}
	if err = core.UnmarshalPrimitive(m, "regions", &model.Regions); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}

// GetRemotesOptions : The GetRemotes options.
type GetRemotesOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetRemotesOptions : Instantiate GetRemotesOptions
func (*IbmCloudDatabasesV5) NewGetRemotesOptions(id string) *GetRemotesOptions {
	opts := &GetRemotesOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetRemotesOptions) SetID(id string) *GetRemotesOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetRemotesOptions) SetHeaders(param map[string]string) *GetRemotesOptions {
	o.Headers = param
	return o
}

// GetRemotesResponse : GetRemotesResponse struct
type GetRemotesResponse struct {
	// Remotes.
	Remotes *Remotes `json:"remotes,omitempty"`
}

// UnmarshalGetRemotesResponse unmarshals an instance of GetRemotesResponse from the specified map of raw messages.
func UnmarshalGetRemotesResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := &GetRemotesResponse{}
	if err = core.UnmarshalModel(m, "remotes", &model.Remotes, UnmarshalRemotes); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}

// GetRemotesSchemaOptions : The GetRemotesSchema options.
type GetRemotesSchemaOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetRemotesSchemaOptions : Instantiate GetRemotesSchemaOptions
func (*IbmCloudDatabasesV5) NewGetRemotesSchemaOptions(id string) *GetRemotesSchemaOptions {
	opts := &GetRemotesSchemaOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetRemotesSchemaOptions) SetID(id string) *GetRemotesSchemaOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetRemotesSchemaOptions) SetHeaders(param map[string]string) *GetRemotesSchemaOptions {
	o.Headers = param
	return o
}

// GetRemotesSchemaResponse : GetRemotesSchemaResponse struct
type GetRemotesSchemaResponse struct {
	Task *Task `json:"task,omitempty"`
}

// UnmarshalGetRemotesSchemaResponse unmarshals an instance of GetRemotesSchemaResponse from the specified map of raw messages.
func UnmarshalGetRemotesSchemaResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := &GetRemotesSchemaResponse{}
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}

// GetTasksOptions : The GetTasks options.
type GetTasksOptions struct {
	// Task ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetTasksOptions : Instantiate GetTasksOptions
func (*IbmCloudDatabasesV5) NewGetTasksOptions(id string) *GetTasksOptions {
	opts := &GetTasksOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the task ID
func (o *GetTasksOptions) SetID(id string) *GetTasksOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetTasksOptions) SetHeaders(param map[string]string) *GetTasksOptions {
	o.Headers = param
	return o
}

// GetTasksResponse : GetTasksResponse struct
type GetTasksResponse struct {
	Task *Task `json:"task,omitempty"`
}

// UnmarshalGetTasksResponse unmarshals an instance of GetTasksResponse from the specified map of raw messages.
func UnmarshalGetTasksResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	model := &GetTasksResponse{}
	if err = core.UnmarshalModel(m, "task", &model.Task, UnmarshalTask); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// GetUserOptions : The GetUser options.
type GetUserOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// User ID.
	UserID *string `validate:"required,ne="`

	// Endpoint Type. The endpoint must be enabled on the deployment before its connection information can be fetched.
	EndpointType *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// Constants associated with the GetUserOptions.EndpointType property.
// Endpoint Type. The endpoint must be enabled on the deployment before its connection information can be fetched.
const (
	GetUserOptions_EndpointType_Private = "private"
	GetUserOptions_EndpointType_Public  = "public"
)

// NewGetUserOptions : Instantiate GetUserOptions
func (*IbmCloudDatabasesV5) NewGetUserOptions(id string, userID string, endpointType string) *GetUserOptions {
	opts := &GetUserOptions{
		ID:           core.StringPtr(id),
		UserID:       core.StringPtr(userID),
		EndpointType: core.StringPtr(endpointType),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetUserOptions) SetID(id string) *GetUserOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetUserID : Allow user to set UserID
func (o *GetUserOptions) SetUserID(userID string) *GetUserOptions {
	o.UserID = core.StringPtr(userID)
	return o
}

// SetEndpointType : Allow user to set EndpointType
func (o *GetUserOptions) SetEndpointType(endpointType string) *GetUserOptions {
	o.EndpointType = core.StringPtr(endpointType)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetUserOptions) SetHeaders(param map[string]string) *GetUserOptions {
	o.Headers = param
	return o
}

// GetWhitelistOptions : The GetWhitelist options.
type GetWhitelistOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetWhitelistOptions : Instantiate GetWhitelistOptions
func (*IbmCloudDatabasesV5) NewGetWhitelistOptions(id string) *GetWhitelistOptions {
	opts := &GetWhitelistOptions{
		ID: core.StringPtr(id),
	}
	return opts
}

// SetID : Allow user to set the deployment ID
func (o *GetWhitelistOptions) SetID(id string) *GetWhitelistOptions {
	o.ID = core.StringPtr(id)
	return o
}

// SetHeaders : Allow user to set request Headers
func (o *GetWhitelistOptions) SetHeaders(param map[string]string) *GetWhitelistOptions {
	o.Headers = param
	return o
}
// Group : Group struct
type Group struct {
// Id/name for group.
ID *string `json:"id,omitempty"`
// Number of entities in the group.
Count *int64 `json:"count,omitempty"`
Members *GroupMembers `json:"members,omitempty"`
Memory *GroupMemory `json:"memory,omitempty"`
Cpu *GroupCpu `json:"cpu,omitempty"`
Disk *GroupDisk `json:"disk,omitempty"`
}
// UnmarshalGroup unmarshals an instance of Group from the specified map of raw messages.
func UnmarshalGroup(m map[string]json.RawMessage, result interface{}) (err error) {
obj := new(Group)
err = core.UnmarshalPrimitive(m, "id", &obj.ID)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "count", &obj.Count)
if err != nil {
return
}
err = core.UnmarshalModel(m, "members", &obj.Members, UnmarshalGroupMembers)
if err != nil {
return
}
err = core.UnmarshalModel(m, "memory", &obj.Memory, UnmarshalGroupMemory)
if err != nil {
return
}
err = core.UnmarshalModel(m, "cpu", &obj.Cpu, UnmarshalGroupCpu)
if err != nil {
return
}
err = core.UnmarshalModel(m, "disk", &obj.Disk, UnmarshalGroupDisk)
if err != nil {
return
}
reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
return
}
// GroupCpu : GroupCpu struct
type GroupCpu struct {
// Units used for scaling cpu - count means the value is the number of the unit(s) available.
Units *string `json:"units,omitempty"`
// Number of allocated CPUs.
AllocationCount *int64 `json:"allocation_count,omitempty"`
// Minimum number of CPUs.
MinimumCount *int64 `json:"minimum_count,omitempty"`
// Maximum number of CPUs.
MaximumCount *int64 `json:"maximum_count,omitempty"`
// Step size CPUs can be adjusted.
StepSizeCount *int64 `json:"step_size_count,omitempty"`
// Is this group's CPU count adjustable.
IsAdjustable *bool `json:"is_adjustable,omitempty"`
// Is this group's CPU optional?.
IsOptional *bool `json:"is_optional,omitempty"`
// Can this group's CPU scale down?.
CanScaleDown *bool `json:"can_scale_down,omitempty"`
}
// UnmarshalGroupCpu unmarshals an instance of GroupCpu from the specified map of raw messages.
func UnmarshalGroupCpu(m map[string]json.RawMessage, result interface{}) (err error) {
obj := new(GroupCpu)
err = core.UnmarshalPrimitive(m, "units", &obj.Units)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "allocation_count", &obj.AllocationCount)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "minimum_count", &obj.MinimumCount)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "maximum_count", &obj.MaximumCount)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "step_size_count", &obj.StepSizeCount)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "is_adjustable", &obj.IsAdjustable)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "is_optional", &obj.IsOptional)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "can_scale_down", &obj.CanScaleDown)
if err != nil {
return
}
reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
return
}
// GroupDisk : GroupDisk struct
type GroupDisk struct {
// Units used for scaling storage.
Units *string `json:"units,omitempty"`
// Allocated storage in MB.
AllocationMb *int64 `json:"allocation_mb,omitempty"`
// Minimum allocated storage.
MinimumMb *int64 `json:"minimum_mb,omitempty"`
// Maximum allocated storage.
MaximumMb *int64 `json:"maximum_mb,omitempty"`
// Step size storage can be adjusted.
StepSizeMb *int64 `json:"step_size_mb,omitempty"`
// Is this group's storage adjustable?.
IsAdjustable *bool `json:"is_adjustable,omitempty"`
// Is this group's storage optional?.
IsOptional *bool `json:"is_optional,omitempty"`
// Can this group's storage scale down?.
CanScaleDown *bool `json:"can_scale_down,omitempty"`
}
// UnmarshalGroupDisk unmarshals an instance of GroupDisk from the specified map of raw messages.
func UnmarshalGroupDisk(m map[string]json.RawMessage, result interface{}) (err error) {
obj := new(GroupDisk)
err = core.UnmarshalPrimitive(m, "units", &obj.Units)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "allocation_mb", &obj.AllocationMb)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "minimum_mb", &obj.MinimumMb)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "maximum_mb", &obj.MaximumMb)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "step_size_mb", &obj.StepSizeMb)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "is_adjustable", &obj.IsAdjustable)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "is_optional", &obj.IsOptional)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "can_scale_down", &obj.CanScaleDown)
if err != nil {
return
}
reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
return
}
// GroupMembers : GroupMembers struct. Describes the current count and the
// scaling bounds (min/max/step) for a group's member nodes.
type GroupMembers struct {
	// Units used for scaling number of members.
	Units *string `json:"units,omitempty"`
	// Allocated number of members.
	AllocationCount *int64 `json:"allocation_count,omitempty"`
	// Minimum number of members.
	MinimumCount *int64 `json:"minimum_count,omitempty"`
	// Maximum number of members.
	MaximumCount *int64 `json:"maximum_count,omitempty"`
	// Step size for number of members.
	StepSizeCount *int64 `json:"step_size_count,omitempty"`
	// Is this deployment's number of members adjustable?
	IsAdjustable *bool `json:"is_adjustable,omitempty"`
	// Is this deployment's number of members optional?
	IsOptional *bool `json:"is_optional,omitempty"`
	// Can this deployment's number of members scale down?
	CanScaleDown *bool `json:"can_scale_down,omitempty"`
}
// UnmarshalGroupMembers unmarshals an instance of GroupMembers from the specified map of raw messages.
func UnmarshalGroupMembers(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(GroupMembers)
	if err = core.UnmarshalPrimitive(m, "units", &obj.Units); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "allocation_count", &obj.AllocationCount); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "minimum_count", &obj.MinimumCount); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "maximum_count", &obj.MaximumCount); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "step_size_count", &obj.StepSizeCount); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "is_adjustable", &obj.IsAdjustable); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "is_optional", &obj.IsOptional); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "can_scale_down", &obj.CanScaleDown); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GroupMemory : GroupMemory struct. Describes the current allocation and the
// scaling bounds (min/max/step) for a group's memory resource.
type GroupMemory struct {
	// Units used for scaling memory.
	Units *string `json:"units,omitempty"`
	// Allocated memory in MB.
	AllocationMb *int64 `json:"allocation_mb,omitempty"`
	// Minimum memory in MB.
	MinimumMb *int64 `json:"minimum_mb,omitempty"`
	// Maximum memory in MB.
	MaximumMb *int64 `json:"maximum_mb,omitempty"`
	// Step size memory can be adjusted by in MB.
	StepSizeMb *int64 `json:"step_size_mb,omitempty"`
	// Is this group's memory adjustable?
	IsAdjustable *bool `json:"is_adjustable,omitempty"`
	// Is this group's memory optional?
	IsOptional *bool `json:"is_optional,omitempty"`
	// Can this group's memory scale down?
	CanScaleDown *bool `json:"can_scale_down,omitempty"`
}
// UnmarshalGroupMemory unmarshals an instance of GroupMemory from the specified map of raw messages.
func UnmarshalGroupMemory(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(GroupMemory)
	if err = core.UnmarshalPrimitive(m, "units", &obj.Units); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "allocation_mb", &obj.AllocationMb); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "minimum_mb", &obj.MinimumMb); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "maximum_mb", &obj.MaximumMb); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "step_size_mb", &obj.StepSizeMb); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "is_adjustable", &obj.IsAdjustable); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "is_optional", &obj.IsOptional); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "can_scale_down", &obj.CanScaleDown); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// Groups : Groups struct. Wrapper holding the list of scaling groups for a
// deployment.
type Groups struct {
	// The scaling groups.
	Groups []Group `json:"groups,omitempty"`
}
// UnmarshalGroups unmarshals an instance of Groups from the specified map of raw messages.
func UnmarshalGroups(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(Groups)
	if err = core.UnmarshalModel(m, "groups", &obj.Groups, UnmarshalGroup); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// IntegerPropertySchema : Integer Property Schema. Describes an
// integer-valued configuration setting, including its default and the
// min/max/step bounds it accepts.
type IntegerPropertySchema struct {
	// Whether the setting is customer-configurable.
	CustomerConfigurable *bool `json:"customer_configurable,omitempty"`
	// The default value of the setting.
	Default *int64 `json:"default,omitempty"`
	// The description of the default value.
	DefaultDescription *string `json:"default_description,omitempty"`
	// The description of the setting.
	Description *string `json:"description,omitempty"`
	// The type of this setting (e.g., string, integer).
	Kind *string `json:"kind,omitempty"`
	// Whether or not changing this setting will restart the database.
	RequiresRestart *bool `json:"requires_restart,omitempty"`
	// The minimum value that this setting accepts.
	Min *int64 `json:"min,omitempty"`
	// The maximum value that this setting accepts.
	Max *int64 `json:"max,omitempty"`
	// The number that should be skipped between each step of a slider rendered for this setting.
	Step *int64 `json:"step,omitempty"`
}
// UnmarshalIntegerPropertySchema unmarshals an instance of IntegerPropertySchema from the specified map of raw messages.
func UnmarshalIntegerPropertySchema(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(IntegerPropertySchema)
	if err = core.UnmarshalPrimitive(m, "customer_configurable", &obj.CustomerConfigurable); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "default", &obj.Default); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "default_description", &obj.DefaultDescription); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "description", &obj.Description); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "kind", &obj.Kind); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "requires_restart", &obj.RequiresRestart); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "min", &obj.Min); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "max", &obj.Max); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "step", &obj.Step); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// KillConnectionsOptions : The KillConnections options.
type KillConnectionsOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	// Allows users to set headers on API requests.
	Headers map[string]string
}
// NewKillConnectionsOptions : Instantiate KillConnectionsOptions with the
// required deployment ID.
func (*IbmCloudDatabasesV5) NewKillConnectionsOptions(id string) *KillConnectionsOptions {
	opts := KillConnectionsOptions{ID: core.StringPtr(id)}
	return &opts
}
// SetID : Allow user to set ID. Returns the options receiver for chaining.
func (o *KillConnectionsOptions) SetID(id string) *KillConnectionsOptions {
	o.ID = core.StringPtr(id)
	return o
}
// SetHeaders : Allow user to set Headers. Returns the options receiver for chaining.
func (o *KillConnectionsOptions) SetHeaders(param map[string]string) *KillConnectionsOptions {
	o.Headers = param
	return o
}
// KillConnectionsResponse : KillConnectionsResponse struct. Wraps the task
// created to kill a deployment's connections.
type KillConnectionsResponse struct {
	// The asynchronous task tracking the operation.
	Task *Task `json:"task,omitempty"`
}
// UnmarshalKillConnectionsResponse unmarshals an instance of KillConnectionsResponse from the specified map of raw messages.
func UnmarshalKillConnectionsResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(KillConnectionsResponse)
	if err = core.UnmarshalModel(m, "task", &obj.Task, UnmarshalTask); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// LogicalReplicationSlotLogicalReplicationSlot : LogicalReplicationSlotLogicalReplicationSlot struct
type LogicalReplicationSlotLogicalReplicationSlot struct {
	// Name of the replication slot.
	Name *string `json:"name,omitempty"`
	// Name of the database the replication slot is created on.
	DatabaseName *string `json:"database_name,omitempty"`
	// Creating a replication slot is only supported for use with wal2json.
	PluginType *string `json:"plugin_type,omitempty"`
}
// UnmarshalLogicalReplicationSlotLogicalReplicationSlot unmarshals an instance of LogicalReplicationSlotLogicalReplicationSlot from the specified map of raw messages.
func UnmarshalLogicalReplicationSlotLogicalReplicationSlot(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(LogicalReplicationSlotLogicalReplicationSlot)
	if err = core.UnmarshalPrimitive(m, "name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "database_name", &obj.DatabaseName); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "plugin_type", &obj.PluginType); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// MongoDBConnectionURI : MongoDBConnectionURI struct. Connection information
// for reaching a MongoDB deployment over its URI-style interface.
type MongoDBConnectionURI struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`
	// Fully composed connection strings.
	Composed []string `json:"composed,omitempty"`
	// Scheme/protocol for URI connection.
	Scheme *string `json:"scheme,omitempty"`
	// Host/port pairs for the connection.
	Hosts []MongoDBConnectionURIHostsItem `json:"hosts,omitempty"`
	// Path for URI connection.
	Path *string `json:"path,omitempty"`
	// Query options to add to the URI connection.
	QueryOptions interface{} `json:"query_options,omitempty"`
	// Credential used to authenticate the connection.
	Authentication *MongoDBConnectionURIAuthentication `json:"authentication,omitempty"`
	// TLS certificate for the connection.
	Certificate *MongoDBConnectionURICertificate `json:"certificate,omitempty"`
	// Name of the database to use in the URI connection.
	Database *string `json:"database,omitempty"`
	// Name of the replica set to use in the URI connection.
	ReplicaSet *string `json:"replica_set,omitempty"`
}
// UnmarshalMongoDBConnectionURI unmarshals an instance of MongoDBConnectionURI from the specified map of raw messages.
func UnmarshalMongoDBConnectionURI(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(MongoDBConnectionURI)
	if err = core.UnmarshalPrimitive(m, "type", &obj.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &obj.Composed); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "scheme", &obj.Scheme); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "hosts", &obj.Hosts, UnmarshalMongoDBConnectionURIHostsItem); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "path", &obj.Path); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "query_options", &obj.QueryOptions); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "authentication", &obj.Authentication, UnmarshalMongoDBConnectionURIAuthentication); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &obj.Certificate, UnmarshalMongoDBConnectionURICertificate); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "database", &obj.Database); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "replica_set", &obj.ReplicaSet); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// MongoDBConnectionURIAuthentication : MongoDBConnectionURIAuthentication struct.
// Credential used to authenticate a MongoDB URI connection.
type MongoDBConnectionURIAuthentication struct {
	// Authentication method for this credential.
	Method *string `json:"method,omitempty"`
	// Username part of credential.
	Username *string `json:"username,omitempty"`
	// Password part of credential.
	Password *string `json:"password,omitempty"`
}
// UnmarshalMongoDBConnectionURIAuthentication unmarshals an instance of MongoDBConnectionURIAuthentication from the specified map of raw messages.
func UnmarshalMongoDBConnectionURIAuthentication(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(MongoDBConnectionURIAuthentication)
	if err = core.UnmarshalPrimitive(m, "method", &obj.Method); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "username", &obj.Username); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "password", &obj.Password); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// MongoDBConnectionURICertificate : MongoDBConnectionURICertificate struct.
// TLS certificate used by a MongoDB URI connection.
type MongoDBConnectionURICertificate struct {
	// Name associated with the certificate.
	Name *string `json:"name,omitempty"`
	// Base64 encoded version of the certificate.
	CertificateBase64 *string `json:"certificate_base64,omitempty"`
}
// UnmarshalMongoDBConnectionURICertificate unmarshals an instance of MongoDBConnectionURICertificate from the specified map of raw messages.
func UnmarshalMongoDBConnectionURICertificate(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(MongoDBConnectionURICertificate)
	if err = core.UnmarshalPrimitive(m, "name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "certificate_base64", &obj.CertificateBase64); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// MongoDBConnectionURIHostsItem : MongoDBConnectionURIHostsItem struct.
// A single host/port endpoint for a MongoDB URI connection.
type MongoDBConnectionURIHostsItem struct {
	// Hostname for connection.
	Hostname *string `json:"hostname,omitempty"`
	// Port number for connection.
	Port *int64 `json:"port,omitempty"`
}
// UnmarshalMongoDBConnectionURIHostsItem unmarshals an instance of MongoDBConnectionURIHostsItem from the specified map of raw messages.
func UnmarshalMongoDBConnectionURIHostsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(MongoDBConnectionURIHostsItem)
	if err = core.UnmarshalPrimitive(m, "hostname", &obj.Hostname); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "port", &obj.Port); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// PointInTimeRecoveryData : PointInTimeRecoveryData struct. Reports the
// earliest timestamp a deployment can be restored to.
type PointInTimeRecoveryData struct {
	// Earliest timestamp point-in-time recovery can restore to.
	EarliestPointInTimeRecoveryTime *string `json:"earliest_point_in_time_recovery_time,omitempty"`
}
// UnmarshalPointInTimeRecoveryData unmarshals an instance of PointInTimeRecoveryData from the specified map of raw messages.
func UnmarshalPointInTimeRecoveryData(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(PointInTimeRecoveryData)
	if err = core.UnmarshalPrimitive(m, "earliest_point_in_time_recovery_time", &obj.EarliestPointInTimeRecoveryTime); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// PostgreSQLConnectionURI : PostgreSQLConnectionURI struct. Connection
// information for reaching a PostgreSQL deployment over its URI-style
// interface.
type PostgreSQLConnectionURI struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`
	// Fully composed connection strings.
	Composed []string `json:"composed,omitempty"`
	// Scheme/protocol for URI connection.
	Scheme *string `json:"scheme,omitempty"`
	// Host/port pairs for the connection.
	Hosts []PostgreSQLConnectionURIHostsItem `json:"hosts,omitempty"`
	// Path for URI connection.
	Path *string `json:"path,omitempty"`
	// Query options to add to the URI connection.
	QueryOptions interface{} `json:"query_options,omitempty"`
	// Credential used to authenticate the connection.
	Authentication *PostgreSQLConnectionURIAuthentication `json:"authentication,omitempty"`
	// TLS certificate for the connection.
	Certificate *PostgreSQLConnectionURICertificate `json:"certificate,omitempty"`
	// Name of the database to use in the URI connection.
	Database *string `json:"database,omitempty"`
}
// UnmarshalPostgreSQLConnectionURI unmarshals an instance of PostgreSQLConnectionURI from the specified map of raw messages.
func UnmarshalPostgreSQLConnectionURI(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(PostgreSQLConnectionURI)
	if err = core.UnmarshalPrimitive(m, "type", &obj.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &obj.Composed); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "scheme", &obj.Scheme); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "hosts", &obj.Hosts, UnmarshalPostgreSQLConnectionURIHostsItem); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "path", &obj.Path); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "query_options", &obj.QueryOptions); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "authentication", &obj.Authentication, UnmarshalPostgreSQLConnectionURIAuthentication); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &obj.Certificate, UnmarshalPostgreSQLConnectionURICertificate); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "database", &obj.Database); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// PostgreSQLConnectionURIAuthentication : PostgreSQLConnectionURIAuthentication struct.
// Credential used to authenticate a PostgreSQL URI connection.
type PostgreSQLConnectionURIAuthentication struct {
	// Authentication method for this credential.
	Method *string `json:"method,omitempty"`
	// Username part of credential.
	Username *string `json:"username,omitempty"`
	// Password part of credential.
	Password *string `json:"password,omitempty"`
}
// UnmarshalPostgreSQLConnectionURIAuthentication unmarshals an instance of PostgreSQLConnectionURIAuthentication from the specified map of raw messages.
func UnmarshalPostgreSQLConnectionURIAuthentication(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(PostgreSQLConnectionURIAuthentication)
	if err = core.UnmarshalPrimitive(m, "method", &obj.Method); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "username", &obj.Username); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "password", &obj.Password); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// PostgreSQLConnectionURICertificate : PostgreSQLConnectionURICertificate struct.
// TLS certificate used by a PostgreSQL URI connection.
type PostgreSQLConnectionURICertificate struct {
	// Name associated with the certificate.
	Name *string `json:"name,omitempty"`
	// Base64 encoded version of the certificate.
	CertificateBase64 *string `json:"certificate_base64,omitempty"`
}
// UnmarshalPostgreSQLConnectionURICertificate unmarshals an instance of PostgreSQLConnectionURICertificate from the specified map of raw messages.
func UnmarshalPostgreSQLConnectionURICertificate(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(PostgreSQLConnectionURICertificate)
	if err = core.UnmarshalPrimitive(m, "name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "certificate_base64", &obj.CertificateBase64); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// PostgreSQLConnectionURIHostsItem : PostgreSQLConnectionURIHostsItem struct.
// A single host/port endpoint for a PostgreSQL URI connection.
type PostgreSQLConnectionURIHostsItem struct {
	// Hostname for connection.
	Hostname *string `json:"hostname,omitempty"`
	// Port number for connection.
	Port *int64 `json:"port,omitempty"`
}
// UnmarshalPostgreSQLConnectionURIHostsItem unmarshals an instance of PostgreSQLConnectionURIHostsItem from the specified map of raw messages.
func UnmarshalPostgreSQLConnectionURIHostsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(PostgreSQLConnectionURIHostsItem)
	if err = core.UnmarshalPrimitive(m, "hostname", &obj.Hostname); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "port", &obj.Port); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionAMQPS : RabbitMQConnectionAMQPS struct. Connection
// information for reaching a RabbitMQ deployment over AMQPS.
type RabbitMQConnectionAMQPS struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`
	// Fully composed connection strings.
	Composed []string `json:"composed,omitempty"`
	// Scheme/protocol for URI connection.
	Scheme *string `json:"scheme,omitempty"`
	// Host/port pairs for the connection.
	Hosts []RabbitMQConnectionAMQPSHostsItem `json:"hosts,omitempty"`
	// Path for URI connection.
	Path *string `json:"path,omitempty"`
	// Query options to add to the URI connection.
	QueryOptions interface{} `json:"query_options,omitempty"`
	// Credential used to authenticate the connection.
	Authentication *RabbitMQConnectionAMQPSAuthentication `json:"authentication,omitempty"`
	// TLS certificate for the connection.
	Certificate *RabbitMQConnectionAMQPSCertificate `json:"certificate,omitempty"`
}
// UnmarshalRabbitMQConnectionAMQPS unmarshals an instance of RabbitMQConnectionAMQPS from the specified map of raw messages.
func UnmarshalRabbitMQConnectionAMQPS(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionAMQPS)
	if err = core.UnmarshalPrimitive(m, "type", &obj.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &obj.Composed); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "scheme", &obj.Scheme); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "hosts", &obj.Hosts, UnmarshalRabbitMQConnectionAMQPSHostsItem); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "path", &obj.Path); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "query_options", &obj.QueryOptions); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "authentication", &obj.Authentication, UnmarshalRabbitMQConnectionAMQPSAuthentication); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &obj.Certificate, UnmarshalRabbitMQConnectionAMQPSCertificate); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionAMQPSAuthentication : RabbitMQConnectionAMQPSAuthentication struct.
// Credential used to authenticate a RabbitMQ AMQPS connection.
type RabbitMQConnectionAMQPSAuthentication struct {
	// Authentication method for this credential.
	Method *string `json:"method,omitempty"`
	// Username part of credential.
	Username *string `json:"username,omitempty"`
	// Password part of credential.
	Password *string `json:"password,omitempty"`
}
// UnmarshalRabbitMQConnectionAMQPSAuthentication unmarshals an instance of RabbitMQConnectionAMQPSAuthentication from the specified map of raw messages.
func UnmarshalRabbitMQConnectionAMQPSAuthentication(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionAMQPSAuthentication)
	if err = core.UnmarshalPrimitive(m, "method", &obj.Method); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "username", &obj.Username); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "password", &obj.Password); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionAMQPSCertificate : RabbitMQConnectionAMQPSCertificate struct.
// TLS certificate used by a RabbitMQ AMQPS connection.
type RabbitMQConnectionAMQPSCertificate struct {
	// Name associated with the certificate.
	Name *string `json:"name,omitempty"`
	// Base64 encoded version of the certificate.
	CertificateBase64 *string `json:"certificate_base64,omitempty"`
}
// UnmarshalRabbitMQConnectionAMQPSCertificate unmarshals an instance of RabbitMQConnectionAMQPSCertificate from the specified map of raw messages.
func UnmarshalRabbitMQConnectionAMQPSCertificate(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionAMQPSCertificate)
	if err = core.UnmarshalPrimitive(m, "name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "certificate_base64", &obj.CertificateBase64); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionAMQPSHostsItem : RabbitMQConnectionAMQPSHostsItem struct.
// A single host/port endpoint for a RabbitMQ AMQPS connection.
type RabbitMQConnectionAMQPSHostsItem struct {
	// Hostname for connection.
	Hostname *string `json:"hostname,omitempty"`
	// Port number for connection.
	Port *int64 `json:"port,omitempty"`
}
// UnmarshalRabbitMQConnectionAMQPSHostsItem unmarshals an instance of RabbitMQConnectionAMQPSHostsItem from the specified map of raw messages.
func UnmarshalRabbitMQConnectionAMQPSHostsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionAMQPSHostsItem)
	if err = core.UnmarshalPrimitive(m, "hostname", &obj.Hostname); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "port", &obj.Port); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionHTTPS : RabbitMQConnectionHTTPS struct. Connection
// information for reaching a RabbitMQ deployment over HTTPS (including the
// Management UI).
type RabbitMQConnectionHTTPS struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`
	// Fully composed connection strings.
	Composed []string `json:"composed,omitempty"`
	// Scheme/protocol for URI connection.
	Scheme *string `json:"scheme,omitempty"`
	// Host/port pairs for the connection.
	Hosts []RabbitMQConnectionHTTPSHostsItem `json:"hosts,omitempty"`
	// Path for URI connection.
	Path *string `json:"path,omitempty"`
	// Query options to add to the URI connection.
	QueryOptions interface{} `json:"query_options,omitempty"`
	// Credential used to authenticate the connection.
	Authentication *RabbitMQConnectionHTTPSAuthentication `json:"authentication,omitempty"`
	// TLS certificate for the connection.
	Certificate *RabbitMQConnectionHTTPSCertificate `json:"certificate,omitempty"`
	// Indicates the address is accessible by browser, for the RabbitMQ Management UI.
	BrowserAccessible *bool `json:"browser_accessible,omitempty"`
}
// UnmarshalRabbitMQConnectionHTTPS unmarshals an instance of RabbitMQConnectionHTTPS from the specified map of raw messages.
func UnmarshalRabbitMQConnectionHTTPS(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionHTTPS)
	if err = core.UnmarshalPrimitive(m, "type", &obj.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &obj.Composed); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "scheme", &obj.Scheme); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "hosts", &obj.Hosts, UnmarshalRabbitMQConnectionHTTPSHostsItem); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "path", &obj.Path); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "query_options", &obj.QueryOptions); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "authentication", &obj.Authentication, UnmarshalRabbitMQConnectionHTTPSAuthentication); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &obj.Certificate, UnmarshalRabbitMQConnectionHTTPSCertificate); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "browser_accessible", &obj.BrowserAccessible); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionHTTPSAuthentication : RabbitMQConnectionHTTPSAuthentication struct.
// Credential used to authenticate a RabbitMQ HTTPS connection.
type RabbitMQConnectionHTTPSAuthentication struct {
	// Authentication method for this credential.
	Method *string `json:"method,omitempty"`
	// Username part of credential.
	Username *string `json:"username,omitempty"`
	// Password part of credential.
	Password *string `json:"password,omitempty"`
}
// UnmarshalRabbitMQConnectionHTTPSAuthentication unmarshals an instance of RabbitMQConnectionHTTPSAuthentication from the specified map of raw messages.
func UnmarshalRabbitMQConnectionHTTPSAuthentication(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionHTTPSAuthentication)
	if err = core.UnmarshalPrimitive(m, "method", &obj.Method); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "username", &obj.Username); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "password", &obj.Password); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionHTTPSCertificate : RabbitMQConnectionHTTPSCertificate struct.
// TLS certificate used by a RabbitMQ HTTPS connection.
type RabbitMQConnectionHTTPSCertificate struct {
	// Name associated with the certificate.
	Name *string `json:"name,omitempty"`
	// Base64 encoded version of the certificate.
	CertificateBase64 *string `json:"certificate_base64,omitempty"`
}
// UnmarshalRabbitMQConnectionHTTPSCertificate unmarshals an instance of RabbitMQConnectionHTTPSCertificate from the specified map of raw messages.
func UnmarshalRabbitMQConnectionHTTPSCertificate(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionHTTPSCertificate)
	if err = core.UnmarshalPrimitive(m, "name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "certificate_base64", &obj.CertificateBase64); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionHTTPSHostsItem : RabbitMQConnectionHTTPSHostsItem struct.
// A single host/port endpoint for a RabbitMQ HTTPS connection.
type RabbitMQConnectionHTTPSHostsItem struct {
	// Hostname for connection.
	Hostname *string `json:"hostname,omitempty"`
	// Port number for connection.
	Port *int64 `json:"port,omitempty"`
}
// UnmarshalRabbitMQConnectionHTTPSHostsItem unmarshals an instance of RabbitMQConnectionHTTPSHostsItem from the specified map of raw messages.
func UnmarshalRabbitMQConnectionHTTPSHostsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionHTTPSHostsItem)
	if err = core.UnmarshalPrimitive(m, "hostname", &obj.Hostname); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "port", &obj.Port); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionMQTTS : RabbitMQConnectionMQTTS struct. Connection
// information for reaching a RabbitMQ deployment over MQTTS.
type RabbitMQConnectionMQTTS struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`
	// Fully composed connection strings.
	Composed []string `json:"composed,omitempty"`
	// Scheme/protocol for URI connection.
	Scheme *string `json:"scheme,omitempty"`
	// Host/port pairs for the connection.
	Hosts []RabbitMQConnectionMQTTSHostsItem `json:"hosts,omitempty"`
	// Path for URI connection.
	Path *string `json:"path,omitempty"`
	// Query options to add to the URI connection.
	QueryOptions interface{} `json:"query_options,omitempty"`
	// Credential used to authenticate the connection.
	Authentication *RabbitMQConnectionMQTTSAuthentication `json:"authentication,omitempty"`
	// TLS certificate for the connection.
	Certificate *RabbitMQConnectionMQTTSCertificate `json:"certificate,omitempty"`
}
// UnmarshalRabbitMQConnectionMQTTS unmarshals an instance of RabbitMQConnectionMQTTS from the specified map of raw messages.
func UnmarshalRabbitMQConnectionMQTTS(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionMQTTS)
	if err = core.UnmarshalPrimitive(m, "type", &obj.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &obj.Composed); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "scheme", &obj.Scheme); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "hosts", &obj.Hosts, UnmarshalRabbitMQConnectionMQTTSHostsItem); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "path", &obj.Path); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "query_options", &obj.QueryOptions); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "authentication", &obj.Authentication, UnmarshalRabbitMQConnectionMQTTSAuthentication); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &obj.Certificate, UnmarshalRabbitMQConnectionMQTTSCertificate); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionMQTTSAuthentication : RabbitMQConnectionMQTTSAuthentication struct.
// Credential used to authenticate a RabbitMQ MQTTS connection.
type RabbitMQConnectionMQTTSAuthentication struct {
	// Authentication method for this credential.
	Method *string `json:"method,omitempty"`
	// Username part of credential.
	Username *string `json:"username,omitempty"`
	// Password part of credential.
	Password *string `json:"password,omitempty"`
}
// UnmarshalRabbitMQConnectionMQTTSAuthentication unmarshals an instance of RabbitMQConnectionMQTTSAuthentication from the specified map of raw messages.
func UnmarshalRabbitMQConnectionMQTTSAuthentication(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionMQTTSAuthentication)
	if err = core.UnmarshalPrimitive(m, "method", &obj.Method); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "username", &obj.Username); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "password", &obj.Password); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionMQTTSCertificate : RabbitMQConnectionMQTTSCertificate struct.
// TLS certificate used by a RabbitMQ MQTTS connection.
type RabbitMQConnectionMQTTSCertificate struct {
	// Name associated with the certificate.
	Name *string `json:"name,omitempty"`
	// Base64 encoded version of the certificate.
	CertificateBase64 *string `json:"certificate_base64,omitempty"`
}
// UnmarshalRabbitMQConnectionMQTTSCertificate unmarshals an instance of RabbitMQConnectionMQTTSCertificate from the specified map of raw messages.
func UnmarshalRabbitMQConnectionMQTTSCertificate(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionMQTTSCertificate)
	if err = core.UnmarshalPrimitive(m, "name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "certificate_base64", &obj.CertificateBase64); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionMQTTSHostsItem : RabbitMQConnectionMQTTSHostsItem struct.
// A single host/port endpoint for a RabbitMQ MQTTS connection.
type RabbitMQConnectionMQTTSHostsItem struct {
	// Hostname for connection.
	Hostname *string `json:"hostname,omitempty"`
	// Port number for connection.
	Port *int64 `json:"port,omitempty"`
}
// UnmarshalRabbitMQConnectionMQTTSHostsItem unmarshals an instance of RabbitMQConnectionMQTTSHostsItem from the specified map of raw messages.
func UnmarshalRabbitMQConnectionMQTTSHostsItem(m map[string]json.RawMessage, result interface{}) (err error) {
obj := new(RabbitMQConnectionMQTTSHostsItem)
err = core.UnmarshalPrimitive(m, "hostname", &obj.Hostname)
if err != nil {
return
}
err = core.UnmarshalPrimitive(m, "port", &obj.Port)
if err != nil {
return
}
reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
return
}
// RabbitMQConnectionStompSSL : RabbitMQConnectionStompSSL struct
type RabbitMQConnectionStompSSL struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`
	Composed []string `json:"composed,omitempty"`
	Hosts []RabbitMQConnectionStompSSLHostsItem `json:"hosts,omitempty"`
	Authentication *RabbitMQConnectionStompSSLAuthentication `json:"authentication,omitempty"`
	Certificate *RabbitMQConnectionStompSSLCertificate `json:"certificate,omitempty"`
	// Indicates ssl is required for the connection.
	Ssl *bool `json:"ssl,omitempty"`
}
// UnmarshalRabbitMQConnectionStompSSL unmarshals an instance of RabbitMQConnectionStompSSL from the specified map of raw messages.
func UnmarshalRabbitMQConnectionStompSSL(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionStompSSL)
	// Primitive fields decode directly; nested models delegate to their own unmarshal helpers.
	if err = core.UnmarshalPrimitive(m, "type", &obj.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &obj.Composed); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "hosts", &obj.Hosts, UnmarshalRabbitMQConnectionStompSSLHostsItem); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "authentication", &obj.Authentication, UnmarshalRabbitMQConnectionStompSSLAuthentication); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &obj.Certificate, UnmarshalRabbitMQConnectionStompSSLCertificate); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ssl", &obj.Ssl); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionStompSSLAuthentication : RabbitMQConnectionStompSSLAuthentication struct
type RabbitMQConnectionStompSSLAuthentication struct {
	// Authentication method for this credential.
	Method *string `json:"method,omitempty"`
	// Username part of credential.
	Username *string `json:"username,omitempty"`
	// Password part of credential.
	Password *string `json:"password,omitempty"`
}
// UnmarshalRabbitMQConnectionStompSSLAuthentication unmarshals an instance of RabbitMQConnectionStompSSLAuthentication from the specified map of raw messages.
func UnmarshalRabbitMQConnectionStompSSLAuthentication(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionStompSSLAuthentication)
	// Decode each credential field; stop at the first decoding failure.
	if err = core.UnmarshalPrimitive(m, "method", &obj.Method); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "username", &obj.Username); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "password", &obj.Password); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionStompSSLCertificate : RabbitMQConnectionStompSSLCertificate struct
type RabbitMQConnectionStompSSLCertificate struct {
	// Name associated with the certificate.
	Name *string `json:"name,omitempty"`
	// Base64 encoded version of the certificate.
	CertificateBase64 *string `json:"certificate_base64,omitempty"`
}
// UnmarshalRabbitMQConnectionStompSSLCertificate unmarshals an instance of RabbitMQConnectionStompSSLCertificate from the specified map of raw messages.
func UnmarshalRabbitMQConnectionStompSSLCertificate(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionStompSSLCertificate)
	if err = core.UnmarshalPrimitive(m, "name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "certificate_base64", &obj.CertificateBase64); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RabbitMQConnectionStompSSLHostsItem : RabbitMQConnectionStompSSLHostsItem struct
type RabbitMQConnectionStompSSLHostsItem struct {
	// Hostname for connection.
	Hostname *string `json:"hostname,omitempty"`
	// Port number for connection.
	Port *int64 `json:"port,omitempty"`
}
// UnmarshalRabbitMQConnectionStompSSLHostsItem unmarshals an instance of RabbitMQConnectionStompSSLHostsItem from the specified map of raw messages.
func UnmarshalRabbitMQConnectionStompSSLHostsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RabbitMQConnectionStompSSLHostsItem)
	if err = core.UnmarshalPrimitive(m, "hostname", &obj.Hostname); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "port", &obj.Port); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RedisConnectionURI : RedisConnectionURI struct
type RedisConnectionURI struct {
	// Type of connection being described.
	Type *string `json:"type,omitempty"`
	Composed []string `json:"composed,omitempty"`
	// Scheme/protocol for URI connection.
	Scheme *string `json:"scheme,omitempty"`
	Hosts []RedisConnectionURIHostsItem `json:"hosts,omitempty"`
	// Path for URI connection.
	Path *string `json:"path,omitempty"`
	// Query options to add to the URI connection.
	QueryOptions interface{} `json:"query_options,omitempty"`
	Authentication *RedisConnectionURIAuthentication `json:"authentication,omitempty"`
	Certificate *RedisConnectionURICertificate `json:"certificate,omitempty"`
	// Number of the database to use in the URI connection.
	Database *int64 `json:"database,omitempty"`
}
// UnmarshalRedisConnectionURI unmarshals an instance of RedisConnectionURI from the specified map of raw messages.
func UnmarshalRedisConnectionURI(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RedisConnectionURI)
	// Primitive fields decode directly; nested models delegate to their own unmarshal helpers.
	if err = core.UnmarshalPrimitive(m, "type", &obj.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "composed", &obj.Composed); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "scheme", &obj.Scheme); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "hosts", &obj.Hosts, UnmarshalRedisConnectionURIHostsItem); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "path", &obj.Path); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "query_options", &obj.QueryOptions); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "authentication", &obj.Authentication, UnmarshalRedisConnectionURIAuthentication); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "certificate", &obj.Certificate, UnmarshalRedisConnectionURICertificate); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "database", &obj.Database); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RedisConnectionURIAuthentication : RedisConnectionURIAuthentication struct
type RedisConnectionURIAuthentication struct {
	// Authentication method for this credential.
	Method *string `json:"method,omitempty"`
	// Username part of credential.
	Username *string `json:"username,omitempty"`
	// Password part of credential.
	Password *string `json:"password,omitempty"`
}
// UnmarshalRedisConnectionURIAuthentication unmarshals an instance of RedisConnectionURIAuthentication from the specified map of raw messages.
func UnmarshalRedisConnectionURIAuthentication(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RedisConnectionURIAuthentication)
	// Decode each credential field; stop at the first decoding failure.
	if err = core.UnmarshalPrimitive(m, "method", &obj.Method); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "username", &obj.Username); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "password", &obj.Password); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RedisConnectionURICertificate : RedisConnectionURICertificate struct
type RedisConnectionURICertificate struct {
	// Name associated with the certificate.
	Name *string `json:"name,omitempty"`
	// Base64 encoded version of the certificate.
	CertificateBase64 *string `json:"certificate_base64,omitempty"`
}
// UnmarshalRedisConnectionURICertificate unmarshals an instance of RedisConnectionURICertificate from the specified map of raw messages.
func UnmarshalRedisConnectionURICertificate(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RedisConnectionURICertificate)
	if err = core.UnmarshalPrimitive(m, "name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "certificate_base64", &obj.CertificateBase64); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RedisConnectionURIHostsItem : RedisConnectionURIHostsItem struct
type RedisConnectionURIHostsItem struct {
	// Hostname for connection.
	Hostname *string `json:"hostname,omitempty"`
	// Port number for connection.
	Port *int64 `json:"port,omitempty"`
}
// UnmarshalRedisConnectionURIHostsItem unmarshals an instance of RedisConnectionURIHostsItem from the specified map of raw messages.
func UnmarshalRedisConnectionURIHostsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RedisConnectionURIHostsItem)
	if err = core.UnmarshalPrimitive(m, "hostname", &obj.Hostname); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "port", &obj.Port); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// Remotes : Remotes.
type Remotes struct {
	// Leader ID, if applicable.
	Leader *string `json:"leader,omitempty"`
	// Replica IDs, if applicable.
	Replicas []string `json:"replicas,omitempty"`
}
// UnmarshalRemotes unmarshals an instance of Remotes from the specified map of raw messages.
func UnmarshalRemotes(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(Remotes)
	if err = core.UnmarshalPrimitive(m, "leader", &obj.Leader); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "replicas", &obj.Replicas); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// ReplaceWhitelistOptions : The ReplaceWhitelist options.
type ReplaceWhitelistOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	// An array of allowlist entries.
	IpAddresses []WhitelistEntry
	// Verify that the current allowlist matches a provided ETag value. Use in conjunction with the GET operation's ETag
	// header to ensure synchronicity between clients.
	IfMatch *string
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewReplaceWhitelistOptions : Instantiate ReplaceWhitelistOptions
func (*IbmCloudDatabasesV5) NewReplaceWhitelistOptions(id string) *ReplaceWhitelistOptions {
	opts := new(ReplaceWhitelistOptions)
	opts.ID = core.StringPtr(id)
	return opts
}
// SetID : Allow user to set ID
func (o *ReplaceWhitelistOptions) SetID(id string) *ReplaceWhitelistOptions {
	o.ID = core.StringPtr(id)
	return o
}
// SetIpAddresses : Allow user to set IpAddresses
func (o *ReplaceWhitelistOptions) SetIpAddresses(ipAddresses []WhitelistEntry) *ReplaceWhitelistOptions {
	o.IpAddresses = ipAddresses
	return o
}
// SetIfMatch : Allow user to set IfMatch
func (o *ReplaceWhitelistOptions) SetIfMatch(ifMatch string) *ReplaceWhitelistOptions {
	o.IfMatch = core.StringPtr(ifMatch)
	return o
}
// SetHeaders : Allow user to set Headers
func (o *ReplaceWhitelistOptions) SetHeaders(param map[string]string) *ReplaceWhitelistOptions {
	o.Headers = param
	return o
}
// ReplaceWhitelistResponse : ReplaceWhitelistResponse struct
type ReplaceWhitelistResponse struct {
	Task *Task `json:"task,omitempty"`
}
// UnmarshalReplaceWhitelistResponse unmarshals an instance of ReplaceWhitelistResponse from the specified map of raw messages.
func UnmarshalReplaceWhitelistResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(ReplaceWhitelistResponse)
	if err = core.UnmarshalModel(m, "task", &obj.Task, UnmarshalTask); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetAutoscalingConditionsOptions : The SetAutoscalingConditions options.
type SetAutoscalingConditionsOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	// Group ID.
	GroupID *string `validate:"required,ne="`
	Autoscaling AutoscalingSetGroupIntf
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewSetAutoscalingConditionsOptions : Instantiate SetAutoscalingConditionsOptions
func (*IbmCloudDatabasesV5) NewSetAutoscalingConditionsOptions(id string, groupID string) *SetAutoscalingConditionsOptions {
	opts := new(SetAutoscalingConditionsOptions)
	opts.ID = core.StringPtr(id)
	opts.GroupID = core.StringPtr(groupID)
	return opts
}
// SetID : Allow user to set ID
func (o *SetAutoscalingConditionsOptions) SetID(id string) *SetAutoscalingConditionsOptions {
	o.ID = core.StringPtr(id)
	return o
}
// SetGroupID : Allow user to set GroupID
func (o *SetAutoscalingConditionsOptions) SetGroupID(groupID string) *SetAutoscalingConditionsOptions {
	o.GroupID = core.StringPtr(groupID)
	return o
}
// SetAutoscaling : Allow user to set Autoscaling
func (o *SetAutoscalingConditionsOptions) SetAutoscaling(autoscaling AutoscalingSetGroupIntf) *SetAutoscalingConditionsOptions {
	o.Autoscaling = autoscaling
	return o
}
// SetHeaders : Allow user to set Headers
func (o *SetAutoscalingConditionsOptions) SetHeaders(param map[string]string) *SetAutoscalingConditionsOptions {
	o.Headers = param
	return o
}
// SetAutoscalingConditionsResponse : SetAutoscalingConditionsResponse struct
type SetAutoscalingConditionsResponse struct {
	Task *Task `json:"task,omitempty"`
}
// UnmarshalSetAutoscalingConditionsResponse unmarshals an instance of SetAutoscalingConditionsResponse from the specified map of raw messages.
func UnmarshalSetAutoscalingConditionsResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetAutoscalingConditionsResponse)
	if err = core.UnmarshalModel(m, "task", &obj.Task, UnmarshalTask); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetCPUGroupCPU : SetCPUGroupCPU struct
type SetCPUGroupCPU struct {
	// Number of allocated CPUs.
	AllocationCount *int64 `json:"allocation_count,omitempty"`
}
// UnmarshalSetCPUGroupCPU unmarshals an instance of SetCPUGroupCPU from the specified map of raw messages.
func UnmarshalSetCPUGroupCPU(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetCPUGroupCPU)
	if err = core.UnmarshalPrimitive(m, "allocation_count", &obj.AllocationCount); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetConfigurationConfiguration : SetConfigurationConfiguration struct
// Models which "extend" this model:
// - SetConfigurationConfigurationPGConfiguration
// - SetConfigurationConfigurationRedisConfiguration
type SetConfigurationConfiguration struct {
	// Maximum connections allowed.
	MaxConnections *int64 `json:"max_connections,omitempty"`
	// Max number of transactions that can be in the "prepared" state simultaneously.
	MaxPreparedTransactions *int64 `json:"max_prepared_transactions,omitempty"`
	// Deadlock timeout in ms. The time to wait on a lock before checking for deadlock. Also the duration where lock waits
	// will be logged.
	DeadlockTimeout *int64 `json:"deadlock_timeout,omitempty"`
	// Number of simultaneous requests that can be handled efficiently by the disk subsystem.
	EffectiveIoConcurrency *int64 `json:"effective_io_concurrency,omitempty"`
	// Maximum number of simultaneously defined replication slots.
	MaxReplicationSlots *int64 `json:"max_replication_slots,omitempty"`
	// Maximum number of simultaneously running WAL sender processes.
	MaxWalSenders *int64 `json:"max_wal_senders,omitempty"`
	// The number of 8kB shared memory buffers used by the server. Set to 1/4 of memory. Setting too high will cause
	// crashes or prevent the database from starting.
	SharedBuffers *int64 `json:"shared_buffers,omitempty"`
	// Sets the current transaction's synchronization level. Off can result in data loss. remote_write will enable
	// synchronous replication, which will impact performance and availability.
	SynchronousCommit *string `json:"synchronous_commit,omitempty"`
	// WAL level. Set to logical to use logical decoding or logical replication.
	WalLevel *string `json:"wal_level,omitempty"`
	// The number of seconds to wait before forcing a switch to the next WAL file if a new file has not been started.
	ArchiveTimeout *int64 `json:"archive_timeout,omitempty"`
	// The minimum number of milliseconds for execution time above which statements will be logged.
	LogMinDurationStatement *int64 `json:"log_min_duration_statement,omitempty"`
	// The maximum memory Redis should use, as bytes.
	MaxmemoryRedis *int64 `json:"maxmemory-redis,omitempty"`
	// The policy with which Redis evicts keys when maximum memory is reached.
	MaxmemoryPolicy *string `json:"maxmemory-policy,omitempty"`
	// If set to yes this will enable AOF persistence.
	Appendonly *string `json:"appendonly,omitempty"`
	// The number of keys Redis samples per eviction check under the configured maxmemory-policy (Redis
	// maxmemory-samples setting). NOTE(review): the original comment duplicated the maxmemory-redis "bytes"
	// text; confirm against the service API docs.
	MaxmemorySamples *int64 `json:"maxmemory-samples,omitempty"`
	// Whether or not to stop accepting writes when background persistence actions fail.
	StopWritesOnBgsaveError *string `json:"stop-writes-on-bgsave-error,omitempty"`
}
// Constants associated with the SetConfigurationConfiguration.SynchronousCommit property.
// Sets the current transaction's synchronization level. Off can result in data loss. remote_write will enable
// synchronous replication, which will impact performance and availability.
const (
	SetConfigurationConfiguration_SynchronousCommit_Local = "local"
	SetConfigurationConfiguration_SynchronousCommit_Off = "off"
)
// Constants associated with the SetConfigurationConfiguration.WalLevel property.
// WAL level. Set to logical to use logical decoding or logical replication.
const (
	SetConfigurationConfiguration_WalLevel_HotStandby = "hot_standby"
	SetConfigurationConfiguration_WalLevel_Logical = "logical"
)
// Constants associated with the SetConfigurationConfiguration.MaxmemoryPolicy property.
// The policy with which Redis evicts keys when maximum memory is reached.
const (
	SetConfigurationConfiguration_MaxmemoryPolicy_AllkeysLru = "allkeys-lru"
	SetConfigurationConfiguration_MaxmemoryPolicy_AllkeysRandom = "allkeys-random"
	SetConfigurationConfiguration_MaxmemoryPolicy_Noeviction = "noeviction"
	SetConfigurationConfiguration_MaxmemoryPolicy_VolatileLru = "volatile-lru"
	SetConfigurationConfiguration_MaxmemoryPolicy_VolatileRandom = "volatile-random"
	SetConfigurationConfiguration_MaxmemoryPolicy_VolatileTTL = "volatile-ttl"
)
// Constants associated with the SetConfigurationConfiguration.Appendonly property.
// If set to yes this will enable AOF persistence.
const (
	SetConfigurationConfiguration_Appendonly_No = "no"
	SetConfigurationConfiguration_Appendonly_Yes = "yes"
)
// Constants associated with the SetConfigurationConfiguration.StopWritesOnBgsaveError property.
// Whether or not to stop accepting writes when background persistence actions fail.
const (
	SetConfigurationConfiguration_StopWritesOnBgsaveError_No = "no"
	SetConfigurationConfiguration_StopWritesOnBgsaveError_Yes = "yes"
)
// isaSetConfigurationConfiguration marks this base model as satisfying SetConfigurationConfigurationIntf,
// so both the base type and its "extending" models are accepted wherever the interface is required.
func (*SetConfigurationConfiguration) isaSetConfigurationConfiguration() bool {
	return true
}
// SetConfigurationConfigurationIntf is implemented by SetConfigurationConfiguration and its discriminated variants.
type SetConfigurationConfigurationIntf interface {
	isaSetConfigurationConfiguration() bool
}
// UnmarshalSetConfigurationConfiguration unmarshals an instance of SetConfigurationConfiguration from the specified map of raw messages.
func UnmarshalSetConfigurationConfiguration(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetConfigurationConfiguration)
	// Decode every known property; the first decoding failure aborts and is returned.
	if err = core.UnmarshalPrimitive(m, "max_connections", &obj.MaxConnections); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "max_prepared_transactions", &obj.MaxPreparedTransactions); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "deadlock_timeout", &obj.DeadlockTimeout); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "effective_io_concurrency", &obj.EffectiveIoConcurrency); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "max_replication_slots", &obj.MaxReplicationSlots); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "max_wal_senders", &obj.MaxWalSenders); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "shared_buffers", &obj.SharedBuffers); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "synchronous_commit", &obj.SynchronousCommit); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "wal_level", &obj.WalLevel); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "archive_timeout", &obj.ArchiveTimeout); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "log_min_duration_statement", &obj.LogMinDurationStatement); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "maxmemory-redis", &obj.MaxmemoryRedis); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "maxmemory-policy", &obj.MaxmemoryPolicy); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "appendonly", &obj.Appendonly); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "maxmemory-samples", &obj.MaxmemorySamples); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "stop-writes-on-bgsave-error", &obj.StopWritesOnBgsaveError); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetDatabaseConfigurationOptions : The SetDatabaseConfiguration options.
type SetDatabaseConfigurationOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	Configuration SetConfigurationConfigurationIntf `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewSetDatabaseConfigurationOptions : Instantiate SetDatabaseConfigurationOptions
func (*IbmCloudDatabasesV5) NewSetDatabaseConfigurationOptions(id string, configuration SetConfigurationConfigurationIntf) *SetDatabaseConfigurationOptions {
	opts := new(SetDatabaseConfigurationOptions)
	opts.ID = core.StringPtr(id)
	opts.Configuration = configuration
	return opts
}
// SetID : Allow user to set ID
func (o *SetDatabaseConfigurationOptions) SetID(id string) *SetDatabaseConfigurationOptions {
	o.ID = core.StringPtr(id)
	return o
}
// SetConfiguration : Allow user to set Configuration
func (o *SetDatabaseConfigurationOptions) SetConfiguration(configuration SetConfigurationConfigurationIntf) *SetDatabaseConfigurationOptions {
	o.Configuration = configuration
	return o
}
// SetHeaders : Allow user to set Headers
func (o *SetDatabaseConfigurationOptions) SetHeaders(param map[string]string) *SetDatabaseConfigurationOptions {
	o.Headers = param
	return o
}
// SetDatabaseConfigurationResponse : SetDatabaseConfigurationResponse struct
type SetDatabaseConfigurationResponse struct {
	Task *Task `json:"task,omitempty"`
}
// UnmarshalSetDatabaseConfigurationResponse unmarshals an instance of SetDatabaseConfigurationResponse from the specified map of raw messages.
func UnmarshalSetDatabaseConfigurationResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetDatabaseConfigurationResponse)
	if err = core.UnmarshalModel(m, "task", &obj.Task, UnmarshalTask); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetDeploymentScalingGroupOptions : The SetDeploymentScalingGroup options.
type SetDeploymentScalingGroupOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	// Group Id.
	GroupID *string `validate:"required,ne="`
	// Scaling group settings.
	SetDeploymentScalingGroupRequest SetDeploymentScalingGroupRequestIntf `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewSetDeploymentScalingGroupOptions : Instantiate SetDeploymentScalingGroupOptions
func (*IbmCloudDatabasesV5) NewSetDeploymentScalingGroupOptions(id string, groupID string, setDeploymentScalingGroupRequest SetDeploymentScalingGroupRequestIntf) *SetDeploymentScalingGroupOptions {
	opts := new(SetDeploymentScalingGroupOptions)
	opts.ID = core.StringPtr(id)
	opts.GroupID = core.StringPtr(groupID)
	opts.SetDeploymentScalingGroupRequest = setDeploymentScalingGroupRequest
	return opts
}
// SetID : Allow user to set ID
func (o *SetDeploymentScalingGroupOptions) SetID(id string) *SetDeploymentScalingGroupOptions {
	o.ID = core.StringPtr(id)
	return o
}
// SetGroupID : Allow user to set GroupID
func (o *SetDeploymentScalingGroupOptions) SetGroupID(groupID string) *SetDeploymentScalingGroupOptions {
	o.GroupID = core.StringPtr(groupID)
	return o
}
// SetSetDeploymentScalingGroupRequest : Allow user to set SetDeploymentScalingGroupRequest
func (o *SetDeploymentScalingGroupOptions) SetSetDeploymentScalingGroupRequest(setDeploymentScalingGroupRequest SetDeploymentScalingGroupRequestIntf) *SetDeploymentScalingGroupOptions {
	o.SetDeploymentScalingGroupRequest = setDeploymentScalingGroupRequest
	return o
}
// SetHeaders : Allow user to set Headers
func (o *SetDeploymentScalingGroupOptions) SetHeaders(param map[string]string) *SetDeploymentScalingGroupOptions {
	o.Headers = param
	return o
}
// SetDeploymentScalingGroupRequest : SetDeploymentScalingGroupRequest struct
// Models which "extend" this model:
// - SetDeploymentScalingGroupRequestSetMembersGroup
// - SetDeploymentScalingGroupRequestSetMemoryGroup
// - SetDeploymentScalingGroupRequestSetCPUGroup
// - SetDeploymentScalingGroupRequestSetDiskGroup
type SetDeploymentScalingGroupRequest struct {
	Members *SetMembersGroupMembers `json:"members,omitempty"`
	Memory *SetMemoryGroupMemory `json:"memory,omitempty"`
	Cpu *SetCPUGroupCPU `json:"cpu,omitempty"`
	Disk *SetDiskGroupDisk `json:"disk,omitempty"`
}
// isaSetDeploymentScalingGroupRequest marks the base model as satisfying the interface.
func (*SetDeploymentScalingGroupRequest) isaSetDeploymentScalingGroupRequest() bool {
	return true
}
// SetDeploymentScalingGroupRequestIntf is implemented by the base model and its variants.
type SetDeploymentScalingGroupRequestIntf interface {
	isaSetDeploymentScalingGroupRequest() bool
}
// UnmarshalSetDeploymentScalingGroupRequest unmarshals an instance of SetDeploymentScalingGroupRequest from the specified map of raw messages.
func UnmarshalSetDeploymentScalingGroupRequest(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetDeploymentScalingGroupRequest)
	// Each scaling dimension is a nested model with its own unmarshal helper.
	if err = core.UnmarshalModel(m, "members", &obj.Members, UnmarshalSetMembersGroupMembers); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "memory", &obj.Memory, UnmarshalSetMemoryGroupMemory); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "cpu", &obj.Cpu, UnmarshalSetCPUGroupCPU); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "disk", &obj.Disk, UnmarshalSetDiskGroupDisk); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetDeploymentScalingGroupResponse : SetDeploymentScalingGroupResponse struct
type SetDeploymentScalingGroupResponse struct {
	Task *Task `json:"task,omitempty"`
}
// UnmarshalSetDeploymentScalingGroupResponse unmarshals an instance of SetDeploymentScalingGroupResponse from the specified map of raw messages.
func UnmarshalSetDeploymentScalingGroupResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetDeploymentScalingGroupResponse)
	if err = core.UnmarshalModel(m, "task", &obj.Task, UnmarshalTask); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetDiskGroupDisk : SetDiskGroupDisk struct
type SetDiskGroupDisk struct {
	// Allocated storage in MB.
	AllocationMb *int64 `json:"allocation_mb,omitempty"`
}
// UnmarshalSetDiskGroupDisk unmarshals an instance of SetDiskGroupDisk from the specified map of raw messages.
func UnmarshalSetDiskGroupDisk(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetDiskGroupDisk)
	if err = core.UnmarshalPrimitive(m, "allocation_mb", &obj.AllocationMb); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetMembersGroupMembers : SetMembersGroupMembers struct
type SetMembersGroupMembers struct {
	// Allocated number of members.
	AllocationCount *int64 `json:"allocation_count,omitempty"`
}
// UnmarshalSetMembersGroupMembers unmarshals an instance of SetMembersGroupMembers from the specified map of raw messages.
func UnmarshalSetMembersGroupMembers(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetMembersGroupMembers)
	if err = core.UnmarshalPrimitive(m, "allocation_count", &obj.AllocationCount); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetMemoryGroupMemory : SetMemoryGroupMemory struct
type SetMemoryGroupMemory struct {
	// Allocated memory in MB.
	AllocationMb *int64 `json:"allocation_mb,omitempty"`
}
// UnmarshalSetMemoryGroupMemory unmarshals an instance of SetMemoryGroupMemory from the specified map of raw messages.
func UnmarshalSetMemoryGroupMemory(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetMemoryGroupMemory)
	if err = core.UnmarshalPrimitive(m, "allocation_mb", &obj.AllocationMb); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetPromotionOptions : The SetPromotion options.
type SetPromotionOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	Promotion SetPromotionPromotionIntf `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewSetPromotionOptions : Instantiate SetPromotionOptions
func (*IbmCloudDatabasesV5) NewSetPromotionOptions(id string, promotion SetPromotionPromotionIntf) *SetPromotionOptions {
	opts := new(SetPromotionOptions)
	opts.ID = core.StringPtr(id)
	opts.Promotion = promotion
	return opts
}
// SetID : Allow user to set ID
func (o *SetPromotionOptions) SetID(id string) *SetPromotionOptions {
	o.ID = core.StringPtr(id)
	return o
}
// SetPromotion : Allow user to set Promotion
func (o *SetPromotionOptions) SetPromotion(promotion SetPromotionPromotionIntf) *SetPromotionOptions {
	o.Promotion = promotion
	return o
}
// SetHeaders : Allow user to set Headers
func (o *SetPromotionOptions) SetHeaders(param map[string]string) *SetPromotionOptions {
	o.Headers = param
	return o
}
// SetPromotionPromotion : SetPromotionPromotion struct
// Models which "extend" this model:
// - SetPromotionPromotionPromote
// - SetPromotionPromotionUpgradePromote
type SetPromotionPromotion struct {
	// Promotion options.
	Promotion map[string]interface{} `json:"promotion,omitempty"`
}
// isaSetPromotionPromotion marks the base model as satisfying the interface.
func (*SetPromotionPromotion) isaSetPromotionPromotion() bool {
	return true
}
// SetPromotionPromotionIntf is implemented by the base model and its variants.
type SetPromotionPromotionIntf interface {
	isaSetPromotionPromotion() bool
}
// UnmarshalSetPromotionPromotion unmarshals an instance of SetPromotionPromotion from the specified map of raw messages.
func UnmarshalSetPromotionPromotion(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetPromotionPromotion)
	if err = core.UnmarshalPrimitive(m, "promotion", &obj.Promotion); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetPromotionResponse : SetPromotionResponse struct
type SetPromotionResponse struct {
	Task *Task `json:"task,omitempty"`
}
// UnmarshalSetPromotionResponse unmarshals an instance of SetPromotionResponse from the specified map of raw messages.
func UnmarshalSetPromotionResponse(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetPromotionResponse)
	if err = core.UnmarshalModel(m, "task", &obj.Task, UnmarshalTask); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// SetRemotesOptions : The SetRemotes options.
type SetRemotesOptions struct {
	// Deployment ID.
	ID *string `validate:"required,ne="`
	Remotes *SetRemotesRequestRemotes
	// Option to restore instance without taking a backup once data is restored. Allows restored deployment to be available
	// sooner.
	SkipInitialBackup *bool
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewSetRemotesOptions : Instantiate SetRemotesOptions
func (*IbmCloudDatabasesV5) NewSetRemotesOptions(id string) *SetRemotesOptions {
	opts := new(SetRemotesOptions)
	opts.ID = core.StringPtr(id)
	return opts
}
// SetID : Allow user to set ID
func (o *SetRemotesOptions) SetID(id string) *SetRemotesOptions {
	o.ID = core.StringPtr(id)
	return o
}
// SetRemotes : Allow user to set Remotes
func (o *SetRemotesOptions) SetRemotes(remotes *SetRemotesRequestRemotes) *SetRemotesOptions {
	o.Remotes = remotes
	return o
}
// SetSkipInitialBackup : Allow user to set SkipInitialBackup
func (o *SetRemotesOptions) SetSkipInitialBackup(skipInitialBackup bool) *SetRemotesOptions {
	o.SkipInitialBackup = core.BoolPtr(skipInitialBackup)
	return o
}
// SetHeaders : Allow user to set Headers
func (o *SetRemotesOptions) SetHeaders(param map[string]string) *SetRemotesOptions {
	o.Headers = param
	return o
}
// SetRemotesRequestRemotes : SetRemotesRequestRemotes struct
type SetRemotesRequestRemotes struct {
 // Leader should be an empty string.
 Leader *string `json:"leader,omitempty"`
}

// UnmarshalSetRemotesRequestRemotes unmarshals an instance of SetRemotesRequestRemotes from the specified map of raw messages.
func UnmarshalSetRemotesRequestRemotes(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(SetRemotesRequestRemotes)
 if err = core.UnmarshalPrimitive(m, "leader", &obj.Leader); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// SetRemotesResponse : SetRemotesResponse struct
type SetRemotesResponse struct {
 Task *Task `json:"task,omitempty"`
}

// UnmarshalSetRemotesResponse unmarshals an instance of SetRemotesResponse from the specified map of raw messages.
func UnmarshalSetRemotesResponse(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(SetRemotesResponse)
 if err = core.UnmarshalModel(m, "task", &obj.Task, UnmarshalTask); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// StartOndemandBackupOptions : The StartOndemandBackup options.
type StartOndemandBackupOptions struct {
 // Deployment ID.
 ID *string `validate:"required,ne="`
 // Allows users to set headers on API requests
 Headers map[string]string
}
// NewStartOndemandBackupOptions : Instantiate StartOndemandBackupOptions
// Only the required deployment ID is set; use the Set* methods for the rest.
func (*IbmCloudDatabasesV5) NewStartOndemandBackupOptions(id string) *StartOndemandBackupOptions {
 return &StartOndemandBackupOptions{
  ID: core.StringPtr(id),
 }
}
// SetID : Allow user to set ID
// Each setter returns the receiver so calls can be chained fluently.
func (options *StartOndemandBackupOptions) SetID(id string) *StartOndemandBackupOptions {
 options.ID = core.StringPtr(id)
 return options
}
// SetHeaders : Allow user to set Headers
// Note: assignment replaces (does not merge with) any previously set headers.
func (options *StartOndemandBackupOptions) SetHeaders(param map[string]string) *StartOndemandBackupOptions {
 options.Headers = param
 return options
}
// StartOndemandBackupResponse : StartOndemandBackupResponse struct
type StartOndemandBackupResponse struct {
 Task *Task `json:"task,omitempty"`
}

// UnmarshalStartOndemandBackupResponse unmarshals an instance of StartOndemandBackupResponse from the specified map of raw messages.
func UnmarshalStartOndemandBackupResponse(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(StartOndemandBackupResponse)
 if err = core.UnmarshalModel(m, "task", &obj.Task, UnmarshalTask); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// Task : Task struct
type Task struct {
 // ID of the task.
 ID *string `json:"id,omitempty"`
 // Human-readable description of the task.
 Description *string `json:"description,omitempty"`
 // The status of the task. See the Task_Status_* constants for the allowed values.
 Status *string `json:"status,omitempty"`
 // ID of the deployment the task is being performed on.
 DeploymentID *string `json:"deployment_id,omitempty"`
 // Indicator as percentage of progress of the task.
 ProgressPercent *int64 `json:"progress_percent,omitempty"`
 // Date and time when the task was created.
 CreatedAt *strfmt.DateTime `json:"created_at,omitempty"`
}
// Constants associated with the Task.Status property.
// The status of the task.
// Compare against *task.Status when polling for task completion.
const (
 Task_Status_Completed = "completed"
 Task_Status_Failed = "failed"
 Task_Status_Running = "running"
)
// UnmarshalTask unmarshals an instance of Task from the specified map of raw messages.
func UnmarshalTask(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(Task)
 // Decode each JSON property into the matching model field, stopping at the
 // first error so err reports the offending property.
 if err = core.UnmarshalPrimitive(m, "id", &obj.ID); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "description", &obj.Description); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "status", &obj.Status); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "deployment_id", &obj.DeploymentID); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "progress_percent", &obj.ProgressPercent); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt); err != nil {
  return
 }
 // Write the decoded model through the result pointer supplied by the caller.
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// Tasks : Tasks struct
type Tasks struct {
 Tasks []Task `json:"tasks,omitempty"`
}

// UnmarshalTasks unmarshals an instance of Tasks from the specified map of raw messages.
func UnmarshalTasks(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(Tasks)
 if err = core.UnmarshalModel(m, "tasks", &obj.Tasks, UnmarshalTask); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// Whitelist : Whitelist struct
type Whitelist struct {
 // An array of allowlist entries.
 IpAddresses []WhitelistEntry `json:"ip_addresses,omitempty"`
}

// UnmarshalWhitelist unmarshals an instance of Whitelist from the specified map of raw messages.
func UnmarshalWhitelist(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(Whitelist)
 if err = core.UnmarshalModel(m, "ip_addresses", &obj.IpAddresses, UnmarshalWhitelistEntry); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// WhitelistEntry : WhitelistEntry struct
type WhitelistEntry struct {
 // An IPv4 address or a CIDR range (netmasked IPv4 address).
 Address *string `json:"address,omitempty"`
 // A human readable description of the address or range for identification purposes.
 Description *string `json:"description,omitempty"`
}

// UnmarshalWhitelistEntry unmarshals an instance of WhitelistEntry from the specified map of raw messages.
func UnmarshalWhitelistEntry(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(WhitelistEntry)
 if err = core.UnmarshalPrimitive(m, "address", &obj.Address); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "description", &obj.Description); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// AutoscalingSetGroupAutoscalingCPUGroup : AutoscalingSetGroupAutoscalingCPUGroup struct
// This model "extends" AutoscalingSetGroup
type AutoscalingSetGroupAutoscalingCPUGroup struct {
 Cpu *AutoscalingCPUGroupCPU `json:"cpu,omitempty"`
}

// isaAutoscalingSetGroup tags this type as a valid AutoscalingSetGroup variant.
func (*AutoscalingSetGroupAutoscalingCPUGroup) isaAutoscalingSetGroup() bool {
 return true
}

// UnmarshalAutoscalingSetGroupAutoscalingCPUGroup unmarshals an instance of AutoscalingSetGroupAutoscalingCPUGroup from the specified map of raw messages.
func UnmarshalAutoscalingSetGroupAutoscalingCPUGroup(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(AutoscalingSetGroupAutoscalingCPUGroup)
 if err = core.UnmarshalModel(m, "cpu", &obj.Cpu, UnmarshalAutoscalingCPUGroupCPU); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// AutoscalingSetGroupAutoscalingDiskGroup : AutoscalingSetGroupAutoscalingDiskGroup struct
// This model "extends" AutoscalingSetGroup
type AutoscalingSetGroupAutoscalingDiskGroup struct {
 Disk *AutoscalingDiskGroupDisk `json:"disk,omitempty"`
}

// isaAutoscalingSetGroup tags this type as a valid AutoscalingSetGroup variant.
func (*AutoscalingSetGroupAutoscalingDiskGroup) isaAutoscalingSetGroup() bool {
 return true
}

// UnmarshalAutoscalingSetGroupAutoscalingDiskGroup unmarshals an instance of AutoscalingSetGroupAutoscalingDiskGroup from the specified map of raw messages.
func UnmarshalAutoscalingSetGroupAutoscalingDiskGroup(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(AutoscalingSetGroupAutoscalingDiskGroup)
 if err = core.UnmarshalModel(m, "disk", &obj.Disk, UnmarshalAutoscalingDiskGroupDisk); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// AutoscalingSetGroupAutoscalingMemoryGroup : AutoscalingSetGroupAutoscalingMemoryGroup struct
// This model "extends" AutoscalingSetGroup
type AutoscalingSetGroupAutoscalingMemoryGroup struct {
 Memory *AutoscalingMemoryGroupMemory `json:"memory,omitempty"`
}

// isaAutoscalingSetGroup tags this type as a valid AutoscalingSetGroup variant.
func (*AutoscalingSetGroupAutoscalingMemoryGroup) isaAutoscalingSetGroup() bool {
 return true
}

// UnmarshalAutoscalingSetGroupAutoscalingMemoryGroup unmarshals an instance of AutoscalingSetGroupAutoscalingMemoryGroup from the specified map of raw messages.
func UnmarshalAutoscalingSetGroupAutoscalingMemoryGroup(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(AutoscalingSetGroupAutoscalingMemoryGroup)
 if err = core.UnmarshalModel(m, "memory", &obj.Memory, UnmarshalAutoscalingMemoryGroupMemory); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// ConfigurationSchemaSchemaPGConfigurationSchema : PostgreSQL and EnterpriseDB Configuration Schema.
// This model "extends" ConfigurationSchemaSchema
type ConfigurationSchemaSchemaPGConfigurationSchema struct {
 // Integer Property Schema.
 MaxConnections *IntegerPropertySchema `json:"max_connections" validate:"required"`
 // Integer Property Schema.
 MaxPreparedConnections *IntegerPropertySchema `json:"max_prepared_connections" validate:"required"`
 // Integer Property Schema.
 BackupRetentionPeriod *IntegerPropertySchema `json:"backup_retention_period" validate:"required"`
 // Integer Property Schema.
 DeadlockTimeout *IntegerPropertySchema `json:"deadlock_timeout" validate:"required"`
 // Integer Property Schema.
 EffectiveIoConcurrency *IntegerPropertySchema `json:"effective_io_concurrency" validate:"required"`
 // Integer Property Schema.
 MaxReplicationSlots *IntegerPropertySchema `json:"max_replication_slots" validate:"required"`
 // Integer Property Schema.
 MaxWalSenders *IntegerPropertySchema `json:"max_wal_senders" validate:"required"`
 // Integer Property Schema.
 SharedBuffers *IntegerPropertySchema `json:"shared_buffers" validate:"required"`
 // Choice Property Schema.
 SynchronousCommit *ChoicePropertySchema `json:"synchronous_commit" validate:"required"`
 // Choice Property Schema.
 WalLevel *ChoicePropertySchema `json:"wal_level" validate:"required"`
 // Integer Property Schema.
 ArchiveTimeout *IntegerPropertySchema `json:"archive_timeout" validate:"required"`
 // Integer Property Schema.
 LogMinDurationStatement *IntegerPropertySchema `json:"log_min_duration_statement" validate:"required"`
}

// isaConfigurationSchemaSchema tags this type as a valid ConfigurationSchemaSchema variant.
func (*ConfigurationSchemaSchemaPGConfigurationSchema) isaConfigurationSchemaSchema() bool {
 return true
}

// UnmarshalConfigurationSchemaSchemaPGConfigurationSchema unmarshals an instance of ConfigurationSchemaSchemaPGConfigurationSchema from the specified map of raw messages.
func UnmarshalConfigurationSchemaSchemaPGConfigurationSchema(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(ConfigurationSchemaSchemaPGConfigurationSchema)
 // Decode each schema property in turn, stopping at the first failure.
 if err = core.UnmarshalModel(m, "max_connections", &obj.MaxConnections, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "max_prepared_connections", &obj.MaxPreparedConnections, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "backup_retention_period", &obj.BackupRetentionPeriod, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "deadlock_timeout", &obj.DeadlockTimeout, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "effective_io_concurrency", &obj.EffectiveIoConcurrency, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "max_replication_slots", &obj.MaxReplicationSlots, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "max_wal_senders", &obj.MaxWalSenders, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "shared_buffers", &obj.SharedBuffers, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "synchronous_commit", &obj.SynchronousCommit, UnmarshalChoicePropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "wal_level", &obj.WalLevel, UnmarshalChoicePropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "archive_timeout", &obj.ArchiveTimeout, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "log_min_duration_statement", &obj.LogMinDurationStatement, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// ConfigurationSchemaSchemaRedisConfigurationSchema : Redis Configuration Schema.
// This model "extends" ConfigurationSchemaSchema
type ConfigurationSchemaSchemaRedisConfigurationSchema struct {
 // Integer Property Schema.
 MaxmemoryRedis *IntegerPropertySchema `json:"maxmemory-redis" validate:"required"`
 // Choice Property Schema.
 MaxmemoryPolicy *ChoicePropertySchema `json:"maxmemory-policy" validate:"required"`
 // Choice Property Schema.
 Appendonly *ChoicePropertySchema `json:"appendonly" validate:"required"`
 // Integer Property Schema.
 MaxmemorySamples *IntegerPropertySchema `json:"maxmemory-samples" validate:"required"`
 // Choice Property Schema.
 StopWritesOnBgsaveError *ChoicePropertySchema `json:"stop-writes-on-bgsave-error" validate:"required"`
}

// isaConfigurationSchemaSchema tags this type as a valid ConfigurationSchemaSchema variant.
func (*ConfigurationSchemaSchemaRedisConfigurationSchema) isaConfigurationSchemaSchema() bool {
 return true
}

// UnmarshalConfigurationSchemaSchemaRedisConfigurationSchema unmarshals an instance of ConfigurationSchemaSchemaRedisConfigurationSchema from the specified map of raw messages.
func UnmarshalConfigurationSchemaSchemaRedisConfigurationSchema(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(ConfigurationSchemaSchemaRedisConfigurationSchema)
 if err = core.UnmarshalModel(m, "maxmemory-redis", &obj.MaxmemoryRedis, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "maxmemory-policy", &obj.MaxmemoryPolicy, UnmarshalChoicePropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "appendonly", &obj.Appendonly, UnmarshalChoicePropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "maxmemory-samples", &obj.MaxmemorySamples, UnmarshalIntegerPropertySchema); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "stop-writes-on-bgsave-error", &obj.StopWritesOnBgsaveError, UnmarshalChoicePropertySchema); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// ConnectionConnectionElasticsearchConnection : Elasticsearch Connection Strings.
// This model "extends" ConnectionConnection
type ConnectionConnectionElasticsearchConnection struct {
 // Elasticsearch Connection information for drivers and libraries.
 Https *ElasticsearchConnectionHTTPS `json:"https" validate:"required"`
 // Connection information for cURL.
 Cli *ConnectionCLI `json:"cli" validate:"required"`
}

// isaConnectionConnection tags this type as a valid ConnectionConnection variant.
func (*ConnectionConnectionElasticsearchConnection) isaConnectionConnection() bool {
 return true
}

// UnmarshalConnectionConnectionElasticsearchConnection unmarshals an instance of ConnectionConnectionElasticsearchConnection from the specified map of raw messages.
func UnmarshalConnectionConnectionElasticsearchConnection(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(ConnectionConnectionElasticsearchConnection)
 if err = core.UnmarshalModel(m, "https", &obj.Https, UnmarshalElasticsearchConnectionHTTPS); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "cli", &obj.Cli, UnmarshalConnectionCLI); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// ConnectionConnectionEtcdConnection : etcd3 Connection Strings.
// This model "extends" ConnectionConnection
type ConnectionConnectionEtcdConnection struct {
 // GRPC(etcd3) Connection information for drivers and libraries.
 Grpc *GRPCConnectionURI `json:"grpc" validate:"required"`
 // Connection information for etcdctl.
 Cli *ConnectionCLI `json:"cli" validate:"required"`
}

// isaConnectionConnection tags this type as a valid ConnectionConnection variant.
func (*ConnectionConnectionEtcdConnection) isaConnectionConnection() bool {
 return true
}

// UnmarshalConnectionConnectionEtcdConnection unmarshals an instance of ConnectionConnectionEtcdConnection from the specified map of raw messages.
func UnmarshalConnectionConnectionEtcdConnection(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(ConnectionConnectionEtcdConnection)
 if err = core.UnmarshalModel(m, "grpc", &obj.Grpc, UnmarshalGRPCConnectionURI); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "cli", &obj.Cli, UnmarshalConnectionCLI); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// ConnectionConnectionMongoDBConnection : MongoDB Connection Strings.
// This model "extends" ConnectionConnection
type ConnectionConnectionMongoDBConnection struct {
 // MongoDB Connection information for drivers and libraries.
 Mongodb *MongoDBConnectionURI `json:"mongodb" validate:"required"`
 // Connection information for mongo shell.
 Cli *ConnectionCLI `json:"cli" validate:"required"`
}

// isaConnectionConnection tags this type as a valid ConnectionConnection variant.
func (*ConnectionConnectionMongoDBConnection) isaConnectionConnection() bool {
 return true
}

// UnmarshalConnectionConnectionMongoDBConnection unmarshals an instance of ConnectionConnectionMongoDBConnection from the specified map of raw messages.
func UnmarshalConnectionConnectionMongoDBConnection(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(ConnectionConnectionMongoDBConnection)
 if err = core.UnmarshalModel(m, "mongodb", &obj.Mongodb, UnmarshalMongoDBConnectionURI); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "cli", &obj.Cli, UnmarshalConnectionCLI); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// ConnectionConnectionPostgreSQLConnection : PostgreSQL and EnterpriseDB Connection Strings.
// This model "extends" ConnectionConnection
type ConnectionConnectionPostgreSQLConnection struct {
 // Connection information for drivers and libraries.
 Postgres *PostgreSQLConnectionURI `json:"postgres" validate:"required"`
 // Connection information for psql.
 Cli *ConnectionCLI `json:"cli" validate:"required"`
}

// isaConnectionConnection tags this type as a valid ConnectionConnection variant.
func (*ConnectionConnectionPostgreSQLConnection) isaConnectionConnection() bool {
 return true
}

// UnmarshalConnectionConnectionPostgreSQLConnection unmarshals an instance of ConnectionConnectionPostgreSQLConnection from the specified map of raw messages.
func UnmarshalConnectionConnectionPostgreSQLConnection(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(ConnectionConnectionPostgreSQLConnection)
 if err = core.UnmarshalModel(m, "postgres", &obj.Postgres, UnmarshalPostgreSQLConnectionURI); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "cli", &obj.Cli, UnmarshalConnectionCLI); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// ConnectionConnectionRabbitMQConnection : RabbitMQ Connection Strings.
// This model "extends" ConnectionConnection
type ConnectionConnectionRabbitMQConnection struct {
 // RabbitMQ Connection information for AMQPS drivers and libraries.
 Amqps *RabbitMQConnectionAMQPS `json:"amqps" validate:"required"`
 // RabbitMQ Connection information for MQTTS drivers and libraries.
 Mqtts *RabbitMQConnectionMQTTS `json:"mqtts" validate:"required"`
 // RabbitMQ Connection information for STOMP drivers and libraries.
 StompSsl *RabbitMQConnectionStompSSL `json:"stomp_ssl" validate:"required"`
 // RabbitMQ Connection information for HTTPS.
 Https *RabbitMQConnectionHTTPS `json:"https" validate:"required"`
 // Connection information for rabbitmqadmin.
 Cli *ConnectionCLI `json:"cli" validate:"required"`
}

// isaConnectionConnection tags this type as a valid ConnectionConnection variant.
func (*ConnectionConnectionRabbitMQConnection) isaConnectionConnection() bool {
 return true
}

// UnmarshalConnectionConnectionRabbitMQConnection unmarshals an instance of ConnectionConnectionRabbitMQConnection from the specified map of raw messages.
func UnmarshalConnectionConnectionRabbitMQConnection(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(ConnectionConnectionRabbitMQConnection)
 if err = core.UnmarshalModel(m, "amqps", &obj.Amqps, UnmarshalRabbitMQConnectionAMQPS); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "mqtts", &obj.Mqtts, UnmarshalRabbitMQConnectionMQTTS); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "stomp_ssl", &obj.StompSsl, UnmarshalRabbitMQConnectionStompSSL); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "https", &obj.Https, UnmarshalRabbitMQConnectionHTTPS); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "cli", &obj.Cli, UnmarshalConnectionCLI); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// ConnectionConnectionRedisConnection : Redis Connection Strings.
// This model "extends" ConnectionConnection
type ConnectionConnectionRedisConnection struct {
 // Connection information for drivers and libraries.
 Rediss *RedisConnectionURI `json:"rediss" validate:"required"`
 // Connection information for a Redis CLI client.
 Cli *ConnectionCLI `json:"cli" validate:"required"`
}

// isaConnectionConnection tags this type as a valid ConnectionConnection variant.
func (*ConnectionConnectionRedisConnection) isaConnectionConnection() bool {
 return true
}

// UnmarshalConnectionConnectionRedisConnection unmarshals an instance of ConnectionConnectionRedisConnection from the specified map of raw messages.
func UnmarshalConnectionConnectionRedisConnection(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(ConnectionConnectionRedisConnection)
 if err = core.UnmarshalModel(m, "rediss", &obj.Rediss, UnmarshalRedisConnectionURI); err != nil {
  return
 }
 if err = core.UnmarshalModel(m, "cli", &obj.Cli, UnmarshalConnectionCLI); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// SetConfigurationConfigurationPGConfiguration : PostgreSQL and EnterpriseDB Configuration.
// This model "extends" SetConfigurationConfiguration
type SetConfigurationConfigurationPGConfiguration struct {
 // Maximum connections allowed.
 MaxConnections *int64 `json:"max_connections,omitempty"`
 // Max number of transactions that can be in the "prepared" state simultaneously.
 MaxPreparedTransactions *int64 `json:"max_prepared_transactions,omitempty"`
 // Deadlock timeout in ms. The time to wait on a lock before checking for deadlock. Also the duration where lock waits
 // will be logged.
 DeadlockTimeout *int64 `json:"deadlock_timeout,omitempty"`
 // Number of simultaneous requests that can be handled efficiently by the disk subsystem.
 EffectiveIoConcurrency *int64 `json:"effective_io_concurrency,omitempty"`
 // Maximum number of simultaneously defined replication slots.
 MaxReplicationSlots *int64 `json:"max_replication_slots,omitempty"`
 // Maximum number of simultaneously running WAL sender processes.
 MaxWalSenders *int64 `json:"max_wal_senders,omitempty"`
 // The number of 8kB shared memory buffers used by the server. Set to 1/4 of memory. Setting too high will cause
 // crashes or prevent the database from starting.
 SharedBuffers *int64 `json:"shared_buffers,omitempty"`
 // Sets the current transaction's synchronization level. Off can result in data loss. remote_write with enable
 // synchronous replication which will impact performance and availabilty.
 SynchronousCommit *string `json:"synchronous_commit,omitempty"`
 // WAL level. Set to logical to use logical decoding or logical replication.
 WalLevel *string `json:"wal_level,omitempty"`
 // The number of seconds to wait before forces a switch to the next WAL file if a new file has not been started.
 ArchiveTimeout *int64 `json:"archive_timeout,omitempty"`
 // The minimum number of milliseconds for execution time above which statements will be logged.
 LogMinDurationStatement *int64 `json:"log_min_duration_statement,omitempty"`
}

// Constants associated with the SetConfigurationConfigurationPGConfiguration.SynchronousCommit property.
// Sets the current transaction's synchronization level. Off can result in data loss. remote_write with enable
// synchronous replication which will impact performance and availabilty.
const (
 SetConfigurationConfigurationPGConfiguration_SynchronousCommit_Local = "local"
 SetConfigurationConfigurationPGConfiguration_SynchronousCommit_Off = "off"
)

// Constants associated with the SetConfigurationConfigurationPGConfiguration.WalLevel property.
// WAL level. Set to logical to use logical decoding or logical replication.
const (
 SetConfigurationConfigurationPGConfiguration_WalLevel_HotStandby = "hot_standby"
 SetConfigurationConfigurationPGConfiguration_WalLevel_Logical = "logical"
)

// isaSetConfigurationConfiguration tags this type as a valid SetConfigurationConfiguration variant.
func (*SetConfigurationConfigurationPGConfiguration) isaSetConfigurationConfiguration() bool {
 return true
}

// UnmarshalSetConfigurationConfigurationPGConfiguration unmarshals an instance of SetConfigurationConfigurationPGConfiguration from the specified map of raw messages.
func UnmarshalSetConfigurationConfigurationPGConfiguration(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(SetConfigurationConfigurationPGConfiguration)
 // Decode each configuration property in turn, stopping at the first failure.
 if err = core.UnmarshalPrimitive(m, "max_connections", &obj.MaxConnections); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "max_prepared_transactions", &obj.MaxPreparedTransactions); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "deadlock_timeout", &obj.DeadlockTimeout); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "effective_io_concurrency", &obj.EffectiveIoConcurrency); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "max_replication_slots", &obj.MaxReplicationSlots); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "max_wal_senders", &obj.MaxWalSenders); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "shared_buffers", &obj.SharedBuffers); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "synchronous_commit", &obj.SynchronousCommit); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "wal_level", &obj.WalLevel); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "archive_timeout", &obj.ArchiveTimeout); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "log_min_duration_statement", &obj.LogMinDurationStatement); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// SetConfigurationConfigurationRedisConfiguration : Redis Configuration.
// This model "extends" SetConfigurationConfiguration
type SetConfigurationConfigurationRedisConfiguration struct {
 // The maximum memory Redis should use, as bytes.
 MaxmemoryRedis *int64 `json:"maxmemory-redis,omitempty"`
 // The policy with which Redis evicts keys when maximum memory is reached.
 MaxmemoryPolicy *string `json:"maxmemory-policy,omitempty"`
 // If set to yes this will enable AOF persistence.
 Appendonly *string `json:"appendonly,omitempty"`
 // The maximum memory Redis should use, as bytes.
 MaxmemorySamples *int64 `json:"maxmemory-samples,omitempty"`
 // Whether or not to stop accepting writes when background persistence actions fail.
 StopWritesOnBgsaveError *string `json:"stop-writes-on-bgsave-error,omitempty"`
}

// Constants associated with the SetConfigurationConfigurationRedisConfiguration.MaxmemoryPolicy property.
// The policy with which Redis evicts keys when maximum memory is reached.
const (
 SetConfigurationConfigurationRedisConfiguration_MaxmemoryPolicy_AllkeysLru = "allkeys-lru"
 SetConfigurationConfigurationRedisConfiguration_MaxmemoryPolicy_AllkeysRandom = "allkeys-random"
 SetConfigurationConfigurationRedisConfiguration_MaxmemoryPolicy_Noeviction = "noeviction"
 SetConfigurationConfigurationRedisConfiguration_MaxmemoryPolicy_VolatileLru = "volatile-lru"
 SetConfigurationConfigurationRedisConfiguration_MaxmemoryPolicy_VolatileRandom = "volatile-random"
 SetConfigurationConfigurationRedisConfiguration_MaxmemoryPolicy_VolatileTTL = "volatile-ttl"
)

// Constants associated with the SetConfigurationConfigurationRedisConfiguration.Appendonly property.
// If set to yes this will enable AOF persistence.
const (
 SetConfigurationConfigurationRedisConfiguration_Appendonly_No = "no"
 SetConfigurationConfigurationRedisConfiguration_Appendonly_Yes = "yes"
)

// Constants associated with the SetConfigurationConfigurationRedisConfiguration.StopWritesOnBgsaveError property.
// Whether or not to stop accepting writes when background persistence actions fail.
const (
 SetConfigurationConfigurationRedisConfiguration_StopWritesOnBgsaveError_No = "no"
 SetConfigurationConfigurationRedisConfiguration_StopWritesOnBgsaveError_Yes = "yes"
)

// isaSetConfigurationConfiguration tags this type as a valid SetConfigurationConfiguration variant.
func (*SetConfigurationConfigurationRedisConfiguration) isaSetConfigurationConfiguration() bool {
 return true
}

// UnmarshalSetConfigurationConfigurationRedisConfiguration unmarshals an instance of SetConfigurationConfigurationRedisConfiguration from the specified map of raw messages.
func UnmarshalSetConfigurationConfigurationRedisConfiguration(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(SetConfigurationConfigurationRedisConfiguration)
 if err = core.UnmarshalPrimitive(m, "maxmemory-redis", &obj.MaxmemoryRedis); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "maxmemory-policy", &obj.MaxmemoryPolicy); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "appendonly", &obj.Appendonly); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "maxmemory-samples", &obj.MaxmemorySamples); err != nil {
  return
 }
 if err = core.UnmarshalPrimitive(m, "stop-writes-on-bgsave-error", &obj.StopWritesOnBgsaveError); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// SetDeploymentScalingGroupRequestSetCPUGroup : SetDeploymentScalingGroupRequestSetCPUGroup struct
// This model "extends" SetDeploymentScalingGroupRequest
type SetDeploymentScalingGroupRequestSetCPUGroup struct {
 Cpu *SetCPUGroupCPU `json:"cpu,omitempty"`
}

// isaSetDeploymentScalingGroupRequest tags this type as a valid SetDeploymentScalingGroupRequest variant.
func (*SetDeploymentScalingGroupRequestSetCPUGroup) isaSetDeploymentScalingGroupRequest() bool {
 return true
}

// UnmarshalSetDeploymentScalingGroupRequestSetCPUGroup unmarshals an instance of SetDeploymentScalingGroupRequestSetCPUGroup from the specified map of raw messages.
func UnmarshalSetDeploymentScalingGroupRequestSetCPUGroup(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(SetDeploymentScalingGroupRequestSetCPUGroup)
 if err = core.UnmarshalModel(m, "cpu", &obj.Cpu, UnmarshalSetCPUGroupCPU); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// SetDeploymentScalingGroupRequestSetDiskGroup : SetDeploymentScalingGroupRequestSetDiskGroup struct
// This model "extends" SetDeploymentScalingGroupRequest
type SetDeploymentScalingGroupRequestSetDiskGroup struct {
 Disk *SetDiskGroupDisk `json:"disk,omitempty"`
}

// isaSetDeploymentScalingGroupRequest tags this type as a valid SetDeploymentScalingGroupRequest variant.
func (*SetDeploymentScalingGroupRequestSetDiskGroup) isaSetDeploymentScalingGroupRequest() bool {
 return true
}

// UnmarshalSetDeploymentScalingGroupRequestSetDiskGroup unmarshals an instance of SetDeploymentScalingGroupRequestSetDiskGroup from the specified map of raw messages.
func UnmarshalSetDeploymentScalingGroupRequestSetDiskGroup(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(SetDeploymentScalingGroupRequestSetDiskGroup)
 if err = core.UnmarshalModel(m, "disk", &obj.Disk, UnmarshalSetDiskGroupDisk); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// SetDeploymentScalingGroupRequestSetMembersGroup : SetDeploymentScalingGroupRequestSetMembersGroup struct
// This model "extends" SetDeploymentScalingGroupRequest
type SetDeploymentScalingGroupRequestSetMembersGroup struct {
 Members *SetMembersGroupMembers `json:"members,omitempty"`
}

// isaSetDeploymentScalingGroupRequest tags this type as a valid SetDeploymentScalingGroupRequest variant.
func (*SetDeploymentScalingGroupRequestSetMembersGroup) isaSetDeploymentScalingGroupRequest() bool {
 return true
}

// UnmarshalSetDeploymentScalingGroupRequestSetMembersGroup unmarshals an instance of SetDeploymentScalingGroupRequestSetMembersGroup from the specified map of raw messages.
func UnmarshalSetDeploymentScalingGroupRequestSetMembersGroup(m map[string]json.RawMessage, result interface{}) (err error) {
 obj := new(SetDeploymentScalingGroupRequestSetMembersGroup)
 if err = core.UnmarshalModel(m, "members", &obj.Members, UnmarshalSetMembersGroupMembers); err != nil {
  return
 }
 reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
 return nil
}
// SetDeploymentScalingGroupRequestSetMemoryGroup : SetDeploymentScalingGroupRequestSetMemoryGroup struct
// This model "extends" SetDeploymentScalingGroupRequest
type SetDeploymentScalingGroupRequestSetMemoryGroup struct {
Memory *SetMemoryGroupMemory `json:"memory,omitempty"`
}
func (*SetDeploymentScalingGroupRequestSetMemoryGroup) isaSetDeploymentScalingGroupRequest() bool {
return true
}
// UnmarshalSetDeploymentScalingGroupRequestSetMemoryGroup unmarshals an instance of SetDeploymentScalingGroupRequestSetMemoryGroup from the specified map of raw messages.
func UnmarshalSetDeploymentScalingGroupRequestSetMemoryGroup(m map[string]json.RawMessage, result interface{}) (err error) {
obj := new(SetDeploymentScalingGroupRequestSetMemoryGroup)
err = core.UnmarshalModel(m, "memory", &obj.Memory, UnmarshalSetMemoryGroupMemory)
if err != nil {
return
}
reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
return
}
// SetPromotionPromotionPromote : Promotes a read-only replica to a full deployment.
// This model "extends" SetPromotionPromotion
type SetPromotionPromotionPromote struct {
	// Promotion options.
	Promotion map[string]interface{} `json:"promotion,omitempty"`
}

// isaSetPromotionPromotion marks this type as a concrete variant of the
// SetPromotionPromotion discriminated union.
func (*SetPromotionPromotionPromote) isaSetPromotionPromotion() bool {
	return true
}

// UnmarshalSetPromotionPromotionPromote unmarshals an instance of SetPromotionPromotionPromote from the specified map of raw messages.
func UnmarshalSetPromotionPromotionPromote(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetPromotionPromotionPromote)
	err = core.UnmarshalPrimitive(m, "promotion", &obj.Promotion)
	if err != nil {
		return
	}
	// Populated model is written through the result pointer (generated pattern).
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}

// SetPromotionPromotionUpgradePromote : Promotes a read-only replica to a full deployment running a new database version.
// This model "extends" SetPromotionPromotion
type SetPromotionPromotionUpgradePromote struct {
	// Promotion and Upgrade options.
	Promotion map[string]interface{} `json:"promotion,omitempty"`
}

// isaSetPromotionPromotion marks this type as a concrete variant of the
// SetPromotionPromotion discriminated union.
func (*SetPromotionPromotionUpgradePromote) isaSetPromotionPromotion() bool {
	return true
}

// UnmarshalSetPromotionPromotionUpgradePromote unmarshals an instance of SetPromotionPromotionUpgradePromote from the specified map of raw messages.
func UnmarshalSetPromotionPromotionUpgradePromote(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(SetPromotionPromotionUpgradePromote)
	err = core.UnmarshalPrimitive(m, "promotion", &obj.Promotion)
	if err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
|
package handlers_test
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/egamorim/star-wars-planets/cmd/api/response"
"github.com/egamorim/star-wars-planets/cmd/api/handlers"
"github.com/egamorim/star-wars-planets/cmd/api/routers"
"github.com/egamorim/star-wars-planets/pkg/domain"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// Shared fixtures for the planet handler tests.
var (
	// planetTestDatabase is a dedicated database name so tests never touch
	// real data; it is dropped after each test via dropTestDatabase.
	planetTestDatabase = "star-wars-test"
	// apiPlanetResourcePath is the base route under test.
	apiPlanetResourcePath = "/planets"
)
// TestHandleInsertNewPlanet_Success posts a known Star Wars planet and
// expects 201 Created. Requires a reachable MongoDB instance.
func TestHandleInsertNewPlanet_Success(t *testing.T) {
	// NOTE(review): the connection error is discarded here (and in the other
	// tests); a failed connection surfaces later as a confusing handler error.
	mongo, _ := domain.GetMongoDBDatabase(planetTestDatabase)
	defer dropTestDatabase(mongo)
	payload := []byte(`{"name": "Tatooine", "terrain" : "mais um", "climate" : "gelado"}`)
	req, _ := http.NewRequest("POST", apiPlanetResourcePath, bytes.NewBuffer(payload))
	req.Header.Set("Content-Type", "application/json")
	res := executeRequest(req, mongo)
	checkResponseCode(t, http.StatusCreated, res.Code)
}

// TestHandleInsertNewPlanet_PlanetNotFoundSwapi posts a name that should not
// exist in SWAPI and expects 404 Not Found.
func TestHandleInsertNewPlanet_PlanetNotFoundSwapi(t *testing.T) {
	mongo, _ := domain.GetMongoDBDatabase(planetTestDatabase)
	defer dropTestDatabase(mongo)
	payload := []byte(`{"name": "Terra", "terrain" : "mais um", "climate" : "gelado"}`)
	req, _ := http.NewRequest("POST", apiPlanetResourcePath, bytes.NewBuffer(payload))
	req.Header.Set("Content-Type", "application/json")
	res := executeRequest(req, mongo)
	checkResponseCode(t, http.StatusNotFound, res.Code)
}

// TestHandleGetAll seeds four planets and expects GET /planets to return all
// of them.
func TestHandleGetAll(t *testing.T) {
	mongo, _ := domain.GetMongoDBDatabase(planetTestDatabase)
	defer dropTestDatabase(mongo)
	loadPlanetData(mongo)
	req, _ := http.NewRequest("GET", apiPlanetResourcePath, nil)
	res := executeRequest(req, mongo)
	var l response.ListPlanetResponse
	decoder := json.NewDecoder(res.Body)
	// Decode error intentionally ignored; a malformed body fails the length
	// assertion below anyway.
	decoder.Decode(&l)
	checkResponseCode(t, http.StatusOK, res.Code)
	if len(l.Planets) != 4 {
		t.Errorf("Expected 4 but got %d", len(l.Planets))
	}
}
// TestHandleFindByName_Success seeds data and expects a name-prefix search
// for "Tat" to resolve to Tatooine.
func TestHandleFindByName_Success(t *testing.T) {
	mongo, _ := domain.GetMongoDBDatabase(planetTestDatabase)
	defer dropTestDatabase(mongo)
	loadPlanetData(mongo)
	req, _ := http.NewRequest("GET", apiPlanetResourcePath+"/findByName/Tat", nil)
	res := executeRequest(req, mongo)
	var p domain.Planet
	decoder := json.NewDecoder(res.Body)
	// Decode error ignored; a malformed body fails the name assertion below.
	decoder.Decode(&p)
	checkResponseCode(t, http.StatusOK, res.Code)
	if p.Name != "Tatooine" {
		t.Errorf("Expected 'Tatooine' but got %s", p.Name)
	}
}

// TestHandleFindByName_PlanetNotFound expects 404 for a name matching no
// seeded planet.
func TestHandleFindByName_PlanetNotFound(t *testing.T) {
	mongo, _ := domain.GetMongoDBDatabase(planetTestDatabase)
	defer dropTestDatabase(mongo)
	loadPlanetData(mongo)
	req, _ := http.NewRequest("GET", apiPlanetResourcePath+"/findByName/Tataaa", nil)
	res := executeRequest(req, mongo)
	checkResponseCode(t, http.StatusNotFound, res.Code)
}
// TestHandleGetByID_Success fetches the seeded planet with a known ObjectId
// and verifies the returned document. Requires a reachable MongoDB instance.
func TestHandleGetByID_Success(t *testing.T) {
	mongo, _ := domain.GetMongoDBDatabase(planetTestDatabase)
	defer dropTestDatabase(mongo)
	loadPlanetData(mongo)
	// ...eb83 is the fixed id of "Yavin IV" in loadPlanetData.
	req, _ := http.NewRequest("GET", apiPlanetResourcePath+"/5ba005525f4f723340e0eb83", nil)
	res := executeRequest(req, mongo)
	checkResponseCode(t, http.StatusOK, res.Code)
	var p domain.Planet
	// Fix: the Decode error was silently ignored; fail fast on a bad body.
	if err := json.NewDecoder(res.Body).Decode(&p); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if p.Name != "Yavin IV" {
		// Fix: the failure message previously claimed 'Tatooine' was expected,
		// contradicting the assertion; it also duplicated checkResponseCode.
		t.Errorf("Expected 'Yavin IV' but got %s", p.Name)
	}
}
// TestHandleGetByID_NotFound expects 404 for an id that is not a valid
// ObjectId hex string.
func TestHandleGetByID_NotFound(t *testing.T) {
	mongo, _ := domain.GetMongoDBDatabase(planetTestDatabase)
	defer dropTestDatabase(mongo)
	loadPlanetData(mongo)
	req, _ := http.NewRequest("GET", apiPlanetResourcePath+"/5ba005525f4f723340e0eb83TEST", nil)
	res := executeRequest(req, mongo)
	checkResponseCode(t, http.StatusNotFound, res.Code)
}
// TestHandleDelete verifies end-to-end deletion: list four seeded planets,
// delete one by id, then list again and expect three.
func TestHandleDelete(t *testing.T) {
	mongo, _ := domain.GetMongoDBDatabase(planetTestDatabase)
	defer dropTestDatabase(mongo)
	loadPlanetData(mongo)
	// Baseline: all four seeded planets are present.
	req, _ := http.NewRequest("GET", apiPlanetResourcePath, nil)
	res := executeRequest(req, mongo)
	var l response.ListPlanetResponse
	decoder := json.NewDecoder(res.Body)
	decoder.Decode(&l)
	checkResponseCode(t, http.StatusOK, res.Code)
	if len(l.Planets) != 4 {
		t.Errorf("Expected 4 but got %d", len(l.Planets))
	}
	// Delete "Yavin IV" by its fixed id.
	req, _ = http.NewRequest("DELETE", apiPlanetResourcePath+"/5ba005525f4f723340e0eb83", nil)
	res = executeRequest(req, mongo)
	checkResponseCode(t, http.StatusOK, res.Code)
	// Re-list and expect one fewer planet.
	req, _ = http.NewRequest("GET", apiPlanetResourcePath, nil)
	res = executeRequest(req, mongo)
	decoder = json.NewDecoder(res.Body)
	decoder.Decode(&l)
	if len(l.Planets) != 3 {
		t.Errorf("Expected 3 but got %d", len(l.Planets))
	}
}
// executeRequest routes req through the full application router with a
// planet repository bound to mongo, and returns the recorded response.
func executeRequest(req *http.Request, mongo *mgo.Database) *httptest.ResponseRecorder {
	planetRepository := domain.PlanetRepository{}
	planetRepository.Mongo = mongo
	rr := httptest.NewRecorder()
	h := &handlers.Handler{
		PlanetRepository: &planetRepository,
	}
	routers.Router(h).ServeHTTP(rr, req)
	return rr
}
// checkResponseCode reports a test error when actual differs from expected.
func checkResponseCode(t *testing.T, expected, actual int) {
	if expected == actual {
		return
	}
	t.Errorf("Expected response code %d. Got %d\n", expected, actual)
}
// loadPlanetData seeds the "planet" collection with four planets using fixed
// ObjectIds so tests can address documents directly. Insert errors are
// deliberately ignored (the assertions in each test catch missing data).
func loadPlanetData(mongo *mgo.Database) {
	collection := mongo.C("planet")
	collection.Insert(&domain.Planet{
		ID: bson.ObjectIdHex("5ba005525f4f723340e0eb81"),
		Name: "Tatooine",
		Climate: "arid",
		Terrain: "desert",
	})
	collection.Insert(&domain.Planet{
		ID: bson.ObjectIdHex("5ba005525f4f723340e0eb82"),
		Name: "Alderaan",
		Climate: "temperate",
		Terrain: "grasslands, mountains",
	})
	collection.Insert(&domain.Planet{
		ID: bson.ObjectIdHex("5ba005525f4f723340e0eb83"),
		Name: "Yavin IV",
		Climate: "temperate, tropical",
		Terrain: "jungle, rainforests",
	})
	collection.Insert(&domain.Planet{
		ID: bson.ObjectIdHex("5ba005525f4f723340e0eb84"),
		Name: "Hoth",
		Climate: "frozen",
		Terrain: "tundra, ice caves, mountain ranges",
	})
}
// dropTestDatabase removes the whole test database, leaving a clean slate
// for the next test.
func dropTestDatabase(mongo *mgo.Database) {
	mongo.DropDatabase()
}
|
package main
import (
"encoding/json"
"errors"
"github.com/aws/aws-lambda-go/events"
"net/url"
"strings"
)
// SoaplessRequest describes a JSON request that is translated into a SOAP
// service call. Defaults are filled in by NewSoaplessRequest.
type SoaplessRequest struct {
	Service string `json:"service"` // The endpoint for the desired SOAP service
	RequestBody string `json:"requestBody"` // The location of an empty or sample SOAP service request
	// RequestMethod defaults to "POST" when omitted.
	RequestMethod string `json:"requestMethod,omitempty"`
	// Encoding defaults to "ISO-8859-1" when omitted.
	Encoding string `json:"encoding,omitempty"`
	// RequestProperties are HTTP headers; a default set derived from the
	// service URL is applied when omitted.
	RequestProperties map[string]string `json:"requestProperties,omitempty"`
	RequestMap map[string]string `json:"requestMap,omitempty"`
	ResponseMap map[string]map[string]string `json:"responseMap"`
}
// NewSoaplessRequest decodes and validates an API Gateway request body into a
// SoaplessRequest, applying defaults for encoding, request properties and
// request method. It returns an error when the body is not valid JSON or when
// the service/requestBody URLs are malformed.
func NewSoaplessRequest(input events.APIGatewayProxyRequest) (*SoaplessRequest, error) {
	r := &SoaplessRequest{}
	if err := json.Unmarshal([]byte(input.Body), r); err != nil {
		return nil, err
	}
	if _, err := url.ParseRequestURI(r.Service); err != nil {
		return nil, errors.New("service url is malformed")
	}
	if _, err := url.ParseRequestURI(r.RequestBody); err != nil {
		return nil, errors.New("request url is malformed")
	}
	if r.Encoding == "" {
		r.Encoding = "ISO-8859-1"
	}
	if r.RequestProperties == nil {
		// Derive the Host header from the service URL. TrimPrefix (rather
		// than Replace) only strips a leading scheme, and the slash lookup is
		// guarded: the old host[:strings.Index(host, "/")] panicked whenever
		// the service URL had no path component (Index returned -1).
		host := strings.TrimPrefix(r.Service, "http://")
		host = strings.TrimPrefix(host, "https://")
		if i := strings.Index(host, "/"); i >= 0 {
			host = host[:i]
		}
		r.RequestProperties = map[string]string{
			"Host": host,
			"User-Agent": "Apache-HttpClient/4.1.1",
			"Content-Type": "text/xml;charset=" + r.Encoding,
			"Accept-Encoding": "gzip,deflate",
		}
	}
	if r.RequestMethod == "" {
		r.RequestMethod = "POST"
	}
	return r, nil
}
|
// Every Go source file starts with a package clause; an executable program's
// entry point must live in package main.
package main

// A single-package import uses the plain (unparenthesized) form.
import "fmt"

// main prints a greeting to standard output.
func main() {
	fmt.Println("Hello")
}
|
package main
import (
"database/sql"
"fmt"
"io/ioutil"
_ "github.com/lib/pq"
)
// connectDB reads the horizon database password from disk, opens a connection
// to the local "saas" Postgres database and verifies it with a ping. It
// panics when the connection cannot be established.
func connectDB() *sql.DB{
	fmt.Println("[*] Get password of the db")
	b, err := ioutil.ReadFile("/usr/local/horizon/conf/db.pwd")
	if err != nil {
		fmt.Print(err)
	}
	password := string(b)
	// Fix: db.pwd typically ends with a newline; strip trailing whitespace so
	// both the printed value and the connection string hold the bare password.
	for len(password) > 0 {
		c := password[len(password)-1]
		if c != '\n' && c != '\r' && c != ' ' && c != '\t' {
			break
		}
		password = password[:len(password)-1]
	}
	fmt.Println("[+] " + password)
	var host = "localhost"
	var port int = 5432
	var user = "horizon"
	var dbname = "saas"
	// lib/pq key=value connection string; sslmode disabled for localhost.
	psqlInfo := fmt.Sprintf("host=%s port=%d user=%s "+
		"password=%s dbname=%s sslmode=disable",
		host, port, user, password, dbname)
	fmt.Println("[*] psqlInfo:" + psqlInfo)
	db, err := sql.Open("postgres", psqlInfo)
	if err != nil {
		panic(err)
	}
	// sql.Open is lazy; Ping forces an actual connection attempt.
	err = db.Ping()
	if err != nil {
		panic(err)
	}
	fmt.Println("[+] Successfully connected!")
	return db
}
// queryUser dumps every row of the Users table to stdout.
func queryUser(db *sql.DB){
	fmt.Println("[*] Querying User")
	var strUsername,strEmail,idUser,createdDate,domain string
	var searchStr = `SELECT "strUsername","strEmail","idUser","createdDate","domain" FROM "Users"`
	fmt.Println(" " + searchStr)
	rows,err:=db.Query(searchStr)
	if err!= nil{
		panic(err)
	}
	defer rows.Close()
	for rows.Next(){
		err:= rows.Scan(&strUsername,&strEmail,&idUser,&createdDate,&domain)
		if err!= nil{
			// Fix: scan failures were silently swallowed, which then printed
			// stale values from the previous row. Report and skip the row.
			fmt.Println("[-] scan error:", err)
			continue
		}
		fmt.Println(" - idUser : " + idUser)
		fmt.Println(" strUsername: " + strUsername)
		fmt.Println(" strEmail : " + strEmail)
		fmt.Println(" createdDate: " + createdDate)
		fmt.Println(" domain : " + domain)
	}
	// Surface any error that terminated the iteration itself.
	err = rows.Err()
	if err!= nil{
		panic(err)
	}
}
// main connects to the local horizon Postgres database and dumps the Users
// table to stdout.
func main() {
	db:=connectDB()
	queryUser(db)
}
package main
import (
"context"
_ "expvar"
"fmt"
"math/rand"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"strconv"
"syscall"
"time"
"github.com/atlassian/gostatsd/pkg/util"
"github.com/atlassian/gostatsd"
"github.com/atlassian/gostatsd/pkg/backends"
"github.com/atlassian/gostatsd/pkg/statsd"
"github.com/atlassian/gostatsd/pkg/transport"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"golang.org/x/time/rate"
)
// Top-level command-line / configuration parameter names.
const (
	// ParamVerbose enables verbose logging.
	ParamVerbose = "verbose"
	// ParamProfile enables profiler endpoint on the specified address and port.
	ParamProfile = "profile"
	// ParamJSON makes logger log in JSON format.
	ParamJSON = "json"
	// ParamConfigPath provides file with configuration.
	ParamConfigPath = "config-path"
	// ParamVersion makes program output its version.
	ParamVersion = "version"
)
// main seeds the PRNG, parses configuration and either prints the version or
// runs the server. Version, GitCommit and BuildDate are package-level values
// defined elsewhere (presumably injected at build time — not visible here).
func main() {
	rand.Seed(time.Now().UnixNano())
	v, version, err := setupConfiguration()
	if err != nil {
		if err == pflag.ErrHelp {
			// --help was requested; pflag has already printed usage.
			return
		}
		logrus.Fatalf("Error while parsing configuration: %v", err)
	}
	if version {
		fmt.Printf("Version: %s - Commit: %s - Date: %s\n", Version, GitCommit, BuildDate)
		return
	}
	if err := run(v); err != nil {
		logrus.Fatalf("%v", err)
	}
}
// run optionally starts the pprof endpoint, constructs the statsd server from
// the viper configuration, and runs it until interrupted. A context.Canceled
// result is treated as a clean shutdown.
func run(v *viper.Viper) error {
	if addr := v.GetString(ParamProfile); addr != "" {
		go func() {
			logrus.Errorf("Profiler server failed: %v", http.ListenAndServe(addr, nil))
		}()
	}
	logrus.Info("Starting server")
	server, err := constructServer(v)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cancelOnInterrupt(ctx, cancel)
	runErr := server.Run(ctx)
	if runErr == nil || runErr == context.Canceled {
		return nil
	}
	return fmt.Errorf("server error: %v", runErr)
}
// constructServer assembles a statsd.Server from the viper configuration:
// standard logger, HTTP transport pool, optional cloud-provider handler
// factory, the configured backends, and percentile thresholds. Every Server
// field is a one-to-one mapping from a statsd.Param* configuration key.
func constructServer(v *viper.Viper) (*statsd.Server, error) {
	// Logger
	logger := logrus.StandardLogger()
	// HTTP client pool
	pool := transport.NewTransportPool(logger, v)
	// Cloud handler factory (may be nil when no cloud provider is configured).
	cloud, err := statsd.NewCloudHandlerFactoryFromViper(v, logger, Version)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		if err := cloud.InitCloudProvider(v); err != nil {
			return nil, err
		}
	}
	// Backends
	backendNames := v.GetStringSlice(statsd.ParamBackends)
	backendsList := make([]gostatsd.Backend, len(backendNames))
	for i, backendName := range backendNames {
		backend, errBackend := backends.InitBackend(backendName, v, pool)
		if errBackend != nil {
			return nil, errBackend
		}
		backendsList[i] = backend
	}
	// Percentiles
	pt, err := getPercentiles(v.GetStringSlice(statsd.ParamPercentThreshold))
	if err != nil {
		return nil, err
	}
	// Create server
	return &statsd.Server{
		Backends: backendsList,
		CloudHandlerFactory: cloud,
		InternalTags: v.GetStringSlice(statsd.ParamInternalTags),
		InternalNamespace: v.GetString(statsd.ParamInternalNamespace),
		DefaultTags: v.GetStringSlice(statsd.ParamDefaultTags),
		Hostname: v.GetString(statsd.ParamHostname),
		ExpiryInterval: v.GetDuration(statsd.ParamExpiryInterval),
		FlushInterval: v.GetDuration(statsd.ParamFlushInterval),
		IgnoreHost: v.GetBool(statsd.ParamIgnoreHost),
		MaxReaders: v.GetInt(statsd.ParamMaxReaders),
		MaxParsers: v.GetInt(statsd.ParamMaxParsers),
		MaxWorkers: v.GetInt(statsd.ParamMaxWorkers),
		MaxQueueSize: v.GetInt(statsd.ParamMaxQueueSize),
		MaxConcurrentEvents: v.GetInt(statsd.ParamMaxConcurrentEvents),
		EstimatedTags: v.GetInt(statsd.ParamEstimatedTags),
		MetricsAddr: v.GetString(statsd.ParamMetricsAddr),
		Namespace: v.GetString(statsd.ParamNamespace),
		StatserType: v.GetString(statsd.ParamStatserType),
		PercentThreshold: pt,
		HeartbeatEnabled: v.GetBool(statsd.ParamHeartbeatEnabled),
		ReceiveBatchSize: v.GetInt(statsd.ParamReceiveBatchSize),
		ConnPerReader: v.GetBool(statsd.ParamConnPerReader),
		ServerMode: v.GetString(statsd.ParamServerMode),
		LogRawMetric: v.GetBool(statsd.ParamLogRawMetric),
		// Heartbeats carry the build identity as tags.
		HeartbeatTags: gostatsd.Tags{
			fmt.Sprintf("version:%s", Version),
			fmt.Sprintf("commit:%s", GitCommit),
		},
		DisabledSubTypes: gostatsd.DisabledSubMetrics(v),
		// Configured per minute, converted here to a per-second rate limit.
		BadLineRateLimitPerSecond: rate.Limit(v.GetFloat64(statsd.ParamBadLinesPerMinute) / 60.0),
		HistogramLimit: v.GetUint32(statsd.ParamTimerHistogramLimit),
		Viper: v,
		TransportPool: pool,
	}, nil
}
// getPercentiles converts a list of decimal strings into float64 percentile
// thresholds, returning the first parse error encountered.
func getPercentiles(s []string) ([]float64, error) {
	thresholds := make([]float64, len(s))
	for idx, raw := range s {
		val, err := strconv.ParseFloat(raw, 64)
		if err != nil {
			return nil, err
		}
		thresholds[idx] = val
	}
	return thresholds, nil
}
// cancelOnInterrupt calls f when os.Interrupt or SIGTERM is received.
func cancelOnInterrupt(ctx context.Context, f context.CancelFunc) {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
select {
case <-ctx.Done():
case <-c:
f()
}
}()
}
// setupConfiguration defines and parses command-line flags, binds each flag
// to viper, and optionally loads a configuration file. It returns the viper
// instance and whether --version was requested. The deferred setupLogger
// applies logging settings even on early error returns.
func setupConfiguration() (*viper.Viper, bool, error) {
	v := viper.New()
	defer setupLogger(v) // Apply logging configuration in case of early exit
	util.InitViper(v, "")
	var version bool
	cmd := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
	cmd.BoolVar(&version, ParamVersion, false, "Print the version and exit")
	cmd.Bool(ParamVerbose, false, "Verbose")
	cmd.Bool(ParamJSON, false, "Log in JSON format")
	cmd.String(ParamProfile, "", "Enable profiler endpoint on the specified address and port")
	cmd.String(ParamConfigPath, "", "Path to the configuration file")
	statsd.AddFlags(cmd)
	// Every defined flag becomes a viper key of the same name.
	cmd.VisitAll(func(flag *pflag.Flag) {
		if err := v.BindPFlag(flag.Name, flag); err != nil {
			panic(err) // Should never happen
		}
	})
	if err := cmd.Parse(os.Args[1:]); err != nil {
		return nil, false, err
	}
	configPath := v.GetString(ParamConfigPath)
	if configPath != "" {
		v.SetConfigFile(configPath)
		if err := v.ReadInConfig(); err != nil {
			return nil, false, err
		}
	}
	return v, version, nil
}
// setupLogger applies the logging configuration held by v to the global
// logrus logger: debug level when verbose, JSON output when requested.
func setupLogger(v *viper.Viper) {
	if verbose := v.GetBool(ParamVerbose); verbose {
		logrus.SetLevel(logrus.DebugLevel)
	}
	if asJSON := v.GetBool(ParamJSON); asJSON {
		logrus.SetFormatter(&logrus.JSONFormatter{})
	}
}
|
package rpcRouter
import (
"bytes"
"context"
"github.com/go-xe2/x/os/xlog"
"github.com/go-xe2/xthrift/pdl"
)
// makeErrorData serializes an error-response packet: packet header, error
// payload (message + code), packet footer, then a flush. Unlike the other
// make* helpers below, any write failure aborts and is returned to the
// caller. (Original comment, translated: "send error message".)
func makeErrorData(pktId int64, msg string, code int32) ([]byte, error) {
	buf := bytes.NewBuffer([]byte{})
	proto := NewRouterBinaryProto(buf)
	if err := proto.WritePacketBegin(ERR_RES_PACKET, pktId); err != nil {
		return nil, err
	}
	if err := proto.WriteError(msg, code); err != nil {
		return nil, err
	}
	if err := proto.WritePacketEnd(); err != nil {
		return nil, err
	}
	if err := proto.Flush(context.Background()); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// makeCallReplyData serializes a call-reply packet (header, call envelope
// with namespace/method/seqId, payload, footers, flush). Write failures are
// only logged; the — possibly partial — buffer is returned regardless.
func makeCallReplyData(ctx context.Context, pktId int64, namespace string, method string, seqId int32, data []byte) []byte {
	buf := bytes.NewBuffer([]byte{})
	proto := NewRouterBinaryProto(buf)
	if err := proto.WritePacketBegin(REPLY_PACKET, pktId); err != nil {
		xlog.Error(err)
	}
	if err := proto.WriteCallBegin(namespace, method, seqId); err != nil {
		xlog.Error(err)
	}
	if err := proto.WriteData(data); err != nil {
		xlog.Error(err)
	}
	if err := proto.WriteCallEnd(); err != nil {
		xlog.Error(err)
	}
	if err := proto.WritePacketEnd(); err != nil {
		xlog.Error(err)
	}
	if err := proto.Flush(ctx); err != nil {
		xlog.Error(err)
	}
	return buf.Bytes()
}
// makeRegResData serializes a registration-response packet whose payload is
// "projectName,md5". Write failures are only logged; the buffer is returned
// regardless.
func makeRegResData(ctx context.Context, pktId int64, projectName string, md5 string) []byte {
	outBuf := bytes.NewBuffer([]byte{})
	outProto := NewRouterBinaryProto(outBuf)
	if err := outProto.WritePacketBegin(REG_RES_PACKET, pktId); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WriteData([]byte(projectName + "," + md5)); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WritePacketEnd(); err != nil {
		xlog.Error(err)
	}
	if err := outProto.Flush(ctx); err != nil {
		xlog.Error(err)
	}
	return outBuf.Bytes()
}
// makeRegData serializes a registration packet: the project definition is
// first saved into its own buffer, then wrapped in a registration envelope
// (clientId, project name, md5). Write failures are only logged; the buffer
// is returned regardless.
func makeRegData(ctx context.Context, clientId string, pktId int64, proj *pdl.FileProject, md5 string) []byte {
	outBuf := bytes.NewBuffer([]byte{})
	outProto := NewRouterBinaryProto(outBuf)
	projBuf := bytes.NewBuffer([]byte{})
	if err := proj.SaveProject(projBuf); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WritePacketBegin(REG_PACKET, pktId); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WriteRegBegin(clientId, proj.GetProjectName(), md5); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WriteData(projBuf.Bytes()); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WriteRegEnd(); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WritePacketEnd(); err != nil {
		xlog.Error(err)
	}
	if err := outProto.Flush(ctx); err != nil {
		xlog.Error(err)
	}
	return outBuf.Bytes()
}
// makeCallData serializes an outbound call packet (header, call envelope with
// namespace/method/seqId, rpc payload, footers, flush). Write failures are
// only logged; the buffer is returned regardless.
func makeCallData(ctx context.Context, pktId int64, namespace string, method string, seqId int32, rpcData []byte) []byte {
	outBuf := bytes.NewBuffer([]byte{})
	outProto := NewRouterBinaryProto(outBuf)
	if err := outProto.WritePacketBegin(CALL_PACKET, pktId); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WriteCallBegin(namespace, method, seqId); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WriteData(rpcData); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WriteCallEnd(); err != nil {
		xlog.Error(err)
	}
	if err := outProto.WritePacketEnd(); err != nil {
		xlog.Error(err)
	}
	if err := outProto.Flush(ctx); err != nil {
		xlog.Error(err)
	}
	return outBuf.Bytes()
}
|
/*
* Copyright 1999-2018 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package core implements the core modules of dfget.
package core
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"time"
"github.com/alibaba/Dragonfly/dfget/config"
"github.com/alibaba/Dragonfly/dfget/core/downloader"
"github.com/alibaba/Dragonfly/dfget/errors"
"github.com/alibaba/Dragonfly/dfget/util"
"github.com/alibaba/Dragonfly/version"
)
// init seeds the global math/rand source so the random fallback names in
// createTempTargetFile differ between runs.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// Start function creates a new task and starts it to download file.
func Start(ctx *config.Context) *errors.DFGetError {
var err error
util.Printer.Println(fmt.Sprintf("--%s-- %s",
ctx.StartTime.Format(config.DefaultTimestampFormat), ctx.URL))
if err = prepare(ctx); err != nil {
return errors.New(1100, err.Error())
}
if err = registerToSuperNode(ctx); err != nil {
return errors.New(1200, err.Error())
}
if err = downloadFile(ctx); err != nil {
return errors.New(1300, err.Error())
}
return nil
}
// prepare creates the directories and temporary target file the download
// needs. Failures inside are raised as panics via panicIf; the deferred
// recover converts them back into the named return error.
func prepare(ctx *config.Context) (err error) {
	defer func() {
		if r := recover(); r != nil {
			// panicIf only panics with error values, so this assertion holds
			// for panics raised intentionally in this function.
			err = r.(error)
		}
	}()
	util.Printer.Printf("dfget version:%s", version.DFGetVersion)
	util.Printer.Printf("workspace:%s sign:%s", ctx.WorkHome, ctx.Sign)
	ctx.ClientLogger.Infof("target file path:%s", ctx.Output)
	ctx.RealTarget = ctx.Output
	ctx.TargetDir = path.Dir(ctx.RealTarget)
	panicIf(util.CreateDirectory(ctx.TargetDir))
	ctx.TempTarget, err = createTempTargetFile(ctx.TargetDir, ctx.Sign)
	panicIf(err)
	panicIf(util.CreateDirectory(path.Dir(ctx.MetaPath)))
	panicIf(util.CreateDirectory(ctx.WorkHome))
	panicIf(util.CreateDirectory(ctx.SystemDataDir))
	ctx.DataDir = ctx.SystemDataDir
	return nil
}
// registerToSuperNode announces this download task to the supernode.
func registerToSuperNode(ctx *config.Context) error {
	return register(ctx)
}

// downloadFile selects a downloader implementation and runs it: back-source
// when BackSourceReason is set, P2P otherwise.
func downloadFile(ctx *config.Context) error {
	var getter downloader.Downloader
	if ctx.BackSourceReason > 0 {
		getter = &downloader.BackDownloader{}
	} else {
		getter = &downloader.P2PDownloader{}
	}
	// NOTE(review): getter.Run's outcome is not checked here and this
	// function always returns nil — confirm whether Run reports failures
	// through another channel.
	getter.Run()
	return nil
}
func createTempTargetFile(targetDir string, sign string) (name string, e error) {
var (
f *os.File
)
defer func() {
if e == nil {
f.Close()
}
}()
prefix := "dfget-" + sign + ".tmp-"
f, e = ioutil.TempFile(targetDir, prefix)
if e == nil {
return f.Name(), e
}
f, e = os.OpenFile(path.Join(targetDir, fmt.Sprintf("%s%d", prefix, rand.Uint64())),
os.O_CREATE|os.O_EXCL, 0755)
if e == nil {
return f.Name(), e
}
return "", e
}
func panicIf(err error) {
if err != nil {
panic(err)
}
}
|
package handler
import (
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"net/http/httputil"
"sort"
"github.com/ryotarai/kube-daemonset-proxy/pkg/k8s"
"github.com/gin-gonic/gin"
"github.com/rakyll/statik/fs"
_ "github.com/ryotarai/kube-daemonset-proxy/statik"
corev1 "k8s.io/api/core/v1"
)
// New builds a Handler from options and prepares its router, returning an
// error when template loading or router setup fails.
func New(options Options) (*Handler, error) {
	s := &Handler{Options: options}
	err := s.prepare()
	if err != nil {
		return nil, err
	}
	return s, nil
}

// Options configures a Handler.
type Options struct {
	// Watcher supplies the current daemonset pods.
	Watcher *k8s.Watcher
	// PodPortName is the named container port to proxy to.
	PodPortName string
	// Title is shown on the index page.
	Title string
}

// Handler serves the index page and per-node reverse proxy; the embedded
// gin router is built once by prepare.
type Handler struct {
	Options
	router *gin.Engine
}
// prepare builds the gin router: loads the index template from the embedded
// statik filesystem, registers the index, per-node proxy, and static-asset
// routes, and stores the router on the handler.
func (s *Handler) prepare() error {
	statikFS, err := fs.New()
	if err != nil {
		return err
	}
	tmpl := template.New("")
	tmpl, err = s.loadTemplate(tmpl, statikFS, "/templates/index.html.tmpl")
	if err != nil {
		return err
	}
	router := gin.Default()
	router.SetHTMLTemplate(tmpl)
	router.GET("/", s.handleIndex)
	// The node proxy route accepts every common HTTP verb.
	for _, m := range []string{"GET", "POST", "PUT", "PATCH", "DELETE"} {
		router.Handle(m, "/n/:nodename/*path", s.handleNodeProxy)
	}
	router.GET("/public/*path", gin.WrapH(http.FileServer(statikFS)))
	s.router = router
	return nil
}
// loadTemplate reads the named file from filesystem and parses its contents
// into t as a template with that same name.
func (s *Handler) loadTemplate(t *template.Template, filesystem http.FileSystem, name string) (*template.Template, error) {
	f, err := filesystem.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	raw, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}
	parsed, err := t.New(name).Parse(string(raw))
	if err != nil {
		return nil, err
	}
	return parsed, nil
}
// ServeHTTP delegates every request to the prepared gin router, making
// Handler usable as a standard http.Handler.
func (s *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.router.ServeHTTP(w, r)
}

// handleIndex renders the index template with all watched pods, sorted by
// node name for a stable listing.
func (s *Handler) handleIndex(c *gin.Context) {
	pods, err := s.Watcher.Pods()
	if err != nil {
		handleError(c, err)
		return
	}
	sort.Slice(pods, func(i, j int) bool {
		return pods[i].Spec.NodeName < pods[j].Spec.NodeName
	})
	c.HTML(200, "/templates/index.html.tmpl", map[string]interface{}{
		"Pods": pods,
		"Title": s.Title,
	})
}
// findPortInPod returns the container port in pod whose name matches
// s.PodPortName, or -1 when no container declares such a port.
func (s *Handler) findPortInPod(pod *corev1.Pod) int32 {
	for _, container := range pod.Spec.Containers {
		for _, port := range container.Ports {
			if port.Name != s.PodPortName {
				continue
			}
			return port.ContainerPort
		}
	}
	return -1
}

// handleError logs err and replies with HTTP 500.
func handleError(c *gin.Context, err error) {
	log.Printf("error: %v", err)
	c.String(500, "Internal error: %v\n", err)
}
// handleNodeProxy reverse-proxies the request to the daemonset pod running on
// the node named in the URL, rewriting the target URL to the pod's IP and the
// configured named container port.
func (s *Handler) handleNodeProxy(c *gin.Context) {
	nodeName := c.Param("nodename")
	path := c.Param("path")
	pods, err := s.Watcher.Pods()
	if err != nil {
		handleError(c, err)
		return
	}
	// Pick the (last listed) pod scheduled on the requested node.
	var pod *corev1.Pod
	for _, p := range pods {
		if p.Spec.NodeName == nodeName {
			pod = p
		}
	}
	if pod == nil {
		c.Status(404)
		return
	}
	port := s.findPortInPod(pod)
	if port < 0 {
		// Fix: err is always nil at this point, so the previous
		// handleError(c, err) reported "Internal error: <nil>".
		// Report the actual problem instead.
		handleError(c, fmt.Errorf("port %q not found in pod %s", s.PodPortName, pod.Name))
		return
	}
	director := func(req *http.Request) {
		req.URL.Scheme = "http"
		req.URL.Host = fmt.Sprintf("%s:%d", pod.Status.PodIP, port)
		req.URL.Path = path
		if _, ok := req.Header["User-Agent"]; !ok {
			// explicitly disable User-Agent so it's not set to default value
			req.Header.Set("User-Agent", "")
		}
	}
	// TODO: reuse reverseProxy
	p := &httputil.ReverseProxy{Director: director}
	p.ServeHTTP(c.Writer, c.Request)
}
|
package leetcode
func nextPermutation(nums []int) {
i := len(nums) - 2
for i >= 0 && nums[i] >= nums[i+1] {
i--
}
if i >= 0 {
j := len(nums) - 1
for j >= 0 && nums[i] >= nums[j] {
j--
}
if j >= 0 {
nums[i], nums[j] = nums[j], nums[i]
}
}
reverse(nums[i+1:])
}
func reverse(nums []int) {
l, r := 0, len(nums)-1
for l < r {
nums[l], nums[r] = nums[r], nums[l]
l++
r--
}
}
|
package inmem
import (
"github.com/smilga/analyzer/api"
)
// FilterStore is an in-memory Filter store backed by a plain slice. It is
// not safe for concurrent use.
type FilterStore struct {
	filters []*api.Filter
}
// Save inserts or replaces target in the store. A zero ID is assigned the
// next ID after the current maximum; an existing filter with the same ID is
// replaced.
func (s *FilterStore) Save(target *api.Filter) error {
	if target.ID == 0 {
		var last int64
		for _, n := range s.filters {
			if int64(n.ID) > last {
				last = int64(n.ID)
			}
		}
		target.ID = api.FilterID(last + 1)
	}
	// Remove an existing filter with the same ID before appending. Fix: break
	// immediately — mutating s.filters while still ranging over it continued
	// the loop over a stale view of the slice.
	for i, t := range s.filters {
		if t.ID == target.ID {
			s.filters = append(s.filters[:i], s.filters[i+1:]...)
			break
		}
	}
	s.filters = append(s.filters, target)
	return nil
}
// All returns the backing slice of all stored filters (not a copy).
func (s *FilterStore) All() ([]*api.Filter, error) {
	return s.filters, nil
}

// Get returns the filter with the given id, or api.ErrFilterNotFound when no
// such filter exists.
func (s *FilterStore) Get(id api.FilterID) (*api.Filter, error) {
	for _, t := range s.filters {
		if t.ID == api.FilterID(id) {
			return t, nil
		}
	}
	return nil, api.ErrFilterNotFound
}

// NewFilterStore returns a store pre-populated with the default filters
// below.
func NewFilterStore() *FilterStore {
	return &FilterStore{
		filters: filters,
	}
}

// filters is the default seed data shared by every NewFilterStore instance.
var filters = []*api.Filter{
	&api.Filter{
		ID: 1,
		Name: "Marketing Services",
		Description: "",
		Tags: []*api.Tag{},
	},
	&api.Filter{
		ID: 2,
		Name: "Analytics providers",
		Description: "",
		Tags: []*api.Tag{},
	},
}
|
package realm
import (
"encoding/json"
"errors"
"fmt"
"golang.org/x/mod/semver"
)
// Override is a Toggle value to be consumed by and restricted to a semantic version range
type Override struct {
	*Toggle
	// Semantic-version range bounds; both are required and validated during
	// JSON decoding.
	MinimumVersion string `json:"minimumVersion"`
	MaximumVersion string `json:"maximumVersion"`
}
// UnmarshalJSON implements custom JSON decoding for Override: it decodes the
// embedded Toggle, extracts the two version fields, then validates. The
// validation order is: non-nil Value, valid minimum, valid maximum, and
// minimum not greater than maximum.
func (o *Override) UnmarshalJSON(b []byte) error {
	var toggle Toggle
	err := json.Unmarshal(b, &toggle)
	if err != nil {
		return err
	}
	o.Toggle = &toggle
	// Re-decode into a raw map to pick out the version fields without
	// interfering with Toggle's own decoding.
	var m map[string]json.RawMessage
	if err := json.Unmarshal(b, &m); err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "minimumVersion":
			var min string
			if err := json.Unmarshal(v, &min); err != nil {
				return err
			}
			o.MinimumVersion = min
		case "maximumVersion":
			var max string
			if err := json.Unmarshal(v, &max); err != nil {
				return err
			}
			o.MaximumVersion = max
		}
	}
	if o.Value == nil {
		return errors.New("Override value cannot be empty/nil")
	}
	// Absent version fields are rejected too: semver.IsValid("") is false,
	// so both bounds are effectively required.
	if isValidMin := semver.IsValid(o.MinimumVersion); !isValidMin {
		return fmt.Errorf("%q is not a valid semantic version", o.MinimumVersion)
	}
	if isValidMax := semver.IsValid(o.MaximumVersion); !isValidMax {
		return fmt.Errorf("%q is not a valid semantic version", o.MaximumVersion)
	}
	// if minimum version is greater than maximum version
	if semver.Compare(o.MinimumVersion, o.MaximumVersion) == 1 {
		return fmt.Errorf("an override with the minimum version of %v is greater than its maximum version (%v)", o.MinimumVersion, o.MaximumVersion)
	}
	return nil
}
|
package spec
import (
"strconv"
"strings"
"github.com/pkg/errors"
)
// Size is a byte count parsed from a human-readable string such as "64",
// "512K", "1MiB" or "2GB".
type Size int

// Parse sets size from s: a run of decimal digits optionally followed by a
// unit — K/KIB, M/MIB, G/GIB (binary) or KB/MB/GB (decimal), case
// insensitive. It returns an error for a malformed number or unknown unit.
func (size *Size) Parse(s string) error {
	// Split s at the first non-digit: digits before, unit after.
	i := strings.IndexFunc(s, func(r rune) bool {
		return r < '0' || r > '9'
	})
	if i < 0 {
		// No unit present: the whole string is the number.
		v, err := strconv.Atoi(s)
		*size = Size(v)
		return err
	}
	v, err := strconv.Atoi(s[0:i])
	if err != nil {
		// Fix: the conversion error was silently dropped, so input such as
		// "GB" (empty digit prefix) parsed as 0 with a nil error.
		*size = Size(v)
		return err
	}
	switch strings.ToUpper(s[i:]) {
	case "K", "KIB":
		*size = Size(v * KiB)
	case "M", "MIB":
		*size = Size(v * MiB)
	case "G", "GIB":
		*size = Size(v * GiB)
	case "KB":
		*size = Size(v * KB)
	case "MB":
		*size = Size(v * MB)
	case "GB":
		*size = Size(v * GB)
	default:
		*size = Size(v)
		// Fix: report the unrecognized unit itself, not the whole input.
		return errors.Errorf("unknown unit %s", s[i:])
	}
	return nil
}

// Binary (1024-based) and decimal (1000-based) unit multipliers.
const (
	KiB = 1024
	MiB = 1024 * KiB
	GiB = 1024 * MiB
	KB = 1000
	MB = 1000 * KB
	GB = 1000 * MB
)
|
package main
import (
"fmt"
// "flag"
"testing"
"time"
)
// NOTE(review): this file targets a pre-1.0 Go release — testing.CompileRegexp
// and string-valued errors no longer exist in the modern testing package, so
// it will not build with a current toolchain. Code left untouched.

// TestFunc is an intentionally empty, always-passing test.
func TestFunc(t *testing.T) {
}

// TestFunc1 exercises the ancient testing.CompileRegexp helper, whose error
// result was a string rather than an error value.
func TestFunc1(t *testing.T) {
	t.Log("hello")
	re, err := testing.CompileRegexp("mybench")
	if err != "" {
		t.Error("regexp compile failed")
	}
	if !re.MatchString("mybench") {
		t.Error("mybench regexp doesn't match mybench string")
	}
}

// TestFunc2 always fails; it demonstrates t.Error (which continues the test).
func TestFunc2(t *testing.T) {
	t.Error("Error occured")
}

// TestFunc3 always aborts; it demonstrates t.Fatal (which stops the test).
func TestFunc3(t *testing.T) {
	t.Fatal("Fatal error occured")
}

// BenchmarkFunc sleeps one second (1e9 ns) per run so the benchmark runner
// has something measurable.
func BenchmarkFunc (b *testing.B) {
	fmt.Println("Inside benchmark")
	time.Sleep(1e9)
}
// can't redeclare flag. Its already declared in testing package
//var matchBenchmarks = flag.String("benchmarks", "", "regular expression to select benchmarks to run")
//func main() {
//
//// if len(*matchBenchmarks) == 0 {
//// fmt.Println("Please specify -benchmarks='mybench' flag")
//// }
//
// flag.Parse()
//
//// fmt.Println(flag.Lookup("v").Value)
// if flag.Lookup("v").Value.String() == "false" {
// fmt.Println("Please specify -v=true for verbose print")
// }
//
// if len(flag.Lookup("benchmarks").Value.String()) == 0 {
// fmt.Println("Please specify -benchmarks='<regexp>' to run benchmarks. See benchmarks.go")
// } else {
// fmt.Println("You specified -" + flag.Lookup("benchmarks").Name + "=" + flag.Lookup("benchmarks").Value.String())
// fmt.Println("Running Benchmarks")
// bench := testing.Benchmark{"mybench", BenchmarkFunc}
//
// // These 2 lines wont work because benchmark field is not public
// // b := &testing.B{benchmark: bench}
// // b.run()
//
// testing.RunBenchmarks(&[...]testing.Benchmark{bench}) //Wont work because
// // of check for -benchmark flag on command line. So HAVE TO SUPPLY -benchmarks
// // on cmd line
// }
//
// fmt.Println("Running Testcases")
// tst := testing.Test {"mytest", TestFunc}
// tst1 := testing.Test {"mytest1", TestFunc1}
// tst2 := testing.Test {"mytest2", TestFunc2}
// tst3 := testing.Test {"mytest3", TestFunc3}
// tstarr := [...]testing.Test{tst, tst1, tst2, tst3} // FAIL
//// tstarr := [...]testing.Test{tst, tst1} // PASS
// testing.Main(&tstarr)
//
//}
|
package cluster
// Bootstrap drives the cluster bootstrap process. It currently carries
// no state.
type Bootstrap struct {
}

// NewBootstrap returns a ready-to-use, empty Bootstrap.
func NewBootstrap() *Bootstrap {
	return new(Bootstrap)
}
|
package httpservice
import (
"crypto-performance-compare/crypto"
"crypto-performance-compare/utils"
"fmt"
"html/template"
"net/http"
"strings"
)
// ServeStats renders the comparison template with the cached responses
// for every configured currency. It replies with HTTP 500 if the
// template cannot be parsed, if any currency's cached data cannot be
// read, or if template execution fails.
func (h *HTTPHandler) ServeStats(w http.ResponseWriter, r *http.Request) {
	data := make(map[string]interface{})
	tmpl, err := template.ParseFiles("./httpservice/template.html")
	if err != nil {
		http.Error(w, "error parsing template:"+err.Error(), http.StatusInternalServerError)
		return
	}
	items := utils.GetCurrencies()
	dataItems := make(map[string][]crypto.Response)
	for _, symbol := range items {
		// Distinct name: the original declared `data, err :=` here,
		// shadowing the outer `data` template map inside the loop.
		responses, err := h.cache.Read(symbol)
		if err != nil {
			http.Error(w, fmt.Sprintf("error reading data for %s : %s", symbol, err.Error()), http.StatusInternalServerError)
			return
		}
		dataItems[symbol] = responses
	}
	data["title"] = fmt.Sprintf("Compare %s", strings.Join(items, ", "))
	data["dataItems"] = dataItems
	err = tmpl.Execute(w, data)
	if err != nil {
		http.Error(w, "error serving data:"+err.Error(), http.StatusInternalServerError)
		return
	}
}
|
package rsapi_test
import (
"encoding/json"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/rightscale/rsc/cm15"
"github.com/rightscale/rsc/rsapi"
"github.com/rightscale/rsc/ss/ssm"
)
// Specs for rsapi.Normalize, which folds a single flattened
// "name[sub][]"-style parameter into a nested APIParams payload.
// Each context sets payload/name/value in BeforeEach; JustBeforeEach
// performs the call under test.
var _ = Describe("normalize", func() {
	var payload rsapi.APIParams
	var name string
	var value interface{}
	var res rsapi.APIParams
	var resErr error
	JustBeforeEach(func() {
		res, resErr = rsapi.Normalize(payload, name, value)
	})
	BeforeEach(func() {
		// Every example starts from an empty payload unless overridden.
		payload = rsapi.APIParams{}
	})
	Describe("with a simple string", func() {
		BeforeEach(func() {
			name = "val"
			value = "foo"
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			Ω(res).Should(Equal(rsapi.APIParams{"val": "foo"}))
		})
	})
	Describe("with an int", func() {
		BeforeEach(func() {
			name = "val"
			value = 42
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			Ω(res).Should(Equal(rsapi.APIParams{"val": 42}))
		})
	})
	Describe("with a float", func() {
		BeforeEach(func() {
			name = "val"
			value = 42.5
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			Ω(res).Should(Equal(rsapi.APIParams{"val": 42.5}))
		})
	})
	Describe("with a bool", func() {
		BeforeEach(func() {
			name = "val"
			value = true
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			Ω(res).Should(Equal(rsapi.APIParams{"val": true}))
		})
	})
	// "name[]" appends into an array value.
	Describe("with a simple array", func() {
		BeforeEach(func() {
			name = "val[]"
			value = "foo"
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			Ω(res).Should(Equal(rsapi.APIParams{"val": []interface{}{"foo"}}))
		})
	})
	// "name[key]" nests into a child APIParams map.
	Describe("with a simple map", func() {
		BeforeEach(func() {
			name = "val[a]"
			value = "foo"
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			Ω(res).Should(Equal(rsapi.APIParams{"val": rsapi.APIParams{"a": "foo"}}))
		})
	})
	Describe("with a map of arrays", func() {
		BeforeEach(func() {
			name = "val[a][]"
			value = "foo"
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			expected := rsapi.APIParams{
				"val": rsapi.APIParams{
					"a": []interface{}{"foo"},
				},
			}
			Ω(res).Should(Equal(expected))
		})
	})
	// Normalizing the same array key twice must append, not replace.
	Describe("with a map of arrays with existing values", func() {
		BeforeEach(func() {
			name = "val[a][]"
			value = "foo"
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			expected := rsapi.APIParams{
				"val": rsapi.APIParams{
					"a": []interface{}{"foo"},
				},
			}
			Ω(res).Should(Equal(expected))
			name = "val[a][]"
			value = "bar"
			res, resErr = rsapi.Normalize(res, name, value)
			Ω(resErr).ShouldNot(HaveOccurred())
			expected = rsapi.APIParams{
				"val": rsapi.APIParams{
					"a": []interface{}{"foo", "bar"},
				},
			}
			Ω(res).Should(Equal(expected))
		})
	})
	Describe("with an array of maps", func() {
		BeforeEach(func() {
			name = "val[][a]"
			value = "foo"
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			expected := rsapi.APIParams{
				"val": []interface{}{rsapi.APIParams{"a": "foo"}},
			}
			Ω(res).Should(Equal(expected))
		})
	})
	Describe("with an array of maps of arrays", func() {
		BeforeEach(func() {
			name = "val[][a][]"
			value = "foo"
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			expected := rsapi.APIParams{
				"val": []interface{}{rsapi.APIParams{"a": []interface{}{"foo"}}},
			}
			Ω(res).Should(Equal(expected))
		})
	})
	// A new key on "val[]" merges into the last map element...
	Describe("with an array of maps with existing keys", func() {
		BeforeEach(func() {
			name = "val[][a]"
			value = "foo"
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			expected := rsapi.APIParams{
				"val": []interface{}{rsapi.APIParams{"a": "foo"}},
			}
			Ω(res).Should(Equal(expected))
			name = "val[][b]"
			value = "bar"
			res, resErr = rsapi.Normalize(res, name, value)
			Ω(resErr).ShouldNot(HaveOccurred())
			expected = rsapi.APIParams{
				"val": []interface{}{rsapi.APIParams{"a": "foo", "b": "bar"}},
			}
			Ω(res).Should(Equal(expected))
		})
	})
	// ...but a key that already exists in the last element starts a new
	// map element instead.
	Describe("with an array of maps with existing keys with more than one element", func() {
		BeforeEach(func() {
			name = "val[][b]"
			value = "baz"
			payload = rsapi.APIParams{
				"val": []interface{}{rsapi.APIParams{"a": "foo", "b": "bar"}},
			}
		})
		It("sets the value", func() {
			Ω(resErr).ShouldNot(HaveOccurred())
			expected := rsapi.APIParams{
				"val": []interface{}{
					rsapi.APIParams{"a": "foo", "b": "bar"},
					rsapi.APIParams{"b": "baz"},
				},
			}
			Ω(res).Should(Equal(expected))
		})
	})
})
// Specs for API.ParseCommand against the Self-Service manager (ssm)
// API definition: flattened command params must become the right HTTP
// method, URI, query params, and nested payload.
// NOTE(review): hrefPrefix is never assigned, so every example runs
// with an empty prefix — presumably intentional; confirm.
var _ = Describe("ParseCommand", func() {
	var cmd, hrefPrefix string
	var values rsapi.ActionCommands
	var api *rsapi.API
	var parsed *rsapi.ParsedCommand
	var parseErr error
	BeforeEach(func() {
		values = nil
		ssm := ssm.New("", nil)
		api = ssm.API
	})
	JustBeforeEach(func() {
		parsed, parseErr = api.ParseCommand(cmd, hrefPrefix, values)
	})
	// "key:=json" params carry raw JSON values rather than strings.
	Describe("with some raw json bits", func() {
		BeforeEach(func() {
			cmd = "run"
			runCmd := rsapi.ActionCommand{
				Href: "/api/manager/projects/42/executions/54",
				Params: []string{
					"name=Tip CWF",
					"configuration_options[][name]=environment_name",
					`configuration_options[][value]:=["a","json","array"]`,
				},
			}
			values = rsapi.ActionCommands{"run": &runCmd}
		})
		It("parses", func() {
			Ω(parseErr).ShouldNot(HaveOccurred())
			Ω(parsed).ShouldNot(BeNil())
			payload := rsapi.APIParams{
				"name": "Tip CWF",
				"configuration_options": []interface{}{rsapi.APIParams{
					"name":  "environment_name",
					"value": []string{"a", "json", "array"},
				}},
			}
			expected := rsapi.ParsedCommand{
				HTTPMethod:    "POST",
				URI:           "/api/manager/projects/42/executions/54/actions/run",
				QueryParams:   rsapi.APIParams{},
				PayloadParams: payload,
			}
			// Compare via JSON dumps: []string vs []interface{} would
			// fail a direct deep-equality check.
			Ω(dumpJSON(parsed)).Should(Equal(dumpJSON(&expected)))
		})
	})
	Describe("with array of maps with one element", func() {
		BeforeEach(func() {
			cmd = "run"
			runCmd := rsapi.ActionCommand{
				Href: "/api/manager/projects/42/executions/54",
				Params: []string{
					"name=Tip CWF",
					"configuration_options[][name]=environment_name",
					"configuration_options[][type]=string",
					"configuration_options[][value]=ss2",
				},
			}
			values = rsapi.ActionCommands{"run": &runCmd}
		})
		It("parses", func() {
			Ω(parseErr).ShouldNot(HaveOccurred())
			Ω(parsed).ShouldNot(BeNil())
			payload := rsapi.APIParams{
				"name": "Tip CWF",
				"configuration_options": []interface{}{rsapi.APIParams{
					"name":  "environment_name",
					"type":  "string",
					"value": "ss2",
				}},
			}
			expected := rsapi.ParsedCommand{
				HTTPMethod:    "POST",
				URI:           "/api/manager/projects/42/executions/54/actions/run",
				QueryParams:   rsapi.APIParams{},
				PayloadParams: payload,
			}
			Ω(*parsed).Should(Equal(expected))
		})
	})
	// Repeated keys split the flat list into multiple map elements.
	Describe("with array of maps with two elements", func() {
		BeforeEach(func() {
			cmd = "run"
			runCmd := rsapi.ActionCommand{
				Href: "/api/manager/projects/42/executions/54",
				Params: []string{
					"name=Tip CWF2",
					"configuration_options[][name]=environment_name",
					"configuration_options[][type]=string",
					"configuration_options[][value]=ss2",
					"configuration_options[][name]=environment_name2",
					"configuration_options[][type]=string",
					"configuration_options[][value]=ss2",
				},
			}
			values = rsapi.ActionCommands{"run": &runCmd}
		})
		It("parses", func() {
			Ω(parseErr).ShouldNot(HaveOccurred())
			Ω(parsed).ShouldNot(BeNil())
			payload := rsapi.APIParams{
				"name": "Tip CWF2",
				"configuration_options": []interface{}{
					rsapi.APIParams{
						"name":  "environment_name",
						"type":  "string",
						"value": "ss2",
					},
					rsapi.APIParams{
						"name":  "environment_name2",
						"type":  "string",
						"value": "ss2",
					},
				},
			}
			expected := rsapi.ParsedCommand{
				HTTPMethod:    "POST",
				URI:           "/api/manager/projects/42/executions/54/actions/run",
				QueryParams:   rsapi.APIParams{},
				PayloadParams: payload,
			}
			Ω(*parsed).Should(Equal(expected))
		})
	})
	// GET actions route their params into the query string instead of
	// the payload.
	Describe("with an array of query parameters", func() {
		BeforeEach(func() {
			cmd = "index"
			indexCmd := rsapi.ActionCommand{
				Href: "/api/manager/projects/42/executions",
				Params: []string{
					"filter[]=status==running",
					"filter[]=status==stopped",
				},
			}
			values = rsapi.ActionCommands{"index": &indexCmd}
		})
		It("parses", func() {
			Ω(parseErr).ShouldNot(HaveOccurred())
			Ω(parsed).ShouldNot(BeNil())
			query := rsapi.APIParams{
				"filter[]": []interface{}{"status==running", "status==stopped"},
			}
			expected := rsapi.ParsedCommand{
				HTTPMethod:    "GET",
				URI:           "/api/manager/projects/42/executions",
				QueryParams:   query,
				PayloadParams: rsapi.APIParams{},
			}
			Ω(*parsed).Should(Equal(expected))
		})
	})
})
// Same ParseCommand machinery exercised against the cm15 (Cloud
// Management 1.5) API definition, focusing on deeply nested map
// payloads such as instance inputs.
var _ = Describe("ParseCommand with cm15", func() {
	var cmd, hrefPrefix string
	var values rsapi.ActionCommands
	var api *rsapi.API
	var parsed *rsapi.ParsedCommand
	var parseErr error
	BeforeEach(func() {
		values = nil
		cm := cm15.New("", nil)
		api = cm.API
	})
	JustBeforeEach(func() {
		parsed, parseErr = api.ParseCommand(cmd, hrefPrefix, values)
	})
	Describe("with a deep map of inputs", func() {
		BeforeEach(func() {
			cmd = "wrap_instance"
			wrapCmd := rsapi.ActionCommand{
				Href: "/api/servers",
				Params: []string{
					"server[name]=server name",
					"server[deployment_href]=/api/deployments/1",
					"server[instance][href]=/api/clouds/1/instances/42",
					"server[instance][server_template_href]=/api/server_templates/123",
					"server[instance][inputs][STRING_INPUT_1]=text:testing123",
					"server[instance][inputs][STRING_INPUT_2]=text:testing124",
				},
			}
			values = rsapi.ActionCommands{"wrap_instance": &wrapCmd}
		})
		It("parses", func() {
			Ω(parseErr).ShouldNot(HaveOccurred())
			Ω(parsed).ShouldNot(BeNil())
			payload := rsapi.APIParams{
				"server": rsapi.APIParams{
					"name":            "server name",
					"deployment_href": "/api/deployments/1",
					"instance": rsapi.APIParams{
						"href":                 "/api/clouds/1/instances/42",
						"server_template_href": "/api/server_templates/123",
						"inputs": rsapi.APIParams{
							"STRING_INPUT_1": "text:testing123",
							"STRING_INPUT_2": "text:testing124",
						},
					},
				},
			}
			expected := rsapi.ParsedCommand{
				HTTPMethod:    "POST",
				URI:           "/api/servers/wrap_instance",
				QueryParams:   rsapi.APIParams{},
				PayloadParams: payload,
			}
			Ω(*parsed).Should(Equal(expected))
		})
	})
})
// dumpJSON renders o as compact JSON for comparisons in the specs.
// A value that cannot be marshaled yields "" — the same result the
// previous ignored-error version produced, since string(nil) == "".
func dumpJSON(o interface{}) string {
	byt, err := json.Marshal(o)
	if err != nil {
		return ""
	}
	return string(byt)
}
|
// Package ctx contains setters and getters for request context.
//
//
package ctx
import (
"context"
"net/http"
)
// generic context setter
func set(key interface{}, value interface{}, r *http.Request) *http.Request {
ctx := r.Context()
ctx = context.WithValue(ctx, key, value)
return r.WithContext(ctx)
}
// string context getter
func getStr(key interface{}, r *http.Request) (string, bool) {
ctx := r.Context()
val, ok := ctx.Value(key).(string)
return val, ok
}
|
package main
import "fmt"
// main deliberately indexes past the end of a fixed-size array and
// relies on the deferred recover to turn the resulting runtime panic
// into a printed message instead of a crash.
func main() {
	defer func() {
		if e := recover(); e != nil {
			fmt.Println(e) // e.g. "runtime error: index out of range"
		}
	}()
	var arr [2]int
	idx := 2 // a variable, not a constant, so the compiler accepts it
	println(arr[idx])
}
|
package HashTable
import (
"github.com/Amertz08/EECS560-go/Lab01/LinkedList"
"fmt"
)
// HashTable is a fixed-bucket hash table of ints using separate
// chaining: each bucket is a linked list.
type HashTable struct {
	list []LinkedList.LinkedList // one chain per bucket; len == mod
	mod  int                     // bucket count, also the hashing modulus
}
// NewHashTable builds a table with mod buckets, each backed by a fresh
// empty linked list.
func NewHashTable(mod int) HashTable {
	buckets := make([]LinkedList.LinkedList, 0, mod)
	for i := 0; i < mod; i++ {
		buckets = append(buckets, LinkedList.NewLinkedList())
	}
	return HashTable{mod: mod, list: buckets}
}
// Print writes every bucket to stdout as "<index>: " followed by the
// chain's own Print output.
func (t *HashTable) Print() {
	for i := 0; i < t.mod; i++ {
		fmt.Printf("%d: ", i)
		t.list[i].Print()
	}
}
// hash maps val to a bucket index in [0, t.mod). The add-and-mod keeps
// the result non-negative: Go's % operator yields a negative remainder
// for negative operands, and the previous plain `val % t.mod` produced
// an out-of-range index (and a panic) in Insert/Delete/Find for
// negative values. Non-negative inputs hash exactly as before.
func (t *HashTable) hash(val int) int {
	return ((val % t.mod) + t.mod) % t.mod
}
// Insert adds val to the front of its bucket's chain. Duplicates are
// rejected with a printed notice and leave the table unchanged.
func (t *HashTable) Insert(val int) {
	if t.Find(val) {
		fmt.Println("Value already exists")
		return
	}
	t.list[t.hash(val)].InsertFront(val)
}
// Delete removes val from its bucket's chain (a no-op if absent,
// assuming Erase tolerates missing values — behavior lives in the
// LinkedList package).
func (t *HashTable) Delete(val int) {
	t.list[t.hash(val)].Erase(val)
}
// Find reports whether val is present in its bucket's chain.
func (t *HashTable) Find(val int) bool {
	return t.list[t.hash(val)].Find(val)
}
package opa_auditor
import (
"fmt"
"time"
"github.com/MagalixCorp/magalix-agent/v3/agent"
"github.com/MagalixCorp/magalix-agent/v3/entities"
"github.com/MagalixCorp/magalix-agent/v3/kuber"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
opa "github.com/MagalixTechnologies/opa-core"
)
const (
	// PolicyQuery is the rego rule name queried when evaluating every
	// policy template.
	PolicyQuery = "violation"
)
// Template is a cached, parsed policy template shared by one or more
// constraints.
type Template struct {
	Id          string
	Name        string
	Policy      opa.Policy // parsed rego code, ready for evaluation
	Description string
	HowToSolve  string
	UsageCount  int // number of cached constraints referencing this template
}
// Constraint is the auditor's cached form of an agent.Constraint: one
// parameterized instantiation of a Template, plus the entity-matching
// rules and classification metadata attached to audit results.
type Constraint struct {
	Id         string
	TemplateId string
	Name       string
	Parameters map[string]interface{} // values substituted into the template
	Match      agent.Match            // which kinds/namespaces/labels it applies to
	UpdatedAt  time.Time              // used to decide whether an incoming copy is newer
	CategoryId string
	Severity   string
	Controls   []string
	Standards  []string
}
// OpaAuditor evaluates cluster resources against cached OPA
// constraints and remembers the last status per (constraint, resource)
// so unchanged results can be skipped.
type OpaAuditor struct {
	templates       map[string]*Template   // keyed by template id
	constraints     map[string]*Constraint // keyed by constraint id
	cache           *AuditResultsCache     // last known status per constraint/resource
	entitiesWatcher entities.EntitiesWatcherSource
}
// New returns an OpaAuditor with empty template/constraint caches,
// using entitiesWatcher to resolve resource parent relationships.
func New(entitiesWatcher entities.EntitiesWatcherSource) *OpaAuditor {
	return &OpaAuditor{
		templates:       make(map[string]*Template),
		constraints:     make(map[string]*Constraint),
		cache:           NewAuditResultsCache(),
		entitiesWatcher: entitiesWatcher,
	}
}
// GetConstraintsSize returns the number of cached constraints.
func (a *OpaAuditor) GetConstraintsSize() int {
	return len(a.constraints)
}
// UpdateConstraint inserts or refreshes the cached copy of constraint
// and its backing template. It returns true when the cache changed and
// an error when the template's rego code fails to parse.
//
// Behavior (unchanged from the original, with the duplicated
// constraint/template construction factored into helpers):
//   - unseen constraint: cache it; parse and cache the template on
//     first use, otherwise bump the existing template's usage count.
//   - seen constraint with a newer UpdatedAt: overwrite both the
//     constraint and the template, preserving the usage count.
//   - otherwise: no change.
func (a *OpaAuditor) UpdateConstraint(constraint *agent.Constraint) (bool, error) {
	cId := constraint.Id
	tId := constraint.TemplateId
	c, cFound := a.constraints[cId]
	t, tFound := a.templates[tId]

	if !cFound {
		a.constraints[cId] = copyAgentConstraint(constraint)
		if tFound {
			t.UsageCount++
			return true, nil
		}
		tmpl, err := parseAgentTemplate(constraint, 1)
		if err != nil {
			// As before: the constraint entry stays cached even when
			// its template fails to parse.
			return false, err
		}
		a.templates[tId] = tmpl
		return true, nil
	}

	if constraint.UpdatedAt.After(c.UpdatedAt) {
		a.constraints[cId] = copyAgentConstraint(constraint)
		// NOTE(review): like the original, this assumes a cached
		// constraint always has a cached template; t is nil otherwise
		// and t.UsageCount panics.
		tmpl, err := parseAgentTemplate(constraint, t.UsageCount)
		if err != nil {
			return false, err
		}
		a.templates[tId] = tmpl
		return true, nil
	}

	return false, nil
}

// copyAgentConstraint converts the agent representation into the
// auditor's cached form.
func copyAgentConstraint(constraint *agent.Constraint) *Constraint {
	return &Constraint{
		Id:         constraint.Id,
		TemplateId: constraint.TemplateId,
		Name:       constraint.Name,
		Parameters: constraint.Parameters,
		Match:      constraint.Match,
		UpdatedAt:  constraint.UpdatedAt,
		CategoryId: constraint.CategoryId,
		Severity:   constraint.Severity,
		Standards:  constraint.Standards,
		Controls:   constraint.Controls,
	}
}

// parseAgentTemplate parses the constraint's rego code and wraps it in
// a Template carrying the given usage count.
func parseAgentTemplate(constraint *agent.Constraint, usageCount int) (*Template, error) {
	policy, err := opa.Parse(constraint.Code, PolicyQuery)
	if err != nil {
		return nil, errors.Wrapf(
			err, "couldn't parse template %s, template id: %s for constraint: %s, constraint id: %s",
			constraint.TemplateName, constraint.TemplateId, constraint.Name, constraint.Id,
		)
	}
	return &Template{
		Id:          constraint.TemplateId,
		Name:        constraint.TemplateName,
		Policy:      policy,
		Description: constraint.Description,
		HowToSolve:  constraint.HowToSolve,
		UsageCount:  usageCount,
	}, nil
}
// RemoveConstraint drops the constraint with the given id, decrements
// its template's usage count (removing the template once unused), and
// clears the constraint's cached audit results. Unknown ids are
// ignored.
func (a *OpaAuditor) RemoveConstraint(id string) {
	c, cFound := a.constraints[id]
	if cFound {
		t := a.templates[c.TemplateId]
		if t.UsageCount > 1 {
			t.UsageCount--
		} else {
			delete(a.templates, c.TemplateId)
		}
		delete(a.constraints, id)
		a.cache.RemoveConstraint(id)
	}
}
// UpdateConstraints applies a batch of constraint changes: deleted
// constraints are removed, the rest inserted/refreshed through
// UpdateConstraint. It returns the ids that actually changed (whose
// cached audit results are invalidated so they get re-audited) and a
// per-constraint map of parse errors.
func (a *OpaAuditor) UpdateConstraints(constraints []*agent.Constraint) ([]string, map[string]error) {
	errorsMap := make(map[string]error)
	updated := make([]string, 0)
	for _, constraint := range constraints {
		if constraint.DeletedAt != nil {
			a.RemoveConstraint(constraint.Id)
			continue
		}
		constraintUpdated, err := a.UpdateConstraint(constraint)
		if err != nil {
			errorsMap[constraint.Id] = err
			continue
		}
		if constraintUpdated {
			updated = append(updated, constraint.Id)
			// Invalidate old results so the changed constraint is
			// re-evaluated from scratch.
			a.cache.RemoveConstraint(constraint.Id)
		}
	}
	return updated, errorsMap
}
// RemoveResource forgets all cached audit results for resource.
func (a *OpaAuditor) RemoveResource(resource *unstructured.Unstructured) {
	a.cache.RemoveResource(getResourceKey(resource))
}
// CheckResourceStatusWithConstraint reports whether currentStatus
// differs from the cached status for this constraint/resource pair
// (true also when nothing is cached yet).
func (a *OpaAuditor) CheckResourceStatusWithConstraint(constraintId string, resource *unstructured.Unstructured, currentStatus agent.AuditResultStatus) bool {
	oldStatus, found := a.cache.Get(constraintId, getResourceKey(resource))
	return !found || oldStatus != currentStatus
}
// UpdateCache records each result's status keyed by (constraint id,
// entity key) so later audits can detect status changes. Nil identity
// fields are treated as empty strings; ConstraintID is assumed non-nil.
func (a *OpaAuditor) UpdateCache(results []*agent.AuditResult) {
	for i := range results {
		result := results[i]
		namespace := ""
		if result.NamespaceName != nil {
			namespace = *result.NamespaceName
		}
		kind := ""
		if result.EntityKind != nil {
			kind = *result.EntityKind
		}
		name := ""
		if result.EntityName != nil {
			name = *result.EntityName
		}
		a.cache.Put(*result.ConstraintID, kuber.GetEntityKey(namespace, kind, name), result.Status)
	}
}
// evaluate constraint, construct recommendation obj
//
// Audit evaluates resource against the selected constraints (all
// cached ones when constraintIds is empty) and returns one AuditResult
// per matching constraint plus any evaluation errors. Resources owned
// by another object are skipped outright, and Pods/ReplicaSets with a
// known parent are skipped so only the top-level owner is reported.
func (a *OpaAuditor) Audit(resource *unstructured.Unstructured, constraintIds []string, triggerType string) ([]*agent.AuditResult, []error) {
	if len(resource.GetOwnerReferences()) > 0 {
		return nil, nil
	}
	constraints := getConstraints(constraintIds, a.constraints)
	results := make([]*agent.AuditResult, 0, len(constraints))
	errs := make([]error, 0)
	// Get resource identity info based on resource type
	namespace := resource.GetNamespace()
	kind := resource.GetKind()
	name := resource.GetName()
	parent, found := a.entitiesWatcher.GetParents(namespace, kind, name)
	var parentName, parentKind string
	if found && parent != nil {
		// Ignore audit result for pod and replicasets with parents
		if kind == "Pod" || kind == "ReplicaSet" {
			return nil, nil
		}
		// RootParent func should move outside kuber
		topParent := kuber.RootParent(parent)
		parentName = topParent.Name
		parentKind = topParent.Kind
	}
	for idx := range constraints {
		c := constraints[idx]
		// Local copies so the &-references below stay stable per result.
		templateId := c.TemplateId
		constraintId := c.Id
		categoryId := c.CategoryId
		severity := c.Severity
		match := matchEntity(resource, c.Match)
		if !match {
			continue
		} else {
			res := agent.AuditResult{
				TemplateID:    &templateId,
				ConstraintID:  &constraintId,
				CategoryID:    &categoryId,
				Severity:      &severity,
				Standards:     c.Standards,
				Controls:      c.Controls,
				Description:   a.templates[templateId].Description,
				HowToSolve:    a.templates[templateId].HowToSolve,
				EntityName:    &name,
				EntityKind:    &kind,
				NamespaceName: &namespace,
				ParentName:    &parentName,
				ParentKind:    &parentKind,
				EntitySpec:    resource.Object,
				Trigger:       triggerType,
			}
			res.GenerateID()
			t := a.templates[c.TemplateId]
			// A policy violation surfaces as an opa.OPAError; any other
			// error is an evaluation failure collected into errs.
			err := t.Policy.EvalGateKeeperCompliant(resource.Object, c.Parameters, PolicyQuery)
			var opaErr opa.OPAError
			if err != nil {
				if errors.As(err, &opaErr) {
					details := make(map[string]interface{})
					detailsInt := opaErr.GetDetails()
					detailsMap, ok := detailsInt.(map[string]interface{})
					if ok {
						details = detailsMap
					} else {
						details["issue"] = detailsInt
					}
					// Prefer the policy-provided "msg"; fall back to the
					// constraint name.
					var title string
					if msg, ok := details["msg"]; ok {
						title = msg.(string)
					} else {
						title = c.Name
					}
					msg := fmt.Sprintf("%s in %s %s", title, kind, name)
					res.Status = agent.AuditResultStatusViolating
					res.Msg = &msg
				} else {
					errs = append(errs, fmt.Errorf("unable to evaluate resource against policy. template id: %s, constraint id: %s. %w", c.TemplateId, c.Id, err))
				}
			} else {
				res.Status = agent.AuditResultStatusCompliant
			}
			results = append(results, &res)
		}
	}
	return results, errs
}
// getConstraints selects the constraints to audit: the whole cache
// when constraintIds is empty, otherwise only the cached entries whose
// ids are listed (unknown ids are silently skipped).
func getConstraints(constraintIds []string, cachedConstraints map[string]*Constraint) map[string]*Constraint {
	// len() of a nil slice is 0, so the former `== nil ||` guard was
	// redundant (staticcheck S1009).
	if len(constraintIds) == 0 {
		return cachedConstraints
	}
	constraints := make(map[string]*Constraint, len(constraintIds))
	for _, id := range constraintIds {
		if c, ok := cachedConstraints[id]; ok {
			constraints[id] = c
		}
	}
	return constraints
}
// matchEntity reports whether resource satisfies the constraint's
// match spec. Each dimension (kinds, namespaces, labels) matches when
// its list is empty or at least one entry matches; all three must
// hold. A label value of "*" matches any value for that key.
func matchEntity(resource *unstructured.Unstructured, match agent.Match) bool {
	var matchKind bool
	var matchNamespace bool
	var matchLabel bool
	if len(match.Kinds) == 0 {
		matchKind = true
	} else {
		resourceKind := resource.GetKind()
		for _, kind := range match.Kinds {
			if resourceKind == kind {
				matchKind = true
				break
			}
		}
	}
	if len(match.Namespaces) == 0 {
		matchNamespace = true
	} else {
		resourceNamespace := resource.GetNamespace()
		for _, namespace := range match.Namespaces {
			if resourceNamespace == namespace {
				matchNamespace = true
				break
			}
		}
	}
	if len(match.Labels) == 0 {
		matchLabel = true
	} else {
		// First label selector that matches any resource label wins.
	outer:
		for _, obj := range match.Labels {
			for key, val := range obj {
				entityVal, ok := resource.GetLabels()[key]
				if ok {
					if val != "*" && val != entityVal {
						continue
					}
					matchLabel = true
					break outer
				}
			}
		}
	}
	return matchKind && matchNamespace && matchLabel
}
// getResourceKey builds the cache key (namespace/kind/name) used by
// the audit-results cache for a resource.
func getResourceKey(resource *unstructured.Unstructured) string {
	return kuber.GetEntityKey(resource.GetNamespace(), resource.GetKind(), resource.GetName())
}
|
package config
import (
"testing"
"fmt"
)
// SysParameters bundles the tunables loaded from the configuration
// store.
type SysParameters struct {
	// websocket settings
	Port              int   // open port of the service
	WriteWait         int   // milliseconds until a write times out
	PongWait          int   // timeout for waiting on pong
	PingPeriod        int   // milliseconds between pings
	MaxMessageSize    int64 // maximum size in bytes of a message
	MessageBufferSize int
	// DB and cache settings
	DBInfoTable    string
	CacheInfoTable string
}

// GetType reports the parameter-set type identifier.
func (h *SysParameters) GetType() string {
	return "test"
}
// TestConfigDB_GetParameters is an integration check: it connects to a
// live MySQL instance and fills SysParameters from the params table.
// NOTE(review): host and credentials are hardcoded — acceptable for a
// lab setup, but they should come from the environment before this
// runs anywhere shared, and the test will fail without that database.
func TestConfigDB_GetParameters(t *testing.T) {
	config := ConfigDB{
		DriverName: "mysql",
		Dbhost:     "172.26.164.74",
		Dbport:     "3306",
		Dbuser:     "root",
		Dbpassword: "12345678",
		Dbname:     "dbconfig",
		Tblname:    "params",
	}
	param := SysParameters{}
	config.GetParameters(&param)
	fmt.Println(param)
}
|
package tickets
import (
"encoding/json"
"net/http"
"os"
// CORS
"github.com/rs/cors"
// JWT
"github.com/dgrijalva/jwt-go"
// JSON Web Tokens middleware Auth0
"github.com/auth0/go-jwt-middleware"
// Mongodb
"gopkg.in/mgo.v2"
// Gorilla Mux
"github.com/gorilla/mux"
// Negroni framework
"github.com/urfave/negroni"
// Stats
"github.com/thoas/stats"
)
// Router Returns the mux api router with CORS, stats and logging
func Router() *mux.Router {
apirouter := mux.NewRouter()
//For production, keep HTTPSProtection = true
/*
HTTPSProtection := false
if HTTPSProtection {
apirouter.Use(restgate.New("X-Auth-Key", "X-Auth-Secret", restgate.Static, restgate.Config{HTTPSProtectionOff: false, Key: []string{c.API_ENDPOINT_KEY}, Secret: []string{c.API_ENDPOINT_SECRET}}))
}
*/
// API Router
apirouter.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Ticketing API\n"))
})
return apirouter
}
// V1Router mounts the public /api/v1 subrouter on apirouter, wrapped
// in negroni with panic recovery, request logging, stats collection
// and CORS restricted to the store origin. The returned subrouter is
// what handlers should be registered on.
func V1Router(apirouter *mux.Router) *mux.Router {
	// Stats middleware
	statsmw := stats.New()
	// CORS for cross-domain access controls
	corsmw := cors.New(cors.Options{
		AllowedOrigins:   []string{"https://store.kings.cam.ac.uk"},
		AllowedMethods:   []string{"GET", "HEAD", "PUT", "PATCH", "POST", "DELETE"},
		AllowCredentials: true,
		AllowedHeaders:   []string{"*"},
	})
	// API V1 router
	apiv1router := mux.NewRouter().PathPrefix("/api/v1").Subrouter().StrictSlash(true)
	apirouter.PathPrefix("/api/v1").Handler(negroni.New(
		negroni.NewRecovery(),
		negroni.NewLogger(),
		statsmw,
		corsmw,
		negroni.Wrap(apiv1router),
	))
	// Stats
	// /api/v1/stats dumps the middleware's collected metrics as JSON.
	apiv1router.HandleFunc("/stats", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		stats := statsmw.Data()
		b, _ := json.Marshal(stats)
		w.Write(b)
	})
	return apiv1router
}
// V1CONFIGRouter mounts the /config subrouter on apirouter. It has the
// same middleware stack as V1Router plus Auth0 JWT validation (HS256,
// secret from the Auth0 env var), so every /config route requires a
// valid token.
func V1CONFIGRouter(apirouter *mux.Router) *mux.Router {
	// Stats middleware
	statsmw := stats.New()
	// CORS for cross-domain access controls
	corsmw := cors.New(cors.Options{
		AllowedOrigins:   []string{"https://store.kings.cam.ac.uk"},
		AllowedMethods:   []string{"GET", "HEAD", "PUT", "PATCH", "POST", "DELETE"},
		AllowCredentials: true,
		AllowedHeaders:   []string{"*"},
	})
	// Auth0 JWT middleware
	jwtMiddleware := jwtmiddleware.New(jwtmiddleware.Options{
		ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
			return []byte(os.Getenv("Auth0")), nil
		},
		SigningMethod: jwt.SigningMethodHS256,
	})
	// API V1CONFIG router
	apiconfigrouter := mux.NewRouter().PathPrefix("/config").Subrouter().StrictSlash(true)
	apirouter.PathPrefix("/config").Handler(negroni.New(
		negroni.NewRecovery(),
		negroni.NewLogger(),
		negroni.HandlerFunc(jwtMiddleware.HandlerWithNext),
		statsmw,
		corsmw,
		negroni.Wrap(apiconfigrouter),
	))
	// Stats
	// /config/stats dumps the middleware's collected metrics as JSON.
	apiconfigrouter.HandleFunc("/stats", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		stats := statsmw.Data()
		b, _ := json.Marshal(stats)
		w.Write(b)
	})
	return apiconfigrouter
}
// Routes define API version 1.0 router for tickets package
func Routes(apiv1router *mux.Router, session *mgo.Session) {
// API version 1.0 welcome
apiv1router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Ticketing API version 1!\n"))
})
// Booking dates
apiv1router.HandleFunc("/dates", BookingDates(session, false)).Methods("GET")
// Get Session
apiv1router.HandleFunc("/sessions/{date}", BookingSessions(session)).Methods("GET")
// Get pricing
apiv1router.HandleFunc("/prices", GetPrices(session)).Methods("GET")
// Create a new booking
apiv1router.HandleFunc("/bookings/{uuid}", CreateBooking(session)).Methods("POST")
}
// ConfigRoutes registers the admin/config API version 1.0 endpoints
// (booking configuration, test fixtures, and full booking CRUD) on
// apiv1router, backed by the given mongo session. These routes are
// expected to sit behind the JWT-protected config router.
func ConfigRoutes(apiv1router *mux.Router, session *mgo.Session) {
	// API version 1.0 welcome
	apiv1router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("Ticketing API config version 1!\n"))
	})
	// Config Booking dates
	apiv1router.HandleFunc("/dates", ConfigBookingDates(session, false)).Methods("POST")
	// Get Config Booking dates
	apiv1router.HandleFunc("/dates", GetConfigDates(session, false)).Methods("GET")
	// Config Booking dates
	apiv1router.HandleFunc("/prices", ConfigPricing(session)).Methods("POST")
	// Test configuration
	// Test config Booking dates
	apiv1router.HandleFunc("/test/dates", ConfigBookingDates(session, true)).Methods("POST")
	// Test booking dates
	apiv1router.HandleFunc("/test/dates", BookingDates(session, true)).Methods("GET")
	// Get existing booking
	apiv1router.HandleFunc("/bookings/{uuid}", GetBooking(session)).Methods("GET")
	// Return all bookings
	apiv1router.HandleFunc("/bookings", GetBookings(session)).Methods("GET")
	// Return all bookings matching a date
	apiv1router.HandleFunc("/bookings/date/{date}", GetBookingsDate(session)).Methods("GET")
	// Return a summary of bookings matching a date range
	apiv1router.HandleFunc("/bookings/range/{date}", GetBookingsRangeSummary(session)).Methods("GET")
	// Update an existing booking
	apiv1router.HandleFunc("/bookings/{uuid}", UpdateBooking(session)).Methods("PUT")
	// Delete an existing booking
	apiv1router.HandleFunc("/bookings/{uuid}", DeleteBooking(session)).Methods("DELETE")
}
|
package main
import "fmt"
// main exercises productExceptSelf on a handful of sample inputs,
// including zero-containing and degenerate arrays, printing each
// result.
func main() {
	fmt.Println("Product of Array Except Self")
	fmt.Println("Result= ", productExceptSelf([]int{1, 2, 3, 4}))
	fmt.Println("Result= ", productExceptSelf([]int{1, 1}))
	fmt.Println("Result= ", productExceptSelf([]int{1, -1}))
	fmt.Println("Result= ", productExceptSelf([]int{0, 0}))
	fmt.Println("Result= ", productExceptSelf([]int{0, 1, 2, 3}))
	fmt.Println("Result= ", productExceptSelf([]int{1, 2, 3, 0}))
	fmt.Println("Result= ", productExceptSelf([]int{5, 2, 0, 10, 5}))
}
//using 1 slice
func productExceptSelf(nums []int) []int {
l := len(nums)
last := make([]int, l)
last[l-1] = nums[l-1]
for i := l - 2; i > -1; i-- {
last[i] = nums[i] * last[i+1]
}
first := nums[0]
nums[0] = last[1]
for i := 1; i < l; i++ {
if i == l-1 {
nums[i] = first
continue
}
nums[i], first = first*last[i+1], first*nums[i]
}
return nums
}
//using 2 slice
func productExceptSelf_old(nums []int) []int {
l := len(nums)
first, last := make([]int, l), make([]int, l)
first[0], last[l-1] = nums[0], nums[l-1]
for i := 1; i < l; i++ {
first[i] = nums[i] * first[i-1]
}
for i := l - 2; i > -1; i-- {
last[i] = nums[i] * last[i+1]
}
nums[0] = last[1]
nums[l-1] = first[l-2]
for i := 1; i < l-1; i++ {
nums[i] = first[i-1] * last[i+1]
}
return nums
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)
// sess is the shared AWS session, built from the environment and the
// shared config files; session.Must panics at startup on bad config.
var sess = session.Must(session.NewSessionWithOptions(session.Options{
	SharedConfigState: session.SharedConfigEnable,
}))

// cloudwatch is the CloudWatch Logs client shared by all fetchers.
var cloudwatch = cloudwatchlogs.New(sess)

// logPayload is one batch of events fetched from a single log stream.
type logPayload struct {
	logStream string
	events    []*cloudwatchlogs.OutputLogEvent
}

// logChannel funnels batches from the per-stream fetcher goroutines
// into the single printer goroutine (unbuffered, so fetchers block
// until the printer is ready).
var logChannel chan logPayload = make(chan logPayload)
// printLogs is the sole consumer of logChannel: it prints each
// non-empty batch under a tail-style "==> stream <==" header. It loops
// forever and is meant to run as a goroutine.
func printLogs() {
	for {
		payload := <-logChannel
		if len(payload.events) == 0 {
			continue
		}
		fmt.Println()
		fmt.Println("==>", payload.logStream, "<==")
		for _, event := range payload.events {
			fmt.Println(*event.Message)
		}
	}
}
// logFetch pulls the next page (up to 10 events) of logStreamName
// starting at token, ships the batch to the printer, and returns the
// token for the following call. When the token does not advance (no
// new events) it sleeps 5s to avoid hammering the API. Any API error
// is fatal for the whole process.
func logFetch(logGroupName string, logStreamName string, token string) string {
	input := &cloudwatchlogs.GetLogEventsInput{
		Limit:         aws.Int64(10),
		LogGroupName:  aws.String(logGroupName),
		LogStreamName: aws.String(logStreamName),
		NextToken:     aws.String(token),
		StartFromHead: aws.Bool(true),
	}
	output, err := cloudwatch.GetLogEvents(input)
	if err != nil {
		log.Fatal(err)
	}
	logChannel <- logPayload{
		events:    output.Events,
		logStream: logStreamName,
	}
	if *output.NextForwardToken == token {
		time.Sleep(5 * time.Second)
	}
	return *output.NextForwardToken
}
// initialLogFetch performs the first GetLogEvents call for a stream —
// no NextToken yet — and returns the forward token for subsequent
// logFetch calls. It largely duplicates logFetch; kept separate,
// presumably because an empty NextToken string is not accepted by the
// API — TODO confirm.
func initialLogFetch(logGroupName string, logStreamName string) string {
	input := &cloudwatchlogs.GetLogEventsInput{
		Limit:         aws.Int64(10),
		LogGroupName:  aws.String(logGroupName),
		LogStreamName: aws.String(logStreamName),
	}
	output, err := cloudwatch.GetLogEvents(input)
	if err != nil {
		log.Fatal(err)
	}
	logChannel <- logPayload{
		events:    output.Events,
		logStream: logStreamName,
	}
	return *output.NextForwardToken
}
// logStreams remembers which streams already have a watcher goroutine.
// Only the watchLogGroup loop touches it, so no locking is needed.
var logStreams map[string]bool = map[string]bool{}

// fetchLogStreams lists the group's most recently active streams and
// spawns a watcher goroutine for each stream that logged within the
// last 24 hours and is not already being watched.
func fetchLogStreams(logGroup string) {
	// just looking at the last 50 - because this polls if you have more recent
	// events they'll bubble up but i never have more than 50 active log streams
	input := &cloudwatchlogs.DescribeLogStreamsInput{
		Descending:   aws.Bool(true),
		LogGroupName: aws.String(logGroup),
		OrderBy:      aws.String("LastEventTime"),
	}
	output, err := cloudwatch.DescribeLogStreams(input)
	if err != nil {
		log.Fatal(err)
	}
	dayAgo := time.Now().Add(-24*time.Hour).Unix() * 1000
	for _, stream := range output.LogStreams {
		// LastEventTimestamp is a pointer and is unset for streams
		// that never received an event; the previous unconditional
		// dereference panicked on such streams.
		if stream.LastEventTimestamp == nil {
			continue
		}
		if *stream.LastEventTimestamp >= dayAgo && !logStreams[*stream.LogStreamName] {
			logStreams[*stream.LogStreamName] = true
			go watchLogStream(logGroup, *stream.LogStreamName)
		}
	}
}
// watchLogGroup polls the group once a minute for new streams to
// watch. It never returns.
func watchLogGroup(logGroup string) {
	for {
		fetchLogStreams(logGroup)
		time.Sleep(time.Minute)
	}
}
// watchLogStream tails a single stream forever, threading the
// pagination token from one fetch into the next.
func watchLogStream(logGroup string, logStream string) {
	token := initialLogFetch(logGroup, logStream)
	for {
		token = logFetch(logGroup, logStream, token)
	}
}
// main: `cloudtail version` prints the build version and exits;
// otherwise -log selects the CloudWatch log group to tail and the
// optional -stream flag restricts the tail to a single stream. The
// printer goroutine and the watch loops run for the life of the
// process.
func main() {
	if len(os.Args) > 1 && os.Args[1] == "version" {
		// TODO move this to something not hardcoded
		fmt.Println("cloudtail version v0.0.2")
		os.Exit(2)
	}
	logGroup := flag.String("log", "", "log group")
	logStream := flag.String("stream", "", "log stream")
	flag.Parse()
	if *logGroup == "" {
		flag.PrintDefaults()
		log.Fatal("must pass a log group, -log=xxx")
	}
	go printLogs()
	if *logStream == "" {
		watchLogGroup(*logGroup)
	} else {
		watchLogStream(*logGroup, *logStream)
	}
}
|
package cli
import (
"bytes"
"errors"
"io"
"strings"
"testing"
"github.com/pjbgf/go-test/should"
)
// TestNewConsole verifies that NewConsole panics when either output writer is
// nil and constructs normally otherwise.
func TestNewConsole(t *testing.T) {
	assertThat := func(assumption string, stdOut, stdErr *bytes.Buffer, shouldError bool) {
		should := should.New(t)
		hasErrored := false
		// Run the constructor inside a nested function so a panic is recovered
		// *before* the assertion below executes. Previously the recover was
		// deferred on the outer closure, so a panic skipped the assertion
		// entirely and the "should panic" expectations were never checked.
		func() {
			defer func() {
				if r := recover(); r != nil {
					hasErrored = true
				}
			}()
			NewConsole(stdOut, stdErr, func(int) {})
		}()
		should.BeEqual(shouldError, hasErrored, assumption)
	}
	var stdOut, stdErr bytes.Buffer
	assertThat("should panic for nil stdOut", nil, &stdErr, true)
	assertThat("should panic for nil stdErr", &stdOut, nil, true)
	assertThat("should not panic if stdErr and stdOut are not nil", &stdOut, &stdErr, false)
}
// commandStub is a test double for cliCommand that records whether it was
// executed and returns a canned error.
type commandStub struct {
	hasExecuted bool  // set once run is called
	err         error // returned verbatim from run
}

// run implements cliCommand for the stub.
func (c *commandStub) run(output io.Writer) error {
	c.hasExecuted = true
	return c.err
}
// TestRun checks Console.Run's control flow: a command-factory error prevents
// execution, a command error triggers the exit handler, and a clean run
// executes the command without error.
func TestRun(t *testing.T) {
	assertThat := func(assumption string, factoryErr, cmdErr error, errored, executedCmd bool) {
		should := should.New(t)
		var (
			hasErrored     bool = false
			stdOut, stdErr bytes.Buffer
		)
		stub := &commandStub{err: cmdErr}
		// The exit handler only records that an error path was taken.
		c := NewConsole(&stdOut, &stdErr, func(code int) { hasErrored = true })
		c.commandFactory = func(args []string) (cliCommand, error) {
			return stub, factoryErr
		}
		c.Run([]string{})
		should.BeEqual(errored, hasErrored, assumption)
		should.BeEqual(executedCmd, stub.hasExecuted, assumption)
	}
	assertThat("should not run command when get error", errors.New("some error"), nil, true, false)
	assertThat("should run command when no errors", nil, nil, false, true)
	assertThat("should handle command errors", nil, errors.New("cmd error"), true, true)
}
// TestCli_GetCommand ensures getCommand maps raw CLI arguments to the correct
// command implementation (or to an error for invalid syntax).
func TestCli_GetCommand(t *testing.T) {
	assertThat := func(assumption string, command string, expected interface{}, expectedErr error) {
		should := should.New(t)
		// The previous version declared a bytes.Buffer that was never written
		// to and asserted it was empty — a vacuous check, removed here.
		cmdGot, err := getCommand(strings.Split(command, " "))
		should.BeEqual(expectedErr, err, assumption)
		should.HaveSameType(expected, cmdGot, assumption)
	}
	assertThat("should get 'template' subcommand", "zaz seccomp template web", &seccompTemplate{}, nil)
	assertThat("should get 'brute-force' subcommand", "zaz seccomp docker alpine", &seccompBruteForce{}, nil)
	assertThat("should get 'from-go' subcommand", "zaz seccomp ../../test/simple-app", &seccompFromGo{}, nil)
	assertThat("should get 'from-log' subcommand", "zaz seccomp --log-file=../../test/syslog 123", &seccompFromLog{}, nil)
	assertThat("should error for invalid command", "zaz something", nil, errors.New("invalid syntax"))
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package apng
// PngHeader is the 8-byte signature that begins every PNG file.
const PngHeader = "\x89PNG\r\n\x1a\n"

// Filter type, as per the PNG spec.
const (
	ftNone    = 0
	ftSub     = 1
	ftUp      = 2
	ftAverage = 3
	ftPaeth   = 4
	nFilter   = 5 // number of filter types; sizes the per-row candidate buffers
)
// abs8 returns the absolute value of a byte interpreted as a signed int8.
func abs8(d uint8) int {
	if d >= 128 {
		// Two's complement: 128..255 encode -128..-1.
		return 256 - int(d)
	}
	return int(d)
}
// Chooses the filter to use for encoding the current row, and applies it.
// The return value is the index of the filter and also of the row in cr that has had it applied.
//
// cr[k] holds the candidate filtered row for filter type k; byte 0 of each
// row is the filter-type byte, so pixel data starts at index 1. pr is the
// previous (unfiltered) row; bpp is the number of bytes per pixel.
func filter(cr *[nFilter][]byte, pr []byte, bpp int) int {
	// We try all five filter types, and pick the one that minimizes the sum of absolute differences.
	// This is the same heuristic that libpng uses, although the filters are attempted in order of
	// estimated most likely to be minimal (ftUp, ftPaeth, ftNone, ftSub, ftAverage), rather than
	// in their enumeration order (ftNone, ftSub, ftUp, ftAverage, ftPaeth).
	cdat0 := cr[0][1:]
	cdat1 := cr[1][1:]
	cdat2 := cr[2][1:]
	cdat3 := cr[3][1:]
	cdat4 := cr[4][1:]
	pdat := pr[1:]
	n := len(cdat0)
	// The up filter.
	sum := 0
	for i := 0; i < n; i++ {
		cdat2[i] = cdat0[i] - pdat[i]
		sum += abs8(cdat2[i])
	}
	best := sum
	filter := ftUp
	// The Paeth filter.
	sum = 0
	// The first pixel has no left neighbour, so Paeth degenerates to up here.
	for i := 0; i < bpp; i++ {
		cdat4[i] = cdat0[i] - pdat[i]
		sum += abs8(cdat4[i])
	}
	for i := bpp; i < n; i++ {
		cdat4[i] = cdat0[i] - paeth(cdat0[i-bpp], pdat[i], pdat[i-bpp])
		sum += abs8(cdat4[i])
		// Early exit once this candidate can no longer beat the current best.
		if sum >= best {
			break
		}
	}
	if sum < best {
		best = sum
		filter = ftPaeth
	}
	// The none filter.
	sum = 0
	for i := 0; i < n; i++ {
		sum += abs8(cdat0[i])
		if sum >= best {
			break
		}
	}
	if sum < best {
		best = sum
		filter = ftNone
	}
	// The sub filter.
	sum = 0
	// Again, the first pixel has no left neighbour: sub passes it through.
	for i := 0; i < bpp; i++ {
		cdat1[i] = cdat0[i]
		sum += abs8(cdat1[i])
	}
	for i := bpp; i < n; i++ {
		cdat1[i] = cdat0[i] - cdat0[i-bpp]
		sum += abs8(cdat1[i])
		if sum >= best {
			break
		}
	}
	if sum < best {
		best = sum
		filter = ftSub
	}
	// The average filter.
	sum = 0
	for i := 0; i < bpp; i++ {
		cdat3[i] = cdat0[i] - pdat[i]/2
		sum += abs8(cdat3[i])
	}
	for i := bpp; i < n; i++ {
		cdat3[i] = cdat0[i] - uint8((int(cdat0[i-bpp])+int(pdat[i]))/2)
		sum += abs8(cdat3[i])
		if sum >= best {
			break
		}
	}
	if sum < best {
		best = sum
		filter = ftAverage
	}
	return filter
}
|
package link
import (
"testing"
"github.com/cilium/ebpf/internal/testutils"
)
// TestHaveProgAttach exercises the kernel feature probe for BPF_PROG_ATTACH.
func TestHaveProgAttach(t *testing.T) {
	testutils.CheckFeatureTest(t, haveProgAttach)
}
// TestHaveProgAttachReplace exercises the feature probe for attach-replace support.
func TestHaveProgAttachReplace(t *testing.T) {
	testutils.CheckFeatureTest(t, haveProgAttachReplace)
}
// TestHaveBPFLink exercises the feature probe for the BPF link API.
func TestHaveBPFLink(t *testing.T) {
	testutils.CheckFeatureTest(t, haveBPFLink)
}
|
package server
import (
"context"
"net/http"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
)
// TestRequestID checks extraction of the request ID from a request's context:
// missing or malformed values yield uuid.Nil, valid ones are returned as-is.
//
// require.Equal takes (t, expected, actual); the original had the arguments
// reversed in every subtest, which produced misleading failure messages.
func TestRequestID(t *testing.T) {
	t.Run("returns empty id if no id exists", func(t *testing.T) {
		req, err := http.NewRequest(http.MethodGet, "example.com", nil)
		require.Nil(t, err)
		requestID := RequestID(req)
		require.Equal(t, uuid.Nil, requestID)
	})
	t.Run("returns empty id if invalid id exists", func(t *testing.T) {
		req, err := http.NewRequest(http.MethodGet, "example.com", nil)
		require.Nil(t, err)
		// A non-uuid value under the key must be treated as absent.
		req = req.WithContext(context.WithValue(req.Context(), requestIDKey, "foobar"))
		requestID := RequestID(req)
		require.Equal(t, uuid.Nil, requestID)
	})
	t.Run("returns id if one exists", func(t *testing.T) {
		req, err := http.NewRequest(http.MethodGet, "example.com", nil)
		require.Nil(t, err)
		expected, err := uuid.NewRandom()
		require.Nil(t, err)
		req = req.WithContext(context.WithValue(req.Context(), requestIDKey, expected))
		requestID := RequestID(req)
		require.Equal(t, expected, requestID)
	})
}
|
package fakerp
import (
"context"
"strings"
azdns "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2017-10-01/dns"
"github.com/Azure/go-autorest/autorest/to"
"github.com/sirupsen/logrus"
"github.com/openshift/openshift-azure/pkg/api"
"github.com/openshift/openshift-azure/pkg/util/azureclient"
"github.com/openshift/openshift-azure/pkg/util/azureclient/dns"
)
// dnsManager manages the Azure DNS zones and record sets for a cluster.
type dnsManager struct {
	zc               dns.ZonesClient      // zone CRUD client
	rsc              dns.RecordSetsClient // record-set CRUD client
	dnsResourceGroup string               // resource group holding the parent DNS zone
	dnsDomain        string               // parent DNS domain name
}
// newDNSManager builds a dnsManager whose zone and record-set clients
// authenticate with the client authorizer carried in ctx.
func newDNSManager(ctx context.Context, log *logrus.Entry, subscriptionID, dnsResourceGroup, dnsDomain string) (*dnsManager, error) {
	authorizer, err := azureclient.GetAuthorizerFromContext(ctx, api.ContextKeyClientAuthorizer)
	if err != nil {
		return nil, err
	}
	return &dnsManager{
		zc:               dns.NewZonesClient(ctx, log, subscriptionID, authorizer),
		rsc:              dns.NewRecordSetsClient(ctx, log, subscriptionID, authorizer),
		dnsResourceGroup: dnsResourceGroup,
		dnsDomain:        dnsDomain,
	}, nil
}
// createOrUpdateZone creates (or updates) the DNS zone zoneName in
// resourceGroup, lowers the SOA and NS TTLs to 60s, and installs an NS glue
// record for the zone in its parent zone so delegation resolves.
func (dm *dnsManager) createOrUpdateZone(ctx context.Context, resourceGroup, zoneName, parentResourceGroup, parentZoneName string) error {
	zone, err := dm.zc.CreateOrUpdate(ctx, resourceGroup, zoneName, azdns.Zone{
		Location: to.StringPtr("global"),
	}, "", "")
	if err != nil {
		return err
	}
	// update TTLs
	rs, err := dm.rsc.Get(ctx, resourceGroup, zoneName, "@", azdns.SOA)
	if err != nil {
		return err
	}
	rs.RecordSetProperties.TTL = to.Int64Ptr(60)
	rs.RecordSetProperties.SoaRecord.RefreshTime = to.Int64Ptr(60)
	rs.RecordSetProperties.SoaRecord.RetryTime = to.Int64Ptr(60)
	rs.RecordSetProperties.SoaRecord.ExpireTime = to.Int64Ptr(60)
	rs.RecordSetProperties.SoaRecord.MinimumTTL = to.Int64Ptr(60)
	_, err = dm.rsc.CreateOrUpdate(ctx, resourceGroup, zoneName, "@", azdns.SOA, rs, "", "")
	if err != nil {
		return err
	}
	rs, err = dm.rsc.Get(ctx, resourceGroup, zoneName, "@", azdns.NS)
	if err != nil {
		return err
	}
	rs.RecordSetProperties.TTL = to.Int64Ptr(60)
	_, err = dm.rsc.CreateOrUpdate(ctx, resourceGroup, zoneName, "@", azdns.NS, rs, "", "")
	if err != nil {
		return err
	}
	// Build NS records pointing at the name servers Azure assigned the zone.
	nsRecords := make([]azdns.NsRecord, len(*zone.NameServers))
	for i := range *zone.NameServers {
		nsRecords[i] = azdns.NsRecord{
			Nsdname: &(*zone.NameServers)[i],
		}
	}
	// create glue record in parent zone
	_, err = dm.rsc.CreateOrUpdate(ctx, parentResourceGroup, parentZoneName, strings.Split(zoneName, ".")[0], azdns.NS, azdns.RecordSet{
		RecordSetProperties: &azdns.RecordSetProperties{
			TTL:       to.Int64Ptr(60),
			NsRecords: &nsRecords,
		},
	}, "", "")
	return err
}
// deleteZone removes the NS glue record for zoneName from its parent zone,
// then deletes the zone itself.
func (dm *dnsManager) deleteZone(ctx context.Context, resourceGroup, zoneName, parentResourceGroup, parentZoneName string) error {
	// delete glue record in parent zone
	_, err := dm.rsc.Delete(ctx, parentResourceGroup, parentZoneName, strings.Split(zoneName, ".")[0], azdns.NS, "")
	if err != nil {
		return err
	}
	return dm.zc.Delete(ctx, resourceGroup, zoneName, "")
}
// createOrUpdateOCPDNS creates the cluster's public DNS layout:
//   - the <random>.osacloud.dev zone, with an "openshift" CNAME to the API FQDN;
//   - the apps.<random>.osacloud.dev zone, with a "*" wildcard CNAME to the router FQDN.
func (dm *dnsManager) createOrUpdateOCPDNS(ctx context.Context, cs *api.OpenShiftManagedCluster) error {
	parentZone := strings.SplitN(cs.Properties.RouterProfiles[0].PublicSubdomain, ".", 2)[1]
	// <random>.osacloud.dev zone
	err := dm.createOrUpdateZone(ctx, cs.Properties.AzProfile.ResourceGroup, parentZone, dm.dnsResourceGroup, dm.dnsDomain)
	if err != nil {
		return err
	}
	// openshift.<random>.osacloud.dev cname
	_, err = dm.rsc.CreateOrUpdate(ctx, cs.Properties.AzProfile.ResourceGroup, parentZone, "openshift", azdns.CNAME, azdns.RecordSet{
		RecordSetProperties: &azdns.RecordSetProperties{
			CnameRecord: &azdns.CnameRecord{
				Cname: &cs.Properties.FQDN,
			},
			TTL: to.Int64Ptr(60),
		},
	}, "", "")
	if err != nil {
		// BUG FIX: this error was previously assigned but overwritten by the
		// next call without ever being checked, silently losing the failure.
		return err
	}
	// apps.<random>.osacloud.dev zone
	err = dm.createOrUpdateZone(ctx, cs.Properties.AzProfile.ResourceGroup, cs.Properties.RouterProfiles[0].PublicSubdomain, cs.Properties.AzProfile.ResourceGroup, parentZone)
	if err != nil {
		return err
	}
	// *.apps.<random>.osacloud.dev cname
	_, err = dm.rsc.CreateOrUpdate(ctx, cs.Properties.AzProfile.ResourceGroup, cs.Properties.RouterProfiles[0].PublicSubdomain, "*", azdns.CNAME, azdns.RecordSet{
		RecordSetProperties: &azdns.RecordSetProperties{
			CnameRecord: &azdns.CnameRecord{
				Cname: &cs.Properties.RouterProfiles[0].FQDN,
			},
			TTL: to.Int64Ptr(60),
		},
	}, "", "")
	return err
}
// deleteOCPDNS removes the cluster's DNS zones in reverse order of creation:
// first apps.<random>, then <random> itself, each along with its glue record
// in the respective parent zone.
func (dm *dnsManager) deleteOCPDNS(ctx context.Context, cs *api.OpenShiftManagedCluster) error {
	parentZone := strings.SplitN(cs.Properties.RouterProfiles[0].PublicSubdomain, ".", 2)[1]
	// apps.<random>.osacloud.dev zone
	err := dm.deleteZone(ctx, cs.Properties.AzProfile.ResourceGroup, cs.Properties.RouterProfiles[0].PublicSubdomain, cs.Properties.AzProfile.ResourceGroup, parentZone)
	if err != nil {
		return err
	}
	// <random>.osacloud.dev zone
	return dm.deleteZone(ctx, cs.Properties.AzProfile.ResourceGroup, parentZone, dm.dnsResourceGroup, dm.dnsDomain)
}
|
//
// Copyright 2020 The AVFS authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !datarace
package memfs
import (
"os"
"testing"
"github.com/avfs/avfs"
)
// Compile-time checks that the concrete types satisfy their interfaces.
var (
	// dirNode struct implements node interface.
	_ node = &dirNode{}
	// fileNode struct implements node interface.
	_ node = &fileNode{}
	// symlinkNode struct implements node interface.
	_ node = &symlinkNode{}
	// fStat struct implements os.FileInfo interface.
	_ os.FileInfo = &fStat{}
)
// TestSearchNode builds a small fixture tree (directories, files, symlinks
// and a symlink loop) and checks searchNode's resolution of parent, child,
// the matched path segment and the returned error for each path.
func TestSearchNode(t *testing.T) {
	vfs, err := New()
	if err != nil {
		t.Fatalf("New : want err to be nil, got %v", err)
	}
	rn := vfs.rootNode
	// Directories
	da := vfs.createDir(rn, "a", avfs.DefaultDirPerm)
	db := vfs.createDir(rn, "b", avfs.DefaultDirPerm)
	dc := vfs.createDir(rn, "c", avfs.DefaultDirPerm)
	da1 := vfs.createDir(da, "a1", avfs.DefaultDirPerm)
	da2 := vfs.createDir(da, "a2", avfs.DefaultDirPerm)
	db1 := vfs.createDir(db, "b1", avfs.DefaultDirPerm)
	db1a := vfs.createDir(db1, "b1A", avfs.DefaultDirPerm)
	db1b := vfs.createDir(db1, "b1B", avfs.DefaultDirPerm)
	// Files
	f1 := vfs.createFile(rn, "file1", avfs.DefaultFilePerm)
	fa1 := vfs.createFile(da, "afile1", avfs.DefaultFilePerm)
	fa2 := vfs.createFile(da, "afile2", avfs.DefaultFilePerm)
	fa3 := vfs.createFile(da, "afile3", avfs.DefaultFilePerm)
	// Symlinks (loop1/loop2 form a deliberate resolution cycle)
	vfs.createSymlink(rn, "lroot", "/")
	vfs.createSymlink(rn, "la", "/a")
	vfs.createSymlink(db1b, "lb1", "/b/b1")
	vfs.createSymlink(dc, "lafile3", "../a/afile3")
	lloop1 := vfs.createSymlink(rn, "loop1", "/loop2")
	vfs.createSymlink(rn, "loop2", "/loop1")
	tests := []struct { //nolint:govet // no fieldalignment for test structs
		path        string
		parent      *dirNode
		child       node
		first, rest string
		err         error
	}{
		// Existing directories
		{path: "/", parent: rn, child: rn, first: "", rest: "", err: avfs.ErrFileExists},
		{path: "/a", parent: rn, child: da, first: "a", rest: "", err: avfs.ErrFileExists},
		{path: "/b", parent: rn, child: db, first: "b", rest: "", err: avfs.ErrFileExists},
		{path: "/c", parent: rn, child: dc, first: "c", rest: "", err: avfs.ErrFileExists},
		{path: "/a/a1", parent: da, child: da1, first: "a1", rest: "", err: avfs.ErrFileExists},
		{path: "/a/a2", parent: da, child: da2, first: "a2", rest: "", err: avfs.ErrFileExists},
		{path: "/b/b1", parent: db, child: db1, first: "b1", rest: "", err: avfs.ErrFileExists},
		{path: "/b/b1/b1A", parent: db1, child: db1a, first: "b1A", rest: "", err: avfs.ErrFileExists},
		{path: "/b/b1/b1B", parent: db1, child: db1b, first: "b1B", rest: "", err: avfs.ErrFileExists},
		// Existing files
		{path: "/file1", parent: rn, child: f1, first: "file1", rest: "", err: avfs.ErrFileExists},
		{path: "/a/afile1", parent: da, child: fa1, first: "afile1", rest: "", err: avfs.ErrFileExists},
		{path: "/a/afile2", parent: da, child: fa2, first: "afile2", rest: "", err: avfs.ErrFileExists},
		{path: "/a/afile3", parent: da, child: fa3, first: "afile3", rest: "", err: avfs.ErrFileExists},
		// Symlinks
		{path: "/lroot", parent: rn, child: rn, first: "", rest: "", err: avfs.ErrFileExists},
		{path: "/lroot/a", parent: rn, child: da, first: "a", rest: "", err: avfs.ErrFileExists},
		{path: "/la/a1", parent: da, child: da1, first: "a1", rest: "", err: avfs.ErrFileExists},
		{path: "/b/b1/b1B/lb1/b1A", parent: db1, child: db1a, first: "b1A", rest: "", err: avfs.ErrFileExists},
		{path: "/c/lafile3", parent: da, child: fa3, first: "afile3", rest: "", err: avfs.ErrFileExists},
		{path: "/loop1", parent: rn, child: lloop1, first: "loop1", rest: "", err: avfs.ErrTooManySymlinks},
		// Non existing
		{path: "/z", parent: rn, first: "z", rest: "", err: avfs.ErrNoSuchFileOrDir},
		{path: "/a/az", parent: da, first: "az", rest: "", err: avfs.ErrNoSuchFileOrDir},
		{path: "/b/b1/b1z", parent: db1, first: "b1z", err: avfs.ErrNoSuchFileOrDir},
		{path: "/b/b1/b1A/b1Az", parent: db1a, first: "b1Az", err: avfs.ErrNoSuchFileOrDir},
		{
			path: "/b/b1/b1A/b1Az/not/exist", parent: db1a, first: "b1Az", rest: "/not/exist",
			err: avfs.ErrNoSuchFileOrDir,
		},
		{
			path: "/a/afile1/not/a/dir", parent: da, child: fa1, first: "afile1", rest: "/not/a/dir",
			err: avfs.ErrNotADirectory,
		},
	}
	for _, test := range tests {
		parent, child, absPath, start, end, err := vfs.searchNode(test.path, slmEval)
		// first is the path segment where the search stopped; rest is the
		// unresolved remainder of the path.
		first := absPath[start:end]
		rest := absPath[end:]
		if test.err != err {
			t.Errorf("%s : want error to be %v, got %v", test.path, test.err, err)
		}
		if test.parent != parent {
			t.Errorf("%s : want parent to be %v, got %v", test.path, test.parent, parent)
		}
		if test.child != child {
			t.Errorf("%s : want child to be %v, got %v", test.path, test.child, child)
		}
		if test.first != first {
			t.Errorf("%s : want first to be %s, got %s", test.path, test.first, first)
		}
		if test.rest != rest {
			t.Errorf("%s : want rest to be %s, got %s", test.path, test.rest, rest)
		}
	}
}
|
package main
import (
//"fmt"
"math"
)
// Color holds a color in both HSV and RGB representations. The two sides are
// not kept in sync automatically: call RGBfromHSV or FromRGB to refresh one
// representation from the other.
type Color struct {
	H       uint16  /*0..360*/
	S, V    float64 /*0..1*/
	R, G, B uint8   /*0..255*/
}

// RGB returns the currently cached RGB components.
func (a *Color) RGB() (r, g, b uint8) {
	return a.R, a.G, a.B
}

// RGBfromHSV recomputes and returns R, G, B from H, S and V.
// Direct implementation of the graph in this image:
// https://en.wikipedia.org/wiki/HSL_and_HSV#/media/File:HSV-RGB-comparison.svg
func (a *Color) RGBfromHSV() (r, g, b uint8) {
	a.fixRanges()
	C := a.V * a.S                 // chroma
	segment := float64(a.H) / 60.0 // which 60-degree sector of the hue wheel
	X := C * (1 - math.Abs(math.Mod(segment, 2)-1))
	var r1, g1, b1 float64
	switch uint8(segment) {
	case 0:
		r1, g1, b1 = C, X, 0
	case 1:
		r1, g1, b1 = X, C, 0
	case 2:
		r1, g1, b1 = 0, C, X
	case 3:
		r1, g1, b1 = 0, X, C
	case 4:
		r1, g1, b1 = X, 0, C
	case 5:
		r1, g1, b1 = C, 0, X
	}
	// Lift each channel by m so that max(r1, g1, b1) == V, then scale to bytes.
	m := a.V - C
	a.R = uint8((r1 + m) * 255)
	a.G = uint8((g1 + m) * 255)
	a.B = uint8((b1 + m) * 255)
	return a.R, a.G, a.B
}

// FromRGB stores the given RGB components and recomputes H, S and V.
// Algorithm from https://www.rapidtables.com/convert/color/rgb-to-hsv.html
func (a *Color) FromRGB(rInt, gInt, bInt uint8) {
	a.R = rInt
	a.G = gInt
	a.B = bInt
	r := float64(rInt) / 255.0
	g := float64(gInt) / 255.0
	b := float64(bInt) / 255.0
	cMax := math.Max(r, math.Max(g, b))
	cMin := math.Min(r, math.Min(g, b))
	delta := cMax - cMin
	// calc Hue, first in units of 60 degrees (range -1..5)
	nH := 0.0
	if delta > 0 {
		if cMax == r {
			nH = (g - b) / delta
		} else if cMax == g {
			nH = ((b - r) / delta) + 2
		} else if cMax == b {
			nH = ((r - g) / delta) + 4
		}
	}
	hDeg := 60 * nH
	// BUG FIX: nH is negative when red is the max channel and blue exceeds
	// green. Converting a negative float64 to uint16 is implementation-
	// defined in Go, so normalize into [0, 360) before the conversion.
	if hDeg < 0 {
		hDeg += 360
	}
	a.H = uint16(hDeg) % 360
	// calc Saturation
	if cMax > 0 {
		a.S = delta / cMax
	} else {
		a.S = 0
	}
	// calc Value
	a.V = cMax
}

// Black reports whether the color's value component is effectively zero.
func (a *Color) Black() bool {
	if a.V > 1e-3 {
		return false
	}
	return true
}

// fixRanges clamps S and V into [0, 1] and wraps H into [0, 360).
func (a *Color) fixRanges() {
	if a.V > 1 {
		a.V = 1
	}
	if a.V < 0 {
		a.V = 0
	}
	if a.S > 1 {
		a.S = 1
	}
	if a.S < 0 {
		a.S = 0
	}
	a.H = a.H % 360
}

// reset zeroes every field, returning the color to black.
func (a *Color) reset() {
	a.H = 0
	a.S = 0
	a.V = 0
	a.R = 0
	a.G = 0
	a.B = 0
}

// setV updates the value (brightness) component and refreshes the cached RGB
// representation to match.
func (a *Color) setV(nV float64) {
	a.V = nV
	// after Setting V: RGB has to be updated as well
	a.R, a.G, a.B = a.RGBfromHSV()
}
|
package server
import (
"flag"
"fmt"
"net"
log "../log"
)
const (
	// MaxRead caps the size of a single read from a client connection.
	MaxRead = 1024 * 1024 //1MB
)

// Opts holds the parsed command-line options for the server.
var Opts *Options

// Options describes the server's runtime configuration.
type Options struct {
	TcpAddr  string // address the TCP service listens on
	Port     string // port (with leading colon) the service is served on
	LogTo    string // log destination: a file path, "stdout" or "none"
	LogLevel string // one of DEBUG, INFO, WARNING, ERROR
	DBPath   string // path to the sqlite3 database file
	DBType   string // database driver: sqlite3 or mysql
	Rcl      bool   // whether remote-control-local support is enabled
}
// parseArgs defines and parses the command-line flags and returns the
// resulting Options. Must run before any flag value is used.
func parseArgs() *Options {
	tcpAddr := flag.String("addr", "112.74.112.103", "address to serve the tcp service")
	portPtr := flag.String("port", ":37001", "port to serve the service")
	dbPath := flag.String("dbpath", "../goadmin/goadmin.db", "path to sqlite3 database")
	dbType := flag.String("dbtype", "sqlite3", "Type of database(sqlite3, mysql),default:sqlite3")
	logto := flag.String("log", "stdout", "Write log messages to this file. 'stdout' and 'none' have special meanings")
	loglevel := flag.String("log-level", "DEBUG", "The level of messages to log. One of: DEBUG, INFO, WARNING, ERROR")
	rcl := flag.Bool("rcl", false, "Enable Remote control local support, default: false")
	flag.Parse()
	return &Options{
		TcpAddr:  *tcpAddr,
		Port:     *portPtr,
		LogTo:    *logto,
		LogLevel: *loglevel,
		DBPath:   *dbPath,
		DBType:   *dbType,
		Rcl:      *rcl,
	}
}
// GetInterfaceAddr returns the first non-loopback IPv4 address bound to the
// named network interface, or "" if the interface cannot be resolved or has
// no suitable address.
func GetInterfaceAddr(iface string) string {
	var ifi *net.Interface
	var err error
	if ifi, err = net.InterfaceByName(iface); err != nil {
		log.Error("Interface Error:", err.Error())
		return ""
	}
	addrs, err := ifi.Addrs()
	checkError(err, "addrs:")
	for _, address := range addrs {
		// Check the IP address and skip loopback addresses.
		if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
			if ipnet.IP.To4() != nil {
				fmt.Println(ipnet.IP.String())
				return ipnet.IP.String()
			}
		}
	}
	return ""
}
|
package k8sml
// Cloud abstracts a target cloud definition from which infrastructure
// modules are generated.
type Cloud interface {
	// GetID returns the unique identifier of this cloud definition.
	GetID() string
	// GetVariableValue resolves a named configuration variable.
	GetVariableValue(variable string) interface{}
	// GetCloudProvider returns the provider this cloud targets.
	GetCloudProvider() CloudProvider
	// GetIPv4Cidr returns the configured IPv4 CIDR blocks.
	GetIPv4Cidr() []*IPv4Cidr
	// GetInternetGateway returns the internet gateway configuration, if any.
	GetInternetGateway() *InternetGateway
	// GetKubernetes returns the Kubernetes cluster configuration.
	GetKubernetes() *Kubernetes
	// AddRuntimeVariable records a key/value pair resolved at runtime.
	AddRuntimeVariable(key, value string)
	// GetRuntimeVariables returns all recorded runtime variables.
	GetRuntimeVariables() map[string]string
	// ExportModule writes out the generated module.
	ExportModule() error
}
package testlogger_test
import (
	"sync"
	"testing"

	"github.com/rwool/ex/log"
	"github.com/rwool/ex/test/helpers/testlogger"
	"github.com/stretchr/testify/assert"
)
// TestNewTestLogger checks that output logged at every level ends up in the
// buffer returned by NewTestLogger.
func TestNewTestLogger(t *testing.T) {
	// Creating a new testing object to prevent unnecessary output.
	t2 := &testing.T{}
	l, buf := testlogger.NewTestLogger(t2, log.Debug)
	l.Debug("1")
	l.Warn("2")
	l.Error("3")
	assert.True(t, buf.Len() > 0, "no data in log buffer")
}
// TestBuffer exercises concurrent Write and Read on testlogger.Buffer.
// Intended to be run with the race detector (-race).
func TestBuffer(t *testing.T) {
	buf := &testlogger.Buffer{}
	var wg sync.WaitGroup
	for i := 0; i < 50; i++ {
		wg.Add(2)
		go func() {
			defer wg.Done()
			buf.Write([]byte("123"))
		}()
		go func() {
			defer wg.Done()
			outBuf := make([]byte, 20)
			buf.Read(outBuf)
		}()
	}
	// BUG FIX: previously the test returned without waiting, so it could
	// finish before any goroutine ran — leaking goroutines past the test and
	// giving the race detector nothing to observe.
	wg.Wait()
}
|
package main
import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/julienschmidt/httprouter"
)
// AWSResponse mirrors the JSON shape of a CloudWatch GetMetricStatistics
// result, with the owning EC2 instance ID attached by the handlers.
type AWSResponse struct {
	Datapoints []struct {
		Average     float64     `json:"Average"`
		Maximum     interface{} `json:"Maximum"`
		Minimum     interface{} `json:"Minimum"`
		SampleCount interface{} `json:"SampleCount"`
		Sum         interface{} `json:"Sum"`
		Timestamp   string      `json:"Timestamp"`
		Unit        string      `json:"Unit"`
	} `json:"Datapoints"`
	Label string `json:"Label"`
	// InstanceId is not part of the CloudWatch payload; it is filled in after
	// unmarshalling so callers can tell which instance each result belongs to.
	InstanceId string `json:"InstanceId"`
}
// main wires up the HTTP routes for the EC2/CloudWatch metric endpoints and
// serves them on localhost:8080.
func main() {
	r := httprouter.New()
	r.GET("/metrics", getMetrics)
	r.GET("/cpu", getCPUUtilization)
	r.GET("/netwrkinput", getNetworkInput)
	r.GET("/netwrkoutput", getNetworkOutput)
	r.GET("/diskreadops", getDiskReadOps)
	r.GET("/diskreadbytes", getDiskReadBytes)
	r.GET("/diskwritebytes", getDiskWriteBytes)
	r.GET("/memory", getMemoryUtilization)
	r.GET("/count", getHTTPCount)
	// ListenAndServe only returns on failure. Previously its error was
	// silently discarded, hiding problems such as the port being in use.
	log.Fatal(http.ListenAndServe("localhost:8080", r))
}
// getMetrics serves /metrics: it dumps the raw DescribeInstances response for
// the account as JSON, logging reservation and instance counts along the way.
func getMetrics(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	svc := ec2.New(session.New())
	resp, err := svc.DescribeInstances(nil)
	if err != nil {
		panic(err)
	}
	// resp has all of the response data, pull out instance IDs:
	fmt.Println("The Response data is", resp)
	fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
	for idx, res := range resp.Reservations {
		fmt.Println(" > Number of instances: ", len(res.Instances))
		for _, inst := range resp.Reservations[idx].Instances {
			fmt.Println(" - Instance ID: ", *inst.InstanceId)
		}
	}
	jsonResp, _ := json.Marshal(resp)
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.WriteHeader(200)
	fmt.Fprintf(w, "%s", jsonResp)
}
func getCPUUtilization(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
svc := ec2.New(session.New())
resp, err := svc.DescribeInstances(nil)
if err != nil {
panic(err)
}
var sliceArray []AWSResponse
// resp has all of the response data, pull out instance IDs:
//fmt.Println("The Response data is", resp)
fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
for idx, res := range resp.Reservations {
fmt.Println(" > Number of instances: ", len(res.Instances))
for _, inst := range resp.Reservations[idx].Instances {
fmt.Println(" - Instance ID: ", *inst.InstanceId)
cw := cloudwatch.New(session.New())
params := &cloudwatch.GetMetricStatisticsInput{
EndTime: aws.Time(time.Now()), // Required
MetricName: aws.String("CPUUtilization"), // Required
Namespace: aws.String("AWS/EC2"), // Required
Period: aws.Int64(3600), // Required
StartTime: aws.Time(time.Now().Add(-120 *time.Minute)), // Required
Statistics: []*string{ // Required
aws.String("Average"), // Required
// More values...
},
Dimensions: []*cloudwatch.Dimension{
{ // Required
Name: aws.String("InstanceId"), // Required
Value: aws.String(*inst.InstanceId), // Required
},
// More values...
},
Unit: aws.String("Percent"),
}
fmt.Println("Params are", params)
metrics, err := cw.GetMetricStatistics(params)
if err != nil {
fmt.Println("Error")
return
}
fmt.Println(metrics)
AWS := AWSResponse{}
jsonResp, _ := json.Marshal(metrics)
err1 := json.Unmarshal(jsonResp, &AWS)
fmt.Println("Instance id is", *inst.InstanceId)
AWS.InstanceId = *inst.InstanceId
fmt.Println("Instance ID inside is" , AWS.InstanceId)
sliceArray = append(sliceArray, AWS)
if err1!= nil {
panic(err1)
}
}
}
fmt.Println("Final array is", sliceArray)
finalResponse, _ :=json.Marshal(sliceArray)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(200)
fmt.Fprintf(w, "%s", finalResponse)
}
func getNetworkInput(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
svc := ec2.New(session.New())
resp, err := svc.DescribeInstances(nil)
if err != nil {
panic(err)
}
var sliceArray []AWSResponse
// resp has all of the response data, pull out instance IDs:
//fmt.Println("The Response data is", resp)
fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
for idx, res := range resp.Reservations {
fmt.Println(" > Number of instances: ", len(res.Instances))
for _, inst := range resp.Reservations[idx].Instances {
fmt.Println(" - Instance ID: ", *inst.InstanceId)
cw := cloudwatch.New(session.New())
networkIn := &cloudwatch.GetMetricStatisticsInput{
EndTime: aws.Time(time.Now()), // Required
MetricName: aws.String("NetworkIn"), // Required
Namespace: aws.String("AWS/EC2"), // Required
Period: aws.Int64(3600), // Required
StartTime: aws.Time(time.Now().Add(-120 * time.Minute)), // Required
Statistics: []*string{ // Required
aws.String("Average"), // Required
// More values...
},
Dimensions: []*cloudwatch.Dimension{
{ // Required
Name: aws.String("InstanceId"), // Required
Value: aws.String(*inst.InstanceId), // Required
},
// More values...
},
Unit: aws.String("Bytes"),
}
networkInMetrics, err := cw.GetMetricStatistics(networkIn)
if err != nil {
fmt.Println("Error")
return
}
fmt.Println(networkInMetrics)
AWS := AWSResponse{}
jsonResp, _ := json.Marshal(networkInMetrics)
err1 := json.Unmarshal(jsonResp, &AWS)
fmt.Println("Instance id is", *inst.InstanceId)
AWS.InstanceId = *inst.InstanceId
fmt.Println("Instance ID inside is" , AWS.InstanceId)
sliceArray = append(sliceArray, AWS)
if err1!= nil {
panic(err1)
}
}
}
fmt.Println("Final array is", sliceArray)
finalResponse, _ :=json.Marshal(sliceArray)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(200)
fmt.Fprintf(w, "%s", finalResponse)
}
func getNetworkOutput(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
svc := ec2.New(session.New())
resp, err := svc.DescribeInstances(nil)
if err != nil {
panic(err)
}
var sliceArray []AWSResponse
// resp has all of the response data, pull out instance IDs:
//fmt.Println("The Response data is", resp)
fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
for idx, res := range resp.Reservations {
fmt.Println(" > Number of instances: ", len(res.Instances))
for _, inst := range resp.Reservations[idx].Instances {
fmt.Println(" - Instance ID: ", *inst.InstanceId)
cw := cloudwatch.New(session.New())
networkOut := &cloudwatch.GetMetricStatisticsInput{
EndTime: aws.Time(time.Now()), // Required
MetricName: aws.String("NetworkOut"), // Required
Namespace: aws.String("AWS/EC2"), // Required
Period: aws.Int64(3600), // Required
StartTime: aws.Time(time.Now().Add(-120 * time.Minute)), // Required
Statistics: []*string{ // Required
aws.String("Average"), // Required
// More values...
},
Dimensions: []*cloudwatch.Dimension{
{ // Required
Name: aws.String("InstanceId"), // Required
Value: aws.String(*inst.InstanceId), // Required
},
// More values...
},
Unit: aws.String("Bytes"),
}
networkOutMetrics, err := cw.GetMetricStatistics(networkOut)
if err != nil {
fmt.Println("Error")
return
}
fmt.Println(networkOutMetrics)
AWS := AWSResponse{}
jsonResp, _ := json.Marshal(networkOutMetrics)
err1 := json.Unmarshal(jsonResp, &AWS)
fmt.Println("Instance id is", *inst.InstanceId)
AWS.InstanceId = *inst.InstanceId
fmt.Println("Instance ID inside is" , AWS.InstanceId)
sliceArray = append(sliceArray, AWS)
if err1!= nil {
panic(err1)
}
}
}
fmt.Println("Final array is", sliceArray)
finalResponse, _ :=json.Marshal(sliceArray)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(200)
fmt.Fprintf(w, "%s", finalResponse)
}
func getDiskReadOps(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
svc := ec2.New(session.New())
resp, err := svc.DescribeInstances(nil)
if err != nil {
panic(err)
}
var sliceArray []AWSResponse
// resp has all of the response data, pull out instance IDs:
//fmt.Println("The Response data is", resp)
fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
for idx, res := range resp.Reservations {
fmt.Println(" > Number of instances: ", len(res.Instances))
for _, inst := range resp.Reservations[idx].Instances {
fmt.Println(" - Instance ID: ", *inst.InstanceId)
cw := cloudwatch.New(session.New())
diskReadOps := &cloudwatch.GetMetricStatisticsInput{
EndTime: aws.Time(time.Now()), // Required
MetricName: aws.String("DiskReadOps"), // Required
Namespace: aws.String("AWS/EC2"), // Required
Period: aws.Int64(3600), // Required
StartTime: aws.Time(time.Now().Add(-120 * time.Minute)), // Required
Statistics: []*string{ // Required
aws.String("Average"), // Required
// More values...
},
Dimensions: []*cloudwatch.Dimension{
{ // Required
Name: aws.String("InstanceId"), // Required
Value: aws.String(*inst.InstanceId), // Required
},
// More values...
},
Unit: aws.String("Count"),
}
diskReadOpsMetrics, err := cw.GetMetricStatistics(diskReadOps)
if err != nil {
fmt.Println("Error")
return
}
fmt.Println(diskReadOpsMetrics)
AWS := AWSResponse{}
jsonResp, _ := json.Marshal(diskReadOpsMetrics)
err1 := json.Unmarshal(jsonResp, &AWS)
fmt.Println("Instance id is", *inst.InstanceId)
AWS.InstanceId = *inst.InstanceId
fmt.Println("Instance ID inside is" , AWS.InstanceId)
sliceArray = append(sliceArray, AWS)
if err1!= nil {
panic(err1)
}
}
}
fmt.Println("Final array is", sliceArray)
finalResponse, _ :=json.Marshal(sliceArray)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(200)
fmt.Fprintf(w, "%s", finalResponse)
}
func getDiskReadBytes(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
svc := ec2.New(session.New())
resp, err := svc.DescribeInstances(nil)
if err != nil {
panic(err)
}
var sliceArray []AWSResponse
// resp has all of the response data, pull out instance IDs:
//fmt.Println("The Response data is", resp)
fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
for idx, res := range resp.Reservations {
fmt.Println(" > Number of instances: ", len(res.Instances))
for _, inst := range resp.Reservations[idx].Instances {
fmt.Println(" - Instance ID: ", *inst.InstanceId)
cw := cloudwatch.New(session.New())
diskReadBytes := &cloudwatch.GetMetricStatisticsInput{
EndTime: aws.Time(time.Now()), // Required
MetricName: aws.String("DiskReadBytes"), // Required
Namespace: aws.String("AWS/EC2"), // Required
Period: aws.Int64(3600), // Required
StartTime: aws.Time(time.Now().Add(-120 * time.Minute)), // Required
Statistics: []*string{ // Required
aws.String("Average"), // Required
// More values...
},
Dimensions: []*cloudwatch.Dimension{
{ // Required
Name: aws.String("InstanceId"), // Required
Value: aws.String(*inst.InstanceId), // Required
},
// More values...
},
Unit: aws.String("Bytes"),
}
diskReadBytesMetrics, err := cw.GetMetricStatistics(diskReadBytes)
if err != nil {
fmt.Println("Error")
return
}
fmt.Println(diskReadBytesMetrics)
AWS := AWSResponse{}
jsonResp, _ := json.Marshal(diskReadBytesMetrics)
err1 := json.Unmarshal(jsonResp, &AWS)
fmt.Println("Instance id is", *inst.InstanceId)
AWS.InstanceId = *inst.InstanceId
fmt.Println("Instance ID inside is" , AWS.InstanceId)
sliceArray = append(sliceArray, AWS)
if err1!= nil {
panic(err1)
}
}
}
fmt.Println("Final array is", sliceArray)
finalResponse, _ :=json.Marshal(sliceArray)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(200)
fmt.Fprintf(w, "%s", finalResponse)
}
// getDiskWriteBytes serves the average DiskWriteBytes CloudWatch metric over
// the last two hours for every EC2 instance, as a JSON array of AWSResponse.
// Fixes vs original: errors are reported via HTTP 500 instead of panicking
// or silently returning, the CloudWatch client is created once instead of
// per instance, and Marshal/Unmarshal errors are checked before use.
func getDiskWriteBytes(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	sess := session.New()
	svc := ec2.New(sess)
	resp, err := svc.DescribeInstances(nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	cw := cloudwatch.New(sess)
	var sliceArray []AWSResponse
	fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
	for _, res := range resp.Reservations {
		fmt.Println("   > Number of instances: ", len(res.Instances))
		for _, inst := range res.Instances {
			input := &cloudwatch.GetMetricStatisticsInput{
				EndTime:    aws.Time(time.Now()),
				MetricName: aws.String("DiskWriteBytes"),
				Namespace:  aws.String("AWS/EC2"),
				Period:     aws.Int64(3600),
				StartTime:  aws.Time(time.Now().Add(-120 * time.Minute)),
				Statistics: []*string{aws.String("Average")},
				Dimensions: []*cloudwatch.Dimension{
					{
						Name:  aws.String("InstanceId"),
						Value: aws.String(*inst.InstanceId),
					},
				},
				Unit: aws.String("Bytes"),
			}
			metrics, err := cw.GetMetricStatistics(input)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			AWS := AWSResponse{}
			jsonResp, err := json.Marshal(metrics)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			if err := json.Unmarshal(jsonResp, &AWS); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			AWS.InstanceId = *inst.InstanceId
			sliceArray = append(sliceArray, AWS)
		}
	}
	finalResponse, err := json.Marshal(sliceArray)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, "%s", finalResponse)
}
// getMemoryUtilization serves the average MemoryUtilization metric (custom
// System/Linux namespace, published by the CloudWatch agent) over the last
// two hours for every EC2 instance, as a JSON array of AWSResponse.
// Fixes vs original: errors are reported via HTTP 500 instead of panicking
// or silently returning, the CloudWatch client is created once instead of
// per instance, and Marshal/Unmarshal errors are checked before use.
func getMemoryUtilization(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	sess := session.New()
	svc := ec2.New(sess)
	resp, err := svc.DescribeInstances(nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	cw := cloudwatch.New(sess)
	var sliceArray []AWSResponse
	fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
	for _, res := range resp.Reservations {
		fmt.Println("   > Number of instances: ", len(res.Instances))
		for _, inst := range res.Instances {
			input := &cloudwatch.GetMetricStatisticsInput{
				EndTime:    aws.Time(time.Now()),
				MetricName: aws.String("MemoryUtilization"),
				Namespace:  aws.String("System/Linux"),
				Period:     aws.Int64(3600),
				StartTime:  aws.Time(time.Now().Add(-120 * time.Minute)),
				Statistics: []*string{aws.String("Average")},
				Dimensions: []*cloudwatch.Dimension{
					{
						Name:  aws.String("InstanceId"),
						Value: aws.String(*inst.InstanceId),
					},
				},
			}
			metrics, err := cw.GetMetricStatistics(input)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			AWS := AWSResponse{}
			jsonResp, err := json.Marshal(metrics)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			if err := json.Unmarshal(jsonResp, &AWS); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			AWS.InstanceId = *inst.InstanceId
			sliceArray = append(sliceArray, AWS)
		}
	}
	finalResponse, err := json.Marshal(sliceArray)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, "%s", finalResponse)
}
// getHTTPCount serves the hourly RequestCount sum for the "PlsWork" ELB over
// the last two hours, repeated once per EC2 instance (each AWSResponse is
// tagged with an instance id), as a JSON array.
// NOTE(review): the load-balancer dimension is fixed, so every loop
// iteration fetches the same metric — confirm whether per-instance tagging
// is intentional before collapsing the loop.
// Fixes vs original: errors are reported via HTTP 500 instead of panicking
// or silently returning, the CloudWatch client is created once instead of
// per instance, and Marshal/Unmarshal errors are checked before use.
func getHTTPCount(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	sess := session.New()
	svc := ec2.New(sess)
	resp, err := svc.DescribeInstances(nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	cw := cloudwatch.New(sess)
	var sliceArray []AWSResponse
	fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
	for _, res := range resp.Reservations {
		fmt.Println("   > Number of instances: ", len(res.Instances))
		for _, inst := range res.Instances {
			input := &cloudwatch.GetMetricStatisticsInput{
				EndTime:    aws.Time(time.Now()),
				MetricName: aws.String("RequestCount"),
				Namespace:  aws.String("AWS/ELB"),
				Period:     aws.Int64(3600),
				StartTime:  aws.Time(time.Now().Add(-120 * time.Minute)),
				Statistics: []*string{aws.String("Sum")},
				Dimensions: []*cloudwatch.Dimension{
					{
						Name:  aws.String("LoadBalancerName"),
						Value: aws.String("PlsWork"),
					},
				},
			}
			metrics, err := cw.GetMetricStatistics(input)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			AWS := AWSResponse{}
			jsonResp, err := json.Marshal(metrics)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			if err := json.Unmarshal(jsonResp, &AWS); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			AWS.InstanceId = *inst.InstanceId
			sliceArray = append(sliceArray, AWS)
		}
	}
	finalResponse, err := json.Marshal(sliceArray)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, "%s", finalResponse)
}
|
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v20200107
import (
"github.com/tencentyun/tcecloud-sdk-go/tcecloud/common"
tchttp "github.com/tencentyun/tcecloud-sdk-go/tcecloud/common/http"
"github.com/tencentyun/tcecloud-sdk-go/tcecloud/common/profile"
)
// APIVersion is the csp API version this client speaks.
const APIVersion = "2020-01-07"

// Client is a csp API client; it embeds the shared SDK client, which
// provides request signing and transport.
type Client struct {
	common.Client
}
// NewClientWithSecretId builds a Client for the given region from a raw
// secret id/key pair, using the default client profile.
//
// Deprecated: use NewClient with a common.Credential instead.
func NewClientWithSecretId(secretId, secretKey, region string) (client *Client, err error) {
	client = &Client{}
	client.Init(region).
		WithSecretId(secretId, secretKey).
		WithProfile(profile.NewClientProfile())
	return client, nil
}
// NewClient builds a Client for the given region with the supplied
// credential and client profile.
func NewClient(credential *common.Credential, region string, clientProfile *profile.ClientProfile) (client *Client, err error) {
	c := &Client{}
	c.Init(region).WithCredential(credential).WithProfile(clientProfile)
	return c, nil
}
// NewGetStatDayRequest creates a GetStatDay request with its API metadata
// (service "csp", action "GetStatDay") pre-populated.
func NewGetStatDayRequest() (request *GetStatDayRequest) {
	req := &GetStatDayRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("csp", APIVersion, "GetStatDay")
	return req
}
// NewGetStatDayResponse creates an empty GetStatDay response ready to be
// filled in by Client.Send.
func NewGetStatDayResponse() (response *GetStatDayResponse) {
	return &GetStatDayResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// GetStatDay retrieves the statistics of a single storage bucket for a
// single day. A nil request is replaced by a default-initialized one.
func (c *Client) GetStatDay(request *GetStatDayRequest) (response *GetStatDayResponse, err error) {
	req := request
	if req == nil {
		req = NewGetStatDayRequest()
	}
	resp := NewGetStatDayResponse()
	return resp, c.Send(req, resp)
}
|
package main
import (
"fmt"
"bufio"
"github.com/dspinhirne/netaddr-go"
"github.com/libp2p/go-reuseport"
)
const (
	// REMOTE is the peer-address template; the destination IP is filled
	// in per connection, the port (9090) is fixed.
	REMOTE = "%s:9090"
	// LOCAL is the single local address every connection binds to; the
	// sockets share it via SO_REUSEPORT.
	LOCAL = "127.0.0.1:9091"
)
// connect dials raddr from the fixed local address laddr (SO_REUSEPORT
// allows many sockets to share it), signals readiness on next, then prints
// newline-delimited messages from the peer forever.
func connect(conn int, laddr, raddr string, next chan bool) {
	c, err := reuseport.Dial("tcp4", laddr, raddr)
	if err != nil {
		panic(fmt.Sprintf("%T: %s", err, err))
	}
	defer c.Close()
	fmt.Printf("Connected #%d, from %s -> %s\n", conn, laddr, raddr)
	next <- true
	// Create the buffered reader once. The original constructed a fresh
	// bufio.Reader on every iteration, silently discarding any bytes the
	// previous reader had already buffered beyond the first newline.
	reader := bufio.NewReader(c)
	for {
		d, err := reader.ReadString('\n')
		if err != nil {
			panic(fmt.Sprintf("%T: %s", err, err))
		}
		fmt.Printf("Data (Connection #%d) %s\n", conn, d)
	}
}
func main() {
const MAX = 1024*32
next := make(chan bool)
conn := 0
local := LOCAL
ip, _ := netaddr.ParseIPv4("127.0.0.1")
for i := 0; i< MAX; i++ {
conn++
remote := fmt.Sprintf(REMOTE, ip)
go connect(conn, local, remote, next)
<-next
ip = ip.Next()
}
fmt.Println("Reached the end of the line!")
<-next
}
|
package DMST
import (
"errors"
"fmt"
"log"
"strconv"
"sync"
"time"
"os"
"path"
)
// Infinite is the sentinel "no edge / unbounded weight" value.
const Infinite = (1<<31) - 1

// Node possible states (SN in the GHS algorithm).
const SleepingState = "Sleeping"
const FindState = "Find"
const FoundState = "Found"

// Edge possible states (SE in the GHS algorithm).
const RejectedState = "Rejected"
const BranchState = "Branch"
const BasicState = "Basic"

// Connection possible types: the seven GHS message kinds.
const ConnectType = "Connect"
const InitiateType = "Initiate"
const TestType = "Test"
const AcceptType = "Accept"
const RejectType = "Reject"
const ReportType = "Report"
const ChangeCoreType = "Change-core"
// Node is the struct that hold all information that is used by this
// instance of node, implementing the GHS (Gallager–Humblet–Spira)
// distributed minimum-spanning-tree algorithm.
type Node struct {
	sync.Mutex
	serv  *server
	done  chan struct{}
	peers map[int]string // peer id -> network address
	me    int            // this node's id; 0 is reserved
	// Communication channels
	msgChan chan *MessageArgs
	// Log channel
	logChan chan string
	// GHS variables
	level          int    // LN: current fragment level
	state          string // SN: Sleeping / Find / Found
	fragment       int    // FN: fragment identity
	findCount      int    // reports still expected from children
	inBranch       int    // weight of the edge leading toward the core
	bestEdgeWeight int    // best candidate outgoing edge found so far
	testEdge       *Edge  // edge currently being probed; nil when none
	edgeMap        map[int]*Edge // edges keyed by weight; todo initialize this variable on new Nodes
}
// Edge is one adjacent graph edge as seen from this node.
type Edge struct {
	weight       int    // unique edge weight; also the edgeMap key
	state        string // SE: Basic / Branch / Rejected
	targetNodeID int    // id of the node on the other end
}
// initializeEdgeMap builds the weight-keyed edge map from the parallel
// neighbour-id and neighbour-weight string slices; every edge starts in the
// Basic state. GHS requires unique edge weights — a duplicate weight would
// silently overwrite an earlier edge.
// Fixes vs original: conversion errors now panic (the original silently
// produced zero-valued edges) and the leftover debug fmt.Print is removed.
func initializeEdgeMap(nIDs []string, nWTs []string) map[int]*Edge {
	edgeMap := make(map[int]*Edge, len(nIDs))
	for i := range nIDs {
		intWT, err := strconv.Atoi(nWTs[i])
		if err != nil {
			panic(err)
		}
		intID, err := strconv.Atoi(nIDs[i])
		if err != nil {
			panic(err)
		}
		edgeMap[intWT] = &Edge{
			weight:       intWT,
			state:        BasicState,
			targetNodeID: intID,
		}
	}
	return edgeMap
}
// NewNode create a new node object and return a pointer to it.
// It builds the edge map from the parallel id/weight slices, starts the
// message server at peers[me], and launches the main loop goroutine.
func NewNode(peers map[int]string, nIDs []string, nWTs []string, me int) *Node {
	var err error
	// 0 is reserved to represent undefined vote/leader
	if me == 0 {
		panic(errors.New("Reserved instanceID('0')"))
	}
	edgeMap := initializeEdgeMap(nIDs, nWTs)
	node := &Node{
		done:    make(chan struct{}),
		peers:   peers,
		me:      me,
		state:   SleepingState, // every GHS node starts asleep
		edgeMap: edgeMap,
		// Buffered channels so senders are not blocked by slow consumers.
		logChan: make(chan string, 20),
		msgChan: make(chan *MessageArgs, 20*len(peers)),
	}
	node.serv, err = newServer(node, peers[me])
	if err != nil {
		panic(err)
	}
	go node.loop()
	return node
}
// Done returns a channel that will be used when the instance is done.
func (node *Node) Done() <-chan struct{} {
	return node.done
}
// loop starts the listener and the log-writer goroutine, wakes node 1 to
// kick off the algorithm, then processes incoming messages forever.
func (node *Node) loop() {
	err := node.serv.startListening()
	if err != nil {
		panic(err)
	}
	go node.writeLog()
	// Node 1 is the designated initiator; the others wait to be awakened.
	if node.me == 1 {
		node.awakeningResponse()
	}
	for {
		node.handler()
	}
}
// writeLog drains the node's log channel into logs/<me>.txt until a write
// fails. The file is closed exactly once via defer — the original also
// called f.Close() explicitly on the write-error path, double-closing it.
func (node *Node) writeLog() {
	fmt.Print("START LOG GO ROUTINE\n")
	f, err := os.Create(path.Join("logs", strconv.Itoa(node.me)+".txt"))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	// Record the initial node/edge state before streaming entries.
	node.logNode()
	node.logEdges()
	for {
		logEntry := <-node.logChan
		if _, err := f.WriteString(logEntry); err != nil {
			fmt.Println(err)
			return
		}
	}
}
// handler dispatches each incoming GHS message to its response procedure
// and logs the node/edge state after every message.
// NOTE(review): handler never returns (its for-loop is infinite), so the
// enclosing for-loop in loop() is redundant — confirm before simplifying.
func (node *Node) handler() {
	log.Println("Starting Handler")
	for {
		msg := <-node.msgChan
		node.messageLog(msg)
		switch msg.Type {
		case ConnectType:
			node.responseToConnect(msg)
		case InitiateType:
			node.responseToInitiate(msg)
		case TestType:
			node.responseToTest(msg)
		case AcceptType:
			node.responseToAccept(msg)
		case RejectType:
			node.responseToReject(msg)
		case ReportType:
			node.responseToReport(msg)
		case ChangeCoreType:
			node.responseToChangeCore(msg)
		}
		node.logNode()
		node.logEdges()
	}
}
// messageLog records an incoming message on the standard logger.
func (node *Node) messageLog(msg *MessageArgs) {
	log.Printf("[NODE %d] %s message received from node %d", node.me, msg.Type, msg.FromID)
}
// debugPrint is the single hook point for ad-hoc debug output.
func debugPrint(s string) {
	fmt.Print(s)
}
// awakeningResponse handles an external wake-up call; it is only valid on
// a node that is still asleep.
func (node *Node) awakeningResponse() {
	if node.state != SleepingState {
		debugPrint("Error: awakeningResponse called when node not in sleeping state")
		return
	}
	node.wakeupProcedure()
}
// getMinEdge returns the adjacent edge with the smallest weight below
// Infinite, or nil when no such edge exists.
func (node *Node) getMinEdge() *Edge {
	var best *Edge
	lowest := Infinite
	for _, e := range node.edgeMap {
		if e.weight < lowest {
			lowest, best = e.weight, e
		}
	}
	return best
}
// connect sends a Connect message for the given edge to targetNodeID,
// carrying this node's current level.
func (node *Node) connect(EdgeWeight int, targetNodeID int) {
	msg := &MessageArgs{
		FromID:     node.me,
		Type:       ConnectType,
		NodeLevel:  node.level,
		EdgeWeight: EdgeWeight,
	}
	go func(peer int) {
		node.sendMessage(peer, msg, &MessageReply{})
	}(targetNodeID)
}
// sendInitiate sends an Initiate carrying (level, fragment, state) over the
// edge identified by edgeWeight.
func (node *Node) sendInitiate(level int, fragment int, state string, edgeWeight int) {
	msg := &MessageArgs{
		FromID:       node.me,
		Type:         InitiateType,
		NodeLevel:    level,
		NodeFragment: fragment,
		NodeState:    state,
		EdgeWeight:   edgeWeight,
	}
	target := node.edgeMap[edgeWeight].targetNodeID
	go func(peer int) {
		node.sendMessage(peer, msg, &MessageReply{})
	}(target)
}
// sendTest probes the edge identified by edgeWeight, asking the node on the
// other end whether it belongs to a different fragment.
func (node *Node) sendTest(level int, fragment int, edgeWeight int) {
	msg := &MessageArgs{
		FromID:       node.me,
		Type:         TestType,
		NodeLevel:    level,
		NodeFragment: fragment,
		EdgeWeight:   edgeWeight,
	}
	target := node.edgeMap[edgeWeight].targetNodeID
	go func(peer int) {
		node.sendMessage(peer, msg, &MessageReply{})
	}(target)
}
// wakeupProcedure starts GHS at this node: the minimum adjacent edge
// becomes a Branch, the node forms a level-0 fragment in the Found state,
// and a Connect is sent over that edge.
// NOTE(review): getMinEdge returns nil when the node has no edge below
// Infinite, which would panic here — confirm inputs always contain an edge.
func (node *Node) wakeupProcedure() {
	minEdge := node.getMinEdge()
	minEdge.state = BranchState
	node.level = 0
	node.state = FoundState
	node.findCount = 0
	node.connect(minEdge.weight, minEdge.targetNodeID)
}
// placeMessageEndOfQueue requeues msg after a short delay so that other
// pending messages get processed first (used to defer not-yet-answerable
// messages, per GHS).
func (node *Node) placeMessageEndOfQueue(msg *MessageArgs) {
	const requeueDelay = 100 * time.Millisecond
	time.Sleep(requeueDelay)
	node.msgChan <- msg
}
// responseToConnect handles a Connect from a neighbouring fragment. A
// lower-level fragment is absorbed immediately; an equal-level Connect over
// an already-resolved (non-Basic) edge merges both fragments at level+1;
// otherwise the decision is deferred by requeueing the message.
// Fix vs original: a leftover debug fmt.Print of the edge map was removed.
func (node *Node) responseToConnect(msg *MessageArgs) {
	if node.state == SleepingState {
		node.wakeupProcedure()
	}
	if msg.NodeLevel < node.level {
		// Absorb: the lower-level fragment joins ours at our level.
		node.edgeMap[msg.EdgeWeight].state = BranchState
		node.sendInitiate(node.level, node.fragment, node.state, msg.EdgeWeight)
		if node.state == FindState {
			node.findCount++
		}
	} else {
		if node.edgeMap[msg.EdgeWeight].state == BasicState {
			// Our own probe of this edge is unresolved; retry later.
			node.placeMessageEndOfQueue(msg)
		} else {
			// Merge: both fragments connect over this edge at level+1.
			node.sendInitiate(node.level+1, node.fragment, FindState, msg.EdgeWeight)
		}
	}
}
// responseToInitiate adopts the sender's level, fragment identity, and
// state, then forwards the Initiate down every other Branch edge; in the
// Find state it also starts probing for this subtree's minimum outgoing
// edge.
func (node *Node) responseToInitiate(msg *MessageArgs) {
	node.level = msg.NodeLevel
	node.fragment = msg.NodeFragment
	node.state = msg.NodeState
	// Remember which edge leads back toward the fragment core.
	node.inBranch = msg.EdgeWeight
	node.bestEdgeWeight = Infinite
	for _, edge := range node.edgeMap {
		if edge.state == BranchState && edge.weight != msg.EdgeWeight {
			node.sendInitiate(node.level, node.fragment, node.state, edge.weight)
			if msg.NodeState == FindState {
				// Expect a Report back from each child we initiate.
				node.findCount ++
			}
		}
	}
	if msg.NodeState == FindState {
		node.testProcedure()
	}
}
// testProcedure probes the minimum-weight Basic edge with a Test message;
// when no Basic edge remains it clears the test edge and goes straight to
// reporting.
func (node *Node) testProcedure() {
	best := Infinite
	for _, e := range node.edgeMap {
		if e.state == BasicState && e.weight < best {
			best = e.weight
		}
	}
	if best == Infinite {
		node.testEdge = nil
		node.reportProcedure()
		return
	}
	node.testEdge = node.edgeMap[best]
	node.sendTest(node.level, node.fragment, best)
}
// sendAccept tells the prober on the other end of the edge that the edge
// leads outside its fragment.
func (node *Node) sendAccept(EdgeWeight int) {
	msg := &MessageArgs{
		FromID:     node.me,
		Type:       AcceptType,
		EdgeWeight: EdgeWeight,
	}
	target := node.edgeMap[EdgeWeight].targetNodeID
	go func(peer int) {
		node.sendMessage(peer, msg, &MessageReply{})
	}(target)
}
// sendReject tells the prober on the other end of the edge that the edge
// stays inside the same fragment.
func (node *Node) sendReject(EdgeWeight int) {
	msg := &MessageArgs{
		FromID:     node.me,
		Type:       RejectType,
		EdgeWeight: EdgeWeight,
	}
	target := node.edgeMap[EdgeWeight].targetNodeID
	go func(peer int) {
		node.sendMessage(peer, msg, &MessageReply{})
	}(target)
}
// responseToTest answers a neighbour probing whether this edge leaves its
// fragment. Probes from a higher level are deferred; a different fragment
// id gets Accept; the same fragment id gets Reject — except on the edge we
// are ourselves testing, where we re-run the test procedure instead (our
// own pending probe already answers it).
// Fix vs original: the Reject condition was inverted — per GHS, Reject is
// sent when the probed edge is NOT this node's test edge.
func (node *Node) responseToTest(msg *MessageArgs) {
	if node.state == SleepingState {
		node.wakeupProcedure()
	}
	if msg.NodeLevel > node.level {
		// Answering now could produce an incorrect Reject; retry later.
		node.placeMessageEndOfQueue(msg)
		return
	}
	if msg.NodeFragment != node.fragment {
		node.sendAccept(msg.EdgeWeight)
		return
	}
	if node.edgeMap[msg.EdgeWeight].state == BasicState {
		node.edgeMap[msg.EdgeWeight].state = RejectedState
	}
	if node.testEdge == nil || node.testEdge.weight != msg.EdgeWeight {
		node.sendReject(msg.EdgeWeight)
	} else {
		node.testProcedure()
	}
}
// responseToAccept records that the probed edge leaves the fragment and is
// therefore a candidate best outgoing edge, then tries to report upward.
func (node *Node) responseToAccept(msg *MessageArgs) {
	node.testEdge = nil
	if node.bestEdgeWeight > msg.EdgeWeight {
		node.bestEdgeWeight = msg.EdgeWeight
	}
	node.reportProcedure()
}
// responseToReject marks the probed edge as internal to the fragment and
// moves on to the next candidate edge.
func (node *Node) responseToReject(msg *MessageArgs) {
	edge := node.edgeMap[msg.EdgeWeight]
	if edge.state == BasicState {
		edge.state = RejectedState
	}
	node.testProcedure()
}
// reportProcedure reports this subtree's best outgoing edge up the
// in-branch edge once all children have reported and no probe is pending.
// Fix vs original: inBranch stores an edge *weight*, but it was passed
// directly as the peer node ID; every other sender resolves the weight via
// edgeMap[...].targetNodeID, so do the same here.
func (node *Node) reportProcedure() {
	if node.findCount == 0 && node.testEdge == nil {
		node.state = FoundState
		args := &MessageArgs{
			FromID:         node.me,
			Type:           ReportType,
			BestEdgeWeight: node.bestEdgeWeight,
		}
		go func(peer int) {
			reply := &MessageReply{}
			node.sendMessage(peer, args, reply)
		}(node.edgeMap[node.inBranch].targetNodeID)
	}
}
// responseToReport merges a child's best-edge report, or — when the report
// arrives over the core edge — decides between changing the core and
// halting (both core halves reporting Infinite means the MST is complete).
// NOTE(review): msg.FromID is a node id while node.inBranch stores an edge
// *weight*; this comparison only works if ids and weights coincide in the
// deployment — verify against reportProcedure's sender.
func (node *Node) responseToReport(msg *MessageArgs) {
	if msg.FromID != node.inBranch {
		// Report from a child subtree: fold in its best edge.
		node.findCount -= 1
		if msg.BestEdgeWeight < node.bestEdgeWeight {
			node.bestEdgeWeight = msg.BestEdgeWeight
		}
		node.reportProcedure()
	} else if node.state == FindState {
		// We are still searching ourselves: defer the core report.
		node.placeMessageEndOfQueue(msg)
	} else if msg.BestEdgeWeight > node.bestEdgeWeight {
		node.changeCoreProcedure()
	} else if msg.BestEdgeWeight == node.bestEdgeWeight && node.bestEdgeWeight == Infinite {
		node.halt()
	}
}
// changeCoreProcedure moves the fragment core toward the best outgoing
// edge: forward Change-core along an existing Branch edge, or — at the
// endpoint — send Connect over the best edge and mark it as a Branch.
func (node *Node) changeCoreProcedure() {
	if node.edgeMap[node.bestEdgeWeight].state == BranchState {
		args := &MessageArgs{
			FromID: node.me,
			Type:   ChangeCoreType,
		}
		go func(peer int) {
			reply := &MessageReply{}
			node.sendMessage(peer, args, reply)
		}(node.edgeMap[node.bestEdgeWeight].targetNodeID)
	} else {
		node.connect(node.bestEdgeWeight, node.edgeMap[node.bestEdgeWeight].targetNodeID)
		node.edgeMap[node.bestEdgeWeight].state = BranchState
	}
}
// logNode emits one timestamped state line for this node to the log channel.
func (node *Node) logNode() {
	entry := fmt.Sprintf("TIME >> %v >> NODE >> %d %s\n", time.Now().UnixNano(), node.me, node.state)
	node.logChan <- entry
}
// logEdges emits one timestamped line per adjacent edge to the log channel.
func (node *Node) logEdges() {
	for _, e := range node.edgeMap {
		line := fmt.Sprintf("TIME >> %v >> EDGE >> %d %d %d %s\n", time.Now().UnixNano(), node.me, e.targetNodeID, e.weight, e.state)
		node.logChan <- line
	}
}
// responseToChangeCore forwards the core change along the path toward the
// best outgoing edge; the message carries no extra payload.
func (node *Node) responseToChangeCore(msg *MessageArgs) {
	node.changeCoreProcedure()
}
// halt records the final node and edge state; the MST computation is done.
func (node *Node) halt() {
	node.logNode()
	node.logEdges()
}
|
package colorize
import "testing"
// TestNew checks the default palette: reset, default fg (39), default bg (49).
// Fix: pass a constant format string to t.Errorf (go vet printf rule).
func TestNew(t *testing.T) {
	c := NewColorize()
	if r := c.Paint("test"); r != "\033[0;39;49mtest\033[0m" {
		t.Errorf("Incorrect format - %s", r)
	}
}
// TestSkip verifies that SkipFormatting globally disables, then re-enables,
// ANSI escape generation.
// Fix: pass a constant format string to t.Errorf (go vet printf rule).
func TestSkip(t *testing.T) {
	c := NewColorize()
	c.Fg(Red)
	SkipFormatting(true)
	if r := c.Paint("test"); r != "test" {
		t.Errorf("Incorrect format - %s", r)
	}
	SkipFormatting(false)
	if r := c.Paint("test"); r != "\033[0;31;49mtest\033[0m" {
		t.Errorf("Incorrect format - %s", r)
	}
}
// TestRedFg checks the red-foreground escape (31) with default background.
// Fix: pass a constant format string to t.Errorf (go vet printf rule).
func TestRedFg(t *testing.T) {
	c := NewColorize()
	c.Fg(Red)
	if r := c.Paint("test"); r != "\033[0;31;49mtest\033[0m" {
		t.Errorf("Incorrect format - %s", r)
	}
}
// TestGreenBg checks the green-background escape (42) with default foreground.
// Fix: pass a constant format string to t.Errorf (go vet printf rule).
func TestGreenBg(t *testing.T) {
	c := NewColorize()
	c.Bg(Green)
	if r := c.Paint("test"); r != "\033[0;39;42mtest\033[0m" {
		t.Errorf("Incorrect format - %s", r)
	}
}
// TestBlueFgYellowBg checks a combined foreground (34) and background (43).
// Fix: pass a constant format string to t.Errorf (go vet printf rule).
func TestBlueFgYellowBg(t *testing.T) {
	c := NewColorize()
	c.Fg(Blue)
	c.Bg(Yellow)
	if r := c.Paint("test"); r != "\033[0;34;43mtest\033[0m" {
		t.Errorf("Incorrect format - %s", r)
	}
}
|
package cmd
import (
"context"
"fmt"
"net/http"
"sort"
"strings"
"time"
"github.com/google/go-github/v30/github"
"github.com/integr8ly/delorean/pkg/quay"
"github.com/integr8ly/delorean/pkg/services"
"github.com/integr8ly/delorean/pkg/utils"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/wait"
)
const (
	// commitIDLabelFilter is the quay manifest label carrying the
	// OpenShift build's git commit SHA.
	commitIDLabelFilter = "io.openshift.build.commit.id"
)
// tagReleaseOptions holds the configuration for one tag-release run.
type tagReleaseOptions struct {
	releaseVersion string // release version to tag
	branch         string // git branch whose HEAD gets tagged
	wait           bool   // poll quay until the source image appears
	waitInterval   int64  // polling interval, in minutes
	waitMax        int64  // max total polling time, in minutes
	quayRepos      string // comma-separated quay repositories
	olmType        string // OLM bundle type, passed to utils.NewVersion
	sourceTag      string // optional explicit source image tag (OSD pipeline)
}
// tagReleaseCmdOpts collects flag values for the tag-release command.
var tagReleaseCmdOpts = &tagReleaseOptions{}

// tagReleaseCmd represents the tagRelease command
var tagReleaseCmd = &cobra.Command{
	Use:   "tag-release",
	Short: "Tag the integreatly repo and image with the given release",
	Long: `Change a release tag using the given release version for the HEAD of the given branch.
Also create the same tag for the image that is built from the same commit`,
	Run: func(cmd *cobra.Command, args []string) {
		var ghToken string
		var quayToken string
		var err error
		// Both tokens are mandatory; fail fast when either is missing.
		if ghToken, err = requireValue(GithubTokenKey); err != nil {
			handleError(err)
		}
		if quayToken, err = requireValue(QuayTokenKey); err != nil {
			handleError(err)
		}
		ghClient := newGithubClient(ghToken)
		quayClient := newQuayClient(quayToken)
		repoInfo := &githubRepoInfo{owner: integreatlyGHOrg, repo: integreatlyOperatorRepo}
		// releaseVersion/olmType are presumably bound on a parent command's
		// persistent flags — confirm against the release command setup.
		tagReleaseCmdOpts.releaseVersion = releaseVersion
		tagReleaseCmdOpts.olmType = olmType
		if err = DoTagRelease(cmd.Context(), ghClient.Git, repoInfo, quayClient, tagReleaseCmdOpts); err != nil {
			handleError(err)
		}
	},
}
// DoTagRelease tags the HEAD of the configured branch with the release
// version and mirrors that tag onto the configured quay.io repositories.
// For a final release with an existing RC tag, the RC image and commit are
// promoted instead of the branch image.
// Fixes vs original: the second "Fetch git ref" log now names the RC tag
// ref it actually fetches (it printed the final tag name), and failing to
// create the image tags without --wait now returns a real error (the
// original returned a nil err, silently reporting success).
func DoTagRelease(ctx context.Context, ghClient services.GitService, gitRepoInfo *githubRepoInfo, quayClient *quay.Client, cmdOpts *tagReleaseOptions) error {
	rv, err := utils.NewVersion(cmdOpts.releaseVersion, cmdOpts.olmType)
	if err != nil {
		return err
	}
	fmt.Println("Fetch git ref:", fmt.Sprintf("refs/heads/%s", cmdOpts.branch))
	headRef, err := getGitRef(ctx, ghClient, gitRepoInfo, fmt.Sprintf("refs/heads/%s", cmdOpts.branch), false)
	if err != nil {
		return err
	}
	fmt.Println("Fetch git ref:", fmt.Sprintf("refs/tags/%s", rv.RCTagRef()))
	existingRCTagRef, err := getGitRef(ctx, ghClient, gitRepoInfo, fmt.Sprintf("refs/tags/%s", rv.RCTagRef()), true)
	if err != nil {
		return err
	}
	fmt.Println("Create git tag:", rv.TagName())
	if headRef == nil {
		return fmt.Errorf("can not find git ref: refs/heads/%s", cmdOpts.branch)
	}
	tagRef, err := createGitTag(ctx, ghClient, gitRepoInfo, rv.TagName(), headRef.GetObject().GetSHA())
	if err != nil {
		return err
	}
	fmt.Println("Git tag", rv.TagName(), "created:", tagRef.GetURL())
	if len(cmdOpts.quayRepos) == 0 {
		fmt.Println("Skip creating image tags as no quay repos specified")
		return nil
	}
	fmt.Println("Try to create image tags on quay.io:")
	quayRepos := cmdOpts.quayRepos
	quayDstTag := rv.TagName()
	quaySrcTag := rv.ReleaseBranchImageTag()
	// An OSDe2e pipeline may pass the source tag explicitly.
	if len(cmdOpts.sourceTag) > 0 {
		quaySrcTag = cmdOpts.sourceTag
	}
	commitSHA := headRef.GetObject().GetSHA()
	// Final release with an existing RC tag: promote the RC image/commit.
	if !rv.IsPreRelease() && existingRCTagRef != nil {
		quaySrcTag = strings.Replace(existingRCTagRef.GetRef(), "refs/tags/", "", -1)
		commitSHA = existingRCTagRef.GetObject().GetSHA()
	}
	ok := tryCreateQuayTag(ctx, quayClient, quayRepos, quaySrcTag, quayDstTag, commitSHA)
	if !ok {
		if !cmdOpts.wait {
			return fmt.Errorf("can not create image tag on quay.io")
		}
		fmt.Println("Wait for the latest image to be available on quay.io. Will check every", cmdOpts.waitInterval, "minutes for", cmdOpts.waitMax, "minutes")
		err = wait.Poll(time.Duration(cmdOpts.waitInterval)*time.Minute, time.Duration(cmdOpts.waitMax)*time.Minute, func() (bool, error) {
			ok = tryCreateQuayTag(ctx, quayClient, quayRepos, quaySrcTag, quayDstTag, commitSHA)
			if !ok {
				fmt.Println("Failed. Will try again later.")
			}
			return ok, nil
		})
		if err != nil {
			fmt.Println("Can not create image tag on quay.io")
			return err
		}
	}
	fmt.Println("Image tags created:", rv.TagName())
	return nil
}
// getGitRef fetches git refs matching ref; a 404 yields (nil, nil). With
// mostRecent set, the lexicographically greatest matching ref is returned;
// otherwise only an exact name match is.
// NOTE(review): "most recent" is a plain string sort, so e.g. "…-rc9"
// sorts after "…-rc10" — confirm rc counts stay below 10 or switch to a
// numeric comparison.
func getGitRef(ctx context.Context, client services.GitService, gitRepoInfo *githubRepoInfo, ref string, mostRecent bool) (*github.Reference, error) {
	gitRefs, resp, err := client.GetRefs(ctx, gitRepoInfo.owner, gitRepoInfo.repo, ref)
	if err != nil {
		// A missing ref is not an error for callers; they check for nil.
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			return nil, nil
		}
		return nil, err
	}
	//If mostRecent is true, sort the list of refs and return the most recent one based on it's name v1.1.1-rc3 vs v1.1.1-rc2 etc..
	if mostRecent && len(gitRefs) > 0 {
		sort.Slice(gitRefs, func(i, j int) bool {
			return gitRefs[i].GetRef() > gitRefs[j].GetRef()
		})
		return gitRefs[0], nil
	} else {
		for _, r := range gitRefs {
			if r.GetRef() == ref {
				return r, nil
			}
		}
	}
	return nil, nil
}
// createGitTag returns the ref for tag, creating refs/tags/<tag> at sha
// when it does not already exist. An existing tag pointing at a different
// commit is an error.
func createGitTag(ctx context.Context, client services.GitService, gitRepoInfo *githubRepoInfo, tag string, sha string) (*github.Reference, error) {
	refName := fmt.Sprintf("refs/tags/%s", tag)
	existing, err := getGitRef(ctx, client, gitRepoInfo, refName, false)
	if err != nil {
		return nil, err
	}
	if existing != nil {
		if existing.GetObject().GetSHA() == sha {
			return existing, nil
		}
		return nil, fmt.Errorf("tag %s is already created but pointing to a different commit. Please delete it first", tag)
	}
	newRef := &github.Reference{
		Ref: &refName,
		Object: &github.GitObject{
			SHA: &sha,
		},
	}
	created, _, err := client.CreateRef(ctx, gitRepoInfo.owner, gitRepoInfo.repo, newRef)
	if err != nil {
		return nil, err
	}
	return created, nil
}
// tryCreateQuayTag attempts to tag every repo in the comma-separated
// quayRepos list; it returns false when any repo fails (all repos are
// still attempted).
// Fix vs original: fmt.Printf replaces fmt.Println(fmt.Sprintf(...)).
func tryCreateQuayTag(ctx context.Context, quayClient *quay.Client, quayRepos string, quaySrcTag string, quayDstTag string, commitSHA string) bool {
	ok := true
	for _, r := range strings.Split(quayRepos, ",") {
		repo, tag := getImageRepoAndTag(r, quayDstTag)
		if err := createTagForImage(ctx, quayClient, *repo, quaySrcTag, *tag, commitSHA); err != nil {
			ok = false
			fmt.Println("Failed to create the image tag for", r, "due to error:", err)
		} else {
			fmt.Printf("Image tag '%s' created from tag '%s' with commit '%s' in repo '%s'\n", *tag, quaySrcTag, commitSHA, *repo)
		}
	}
	return ok
}
// createTagForImage points quayDstTag at the manifest currently tagged
// quaySrcTag in quayRepo, after verifying via the OpenShift commit-id
// manifest label that the image was built from commitSHA.
// Fixes vs original: "doesn't exit" typo in the error message, and the
// redundant else-after-return removed.
func createTagForImage(ctx context.Context, quayClient *quay.Client, quayRepo string, quaySrcTag string, quayDstTag string, commitSHA string) error {
	tags, _, err := quayClient.Tags.List(ctx, quayRepo, &quay.ListTagsOptions{
		SpecificTag: quaySrcTag,
	})
	if err != nil {
		return err
	}
	if len(tags.Tags) == 0 {
		return fmt.Errorf("tag %s doesn't exist", quaySrcTag)
	}
	tag := tags.Tags[0]
	commitID, _, err := quayClient.Manifests.ListLabels(ctx, quayRepo, *tag.ManifestDigest, &quay.ListManifestLabelsOptions{Filter: commitIDLabelFilter})
	if err != nil {
		return err
	}
	if len(commitID.Labels) == 0 {
		return fmt.Errorf("label %s doesn't exist", commitIDLabelFilter)
	}
	if *commitID.Labels[0].Value != commitSHA {
		return fmt.Errorf("can't find an image with given tag %s that matches the given commit SHA: %s", quaySrcTag, commitSHA)
	}
	_, err = quayClient.Tags.Change(ctx, quayRepo, quayDstTag, &quay.ChangTag{
		ManifestDigest: *tag.ManifestDigest,
	})
	return err
}
// getImageRepoAndTag splits "repo:tag" into its repo and tag parts; when s
// contains no colon the supplied defaultTag is used.
func getImageRepoAndTag(s string, defaultTag string) (*string, *string) {
	parts := strings.Split(s, ":")
	if len(parts) == 1 {
		return &s, &defaultTag
	}
	return &parts[0], &parts[1]
}
// init registers tag-release under the release command and declares its
// flags (branch, quay repos, wait behaviour, optional source tag).
func init() {
	releaseCmd.AddCommand(tagReleaseCmd)
	tagReleaseCmd.Flags().StringVarP(&tagReleaseCmdOpts.branch, "branch", "b", "master", "Branch to create the tag")
	tagReleaseCmd.Flags().StringVar(&tagReleaseCmdOpts.quayRepos, "quayRepos", fmt.Sprintf("%s,%s", DefaultIntegreatlyOperatorQuayRepo, DefaultIntegreatlyOperatorTestQuayRepo), "Quay repositories. Multiple repos can be specified and separated by ','")
	tagReleaseCmd.Flags().BoolVarP(&tagReleaseCmdOpts.wait, "wait", "w", false, "Wait for the quay tag to be created (it could take up to 1 hour)")
	tagReleaseCmd.Flags().Int64Var(&tagReleaseCmdOpts.waitInterval, "wait-interval", 5, "Specify the interval to check tags in quay while waiting. In minutes.")
	tagReleaseCmd.Flags().Int64Var(&tagReleaseCmdOpts.waitMax, "wait-max", 90, "Specify the max wait time for tags be to created in quay. In minutes.")
	tagReleaseCmd.Flags().StringVar(&tagReleaseCmdOpts.sourceTag, "sourceTag", "", "OSD Source Tag passed through pipeline.")
}
|
package main
// ans accumulates results across the recursion.
// NOTE(review): package-level state makes letterCasePermutation
// non-reentrant and unsafe for concurrent calls.
var ans []string

// letterCasePermutation returns every string obtainable from S by toggling
// the case of its letters (LeetCode 784); non-letter bytes are unchanged.
func letterCasePermutation(S string) []string {
	ans = make([]string, 0)
	letterCasePermutationExec([]byte(S), []byte{})
	return ans
}
// letterCasePermutationExec walks bytes depth-first, extending seq with the
// original byte and, for letters, its case-flipped twin; completed
// sequences are appended to ans.
// The repeated append(seq, …) calls can share seq's backing array, but each
// sibling's overwrite happens only after the previous subtree has copied
// its results via string(seq), so the sequential DFS keeps this correct.
func letterCasePermutationExec(bytes []byte, seq []byte) {
	if len(bytes) == 0 {
		ans = append(ans, string(seq))
		return
	}
	// Lowercase letter: also explore its uppercase form.
	if bytes[0] >= 'a' && bytes[0] <= 'z' {
		letterCasePermutationExec(bytes[1:], append(seq, bytes[0]-'a'+'A'))
	}
	// Uppercase letter: also explore its lowercase form.
	if bytes[0] >= 'A' && bytes[0] <= 'Z' {
		letterCasePermutationExec(bytes[1:], append(seq, bytes[0]))
	}
	letterCasePermutationExec(bytes[1:], append(seq, bytes[0]))
}
/*
总结
1. 这题刚开始做的时候,在letterCasePermutationExec函数中采用了一个for循环遍历bytes,然后发现是错误的。
理清思路后,去除了for循环,然后就AC了。
*/ |
package main
import (
"context"
"fmt"
"github.com/gin-gonic/gin"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
)
// main starts a gin HTTP server on :8085 and shuts it down gracefully: on
// SIGINT/SIGTERM it stops accepting new connections and gives in-flight
// requests up to 10 seconds to finish.
// Fix vs original: signal.Notify requires a buffered channel — with an
// unbuffered one a signal delivered before the receive is dropped.
func main() {
	r := gin.Default()
	r.GET("/test", func(c *gin.Context) {
		time.Sleep(8 * time.Second) // simulate a slow request
		c.String(200, "hello test\n")
	})
	srv := &http.Server{
		Addr:    ":8085",
		Handler: r,
	}
	go func() {
		// ErrServerClosed is the expected result of Shutdown, not a failure.
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatalf("listen: %s\n", err)
		}
	}()
	// Catch termination signals; buffer of 1 per signal.Notify docs.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	fmt.Println("看chan阻塞")
	<-quit // block until a signal arrives
	fmt.Print("拿到信号了")
	log.Print("...shutdown server...")
	// Bound the drain time for in-flight requests.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Fatal("server shutdown", err)
	}
	log.Println("server exiting")
}
|
package goSolution
import "sort"
// BuildingDiff records one positive climb of Diff units at building index
// Index.
type BuildingDiff struct {
	Diff, Index int
}

// BuildingDiffs orders climbs by descending difficulty: larger Diff first,
// ties broken by smaller Index (sort.Interface implementation).
type BuildingDiffs []BuildingDiff

func (a BuildingDiffs) Len() int { return len(a) }

func (a BuildingDiffs) Less(i, j int) bool {
	if a[i].Diff != a[j].Diff {
		return a[i].Diff > a[j].Diff
	}
	return a[i].Index < a[j].Index
}

func (a BuildingDiffs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// CanReachTarget reports whether every climb at or before index target can
// be covered, spending ladders on the climbs seen first (the largest, given
// the descending order) and bricks on the rest.
func (a BuildingDiffs) CanReachTarget(target int, bricks int, ladders int) bool {
	for _, d := range a {
		if d.Index > target {
			continue
		}
		if ladders > 0 {
			ladders--
			continue
		}
		if bricks < d.Diff {
			return false
		}
		bricks -= d.Diff
	}
	return true
}
// furthestBuilding returns the largest building index reachable from index
// 0 given bricks and ladders (LeetCode 1642). It collects the positive
// climbs, sorts them hardest-first, and binary-searches the furthest index
// for which ladders can cover the biggest climbs and bricks the rest.
func furthestBuilding(heights []int, bricks int, ladders int) int {
	climbs := BuildingDiffs{}
	for i := 1; i < len(heights); i++ {
		if d := heights[i] - heights[i-1]; d > 0 {
			climbs = append(climbs, BuildingDiff{Index: i, Diff: d})
		}
	}
	sort.Sort(climbs)
	best := 0
	lo, hi := 0, len(heights)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		if climbs.CanReachTarget(mid, bricks, ladders) {
			best = mid
			lo = mid + 1
		} else {
			hi = mid - 1
		}
	}
	return best
}
|
package main
import "defrag"
import "fmt"
// main seeds the defrag grid from the puzzle input and reports how many
// bits are occupied (part 1).
func main() {
	fmt.Println("Part1 - How Many 1's in the Grid?")
	const seed = "ffayrhll"
	grid := defrag.SeedToGrid(seed)
	occupied := defrag.CountOccupiedBits(grid)
	fmt.Println("\t", occupied)
}
|
package main
import "testing"
// TestFetchRepoRoot verifies that repoRootForImportDynamic resolves vanity
// import paths to their canonical repository URLs.
// Fix vs original: after t.Error(err) the code dereferenced rr, which may
// be nil on error — panicking and aborting the remaining cases; now it
// continues to the next case instead.
func TestFetchRepoRoot(t *testing.T) {
	for _, tc := range []struct {
		imp string
		exp string
	}{
		{"gopkg.in/alecthomas/kingpin.v2", "https://github.com/alecthomas/kingpin"},
		{"k8s.io/api/v1", "https://github.com/kubernetes/api"},
		{"k8s.io/kubernetes/pkg/apis/core/validation", "https://github.com/kubernetes/kubernetes"},
		{"gonum.org/v1/gonum", "https://github.com/gonum/gonum"},
		{"gonum.org/v1/hdf5", "https://github.com/gonum/hdf5"},
	} {
		rr, err := repoRootForImportDynamic(tc.imp, IgnoreMod)
		if err != nil {
			t.Error(err)
			continue
		}
		if rr.Repo != tc.exp {
			t.Error("got: ", rr, "expected: ", tc.exp)
		}
	}
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// random returns a pseudo-random int in [min, max).
// Fix vs original: seed with nanosecond precision — Unix() has one-second
// granularity, so repeated calls within the same second replayed the same
// sequence.
func random(min, max int) int {
	rand.Seed(time.Now().UnixNano())
	return rand.Intn(max-min) + min
}
// main runs a number-guessing game: the player guesses a hidden number in
// [1,10) with higher/lower hints and may replay.
// Fixes vs original: replay loops instead of recursively re-entering main
// (which grew the stack on every round), and the always-true equality check
// after the guessing loop is removed.
func main() {
	for {
		hiddenNum := random(1, 10)
		var guessNum int
		amt := 1
		fmt.Println("Please enter a number 1-10... ")
		fmt.Scan(&guessNum)
		for ; guessNum != hiddenNum; fmt.Scan(&guessNum) {
			amt++
			if guessNum < hiddenNum {
				fmt.Printf("Too Low! Guess Higher... \n")
			} else {
				fmt.Printf("Too High! Lower... \n")
			}
		}
		fmt.Printf("That's right! %d was the number! \n", hiddenNum)
		fmt.Printf("It took you %d guesses to get it correctly! \n", amt)
		var play string
		fmt.Println("Would you like to play again? Y/N")
		fmt.Scan(&play)
		if play == "Y" || play == "y" {
			continue
		}
		if play == "n" || play == "N" {
			fmt.Println("Goodbye!")
		} else {
			fmt.Println("Not valid response. You don't deserve to play again...")
		}
		return
	}
}
|
package matchserver
import (
"context"
"io"
"log"
"time"
"github.com/ekotlikoff/gochess/internal/model"
pb "github.com/ekotlikoff/gochess/api"
"google.golang.org/grpc"
)
// createEngineClient dials the chess engine at engineAddr, blocking for at
// most engineConnTimeout. On success it stores the client/connection on the
// server and enables bot matching; on failure it only logs the error.
func (matchingServer *MatchingServer) createEngineClient(
	engineAddr string, engineConnTimeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), engineConnTimeout)
	defer cancel()
	opts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()}
	conn, err := grpc.DialContext(ctx, engineAddr, opts...)
	if err != nil {
		log.Println("ERROR: Failed to connect to chess engine at addr: " +
			engineAddr + " with error: " + err.Error())
		return
	}
	log.Println("Successfully connected to chess engine")
	matchingServer.botMatchingEnabled = true
	matchingServer.engineClient = pb.NewRustChessClient(conn)
	matchingServer.engineClientConn = conn
}
// engineSession drives a single bot game against the chess engine over a
// bidirectional GRPC stream: it waits for the match to start, sends the
// game-start message, then relays the opponent's moves to the engine while
// engineReceiveLoop applies the engine's replies.
func (matchingServer *MatchingServer) engineSession(botPlayer *Player) {
	stream, err := matchingServer.engineClient.Game(context.Background())
	if err != nil {
		log.Println("FATAL: Failed to instantiate GRPC conn to engine")
		// Bug fix: previously execution fell through here and dereferenced
		// the nil stream below. Bail out instead.
		return
	}
	err = botPlayer.WaitForMatchStart()
	if err != nil {
		log.Println("Bot failed to find match")
		return
	}
	botPlayer.SetSearchingForMatch(false)
	// Translate the model color into the protobuf enum.
	botPBColor := pb.GameStart_BLACK
	if botPlayer.Color() == model.White {
		botPBColor = pb.GameStart_WHITE
	}
	gameStartMsg := pb.GameMessage{
		Request: &pb.GameMessage_GameStart{
			GameStart: &pb.GameStart{
				PlayerColor: botPBColor,
				PlayerGameTime: &pb.GameTime{
					PlayerMainTime: uint32(botPlayer.MatchMaxTimeMs()),
				},
			},
		},
	}
	if err := stream.Send(&gameStartMsg); err != nil {
		log.Printf("FATAL: Failed to send gameStartMsg to bot: %v", err)
		// TODO resign in this case?
		return
	}
	gameOver := botPlayer.match.gameOverChan
	waitc := make(chan struct{})
	// Engine replies are handled concurrently; waitc closes when the
	// receive loop finishes.
	go engineReceiveLoop(matchingServer, botPlayer, stream, waitc)
	for {
		select {
		case move := <-botPlayer.OpponentPlayedMove:
			moveMsg := moveToPB(move)
			// NOTE(review): Send error is ignored here; the receive loop
			// will notice a broken stream and resign on the bot's behalf.
			stream.Send(&moveMsg)
		case <-botPlayer.ResponseChanAsync:
			// Do nothing.
		case <-gameOver:
			stream.CloseSend()
			<-waitc // wait for the receive loop to drain and exit
			botPlayer.WaitForMatchOver()
			botPlayer.Reset()
			return
		}
	}
}
// engineReceiveLoop consumes messages from the engine's GRPC stream and
// applies each received move to the bot player's match until the stream
// ends. It signals completion (success or failure) by closing waitc.
func engineReceiveLoop(
	matchingServer *MatchingServer, botPlayer *Player,
	stream pb.RustChess_GameClient, waitc chan struct{}) {
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			close(waitc)
			return
		}
		if err != nil {
			log.Printf("Failed to receive a msg, closing engine conn: %v", err)
			// Resign on the bot's behalf so the opponent is not left
			// waiting in a match the engine can no longer play.
			if botPlayer.GetMatch() != nil && !botPlayer.GetMatch().GameOver() {
				botPlayer.RequestChanAsync <- RequestAsync{
					Resign: true,
				}
			}
			// Disable bot matching until a new engine connection is made.
			matchingServer.botMatchingEnabled = false
			close(waitc)
			return
		}
		botMove := pbToMove(in.GetChessMove())
		botPlayer.requestChanSync <- botMove
		// If the move was rejected, retry it as a queen promotion —
		// presumably the engine's message omits the promotion piece for
		// pawn promotions (TODO confirm against the engine protocol).
		if !(<-botPlayer.ResponseChanSync).MoveSuccess {
			promoteTo := model.Queen
			botMove.PromoteTo = &promoteTo
			botPlayer.requestChanSync <- botMove
			<-botPlayer.ResponseChanSync
		}
	}
}
// moveToPB converts a model move request into the protobuf game message
// sent to the engine. The destination square is computed by applying the
// move delta to the original position.
func moveToPB(move model.MoveRequest) pb.GameMessage {
	var promote pb.PromotePiece
	if move.PromoteTo != nil {
		switch *move.PromoteTo {
		case model.Queen:
			promote.Piece = pb.PromotePiece_QUEEN
		case model.Rook:
			promote.Piece = pb.PromotePiece_ROOK
		case model.Bishop:
			promote.Piece = pb.PromotePiece_BISHOP
		case model.Knight:
			promote.Piece = pb.PromotePiece_KNIGHT
		}
	}
	from := &pb.Position{
		File: uint32(move.Position.File),
		Rank: uint32(move.Position.Rank),
	}
	to := &pb.Position{
		File: uint32(int8(move.Position.File) + move.Move.X),
		Rank: uint32(int8(move.Position.Rank) + move.Move.Y),
	}
	return pb.GameMessage{
		Request: &pb.GameMessage_ChessMove{
			ChessMove: &pb.ChessMove{
				OriginalPosition: from,
				NewPosition:      to,
				PromotePiece:     &promote,
			},
		},
	}
}
// pbToMove converts a protobuf chess move into a model move request,
// expressing the destination as a signed delta from the original square.
// (uint32 subtraction wraps, but the int8 truncation recovers the correct
// small signed delta.)
func pbToMove(msg *pb.ChessMove) model.MoveRequest {
	orig := msg.OriginalPosition
	dest := msg.NewPosition
	return model.MoveRequest{
		Position: model.Position{
			File: uint8(orig.File),
			Rank: uint8(orig.Rank),
		},
		Move: model.Move{
			X: int8(dest.File - orig.File),
			Y: int8(dest.Rank - orig.Rank),
		},
	}
}
|
package cosmos
import "testing"
// TestLineString verifies LineString construction both point-by-point and
// via the variadic constructor, and checks coordinate round-tripping and
// the reported geometry type.
func TestLineString(t *testing.T) {
	coords := Coordinates{{5.0, 10.0}, {10.0, 11.0}, {5.0, 10.0}}
	ls := NewLineString()
	for _, p := range coords {
		ls.AddPoint(p[0], p[1])
	}
	if len(ls.Coordinates) != 3 {
		t.Fatalf("expected %d coords, got: %d", 3, len(ls.Coordinates))
	}
	for i, c := range *ls.Coords() {
		if c[0] != coords[i][0] {
			t.Fatalf("Invalid longitude. Expected: %f, got: %f", coords[i][0], c[0])
		}
		if c[1] != coords[i][1] {
			t.Fatalf("Invalid latitude. Expected: %f, got: %f", coords[i][1], c[1])
		}
	}
	ls2 := NewLineString(Coordinate{5.0, 10}, Coordinate{10, 11}, Coordinate{5, 10})
	if len(ls2.Coordinates) != 3 {
		// Bug fix: this message previously reported len(ls.Coordinates),
		// i.e. the wrong LineString's length.
		t.Fatalf("expected %d coords got: %d", 3, len(ls2.Coordinates))
	}
	if ls2.GeoType() != "LineString" {
		t.Fatalf("expected: LineString, got: %s", ls2.GeoType())
	}
}
// TestPolygon verifies that NewPolygon preserves the supplied coordinate
// rings and reports the "Polygon" geometry type.
func TestPolygon(t *testing.T) {
	rings := []Coordinates{{{5.0, 10.0}, {10.0, 11.0}, {5.0, 10.0}}}
	poly := NewPolygon(rings...)
	if len(poly.Coords()) != len(rings) {
		t.Fatalf("expected %d coords, got: %d", len(rings), len(poly.Coords()))
	}
	for i, ring := range poly.Coords() {
		for j, got := range ring {
			want := rings[i][j]
			if got[0] != want[0] {
				t.Fatalf("Invalid longitude. Expected: %f, got: %f", want[0], got[0])
			}
			if got[1] != want[1] {
				t.Fatalf("Invalid latitude. Expected: %f, got: %f", want[1], got[1])
			}
		}
	}
	if poly.GeoType() != "Polygon" {
		t.Fatalf("expected: Polygon, got: %s", poly.GeoType())
	}
}
// TestPoint verifies that NewPoint stores the coordinate pair and reports
// the "Point" geometry type.
func TestPoint(t *testing.T) {
	want := Coordinate{5.0, 10.0}
	point := NewPoint(want)
	got := point.Coords()
	if got[0] != want[0] {
		t.Fatalf("Invalid longitude. Expected: %f, got: %f", want[0], got[0])
	}
	if got[1] != want[1] {
		t.Fatalf("Invalid latitude. Expected: %f, got: %f", want[1], got[1])
	}
	if point.GeoType() != "Point" {
		t.Fatalf("expected: Point, got: %s", point.GeoType())
	}
}
|
// Copyright (c) 2017 jelmersnoeck
// Copyright (c) 2018 Aiven, Helsinki, Finland. https://aiven.io/
package aiven
import (
"fmt"
"github.com/aiven/terraform-provider-aiven/pkg/cache"
"log"
"net/url"
"strings"
"time"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
)
// Provider returns a terraform.ResourceProvider.
// It declares the provider-level api_token setting (falling back to the
// AIVEN_TOKEN environment variable), registers every data source and
// resource, and builds the Aiven API client at configure time.
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			"api_token": {
				Type:        schema.TypeString,
				Required:    true,
				Sensitive:   true,
				DefaultFunc: schema.EnvDefaultFunc("AIVEN_TOKEN", nil),
				Description: "Aiven Authentication Token",
			},
		},
		DataSourcesMap: map[string]*schema.Resource{
			"aiven_connection_pool":                datasourceConnectionPool(),
			"aiven_database":                       datasourceDatabase(),
			"aiven_kafka_acl":                      datasourceKafkaACL(),
			"aiven_kafka_topic":                    datasourceKafkaTopic(),
			"aiven_kafka_connector":                datasourceKafkaConnector(),
			"aiven_kafka_schema":                   datasourceKafkaSchema(),
			"aiven_kafka_schema_configuration":     datasourceKafkaSchemaConfiguration(),
			"aiven_elasticsearch_acl":              datasourceElasticsearchACL(),
			"aiven_project":                        datasourceProject(),
			"aiven_project_user":                   datasourceProjectUser(),
			"aiven_project_vpc":                    datasourceProjectVPC(),
			"aiven_vpc_peering_connection":         datasourceVPCPeeringConnection(),
			"aiven_service":                        datasourceService(),
			"aiven_service_integration":            datasourceServiceIntegration(),
			"aiven_service_integration_endpoint":   datasourceServiceIntegrationEndpoint(),
			"aiven_service_user":                   datasourceServiceUser(),
			"aiven_account":                        datasourceAccount(),
			"aiven_account_team":                   datasourceAccountTeam(),
			"aiven_account_team_project":           datasourceAccountTeamProject(),
			"aiven_account_team_member":            datasourceAccountTeamMember(),
			"aiven_mirrormaker_replication_flow":   datasourceMirrorMakerReplicationFlowTopic(),
			"aiven_account_authentication":         datasourceAccountAuthentication(),
		},
		ResourcesMap: map[string]*schema.Resource{
			"aiven_connection_pool":                resourceConnectionPool(),
			"aiven_database":                       resourceDatabase(),
			"aiven_kafka_acl":                      resourceKafkaACL(),
			"aiven_kafka_topic":                    resourceKafkaTopic(),
			"aiven_kafka_connector":                resourceKafkaConnector(),
			"aiven_kafka_schema":                   resourceKafkaSchema(),
			"aiven_kafka_schema_configuration":     resourceKafkaSchemaConfiguration(),
			"aiven_elasticsearch_acl":              resourceElasticsearchACL(),
			"aiven_project":                        resourceProject(),
			"aiven_project_user":                   resourceProjectUser(),
			"aiven_project_vpc":                    resourceProjectVPC(),
			"aiven_vpc_peering_connection":         resourceVPCPeeringConnection(),
			"aiven_service":                        resourceService(),
			"aiven_service_integration":            resourceServiceIntegration(),
			"aiven_service_integration_endpoint":   resourceServiceIntegrationEndpoint(),
			"aiven_service_user":                   resourceServiceUser(),
			"aiven_account":                        resourceAccount(),
			"aiven_account_team":                   resourceAccountTeam(),
			"aiven_account_team_project":           resourceAccountTeamProject(),
			"aiven_account_team_member":            resourceAccountTeamMember(),
			"aiven_mirrormaker_replication_flow":   resourceMirrorMakerReplicationFlow(),
			"aiven_account_authentication":         resourceAccountAuthentication(),
		},
		ConfigureFunc: func(d *schema.ResourceData) (interface{}, error) {
			// Prime the shared Kafka topic cache before any resource uses it.
			_ = cache.NewTopicCache()
			return aiven.NewTokenClient(d.Get("api_token").(string), "terraform-provider-aiven/")
		},
	}
}
// optionalString returns the value stored under key as a string, or the
// empty string when the value is absent or not a string.
func optionalString(d *schema.ResourceData, key string) string {
	if s, ok := d.Get(key).(string); ok {
		return s
	}
	return ""
}
// optionalStringPointer returns a pointer to the string stored under key,
// or nil when the key is unset or the value is not a string.
func optionalStringPointer(d *schema.ResourceData, key string) *string {
	if val, ok := d.GetOk(key); ok {
		if str, ok := val.(string); ok {
			return &str
		}
	}
	return nil
}
// optionalIntPointer returns a pointer to the int stored under key, or nil
// when the key is unset or the value is not an int.
func optionalIntPointer(d *schema.ResourceData, key string) *int {
	if val, ok := d.GetOk(key); ok {
		if n, ok := val.(int); ok {
			return &n
		}
	}
	return nil
}
// buildResourceID joins the given parts into a composite Terraform resource
// ID, path-escaping each part so embedded slashes survive the round trip
// through splitResourceID.
func buildResourceID(parts ...string) string {
	escaped := make([]string, 0, len(parts))
	for _, part := range parts {
		escaped = append(escaped, url.PathEscape(part))
	}
	return strings.Join(escaped, "/")
}
// splitResourceID splits a composite resource ID into at most n parts and
// path-unescapes each one (the inverse of buildResourceID). A part that
// fails to unescape becomes the empty string — the error is intentionally
// dropped, matching the original behavior.
func splitResourceID(resourceID string, n int) []string {
	parts := strings.SplitN(resourceID, "/", n)
	for idx := range parts {
		unescaped, _ := url.PathUnescape(parts[idx])
		parts[idx] = unescaped
	}
	return parts
}
// splitResourceID2 splits a composite resource ID into exactly two parts.
// Panics (index out of range) if the ID has fewer than two segments.
func splitResourceID2(resourceID string) (string, string) {
	parts := splitResourceID(resourceID, 2)
	return parts[0], parts[1]
}
// splitResourceID3 splits a composite resource ID into exactly three parts.
// Panics (index out of range) if the ID has fewer than three segments.
func splitResourceID3(resourceID string) (string, string, string) {
	parts := splitResourceID(resourceID, 3)
	return parts[0], parts[1], parts[2]
}
// splitResourceID4 splits a composite resource ID into exactly four parts.
// Panics (index out of range) if the ID has fewer than four segments.
func splitResourceID4(resourceID string) (string, string, string, string) {
	parts := splitResourceID(resourceID, 4)
	return parts[0], parts[1], parts[2], parts[3]
}
// resourceExists interprets an Aiven API error: it reports whether the
// resource still exists and which error (if any) should be surfaced.
// A 404 means "gone, not an error"; other non-2xx statuses are real errors;
// anything unrecognized is treated conservatively as "exists".
func resourceExists(err error) (bool, error) {
	if err == nil {
		return true, nil
	}
	aivenError, ok := err.(aiven.Error)
	if !ok {
		return true, err
	}
	switch {
	case aivenError.Status == 404:
		return false, nil
	case aivenError.Status < 200 || aivenError.Status >= 300:
		return true, err
	default:
		return true, nil
	}
}
// createOnlyDiffSuppressFunc suppresses diffs for attributes that only
// matter at creation time: once the resource has an ID (i.e. it already
// exists), any change to the attribute is ignored.
func createOnlyDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool {
	return len(d.Id()) > 0
}
// emptyObjectDiffSuppressFunc suppresses a diff for service user
// configuration options when fields are not set by the user but have
// default or previously defined values.
func emptyObjectDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool {
	switch {
	case old == "1" && new == "0" && strings.HasSuffix(k, ".#"):
		// A map inside a list that holds only default values is seen by
		// Terraform as absent (length zero), producing a bogus no-op update.
		return true
	case new == "" && old != "":
		// A field cleared to null (empty string) after previously holding
		// a non-empty value.
		return true
	case (new == "0" && old == "false") || (new == "1" && old == "true"):
		// Terraform 0.11 bug: booleans round-trip as "0"/"1".
		return true
	default:
		return false
	}
}
// Terraform does not allow default values for arrays but the IP filter user config value
// has default. We don't want to force users to always define explicit value just because
// of the Terraform restriction so suppress the change from default to empty (which would
// be nonsensical operation anyway)
// ipFilterArrayDiffSuppressFunc ignores the ip_filter list shrinking from
// its one-element default to an unset (zero-length) list.
func ipFilterArrayDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool {
	return old == "1" && new == "0" && strings.HasSuffix(k, ".ip_filter.#")
}
// ipFilterValueDiffSuppressFunc ignores the first ip_filter entry changing
// from its server-side default ("0.0.0.0/0", i.e. allow all) to unset.
func ipFilterValueDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool {
	return old == "0.0.0.0/0" && new == "" && strings.HasSuffix(k, ".ip_filter.0")
}
// validateDurationString is a ValidateFunc that ensures a string parses
// as time.Duration format (e.g. "30s", "5m"). It returns a single error
// for unparseable values and never emits warnings.
func validateDurationString(v interface{}, k string) (ws []string, errors []error) {
	_, err := time.ParseDuration(v.(string))
	if err != nil {
		log.Printf("[DEBUG] invalid duration: %s", err)
		errors = append(errors, fmt.Errorf("%q: invalid duration", k))
	}
	return
}
// generateClientTimeoutsSchema generates client_timeout Terraform schema
// based on name of the timeout and default duration
// Each entry in timeouts becomes an optional string attribute validated as
// a time.Duration; the map values (defaults) are not otherwise used here.
// Deprecated: generateClientTimeoutsSchema exists for historical compatibility
// and should not be used. To set timeouts use native TF timeouts functionality.
func generateClientTimeoutsSchema(timeouts map[string]time.Duration) *schema.Schema {
	schemaTimeouts := map[string]*schema.Schema{}
	for name := range timeouts {
		schemaTimeouts[name] = &schema.Schema{
			Type:         schema.TypeString,
			Description:  name + " timeout",
			Optional:     true,
			ValidateFunc: validateDurationString,
		}
	}
	// Wrap the per-timeout attributes in a single-element set block.
	return &schema.Schema{
		Type:        schema.TypeSet,
		MaxItems:    1,
		Description: "Custom Terraform Client timeouts",
		ForceNew:    true,
		Optional:    true,
		Deprecated:  "use timeouts instead",
		Elem: &schema.Resource{
			Schema: schemaTimeouts,
		},
	}
}
// getTimeoutHelper extracts the named client timeout from a resource's
// "client_timeout" set, returning 0 when the set or the entry is absent.
// Deprecated: getTimeoutHelper exists for historical compatibility
// and should not be used. To set timeouts use native TF timeouts functionality.
func getTimeoutHelper(d *schema.ResourceData, name string) (time.Duration, error) {
	raw, ok := d.GetOk("client_timeout")
	if !ok || raw.(*schema.Set).Len() == 0 {
		return 0, nil
	}
	for _, entry := range raw.(*schema.Set).List() {
		settings := entry.(map[string]interface{})
		value, present := settings[name]
		if !present {
			continue
		}
		// ParseDuration yields (0, err) on failure, matching the previous
		// explicit error path.
		return time.ParseDuration(value.(string))
	}
	return 0, nil
}
// flattenToString renders each element of a as its default fmt
// representation, preserving order.
func flattenToString(a []interface{}) []string {
	out := make([]string, len(a))
	for i := range a {
		out[i] = fmt.Sprint(a[i])
	}
	return out
}
|
package web
import (
"os"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/logger"
"github.com/gofiber/session/v2"
"github.com/gofiber/template/html"
"github.com/iamtraining/forum/apiserver"
"github.com/iamtraining/forum/store"
)
// Handler wires the fiber application and the backing store for the forum
// web UI.
type Handler struct {
	store   store.Store
	App     *fiber.App
	// NOTE(review): Session is never assigned in NewHandler — confirm it is
	// initialized elsewhere before use.
	Session *session.Session
}
// NewHandler builds the fiber application for the forum: it configures the
// HTML template engine, request logging, and all web/API routes, backed by
// the given store.
// NOTE(review): the cfg parameter is unused in this function — confirm
// whether it is still needed.
func NewHandler(store *store.Store, cfg apiserver.Config) *Handler {
	engine := html.New("./templates/", ".html")
	h := &Handler{
		store: *store,
		App: fiber.New(fiber.Config{
			Views: engine,
		}),
	}
	// Per-entity sub-handlers share the same store.
	threads := ThreadHandler{store: store}
	posts := PostHandler{store: store}
	comments := CommentHandler{store: store}
	users := UserHandler{store: store}
	h.App.Use(logger.New(logger.Config{
		Next:       nil,
		Format:     "[${time}] ${status} - ${latency} - ${method} ${path}\n",
		TimeFormat: "2006-01-02 15:04:05",
		TimeZone:   "Local",
		Output:     os.Stderr,
	}))
	web := h.App.Group("")
	web.Get("/", h.Home())
	web.Post("/login", users.PrepareLogin, users.CommitLogin)
	web.Get("/login", h.LoginPage())
	web.Post("/register", users.Register)
	web.Get("/logout", users.Logout)
	web.Get("/register", h.RegisterPage())
	h.App.Post("/thread/:id/delete", threads.deleteThread)
	routes := h.App.Group("/threads")
	//routes.Post("/", threads.getThread)
	//routes.Get("/", threads.getThreads)
	// Registration order matters for overlapping patterns below (e.g.
	// "/new" vs "/:threadID") — do not reorder without checking precedence.
	routes.Patch("/", threads.updateThread)
	routes.Post("/:id/delete", threads.deleteThread)
	routes.Post("/", threads.createThread)
	routes.Post("/posts", posts.getPost)
	routes.Get("/posts/:id", posts.getPostsByThread)
	routes.Patch("/posts", posts.updatePost)
	routes.Delete("/posts/delete", posts.deletePost)
	routes.Post("/:id", posts.createPost)
	routes.Post("/:threadID/:postID", comments.createComment)
	routes.Delete("/posts/comments/delete", comments.deleteComment)
	routes.Patch("/posts/comments", comments.updateComment)
	routes.Get("/", h.ThreadList())
	routes.Get("/:id/new", h.CreatePost())
	routes.Get("/:threadID/:postID", h.Post())
	routes.Get("/new", h.CreateThread())
	routes.Get("/:threadID", h.Thread())
	return h
}
|
package server
import (
"net/http"
"github.com/asaskevich/govalidator"
"github.com/go-chi/render"
log "github.com/sirupsen/logrus"
)
// response is the JSON envelope returned by every API endpoint.
type response struct {
	Data   interface{} `json:"data"`   // payload on success; nil on error responses
	Errors []apiError  `json:"errors"` // populated on failure; nil on success
	Status int         `json:"status"` // HTTP status code, also echoed in the body
}
// Render implements the chi render.Renderer interface: it records the HTTP
// status on the request and, for 500 responses, logs every attached error.
func (response *response) Render(w http.ResponseWriter, r *http.Request) error {
	render.Status(r, response.Status)
	if response.Status != 500 {
		return nil
	}
	for _, e := range response.Errors {
		log.Warnf("API ERROR: %s %s %s", e.Reference, e.Field, e.Message)
	}
	return nil
}
// apiError describes one error entry in a response envelope.
type apiError struct {
	Message   string `json:"message"` // human-readable description
	Field     string `json:"field"`   // offending input field, when applicable
	Reference string `json:"ref"`     // internal reference for correlating logs
}
// createSuccessResponse wraps data in a success envelope; the optional
// status overrides the default of http.StatusOK.
func createSuccessResponse(data interface{}, status ...int) response {
	rsp := response{
		Data:   data,
		Errors: nil,
		Status: http.StatusOK,
	}
	if len(status) > 0 {
		rsp.Status = status[0]
	}
	return rsp
}
// createAPIErrorsResponse wraps a slice of apiErrors in an error envelope;
// the optional status overrides the default of http.StatusInternalServerError.
func createAPIErrorsResponse(errors []apiError, status ...int) response {
	rsp := response{
		Data:   nil,
		Errors: errors,
		Status: http.StatusInternalServerError,
	}
	if len(status) > 0 {
		rsp.Status = status[0]
	}
	return rsp
}
// createAPIErrorResponse wraps a single apiError in an error envelope; the
// optional status overrides the default of http.StatusInternalServerError.
func createAPIErrorResponse(err apiError, status ...int) response {
	rsp := response{
		Data:   nil,
		Errors: []apiError{err},
		Status: http.StatusInternalServerError,
	}
	if len(status) > 0 {
		rsp.Status = status[0]
	}
	return rsp
}
// createErrorResponse converts a plain error into an error envelope using
// its message; the optional status overrides the default of
// http.StatusInternalServerError.
func createErrorResponse(err error, status ...int) response {
	rsp := response{
		Data:   nil,
		Errors: []apiError{{Message: err.Error()}},
		Status: http.StatusInternalServerError,
	}
	if len(status) > 0 {
		rsp.Status = status[0]
	}
	return rsp
}
// respondSuccess renders data as a success payload (default HTTP 200).
func respondSuccess(w http.ResponseWriter, r *http.Request, data interface{}, status ...int) {
	rsp := createSuccessResponse(data, status...)
	_ = render.Render(w, r, &rsp) // render errors intentionally ignored
}
// respondAPIError renders a single apiError (default HTTP 500).
func respondAPIError(w http.ResponseWriter, r *http.Request, err apiError, status ...int) {
	rsp := createAPIErrorResponse(err, status...)
	_ = render.Render(w, r, &rsp) // render errors intentionally ignored
}
// respondError renders a plain error's message (default HTTP 500).
func respondError(w http.ResponseWriter, r *http.Request, err error, status ...int) {
	rsp := createErrorResponse(err, status...)
	_ = render.Render(w, r, &rsp) // render errors intentionally ignored
}
// respondValidationErrors renders govalidator validation failures as
// field-level apiErrors. Non-govalidator errors fall back to respondError.
// govalidator.Errors may nest (a slice of per-field Errors, each holding
// govalidator.Error values), so both levels are unwrapped here.
func respondValidationErrors(w http.ResponseWriter, r *http.Request, err error, status ...int) {
	validationErrors, ok := err.(govalidator.Errors)
	if !ok {
		respondError(w, r, err, status...)
		return
	}
	var apiErrors []apiError
	for _, fieldErrors := range validationErrors {
		fieldValidationErrors, ok := fieldErrors.(govalidator.Errors)
		if !ok {
			// Top-level entry is a single govalidator.Error (or something
			// else entirely, which degrades to a bare message).
			fieldValidationError, ok := fieldErrors.(govalidator.Error)
			if ok {
				apiError := apiError{
					Field:   fieldValidationError.Name,
					Message: fieldValidationError.Err.Error(),
				}
				apiErrors = append(apiErrors, apiError)
				continue
			}
			apiErrors = append(apiErrors, apiError{Message: fieldErrors.Error()})
			continue
		}
		// Nested slice: unwrap each per-field error individually.
		for _, fieldError := range fieldValidationErrors {
			fieldValidationError, ok := fieldError.(govalidator.Error)
			if !ok {
				apiErrors = append(apiErrors, apiError{Message: fieldError.Error()})
				continue
			}
			apiError := apiError{
				Field:   fieldValidationError.Name,
				Message: fieldValidationError.Err.Error(),
			}
			apiErrors = append(apiErrors, apiError)
		}
	}
	rsp := createAPIErrorsResponse(apiErrors, status...)
	_ = render.Render(w, r, &rsp)
}
|
package api
import (
"context"
"testing"
"github.com/brigadecore/brigade/v2/apiserver/internal/meta"
metaTesting "github.com/brigadecore/brigade/v2/apiserver/internal/meta/testing" // nolint: lll
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
// TestUserMarshalJSON verifies that User serializes with the expected API
// version and "User" kind metadata.
func TestUserMarshalJSON(t *testing.T) {
	metaTesting.RequireAPIVersionAndType(t, User{}, "User")
}
// TestNewUsersService verifies that the constructor wires each supplied
// dependency into the corresponding field of the returned usersService.
func TestNewUsersService(t *testing.T) {
	usersStore := &mockUsersStore{}
	sessionsStore := &mockSessionsStore{}
	roleAssignmentsStore := &mockRoleAssignmentsStore{}
	projectRoleAssignmentsStore := &mockProjectRoleAssignmentsStore{}
	svc, ok := NewUsersService(
		alwaysAuthorize,
		usersStore,
		sessionsStore,
		roleAssignmentsStore,
		projectRoleAssignmentsStore,
		UsersServiceConfig{},
	).(*usersService)
	require.True(t, ok)
	require.NotNil(t, svc.authorize)
	require.Same(t, usersStore, svc.usersStore)
	require.Same(t, sessionsStore, svc.sessionsStore)
	require.Same(t, roleAssignmentsStore, svc.roleAssignmentsStore)
	require.Same(t, projectRoleAssignmentsStore, svc.projectRoleAssignmentsStore)
}
// TestUsersServiceList exercises List across authorization failure, the
// third-party-auth-disabled case, a store error, and success.
// (Renamed from TestUserServiceList for consistency with the other
// TestUsersService* tests in this file; go test discovers it the same way.)
func TestUsersServiceList(t *testing.T) {
	testCases := []struct {
		name       string
		service    UsersService
		assertions func(error)
	}{
		{
			name: "unauthorized",
			service: &usersService{
				authorize: neverAuthorize,
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.IsType(t, &meta.ErrAuthorization{}, err)
			},
		},
		{
			name: "user management functions unavailable",
			service: &usersService{
				authorize: alwaysAuthorize,
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: false,
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.IsType(t, &meta.ErrNotSupported{}, err)
			},
		},
		{
			name: "error getting users from store",
			service: &usersService{
				authorize: alwaysAuthorize,
				usersStore: &mockUsersStore{
					ListFn: func(
						context.Context,
						meta.ListOptions,
					) (meta.List[User], error) {
						return meta.List[User]{}, errors.New("error listing users")
					},
				},
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: true,
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error listing users")
				require.Contains(t, err.Error(), "error retrieving users from store")
			},
		},
		{
			name: "success",
			service: &usersService{
				authorize: alwaysAuthorize,
				usersStore: &mockUsersStore{
					ListFn: func(
						context.Context,
						meta.ListOptions,
					) (meta.List[User], error) {
						return meta.List[User]{}, nil
					},
				},
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: true,
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			_, err :=
				testCase.service.List(context.Background(), meta.ListOptions{})
			testCase.assertions(err)
		})
	}
}
// TestUsersServiceGet exercises Get across authorization failure, the
// third-party-auth-disabled case, a store not-found error, and success.
func TestUsersServiceGet(t *testing.T) {
	testUser := User{
		ObjectMeta: meta.ObjectMeta{
			ID: "tony@starkindustries.com",
		},
	}
	testCases := []struct {
		name       string
		service    UsersService
		assertions func(user User, err error)
	}{
		{
			name: "unauthorized",
			service: &usersService{
				authorize: neverAuthorize,
			},
			assertions: func(_ User, err error) {
				require.Error(t, err)
				require.IsType(t, &meta.ErrAuthorization{}, err)
			},
		},
		{
			name: "user management functions unavailable",
			service: &usersService{
				authorize: alwaysAuthorize,
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: false,
				},
			},
			assertions: func(_ User, err error) {
				require.Error(t, err)
				require.IsType(t, &meta.ErrNotSupported{}, err)
			},
		},
		{
			name: "with error from store",
			service: &usersService{
				authorize: alwaysAuthorize,
				usersStore: &mockUsersStore{
					GetFn: func(context.Context, string) (User, error) {
						return User{}, &meta.ErrNotFound{}
					},
				},
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: true,
				},
			},
			assertions: func(user User, err error) {
				require.Error(t, err)
				// The store error is wrapped; unwrap before type-checking.
				require.IsType(t, &meta.ErrNotFound{}, errors.Cause(err))
			},
		},
		{
			name: "success",
			service: &usersService{
				authorize: alwaysAuthorize,
				usersStore: &mockUsersStore{
					GetFn: func(context.Context, string) (User, error) {
						return testUser, nil
					},
				},
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: true,
				},
			},
			assertions: func(user User, err error) {
				require.NoError(t, err)
				require.Equal(t, testUser, user)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			user, err := testCase.service.Get(context.Background(), testUser.ID)
			testCase.assertions(user, err)
		})
	}
}
// TestUsersServiceLock exercises Lock across authorization failure, the
// third-party-auth-disabled case, store failures (locking the user and
// deleting the user's sessions), and success.
func TestUsersServiceLock(t *testing.T) {
	testCases := []struct {
		name       string
		service    UsersService
		assertions func(error)
	}{
		{
			name: "unauthorized",
			service: &usersService{
				authorize: neverAuthorize,
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.IsType(t, &meta.ErrAuthorization{}, err)
			},
		},
		{
			name: "user management functions unavailable",
			service: &usersService{
				authorize: alwaysAuthorize,
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: false,
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.IsType(t, &meta.ErrNotSupported{}, err)
			},
		},
		{
			name: "error updating user in store",
			service: &usersService{
				authorize: alwaysAuthorize,
				usersStore: &mockUsersStore{
					LockFn: func(context.Context, string) error {
						return errors.New("store error")
					},
				},
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: true,
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "store error")
				require.Contains(t, err.Error(), "error locking user")
			},
		},
		{
			name: "error deleting user sessions from store",
			service: &usersService{
				authorize: alwaysAuthorize,
				usersStore: &mockUsersStore{
					LockFn: func(context.Context, string) error {
						return nil
					},
				},
				sessionsStore: &mockSessionsStore{
					DeleteByUserFn: func(c context.Context, s string) error {
						return errors.New("store error")
					},
				},
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: true,
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "store error")
				require.Contains(t, err.Error(), "error deleting user")
				require.Contains(t, err.Error(), "sessions from store")
			},
		},
		{
			name: "success",
			service: &usersService{
				authorize: alwaysAuthorize,
				usersStore: &mockUsersStore{
					LockFn: func(context.Context, string) error {
						return nil
					},
				},
				sessionsStore: &mockSessionsStore{
					DeleteByUserFn: func(c context.Context, s string) error {
						return nil
					},
				},
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: true,
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			err :=
				testCase.service.Lock(context.Background(), "tony@starkindustries.com")
			testCase.assertions(err)
		})
	}
}
// TestUsersServiceUnlock exercises Unlock across authorization failure, the
// third-party-auth-disabled case, a store failure, and success.
func TestUsersServiceUnlock(t *testing.T) {
	testCases := []struct {
		name       string
		service    UsersService
		assertions func(error)
	}{
		{
			name: "unauthorized",
			service: &usersService{
				authorize: neverAuthorize,
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.IsType(t, &meta.ErrAuthorization{}, err)
			},
		},
		{
			name: "user management functions unavailable",
			service: &usersService{
				authorize: alwaysAuthorize,
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: false,
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.IsType(t, &meta.ErrNotSupported{}, err)
			},
		},
		{
			name: "error updating user in store",
			service: &usersService{
				authorize: alwaysAuthorize,
				usersStore: &mockUsersStore{
					UnlockFn: func(context.Context, string) error {
						return errors.New("store error")
					},
				},
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: true,
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "store error")
				require.Contains(t, err.Error(), "error unlocking user")
			},
		},
		{
			name: "success",
			service: &usersService{
				authorize: alwaysAuthorize,
				usersStore: &mockUsersStore{
					UnlockFn: func(context.Context, string) error {
						return nil
					},
				},
				config: UsersServiceConfig{
					ThirdPartyAuthEnabled: true,
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			err := testCase.service.Unlock(
				context.Background(),
				"tony@starkindustries.com",
			)
			testCase.assertions(err)
		})
	}
}
// TestUsersServiceDelete exercises Delete across authorization failure and
// a failure at each deletion stage (role assignments, project role
// assignments, the user record, the user's sessions), plus success.
func TestUsersServiceDelete(t *testing.T) {
	testCases := []struct {
		name       string
		service    UsersService
		assertions func(error)
	}{
		{
			name: "unauthorized",
			service: &usersService{
				authorize: neverAuthorize,
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.IsType(t, &meta.ErrAuthorization{}, err)
			},
		},
		{
			name: "error deleting role assignments",
			service: &usersService{
				authorize: alwaysAuthorize,
				roleAssignmentsStore: &mockRoleAssignmentsStore{
					RevokeByPrincipalFn: func(context.Context, PrincipalReference) error {
						return errors.New("store error")
					},
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "store error")
				require.Contains(t, err.Error(), "error deleting user")
				require.Contains(t, err.Error(), "role assignments")
			},
		},
		{
			name: "error deleting project role assignments",
			service: &usersService{
				authorize: alwaysAuthorize,
				roleAssignmentsStore: &mockRoleAssignmentsStore{
					RevokeByPrincipalFn: func(context.Context, PrincipalReference) error {
						return nil
					},
				},
				projectRoleAssignmentsStore: &mockProjectRoleAssignmentsStore{
					RevokeByPrincipalFn: func(context.Context, PrincipalReference) error {
						return errors.New("store error")
					},
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "store error")
				require.Contains(t, err.Error(), "error deleting user")
				require.Contains(t, err.Error(), "project role assignments")
			},
		},
		{
			name: "error deleting user",
			service: &usersService{
				authorize: alwaysAuthorize,
				roleAssignmentsStore: &mockRoleAssignmentsStore{
					RevokeByPrincipalFn: func(context.Context, PrincipalReference) error {
						return nil
					},
				},
				projectRoleAssignmentsStore: &mockProjectRoleAssignmentsStore{
					RevokeByPrincipalFn: func(context.Context, PrincipalReference) error {
						return nil
					},
				},
				usersStore: &mockUsersStore{
					DeleteFn: func(context.Context, string) error {
						return errors.New("store error")
					},
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "store error")
				require.Contains(
					t,
					err.Error(),
					"error deleting user",
				)
			},
		},
		{
			name: "error deleting user sessions",
			service: &usersService{
				authorize: alwaysAuthorize,
				roleAssignmentsStore: &mockRoleAssignmentsStore{
					RevokeByPrincipalFn: func(context.Context, PrincipalReference) error {
						return nil
					},
				},
				projectRoleAssignmentsStore: &mockProjectRoleAssignmentsStore{
					RevokeByPrincipalFn: func(context.Context, PrincipalReference) error {
						return nil
					},
				},
				usersStore: &mockUsersStore{
					DeleteFn: func(context.Context, string) error {
						return nil
					},
				},
				sessionsStore: &mockSessionsStore{
					DeleteByUserFn: func(context.Context, string) error {
						return errors.New("store error")
					},
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "store error")
				require.Contains(
					t,
					err.Error(),
					"error deleting user",
				)
			},
		},
		{
			name: "success",
			service: &usersService{
				authorize: alwaysAuthorize,
				roleAssignmentsStore: &mockRoleAssignmentsStore{
					RevokeByPrincipalFn: func(context.Context, PrincipalReference) error {
						return nil
					},
				},
				projectRoleAssignmentsStore: &mockProjectRoleAssignmentsStore{
					RevokeByPrincipalFn: func(context.Context, PrincipalReference) error {
						return nil
					},
				},
				usersStore: &mockUsersStore{
					DeleteFn: func(context.Context, string) error {
						return nil
					},
				},
				sessionsStore: &mockSessionsStore{
					DeleteByUserFn: func(context.Context, string) error {
						return nil
					},
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			testCase.assertions(
				testCase.service.Delete(
					context.Background(),
					"tony@starkindustries.com",
				),
			)
		})
	}
}
// mockUsersStore is a test double for the users store; each field lets a
// test inject the behavior of the corresponding method. Calling a method
// whose Fn field is nil panics, which surfaces unexpected calls in tests.
type mockUsersStore struct {
	CreateFn func(context.Context, User) error
	ListFn   func(context.Context, meta.ListOptions) (meta.List[User], error)
	GetFn    func(context.Context, string) (User, error)
	LockFn   func(context.Context, string) error
	UnlockFn func(context.Context, string) error
	DeleteFn func(context.Context, string) error
}

// Create delegates to CreateFn.
func (m *mockUsersStore) Create(ctx context.Context, user User) error {
	return m.CreateFn(ctx, user)
}

// List delegates to ListFn.
func (m *mockUsersStore) List(
	ctx context.Context,
	opts meta.ListOptions,
) (meta.List[User], error) {
	return m.ListFn(ctx, opts)
}

// Get delegates to GetFn.
func (m *mockUsersStore) Get(ctx context.Context, id string) (User, error) {
	return m.GetFn(ctx, id)
}

// Lock delegates to LockFn.
func (m *mockUsersStore) Lock(ctx context.Context, id string) error {
	return m.LockFn(ctx, id)
}

// Unlock delegates to UnlockFn.
func (m *mockUsersStore) Unlock(ctx context.Context, id string) error {
	return m.UnlockFn(ctx, id)
}

// Delete delegates to DeleteFn.
func (m *mockUsersStore) Delete(ctx context.Context, id string) error {
	return m.DeleteFn(ctx, id)
}
|
package throttle_limiter
// funnel describes a leaky-bucket style rate limiter.
type funnel interface {
	// Throttle applies leaky-bucket throttling to a behavior.
	// key: the behavior being limited
	// capacity: initial capacity of the funnel
	// opsPerSecond: number of tokens generated per second
	// quota: number of tokens each occurrence of the behavior consumes
	Throttle(key string, capacity int, opsPerSecond int, quota int)
}
|
package main
import (
"flag"
"sync"
"github.com/k8guard/k8guard-discover/caching"
"github.com/k8guard/k8guard-discover/discover"
"github.com/k8guard/k8guard-discover/messaging"
"github.com/k8guard/k8guard-discover/metrics"
lib "github.com/k8guard/k8guardlibs"
)
var (
	// Version and Build identify the binary; presumably injected at link
	// time via -ldflags — confirm against the build scripts.
	Version string
	Build   string
)

// err is package-scoped; main() checks it before starting workers.
// NOTE(review): it is never assigned anywhere in this file — confirm
// whether another file in the package sets it, otherwise the check is dead.
var err error

// init registers Prometheus metrics and initializes the cache before main
// runs.
func init() {
	metrics.PromRegister()
	caching.InitCache()
}
// main runs k8guard-discover in one of two modes: messaging mode (-kmode)
// scans every entity type once, publishing violations to the broker, and
// exits; otherwise it serves the HTTP API (router mode).
func main() {
	messagingMode := flag.Bool("kmode", false, "messaging mode, no router")
	flag.Parse()

	if !*messagingMode {
		// Router mode: stats handler runs in the background while the HTTP
		// server blocks.
		go messaging.InitStatsHandler()
		startHttpServer()
		return
	}

	defer messaging.CloseBroker()
	messaging.InitBroker()
	// Test if broker is there before making api calls.
	messaging.TestBrokerWithTestMessage()

	lib.Log.Infof("Starting in message mode using %s as broker", lib.Cfg.MessageBroker)
	lib.Log.Info("Version: ", Version)
	lib.Log.Info("BuildNumber: ", Build)
	if err != nil {
		panic(err)
	}

	// One scan per entity type, all run concurrently. Keeping them in a
	// slice ties the WaitGroup count to the task list instead of the
	// previous hard-coded Add(7).
	tasks := []func(){
		func() { discover.GetBadNamespaces(discover.GetAllNamespaceFromApi(), true) },
		func() { discover.GetBadDeploys(discover.GetAllDeployFromApi(), true) },
		func() { discover.GetBadDaemonSets(discover.GetAllDaemonSetFromApi(), true) },
		func() { discover.GetBadIngresses(discover.GetAllIngressFromApi(), true) },
		func() { discover.GetBadPods(discover.GetAllPodsFromApi(), true) },
		func() { discover.GetBadJobs(discover.GetAllJobFromApi(), true) },
		func() { discover.GetBadCronJobs(discover.GetAllCronJobFromApi(), true) },
	}
	var waitGroup sync.WaitGroup
	waitGroup.Add(len(tasks))
	for _, task := range tasks {
		go func(run func()) {
			defer waitGroup.Done()
			run()
		}(task)
	}
	waitGroup.Wait()
}
|
package util
import (
"bufio"
"context"
"fmt"
"github.com/yahoo/vssh"
"io"
"log"
"os"
"strings"
"time"
)
// SSHObject holds the connection settings for one SSH target.
type SSHObject struct {
	Host     string // host name or IP address
	Port     string // TCP port, kept as a string so it joins directly into host:port
	Username string
	Password string
}
// ExecuteOriginCmd runs cmd on the remote host over SSH with the given
// timeout and returns the response text plus the remote exit status. On a
// transport error the error text and exit status are returned instead.
// NOTE(review): on success this returns the stderr text (errTxt), not
// stdout — confirm that is intended before changing callers.
func (ssh SSHObject) ExecuteOriginCmd(cmd string, timeout time.Duration) (msg string, exitCode int) {
	var outTxt, errTxt string
	vs := vssh.New().Start()
	config := vssh.GetConfigUserPass(ssh.Username, ssh.Password)
	// Fixed: the address was previously built with fmt.Sprintf(host+":"+port),
	// passing the address as a *format string* — any '%' in host/port would
	// mangle it. Use explicit verbs instead.
	vs.AddClient(fmt.Sprintf("%s:%s", ssh.Host, ssh.Port), config, vssh.SetMaxSessions(2))
	vs.Wait()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	respChan := vs.Run(ctx, cmd, timeout)
	for resp := range respChan {
		if err := resp.Err(); err != nil {
			log.Println(err)
			// (an unreachable `continue` previously followed this return)
			return err.Error(), resp.ExitStatus()
		}
		outTxt, errTxt, _ = resp.GetText(vs)
		return errTxt, resp.ExitStatus()
	}
	return outTxt, 0
}
// ReadSSHInfoFromFile parses hostsFile and returns one SSHObject per line
// that defines all four fields. Expected line format (space-separated
// key=value tokens, order free):
//
//	host=1.2.3.4 port=22 username=root password=secret
//
// It panics if the file cannot be opened.
func ReadSSHInfoFromFile(hostsFile string) (objects []SSHObject) {
	f, err := os.OpenFile(hostsFile, os.O_RDONLY, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	rd := bufio.NewReader(f)
	for {
		line, err := rd.ReadString('\n') // read one '\n'-terminated line
		// Collect key=value tokens. The previous implementation used
		// strings.Trim (which takes a *cutset*, not a prefix) on the whole
		// line and required all four key names inside a single token, so it
		// never produced correct values.
		fields := map[string]string{}
		for _, tok := range strings.Fields(line) {
			if kv := strings.SplitN(tok, "=", 2); len(kv) == 2 {
				fields[kv[0]] = kv[1]
			}
		}
		host, okHost := fields["host"]
		port, okPort := fields["port"]
		username, okUser := fields["username"]
		password, okPass := fields["password"]
		if okHost && okPort && okUser && okPass {
			objects = append(objects, SSHObject{
				Host:     host,
				Port:     port,
				Username: username,
				Password: password,
			})
		}
		if err != nil {
			// io.EOF is the normal end of input; any other error also ends
			// the scan, but is worth logging.
			if err != io.EOF {
				log.Println(err)
			}
			break
		}
	}
	return objects
}
|
package arrays
// findPairs returns the number of unique k-diff pairs in nums: unordered
// pairs of values (a, b) with |a-b| == k, counted over distinct values.
// A negative k (or empty input) yields 0.
func findPairs(nums []int, k int) int {
	if len(nums) == 0 || k < 0 {
		return 0
	}
	freq := make(map[int]int, len(nums))
	for _, n := range nums {
		freq[n]++
	}
	pairs := 0
	if k == 0 {
		// A zero-diff pair needs a value that occurs at least twice.
		for _, c := range freq {
			if c > 1 {
				pairs++
			}
		}
		return pairs
	}
	// For k > 0, each distinct value contributes exactly one pair when its
	// k-larger partner is also present.
	for n := range freq {
		if _, ok := freq[n+k]; ok {
			pairs++
		}
	}
	return pairs
}
|
/*
* Minio Client (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"os"
"regexp"
"runtime"
"strings"
"github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/probe"
)
// Check if the target URL represents folder. It may or may not exist yet.
// isTargetURLDir reports whether targetURL refers to a folder. The target
// may or may not exist yet; when it does not, the decision is inferred
// from the URL's shape.
func isTargetURLDir(targetURL string) bool {
	parsed := newClientURL(targetURL)
	_, content, err := url2Stat(targetURL)
	if err == nil {
		// Target exists: it is a folder exactly when stat says so.
		return content.Type.IsDir()
	}
	_, aliasExpanded, _ := mustExpandAlias(targetURL)
	if aliasExpanded == targetURL {
		// Not an aliased URL at all; never treated as a folder.
		return false
	}
	if parsed.Path == string(parsed.Separator) && parsed.Scheme != "" {
		// Bare root of a remote scheme is not a folder target.
		return false
	}
	// A trailing separator marks an intended folder.
	return strings.HasSuffix(parsed.Path, string(parsed.Separator))
}
// getSourceStream opens a reader for the given (possibly aliased) URL.
func getSourceStream(urlStr string) (reader io.Reader, err *probe.Error) {
	alias, urlStrFull, _, err := expandAlias(urlStr)
	if err != nil {
		return nil, err.Trace(urlStr)
	}
	return getSourceStreamFromAlias(alias, urlStrFull)
}
// getSourceStreamFromAlias opens a reader for urlStr using the client
// resolved for alias.
func getSourceStreamFromAlias(alias string, urlStr string) (reader io.Reader, err *probe.Error) {
	sourceClnt, err := newClientFromAlias(alias, urlStr)
	if err != nil {
		return nil, err.Trace(alias, urlStr)
	}
	reader, err = sourceClnt.Get()
	if err != nil {
		return nil, err.Trace(alias, urlStr)
	}
	return reader, nil
}
// putTargetStreamFromAlias writes reader to urlStr using the client
// resolved for alias, reporting progress via progress. It returns the
// number of bytes written (also on failure, for partial writes).
func putTargetStreamFromAlias(alias string, urlStr string, reader io.Reader, size int64, progress io.Reader) (int64, *probe.Error) {
	targetClnt, err := newClientFromAlias(alias, urlStr)
	if err != nil {
		return 0, err.Trace(alias, urlStr)
	}
	// Content type is guessed from the URL (e.g. its extension).
	contentType := guessURLContentType(urlStr)
	var n int64
	n, err = targetClnt.Put(reader, size, contentType, progress)
	if err != nil {
		return n, err.Trace(alias, urlStr)
	}
	return n, nil
}
// putTargetStream writes reader to the (possibly aliased) URL, with no
// progress reporting. If size is -1, it reads until EOF.
func putTargetStream(urlStr string, reader io.Reader, size int64) (int64, *probe.Error) {
	alias, urlStrFull, _, err := expandAlias(urlStr)
	if err != nil {
		return 0, err.Trace(alias, urlStr)
	}
	return putTargetStreamFromAlias(alias, urlStrFull, reader, size, nil)
}
// copySourceStreamFromAlias performs a server-side copy of source to
// urlStr using the client resolved for alias. (The original comment named
// this "copyTargetStreamFromAlias"; the function is copySourceStreamFromAlias.)
func copySourceStreamFromAlias(alias string, urlStr string, source string, size int64, progress io.Reader) *probe.Error {
	targetClnt, err := newClientFromAlias(alias, urlStr)
	if err != nil {
		return err.Trace(alias, urlStr)
	}
	err = targetClnt.Copy(source, size, progress)
	if err != nil {
		return err.Trace(alias, urlStr)
	}
	return nil
}
// newClientFromAlias gives a new client interface for matching
// alias entry in the mc config file. If no matching host config entry
// is found, fs client is returned.
func newClientFromAlias(alias string, urlStr string) (Client, *probe.Error) {
	hostCfg := mustGetHostConfig(alias)
	if hostCfg == nil {
		// No matching host config. So we treat it like a
		// filesystem.
		fsClient, err := fsNew(urlStr)
		if err != nil {
			return nil, err.Trace(alias, urlStr)
		}
		return fsClient, nil
	}
	// We have a valid alias and hostConfig. We populate the
	// credentials from the match found in the config file.
	s3Config := new(Config)
	// The key pair retrieved from the environment (MC_SECRET_<alias>,
	// formatted "access:secret") overrides the pair in the config file.
	keysPairEnv := os.Getenv("MC_SECRET_" + alias)
	keysPairArray := strings.Split(keysPairEnv, ":")
	var accessKeyEnv, secretKeyEnv string
	if len(keysPairArray) >= 1 {
		accessKeyEnv = keysPairArray[0]
	}
	if len(keysPairArray) >= 2 {
		secretKeyEnv = keysPairArray[1]
	}
	if len(keysPairEnv) > 0 &&
		isValidAccessKey(accessKeyEnv) && isValidSecretKey(secretKeyEnv) {
		s3Config.AccessKey = accessKeyEnv
		s3Config.SecretKey = secretKeyEnv
	} else {
		// Environment keys present but malformed: warn and fall back to the
		// config file's credentials.
		if len(keysPairEnv) > 0 {
			console.Errorln("Access/Secret keys associated to `" + alias + "' " +
				"are found in your environment but not suitable for use. " +
				"Falling back to the standard config.")
		}
		s3Config.AccessKey = hostCfg.AccessKey
		s3Config.SecretKey = hostCfg.SecretKey
	}
	// Remaining client settings come from the host config and globals.
	s3Config.Signature = hostCfg.API
	s3Config.AppName = "mc"
	s3Config.AppVersion = mcVersion
	s3Config.AppComments = []string{os.Args[0], runtime.GOOS, runtime.GOARCH}
	s3Config.HostURL = urlStr
	s3Config.Debug = globalDebug
	s3Client, err := s3New(s3Config)
	if err != nil {
		return nil, err.Trace(alias, urlStr)
	}
	return s3Client, nil
}
// urlRgx - verify if aliased url is real URL.
var urlRgx = regexp.MustCompile("^https?://")

// newClient gives a new client interface for the aliased URL. Raw
// http(s) URLs with no matching alias are rejected so the user is guided
// to add an alias first.
func newClient(aliasedURL string) (Client, *probe.Error) {
	alias, urlStrFull, hostCfg, err := expandAlias(aliasedURL)
	if err != nil {
		return nil, err.Trace(aliasedURL)
	}
	// Verify if the aliasedURL is a real URL, fail in those cases
	// indicating the user to add alias.
	if hostCfg == nil && urlRgx.MatchString(aliasedURL) {
		return nil, errInvalidAliasedURL(aliasedURL).Trace(aliasedURL)
	}
	return newClientFromAlias(alias, urlStrFull)
}
|
package main
import (
"compress/gzip"
f "fmt"
"io/ioutil"
"os"
)
// main decompresses hello.txt.gz in the working directory and prints its
// contents to stdout. Errors are printed and abort the program quietly.
func main() {
	file, err := os.Open("hello.txt.gz")
	// Fixed: the check was `err != err`, which is always false, so a
	// missing file slipped past and crashed gzip.NewReader.
	if err != nil {
		f.Println(err)
		return
	}
	defer file.Close()

	r, err := gzip.NewReader(file)
	if err != nil {
		f.Println(err)
		return
	}
	defer r.Close()

	b, err := ioutil.ReadAll(r)
	if err != nil {
		f.Println(err)
		return
	}
	f.Println(string(b))
}
|
package main
import (
"fmt"
"strconv"
)
// Parse demonstrates the strconv.Parse* family, which converts strings to
// typed values.
func Parse() {
	// ParseBool accepts 1, 0, t, f, T, F, true, false, True, False, TRUE,
	// FALSE; anything else is an error.
	v := "true"
	if s, err := strconv.ParseBool(v); err == nil {
		fmt.Printf("%T, %v\n", s, s) // bool, true
	}
	// ParseInt returns the integer value of a string; a sign is accepted.
	// base is the radix (2..36); base 0 infers it from the prefix ("0x" is
	// hex, "0" is octal, otherwise decimal).
	// bitSize is the integer type the result must fit without overflow:
	// 0, 8, 16, 32, 64 mean int, int8, int16, int32, int64.
	v32 := "-354634382"
	if s, err := strconv.ParseInt(v32, 10, 32); err == nil {
		fmt.Printf("%T, %v\n", s, s) // int64, -354634382
	}
	v64 := "-3546343826724305832"
	if s, err := strconv.ParseInt(v64, 10, 64); err == nil {
		fmt.Printf("%T, %v\n", s, s) // int64, -3546343826724305832
	}
	// ParseUint is like ParseInt but rejects signs; it is for unsigned
	// integers. Fixed: this example previously called ParseInt,
	// contradicting its own comment.
	vu32 := "354634382"
	if s, err := strconv.ParseUint(vu32, 10, 32); err == nil {
		fmt.Printf("%T, %v\n", s, s) // uint64, 354634382
	}
	// ParseFloat returns the floating-point value of a string.
	// Accepted special forms include nan, NaN, inf, +Inf, -Inf, -0, +0, ...
	f := "3.1415926535"
	if s, err := strconv.ParseFloat(f, 32); err == nil {
		fmt.Printf("%T, %v\n", s, s) // float64, 3.1415927410125732
	}
}
// Format demonstrates the strconv.Format* family, which renders typed
// values as strings.
func Format() {
	n := int64(-42)
	dec := strconv.FormatInt(n, 10)
	fmt.Printf("%T, %v\n", dec, dec) // string, -42
	hex := strconv.FormatInt(n, 16)
	fmt.Printf("%T, %v\n", hex, hex) // string, -2a
	pi := 3.1415926535
	short := strconv.FormatFloat(pi, 'e', -1, 32)
	fmt.Printf("%T, %v\n", short, short) // string, 3.1415927e+00
	long := strconv.FormatFloat(pi, 'E', -1, 64)
	fmt.Printf("%T, %v\n", long, long) // string, 3.1415926535E+00
	truth := true
	asText := strconv.FormatBool(truth)
	fmt.Printf("%T, %v\n", asText, asText) // string, true
}
// Append demonstrates the strconv.Append* family, which appends formatted
// values to an existing byte slice without intermediate strings.
func Append() {
	b := []byte("bool:")
	b = strconv.AppendBool(b, true)
	fmt.Println(string(b)) // bool:true
	b10 := []byte("int (base 10):")
	b10 = strconv.AppendInt(b10, -42, 10)
	fmt.Println(string(b10)) // int (base 10):-42
	b16 := []byte("int (base 16):")
	b16 = strconv.AppendInt(b16, -42, 16)
	fmt.Println(string(b16)) // int (base 16):-2a
	b32 := []byte("float32:")
	b32 = strconv.AppendFloat(b32, 3.1415926535, 'e', -1, 32)
	fmt.Println(string(b32)) // float32:3.1415927e+00
	b64 := []byte("float64:")
	b64 = strconv.AppendFloat(b64, 3.1415926535, 'E', -1, 64)
	fmt.Println(string(b64)) // float64:3.1415926535E+00
	q := []byte("quote:")
	// Fixed: the result was previously assigned back to the unrelated
	// variable b; append to q like every other example. Output is unchanged.
	q = strconv.AppendQuote(q, `"Fran & Freddie's Diner"`)
	fmt.Println(string(q)) // quote:"\"Fran & Freddie's Diner\""
}
// main demonstrates the basic strconv conversions, then runs the Parse,
// Format, and Append demos.
func main() {
	// Atoi: string to int.
	v := "10"
	if s, err := strconv.Atoi(v); err == nil {
		fmt.Printf("%T, %v \n", s, s)
	}
	// Itoa: int to string.
	i := 10
	s := strconv.Itoa(i)
	fmt.Printf("%T, %v \n", s, s)
	Parse()
	Format()
	Append()
}
|
/**
* Copyright (C) 2019, Xiongfa Li.
* All right reserved.
* @author xiongfa.li
* @version V1.0
* Description:
*/
package util
import "strings"
// AddParam appends the key=value pairs in param to url as query
// parameters. A "?" is added when the URL has none; when the URL already
// carries a query, new parameters are joined with "&" (the original glued
// them on with no separator). Pair order follows map iteration and is
// therefore unspecified. Values are appended verbatim — callers must
// URL-encode them if needed.
func AddParam(url string, param map[string]string) string {
	if len(param) == 0 {
		return url
	}
	if strings.LastIndex(url, "?") == -1 {
		url += "?"
	} else if !strings.HasSuffix(url, "?") && !strings.HasSuffix(url, "&") {
		// Existing query: separate the new parameters from it.
		url += "&"
	}
	size := len(param)
	for k, v := range param {
		url += k + "=" + v
		size--
		if size != 0 {
			url += "&"
		}
	}
	return url
}
// AddFragment appends a "#fragmentKey=fragmentValue" fragment to url.
// When fragmentValue is empty, only "#fragmentKey" is appended (no "=").
func AddFragment(url string, fragmentKey string, fragmentValue string) string {
	sep := ""
	if fragmentValue != "" {
		sep = "="
	}
	return url + "#" + fragmentKey + sep + fragmentValue
}
|
package main
import "fmt"
// changeVal overwrites the string str points to; the caller's variable is
// modified in place.
func changeVal(str *string) {
	*str = "change new"
}
//func changeVal2(str string){
// str = "change new"
//}
// main demonstrates pass-by-pointer: changeVal mutates the caller's
// variable through its address.
func main() {
	// & => takes the address of (a pointer to) a variable's memory
	// * => dereferences a pointer, i.e. follows it to the value
	toChange := "coba"
	fmt.Println(toChange)
	changeVal(&toChange)
	fmt.Println(toChange)
	//changeVal2(toChange)
	/*
		x := 7  // allocate a fresh memory location initialized to 7
		y := &x // y is the address of / a reference to x's memory location
		fmt.Println(x, y)
		*y = 11 // writes through y into x's memory location
		fmt.Println(x, y)
	*/
}
|
package rstreams
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/batchcorp/plumber-schemas/build/go/protos/args"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber/validate"
"github.com/batchcorp/plumber/types"
)
// Ginkgo suite for the Redis Streams backend: covers the metadata methods
// (Name, Test) and validateBaseConnOpts' failure modes.
var _ = Describe("Redis Streams Backend", func() {
	Context("Name", func() {
		It("Returns name", func() {
			Expect((&RedisStreams{}).Name()).To(Equal(BackendName))
		})
	})
	Context("Close", func() {
		// Can't test due to lack of interface
	})
	Context("Test", func() {
		It("returns not implemented error", func() {
			Expect((&RedisStreams{}).Test(nil)).To(Equal(types.NotImplementedErr))
		})
	})
	// NOTE(review): duplicate of the Context("Close") above — consider
	// removing one of them.
	Context("Close", func() {
		// Unable to test as Close() is not present in the interface
	})
	Context("validateBaseConnOpts", func() {
		var connOpts *opts.ConnectionOptions
		// Start each spec from fully-populated, valid options; individual
		// specs then blank out pieces to exercise specific failures.
		BeforeEach(func() {
			connOpts = &opts.ConnectionOptions{
				Conn: &opts.ConnectionOptions_RedisStreams{
					RedisStreams: &args.RedisStreamsConn{
						Username: "test",
						Password: "test",
					},
				},
			}
		})
		It("validates conn presence", func() {
			err := validateBaseConnOpts(nil)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrMissingConnOpts))
		})
		It("validates conn config", func() {
			connOpts = &opts.ConnectionOptions{}
			err := validateBaseConnOpts(connOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrMissingConnCfg))
		})
		It("validates RedisStreams presence", func() {
			connOpts = &opts.ConnectionOptions{
				Conn: &opts.ConnectionOptions_RedisStreams{
					RedisStreams: nil,
				},
			}
			err := validateBaseConnOpts(connOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrMissingConnArgs))
		})
		It("validates username and password", func() {
			// Password can be specified without username,
			// but password must be specified if username is specified
			connOpts.GetRedisStreams().Password = ""
			err := validateBaseConnOpts(connOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(ErrMissingPassword))
		})
	})
})
|
package backends
// GolangConfig extends ConfigCommon with accessors for the Go package
// name.
type GolangConfig interface {
	ConfigCommon
	PackageName() string
	SetPackageName(val string)
}
// GolangConfigImpl stores the Go package name for generated output; it
// provides the PackageName/SetPackageName accessors of GolangConfig.
type GolangConfigImpl struct {
	packageName string
}

// Init resets the configuration to its default package name, "abnf".
func (c *GolangConfigImpl) Init() {
	c.packageName = "abnf"
}

// PackageName returns the configured package name.
func (c *GolangConfigImpl) PackageName() string {
	return c.packageName
}

// SetPackageName replaces the configured package name.
func (c *GolangConfigImpl) SetPackageName(val string) {
	c.packageName = val
}
// NewGolangConfig builds a backend Config with the switch indent level set
// to 0 (presumably suppressing extra indentation of switch bodies — TODO
// confirm against Config.Indent's semantics).
// NOTE(review): despite the name, this returns the generic *Config from
// NewConfig(), not a GolangConfig/GolangConfigImpl — confirm intended.
func NewGolangConfig() *Config {
	ret := NewConfig()
	ret.Indent().Switch = 0
	return ret
}
|
package myapp
import (
"time"
)
//Payment represents Model Payment: a single payment recorded against a
// subscription.
type Payment struct {
	PaymentID      int `sql:"AUTO_INCREMENT" gorm:"primary_key"` // auto-incremented primary key
	SubscriptionID uint // subscription this payment belongs to
	Amount         float64
	DatePaid       time.Time `sql:"DEFAULT:current_timestamp"` // defaults to the current timestamp in SQL
}
|
/*
Copyright 2019 Dmitry Kolesnikov, All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package emitter
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
µ "github.com/fogfish/gouldian/v2"
)
// Send emits data as the HTTP response body. Strings, byte slices,
// buffers, and readers are written through as-is; any other value is
// serialized according to the Content-Type header (JSON by default, see
// encode). Unless the response uses chunked Transfer-Encoding, the
// Content-Length header is set to the body size.
func Send(data any) µ.Result {
	return func(out *µ.Output) error {
		chunked := out.GetHeader(string(TransferEncoding)) == "chunked"
		content := out.GetHeader(string(ContentType))
		switch stream := data.(type) {
		case string:
			if !chunked {
				out.SetHeader(string(ContentLength), strconv.Itoa(len(stream)))
			}
			out.Body = stream
			return nil
		case *strings.Reader:
			// Len is taken before ReadAll drains the reader.
			if !chunked {
				out.SetHeader(string(ContentLength), strconv.Itoa(stream.Len()))
			}
			val, err := io.ReadAll(stream)
			if err != nil {
				return err
			}
			out.Body = string(val)
		case []byte:
			if !chunked {
				out.SetHeader(string(ContentLength), strconv.Itoa(len(stream)))
			}
			out.Body = string(stream)
		case *bytes.Buffer:
			if !chunked {
				out.SetHeader(string(ContentLength), strconv.Itoa(stream.Len()))
			}
			val, err := io.ReadAll(stream)
			if err != nil {
				return err
			}
			out.Body = string(val)
		case *bytes.Reader:
			if !chunked {
				out.SetHeader(string(ContentLength), strconv.Itoa(stream.Len()))
			}
			val, err := io.ReadAll(stream)
			if err != nil {
				return err
			}
			out.Body = string(val)
		case io.Reader:
			// Generic reader: the length is only known after draining it.
			val, err := io.ReadAll(stream)
			if err != nil {
				return err
			}
			if !chunked {
				out.SetHeader(string(ContentLength), strconv.Itoa(len(val)))
			}
			out.Body = string(val)
		default:
			val, err := encode(content, data)
			if err != nil {
				out.Status = http.StatusInternalServerError
				// Fixed: report the type of the value that failed to encode;
				// previously %T was applied to the (nil) output buffer, so the
				// message always said []uint8.
				out.SetIssue(fmt.Errorf("serialization is failed for <%T>", data))
				return nil
			}
			if !chunked {
				out.SetHeader(string(ContentLength), strconv.Itoa(len(val)))
			}
			out.Body = string(val)
		}
		return nil
	}
}
// encode serializes data according to the response content type.
// JSON-flavored content types use JSON, form-encoded types use
// x-www-form-urlencoded, and anything else falls back to JSON.
func encode(content string, data any) ([]byte, error) {
	// "application/json" and other variants
	if strings.Contains(content, "json") {
		return encodeJSON(data)
	}
	// "application/x-www-form-urlencoded"
	if strings.Contains(content, "www-form") {
		return encodeForm(data)
	}
	return encodeJSON(data)
}
// encodeJSON marshals data as JSON.
func encodeJSON(data interface{}) ([]byte, error) {
	// The original bound the result to a local variable named `json`,
	// shadowing the encoding/json package; return the call directly.
	return json.Marshal(data)
}
// encodeForm serializes data as application/x-www-form-urlencoded by
// round-tripping it through JSON into a flat string-to-string map; the
// keys are therefore emitted in sorted order (url.Values.Encode).
func encodeForm(data interface{}) ([]byte, error) {
	bin, err := json.Marshal(data)
	if err != nil {
		return nil, err
	}
	var flat map[string]string
	if err := json.Unmarshal(bin, &flat); err != nil {
		// Non-flat values (arrays, nested objects, ...) cannot be form-encoded.
		return nil, fmt.Errorf("encode application/x-www-form-urlencoded: %w", err)
	}
	form := make(url.Values, len(flat))
	for key, val := range flat {
		form.Set(key, val)
	}
	return []byte(form.Encode()), nil
}
// Error appends Issue, RFC 7807: Problem Details for HTTP APIs.
// The failure and any optional titles are passed through to SetIssue.
func Error(failure error, title ...string) µ.Result {
	return func(out *µ.Output) error {
		out.SetIssue(failure, title...)
		return nil
	}
}
|
package trees
import (
"fmt"
"strings"
"github.com/Nv7-Github/Nv7Haven/eod/types"
)
// Tree calculator

// Tree accumulates the numbered recipe lines for an element tree in two
// parallel renderings.
type Tree struct {
	text   *strings.Builder       // markdown rendering (element names in **bold**)
	rawTxt *strings.Builder       // plain-text rendering
	calced map[string]types.Empty // already-emitted elements, keyed by lowercased name
	num    int                    // next line number to emit
	dat    types.ServerData       // element database used to resolve parents
}
// AddElem recursively emits the recipe line for elem, after first emitting
// all of its ancestors. It returns (false, name) for the first element not
// found in the server data, otherwise (true, ""). Elements already emitted
// (tracked case-insensitively in t.calced) are skipped.
func (t *Tree) AddElem(elem string) (bool, string) {
	_, exists := t.calced[strings.ToLower(elem)]
	if !exists {
		el, res := t.dat.GetElement(elem)
		if !res.Exists {
			return false, elem
		}
		// A single parent is doubled into "parent + parent".
		// NOTE(review): presumably combinations are stored pairwise — confirm.
		if len(el.Parents) == 1 {
			el.Parents = append(el.Parents, el.Parents[0])
		}
		// Emit every (non-blank) ancestor before this element.
		for _, parent := range el.Parents {
			if len(strings.TrimSpace(parent)) == 0 {
				continue
			}
			suc, msg := t.AddElem(parent)
			if !suc {
				return false, msg
			}
		}
		// Build the "%d. %s + %s + ..." format string for this element's
		// combination line, resolving each parent's canonical name.
		perf := &strings.Builder{}
		perf.WriteString("%d. ")
		params := make([]interface{}, len(el.Parents))
		for i, val := range el.Parents {
			if i == 0 {
				perf.WriteString("%s")
			} else {
				perf.WriteString(" + %s")
			}
			el, _ := t.dat.GetElement(val)
			params[i] = interface{}(el.Name)
		}
		// Prepend the running line number; append the element's own name.
		params = append([]interface{}{t.num}, params...)
		params = append(params, el.Name)
		// Base elements (fewer than two parents) get no line of their own.
		if len(el.Parents) >= 2 {
			p := perf.String()
			fmt.Fprintf(t.text, p+" = **%s**\n", params...)
			fmt.Fprintf(t.rawTxt, p+" = %s\n", params...)
			t.num++
		}
		t.calced[strings.ToLower(elem)] = types.Empty{}
	}
	return true, ""
}
// Tree calculation utilities

// CalcTree renders the full recipe tree for a single element. It returns
// the rendered text, whether the walk succeeded, and (on failure) the name
// of the missing element. When the markdown form exceeds 2000 characters,
// the plain-text form is returned instead.
func CalcTree(dat types.ServerData, elem string) (string, bool, string) {
	tree := Tree{
		text:   &strings.Builder{},
		rawTxt: &strings.Builder{},
		calced: make(map[string]types.Empty),
		num:    1,
		dat:    dat,
	}
	ok, missing := tree.AddElem(elem)
	out := tree.text.String()
	if len(out) > 2000 {
		out = tree.rawTxt.String()
	}
	return out, ok, missing
}
// CalcTreeCat renders the combined recipe tree for every element in a
// category container. It stops at the first missing element, returning
// ("", false, name); otherwise it returns the rendered text (plain-text
// form when the markdown form exceeds 2000 characters) and (true, "").
func CalcTreeCat(dat types.ServerData, elems types.Container) (string, bool, string) {
	tree := Tree{
		text:   &strings.Builder{},
		rawTxt: &strings.Builder{},
		calced: make(map[string]types.Empty),
		num:    1,
		dat:    dat,
	}
	for elem := range elems {
		if ok, missing := tree.AddElem(elem); !ok {
			return "", false, missing
		}
	}
	out := tree.text.String()
	if len(out) > 2000 {
		return tree.rawTxt.String(), true, ""
	}
	return out, true, ""
}
|
package report
import (
"strings"
"fmt"
"time"
)
// CLIReporter prints benchmark reports to stdout.
type CLIReporter struct {
}

// NewCLIReporter returns a new stdout reporter.
func NewCLIReporter() *CLIReporter {
	return &CLIReporter{}
}
// AddReport prints one benchmark section: a heading with run parameters,
// a latency percentile table, and a requests-per-second footer. It always
// returns nil.
func (r *CLIReporter) AddReport(options ReporterOptions) error {
	// Heading printing
	fmt.Printf("====== %s ======\n", strings.ToUpper(options.Name))
	fmt.Printf(" %d requests completed in %s\n", options.Requests, options.Elapsed)
	fmt.Printf(" %d parallel clients\n", options.Clients)
	fmt.Printf(" keep alive: %t\n", options.Keepalive)
	fmt.Println()
	// Printing timings for requests range
	medians := getMedians(options.Timings, options.Requests)
	if len(medians) > 0 {
		for _, r := range medians {
			// Percentiles under 1% are shown as a raw request count instead.
			if r.p < 1.0 {
				fmt.Printf("<1%%(%d) `<=` %d milliseconds\n", r.r, r.t)
			} else {
				fmt.Printf("%.2f%% `<=` %d milliseconds\n", r.p, r.t)
			}
		}
		fmt.Println()
	}
	// Printing footer: overall throughput for the run.
	rps := float64(options.Requests) / (float64(options.Elapsed) / float64(time.Second))
	fmt.Printf("%.2f requests per second\n", rps)
	fmt.Println()
	return nil
}
// Footer prints the closing banner after all reports. It always returns
// nil.
func (r *CLIReporter) Footer() error {
	for _, line := range []string{"====== END ======", "Benchmark finished", ""} {
		fmt.Println(line)
	}
	return nil
}
|
package heap
import (
"testing"
)
// TestCreateHeap builds a heap from a fixed slice, pops the top element,
// and logs each state. It only exercises the code path — there are no
// assertions.
func TestCreateHeap(t *testing.T) {
	//var data []int
	//for i := 0; i < 10; i++ {
	//	data = append(data, randomdata.Number(1, 100))
	//}
	data := []int{18, 66, 50, 49, 96, 44, 25, 72, 17, 54}
	t.Log("origin: ", data)
	heap := CreateHeap(data)
	t.Log(heap)
	t.Log(heap.Pop())
	t.Log(heap)
}
|
package simple_goroutine_pool
import (
"fmt"
"sync"
"time"
)
// GoroutinePool is a simple worker pool with a core size, a maximum size,
// and a bounded task queue.
type GoroutinePool struct {
	mu           sync.Mutex    //mu guards compound state operations (see atomicOper)
	coreSize     int32         //coreSize number of core worker goroutines
	maxSize      int32         //maxSize maximum number of worker goroutines
	queue        chan Task     //queue pending task queue
	duration     time.Duration //duration how long an idle worker waits before exiting
	status       int32         //status pool state: 0 init, 1 running, 2 shutdown, 4 stop (NOTE(review): ShutdownNow actually sets 3 — confirm)
	size         int32         //size current number of worker goroutines
	maxQueueSize int32         //maxQueueSize capacity of the task queue
	reject       func()        //reject callback invoked after a task is dropped
}
//NewGoroutinePool constructs a new pool.
// core: workers kept alive; max: upper bound on workers; queueSize: task
// queue capacity; duration: idle timeout after which surplus workers exit;
// rejectHandler: invoked when a task is dropped because the pool is full.
func NewGoroutinePool(core int32, max int32, queueSize int32,
	duration time.Duration, rejectHandler func()) *GoroutinePool {
	return &GoroutinePool{
		mu:           sync.Mutex{},
		coreSize:     core,
		maxSize:      max,
		maxQueueSize: queueSize,
		queue:        make(chan Task, queueSize),
		duration:     duration,
		reject:       rejectHandler,
	}
}
//atomicOper runs f while holding the pool mutex and returns its result,
// making compound check-then-update sequences atomic.
func (pool *GoroutinePool) atomicOper(f func() bool) bool {
	pool.mu.Lock()
	defer pool.mu.Unlock()
	return f()
}
//Start transitions a freshly constructed pool to running. It must be
// called exactly once before any Put; calling it on an already-started
// pool panics.
// NOTE(review): status is read and written here without holding mu, while
// other goroutines may mutate it — confirm callers serialize Start.
func (pool *GoroutinePool) Start() {
	if pool.status != 0 {
		panic("GoroutinePool has started!")
	}
	pool.status = 1
}
//Put submits a task to the pool and reports whether it was accepted.
// It panics unless the pool is running (status 1).
func (pool *GoroutinePool) Put(t Task) bool {
	if pool.status != 1 {
		err := fmt.Sprintf("GroutinePool status: %d\n", pool.status)
		panic(err)
	}
	return pool.atomicOper(func() bool {
		// Below core size: spawn a new worker seeded with this task.
		if pool.size < pool.coreSize {
			pool.size++
			go pool.run(t)
			return true
		} else if len(pool.queue) < int(pool.maxQueueSize) {
			// At/above core size and the queue has room: enqueue the task.
			pool.queue <- t
			return true
		} else if pool.size < pool.maxSize {
			// Queue full but below the maximum: grow the pool.
			pool.size++
			go pool.run(t)
			return true
		} else {
			// Pool saturated: drop the task and notify via reject.
			pool.reject()
			return false
		}
	})
}
// run is the worker loop. It executes the seed task t (if any), then keeps
// pulling tasks from the queue while the pool is running or shutting down,
// exiting when the worker times out idle and is surplus to requirements.
func (pool *GoroutinePool) run(t Task) {
	if t != nil {
		t.Run()
	}
loop:
	for pool.status == 1 || pool.status == 2 {
		select {
		case t := <-pool.queue:
			t.Run()
		case <-time.After(pool.duration):
			// No task arrived within the idle window: decide (atomically)
			// whether this worker's lifetime should end.
			if pool.atomicOper(func() bool {
				if pool.size > pool.coreSize {
					pool.size--
					return true
				} else if pool.status == 2 {
					// Graceful shutdown: keep the worker only while tasks
					// remain queued; otherwise let it exit.
					if len(pool.queue) > 0 {
						return false
					} else {
						pool.size--
						return true
					}
				} else {
					return false
				}
			}) {
				break loop
			}
		}
	}
}
// Shutdown puts the pool into graceful shutdown: workers drain the queue
// before exiting.
func (pool *GoroutinePool) Shutdown() {
	pool.status = 2
}

// ShutdownNow stops the pool immediately; the worker loop exits without
// draining the queue.
// NOTE(review): this sets status 3, while the struct comment documents 4
// as the stopped state — confirm the intended value.
func (pool *GoroutinePool) ShutdownNow() {
	pool.status = 3
}

// GetStatus returns the pool status code.
func (pool *GoroutinePool) GetStatus() int32 {
	return pool.status
}

// GetQueueSize returns the number of queued tasks.
func (pool *GoroutinePool) GetQueueSize() int32 {
	return int32(len(pool.queue))
}

// GetPoolSize returns the current number of workers.
func (pool *GoroutinePool) GetPoolSize() int32 {
	return pool.size
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"database/sql"
_ "github.com/go-sql-driver/mysql"
)
// DData is the raw import format: word mapped to its explanations.
type DData map[string][]string

// Dict is one dictionary record: a word plus its explanations.
type Dict struct {
	Word    string   `json:"word"`
	Explain []string `json:"explain"`
}

// Dictionary is a collection of Dict records.
type Dictionary []Dict

// port converts the raw word->explanations map into a slice of Dict
// records, one per word (order follows map iteration).
func port(d map[string][]string) []Dict {
	records := make([]Dict, 0, len(d))
	for word, explanations := range d {
		records = append(records, Dict{Word: word, Explain: explanations})
	}
	return records
}
// createDB inserts the imported dictionary into the wordsdb.dictionary
// table. Each row stores the word plus up to six explanation columns
// (missing ones are blank; explanations beyond six are dropped). The whole
// load now runs inside a single committed transaction. Any failure aborts
// the process via log.Fatalf.
func createDB(d []Dict) {
	// Start sql client
	fmt.Println("Connecting to mySQL")
	// Open up our database connection.
	connString := "dictionaryuser:Password10@tcp(127.0.0.1:3306)/wordsdb?charset=utf8mb4"
	db, err := sql.Open("mysql", connString)
	if err != nil {
		log.Fatalf("Cannot open connection: %v\n", err)
	}
	fmt.Println("opened connection")
	// defer the close till after the main function has finished
	defer db.Close()

	// test the link
	if err = db.Ping(); err != nil {
		log.Fatalf("Not connected to mySQL: %v\n", err)
	}
	fmt.Println("Connected to mySQL")

	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	// The rollback is a no-op once the transaction has been committed.
	defer tx.Rollback()

	fields := "word,explain1,explain2,explain3,explain4,explain5,explain6"
	numV := "?, ?, ?, ?, ?, ?, ?"
	query := fmt.Sprintf("INSERT INTO dictionary(%s) VALUES(%s)", fields, numV)
	// Fixed: the statement was prepared on db rather than tx, so every
	// insert ran outside the transaction (and the tx was never committed).
	stmt, err := tx.Prepare(query)
	if err != nil {
		log.Fatalf("Cannot prepare record into database: %v\n", err)
	}
	defer stmt.Close() // prepared statements hold server resources

	args := make([]string, 0, 6)
	for _, v := range d {
		// Pad the explanations out to exactly six column values.
		args = append(args, v.Explain...)
		for len(args) < 6 {
			args = append(args, "")
		}
		if _, err := stmt.Exec(v.Word, args[0], args[1], args[2], args[3], args[4], args[5]); err != nil {
			log.Fatalf("Cannot insert record into database: %v\n", err)
		}
		args = args[:0] // reuse the buffer for the next record
	}
	// Fixed: the original never committed, so the explicit transaction was
	// pointless.
	if err := tx.Commit(); err != nil {
		log.Fatalf("Cannot commit transaction: %v\n", err)
	}
}
// read loads the JSON dictionary file fName into d. It exits the process
// via log.Fatalf on any read or parse failure.
func read(fName string, d DData) {
	file, err := ioutil.ReadFile(fName)
	if err != nil {
		log.Fatalf("Could not open file: %v\n", err)
	}
	// Fixed: the unmarshal failure previously logged `err` (the read
	// error, which is nil at this point) instead of the JSON error.
	if jErr := json.Unmarshal(file, &d); jErr != nil {
		log.Fatalf("Could not unmarshal the file: %v\n", jErr)
	}
}
// main loads data/data.json, converts it to Dict records, and inserts
// them into the MySQL dictionary table.
func main() {
	dData := DData{}
	fName := "data/data.json"
	read(fName, dData)
	portedDB := port(dData)
	// for _, v := range portedDB {
	// 	fmt.Println(v)
	// }
	createDB(portedDB)
	fmt.Println("Database created")
}
|
// Copyright © 2018 Inanc Gumus
// Learn Go Programming Course
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
//
// For more tutorials : https://learngoprogramming.com
// In-person training : https://www.linkedin.com/in/inancgumus/
// Follow me on twitter: https://twitter.com/inancgumus
package main
// normal declaration use cases
// -----------------------------------------------------
// when you need a package scoped variable
// -----------------------------------------------------
// version := 0 // YOU CAN'T
var version int
// main illustrates when to prefer `var` over `:=`; every example is
// intentionally commented out.
func main() {
	// -----------------------------------------------------
	// if you don't know the initial value
	// -----------------------------------------------------
	// DON'T DO THIS:
	// score := 0
	// DO THIS:
	// var score int
	// -----------------------------------------------------
	// group variables for readability
	// -----------------------------------------------------
	// var (
	// 	video    string
	// 	duration int
	// 	current  int
	// )
}
|
package main
//Create your own type “person” which will have an underlying type of “struct” so that it can store the following data:
//first name
//last name
//favorite ice cream flavors
//Create two VALUES of TYPE person. Print out the values, ranging over the elements in the slice which stores the favorite flavors.
import (
"fmt"
)
// person holds a name and a list of favourite ice-cream flavours.
type person struct {
	first, last string
	favFlavours []string
}
// main builds two sample persons and prints one line per favourite
// flavour for each.
func main() {
	huginn := person{
		first: "Huginn",
		last:  "The Raven",
		favFlavours: []string{
			"blueberry",
			"chocolate",
		},
	}
	pooh := person{
		first: "Pooh",
		last:  "The Master",
		favFlavours: []string{
			"raspberry",
			"honey",
		},
	}
	for _, p := range []person{huginn, pooh} {
		for _, flavour := range p.favFlavours {
			fmt.Printf("%v %v loves ice-cream with %v flavour\n", p.first, p.last, flavour)
		}
	}
}
|
package main
import (
"errors"
"log"
"os"
"strings"
"github.com/evan-buss/openbooks/cli"
"github.com/spf13/cobra"
)
// config holds the CLI settings shared by all cli subcommands.
var config cli.Config

// init wires the cli command tree and registers its persistent flags.
func init() {
	rootCmd.AddCommand(cliCmd)
	cliCmd.AddCommand(downloadCmd)
	cliCmd.AddCommand(searchCmd)
	// The download directory defaults to the current working directory.
	cwd, err := os.Getwd()
	if err != nil {
		log.Fatalln("Could not get current working directory.", err)
	}
	cliCmd.PersistentFlags().StringVarP(&config.UserName, "name", "n", generateUserName(), "Use a name that isn't randomly generated. One word only.")
	cliCmd.PersistentFlags().StringVarP(&config.Dir, "directory", "d", cwd, "Directory where files are downloaded.")
	cliCmd.PersistentFlags().BoolVarP(&config.Log, "log", "l", false, "Whether or not to log IRC messages to an output file.")
	cliCmd.PersistentFlags().StringVarP(&config.Server, "server", "s", "irc.irchighway.net", "IRC server to connect to.")
}
// cliCmd is the parent "cli" subcommand: it starts openbooks in
// interactive terminal mode using the flags bound in init.
var cliCmd = &cobra.Command{
	Use:   "cli",
	Short: "Run openbooks from the terminal in CLI mode.",
	Run: func(cmd *cobra.Command, args []string) {
		cli.StartInteractive(config)
	},
}

// downloadCmd downloads a single file named by an IRC identifier
// (must begin with '!') and exits.
var downloadCmd = &cobra.Command{
	Use:     "download [flags] identifier",
	Short:   "Downloads a single file and exits.",
	Example: `openbooks cli download '!Oatmeal - F. Scott Fitzgerald - The Great Gatsby.epub'`,
	// Args validates that exactly one argument was given and that it
	// looks like a download identifier.
	Args: func(cmd *cobra.Command, args []string) error {
		err := cobra.ExactArgs(1)(cmd, args)
		if err != nil {
			return err
		}
		if !strings.HasPrefix(args[0], "!") {
			return errors.New("identifier must begin with '!'")
		}
		return nil
	},
	Run: func(cmd *cobra.Command, args []string) {
		cli.StartDownload(config, args[0])
	},
}

// searchCmd runs a single search query and exits.
var searchCmd = &cobra.Command{
	Use:   "search",
	Short: "Searches for a book and exits.",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		cli.StartSearch(config, args[0])
	},
}
|
package gov
import (
"encoding/json"
"fmt"
"strings"
"time"
sdk "github.com/irisnet/irishub/types"
"github.com/pkg/errors"
)
//-----------------------------------------------------------
// Proposal interface
// Proposal is the common contract for all governance proposal types:
// identity and metadata accessors, lifecycle timestamps, deposit and
// tally state, plus per-type validation and execution hooks.
type Proposal interface {
	GetProposalID() uint64
	SetProposalID(uint64)
	GetTitle() string
	SetTitle(string)
	GetDescription() string
	SetDescription(string)
	GetProposalType() ProposalKind
	SetProposalType(ProposalKind)
	GetStatus() ProposalStatus
	SetStatus(ProposalStatus)
	GetTallyResult() TallyResult
	SetTallyResult(TallyResult)
	GetSubmitTime() time.Time
	SetSubmitTime(time.Time)
	GetDepositEndTime() time.Time
	SetDepositEndTime(time.Time)
	GetTotalDeposit() sdk.Coins
	SetTotalDeposit(sdk.Coins)
	GetVotingStartTime() time.Time
	SetVotingStartTime(time.Time)
	GetVotingEndTime() time.Time
	SetVotingEndTime(time.Time)
	GetProposalLevel() ProposalLevel
	GetProposer() sdk.AccAddress
	String() string
	// Validate checks submission preconditions; verifyPropNum toggles the
	// per-level proposal-count check (see BasicProposal.Validate).
	Validate(ctx sdk.Context, gk Keeper, verifyPropNum bool) sdk.Error
	// Execute applies the proposal's effect once it has passed.
	Execute(ctx sdk.Context, gk Keeper) sdk.Error
}
//-----------------------------------------------------------
// Basic Proposals
// BasicProposal carries the state shared by every proposal kind; concrete
// proposal types embed it and add their own payload.
type BasicProposal struct {
	ProposalID      uint64         `json:"proposal_id"`       // ID of the proposal
	Title           string         `json:"title"`             // Title of the proposal
	Description     string         `json:"description"`       // Description of the proposal
	ProposalType    ProposalKind   `json:"proposal_type"`     // Type of proposal. Initial set {PlainTextProposal, SoftwareUpgradeProposal}
	Status          ProposalStatus `json:"proposal_status"`   // Status of the Proposal {Pending, Active, Passed, Rejected}
	TallyResult     TallyResult    `json:"tally_result"`      // Result of Tallys
	SubmitTime      time.Time      `json:"submit_time"`       // Time of the block where TxGovSubmitProposal was included
	DepositEndTime  time.Time      `json:"deposit_end_time"`  // Time that the Proposal would expire if deposit amount isn't met
	TotalDeposit    sdk.Coins      `json:"total_deposit"`     // Current deposit on this proposal. Initial value is set at InitialDeposit
	VotingStartTime time.Time      `json:"voting_start_time"` // Time of the block where MinDeposit was reached. -1 if MinDeposit is not reached
	VotingEndTime   time.Time      `json:"voting_end_time"`   // Time that the VotingPeriod for this proposal will end and votes will be tallied
	Proposer        sdk.AccAddress `json:"proposer"`          // Account that submitted the proposal
}
// String renders the proposal as a human-readable multi-line summary.
func (bp BasicProposal) String() string {
	return fmt.Sprintf(`Proposal %d:
  Title:              %s
  Type:               %s
  Proposer:           %s
  Status:             %s
  Submit Time:        %s
  Deposit End Time:   %s
  Total Deposit:      %s
  Voting Start Time:  %s
  Voting End Time:    %s
  Description:        %s`,
		bp.ProposalID, bp.Title, bp.ProposalType, bp.Proposer.String(),
		bp.Status, bp.SubmitTime, bp.DepositEndTime,
		bp.TotalDeposit.String(), bp.VotingStartTime, bp.VotingEndTime, bp.GetDescription(),
	)
}

// HumanString is like String but converts the deposit to main units via
// the supplied converter (and omits the proposer line).
func (bp BasicProposal) HumanString(converter sdk.CoinsConverter) string {
	return fmt.Sprintf(`Proposal %d:
  Title:              %s
  Type:               %s
  Status:             %s
  Submit Time:        %s
  Deposit End Time:   %s
  Total Deposit:      %s
  Voting Start Time:  %s
  Voting End Time:    %s
  Description:        %s`,
		bp.ProposalID, bp.Title, bp.ProposalType,
		bp.Status, bp.SubmitTime, bp.DepositEndTime,
		converter.ToMainUnit(bp.TotalDeposit), bp.VotingStartTime, bp.VotingEndTime, bp.GetDescription(),
	)
}
// Proposals is an array of proposal
type Proposals []Proposal
// nolint
// String renders the proposal list as a one-line-per-proposal table.
// Returns "[]" for an empty list.
func (p Proposals) String() string {
	if len(p) == 0 {
		return "[]"
	}
	// Build with strings.Builder instead of repeated string
	// concatenation, which is quadratic in the number of proposals.
	var b strings.Builder
	b.WriteString("ID - (Status) [Type] [TotalDeposit] Title\n")
	for _, prop := range p {
		fmt.Fprintf(&b, "%d - (%s) [%s] [%s] %s\n",
			prop.GetProposalID(), prop.GetStatus(),
			prop.GetProposalType(), prop.GetTotalDeposit().String(), prop.GetTitle())
	}
	return strings.TrimSpace(b.String())
}
// HumanString is like String but renders deposits in main units via the
// supplied converter. Returns "[]" for an empty list.
func (p Proposals) HumanString(converter sdk.CoinsConverter) string {
	if len(p) == 0 {
		return "[]"
	}
	// strings.Builder avoids quadratic string concatenation in the loop.
	var b strings.Builder
	b.WriteString("ID - (Status) [Type] [TotalDeposit] Title\n")
	for _, prop := range p {
		fmt.Fprintf(&b, "%d - (%s) [%s] [%s] %s\n",
			prop.GetProposalID(), prop.GetStatus(),
			prop.GetProposalType(), converter.ToMainUnit(prop.GetTotalDeposit()), prop.GetTitle())
	}
	return strings.TrimSpace(b.String())
}
// Compile-time check that *BasicProposal satisfies the Proposal interface.
var _ Proposal = (*BasicProposal)(nil)

// Trivial getters/setters for BasicProposal fields.
// nolint
func (bp BasicProposal) GetProposalID() uint64                      { return bp.ProposalID }
func (bp *BasicProposal) SetProposalID(proposalID uint64)           { bp.ProposalID = proposalID }
func (bp BasicProposal) GetTitle() string                           { return bp.Title }
func (bp *BasicProposal) SetTitle(title string)                     { bp.Title = title }
func (bp BasicProposal) GetDescription() string                     { return bp.Description }
func (bp *BasicProposal) SetDescription(description string)         { bp.Description = description }
func (bp BasicProposal) GetProposalType() ProposalKind              { return bp.ProposalType }
func (bp *BasicProposal) SetProposalType(proposalType ProposalKind) { bp.ProposalType = proposalType }
func (bp BasicProposal) GetStatus() ProposalStatus                  { return bp.Status }
func (bp *BasicProposal) SetStatus(status ProposalStatus)           { bp.Status = status }
func (bp BasicProposal) GetTallyResult() TallyResult                { return bp.TallyResult }
func (bp *BasicProposal) SetTallyResult(tallyResult TallyResult)    { bp.TallyResult = tallyResult }
func (bp BasicProposal) GetSubmitTime() time.Time                   { return bp.SubmitTime }
func (bp *BasicProposal) SetSubmitTime(submitTime time.Time)        { bp.SubmitTime = submitTime }
func (bp BasicProposal) GetDepositEndTime() time.Time               { return bp.DepositEndTime }
func (bp *BasicProposal) SetDepositEndTime(depositEndTime time.Time) {
	bp.DepositEndTime = depositEndTime
}
func (bp BasicProposal) GetTotalDeposit() sdk.Coins              { return bp.TotalDeposit }
func (bp *BasicProposal) SetTotalDeposit(totalDeposit sdk.Coins) { bp.TotalDeposit = totalDeposit }
func (bp BasicProposal) GetVotingStartTime() time.Time           { return bp.VotingStartTime }
func (bp *BasicProposal) SetVotingStartTime(votingStartTime time.Time) {
	bp.VotingStartTime = votingStartTime
}
func (bp BasicProposal) GetVotingEndTime() time.Time { return bp.VotingEndTime }
func (bp *BasicProposal) SetVotingEndTime(votingEndTime time.Time) {
	bp.VotingEndTime = votingEndTime
}

// The following are zero-value defaults; presumably overridden by the
// concrete proposal types that actually carry this data — TODO confirm.
func (bp BasicProposal) GetProtocolDefinition() sdk.ProtocolDefinition {
	return sdk.ProtocolDefinition{}
}
func (bp *BasicProposal) SetProtocolDefinition(sdk.ProtocolDefinition) {}
func (bp BasicProposal) GetTaxUsage() TaxUsage                         { return TaxUsage{} }
func (bp *BasicProposal) SetTaxUsage(taxUsage TaxUsage)                {}
// Validate checks whether a new proposal of this kind may be submitted.
// When verify is false the check is skipped entirely; otherwise the
// proposal is rejected if the maximum number of active proposals for its
// level has been reached.
func (bp *BasicProposal) Validate(ctx sdk.Context, k Keeper, verify bool) sdk.Error {
	if !verify {
		return nil
	}
	pLevel := bp.ProposalType.GetProposalLevel()
	if num, ok := k.HasReachedTheMaxProposalNum(ctx, pLevel); ok {
		return ErrMoreThanMaxProposal(k.codespace, num, pLevel.string())
	}
	return nil
}
// GetProposalLevel derives the proposal level from the proposal type.
func (bp *BasicProposal) GetProposalLevel() ProposalLevel {
	return bp.ProposalType.GetProposalLevel()
}

// GetProposer returns the account that submitted the proposal.
func (bp *BasicProposal) GetProposer() sdk.AccAddress {
	return bp.Proposer
}

// Execute always fails on the base type; concrete proposal types must
// supply their own implementation.
func (bp *BasicProposal) Execute(ctx sdk.Context, gk Keeper) sdk.Error {
	return sdk.MarshalResultErr(errors.New("BasicProposal can not execute 'Execute' method"))
}
//-----------------------------------------------------------
// ProposalQueue

// ProposalQueue holds the IDs of proposals queued for processing.
type ProposalQueue []uint64

//-----------------------------------------------------------
// ProposalKind

// ProposalKind represents a proposal type as a single byte.
type ProposalKind byte

// Known proposal kinds; 0x00 is the nil/unset value.
//nolint
const (
	ProposalTypeNil             ProposalKind = 0x00
	ProposalTypeParameter       ProposalKind = 0x01
	ProposalTypeSoftwareUpgrade ProposalKind = 0x02
	ProposalTypeSystemHalt      ProposalKind = 0x03
	ProposalTypeCommunityTaxUsage ProposalKind = 0x04
	ProposalTypePlainText       ProposalKind = 0x05
	ProposalTypeTokenAddition   ProposalKind = 0x06
)

// pTypeMap maps the canonical proposal-type name to its metadata; it is
// the single source of truth for name<->kind conversion and level lookup.
var pTypeMap = map[string]pTypeInfo{
	"PlainText":         createPlainTextInfo(),
	"Parameter":         createParameterInfo(),
	"SoftwareUpgrade":   createSoftwareUpgradeInfo(),
	"SystemHalt":        createSystemHaltInfo(),
	"CommunityTaxUsage": createCommunityTaxUsageInfo(),
	"TokenAddition":     createTokenAdditionInfo(),
}
// ProposalTypeFromString converts a proposal-type name into its
// ProposalKind. Unknown names yield 0xff and a descriptive error.
func ProposalTypeFromString(str string) (ProposalKind, error) {
	if info, ok := pTypeMap[str]; ok {
		return info.Type, nil
	}
	return ProposalKind(0xff), errors.Errorf("'%s' is not a valid proposal type", str)
}
// ValidProposalType reports whether pt is one of the registered
// proposal kinds.
func ValidProposalType(pt ProposalKind) bool {
	_, defined := pTypeMap[pt.String()]
	return defined
}
// Marshal encodes the kind as its single byte; needed for protobuf
// compatibility.
func (pt ProposalKind) Marshal() ([]byte, error) {
	return []byte{byte(pt)}, nil
}
// Unmarshal decodes a single-byte payload into the receiver; needed for
// protobuf compatibility. An empty payload is rejected with an error
// instead of panicking on the out-of-range index.
func (pt *ProposalKind) Unmarshal(data []byte) error {
	if len(data) == 0 {
		return errors.New("cannot unmarshal ProposalKind from empty data")
	}
	*pt = ProposalKind(data[0])
	return nil
}
// MarshalJSON encodes the kind as its JSON string name.
func (pt ProposalKind) MarshalJSON() ([]byte, error) {
	return json.Marshal(pt.String())
}
// UnmarshalJSON decodes a JSON string into a ProposalKind.
func (pt *ProposalKind) UnmarshalJSON(data []byte) error {
	var s string
	err := json.Unmarshal(data, &s)
	if err != nil {
		// Bug fix: the original returned nil here, silently swallowing the
		// decode error and leaving pt unset.
		return err
	}
	bz2, err := ProposalTypeFromString(s)
	if err != nil {
		return err
	}
	*pt = bz2
	return nil
}
// String returns the canonical name of the proposal kind, or "" when the
// kind is not registered in pTypeMap.
func (pt ProposalKind) String() string {
	for k, v := range pTypeMap {
		if v.Type == pt {
			return k
		}
	}
	return ""
}

// NewProposal constructs a concrete Proposal of this kind from the given
// content, using the factory registered for the kind.
func (pt ProposalKind) NewProposal(content Content) (Proposal, sdk.Error) {
	typInfo, ok := pTypeMap[pt.String()]
	if !ok {
		return nil, ErrInvalidProposalType(DefaultCodespace, pt)
	}
	return typInfo.createProposal(content), nil
}

// Format implements fmt.Formatter: %s prints the name, any other verb
// prints the raw byte value.
// nolint: errcheck
func (pt ProposalKind) Format(s fmt.State, verb rune) {
	switch verb {
	case 's':
		s.Write([]byte(pt.String()))
	default:
		// TODO: Do this conversion more directly
		s.Write([]byte(fmt.Sprintf("%v", byte(pt))))
	}
}

// GetProposalLevel looks up the governance level associated with the kind.
func (pt ProposalKind) GetProposalLevel() ProposalLevel {
	return pTypeMap[pt.String()].Level
}
//-----------------------------------------------------------
// ProposalStatus

// ProposalStatus represents a proposal's lifecycle state as a byte.
type ProposalStatus byte

// Known proposal statuses; 0x00 is the nil/unset value.
//nolint
const (
	StatusNil           ProposalStatus = 0x00
	StatusDepositPeriod ProposalStatus = 0x01
	StatusVotingPeriod  ProposalStatus = 0x02
	StatusPassed        ProposalStatus = 0x03
	StatusRejected      ProposalStatus = 0x04
)

// pStatusMap maps the canonical status name to its value; the single
// source of truth for name<->status conversion.
var pStatusMap = map[string]ProposalStatus{
	"DepositPeriod": StatusDepositPeriod,
	"VotingPeriod":  StatusVotingPeriod,
	"Passed":        StatusPassed,
	"Rejected":      StatusRejected,
}
// ProposalStatusFromString parses a status name into a ProposalStatus.
// Unknown names yield 0xff and a descriptive error.
func ProposalStatusFromString(str string) (ProposalStatus, error) {
	if status, ok := pStatusMap[str]; ok {
		return status, nil
	}
	return ProposalStatus(0xff), errors.Errorf("'%s' is not a valid proposal status", str)
}
// ValidProposalStatus reports whether status is one of the registered
// proposal statuses.
func ValidProposalStatus(status ProposalStatus) bool {
	_, defined := pStatusMap[status.String()]
	return defined
}
// Marshal encodes the status as its single byte; needed for protobuf
// compatibility.
func (status ProposalStatus) Marshal() ([]byte, error) {
	return []byte{byte(status)}, nil
}
// Unmarshal decodes a single-byte payload into the receiver; needed for
// protobuf compatibility. An empty payload is rejected with an error
// instead of panicking on the out-of-range index.
func (status *ProposalStatus) Unmarshal(data []byte) error {
	if len(data) == 0 {
		return errors.New("cannot unmarshal ProposalStatus from empty data")
	}
	*status = ProposalStatus(data[0])
	return nil
}
// MarshalJSON encodes the status as its JSON string name.
func (status ProposalStatus) MarshalJSON() ([]byte, error) {
	return json.Marshal(status.String())
}
// UnmarshalJSON decodes a JSON string into a ProposalStatus.
func (status *ProposalStatus) UnmarshalJSON(data []byte) error {
	var s string
	err := json.Unmarshal(data, &s)
	if err != nil {
		// Bug fix: the original returned nil here, silently swallowing the
		// decode error and leaving status unset.
		return err
	}
	bz2, err := ProposalStatusFromString(s)
	if err != nil {
		return err
	}
	*status = bz2
	return nil
}
// String returns the canonical name of the status, or "" when the status
// is not registered in pStatusMap.
func (status ProposalStatus) String() string {
	for k, v := range pStatusMap {
		if v == status {
			return k
		}
	}
	return ""
}

// Format implements fmt.Formatter: %s prints the name, any other verb
// prints the raw byte value.
// nolint: errcheck
func (status ProposalStatus) Format(s fmt.State, verb rune) {
	switch verb {
	case 's':
		s.Write([]byte(status.String()))
	default:
		// TODO: Do this conversion more directly
		s.Write([]byte(fmt.Sprintf("%v", byte(status))))
	}
}
//-----------------------------------------------------------
// Tally Results

// TallyResult aggregates the vote totals for a proposal, per option,
// plus the total voting power in the system at tally time.
type TallyResult struct {
	Yes               sdk.Dec `json:"yes"`
	Abstain           sdk.Dec `json:"abstain"`
	No                sdk.Dec `json:"no"`
	NoWithVeto        sdk.Dec `json:"no_with_veto"`
	SystemVotingPower sdk.Dec `json:"system_voting_power"`
}

// EmptyTallyResult returns a TallyResult with every field set to zero.
func EmptyTallyResult() TallyResult {
	return TallyResult{
		Yes:               sdk.ZeroDec(),
		Abstain:           sdk.ZeroDec(),
		No:                sdk.ZeroDec(),
		NoWithVeto:        sdk.ZeroDec(),
		SystemVotingPower: sdk.ZeroDec(),
	}
}

// Equals reports whether two tally results are field-wise equal.
func (tr TallyResult) Equals(resultB TallyResult) bool {
	return tr.Yes.Equal(resultB.Yes) &&
		tr.Abstain.Equal(resultB.Abstain) &&
		tr.No.Equal(resultB.No) &&
		tr.NoWithVeto.Equal(resultB.NoWithVeto) &&
		tr.SystemVotingPower.Equal(resultB.SystemVotingPower)
}

// String renders the tally result as a human-readable multi-line summary.
func (tr TallyResult) String() string {
	return fmt.Sprintf(`Tally Result:
  Yes:                %s
  Abstain:            %s
  No:                 %s
  NoWithVeto:         %s
  SystemVotingPower:  %s`, tr.Yes.String(), tr.Abstain.String(), tr.No.String(), tr.NoWithVeto.String(), tr.SystemVotingPower.String())
}
|
package postgres
import (
"database/sql"
"reflect"
"regexp"
"testing"
"time"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/kylegrantlucas/platform-exercise/models"
_ "github.com/lib/pq"
)
// TestCreateDatabase checks that CreateDatabase fails (returning a nil
// connection and an error) when given unreachable connection parameters.
func TestCreateDatabase(t *testing.T) {
	type args struct {
		host     string
		port     string
		user     string
		password string
		dbName   string
	}
	tests := []struct {
		name    string
		args    args
		want    *DatabaseConnection
		wantErr bool
	}{
		{
			name: "creating db with empty env var",
			args: args{
				user:     "test",
				password: "",
				host:     "bad", // intentionally unresolvable host
				port:     "1234",
				dbName:   "test",
			},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := CreateDatabase(tt.args.host, tt.args.port, tt.args.user, tt.args.password, tt.args.dbName)
			if (err != nil) != tt.wantErr {
				t.Errorf("CreateDatabase() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("CreateDatabase() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestDatabaseConnection_CreateUser verifies CreateUser against a mocked
// "create_user" query and compares the returned user to expectations.
func TestDatabaseConnection_CreateUser(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error building sqlmock: %v", err)
	}
	type fields struct {
		Connection *sql.DB
	}
	type args struct {
		email             string
		name              string
		plaintextPassword string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    models.User
		wantErr bool
	}{
		{
			name: "valid user create",
			fields: fields{
				Connection: db,
			},
			args: args{
				email:             "test@test.com",
				name:              "testy testerson",
				plaintextPassword: "completelytestpassword",
			},
			want: models.User{
				Email: "test@test.com",
				Name:  "testy testerson",
			},
		},
	}
	for _, tt := range tests {
		mock.ExpectQuery(regexp.QuoteMeta(queries["create_user"])).WillReturnRows(sqlmock.NewRows([]string{"uuid", "email", "name", "created_at", "updated_at"}).AddRow("abc", "test@test.com", "testy testerson", time.Now(), time.Now()))
		t.Run(tt.name, func(t *testing.T) {
			d := &DatabaseConnection{
				Connection: tt.fields.Connection,
			}
			got, err := d.CreateUser(tt.args.email, tt.args.name, tt.args.plaintextPassword)
			if (err != nil) != tt.wantErr {
				t.Errorf("DatabaseConnection.CreateUser() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// Timestamps and UUID are generated by the call; copy them into
			// the expectation so DeepEqual only checks the remaining fields.
			tt.want.CreatedAt = got.CreatedAt
			tt.want.UpdatedAt = got.UpdatedAt
			tt.want.UUID = got.UUID
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("DatabaseConnection.CreateUser() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestDatabaseConnection_UpdateUserByUUID verifies UpdateUserByUUID
// against a mocked update query and compares the returned user.
func TestDatabaseConnection_UpdateUserByUUID(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error building sqlmock: %v", err)
	}
	currentTime := time.Now()
	type fields struct {
		Connection *sql.DB
	}
	type args struct {
		uuid              string
		email             string
		name              string
		plaintextPassword string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    models.User
		wantErr bool
	}{
		{
			name: "valid user update",
			fields: fields{
				Connection: db,
			},
			args: args{
				uuid:              "abc",
				email:             "test@test.com",
				name:              "testy testerson",
				plaintextPassword: "completelytestpassword",
			},
			want: models.User{
				UUID:      "abc",
				Email:     "test@test.com",
				Name:      "testy testerson",
				CreatedAt: currentTime,
				UpdatedAt: currentTime,
			},
		},
	}
	for _, tt := range tests {
		// NOTE(review): this expectation inlines the SQL rather than using
		// the queries map like the other tests — verify it stays in sync
		// with the implementation's actual query.
		mock.ExpectQuery(regexp.QuoteMeta("update users set email=$1,name=$2,password=$3 where uuid=$4 AND deleted_at IS NULL returning uuid, email, name, created_at, updated_at;")).WillReturnRows(sqlmock.NewRows([]string{"uuid", "email", "name", "created_at", "updated_at"}).AddRow("abc", "test@test.com", "testy testerson", currentTime, currentTime))
		t.Run(tt.name, func(t *testing.T) {
			d := &DatabaseConnection{
				Connection: tt.fields.Connection,
			}
			got, err := d.UpdateUserByUUID(tt.args.uuid, tt.args.email, tt.args.name, tt.args.plaintextPassword)
			if (err != nil) != tt.wantErr {
				t.Errorf("DatabaseConnection.UpdateUserByUUID() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("DatabaseConnection.UpdateUserByUUID() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestDatabaseConnection_GetUserByEmail verifies GetUserByEmail against a
// mocked "get_user_by_email" query.
func TestDatabaseConnection_GetUserByEmail(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error building sqlmock: %v", err)
	}
	currentTime := time.Now()
	type fields struct {
		Connection *sql.DB
	}
	type args struct {
		email string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    models.User
		wantErr bool
	}{
		{
			name: "valid user create",
			fields: fields{
				Connection: db,
			},
			args: args{
				email: "test@test.com",
			},
			want: models.User{
				UUID:      "abc",
				Email:     "test@test.com",
				Name:      "testy testerson",
				CreatedAt: currentTime,
				UpdatedAt: currentTime,
				Password:  "abc",
			},
		},
	}
	for _, tt := range tests {
		mock.ExpectQuery(regexp.QuoteMeta(queries["get_user_by_email"])).WillReturnRows(sqlmock.NewRows([]string{"uuid", "email", "name", "created_at", "updated_at", "password"}).AddRow("abc", "test@test.com", "testy testerson", currentTime, currentTime, "abc"))
		t.Run(tt.name, func(t *testing.T) {
			d := &DatabaseConnection{
				Connection: tt.fields.Connection,
			}
			got, err := d.GetUserByEmail(tt.args.email)
			if (err != nil) != tt.wantErr {
				t.Errorf("DatabaseConnection.GetUserByEmail() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("DatabaseConnection.GetUserByEmail() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestDatabaseConnection_GetUserByUUID verifies GetUserByUUID against a
// mocked "get_user_by_uuid" query.
func TestDatabaseConnection_GetUserByUUID(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error building sqlmock: %v", err)
	}
	currentTime := time.Now()
	type fields struct {
		Connection *sql.DB
	}
	type args struct {
		uuid string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    models.User
		wantErr bool
	}{
		{
			name: "valid user create",
			fields: fields{
				Connection: db,
			},
			args: args{
				uuid: "abc",
			},
			want: models.User{
				UUID:      "abc",
				Email:     "test@test.com",
				Name:      "testy testerson",
				CreatedAt: currentTime,
				UpdatedAt: currentTime,
				Password:  "abc",
			},
		},
	}
	for _, tt := range tests {
		mock.ExpectQuery(regexp.QuoteMeta(queries["get_user_by_uuid"])).WillReturnRows(sqlmock.NewRows([]string{"uuid", "email", "name", "created_at", "updated_at", "password"}).AddRow("abc", "test@test.com", "testy testerson", currentTime, currentTime, "abc"))
		t.Run(tt.name, func(t *testing.T) {
			d := &DatabaseConnection{
				Connection: tt.fields.Connection,
			}
			got, err := d.GetUserByUUID(tt.args.uuid)
			if (err != nil) != tt.wantErr {
				t.Errorf("DatabaseConnection.GetUserByUUID() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("DatabaseConnection.GetUserByUUID() = %v, want %v", got, tt.want)
			}
		})
	}
}
func TestDatabaseConnection_SoftDeleteUserByUUID(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("error building sqlmock: %v", err)
}
currentTime := time.Now()
type fields struct {
Connection *sql.DB
}
type args struct {
uuid string
}
tests := []struct {
name string
fields fields
args args
want models.User
wantErr bool
}{
{
name: "valid user update",
fields: fields{
Connection: db,
},
args: args{
uuid: "abc",
},
want: models.User{
UUID: "abc",
Email: "test@test.com",
Name: "testy testerson",
CreatedAt: currentTime,
UpdatedAt: currentTime,
DeletedAt: ¤tTime,
},
},
}
for _, tt := range tests {
mock.ExpectQuery(regexp.QuoteMeta(queries["soft_delete_user_by_uuid"])).WillReturnRows(sqlmock.NewRows([]string{"uuid", "email", "name", "created_at", "updated_at", "deleted_at"}).AddRow("abc", "test@test.com", "testy testerson", currentTime, currentTime, currentTime))
t.Run(tt.name, func(t *testing.T) {
d := &DatabaseConnection{
Connection: tt.fields.Connection,
}
got, err := d.SoftDeleteUserByUUID(tt.args.uuid)
if (err != nil) != tt.wantErr {
t.Errorf("DatabaseConnection.SoftDeleteUserByUUID() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("DatabaseConnection.SoftDeleteUserByUUID() = %v, want %v", got, tt.want)
}
})
}
}
// TestDatabaseConnection_CreateSession verifies CreateSession against a
// mocked "create_session" query.
func TestDatabaseConnection_CreateSession(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error building sqlmock: %v", err)
	}
	currentTime := time.Now()
	type fields struct {
		Connection *sql.DB
	}
	type args struct {
		userUUID  string
		expiresAt time.Time
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    models.Session
		wantErr bool
	}{
		{
			name: "valid user update",
			fields: fields{
				Connection: db,
			},
			args: args{
				userUUID:  "abc",
				expiresAt: currentTime,
			},
			want: models.Session{
				UUID:      "abc",
				UserUUID:  "abc",
				CreatedAt: currentTime,
				ExpiresAt: currentTime,
			},
		},
	}
	for _, tt := range tests {
		mock.ExpectQuery(regexp.QuoteMeta(queries["create_session"])).WillReturnRows(sqlmock.NewRows([]string{"uuid", "user_uuid", "created_at", "expires_at"}).AddRow("abc", "abc", currentTime, currentTime))
		t.Run(tt.name, func(t *testing.T) {
			d := &DatabaseConnection{
				Connection: tt.fields.Connection,
			}
			got, err := d.CreateSession(tt.args.userUUID, tt.args.expiresAt)
			if (err != nil) != tt.wantErr {
				t.Errorf("DatabaseConnection.CreateSession() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("DatabaseConnection.CreateSession() = %v, want %v", got, tt.want)
			}
		})
	}
}
func TestDatabaseConnection_GetSessionByUUID(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("error building sqlmock: %v", err)
}
currentTime := time.Now()
type fields struct {
Connection *sql.DB
}
type args struct {
uuid string
}
tests := []struct {
name string
fields fields
args args
want models.Session
wantErr bool
}{
{
name: "valid user update",
fields: fields{
Connection: db,
},
args: args{
uuid: "abc",
},
want: models.Session{
UUID: "abc",
UserUUID: "abc",
CreatedAt: currentTime,
ExpiresAt: currentTime,
DeletedAt: ¤tTime,
},
},
}
for _, tt := range tests {
mock.ExpectQuery(regexp.QuoteMeta(queries["get_session_by_uuid"])).WillReturnRows(sqlmock.NewRows([]string{"uuid", "user_uuid", "created_at", "expires_at", "deleted_at"}).AddRow("abc", "abc", currentTime, currentTime, currentTime))
t.Run(tt.name, func(t *testing.T) {
d := &DatabaseConnection{
Connection: tt.fields.Connection,
}
got, err := d.GetSessionByUUID(tt.args.uuid)
if (err != nil) != tt.wantErr {
t.Errorf("DatabaseConnection.GetSessionByUUID() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("DatabaseConnection.GetSessionByUUID() = %v, want %v", got, tt.want)
}
})
}
}
// TestDatabaseConnection_SoftDeleteSessionByUUID verifies that deleting a
// session reports the number of rows affected by the mocked exec.
func TestDatabaseConnection_SoftDeleteSessionByUUID(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error building sqlmock: %v", err)
	}
	type fields struct {
		Connection *sql.DB
	}
	type args struct {
		uuid string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    int
		wantErr bool
	}{
		{
			name: "valid session delete",
			fields: fields{
				Connection: db,
			},
			args: args{
				uuid: "abc",
			},
			want: 1, // one row affected
		},
	}
	for _, tt := range tests {
		mock.ExpectExec(regexp.QuoteMeta(queries["soft_delete_session_by_uuid"])).WillReturnResult(sqlmock.NewResult(1, 1))
		t.Run(tt.name, func(t *testing.T) {
			d := &DatabaseConnection{
				Connection: tt.fields.Connection,
			}
			got, err := d.SoftDeleteSessionByUUID(tt.args.uuid)
			if (err != nil) != tt.wantErr {
				t.Errorf("DatabaseConnection.SoftDeleteSessionByUUID() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("DatabaseConnection.SoftDeleteSessionByUUID() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package common
// Config holds the application's command-line configuration. The struct
// tags follow the go-flags convention (short/long flag names, defaults,
// required) — presumably parsed by github.com/jessevdk/go-flags; confirm
// against the caller.
var Config struct {
	Auth0Secret string `short:"s" long:"auth-secret" description:"The secret from Auth0" required:"true"`
	ServerPort  int    `short:"p" long:"server-port" description:"The server port" default:"8000" required:"true"`
	// Database groups the database connection flags.
	Database struct {
		Address  string `long:"db-address" description:"The database address" default:"localhost" required:"true"`
		Username string `long:"db-user" description:"The database username" required:"true"`
		Password string `long:"db-password" description:"The database password" required:"true"`
		Name     string `long:"db-name" description:"The database name" required:"true"`
	}
}
|
package core
// Digital signal levels.
const (
	LOW = iota
	HIGH
)

// GPIO pin modes.
const (
	INPUT = iota
	OUTPUT
	PULL_OFF
	PULL_DOWN
	PULL_UP
	PWM_OUTPUT
	GPIO_CLOCK
	SOFT_PWM_OUTPUT
	SOFT_TONE_OUTPUT
	PWM_TONE_OUTPUT
)

// Bit order for shift operations.
const (
	LSBFIRST = iota
	MSBFIRST
)

// Interrupt edge-detection modes (0 is intentionally unused).
const (
	_ = iota
	CHANGE
	FALLING
	RISING
)

// Memory-mapping block size and the device/sysfs paths used to access
// the GPIO registers.
const (
	MMAP_BLOCK_SIZE = 4096
	DEV_GPIO_MEM    = "/dev/gpiomem"
	DEV_MEM         = "/dev/mem"
	SYS_SOC_RANGES  = "/sys/firmware/devicetree/base/soc/ranges"
)
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"errors"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
monitoringpb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/monitoring/monitoring_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring"
)
// MetricsScopeServer implements the gRPC interface for MetricsScope.
type MetricsScopeServer struct{}

// ProtoToMonitoringMetricsScopeMonitoredProjects converts a
// MetricsScopeMonitoredProjects object from its proto representation;
// a nil proto maps to a nil object.
func ProtoToMonitoringMetricsScopeMonitoredProjects(p *monitoringpb.MonitoringMetricsScopeMonitoredProjects) *monitoring.MetricsScopeMonitoredProjects {
	if p == nil {
		return nil
	}
	obj := &monitoring.MetricsScopeMonitoredProjects{
		Name:       dcl.StringOrNil(p.GetName()),
		CreateTime: dcl.StringOrNil(p.GetCreateTime()),
	}
	return obj
}
// ProtoToMetricsScope converts a MetricsScope resource from its proto
// representation, including its nested monitored projects.
func ProtoToMetricsScope(p *monitoringpb.MonitoringMetricsScope) *monitoring.MetricsScope {
	obj := &monitoring.MetricsScope{
		Name:       dcl.StringOrNil(p.GetName()),
		CreateTime: dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime: dcl.StringOrNil(p.GetUpdateTime()),
	}
	for _, r := range p.GetMonitoredProjects() {
		obj.MonitoredProjects = append(obj.MonitoredProjects, *ProtoToMonitoringMetricsScopeMonitoredProjects(r))
	}
	return obj
}

// MonitoringMetricsScopeMonitoredProjectsToProto converts a
// MetricsScopeMonitoredProjects object to its proto representation;
// a nil object maps to a nil proto.
func MonitoringMetricsScopeMonitoredProjectsToProto(o *monitoring.MetricsScopeMonitoredProjects) *monitoringpb.MonitoringMetricsScopeMonitoredProjects {
	if o == nil {
		return nil
	}
	p := &monitoringpb.MonitoringMetricsScopeMonitoredProjects{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	p.SetCreateTime(dcl.ValueOrEmptyString(o.CreateTime))
	return p
}

// MetricsScopeToProto converts a MetricsScope resource to its proto
// representation, including its nested monitored projects.
func MetricsScopeToProto(resource *monitoring.MetricsScope) *monitoringpb.MonitoringMetricsScope {
	p := &monitoringpb.MonitoringMetricsScope{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	sMonitoredProjects := make([]*monitoringpb.MonitoringMetricsScopeMonitoredProjects, len(resource.MonitoredProjects))
	for i, r := range resource.MonitoredProjects {
		sMonitoredProjects[i] = MonitoringMetricsScopeMonitoredProjectsToProto(&r)
	}
	p.SetMonitoredProjects(sMonitoredProjects)
	return p
}
// applyMetricsScope handles the gRPC request by passing it to the
// underlying MetricsScope Apply() method, converting to and from proto.
func (s *MetricsScopeServer) applyMetricsScope(ctx context.Context, c *monitoring.Client, request *monitoringpb.ApplyMonitoringMetricsScopeRequest) (*monitoringpb.MonitoringMetricsScope, error) {
	p := ProtoToMetricsScope(request.GetResource())
	res, err := c.ApplyMetricsScope(ctx, p)
	if err != nil {
		return nil, err
	}
	r := MetricsScopeToProto(res)
	return r, nil
}

// ApplyMonitoringMetricsScope builds a client from the request's service
// account file and delegates to applyMetricsScope.
func (s *MetricsScopeServer) ApplyMonitoringMetricsScope(ctx context.Context, request *monitoringpb.ApplyMonitoringMetricsScopeRequest) (*monitoringpb.MonitoringMetricsScope, error) {
	cl, err := createConfigMetricsScope(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyMetricsScope(ctx, cl, request)
}

// DeleteMonitoringMetricsScope always errors: MetricsScope has no delete
// endpoint.
func (s *MetricsScopeServer) DeleteMonitoringMetricsScope(ctx context.Context, request *monitoringpb.DeleteMonitoringMetricsScopeRequest) (*emptypb.Empty, error) {
	return nil, errors.New("no delete endpoint for MetricsScope")
}

// ListMonitoringMetricsScope is a no-op method because MetricsScope has no list method.
func (s *MetricsScopeServer) ListMonitoringMetricsScope(_ context.Context, _ *monitoringpb.ListMonitoringMetricsScopeRequest) (*monitoringpb.ListMonitoringMetricsScopeResponse, error) {
	return nil, nil
}

// createConfigMetricsScope builds a monitoring client authenticated with
// the given service-account credentials file.
func createConfigMetricsScope(ctx context.Context, service_account_file string) (*monitoring.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return monitoring.NewClient(conf), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.